Re: [PATCH 5/7] cbd: introduce cbd_backend

> +
> +struct cbd_backend_io {
> +	struct cbd_se		*se;
> +	u64			off;
> +	u32			len;
> +	struct bio		*bio;
> +	struct cbd_handler	*handler;
> +};
> +

why not use inline bvecs and avoid the bio page allocation for reasonably
sized requests, instead of performing the allocation for each request ?
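something along these lines (untested sketch, CBD_INLINE_BVECS and the
embedded bio member are made up for illustration) would let small requests
reuse a per-io inline bvec table instead of allocating pages every time :

#define CBD_INLINE_BVECS	4	/* hypothetical small-I/O threshold */

struct cbd_backend_io {
	struct cbd_se		*se;
	u64			off;
	u32			len;
	struct cbd_handler	*handler;
	struct bio_vec		bvecs[CBD_INLINE_BVECS];
	struct bio		bio;
};

/* for requests that fit, initialize the embedded bio in place */
bio_init(&backend_io->bio, cbdb->bdev, backend_io->bvecs,
	 CBD_INLINE_BVECS, opf);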

> +static inline void complete_cmd(struct cbd_handler *handler, u64 priv_data, int ret)
> +{
> +	struct cbd_ce *ce = get_compr_head(handler);
> +
> +	memset(ce, 0, sizeof(*ce));
> +	ce->priv_data = priv_data;
> +	ce->result = ret;
> +	CBDC_UPDATE_COMPR_HEAD(handler->channel_info->compr_head,
> +			       sizeof(struct cbd_ce),
> +			       handler->channel_info->compr_size);
> +
> +	cbdc_flush_ctrl(&handler->channel);
> +
> +	return;
> +}
> +
> +static void backend_bio_end(struct bio *bio)
> +{
> +	struct cbd_backend_io *backend_io = bio->bi_private;
> +	struct cbd_se *se = backend_io->se;
> +	struct cbd_handler *handler = backend_io->handler;
> +
> +	if (bio->bi_status == 0 &&
> +	    cbd_se_hdr_get_op(se->header.len_op) == CBD_OP_READ) {
> +		cbdc_copy_from_bio(&handler->channel, se->data_off, se->data_len, bio);
> +	}
> +
> +	complete_cmd(handler, se->priv_data, bio->bi_status);
> +
> +	bio_free_pages(bio);
> +	bio_put(bio);
> +	kfree(backend_io);
> +}
> +
> +static int cbd_bio_alloc_pages(struct bio *bio, size_t size, gfp_t gfp_mask)
> +{
> +	int ret = 0;
> +
> +        while (size) {
> +                struct page *page = alloc_pages(gfp_mask, 0);
> +                unsigned len = min_t(size_t, PAGE_SIZE, size);

the alloc_pages() call should be placed right next to the NULL check below it ..
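i.e. something like :

		struct page *page;
		unsigned int len;

		page = alloc_pages(gfp_mask, 0);
		if (!page) {
			pr_err("failed to alloc page");
			ret = -ENOMEM;
			break;
		}

		len = min_t(size_t, PAGE_SIZE, size);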

> +
> +                if (!page) {
> +			pr_err("failed to alloc page");
> +			ret = -ENOMEM;
> +			break;
> +		}
> +
> +		ret = bio_add_page(bio, page, len, 0);
> +                if (unlikely(ret != len)) {
> +                        __free_page(page);
> +			pr_err("failed to add page");
> +                        break;
> +                }
> +
> +                size -= len;
> +        }
> +
> +	if (size)
> +		bio_free_pages(bio);
> +	else
> +		ret = 0;
> +
> +        return ret;
> +}

code formatting seems to be broken for the above function (inconsistent
indentation), please check ..
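for reference, the same function with consistent indentation (no functional
change) :

static int cbd_bio_alloc_pages(struct bio *bio, size_t size, gfp_t gfp_mask)
{
	int ret = 0;

	while (size) {
		struct page *page = alloc_pages(gfp_mask, 0);
		unsigned int len = min_t(size_t, PAGE_SIZE, size);

		if (!page) {
			pr_err("failed to alloc page");
			ret = -ENOMEM;
			break;
		}

		ret = bio_add_page(bio, page, len, 0);
		if (unlikely(ret != len)) {
			__free_page(page);
			pr_err("failed to add page");
			break;
		}

		size -= len;
	}

	if (size)
		bio_free_pages(bio);
	else
		ret = 0;

	return ret;
}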

> +
> +static struct cbd_backend_io *backend_prepare_io(struct cbd_handler *handler, struct cbd_se *se, blk_opf_t opf)
> +{
> +	struct cbd_backend_io *backend_io;
> +	struct cbd_backend *cbdb = handler->cbdb;
> +
> +	backend_io = kzalloc(sizeof(struct cbd_backend_io), GFP_KERNEL);

will the above allocation always succeed ? or should there be a NULL check here ?
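kzalloc() with GFP_KERNEL can fail, so something like this is needed here :

	backend_io = kzalloc(sizeof(struct cbd_backend_io), GFP_KERNEL);
	if (!backend_io)
		return NULL;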

> +	backend_io->se = se;
> +
> +	backend_io->handler = handler;
> +	backend_io->bio = bio_alloc_bioset(cbdb->bdev, roundup(se->len, 4096) / 4096, opf, GFP_KERNEL, &handler->bioset);
> +
> +	backend_io->bio->bi_iter.bi_sector = se->offset >> SECTOR_SHIFT;
> +	backend_io->bio->bi_iter.bi_size = 0;
> +	backend_io->bio->bi_private = backend_io;
> +	backend_io->bio->bi_end_io = backend_bio_end;
> +
> +	return backend_io;
> +}
> +
> +static int handle_backend_cmd(struct cbd_handler *handler, struct cbd_se *se)
> +{
> +	struct cbd_backend *cbdb = handler->cbdb;
> +	u32 len = se->len;
> +	struct cbd_backend_io *backend_io = NULL;
> +	int ret;
> +
> +	if (cbd_se_hdr_flags_test(se, CBD_SE_HDR_DONE)) {
> +		return 0 ;
> +	}
> +
> +	switch (cbd_se_hdr_get_op(se->header.len_op)) {
> +	case CBD_OP_PAD:
> +		cbd_se_hdr_flags_set(se, CBD_SE_HDR_DONE);
> +		return 0;
> +	case CBD_OP_READ:
> +		backend_io = backend_prepare_io(handler, se, REQ_OP_READ);
> +		break;
> +	case CBD_OP_WRITE:
> +		backend_io = backend_prepare_io(handler, se, REQ_OP_WRITE);
> +		break;
> +	case CBD_OP_DISCARD:
> +		ret = blkdev_issue_discard(cbdb->bdev, se->offset >> SECTOR_SHIFT,
> +				se->len, GFP_NOIO);

any specific reason not to use GFP_KERNEL ?

> +		goto complete_cmd;
> +	case CBD_OP_WRITE_ZEROS:
> +		ret = blkdev_issue_zeroout(cbdb->bdev, se->offset >> SECTOR_SHIFT,
> +				se->len, GFP_NOIO, 0);

any specific reason not to use GFP_KERNEL ?

> +		goto complete_cmd;
> +	case CBD_OP_FLUSH:
> +		ret = blkdev_issue_flush(cbdb->bdev);
> +		goto complete_cmd;
> +	default:
> +		pr_err("unrecognized op: %x", cbd_se_hdr_get_op(se->header.len_op));
> +		ret = -EIO;
> +		goto complete_cmd;
> +	}
> +
> +	if (!backend_io)
> +		return -ENOMEM;

there is no NULL check in backend_prepare_io(), so the above condition can
never be true with the current code, unless you change backend_prepare_io()
to return NULL on allocation failure (see the sketch above) ...

> +
> +	ret = cbd_bio_alloc_pages(backend_io->bio, len, GFP_NOIO);
> +	if (ret) {
> +		kfree(backend_io);
> +		return ret;
> +	}
> +
> +	if (cbd_se_hdr_get_op(se->header.len_op) == CBD_OP_WRITE) {
> +		cbdc_copy_to_bio(&handler->channel, se->data_off, se->data_len, backend_io->bio);
> +	}
> +
> +	submit_bio(backend_io->bio);
> +

unless I misread the code, you are building a single bio for the incoming
request; that bio might not have enough space to accommodate all of the
data, which is why you return an error from cbd_bio_alloc_pages() when
bio_add_page() fails ...

bio_add_page() can fail for multiple reasons. instead of trying to build
only one bio that might be too small for the size of the I/O and returning
an error, why not use a chain of smaller bios ? that way you will not run
out of space in a single bio and can still finish the I/O, avoiding the
bio_add_page() failures that happen when a bio is full.
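something along these lines (untested sketch following the pattern of the
helpers in block/blk-lib.c, function names are made up for illustration) :

static struct bio *cbd_next_bio(struct bio *bio, struct block_device *bdev,
				sector_t sector, blk_opf_t opf)
{
	struct bio *new = bio_alloc(bdev, BIO_MAX_VECS, opf, GFP_NOIO);

	new->bi_iter.bi_sector = sector;
	if (bio) {
		/* new completes only after bio completes */
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}

static int cbd_submit_chained(struct cbd_backend_io *backend_io,
			      struct block_device *bdev,
			      sector_t sector, size_t size, blk_opf_t opf)
{
	struct bio *bio = cbd_next_bio(NULL, bdev, sector, opf);

	while (size) {
		unsigned int len = min_t(size_t, PAGE_SIZE, size);
		struct page *page = alloc_page(GFP_NOIO);

		if (!page)
			return -ENOMEM; /* unwinding the chain is elided here */

		if (bio_add_page(bio, page, len, 0) != len) {
			/* current bio is full, chain a fresh one */
			sector += bio->bi_iter.bi_size >> SECTOR_SHIFT;
			bio = cbd_next_bio(bio, bdev, sector, opf);
			bio_add_page(bio, page, len, 0); /* empty bio, cannot fail */
		}

		size -= len;
	}

	/* the end_io of the last bio fires once the whole chain is done */
	bio->bi_private = backend_io;
	bio->bi_end_io = backend_bio_end;
	submit_bio(bio);

	return 0;
}

note that bio_free_pages() in backend_bio_end() would then only see the last
bio in the chain, so page freeing on completion needs to be reworked as well.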

> +	return 0;
> +
> +complete_cmd:
> +	complete_cmd(handler, se->priv_data, ret);
> +	return 0;
> +}
> +
> +static void handle_work_fn(struct work_struct *work)
> +{
> +	struct cbd_handler *handler = container_of(work, struct cbd_handler, handle_work.work);
> +	struct cbd_se *se;
> +	int ret;
> +again:
> +	/* channel ctrl would be updated by blkdev queue */
> +	cbdc_flush_ctrl(&handler->channel);
> +	se = get_se_to_handle(handler);
> +	if (se == get_se_head(handler)) {
> +		if (cbdwc_need_retry(&handler->handle_worker_cfg)) {
> +			goto again;
> +		}
> +
> +		cbdwc_miss(&handler->handle_worker_cfg);
> +
> +		queue_delayed_work(handler->handle_wq, &handler->handle_work, usecs_to_jiffies(0));
> +		return;
> +	}
> +
> +	cbdwc_hit(&handler->handle_worker_cfg);
> +	cbdt_flush_range(handler->cbdb->cbdt, se, sizeof(*se));
> +	ret = handle_backend_cmd(handler, se);
> +	if (!ret) {
> +		/* this se is handled */
> +		handler->se_to_handle = (handler->se_to_handle + cbd_se_hdr_get_len(se->header.len_op)) % handler->channel_info->cmdr_size;

this is a really long line; if possible keep the code under 80 characters.
I know it's no longer a hard requirement, but it will match the rest of the
block drivers ..
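e.g. :

		handler->se_to_handle = (handler->se_to_handle +
					 cbd_se_hdr_get_len(se->header.len_op)) %
					handler->channel_info->cmdr_size;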

-ck