Re: [PATCH 3/7] block: copy offload support infrastructure

Hi SelvaKumar,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on block/for-next]
[also build test WARNING on dm/for-next linus/master v5.14-rc6 next-20210817]
[cannot apply to linux-nvme/for-next]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patches, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/SelvaKumar-S/block-make-bio_map_kern-non-static/20210817-193111
base:   https://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git for-next
config: hexagon-randconfig-r013-20210816 (attached as .config)
compiler: clang version 12.0.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/0day-ci/linux/commit/35fc502a7f20a7cd42432cee2777a621c40a3bd3
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review SelvaKumar-S/block-make-bio_map_kern-non-static/20210817-193111
        git checkout 35fc502a7f20a7cd42432cee2777a621c40a3bd3
        # save the attached .config to the linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=hexagon 

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@xxxxxxxxx>

All warnings (new ones prefixed by >>):

>> block/blk-lib.c:197:5: warning: no previous prototype for function 'blk_copy_offload_submit_bio' [-Wmissing-prototypes]
   int blk_copy_offload_submit_bio(struct block_device *bdev,
       ^
   block/blk-lib.c:197:1: note: declare 'static' if the function is not intended to be used outside of this translation unit
   int blk_copy_offload_submit_bio(struct block_device *bdev,
   ^
   static 
>> block/blk-lib.c:250:5: warning: no previous prototype for function 'blk_copy_offload_scc' [-Wmissing-prototypes]
   int blk_copy_offload_scc(struct block_device *src_bdev, int nr_srcs,
       ^
   block/blk-lib.c:250:1: note: declare 'static' if the function is not intended to be used outside of this translation unit
   int blk_copy_offload_scc(struct block_device *src_bdev, int nr_srcs,
   ^
   static 
   2 warnings generated.
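
Both warnings come from clang's -Wmissing-prototypes: the two functions are
defined with external linkage in block/blk-lib.c, but no prior declaration is
visible at the point of definition. If they are only meant to be called from
within blk-lib.c, the minimal fix is the one the compiler note suggests, i.e.
marking the definitions static; if a later patch in the series calls them from
another file, adding declarations to a shared header would also silence the
warning. A rough sketch of both options follows (the signatures are copied
from the quoted code; the header name block/blk.h is only an assumption about
where such declarations might live):

	/* Option 1: keep the helpers private to block/blk-lib.c */
	static int blk_copy_offload_submit_bio(struct block_device *bdev,
			struct blk_copy_payload *payload, int payload_size,
			struct cio *cio, gfp_t gfp_mask)
	{
		/* body unchanged from the patch */
	}

	/*
	 * Option 2: if other files need the helpers, declare them in a header
	 * (e.g. block/blk.h) that blk-lib.c includes, and leave the
	 * definitions non-static.
	 */
	int blk_copy_offload_submit_bio(struct block_device *bdev,
			struct blk_copy_payload *payload, int payload_size,
			struct cio *cio, gfp_t gfp_mask);
	int blk_copy_offload_scc(struct block_device *src_bdev, int nr_srcs,
			struct range_entry *rlist, struct block_device *dest_bdev,
			sector_t dest, gfp_t gfp_mask);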


vim +/blk_copy_offload_submit_bio +197 block/blk-lib.c

   196	
 > 197	int blk_copy_offload_submit_bio(struct block_device *bdev,
   198			struct blk_copy_payload *payload, int payload_size,
   199			struct cio *cio, gfp_t gfp_mask)
   200	{
   201		struct request_queue *q = bdev_get_queue(bdev);
   202		struct bio *bio;
   203	
   204		bio = bio_map_kern(q, payload, payload_size, gfp_mask);
   205		if (IS_ERR(bio))
   206			return PTR_ERR(bio);
   207	
   208		bio_set_dev(bio, bdev);
   209		bio->bi_opf = REQ_OP_COPY | REQ_NOMERGE;
   210		bio->bi_iter.bi_sector = payload->dest;
   211		bio->bi_end_io = cio_bio_end_io;
   212		bio->bi_private = cio;
   213		atomic_inc(&cio->refcount);
   214		submit_bio(bio);
   215	
   216		return 0;
   217	}
   218	
   219	/* Go through all the entries inside the user-provided payload, and determine the
   220	 * maximum number of entries in a payload, based on the device's scc limits.
   221	 */
   222	static inline int blk_max_payload_entries(int nr_srcs, struct range_entry *rlist,
   223			int max_nr_srcs, sector_t max_copy_range_sectors, sector_t max_copy_len)
   224	{
   225		sector_t range_len, copy_len = 0, remaining = 0;
   226		int ri = 0, pi = 1, max_pi = 0;
   227	
   228		for (ri = 0; ri < nr_srcs; ri++) {
   229			for (remaining = rlist[ri].len; remaining > 0; remaining -= range_len) {
   230				range_len = min3(remaining, max_copy_range_sectors,
   231									max_copy_len - copy_len);
   232				pi++;
   233				copy_len += range_len;
   234	
   235				if ((pi == max_nr_srcs) || (copy_len == max_copy_len)) {
   236					max_pi = max(max_pi, pi);
   237					pi = 1;
   238					copy_len = 0;
   239				}
   240			}
   241		}
   242	
   243		return max(max_pi, pi);
   244	}
   245	
   246	/*
   247	 * blk_copy_offload_scc - Use the device's native copy offload feature.
   248	 * Go through the user-provided payload and prepare new payloads based on the device's copy offload limits.
   249	 */
 > 250	int blk_copy_offload_scc(struct block_device *src_bdev, int nr_srcs,
   251			struct range_entry *rlist, struct block_device *dest_bdev,
   252			sector_t dest, gfp_t gfp_mask)
   253	{
   254		struct request_queue *q = bdev_get_queue(dest_bdev);
   255		struct cio *cio = NULL;
   256		struct blk_copy_payload *payload;
   257		sector_t range_len, copy_len = 0, remaining = 0;
   258		sector_t src_blk, cdest = dest;
   259		sector_t max_copy_range_sectors, max_copy_len;
   260		int ri = 0, pi = 0, ret = 0, payload_size, max_pi, max_nr_srcs;
   261	
   262		cio = kzalloc(sizeof(struct cio), GFP_KERNEL);
   263		if (!cio)
   264			return -ENOMEM;
   265		atomic_set(&cio->refcount, 0);
   266	
   267		max_nr_srcs = q->limits.max_copy_nr_ranges;
   268		max_copy_range_sectors = q->limits.max_copy_range_sectors;
   269		max_copy_len = q->limits.max_copy_sectors;
   270	
   271		max_pi = blk_max_payload_entries(nr_srcs, rlist, max_nr_srcs,
   272						max_copy_range_sectors, max_copy_len);
   273		payload_size = struct_size(payload, range, max_pi);
   274	
   275		payload = kvmalloc(payload_size, gfp_mask);
   276		if (!payload) {
   277			ret = -ENOMEM;
   278			goto free_cio;
   279		}
   280		payload->src_bdev = src_bdev;
   281	
   282		for (ri = 0; ri < nr_srcs; ri++) {
   283			for (remaining = rlist[ri].len, src_blk = rlist[ri].src; remaining > 0;
   284							remaining -= range_len, src_blk += range_len) {
   285	
   286				range_len = min3(remaining, max_copy_range_sectors,
   287									max_copy_len - copy_len);
   288				payload->range[pi].len = range_len;
   289				payload->range[pi].src = src_blk;
   290				pi++;
   291				copy_len += range_len;
   292	
   293				/* Submit current payload, if crossing device copy limits */
   294				if ((pi == max_nr_srcs) || (copy_len == max_copy_len)) {
   295					payload->dest = cdest;
   296					payload->copy_nr_ranges = pi;
   297					ret = blk_copy_offload_submit_bio(dest_bdev, payload,
   298									payload_size, cio, gfp_mask);
   299					if (ret)
   300						goto free_payload;
   301	
   302					/* reset index, length and allocate new payload */
   303					pi = 0;
   304					cdest += copy_len;
   305					copy_len = 0;
   306					payload = kvmalloc(payload_size, gfp_mask);
   307					if (!payload) {
   308						ret = -ENOMEM;
   309						goto free_cio;
   310					}
   311					payload->src_bdev = src_bdev;
   312				}
   313			}
   314		}
   315	
   316		if (pi) {
   317			payload->dest = cdest;
   318			payload->copy_nr_ranges = pi;
   319			ret = blk_copy_offload_submit_bio(dest_bdev, payload, payload_size, cio, gfp_mask);
   320			if (ret)
   321				goto free_payload;
   322		}
   323	
   324		/* Wait for completion of all IOs */
   325		ret = cio_await_completion(cio);
   326	
   327		return ret;
   328	
   329	free_payload:
   330		kvfree(payload);
   331	free_cio:
   332		cio_await_completion(cio);
   333		return ret;
   334	}
   335	
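For reviewers reading along, a hypothetical caller of the offload path might
look roughly like the sketch below. The range_entry field names (src, len) and
the blk_copy_offload_scc() signature are taken from the quoted code; the sector
numbers, lengths and the surrounding context are purely illustrative:

	/* Illustrative only: copy two source extents to dest_sector on dest_bdev */
	struct range_entry ranges[] = {
		{ .src = 0,	.len = 1024 },	/* sectors 0..1023 */
		{ .src = 4096,	.len = 2048 },	/* sectors 4096..6143 */
	};
	int ret;

	ret = blk_copy_offload_scc(src_bdev, ARRAY_SIZE(ranges), ranges,
				   dest_bdev, dest_sector, GFP_KERNEL);
	if (ret)
		pr_err("copy offload failed: %d\n", ret);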

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@xxxxxxxxxxxx

Attachment: .config.gz
Description: application/gzip

