Re: [PATCH] RDMA/umem: minor bug fix and cleanup in error handling paths

Hi John,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on rdma/for-next]
[also build test ERROR on v5.0-rc8 next-20190301]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/john-hubbard-gmail-com/RDMA-umem-minor-bug-fix-and-cleanup-in-error-handling-paths/20190302-233314
base:   https://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git for-next
config: i386-randconfig-x002-201908 (attached as .config)
compiler: gcc-8 (Debian 8.2.0-20) 8.2.0
reproduce:
        # save the attached .config to linux build tree
        make ARCH=i386 

All errors (new ones prefixed by >>):

   drivers/infiniband/core/umem_odp.c: In function 'ib_umem_odp_map_dma_pages':
>> drivers/infiniband/core/umem_odp.c:684:4: error: implicit declaration of function 'release_pages'; did you mean 'release_task'? [-Werror=implicit-function-declaration]
       release_pages(&local_page_list[j], npages - j);
       ^~~~~~~~~~~~~
       release_task
   cc1: some warnings being treated as errors
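
The implicit-declaration error means umem_odp.c calls release_pages() without
pulling in its declaration. A minimal sketch of the likely fix, assuming the
declaration lives in linux/pagemap.h as it does in v5.0 (the patch itself may
of course resolve it differently):

        /* drivers/infiniband/core/umem_odp.c, alongside the existing includes */
        #include <linux/pagemap.h>	/* declares release_pages() */

With that include in place, the call at line 684 below should compile.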

vim +684 drivers/infiniband/core/umem_odp.c

   559	
   560	/**
   561	 * ib_umem_odp_map_dma_pages - Pin and DMA map userspace memory in an ODP MR.
   562	 *
   563	 * Pins the range of pages passed in the argument, and maps them to
   564	 * DMA addresses. The DMA addresses of the mapped pages are updated in
   565	 * umem_odp->dma_list.
   566	 *
   567	 * Returns the number of pages mapped on success, or a negative error
   568	 * code on failure.
   569	 * An -EAGAIN error code is returned when a concurrent mmu notifier prevents
   570	 * the function from completing its task.
   571	 * An -ENOENT error code indicates that the userspace process is being
   572	 * terminated and its mm has already been destroyed.
   573	 * @umem_odp: the umem to map and pin
   574	 * @user_virt: the address from which we need to map.
   575	 * @bcnt: the minimal number of bytes to pin and map. The mapping might be
   576	 *        bigger due to alignment, and may also be smaller in case of an error
   577	 *        pinning or mapping a page. The actual number of pages mapped
   578	 *        is returned in the return value.
   579	 * @access_mask: bit mask of the requested access permissions for the given
   580	 *               range.
   581	 * @current_seq: the MMU notifier sequence value for synchronization with
   582	 *               invalidations. The sequence number is read from
   583	 *               umem_odp->notifiers_seq before calling this function
   584	 */
   585	int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
   586				      u64 bcnt, u64 access_mask,
   587				      unsigned long current_seq)
   588	{
   589		struct ib_umem *umem = &umem_odp->umem;
   590		struct task_struct *owning_process  = NULL;
   591		struct mm_struct *owning_mm = umem_odp->umem.owning_mm;
   592		struct page       **local_page_list = NULL;
   593		u64 page_mask, off;
   594		int j, k, ret = 0, start_idx, npages = 0, page_shift;
   595		unsigned int flags = 0;
   596		phys_addr_t p = 0;
   597	
   598		if (access_mask == 0)
   599			return -EINVAL;
   600	
   601		if (user_virt < ib_umem_start(umem) ||
   602		    user_virt + bcnt > ib_umem_end(umem))
   603			return -EFAULT;
   604	
   605		local_page_list = (struct page **)__get_free_page(GFP_KERNEL);
   606		if (!local_page_list)
   607			return -ENOMEM;
   608	
   609		page_shift = umem->page_shift;
   610		page_mask = ~(BIT(page_shift) - 1);
   611		off = user_virt & (~page_mask);
   612		user_virt = user_virt & page_mask;
   613		bcnt += off; /* Charge for the first page offset as well. */
   614	
   615		/*
   616		 * owning_process is allowed to be NULL; this means the mm is somehow
   617		 * living beyond the lifetime of the originating process. Presumably
   618		 * mmget_not_zero will fail in this case.
   619		 */
   620		owning_process = get_pid_task(umem_odp->per_mm->tgid, PIDTYPE_PID);
   621		if (WARN_ON(!mmget_not_zero(umem_odp->umem.owning_mm))) {
   622			ret = -EINVAL;
   623			goto out_put_task;
   624		}
   625	
   626		if (access_mask & ODP_WRITE_ALLOWED_BIT)
   627			flags |= FOLL_WRITE;
   628	
   629		start_idx = (user_virt - ib_umem_start(umem)) >> page_shift;
   630		k = start_idx;
   631	
   632		while (bcnt > 0) {
   633			const size_t gup_num_pages = min_t(size_t,
   634					(bcnt + BIT(page_shift) - 1) >> page_shift,
   635					PAGE_SIZE / sizeof(struct page *));
   636	
   637			down_read(&owning_mm->mmap_sem);
   638			/*
   639		 * Note: this might result in redundant page getting. We can
   640		 * avoid this by checking that dma_list is 0 before calling
   641		 * get_user_pages. However, this makes the code much more
   642			 * complex (and doesn't gain us much performance in most use
   643			 * cases).
   644			 */
   645			npages = get_user_pages_remote(owning_process, owning_mm,
   646					user_virt, gup_num_pages,
   647					flags, local_page_list, NULL, NULL);
   648			up_read(&owning_mm->mmap_sem);
   649	
   650			if (npages < 0) {
   651				if (npages != -EAGAIN)
   652				pr_warn("failed to get %zu user pages with error %d\n",
   653					gup_num_pages, npages);
   654			else
   655				pr_debug("failed to get %zu user pages with error %d\n",
   656					 gup_num_pages, npages);
   657				break;
   658			}
   659	
   660			bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
   661			mutex_lock(&umem_odp->umem_mutex);
   662			for (j = 0; j < npages; j++, user_virt += PAGE_SIZE) {
   663				ret = ib_umem_odp_map_dma_single_page(
   664						umem_odp, k, local_page_list[j],
   665						access_mask, current_seq);
   666				if (ret < 0) {
   667					if (ret != -EAGAIN)
   668						pr_warn("ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
   669					else
   670						pr_debug("ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
   671					break;
   672				}
   673	
   674				p = page_to_phys(local_page_list[j]);
   675				k++;
   676			}
   677			mutex_unlock(&umem_odp->umem_mutex);
   678	
   679			if (ret < 0) {
   680				/*
   681			 * Release pages, starting at the first page
   682				 * that experienced an error.
   683				 */
 > 684				release_pages(&local_page_list[j], npages - j);
   685				break;
   686			}
   687		}
   688	
   689		if (ret >= 0) {
   690			if (npages < 0 && k == start_idx)
   691				ret = npages;
   692			else
   693				ret = k - start_idx;
   694		}
   695	
   696		mmput(owning_mm);
   697	out_put_task:
   698		if (owning_process)
   699			put_task_struct(owning_process);
   700		free_page((unsigned long)local_page_list);
   701		return ret;
   702	}
   703	EXPORT_SYMBOL(ib_umem_odp_map_dma_pages);
   704	
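
For context on the failing call: release_pages() is a batched page-reference
drop, so the error path at line 684 is conceptually equivalent to the
open-coded loop below (a sketch of the semantics, not the kernel's actual
implementation):

	/* Drop one reference on each page that get_user_pages_remote()
	 * pinned but that was never handed to
	 * ib_umem_odp_map_dma_single_page(), starting at the page that
	 * hit the error.
	 */
	int i;

	for (i = j; i < npages; i++)
		put_page(local_page_list[i]);

release_pages() batches the refcount drops and related LRU handling, which is
why the patch prefers it over an open-coded put_page() loop.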

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

Attachment: .config.gz

