On Thu, Dec 19, 2019 at 03:46:46PM +0200, Leon Romanovsky wrote:

> @@ -403,6 +390,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
>  	int j, k, ret = 0, start_idx, npages = 0;
>  	unsigned int flags = 0, page_shift;
>  	phys_addr_t p = 0;
> +	struct vm_area_struct **vmas;
>
>  	if (access_mask == 0)
>  		return -EINVAL;
> @@ -415,6 +403,12 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
>  	if (!local_page_list)
>  		return -ENOMEM;
>
> +	vmas = (struct vm_area_struct **)__get_free_page(GFP_KERNEL);
> +	if (!vmas) {
> +		ret = -ENOMEM;
> +		goto out_free_page_list;
> +	}

I'd rather not do this on the fast path

> +			if ((1 << page_shift) > vma_kernel_pagesize(vmas[j])) {
> +				ret = -EFAULT;
> +				break;
> +			}

And vma's cannot be dereferenced outside the mmap_sem

There is already logic checking for linear contiguous pages:

			if (user_virt & ~page_mask) {
				p += PAGE_SIZE;
				if (page_to_phys(local_page_list[j]) != p) {
					ret = -EFAULT;
					break;
				}

Why do we need to add the vma check?

Jason