On 01-May-19 19:38, Jason Gunthorpe wrote:
>> +static int __efa_mmap(struct efa_dev *dev,
>> +		       struct efa_ucontext *ucontext,
>> +		       struct vm_area_struct *vma,
>> +		       struct efa_mmap_entry *entry)
>> +{
>> +	u8 mmap_flag = get_mmap_flag(entry->key);
>> +	u64 pfn = entry->address >> PAGE_SHIFT;
>> +	u64 address = entry->address;
>> +	u64 length = entry->length;
>> +	unsigned long va;
>> +	int err;
>> +
>> +	ibdev_dbg(&dev->ibdev,
>> +		  "Mapping address[%#llx], length[%#llx], mmap_flag[%d]\n",
>> +		  address, length, mmap_flag);
>> +
>> +	switch (mmap_flag) {
>> +	case EFA_MMAP_IO_NC:
>> +		err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, length,
>> +					pgprot_noncached(vma->vm_page_prot));
>> +		break;
>> +	case EFA_MMAP_IO_WC:
>> +		err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, length,
>> +					pgprot_writecombine(vma->vm_page_prot));
>> +		break;
>> +	case EFA_MMAP_DMA_PAGE:
>> +		for (va = vma->vm_start; va < vma->vm_end;
>> +		     va += PAGE_SIZE, pfn++) {
>> +			err = vm_insert_page(vma, va, pfn_to_page(pfn));
>> +			if (err)
>> +				break;
>
> This loop doesn't bound the number of pfns it accesses, so it is a
> security problem.
>
> The core code was checking this before

Thanks Jason,

The core code was checking:

	if (vma->vm_end - vma->vm_start != size)
		return ERR_PTR(-EINVAL);

Our code explicitly sets size to 'vma->vm_end - vma->vm_start'.
In addition, we validate that the mapping size matches the size of the
allocated buffers being mapped, so the loop is bounded.
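
For illustration only, a minimal sketch of such a bound check (the helper
name efa_mmap_check_length is hypothetical, not taken from the patch): the
VMA size is compared against the length stored in the mmap entry before any
pfn is touched, so the EFA_MMAP_DMA_PAGE loop can never walk past the
allocated buffer.

	/*
	 * Hypothetical helper: reject the mmap unless the requested VMA
	 * covers exactly the buffer recorded for this mmap entry.
	 */
	static int efa_mmap_check_length(struct vm_area_struct *vma,
					 struct efa_mmap_entry *entry)
	{
		size_t length = vma->vm_end - vma->vm_start;

		if (length != entry->length)
			return -EINVAL;

		return 0;
	}

Calling something like this before __efa_mmap() would give the same
guarantee the old core-code check provided.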