This switches the accounting of pinned pages to pinned_vm to be consistent
with other drivers that pin pages with FOLL_LONGTERM. It also allows the use
of the vm_account helper struct, which makes a future change implementing
cgroup accounting of pinned pages easier, as that change requires a reference
to the cgroup to be maintained.

Signed-off-by: Alistair Popple <apopple@xxxxxxxxxx>
Cc: Alex Williamson <alex.williamson@xxxxxxxxxx>
Cc: Cornelia Huck <cohuck@xxxxxxxxxx>
Cc: kvm@xxxxxxxxxxxxxxx
Cc: linux-kernel@xxxxxxxxxxxxxxx
---
 drivers/vfio/vfio_iommu_type1.c | 59 +++++++++-------------------------
 1 file changed, 16 insertions(+), 43 deletions(-)

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 23c24fe..828f6c7 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -95,11 +95,11 @@ struct vfio_dma {
 	size_t			size;		/* Map size (bytes) */
 	int			prot;		/* IOMMU_READ/WRITE */
 	bool			iommu_mapped;
-	bool			lock_cap;	/* capable(CAP_IPC_LOCK) */
 	bool			vaddr_invalid;
 	struct task_struct	*task;
 	struct rb_root		pfn_list;	/* Ex-user pinned pfn list */
 	unsigned long		*bitmap;
+	struct vm_account	vm_account;
 };
 
 struct vfio_batch {
@@ -412,31 +412,6 @@ static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn)
 	return ret;
 }
 
-static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async)
-{
-	struct mm_struct *mm;
-	int ret;
-
-	if (!npage)
-		return 0;
-
-	mm = async ? get_task_mm(dma->task) : dma->task->mm;
-	if (!mm)
-		return -ESRCH; /* process exited */
-
-	ret = mmap_write_lock_killable(mm);
-	if (!ret) {
-		ret = __account_locked_vm(mm, abs(npage), npage > 0, dma->task,
-					  dma->lock_cap);
-		mmap_write_unlock(mm);
-	}
-
-	if (async)
-		mmput(mm);
-
-	return ret;
-}
-
 /*
  * Some mappings aren't backed by a struct page, for example an mmap'd
  * MMIO range for our own or another device.  These use a different
@@ -715,16 +690,8 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
 		 * externally pinned pages are already counted against
 		 * the user.
 		 */
-		if (!rsvd && !vfio_find_vpfn(dma, iova)) {
-			if (!dma->lock_cap &&
-			    mm->locked_vm + lock_acct + 1 > limit) {
-				pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
-					__func__, limit << PAGE_SHIFT);
-				ret = -ENOMEM;
-				goto unpin_out;
-			}
+		if (!rsvd && !vfio_find_vpfn(dma, iova))
 			lock_acct++;
-		}
 
 		pinned++;
 		npage--;
@@ -744,7 +711,11 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
 	}
 
 out:
-	ret = vfio_lock_acct(dma, lock_acct, false);
+	if (vm_account_pinned(&dma->vm_account, lock_acct)) {
+		ret = -ENOMEM;
+		lock_acct = 0;
+		pr_warn("%s: RLIMIT_MEMLOCK exceeded\n", __func__);
+	}
 
 unpin_out:
 	if (batch->size == 1 && !batch->offset) {
@@ -759,6 +730,7 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
 			put_pfn(pfn, dma->prot);
 		}
 		vfio_batch_unpin(batch, dma);
+		vm_unaccount_pinned(&dma->vm_account, lock_acct);
 
 		return ret;
 	}
@@ -782,7 +754,7 @@ static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
 	}
 
 	if (do_accounting)
-		vfio_lock_acct(dma, locked - unlocked, true);
+		vm_unaccount_pinned(&dma->vm_account, locked - unlocked);
 
 	return unlocked;
 }
@@ -805,7 +777,7 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
 
 	ret = 0;
 	if (do_accounting && !is_invalid_reserved_pfn(*pfn_base)) {
-		ret = vfio_lock_acct(dma, 1, true);
+		ret = vm_account_pinned(&dma->vm_account, 1);
 		if (ret) {
 			put_pfn(*pfn_base, dma->prot);
 			if (ret == -ENOMEM)
@@ -833,7 +805,7 @@ static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova,
 
 	unlocked = vfio_iova_put_vfio_pfn(dma, vpfn);
 	if (do_accounting)
-		vfio_lock_acct(dma, -unlocked, true);
+		vm_unaccount_pinned(&dma->vm_account, unlocked);
 
 	return unlocked;
 }
@@ -921,7 +893,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
 		ret = vfio_add_to_pfn_list(dma, iova, phys_pfn);
 		if (ret) {
 			if (put_pfn(phys_pfn, dma->prot) && do_accounting)
-				vfio_lock_acct(dma, -1, true);
+				vm_unaccount_pinned(&dma->vm_account, 1);
 			goto pin_unwind;
 		}
 
@@ -1162,7 +1134,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
 	}
 
 	if (do_accounting) {
-		vfio_lock_acct(dma, -unlocked, true);
+		vm_unaccount_pinned(&dma->vm_account, unlocked);
 		return 0;
 	}
 	return unlocked;
@@ -1674,7 +1646,8 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
 	 */
 	get_task_struct(current->group_leader);
 	dma->task = current->group_leader;
-	dma->lock_cap = capable(CAP_IPC_LOCK);
+	vm_account_init(&dma->vm_account, dma->task, NULL, VM_ACCOUNT_TASK |
+			(capable(CAP_IPC_LOCK) ? VM_ACCOUNT_BYPASS : 0));
 
 	dma->pfn_list = RB_ROOT;
 
@@ -2398,7 +2371,7 @@ static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu)
			if (!is_invalid_reserved_pfn(vpfn->pfn))
				locked++;
		}
-		vfio_lock_acct(dma, locked - unlocked, true);
+		vm_unaccount_pinned(&dma->vm_account, locked - unlocked);
	}
 }
-- 
git-series 0.9.1
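
Not part of the patch: a minimal sketch of the vm_account lifecycle this
change relies on, for readers who do not have the earlier patches in the
series handy. The helper names, flags and argument order are taken from the
hunks above; the header providing struct vm_account is whatever this series
introduces, and the pairing shown (one vm_account_init() per tracking struct,
vm_account_pinned()/vm_unaccount_pinned() around each pin/unpin) is inferred
from the diff rather than a definitive description of the API.

/*
 * Illustrative sketch only, based on the usage in the diff above.
 * struct vm_account, vm_account_init(), vm_account_pinned() and
 * vm_unaccount_pinned() come from the vm_account series (header assumed).
 */
#include <linux/sched.h>
#include <linux/capability.h>

static int example_charge_pinned(struct vm_account *acct,
				 struct task_struct *task,
				 unsigned long npages)
{
	int ret;

	/* Charge against the task's pinned_vm; CAP_IPC_LOCK bypasses the limit. */
	vm_account_init(acct, task, NULL, VM_ACCOUNT_TASK |
			(capable(CAP_IPC_LOCK) ? VM_ACCOUNT_BYPASS : 0));

	/* A non-zero return means RLIMIT_MEMLOCK would be exceeded. */
	ret = vm_account_pinned(acct, npages);
	if (ret)
		return ret;

	/* ... pin the pages with FOLL_LONGTERM ... */

	/* On unpin (or when unwinding after a failure), drop the charge again. */
	vm_unaccount_pinned(acct, npages);
	return 0;
}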