Track locked_vm per dma struct, and create a new subroutine, task_lock_acct(), both for use in a subsequent patch. No functional change. Fixes: c3cbab24db38 ("vfio/type1: implement interfaces to update vaddr") Cc: stable@vger.kernel.org Signed-off-by: Steve Sistare <steven.sistare@oracle.com> --- drivers/vfio/vfio_iommu_type1.c | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 71f980b..889e920 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -101,6 +101,7 @@ struct vfio_dma { struct rb_root pfn_list; /* Ex-user pinned pfn list */ unsigned long *bitmap; struct mm_struct *mm; + long locked_vm; }; struct vfio_batch { @@ -413,22 +414,21 @@ static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn) return ret; } -static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async) +static int task_lock_acct(struct task_struct *task, struct mm_struct *mm, + bool lock_cap, long npage, bool async) { - struct mm_struct *mm; int ret; if (!npage) return 0; - mm = dma->mm; if (async && !mmget_not_zero(mm)) return -ESRCH; /* process exited */ ret = mmap_write_lock_killable(mm); if (!ret) { - ret = __account_locked_vm(mm, abs(npage), npage > 0, dma->task, - dma->lock_cap); + ret = __account_locked_vm(mm, abs(npage), npage > 0, task, + lock_cap); mmap_write_unlock(mm); } @@ -438,6 +438,16 @@ static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async) return ret; } +static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async) +{ + int ret; + + ret = task_lock_acct(dma->task, dma->mm, dma->lock_cap, npage, async); + if (!ret) + dma->locked_vm += npage; + return ret; +} + /* * Some mappings aren't backed by a struct page, for example an mmap'd * MMIO range for our own or another device. These use a different -- 1.8.3.1