On Tue, Jan 24, 2023 at 04:42:34PM +1100, Alistair Popple wrote: > @@ -385,20 +382,16 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable) > if (!umem) > return ERR_PTR(-ENOMEM); > > - mm_s = current->mm; > - umem->owning_mm = mm_s; > umem->writable = writable; > > - mmgrab(mm_s); > + vm_account_init_current(&umem->vm_account); > > if (writable) > foll_flags |= FOLL_WRITE; > > - mmap_read_lock(mm_s); > + mmap_read_lock(current->mm); > > - mlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; > - > - if (num_pages + atomic64_read(&mm_s->pinned_vm) > mlock_limit) { > + if (vm_account_pinned(&umem->vm_account, num_pages)) { > rv = -ENOMEM; > goto out_sem_up; > } > @@ -429,7 +422,6 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable) > goto out_sem_up; > > umem->num_pages += rv; > - atomic64_add(rv, &mm_s->pinned_vm); This change also fixes the race bug in the old accounting (the unlocked read-then-add of pinned_vm against the rlimit). Jason