On 27/06/2019 16:58, Michal Kalderon wrote: > +/* > + * Note this locking scheme cannot support removal of entries, except during > + * ucontext destruction when the core code guarantees no concurrency. > + */ > +u64 rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext, void *obj, > + u64 address, u64 length, u8 mmap_flag) > +{ > + struct rdma_user_mmap_entry *entry; > + int err; > + > + entry = kmalloc(sizeof(*entry), GFP_KERNEL); > + if (!entry) > + return RDMA_USER_MMAP_INVALID; > + > + entry->obj = obj; > + entry->address = address; > + entry->length = length; > + entry->mmap_flag = mmap_flag; > + > + xa_lock(&ucontext->mmap_xa); > + entry->mmap_page = ucontext->mmap_xa_page; > + ucontext->mmap_xa_page += DIV_ROUND_UP(length, PAGE_SIZE); Hi Michal, note that I already fixed this part to handle mmap_xa_page overflow in commit 7a5834e456f7 ("RDMA/efa: Handle mmap insertions overflow"). > + err = __xa_insert(&ucontext->mmap_xa, entry->mmap_page, entry, > + GFP_KERNEL); > + xa_unlock(&ucontext->mmap_xa); > + if (err) { > + kfree(entry); > + return RDMA_USER_MMAP_INVALID; > + } > + > + ibdev_dbg(ucontext->device, > + "mmap: obj[0x%p] addr[%#llx], len[%#llx], key[%#llx] inserted\n", > + entry->obj, entry->address, entry->length, > + rdma_user_mmap_get_key(entry)); > + > + return rdma_user_mmap_get_key(entry); > +} > +EXPORT_SYMBOL(rdma_user_mmap_entry_insert);