Re: [PATCH rdma-next v3 09/11] RDMA/efa: Add EFA verbs implementation

On Thu, Mar 14, 2019 at 02:56:52PM +0200, Gal Pressman wrote:
> On 14-Mar-19 14:30, Jason Gunthorpe wrote:
> > On Thu, Mar 14, 2019 at 01:45:20PM +0200, Gal Pressman wrote:
> > 
> >> +/*
> >> + * Since we don't track munmaps, we can't know when a user stopped using his
> >> + * mmapped buffers.
> >> + * This should be called on dealloc_ucontext in order to drain the mmap entries
> >> + * and free the (unmapped) DMA buffers.
> >> + */
> >> +static void mmap_entries_remove_free(struct efa_dev *dev,
> >> +				     struct efa_ucontext *ucontext)
> >> +{
> >> +	struct efa_mmap_entry *entry, *tmp;
> >> +
> >> +	mutex_lock(&ucontext->lock);
> >> +	list_for_each_entry_safe(entry, tmp, &ucontext->pending_mmaps, list) {
> >> +		list_del(&entry->list);
> >> +		efa_dbg(&dev->ibdev.dev,
> >> +			"mmap: obj[0x%p] key[%#llx] addr[%#llx] len[%#llx] removed\n",
> >> +			entry->obj, entry->key, entry->address, entry->length);
> >> +		if (get_mmap_flag(entry->key) == EFA_MMAP_DMA_PAGE)
> >> +			/* DMA mapping is already gone, now free the pages */
> >> +			free_pages_exact(phys_to_virt(entry->address),
> >> +					 entry->length);
> >> +		kfree(entry);
> >> +	}
> >> +	mutex_unlock(&ucontext->lock);
> >> +}
> >> +
> >> +static struct efa_mmap_entry *mmap_entry_get(struct efa_dev *dev,
> >> +					     struct efa_ucontext *ucontext,
> >> +					     u64 key,
> >> +					     u64 len)
> >> +{
> >> +	struct efa_mmap_entry *entry, *tmp;
> >> +
> >> +	mutex_lock(&ucontext->lock);
> >> +	list_for_each_entry_safe(entry, tmp, &ucontext->pending_mmaps, list) {
> >> +		if (entry->key == key && entry->length == len) {
> >> +			efa_dbg(&dev->ibdev.dev,
> >> +				"mmap: obj[%p] key[0x%llx] addr[0x%llX] len[0x%llX] removed\n",
> >> +				entry->obj, key, entry->address,
> >> +				entry->length);
> >> +			mutex_unlock(&ucontext->lock);
> >> +			return entry;
> >> +		}
> >> +	}
> >> +	mutex_unlock(&ucontext->lock);
> >> +
> >> +	return NULL;
> >> +}
> >> +
> >> +static void mmap_entry_insert(struct efa_dev *dev,
> >> +			      struct efa_ucontext *ucontext,
> >> +			      struct efa_mmap_entry *entry,
> >> +			      u8 mmap_flag)
> >> +{
> >> +	mutex_lock(&ucontext->lock);
> >> +	entry->key = ucontext->mmap_key;
> >> +	set_mmap_flag(&entry->key, mmap_flag);
> >> +	ucontext->mmap_key += PAGE_SIZE;
> >> +	list_add_tail(&entry->list, &ucontext->pending_mmaps);
> >> +	efa_dbg(&dev->ibdev.dev,
> >> +		"mmap: obj[%p] addr[0x%llx], len[0x%llx], key[0x%llx] inserted\n",
> >> +		entry->obj, entry->address, entry->length, entry->key);
> >> +	mutex_unlock(&ucontext->lock);
> >> +}
> > 
> > This whole thing is just a cyclic allocating xarray..
> 
> I was under the impression that xarray doesn't work well for u64 keys?

This really doesn't need a u64 key; it is only handling a small number
of objects, and it controls the number assignment for them itself.

The mmap offset should be '>> PAGE_SHIFT', checked to be < U32_MAX, and
then fed straight into xa_load().

xarray also has a range-based API for handling regions larger than a single
page, should that come up in the future.

Jason



