Re: [PATCH v2 12/29] drm/xe: Add SVM garbage collector

On Mon, Dec 16, 2024 at 11:36:20AM +0100, Thomas Hellström wrote:
> On Wed, 2024-12-11 at 11:17 -0800, Matthew Brost wrote:
> > On Tue, Nov 19, 2024 at 03:45:33PM +0100, Thomas Hellström wrote:
> > > On Tue, 2024-10-15 at 20:25 -0700, Matthew Brost wrote:
> > > > Add basic SVM garbage collector which can destroy an SVM range
> > > > upon an MMU UNMAP event.
> > > > 
> > > > v2:
> > > >  - Flush garbage collector in xe_svm_close
> > > > 
> > > > Signed-off-by: Matthew Brost <matthew.brost@xxxxxxxxx>
> > > > ---
> > > >  drivers/gpu/drm/xe/xe_svm.c      | 87 +++++++++++++++++++++++++++++++-
> > > >  drivers/gpu/drm/xe/xe_svm.h      |  1 +
> > > >  drivers/gpu/drm/xe/xe_vm.c       |  4 ++
> > > >  drivers/gpu/drm/xe/xe_vm_types.h |  5 ++
> > > >  4 files changed, 95 insertions(+), 2 deletions(-)
> > > > 
> > > > diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
> > > > index a9addaea316d..9c2f44cba166 100644
> > > > --- a/drivers/gpu/drm/xe/xe_svm.c
> > > > +++ b/drivers/gpu/drm/xe/xe_svm.c
> > > > @@ -30,6 +30,7 @@ xe_svm_range_alloc(struct drm_gpusvm *gpusvm)
> > > >  	if (!range)
> > > >  		return ERR_PTR(-ENOMEM);
> > > >  
> > > > +	INIT_LIST_HEAD(&range->garbage_collector_link);
> > > >  	xe_vm_get(gpusvm_to_vm(gpusvm));
> > > >  
> > > >  	return &range->base;
> > > > @@ -46,6 +47,24 @@ static struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
> > > >  	return container_of(r, struct xe_svm_range, base);
> > > >  }
> > > >  
> > > > +static void
> > > > +xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range,
> > > > +				   const struct mmu_notifier_range *mmu_range)
> > > > +{
> > > > +	struct xe_device *xe = vm->xe;
> > > > +
> > > > +	drm_gpusvm_range_set_unmapped(&range->base, mmu_range);
> > > > +
> > > > +	spin_lock(&vm->svm.garbage_collector.lock);
> > > > +	if (list_empty(&range->garbage_collector_link))
> > > > +		list_add_tail(&range->garbage_collector_link,
> > > > +			      &vm->svm.garbage_collector.range_list);
> > > > +	spin_unlock(&vm->svm.garbage_collector.lock);
> > > > +
> > > > +	queue_work(xe_device_get_root_tile(xe)->primary_gt->usm.pf_wq,
> > > > +		   &vm->svm.garbage_collector.work);
> > > > +}
> > > > +
> > > >  static u8
> > > >  xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
> > > >  				  const struct mmu_notifier_range *mmu_range,
> > > > @@ -88,7 +107,9 @@ xe_svm_range_notifier_event_end(struct xe_vm *vm, struct drm_gpusvm_range *r,
> > > >  	struct drm_gpusvm_ctx ctx = { .in_notifier = true, };
> > > >  
> > > >  	drm_gpusvm_range_unmap_pages(&vm->svm.gpusvm, r, &ctx);
> > > > -	/* TODO: Add range to garbage collector */
> > > > +	if (mmu_range->event == MMU_NOTIFY_UNMAP)
> > > > +		xe_svm_garbage_collector_add_range(vm, to_xe_range(r),
> > > > +						   mmu_range);
> > > >  }
> > > >  
> > > >  static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
> > > > @@ -184,6 +205,58 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
> > > >  		xe_svm_range_notifier_event_end(vm, r, mmu_range);
> > > >  }
> > > >  
> > > > +static int __xe_svm_garbage_collector(struct xe_vm *vm,
> > > > +				      struct xe_svm_range *range)
> > > > +{
> > > > +	/* TODO: Do unbind */
> > > > +
> > > > +	drm_gpusvm_range_remove(&vm->svm.gpusvm, &range->base);
> > > > +
> > > > +	return 0;
> > > > +}
> > > > +
> > > > +static int xe_svm_garbage_collector(struct xe_vm *vm)
> > > > +{
> > > > +	struct xe_svm_range *range, *next;
> > > > +	int err;
> > > > +
> > > > +	lockdep_assert_held_write(&vm->lock);
> > > > +
> > > > +	if (xe_vm_is_closed_or_banned(vm))
> > > > +		return -ENOENT;
> > > > +
> > > > +	spin_lock(&vm->svm.garbage_collector.lock);
> > > > +	list_for_each_entry_safe(range, next,
> > > > +				 &vm->svm.garbage_collector.range_list,
> > > > +				 garbage_collector_link) {
> > > > +		list_del(&range->garbage_collector_link);
> > > > +		spin_unlock(&vm->svm.garbage_collector.lock);
> > > 
> > > This looks broken; what if someone removes the "next" entry here?
> > > You probably want to use list_next_entry_or_null().
> > > 
> > 
> > Yea, let me fix this loop structure.
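
Something like this (untested sketch, reusing the existing lock / list
names from the patch) is what I have in mind - always re-read the list
head under the lock rather than trusting a cached next pointer:

static int xe_svm_garbage_collector(struct xe_vm *vm)
{
	struct xe_svm_range *range;
	int err;

	lockdep_assert_held_write(&vm->lock);

	if (xe_vm_is_closed_or_banned(vm))
		return -ENOENT;

	spin_lock(&vm->svm.garbage_collector.lock);
	for (;;) {
		/* Re-fetch the first entry each iteration under the lock */
		range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list,
						 typeof(*range),
						 garbage_collector_link);
		if (!range)
			break;

		list_del(&range->garbage_collector_link);
		spin_unlock(&vm->svm.garbage_collector.lock);

		err = __xe_svm_garbage_collector(vm, range);
		if (err) {
			drm_warn(&vm->xe->drm,
				 "Garbage collection failed: %d\n", err);
			xe_vm_kill(vm, true);
			return err;
		}

		spin_lock(&vm->svm.garbage_collector.lock);
	}
	spin_unlock(&vm->svm.garbage_collector.lock);

	return 0;
}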
> > 
> > > > +
> > > > +		err = __xe_svm_garbage_collector(vm, range);
> > > > +		if (err) {
> > > > +			drm_warn(&vm->xe->drm,
> > > > +				 "Garbage collection failed: %d\n", err);
> > > > +			xe_vm_kill(vm, true);
> > > > +			return err;
> > > > +		}
> > > > +
> > > > +		spin_lock(&vm->svm.garbage_collector.lock);
> > > > +	}
> > > > +	spin_unlock(&vm->svm.garbage_collector.lock);
> > > > +
> > > > +	return 0;
> > > > +}
> > > > +
> > > > +static void xe_svm_garbage_collector_work_func(struct work_struct *w)
> > > > +{
> > > > +	struct xe_vm *vm = container_of(w, struct xe_vm,
> > > > +					svm.garbage_collector.work);
> > > > +
> > > > +	down_write(&vm->lock);
> > > > +	xe_svm_garbage_collector(vm);
> > > > +	up_write(&vm->lock);
> > > > +}
> > > > +
> > > >  static const struct drm_gpusvm_ops gpusvm_ops = {
> > > >  	.range_alloc = xe_svm_range_alloc,
> > > >  	.range_free = xe_svm_range_free,
> > > > @@ -198,6 +271,11 @@ static const u64 fault_chunk_sizes[] = {
> > > >  
> > > >  int xe_svm_init(struct xe_vm *vm)
> > > >  {
> > > > +	spin_lock_init(&vm->svm.garbage_collector.lock);
> > > > +	INIT_LIST_HEAD(&vm->svm.garbage_collector.range_list);
> > > > +	INIT_WORK(&vm->svm.garbage_collector.work,
> > > > +		  xe_svm_garbage_collector_work_func);
> > > > +
> > > >  	return drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
> > > >  			       current->mm, NULL, 0, vm->size,
> > > >  			       SZ_512M, &gpusvm_ops, fault_chunk_sizes,
> > > > @@ -211,6 +289,8 @@ void xe_svm_close(struct xe_vm *vm)
> > > >  	/* Flush running notifiers making xe_vm_close() visible */
> > > >  	xe_svm_notifier_lock(vm);
> > > >  	xe_svm_notifier_unlock(vm);
> > > > +
> > > > +	flush_work(&vm->svm.garbage_collector.work);
> > > >  }
> > > >  
> > > >  void xe_svm_fini(struct xe_vm *vm)
> > > > @@ -241,7 +321,10 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
> > > >  	lockdep_assert_held_write(&vm->lock);
> > > >  
> > > >  retry:
> > > > -	/* TODO: Run garbage collector */
> > > > +	/* Always process UNMAPs first so the view of SVM ranges is current */
> > > > +	err = xe_svm_garbage_collector(vm);
> > > > +	if (err)
> > > > +		return err;
> > > >  
> > > >  	r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, fault_addr,
> > > >  					    xe_vma_start(vma), xe_vma_end(vma),
> > > > diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
> > > > index ee0bd1ae655b..06d90d0f71a6 100644
> > > > --- a/drivers/gpu/drm/xe/xe_svm.h
> > > > +++ b/drivers/gpu/drm/xe/xe_svm.h
> > > > @@ -17,6 +17,7 @@ struct xe_vma;
> > > >  
> > > >  struct xe_svm_range {
> > > >  	struct drm_gpusvm_range base;
> > > > +	struct list_head garbage_collector_link;
> > > >  	u8 tile_present;
> > > >  	u8 tile_invalidated;
> > > >  };
> > > > diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> > > > index 63aa0a25d3b7..399cbbdbddd5 100644
> > > > --- a/drivers/gpu/drm/xe/xe_vm.c
> > > > +++ b/drivers/gpu/drm/xe/xe_vm.c
> > > > @@ -3071,6 +3071,10 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> > > >  		goto put_exec_queue;
> > > >  	}
> > > >  
> > > > +	/* Ensure all UNMAPs are visible */
> > > > +	if (xe_vm_in_fault_mode(vm))
> > > > +		flush_work(&vm->svm.garbage_collector.work);
> > > 
> > > Hmm, what if someone adds an UNMAP here?
> > > 
> > 
> > What we are really trying to guard against is user space doing
> > something like this:
> > 
> > addr = malloc();
> > gpu access
> > free(addr)
> > bind_bo(addr);
> > 
> > We want to make sure all SVM mappings from the GPU access have
> > processed the UNMAP events from the 'free(addr)'. So I think the code
> > is fine as is - we just want to make sure UNMAP events prior to the
> > IOCTL are processed.
> 
> But the notion of "prior" only exists in the presence of some form of
> synchronization, like a lock. Let's say another thread calls a free
> either
> 
> a) before the flush_work
> b) racing with the flush_work
> c) after the flush_work
> 
> Is there any difference WRT correctness and how do we differentiate?
> 
> I don't think it's clear what this flush_work actually protects
> against.
> 

I still think this is ok.

Let's say we have 2 threads...

- Thread A: munmap(address A)	- This address has an SVM GPU binding, so we will get an UNMAP notifier
- Thread B: address B = mmap()	- This happens to be equal to address A
- Thread B: bind BO(address B)	- We flush_work(), which ensures the UNMAP event is processed and the view of SVM state is current, avoiding the bind returning -EBUSY

The key here is that it is impossible for address A == address B unless
an UNMAP event has been queued in the garbage collector, unless I'm
completely missing something. This is really the only race we care
about - we care that UNMAP events which precede the bind and match the
bind address are processed.

If other UNMAP events occur while processing the bind, that is fine as
they shouldn't collide. Worst case, if a collision occurs we'd return
-EBUSY in the bind IOCTL.
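
To make the ordering concrete, here is roughly how I read the bind path
from this patch, annotated against your a/b/c cases (just a sketch of
the argument, not new code):

	/* xe_vm_bind_ioctl(), as in this patch */
	if (xe_vm_in_fault_mode(vm))
		flush_work(&vm->svm.garbage_collector.work);
		/*
		 * a) free() retired before the flush: its range has already
		 *    been collected, the bind sees a current view.
		 * b) free() racing with the flush: a range covering the bind
		 *    address can only be queued if user space already
		 *    munmap()'d it, which must precede it mmap()'ing the
		 *    same address again and passing it to this ioctl.
		 * c) free() after the flush: either an unrelated address, or
		 *    a genuine collision which the bind reports as -EBUSY.
		 */

	err = down_write_killable(&vm->lock);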

Does this make sense?

Matt

> Thanks,
> Thomas
> 
> 
> 
> 
> > 
> > Matt
> > 
> >  
> > > Thanks,
> > > Thomas
> > > 
> > > > +
> > > >  	err = down_write_killable(&vm->lock);
> > > >  	if (err)
> > > >  		goto put_vm;
> > > > diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
> > > > index b736e53779d2..2eae3575c409 100644
> > > > --- a/drivers/gpu/drm/xe/xe_vm_types.h
> > > > +++ b/drivers/gpu/drm/xe/xe_vm_types.h
> > > > @@ -146,6 +146,11 @@ struct xe_vm {
> > > >  	struct {
> > > >  		/** @svm.gpusvm: base GPUSVM used to track fault allocations */
> > > >  		struct drm_gpusvm gpusvm;
> > > > +		struct {
> > > > +			spinlock_t lock;
> > > > +			struct list_head range_list;
> > > > +			struct work_struct work;
> > > > +		} garbage_collector;
> > > >  	} svm;
> > > >  
> > > >  	struct xe_device *xe;
> > > 
> 


