Re: [PATCH v4 11/33] drm/xe: Nuke VM's mapping upon close

On Wed, 2025-01-29 at 11:51 -0800, Matthew Brost wrote:
> Clear the root PT entry and invalidate the entire VM's address space
> when closing the VM. This will prevent the GPU from accessing any of
> the VM's memory after it is closed.
> 
> v2:
>  - s/vma/vm in kernel doc (CI)
>  - Don't nuke the migration VM as this occurs at driver unload (CI)
> v3:
>  - Rebase and pull into SVM series (Thomas)
>  - Wait for pending binds (Thomas)
> 
> Signed-off-by: Matthew Brost <matthew.brost@xxxxxxxxx>
> ---
>  drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c | 24 +++++++++++++++++++++
>  drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h |  2 ++
>  drivers/gpu/drm/xe/xe_pt.c                  | 14 ++++++++++++
>  drivers/gpu/drm/xe/xe_pt.h                  |  3 +++
>  drivers/gpu/drm/xe/xe_vm.c                  | 22 +++++++++++++++++++
>  5 files changed, 65 insertions(+)
> 
> diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
> index 0a93831c0a02..1ef21ed01d1b 100644
> --- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
> +++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
> @@ -410,6 +410,30 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
>  	return send_tlb_invalidation(&gt->uc.guc, fence, action, len);
>  }
>  
> +/**
> + * xe_gt_tlb_invalidation_vm - Issue a TLB invalidation on this GT for a VM
> + * @gt: GT structure
> + * @vm: VM to invalidate
> + *
> + * Invalidate the entire VM's address space.
> + */
> +void xe_gt_tlb_invalidation_vm(struct xe_gt *gt, struct xe_vm *vm)
> +{
> +	struct xe_gt_tlb_invalidation_fence fence;
> +	u64 range = 1ull << vm->xe->info.va_bits;
> +	int ret;
> +
> +	xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
> +
> +	ret = xe_gt_tlb_invalidation_range(gt, &fence, 0, range, vm->usm.asid);
> +	if (ret < 0) {
> +		xe_gt_tlb_invalidation_fence_fini(&fence);
> +		return;
> +	}
> +
> +	xe_gt_tlb_invalidation_fence_wait(&fence);
> +}
> +
>  /**
>   * xe_gt_tlb_invalidation_vma - Issue a TLB invalidation on this GT for a VMA
>   * @gt: GT structure
> diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
> index 672acfcdf0d7..abe9b03d543e 100644
> --- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
> +++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
> @@ -12,6 +12,7 @@
>  
>  struct xe_gt;
>  struct xe_guc;
> +struct xe_vm;
>  struct xe_vma;
>  
>  int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt);
> @@ -21,6 +22,7 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt);
>  int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
>  			       struct xe_gt_tlb_invalidation_fence *fence,
>  			       struct xe_vma *vma);
> +void xe_gt_tlb_invalidation_vm(struct xe_gt *gt, struct xe_vm *vm);
>  int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
>  				 struct xe_gt_tlb_invalidation_fence *fence,
>  				 u64 start, u64 end, u32 asid);
> diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
> index 99b97bf37c05..c5060011ad43 100644
> --- a/drivers/gpu/drm/xe/xe_pt.c
> +++ b/drivers/gpu/drm/xe/xe_pt.c
> @@ -214,6 +214,20 @@ void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred)
>  	xe_pt_free(pt);
>  }
>  
> +/**
> + * xe_pt_clear() - Clear a page-table.
> + * @xe: xe device.
> + * @pt: The page-table.
> + *
> + * Clears the page-table by setting it to zero.
> + */
> +void xe_pt_clear(struct xe_device *xe, struct xe_pt *pt)
> +{
> +	struct iosys_map *map = &pt->bo->vmap;
> +
> +	xe_map_memset(xe, map, 0, 0, SZ_4K);
> +}
> +
>  /**
>   * DOC: Pagetable building
>   *
> diff --git a/drivers/gpu/drm/xe/xe_pt.h b/drivers/gpu/drm/xe/xe_pt.h
> index 9ab386431cad..8e43912ae8e9 100644
> --- a/drivers/gpu/drm/xe/xe_pt.h
> +++ b/drivers/gpu/drm/xe/xe_pt.h
> @@ -13,6 +13,7 @@ struct dma_fence;
>  struct xe_bo;
>  struct xe_device;
>  struct xe_exec_queue;
> +struct xe_svm_range;
>  struct xe_sync_entry;
>  struct xe_tile;
>  struct xe_vm;
> @@ -35,6 +36,8 @@ void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm,
>  
>  void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred);
>  
> +void xe_pt_clear(struct xe_device *xe, struct xe_pt *pt);
> +
>  int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops);
>  struct dma_fence *xe_pt_update_ops_run(struct xe_tile *tile,
>  				       struct xe_vma_ops *vops);
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index bc34e6738c8c..82026c5a154d 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -1537,8 +1537,30 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
>  
>  static void xe_vm_close(struct xe_vm *vm)
>  {
> +	bool migration = (vm->flags & XE_VM_FLAG_MIGRATION);

Do we need a separate bool here? It's only used in one place AFAICT.
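
E.g. something like the below would work just as well (just an untested
sketch; the body of the if-block stays exactly as in the patch):

	if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
		/* wait for pending binds, clear PTs, invalidate TLBs */
		...
	}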

Otherwise,
Reviewed-by: Thomas Hellström <thomas.hellstrom@xxxxxxxxxxxxxxx>


> +
>  	down_write(&vm->lock);
> +
>  	vm->size = 0;
> +
> +	if (!migration) {
> +		struct xe_tile *tile;
> +		struct xe_gt *gt;
> +		u8 id;
> +
> +		/* Wait for pending binds */
> +		dma_resv_wait_timeout(xe_vm_resv(vm),
> +				      DMA_RESV_USAGE_BOOKKEEP,
> +				      false, MAX_SCHEDULE_TIMEOUT);
> +
> +		for_each_tile(tile, vm->xe, id)
> +			if (vm->pt_root[id])
> +				xe_pt_clear(vm->xe, vm->pt_root[id]);
> +
> +		for_each_gt(gt, vm->xe, id)
> +			xe_gt_tlb_invalidation_vm(gt, vm);
> +	}
> +
>  	up_write(&vm->lock);
>  }
>  




