Re: [PATCH v4 29/33] drm/xe: Basic SVM BO eviction

On Wed, 2025-01-29 at 11:52 -0800, Matthew Brost wrote:
> Wire xe_bo_move to GPU SVM migration via new helper xe_svm_bo_evict.
> 
> v2:
>  - Use xe_svm_bo_evict
>  - Drop bo->range
> v3:
>  - Kernel doc (Thomas)
> v4:
>  - Add missing xe_bo.c code
> 
> Signed-off-by: Matthew Brost <matthew.brost@xxxxxxxxx>

I think in the long run we'd want to do the SVM eviction / unbind in
move_notify(), since that's where we're supposed to unbind other
subsystems, and then just purge the bo using a NULL placement. But
since this is equivalent, let's postpone that to a more general
xe_bo_move() cleanup; the function is getting pretty hard to follow.
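
For illustration only, a minimal sketch of that direction. The hook name
and signature below are made up for the sketch (the real move_notify()
plumbing in xe may look different); only XE_BO_FLAG_CPU_ADDR_MIRROR,
XE_PL_SYSTEM and xe_svm_bo_evict() come from this patch:

    /*
     * Sketch only: assumes a move_notify()-style hook that sees the new
     * placement before the move happens.
     */
    static void xe_bo_move_notify_svm_sketch(struct xe_bo *bo,
    					     struct ttm_resource *new_mem)
    {
    	/*
    	 * move_notify() is where other subsystems get unbound, so the
    	 * SVM side would go here too: migrate all device pages back to
    	 * RAM before the VRAM backing store goes away.
    	 */
    	if ((bo->flags & XE_BO_FLAG_CPU_ADDR_MIRROR) &&
    	    new_mem && new_mem->mem_type == XE_PL_SYSTEM)
    		(void)xe_svm_bo_evict(bo);
    }

The purge of the backing store would then come from validating the bo
with a NULL placement as mentioned above, rather than from the explicit
XE_PL_SYSTEM branch added to xe_bo_move() in this patch.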

Reviewed-by: Thomas Hellström <thomas.hellstrom@xxxxxxxxxxxxxxx>


> ---
>  drivers/gpu/drm/xe/xe_bo.c  | 19 +++++++++++++++++++
>  drivers/gpu/drm/xe/xe_svm.c | 15 ++++++++++++++-
>  drivers/gpu/drm/xe/xe_svm.h |  3 +++
>  3 files changed, 36 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
> index 20c96709e267..657687ee70d0 100644
> --- a/drivers/gpu/drm/xe/xe_bo.c
> +++ b/drivers/gpu/drm/xe/xe_bo.c
> @@ -255,6 +255,8 @@ int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
>  static void xe_evict_flags(struct ttm_buffer_object *tbo,
>  			   struct ttm_placement *placement)
>  {
> +	struct xe_bo *bo;
> +
>  	if (!xe_bo_is_xe_bo(tbo)) {
>  		/* Don't handle scatter gather BOs */
>  		if (tbo->type == ttm_bo_type_sg) {
> @@ -266,6 +268,12 @@ static void xe_evict_flags(struct ttm_buffer_object *tbo,
>  		return;
>  	}
>  
> +	bo = ttm_to_xe_bo(tbo);
> +	if (bo->flags & XE_BO_FLAG_CPU_ADDR_MIRROR) {
> +		*placement = sys_placement;
> +		return;
> +	}
> +
>  	/*
>  	 * For xe, sg bos that are evicted to system just triggers a
>  	 * rebind of the sg list upon subsequent validation to XE_PL_TT.
> @@ -710,6 +718,17 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
>  		goto out;
>  	}
>  
> +	if (!move_lacks_source && (bo->flags & XE_BO_FLAG_CPU_ADDR_MIRROR) &&
> +	    new_mem->mem_type == XE_PL_SYSTEM) {
> +		ret = xe_svm_bo_evict(bo);
> +		if (!ret) {
> +			drm_dbg(&xe->drm, "Evict system allocator BO success\n");
> +			ttm_bo_move_null(ttm_bo, new_mem);
> +		}
> +
> +		goto out;
> +	}
> +
>  	if (old_mem_type == XE_PL_SYSTEM && new_mem->mem_type == XE_PL_TT && !handle_system_ccs) {
>  		ttm_bo_move_null(ttm_bo, new_mem);
>  		goto out;
> diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
> index fc030855d078..dafc5061eb42 100644
> --- a/drivers/gpu/drm/xe/xe_svm.c
> +++ b/drivers/gpu/drm/xe/xe_svm.c
> @@ -768,6 +768,20 @@ bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
>  	return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
>  }
>  
> +/**
> + * xe_svm_bo_evict() - SVM evict BO to system memory
> + * @bo: BO to evict
> + *
> + * SVM evict BO to system memory. GPU SVM layer ensures all device pages
> + * are evicted before returning.
> + *
> + * Return: 0 on success, standard error code otherwise
> + */
> +int xe_svm_bo_evict(struct xe_bo *bo)
> +{
> +	return drm_gpusvm_evict_to_ram(&bo->devmem_allocation);
> +}
> +
>  #if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
>  static struct drm_pagemap_dma_addr
>  xe_drm_pagemap_map_dma(struct drm_pagemap *dpagemap,
> @@ -795,7 +809,6 @@ static const struct drm_pagemap_ops xe_drm_pagemap_ops = {
>  	.map_dma = xe_drm_pagemap_map_dma,
>  };
>  
> ->>>>>>> 133db8ade5f0 (drm/xe: Add drm_pagemap ops to SVM)
>  /**
>   * xe_devm_add: Remap and provide memmap backing for device memory
>   * @tile: tile that the memory region belongs to
> diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
> index 4c2576162c39..77dec5aae0ee 100644
> --- a/drivers/gpu/drm/xe/xe_svm.h
> +++ b/drivers/gpu/drm/xe/xe_svm.h
> @@ -11,6 +11,7 @@
>  
>  #define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER
>  
> +struct xe_bo;
>  struct xe_mem_region;
>  struct xe_tile;
>  struct xe_vm;
> @@ -56,6 +57,8 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
>  
>  bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);
>  
> +int xe_svm_bo_evict(struct xe_bo *bo);
> +
>  static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
>  {
>  	return drm_gpusvm_range_pages_valid(range->base.gpusvm, &range->base);