[PATCH v5 4/5] drm/amdgpu: use bulk moves for efficient VM LRU handling (v5)

On 08/22/2018 03:52 PM, Huang Rui wrote:
> I continued to work on the bulk moving, based on the proposal by Christian.
>
> Background:
> The amdgpu driver moves all PD/PT and per-VM BOs onto the idle list, and then
> moves each of them to the end of the LRU list one by one. Moving that many BOs
> to the end of the LRU individually seriously hurts performance.
>
> Christian then provided a workaround that does not move the PD/PT BOs on the
> LRU at all, with the patch below:
> Commit 0bbf32026cf5ba41e9922b30e26e1bed1ecd38ae ("drm/amdgpu: band aid
> validating VM PTs")
>
> However, the proper solution is to bulk move all PD/PT and per-VM BOs on the
> LRU instead of moving them one by one.
>
> Whenever amdgpu_vm_validate_pt_bos() is called and we have BOs which need to
> be validated, we move all of them together to the end of the LRU without
> dropping the LRU lock.
>
> While doing so, we note the beginning and end of this block in the LRU list.
>
> Now, when amdgpu_vm_validate_pt_bos() is called and there is nothing to do, we
> no longer move every BO one by one; instead we cut the LRU list into pieces so
> that we bulk move everything to the end in just one operation.
>
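Side note for readers: the "one operation" above relies on the TTM-side
ttm_bo_bulk_move_lru_tail() added earlier in this series, which boils down to
an O(1) list splice. A minimal sketch of the idea, assuming plain struct
list_head semantics and not necessarily matching the in-tree helper line for
line: detach the contiguous block [first, last] and reattach it at the list
tail without touching any entry in between.

	static inline void bulk_move_tail(struct list_head *head,
					  struct list_head *first,
					  struct list_head *last)
	{
		/* Unlink the whole block [first, last] in one go. */
		first->prev->next = last->next;
		last->next->prev = first->prev;

		/* Reattach the block at the tail of the list. */
		head->prev->next = first;
		first->prev = head->prev;
		last->next = head;
		head->prev = last;
	}

This stays O(1) no matter how many BOs sit inside the block, which is exactly
what makes the bulk move cheaper than per-BO list_move_tail() calls.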
> Test data:
> +--------------+-----------------+-----------+---------------------------------------+
> |              |The Talos        |Clpeak(OCL)|BusSpeedReadback(OCL)                  |
> |              |Principle(Vulkan)|           |                                       |
> +--------------+-----------------+-----------+---------------------------------------+
> |              |                 |           |0.319 ms(1K) 0.314 ms(2K) 0.308 ms(4K) |
> | Original     |  147.7 FPS      |  76.86 us |0.307 ms(8K) 0.310 ms(16K)             |
> +--------------+-----------------+-----------+---------------------------------------+
> | Original + WA|                 |           |0.254 ms(1K) 0.241 ms(2K)              |
> |(don't move   |  162.1 FPS      |  42.15 us |0.230 ms(4K) 0.223 ms(8K) 0.204 ms(16K)|
> |PT BOs on LRU)|                 |           |                                       |
> +--------------+-----------------+-----------+---------------------------------------+
> | Bulk move    |  163.1 FPS      |  40.52 us |0.244 ms(1K) 0.252 ms(2K) 0.213 ms(4K) |
> |              |                 |           |0.214 ms(8K) 0.225 ms(16K)             |
> +--------------+-----------------+-----------+---------------------------------------+
>
> After testing with the three benchmarks above, covering Vulkan and OpenCL, we
> can see a visible improvement over the original, and even better results than
> the original with the workaround.
>
> v2: move all BOs, including those on the idle, relocated, and moved lists, to
> the end of the LRU and keep them together.
> v3: remove unused parameter and use list_for_each_entry instead of the safe
> variant.
> v4: move the amdgpu_vm_move_to_lru_tail after command submission, at that time,
> all bo will be back on idle list.
> v5: remove amdgpu_vm_move_to_lru_tail_by_list(), use bulk_moveable instead of
> validated, and move ttm_bo_bulk_move_lru_tail() also into
> amdgpu_vm_move_to_lru_tail().
>
> Signed-off-by: Christian König <christian.koenig at amd.com>
> Signed-off-by: Huang Rui <ray.huang at amd.com>
> Tested-by: Mike Lothian <mike at fireburn.co.uk>
> Tested-by: Dieter Nützel <Dieter at nuetzel-hh.de>
> Acked-by: Chunming Zhou <david1.zhou at amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 10 ++++++
>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 66 +++++++++++++++++++++++-----------
>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 11 +++++-
>   3 files changed, 65 insertions(+), 22 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> index 502b94f..4efdbd2 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> @@ -1260,6 +1260,15 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
>   	return 0;
>   }
>
> +static void amdgpu_cs_vm_move_on_lru(struct amdgpu_device *adev,
> +				     struct amdgpu_cs_parser *p)
> +{
> +	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
> +	struct amdgpu_vm *vm = &fpriv->vm;
> +
> +	amdgpu_vm_move_to_lru_tail(adev, vm);
> +}
> +
>   int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
>   {
>   	struct amdgpu_device *adev = dev->dev_private;
> @@ -1310,6 +1319,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
>
>   	r = amdgpu_cs_submit(&parser, cs);
>
> +	amdgpu_cs_vm_move_on_lru(adev, &parser);

Looks like we can call amdgpu_vm_move_to_lru_tail() directly here instead of
adding a wrapper.
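E.g. something like this in amdgpu_cs_ioctl() (untested sketch; fpriv comes
from filp->driver_priv as elsewhere in this file):

	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = amdgpu_cs_submit(&parser, cs);

	amdgpu_vm_move_to_lru_tail(adev, &fpriv->vm);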

>   out:
>   	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
>   	return r;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> index 9c84770..db1f28a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> @@ -268,6 +268,47 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
>   }
>
>   /**
> + * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
> + *
> + * @adev: amdgpu device pointer
> + * @vm: vm providing the BOs
> + *
> + * Move all BOs to the end of LRU and remember their positions to put them
> + * together.
> + */
> +void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
> +				struct amdgpu_vm *vm)
> +{
> +	struct ttm_bo_global *glob = adev->mman.bdev.glob;
> +	struct amdgpu_vm_bo_base *bo_base;
> +
> +	if (vm->bulk_moveable) {
> +		spin_lock(&glob->lru_lock);
> +		ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
> +		spin_unlock(&glob->lru_lock);
> +		return;
> +	}

Question:
Why do we handle the bulk move in the next command submission instead of during
the current CS process?

> +
> +	memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
> +
> +	spin_lock(&glob->lru_lock);
> +	list_for_each_entry(bo_base, &vm->idle, vm_status) {
> +		struct amdgpu_bo *bo = bo_base->bo;
> +
> +		if (!bo->parent)
> +			continue;
> +
> +		ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move);
> +		if (bo->shadow)
> +			ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
> +						&vm->lru_bulk_move);
> +	}
> +	spin_unlock(&glob->lru_lock);
> +
> +	vm->bulk_moveable = true;
> +}
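For readers: passing &vm->lru_bulk_move into ttm_bo_move_to_lru_tail() above is
what records the block boundaries. Conceptually, the bookkeeping on the TTM
side looks roughly like this (a sketch of the idea, with hypothetical names,
not the exact in-tree code):

	/* One position per LRU: the first and last BO of the block. */
	struct lru_bulk_move_pos {
		struct ttm_buffer_object *first;
		struct ttm_buffer_object *last;
	};

	static void bulk_move_set_pos(struct lru_bulk_move_pos *pos,
				      struct ttm_buffer_object *bo)
	{
		if (!pos->first)
			pos->first = bo;	/* start of the block */
		pos->last = bo;			/* keep extending the end */
	}

Clearing vm->lru_bulk_move with memset() before the loop resets those
positions, so each pass records a fresh contiguous block.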
> +
> +/**
>    * amdgpu_vm_validate_pt_bos - validate the page table BOs
>    *
>    * @adev: amdgpu device pointer
> @@ -284,10 +325,11 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
>   			      int (*validate)(void *p, struct amdgpu_bo *bo),
>   			      void *param)
>   {
> -	struct ttm_bo_global *glob = adev->mman.bdev.glob;
>   	struct amdgpu_vm_bo_base *bo_base, *tmp;
>   	int r = 0;
>
> +	vm->bulk_moveable &= list_empty(&vm->evicted);
> +
>   	list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
>   		struct amdgpu_bo *bo = bo_base->bo;
>
> @@ -295,12 +337,6 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
>   			r = validate(param, bo);
>   			if (r)
>   				break;
> -
> -			spin_lock(&glob->lru_lock);
> -			ttm_bo_move_to_lru_tail(&bo->tbo, NULL);
> -			if (bo->shadow)
> -				ttm_bo_move_to_lru_tail(&bo->shadow->tbo, NULL);
> -			spin_unlock(&glob->lru_lock);
>   		}
>
>   		if (bo->tbo.type != ttm_bo_type_kernel) {
> @@ -312,20 +348,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
>   		}
>   	}
>
> -	spin_lock(&glob->lru_lock);
> -	list_for_each_entry(bo_base, &vm->idle, vm_status) {
> -		struct amdgpu_bo *bo = bo_base->bo;
> -
> -		if (!bo->parent)
> -			continue;
> -
> -		ttm_bo_move_to_lru_tail(&bo->tbo, NULL);
> -		if (bo->shadow)
> -			ttm_bo_move_to_lru_tail(&bo->shadow->tbo, NULL);
> -	}
> -	spin_unlock(&glob->lru_lock);
> -
> -	return r;
> +	return 0;

Won't this drop the error? If validate() fails we break out of the loop with r
set, but now we always return 0.
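If so, presumably the tail wants to keep propagating the error, i.e.:

	return r;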

>   }
>
>   /**
> @@ -2596,6 +2619,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
>   		return r;
>
>   	vm->pte_support_ats = false;
> +	vm->bulk_moveable = true;
>
>   	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
>   		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> index 67a15d4..bbdde40 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> @@ -29,6 +29,7 @@
>   #include <linux/rbtree.h>
>   #include <drm/gpu_scheduler.h>
>   #include <drm/drm_file.h>
> +#include <drm/ttm/ttm_bo_driver.h>
>
>   #include "amdgpu_sync.h"
>   #include "amdgpu_ring.h"
> @@ -226,6 +227,11 @@ struct amdgpu_vm {
>
>   	/* Some basic info about the task */
>   	struct amdgpu_task_info task_info;
> +
> +	/* Store positions of group of BOs */
> +	struct ttm_lru_bulk_move lru_bulk_move;
> +	/* mark whether can do the bulk move */
> +	bool			bulk_moveable;
>   };
>
>   struct amdgpu_vm_manager {
> @@ -330,8 +336,11 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
>   void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);
>
>   void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
> -			 struct amdgpu_task_info *task_info);
> +			     struct amdgpu_task_info *task_info);

This change looks unrelated to the bulk move.

Regards,
Jerry

>
>   void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
>
> +void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
> +				struct amdgpu_vm *vm);
> +
>   #endif
>

