[PATCH 1/7] drm/amdgpu: use new scheduler load balancing for VMs

Reviewed-by: Chunming Zhou <david1.zhou at amd.com> for series.


Thanks,

David Zhou


On 2018-08-01 19:31, Christian König wrote:
> Instead of using a fixed round robin, let the scheduler balance the load
> of page table updates.
>
> Signed-off-by: Christian König <christian.koenig at amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  2 +-
>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c     | 12 ++----------
>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h     |  7 +++----
>   drivers/gpu/drm/amd/amdgpu/cik_sdma.c      | 12 +++++++-----
>   drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c     | 12 +++++++-----
>   drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c     | 12 +++++++-----
>   drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c     | 12 +++++++-----
>   drivers/gpu/drm/amd/amdgpu/si_dma.c        | 12 +++++++-----
>   8 files changed, 41 insertions(+), 40 deletions(-)
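
The nice part here is that entity placement is no longer fixed once at VM
init by a driver-side round robin counter; the scheduler core can now pick
among all the kernel-priority SDMA run queues handed to
drm_sched_entity_init(). Roughly, the selection idea looks like the sketch
below (just an illustration, not the actual drm_sched internals; the
num_jobs load metric and the helper name are assumptions on my side):

	/* Illustrative sketch only, not the real drm_sched code.
	 * Given the rq list passed to drm_sched_entity_init(), pick
	 * the run queue whose backing scheduler currently has the
	 * least work queued, instead of round-robining at init time.
	 */
	static struct drm_sched_rq *
	pick_least_loaded_rq(struct drm_sched_rq **rq_list,
			     unsigned int num_rqs)
	{
		struct drm_sched_rq *best = rq_list[0];
		unsigned int i;

		for (i = 1; i < num_rqs; i++) {
			/* assumed per-scheduler job counter */
			if (atomic_read(&rq_list[i]->sched->num_jobs) <
			    atomic_read(&best->sched->num_jobs))
				best = rq_list[i];
		}
		return best;
	}
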
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index 745f760b8df9..971ab128f277 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -2335,7 +2335,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
>   	adev->mman.buffer_funcs = NULL;
>   	adev->mman.buffer_funcs_ring = NULL;
>   	adev->vm_manager.vm_pte_funcs = NULL;
> -	adev->vm_manager.vm_pte_num_rings = 0;
> +	adev->vm_manager.vm_pte_num_rqs = 0;
>   	adev->gmc.gmc_funcs = NULL;
>   	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
>   	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> index 015613b4f98b..662e8a34d52c 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> @@ -2568,9 +2568,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
>   	struct amdgpu_bo *root;
>   	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
>   		AMDGPU_VM_PTE_COUNT(adev) * 8);
> -	unsigned ring_instance;
> -	struct amdgpu_ring *ring;
> -	struct drm_sched_rq *rq;
>   	unsigned long size;
>   	uint64_t flags;
>   	int r, i;
> @@ -2586,12 +2583,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
>   	INIT_LIST_HEAD(&vm->freed);
>   
>   	/* create scheduler entity for page table updates */
> -
> -	ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
> -	ring_instance %= adev->vm_manager.vm_pte_num_rings;
> -	ring = adev->vm_manager.vm_pte_rings[ring_instance];
> -	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
> -	r = drm_sched_entity_init(&vm->entity, &rq, 1, NULL);
> +	r = drm_sched_entity_init(&vm->entity, adev->vm_manager.vm_pte_rqs,
> +				  adev->vm_manager.vm_pte_num_rqs, NULL);
>   	if (r)
>   		return r;
>   
> @@ -2898,7 +2891,6 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
>   	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
>   		adev->vm_manager.seqno[i] = 0;
>   
> -	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
>   	spin_lock_init(&adev->vm_manager.prt_lock);
>   	atomic_set(&adev->vm_manager.num_prt_users, 0);
>   
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> index 67a15d439ac0..034f8c399c2d 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> @@ -244,10 +244,9 @@ struct amdgpu_vm_manager {
>   	/* vram base address for page table entry  */
>   	u64					vram_base_offset;
>   	/* vm pte handling */
> -	const struct amdgpu_vm_pte_funcs        *vm_pte_funcs;
> -	struct amdgpu_ring                      *vm_pte_rings[AMDGPU_MAX_RINGS];
> -	unsigned				vm_pte_num_rings;
> -	atomic_t				vm_pte_next_ring;
> +	const struct amdgpu_vm_pte_funcs	*vm_pte_funcs;
> +	struct drm_sched_rq			*vm_pte_rqs[AMDGPU_MAX_RINGS];
> +	unsigned				vm_pte_num_rqs;
>   
>   	/* partial resident texture handling */
>   	spinlock_t				prt_lock;
> diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
> index d0fa2aac2388..154b1499b07e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
> +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
> @@ -1386,15 +1386,17 @@ static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
>   
>   static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
>   {
> +	struct drm_gpu_scheduler *sched;
>   	unsigned i;
>   
>   	if (adev->vm_manager.vm_pte_funcs == NULL) {
>   		adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
> -		for (i = 0; i < adev->sdma.num_instances; i++)
> -			adev->vm_manager.vm_pte_rings[i] =
> -				&adev->sdma.instance[i].ring;
> -
> -		adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
> +		for (i = 0; i < adev->sdma.num_instances; i++) {
> +			sched = &adev->sdma.instance[i].ring.sched;
> +			adev->vm_manager.vm_pte_rqs[i] =
> +				&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
> +		}
> +		adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
>   	}
>   }
>   
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
> index 15ae4bc9c072..c403bdf8ad70 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
> @@ -1312,15 +1312,17 @@ static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
>   
>   static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
>   {
> +	struct drm_gpu_scheduler *sched;
>   	unsigned i;
>   
>   	if (adev->vm_manager.vm_pte_funcs == NULL) {
>   		adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
> -		for (i = 0; i < adev->sdma.num_instances; i++)
> -			adev->vm_manager.vm_pte_rings[i] =
> -				&adev->sdma.instance[i].ring;
> -
> -		adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
> +		for (i = 0; i < adev->sdma.num_instances; i++) {
> +			sched = &adev->sdma.instance[i].ring.sched;
> +			adev->vm_manager.vm_pte_rqs[i] =
> +				&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
> +		}
> +		adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
>   	}
>   }
>   
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
> index 1e07ff274d73..2677d6a1bf42 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
> @@ -1752,15 +1752,17 @@ static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
>   
>   static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
>   {
> +	struct drm_gpu_scheduler *sched;
>   	unsigned i;
>   
>   	if (adev->vm_manager.vm_pte_funcs == NULL) {
>   		adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
> -		for (i = 0; i < adev->sdma.num_instances; i++)
> -			adev->vm_manager.vm_pte_rings[i] =
> -				&adev->sdma.instance[i].ring;
> -
> -		adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
> +		for (i = 0; i < adev->sdma.num_instances; i++) {
> +			sched = &adev->sdma.instance[i].ring.sched;
> +			adev->vm_manager.vm_pte_rqs[i] =
> +				&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
> +		}
> +		adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
>   	}
>   }
>   
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
> index e7ca4623cfb9..65883bb5f17b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
> @@ -1750,15 +1750,17 @@ static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = {
>   
>   static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
>   {
> +	struct drm_gpu_scheduler *sched;
>   	unsigned i;
>   
>   	if (adev->vm_manager.vm_pte_funcs == NULL) {
>   		adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
> -		for (i = 0; i < adev->sdma.num_instances; i++)
> -			adev->vm_manager.vm_pte_rings[i] =
> -				&adev->sdma.instance[i].ring;
> -
> -		adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
> +		for (i = 0; i < adev->sdma.num_instances; i++) {
> +			sched = &adev->sdma.instance[i].ring.sched;
> +			adev->vm_manager.vm_pte_rqs[i] =
> +				&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
> +		}
> +		adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
>   	}
>   }
>   
> diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
> index b75d901ba3c4..fafaf259b17b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
> +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
> @@ -879,15 +879,17 @@ static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
>   
>   static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
>   {
> +	struct drm_gpu_scheduler *sched;
>   	unsigned i;
>   
>   	if (adev->vm_manager.vm_pte_funcs == NULL) {
>   		adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
> -		for (i = 0; i < adev->sdma.num_instances; i++)
> -			adev->vm_manager.vm_pte_rings[i] =
> -				&adev->sdma.instance[i].ring;
> -
> -		adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
> +		for (i = 0; i < adev->sdma.num_instances; i++) {
> +			sched = &adev->sdma.instance[i].ring.sched;
> +			adev->vm_manager.vm_pte_rqs[i] =
> +				&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
> +		}
> +		adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
>   	}
>   }
>   


