Re: [PATCH 4/5] drm/amdkfd: use map_queues for hiq on gfx v10 as well

On Sat, Jan 11, 2020 at 07:08:42AM +0800, Kuehling, Felix wrote:
> On 2020-01-10 1:37 a.m., Huang Rui wrote:
> > To align with gfx v9, we use the map_queues packet to load hiq MQD.
> >
> > Signed-off-by: Huang Rui <ray.huang@xxxxxxx>
> 
> Please make sure you test this on a GFXv10 GPU.
> 
> Reviewed-by: Felix Kuehling <Felix.Kuehling@xxxxxxx>
> 

I tested both a Navi14 and a Fiji card: gfx10 works well, but gfx8 failed with a
page fault...
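
For anyone skimming the thread, the compact view below shows what the new
kgd_hiq_mqd_load() (quoted further down) puts on the KIQ ring. It is only an
illustrative restatement of that hunk, reusing the PACKET3_MAP_QUEUES_*
helpers amdgpu already defines; it is not additional code in the patch:

    /* Illustrative only -- mirrors the 7-dword MAP_QUEUES packet in the
     * hunk below (amdgpu_ring_alloc(kiq_ring, 7) plus seven ring writes).
     */
    uint32_t pkt[7];

    pkt[0] = PACKET3(PACKET3_MAP_QUEUES, 5);            /* header, 7 dwords total */
    pkt[1] = PACKET3_MAP_QUEUES_QUEUE_SEL(0) |          /* map the single queue below */
             PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) |  /* 0 for the HIQ */
             PACKET3_MAP_QUEUES_QUEUE(queue_id) |
             PACKET3_MAP_QUEUES_PIPE(pipe) |
             PACKET3_MAP_QUEUES_ME(mec - 1) |           /* ME index is mec - 1 */
             PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |         /* normal compute queue */
             PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |       /* all_on_one_pipe */
             PACKET3_MAP_QUEUES_ENGINE_SEL(1) |         /* 1 selects the HIQ */
             PACKET3_MAP_QUEUES_NUM_QUEUES(1);          /* must be 1 */
    pkt[2] = PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off);
    pkt[3] = m->cp_mqd_base_addr_lo;                    /* HIQ MQD GPU address */
    pkt[4] = m->cp_mqd_base_addr_hi;
    pkt[5] = m->cp_hqd_pq_wptr_poll_addr_lo;            /* wptr poll address */
    pkt[6] = m->cp_hqd_pq_wptr_poll_addr_hi;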

Thanks,
Ray

> 
> > ---
> >   drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c | 82 ++++++++++++++++------
> >   drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c   | 10 ++-
> >   2 files changed, 70 insertions(+), 22 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
> > index 61cd707..2a60f73 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
> > @@ -107,13 +107,13 @@ static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
> >   	lock_srbm(kgd, mec, pipe, queue_id, 0);
> >   }
> >   
> > -static uint32_t get_queue_mask(struct amdgpu_device *adev,
> > +static uint64_t get_queue_mask(struct amdgpu_device *adev,
> >   			       uint32_t pipe_id, uint32_t queue_id)
> >   {
> > -	unsigned int bit = (pipe_id * adev->gfx.mec.num_queue_per_pipe +
> > -			    queue_id) & 31;
> > +	unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe +
> > +			queue_id;
> >   
> > -	return ((uint32_t)1) << bit;
> > +	return 1ull << bit;
> >   }
> >   
> >   static void release_queue(struct kgd_dev *kgd)
> > @@ -268,21 +268,6 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
> >   	pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id);
> >   	acquire_queue(kgd, pipe_id, queue_id);
> >   
> > -	/* HIQ is set during driver init period with vmid set to 0*/
> > -	if (m->cp_hqd_vmid == 0) {
> > -		uint32_t value, mec, pipe;
> > -
> > -		mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
> > -		pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
> > -
> > -		pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
> > -			mec, pipe, queue_id);
> > -		value = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS));
> > -		value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
> > -			((mec << 5) | (pipe << 3) | queue_id | 0x80));
> > -		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), value);
> > -	}
> > -
> >   	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
> >   	mqd_hqd = &m->cp_mqd_base_addr_lo;
> >   	hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
> > @@ -332,9 +317,10 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
> >   		       lower_32_bits((uint64_t)wptr));
> >   		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
> >   		       upper_32_bits((uint64_t)wptr));
> > -		pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__, get_queue_mask(adev, pipe_id, queue_id));
> > +		pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__,
> > +			 (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
> >   		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1),
> > -		       get_queue_mask(adev, pipe_id, queue_id));
> > +		       (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
> >   	}
> >   
> >   	/* Start the EOP fetcher */
> > @@ -350,6 +336,59 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
> >   	return 0;
> >   }
> >   
> > +static int kgd_hiq_mqd_load(struct kgd_dev *kgd, void *mqd,
> > +			    uint32_t pipe_id, uint32_t queue_id,
> > +			    uint32_t doorbell_off)
> > +{
> > +	struct amdgpu_device *adev = get_amdgpu_device(kgd);
> > +	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
> > +	struct v10_compute_mqd *m;
> > +	uint32_t mec, pipe;
> > +	int r;
> > +
> > +	m = get_mqd(mqd);
> > +
> > +	acquire_queue(kgd, pipe_id, queue_id);
> > +
> > +	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
> > +	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
> > +
> > +	pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
> > +		 mec, pipe, queue_id);
> > +
> > +	spin_lock(&adev->gfx.kiq.ring_lock);
> > +	r = amdgpu_ring_alloc(kiq_ring, 7);
> > +	if (r) {
> > +		pr_err("Failed to alloc KIQ (%d).\n", r);
> > +		goto out_unlock;
> > +	}
> > +
> > +	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
> > +	amdgpu_ring_write(kiq_ring,
> > +			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
> > +			  PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) | /* VMID */
> > +			  PACKET3_MAP_QUEUES_QUEUE(queue_id) |
> > +			  PACKET3_MAP_QUEUES_PIPE(pipe) |
> > +			  PACKET3_MAP_QUEUES_ME((mec - 1)) |
> > +			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
> > +			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
> > +			  PACKET3_MAP_QUEUES_ENGINE_SEL(1) | /* engine_sel: hiq */
> > +			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
> > +	amdgpu_ring_write(kiq_ring,
> > +			  PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off));
> > +	amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_lo);
> > +	amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_hi);
> > +	amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_lo);
> > +	amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_hi);
> > +	amdgpu_ring_commit(kiq_ring);
> > +
> > +out_unlock:
> > +	spin_unlock(&adev->gfx.kiq.ring_lock);
> > +	release_queue(kgd);
> > +
> > +	return r;
> > +}
> > +
> >   static int kgd_hqd_dump(struct kgd_dev *kgd,
> >   			uint32_t pipe_id, uint32_t queue_id,
> >   			uint32_t (**dump)[2], uint32_t *n_regs)
> > @@ -817,6 +856,7 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
> >   	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
> >   	.init_interrupts = kgd_init_interrupts,
> >   	.hqd_load = kgd_hqd_load,
> > +	.hiq_mqd_load = kgd_hiq_mqd_load,
> >   	.hqd_sdma_load = kgd_hqd_sdma_load,
> >   	.hqd_dump = kgd_hqd_dump,
> >   	.hqd_sdma_dump = kgd_hqd_sdma_dump,
> > diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
> > index 7832ec6..d1d68a5 100644
> > --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
> > +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
> > @@ -153,6 +153,14 @@ static int load_mqd(struct mqd_manager *mm, void *mqd,
> >   	return r;
> >   }
> >   
> > +static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
> > +			    uint32_t pipe_id, uint32_t queue_id,
> > +			    struct queue_properties *p, struct mm_struct *mms)
> > +{
> > +	return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->kgd, mqd, pipe_id,
> > +					      queue_id, p->doorbell_off);
> > +}
> > +
> >   static void update_mqd(struct mqd_manager *mm, void *mqd,
> >   		      struct queue_properties *q)
> >   {
> > @@ -409,7 +417,7 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
> >   		mqd->allocate_mqd = allocate_hiq_mqd;
> >   		mqd->init_mqd = init_mqd_hiq;
> >   		mqd->free_mqd = free_mqd_hiq_sdma;
> > -		mqd->load_mqd = load_mqd;
> > +		mqd->load_mqd = hiq_load_mqd_kiq;
> >   		mqd->update_mqd = update_mqd;
> >   		mqd->destroy_mqd = destroy_mqd;
> >   		mqd->is_occupied = is_occupied;
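
As a side note on the first hunk above: the widened get_queue_mask() can be
sanity-checked in isolation. The standalone sketch below only mirrors the
patched helper; num_queue_per_pipe = 8 is an assumed example value (the
driver reads the real one from adev->gfx.mec), and the CP_PQ_WPTR_POLL_CNTL1
write in kgd_hqd_load() still casts the result down to 32 bits explicitly.

    #include <stdint.h>
    #include <stdio.h>

    /* One bit per flat (pipe, queue) slot, as in the patched helper;
     * returning uint64_t drops the old "& 31" wrap of the bit index.
     */
    static uint64_t queue_mask(unsigned int num_queue_per_pipe,
                               uint32_t pipe_id, uint32_t queue_id)
    {
            unsigned int bit = pipe_id * num_queue_per_pipe + queue_id;

            return 1ull << bit;
    }

    int main(void)
    {
            /* Example values only. */
            printf("pipe 3, queue 7 -> 0x%llx\n",
                   (unsigned long long)queue_mask(8, 3, 7)); /* bit 31 */
            return 0;
    }
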
_______________________________________________
amd-gfx mailing list
amd-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


