From: Jack Xiao <Jack.Xiao@xxxxxxx>

Enable KIQ to map MES ring:
1). add MES queue mapping support in MAP_QUEUES packet.
2). use correct MQD settings for MES queue.

Signed-off-by: Jack Xiao <Jack.Xiao@xxxxxxx>
Reviewed-by: Hawking Zhang <Hawking.Zhang@xxxxxxx>
Signed-off-by: Alex Deucher <alexander.deucher@xxxxxxx>
---
 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c |  16 +++-
 drivers/gpu/drm/amd/amdgpu/mes_v10_1.c | 106 ++++++++++---------------
 2 files changed, 55 insertions(+), 67 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 3c4f2a94ad9f..fc289ee54a47 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -3525,7 +3525,21 @@ static void gfx10_kiq_map_queues(struct amdgpu_ring *kiq_ring,
 {
 	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
 	uint64_t wptr_addr = ring->wptr_gpu_addr;
-	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
+	uint32_t eng_sel = 0;
+
+	switch (ring->funcs->type) {
+	case AMDGPU_RING_TYPE_COMPUTE:
+		eng_sel = 0;
+		break;
+	case AMDGPU_RING_TYPE_GFX:
+		eng_sel = 4;
+		break;
+	case AMDGPU_RING_TYPE_MES:
+		eng_sel = 5;
+		break;
+	default:
+		WARN_ON(1);
+	}
 
 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
 	/* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
index 030a92b3a0da..18a129f36215 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
@@ -28,6 +28,7 @@
 #include "nv.h"
 #include "gc/gc_10_1_0_offset.h"
 #include "gc/gc_10_1_0_sh_mask.h"
+#include "gc/gc_10_1_0_default.h"
 #include "v10_structs.h"
 #include "mes_api_def.h"
 
@@ -529,7 +530,7 @@ static void mes_v10_1_enable(struct amdgpu_device *adev, bool enable)
 		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_ACTIVE,
 				     adev->enable_mes_kiq ? 1 : 0);
 		WREG32_SOC15(GC, 0, mmCP_MES_CNTL, data);
-		udelay(50);
+		udelay(100);
 	} else {
 		data = RREG32_SOC15(GC, 0, mmCP_MES_CNTL);
 		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_ACTIVE, 0);
@@ -665,7 +666,6 @@ static int mes_v10_1_allocate_eop_buf(struct amdgpu_device *adev,
 
 static int mes_v10_1_mqd_init(struct amdgpu_ring *ring)
 {
-	struct amdgpu_device *adev = ring->adev;
 	struct v10_compute_mqd *mqd = ring->mqd_ptr;
 	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
 	uint32_t tmp;
@@ -679,38 +679,18 @@ static int mes_v10_1_mqd_init(struct amdgpu_ring *ring)
 	mqd->compute_misc_reserved = 0x00000003;
 
 	eop_base_addr = ring->eop_gpu_addr >> 8;
-	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
-	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
 
 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
-	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
+	tmp = mmCP_HQD_EOP_CONTROL_DEFAULT;
 	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
 			(order_base_2(MES_EOP_SIZE / 4) - 1));
 
+	mqd->cp_hqd_eop_base_addr_lo = lower_32_bits(eop_base_addr);
+	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
 	mqd->cp_hqd_eop_control = tmp;
 
-	/* enable doorbell? */
-	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
-
-	if (ring->use_doorbell) {
-		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
-				    DOORBELL_OFFSET, ring->doorbell_index);
-		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
-				    DOORBELL_EN, 1);
-		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
-				    DOORBELL_SOURCE, 0);
-		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
-				    DOORBELL_HIT, 0);
-	}
-	else
-		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
-				    DOORBELL_EN, 0);
-
-	mqd->cp_hqd_pq_doorbell_control = tmp;
-
 	/* disable the queue if it's active */
 	ring->wptr = 0;
-	mqd->cp_hqd_dequeue_request = 0;
 	mqd->cp_hqd_pq_rptr = 0;
 	mqd->cp_hqd_pq_wptr_lo = 0;
 	mqd->cp_hqd_pq_wptr_hi = 0;
@@ -720,17 +700,28 @@ static int mes_v10_1_mqd_init(struct amdgpu_ring *ring)
 	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
 
 	/* set MQD vmid to 0 */
-	tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
+	tmp = mmCP_MQD_CONTROL_DEFAULT;
 	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
 	mqd->cp_mqd_control = tmp;
 
 	/* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
 	hqd_gpu_addr = ring->gpu_addr >> 8;
-	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
+	mqd->cp_hqd_pq_base_lo = lower_32_bits(hqd_gpu_addr);
 	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
 
+	/* set the wb address whether it's enabled or not */
+	wb_gpu_addr = ring->rptr_gpu_addr;
+	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
+	mqd->cp_hqd_pq_rptr_report_addr_hi =
+		upper_32_bits(wb_gpu_addr) & 0xffff;
+
+	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
+	wb_gpu_addr = ring->wptr_gpu_addr;
+	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffff8;
+	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
+
 	/* set up the HQD, this is similar to CP_RB0_CNTL */
-	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
+	tmp = mmCP_HQD_PQ_CONTROL_DEFAULT;
 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
 			    (order_base_2(ring->ring_size / 4) - 1));
 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
@@ -738,30 +729,18 @@ static int mes_v10_1_mqd_init(struct amdgpu_ring *ring)
 #ifdef __BIG_ENDIAN
 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
 #endif
-	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
+	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, NO_UPDATE_RPTR, 1);
 	mqd->cp_hqd_pq_control = tmp;
 
-	/* set the wb address whether it's enabled or not */
-	wb_gpu_addr = ring->rptr_gpu_addr;
-	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
-	mqd->cp_hqd_pq_rptr_report_addr_hi =
-		upper_32_bits(wb_gpu_addr) & 0xffff;
-
-	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
-	wb_gpu_addr = ring->wptr_gpu_addr;
-	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffff8;
-	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
-
+	/* enable doorbell? */
 	tmp = 0;
-	/* enable the doorbell if requested */
 	if (ring->use_doorbell) {
-		tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
-				DOORBELL_OFFSET, ring->doorbell_index);
-
+				    DOORBELL_OFFSET, ring->doorbell_index);
 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
 				    DOORBELL_EN, 1);
 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
@@ -769,30 +748,28 @@ static int mes_v10_1_mqd_init(struct amdgpu_ring *ring)
 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
 				    DOORBELL_HIT, 0);
 	}
-
+	else
+		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
+				    DOORBELL_EN, 0);
 	mqd->cp_hqd_pq_doorbell_control = tmp;
 
-	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
-	ring->wptr = 0;
-	mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
-
-	/* set the vmid for the queue */
 	mqd->cp_hqd_vmid = 0;
-
-	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
-	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
-	mqd->cp_hqd_persistent_state = tmp;
-
-	/* set MIN_IB_AVAIL_SIZE */
-	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
-	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
-	mqd->cp_hqd_ib_control = tmp;
-
 	/* activate the queue */
 	mqd->cp_hqd_active = 1;
+	mqd->cp_hqd_persistent_state = mmCP_HQD_PERSISTENT_STATE_DEFAULT;
+	mqd->cp_hqd_ib_control = mmCP_HQD_IB_CONTROL_DEFAULT;
+	mqd->cp_hqd_iq_timer = mmCP_HQD_IQ_TIMER_DEFAULT;
+	mqd->cp_hqd_quantum = mmCP_HQD_QUANTUM_DEFAULT;
+
+	tmp = mmCP_HQD_GFX_CONTROL_DEFAULT;
+	tmp = REG_SET_FIELD(tmp, CP_HQD_GFX_CONTROL, DB_UPDATED_MSG_EN, 1);
+	/* offset: 184 - this is used for CP_HQD_GFX_CONTROL */
+	mqd->cp_hqd_suspend_cntl_stack_offset = tmp;
+
 	return 0;
 }
 
+#if 0
 static void mes_v10_1_queue_init_register(struct amdgpu_ring *ring)
 {
 	struct v10_compute_mqd *mqd = ring->mqd_ptr;
@@ -854,8 +831,8 @@ static void mes_v10_1_queue_init_register(struct amdgpu_ring *ring)
 	nv_grbm_select(adev, 0, 0, 0, 0);
 	mutex_unlock(&adev->srbm_mutex);
 }
+#endif
 
-#if 0
 static int mes_v10_1_kiq_enable_queue(struct amdgpu_device *adev)
 {
 	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
@@ -878,9 +855,9 @@ static int mes_v10_1_kiq_enable_queue(struct amdgpu_device *adev)
 		DRM_ERROR("kfq enable failed\n");
 		kiq_ring->sched.ready = false;
 	}
+
 	return r;
 }
-#endif
 
 static int mes_v10_1_queue_init(struct amdgpu_device *adev)
 {
@@ -890,13 +867,9 @@ static int mes_v10_1_queue_init(struct amdgpu_device *adev)
 	if (r)
 		return r;
 
-#if 0
 	r = mes_v10_1_kiq_enable_queue(adev);
 	if (r)
 		return r;
-#else
-	mes_v10_1_queue_init_register(&adev->mes.ring);
-#endif
 
 	return 0;
 }
@@ -972,6 +945,7 @@ static int mes_v10_1_mqd_sw_init(struct amdgpu_device *adev,
 		dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
 		return r;
 	}
+	memset(ring->mqd_ptr, 0, mqd_size);
 
 	/* prepare MQD backup */
 	adev->mes.mqd_backup[pipe] = kmalloc(mqd_size, GFP_KERNEL);
-- 
2.35.1
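[Editor's note] For readers following the MAP_QUEUES change in gfx_v10_0.c above, here is a minimal standalone C sketch (not kernel code) of the engine-select mapping the patch introduces. The enum and helper names below are hypothetical stand-ins for the kernel's AMDGPU_RING_TYPE_* constants and the eng_sel logic in gfx10_kiq_map_queues(); the only point illustrated is that compute queues select engine 0, GFX selects 4, and the new MES ring selects 5.

/*
 * Standalone sketch of the eng_sel mapping added in gfx10_kiq_map_queues().
 * The enum and helper are hypothetical stand-ins; only the
 * COMPUTE=0 / GFX=4 / MES=5 selection mirrors the patch above.
 */
#include <stdio.h>

enum ring_type {
	RING_TYPE_GFX,
	RING_TYPE_COMPUTE,
	RING_TYPE_MES,
};

static unsigned int map_queues_eng_sel(enum ring_type type)
{
	switch (type) {
	case RING_TYPE_COMPUTE:
		return 0;	/* compute queues use engine select 0 */
	case RING_TYPE_GFX:
		return 4;	/* gfx queues use engine select 4 */
	case RING_TYPE_MES:
		return 5;	/* MES scheduler ring uses engine select 5 */
	default:
		return 0;	/* mirrors the WARN_ON(1) fallback in the patch */
	}
}

int main(void)
{
	printf("MES eng_sel = %u\n", map_queues_eng_sel(RING_TYPE_MES));
	return 0;
}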