From: Boyuan Zhang <boyuan.zhang@xxxxxxx>

For vcn 4_0_3, add an ip_block for each vcn instance during the discovery
stage.

Power on/off only the vcn instance identified by the instance value stored
in ip_block, instead of powering on/off all vcn instances. Modify the
existing functions to take the instance value from ip_block, and remove the
original for loops over all vcn instances.

v2: rename "i"/"j" to "inst" for the instance value.

Signed-off-by: Boyuan Zhang <boyuan.zhang@xxxxxxx>
Reviewed-by: Christian König <christian.koenig@xxxxxxx>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c |   3 +-
 drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c       | 462 +++++++++---------
 2 files changed, 228 insertions(+), 237 deletions(-)
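Note on the pattern (illustrative, not part of the patch): the change relies
on registering the same ip_block definition once per VCN instance at
discovery time, so that each block's callbacks later act only on the
instance recorded in the block. The short userspace sketch below models
that flow under stated assumptions; the names used here (struct ip_block,
vcn_start, NUM_VCN_INST) are hypothetical stand-ins, not the driver's real
API:

#include <stdio.h>

#define NUM_VCN_INST 4

struct ip_block {
	unsigned int instance;			/* stamped at discovery time */
	int (*start)(struct ip_block *ip_block);
};

static int vcn_start(struct ip_block *ip_block)
{
	/* Acts on exactly one instance instead of looping over all. */
	printf("powering on vcn instance %u\n", ip_block->instance);
	return 0;
}

int main(void)
{
	struct ip_block blocks[NUM_VCN_INST];
	unsigned int i;

	/* Discovery stage: add one ip_block per vcn instance. */
	for (i = 0; i < NUM_VCN_INST; ++i)
		blocks[i] = (struct ip_block){ .instance = i, .start = vcn_start };

	/* Later, each block's hook powers on only its own instance. */
	for (i = 0; i < NUM_VCN_INST; ++i)
		blocks[i].start(&blocks[i]);

	return 0;
}

With one registered block per instance, a powergating transition hands
vcn_v4_0_3_start()/vcn_v4_0_3_stop() a single instance to act on, which is
why the per-callback loops over adev->vcn.num_vcn_inst can be dropped below.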
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index aaa759765dba..ee10a9218df7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -2352,7 +2352,8 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
 		break;
 	case IP_VERSION(4, 0, 3):
-		amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block);
+		for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
+			amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block);
 		amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block);
 		break;
 	case IP_VERSION(4, 0, 5):
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 8e7d7318cf58..db6f8d424777 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -127,7 +127,7 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
 {
 	struct amdgpu_device *adev = ip_block->adev;
 	struct amdgpu_ring *ring;
-	int i, r, vcn_inst;
+	int inst = ip_block->instance, r, vcn_inst;
 	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
 	uint32_t *ptr;
 
@@ -147,38 +147,36 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
 	if (r)
 		return r;
 
-	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
-		volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
 
-		vcn_inst = GET_INST(VCN, i);
+	vcn_inst = GET_INST(VCN, inst);
 
-		ring = &adev->vcn.inst[i].ring_enc[0];
-		ring->use_doorbell = true;
-
-		if (!amdgpu_sriov_vf(adev))
-			ring->doorbell_index =
-				(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
-				9 * vcn_inst;
-		else
-			ring->doorbell_index =
-				(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
-				32 * vcn_inst;
-
-		ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id);
-		sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id);
-		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
-				     AMDGPU_RING_PRIO_DEFAULT,
-				     &adev->vcn.inst[i].sched_score);
-		if (r)
-			return r;
+	ring = &adev->vcn.inst[inst].ring_enc[0];
+	ring->use_doorbell = true;
 
-		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
-		fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
-		fw_shared->sq.is_enabled = true;
+	if (!amdgpu_sriov_vf(adev))
+		ring->doorbell_index =
+			(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+			9 * vcn_inst;
+	else
+		ring->doorbell_index =
+			(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+			32 * vcn_inst;
+
+	ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[inst].aid_id);
+	sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[inst].aid_id);
+	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
+			     AMDGPU_RING_PRIO_DEFAULT,
+			     &adev->vcn.inst[inst].sched_score);
+	if (r)
+		return r;
 
-		if (amdgpu_vcnfw_log)
-			amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
-	}
+	fw_shared = adev->vcn.inst[inst].fw_shared.cpu_addr;
+	fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
+	fw_shared->sq.is_enabled = true;
+
+	if (amdgpu_vcnfw_log)
+		amdgpu_vcn_fwlog_init(&adev->vcn.inst[inst]);
 
 	if (amdgpu_sriov_vf(adev)) {
 		r = amdgpu_virt_alloc_mm_table(adev);
@@ -1085,174 +1083,170 @@ static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev)
  *
  * Start VCN block
  */
-static int vcn_v4_0_3_start(struct amdgpu_device *adev)
+static int vcn_v4_0_3_start(struct amdgpu_device *adev, unsigned int inst)
 {
 	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
 	struct amdgpu_ring *ring;
-	int i, j, k, r, vcn_inst;
+	int j, k, r, vcn_inst;
 	uint32_t tmp;
 
-	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
-		if (adev->pm.dpm_enabled)
-			amdgpu_dpm_enable_vcn(adev, true, i);
-	}
-
-	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
-		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
-			r = vcn_v4_0_3_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
-			continue;
-		}
-
-		vcn_inst = GET_INST(VCN, i);
-		/* set VCN status busy */
-		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) |
-		      UVD_STATUS__UVD_BUSY;
-		WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp);
-
-		/*SW clock gating */
-		vcn_v4_0_3_disable_clock_gating(adev, i);
-
-		/* enable VCPU clock */
-		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
-			 UVD_VCPU_CNTL__CLK_EN_MASK,
-			 ~UVD_VCPU_CNTL__CLK_EN_MASK);
-
-		/* disable master interrupt */
-		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0,
-			 ~UVD_MASTINT_EN__VCPU_EN_MASK);
-
-		/* enable LMI MC and UMC channels */
-		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0,
-			 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
-
-		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
-		tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
-		tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
-		WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
-
-		/* setup regUVD_LMI_CTRL */
-		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL);
-		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL,
-			     tmp | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
-				     UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
-				     UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
-				     UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
-
-		/* setup regUVD_MPC_CNTL */
-		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL);
-		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
-		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
-		WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL, tmp);
-
-		/* setup UVD_MPC_SET_MUXA0 */
-		WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXA0,
-			     ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
-			      (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
-			      (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
-			      (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
-
-		/* setup UVD_MPC_SET_MUXB0 */
-		WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXB0,
-			     ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
-			      (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
-			      (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
-			      (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
-
-		/* setup UVD_MPC_SET_MUX */
-		WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUX,
-			     ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
-			      (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
-			      (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
-
-		vcn_v4_0_3_mc_resume(adev, i);
-
-		/* VCN global tiling registers */
-		WREG32_SOC15(VCN, vcn_inst, regUVD_GFX8_ADDR_CONFIG,
-			     adev->gfx.config.gb_addr_config);
-		WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG,
-			     adev->gfx.config.gb_addr_config);
-
-		/* unblock VCPU register access */
-		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0,
-			 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
-
-		/* release VCPU reset to boot */
-		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
-			 ~UVD_VCPU_CNTL__BLK_RST_MASK);
-
-		for (j = 0; j < 10; ++j) {
-			uint32_t status;
-
-			for (k = 0; k < 100; ++k) {
-				status = RREG32_SOC15(VCN, vcn_inst,
-						      regUVD_STATUS);
-				if (status & 2)
-					break;
-				mdelay(10);
-			}
-			r = 0;
-			if (status & 2)
-				break;
-
-			DRM_DEV_ERROR(adev->dev,
-				      "VCN decode not responding, trying to reset the VCPU!!!\n");
-			WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst,
-						  regUVD_VCPU_CNTL),
-				 UVD_VCPU_CNTL__BLK_RST_MASK,
-				 ~UVD_VCPU_CNTL__BLK_RST_MASK);
-			mdelay(10);
-			WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst,
-						  regUVD_VCPU_CNTL),
-				 0, ~UVD_VCPU_CNTL__BLK_RST_MASK);
-
-			mdelay(10);
-			r = -1;
-		}
-
-		if (r) {
-			DRM_DEV_ERROR(adev->dev, "VCN decode not responding, giving up!!!\n");
-			return r;
-		}
-
-		/* enable master interrupt */
-		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN),
-			 UVD_MASTINT_EN__VCPU_EN_MASK,
-			 ~UVD_MASTINT_EN__VCPU_EN_MASK);
-
-		/* clear the busy bit of VCN_STATUS */
-		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0,
-			 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
-
-		ring = &adev->vcn.inst[i].ring_enc[0];
-		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
-
-		/* program the RB_BASE for ring buffer */
-		WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO,
-			     lower_32_bits(ring->gpu_addr));
-		WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI,
-			     upper_32_bits(ring->gpu_addr));
-
-		WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE,
-			     ring->ring_size / sizeof(uint32_t));
-
-		/* resetting ring, fw should not check RB ring */
-		tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
-		tmp &= ~(VCN_RB_ENABLE__RB_EN_MASK);
-		WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
-
-		/* Initialize the ring buffer's read and write pointers */
-		WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
-		WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
-
-		tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
-		tmp |= VCN_RB_ENABLE__RB_EN_MASK;
-		WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
-
-		ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
-		fw_shared->sq.queue_mode &=
-			cpu_to_le32(~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF));
-	}
+	if (adev->pm.dpm_enabled)
+		amdgpu_dpm_enable_vcn(adev, true, inst);
+
+	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+		r = vcn_v4_0_3_start_dpg_mode(adev, inst, adev->vcn.indirect_sram);
+		return r;
+	}
+
+	vcn_inst = GET_INST(VCN, inst);
+	/* set VCN status busy */
+	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) |
+	      UVD_STATUS__UVD_BUSY;
+	WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp);
+
+	/*SW clock gating */
+	vcn_v4_0_3_disable_clock_gating(adev, inst);
+
+	/* enable VCPU clock */
+	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
+		 UVD_VCPU_CNTL__CLK_EN_MASK,
+		 ~UVD_VCPU_CNTL__CLK_EN_MASK);
+
+	/* disable master interrupt */
+	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0,
+		 ~UVD_MASTINT_EN__VCPU_EN_MASK);
+
+	/* enable LMI MC and UMC channels */
+	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0,
+		 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+
+	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
+	tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+	tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+	WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
+
+	/* setup regUVD_LMI_CTRL */
+	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL);
+	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL,
+		     tmp | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+			     UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+			     UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+			     UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
+
+	/* setup regUVD_MPC_CNTL */
+	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL);
+	tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
+	tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
+	WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL, tmp);
+
+	/* setup UVD_MPC_SET_MUXA0 */
+	WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXA0,
+		     ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
+		      (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
+		      (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
+		      (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
+
+	/* setup UVD_MPC_SET_MUXB0 */
+	WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXB0,
+		     ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
+		      (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
+		      (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
+		      (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
+
+	/* setup UVD_MPC_SET_MUX */
+	WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUX,
+		     ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
+		      (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
+		      (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
+
+	vcn_v4_0_3_mc_resume(adev, inst);
+
+	/* VCN global tiling registers */
+	WREG32_SOC15(VCN, vcn_inst, regUVD_GFX8_ADDR_CONFIG,
+		     adev->gfx.config.gb_addr_config);
+	WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG,
+		     adev->gfx.config.gb_addr_config);
+
+	/* unblock VCPU register access */
+	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0,
+		 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+	/* release VCPU reset to boot */
+	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
+		 ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+	for (j = 0; j < 10; ++j) {
+		uint32_t status;
+
+		for (k = 0; k < 100; ++k) {
+			status = RREG32_SOC15(VCN, vcn_inst,
+					      regUVD_STATUS);
+			if (status & 2)
+				break;
+			mdelay(10);
+		}
+		r = 0;
+		if (status & 2)
+			break;
+
+		DRM_DEV_ERROR(adev->dev,
+			      "VCN decode not responding, trying to reset the VCPU!!!\n");
+		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst,
+					  regUVD_VCPU_CNTL),
+			 UVD_VCPU_CNTL__BLK_RST_MASK,
+			 ~UVD_VCPU_CNTL__BLK_RST_MASK);
+		mdelay(10);
+		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst,
+					  regUVD_VCPU_CNTL),
+			 0, ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+		mdelay(10);
+		r = -1;
+	}
+
+	if (r) {
+		DRM_DEV_ERROR(adev->dev, "VCN decode not responding, giving up!!!\n");
+		return r;
+	}
+
+	/* enable master interrupt */
+	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN),
+		 UVD_MASTINT_EN__VCPU_EN_MASK,
+		 ~UVD_MASTINT_EN__VCPU_EN_MASK);
+
+	/* clear the busy bit of VCN_STATUS */
+	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0,
+		 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+
+	ring = &adev->vcn.inst[inst].ring_enc[0];
+	fw_shared = adev->vcn.inst[inst].fw_shared.cpu_addr;
+
+	/* program the RB_BASE for ring buffer */
+	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO,
+		     lower_32_bits(ring->gpu_addr));
+	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI,
+		     upper_32_bits(ring->gpu_addr));
+
+	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE,
+		     ring->ring_size / sizeof(uint32_t));
+
+	/* resetting ring, fw should not check RB ring */
+	tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
+	tmp &= ~(VCN_RB_ENABLE__RB_EN_MASK);
+	WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+
+	/* Initialize the ring buffer's read and write pointers */
+	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
+	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
+
+	tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
+	tmp |= VCN_RB_ENABLE__RB_EN_MASK;
+	WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+
+	ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
+	fw_shared->sq.queue_mode &=
+		cpu_to_le32(~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF));
 
 	return 0;
 }
@@ -1295,83 +1289,79 @@ static int vcn_v4_0_3_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
  *
  * Stop VCN block
  */
-static int vcn_v4_0_3_stop(struct amdgpu_device *adev)
+static int vcn_v4_0_3_stop(struct amdgpu_device *adev, unsigned int inst)
 {
 	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
-	int i, r = 0, vcn_inst;
+	int r = 0, vcn_inst;
 	uint32_t tmp;
 
-	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
-		vcn_inst = GET_INST(VCN, i);
+	vcn_inst = GET_INST(VCN, inst);
 
-		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
-		fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
+	fw_shared = adev->vcn.inst[inst].fw_shared.cpu_addr;
+	fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
 
-		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
-			vcn_v4_0_3_stop_dpg_mode(adev, i);
-			continue;
-		}
+	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+		vcn_v4_0_3_stop_dpg_mode(adev, inst);
+		goto Done;
+	}
 
-		/* wait for vcn idle */
-		r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS,
-				       UVD_STATUS__IDLE, 0x7);
-		if (r)
-			goto Done;
-
-		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
-		      UVD_LMI_STATUS__READ_CLEAN_MASK |
-		      UVD_LMI_STATUS__WRITE_CLEAN_MASK |
-		      UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
-		r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp,
-				       tmp);
-		if (r)
-			goto Done;
-
-		/* stall UMC channel */
-		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2);
-		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
-		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp);
-		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
-		      UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
-		r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp,
-				       tmp);
-		if (r)
-			goto Done;
+	/* wait for vcn idle */
+	r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS,
+			       UVD_STATUS__IDLE, 0x7);
+	if (r)
+		goto Done;
+
+	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
+	      UVD_LMI_STATUS__READ_CLEAN_MASK |
+	      UVD_LMI_STATUS__WRITE_CLEAN_MASK |
+	      UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
+	r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp,
+			       tmp);
+	if (r)
+		goto Done;
+
+	/* stall UMC channel */
+	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2);
+	tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
+	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp);
+	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
+	      UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
+	r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp,
+			       tmp);
+	if (r)
+		goto Done;
 
-		/* Unblock VCPU Register access */
-		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL),
-			 UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
-			 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+	/* Unblock VCPU Register access */
+	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL),
+		 UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
+		 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
 
-		/* release VCPU reset to boot */
-		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
-			 UVD_VCPU_CNTL__BLK_RST_MASK,
-			 ~UVD_VCPU_CNTL__BLK_RST_MASK);
+	/* release VCPU reset to boot */
+	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
+		 UVD_VCPU_CNTL__BLK_RST_MASK,
+		 ~UVD_VCPU_CNTL__BLK_RST_MASK);
 
-		/* disable VCPU clock */
-		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
-			 ~(UVD_VCPU_CNTL__CLK_EN_MASK));
+	/* disable VCPU clock */
+	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
+		 ~(UVD_VCPU_CNTL__CLK_EN_MASK));
 
-		/* reset LMI UMC/LMI/VCPU */
-		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
-		tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
-		WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
+	/* reset LMI UMC/LMI/VCPU */
+	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
+	tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+	WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
 
-		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
-		tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
-		WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
+	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
+	tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+	WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
 
-		/* clear VCN status */
-		WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0);
+	/* clear VCN status */
+	WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0);
 
-		/* apply HW clock gating */
-		vcn_v4_0_3_enable_clock_gating(adev, i);
-	}
+	/* apply HW clock gating */
+	vcn_v4_0_3_enable_clock_gating(adev, inst);
 
 Done:
-	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
-		if (adev->pm.dpm_enabled)
-			amdgpu_dpm_enable_vcn(adev, false, i);
-	}
+	if (adev->pm.dpm_enabled)
+		amdgpu_dpm_enable_vcn(adev, false, inst);
 
 	return 0;
 }
@@ -1644,9 +1634,9 @@ static int vcn_v4_0_3_set_powergating_state(struct amdgpu_ip_block *ip_block,
 		return 0;
 
 	if (state == AMD_PG_STATE_GATE)
-		ret = vcn_v4_0_3_stop(adev);
+		ret = vcn_v4_0_3_stop(adev, inst);
 	else
-		ret = vcn_v4_0_3_start(adev);
+		ret = vcn_v4_0_3_start(adev, inst);
 
 	if (!ret)
 		adev->vcn.inst[inst].cur_state = state;
-- 
2.34.1