[PATCH 5/6] drm/amdgpu/vpe: add VPE 6.1.1 support

From: Lang Yu <Lang.Yu@xxxxxxx>

Add initial support for VPE 6.1.1.

VPE 6.1.1 exposes a second VPE instance and runs the two in collaborate
mode: instance 0 is configured as the master and instance 1 as its
slave, the microcode is loaded and the ring is programmed on every
instance, each instance gets its own doorbell range, and the wptr
doorbell is rung for the slave as well as the master. VPEC_CNTL,
VPEC_QUEUE_RESET_REQ and VPEC_PUB_DUMMY2 also sit at new offsets on
6.1.1, so accesses to them are keyed off the IP version.
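
For reference, a minimal standalone sketch (not driver code) of how the
new VPEC_COLLABORATE_CFG fields pack into one 32-bit register; the
masks and shifts are copied from the defines this patch adds, and
set_field() mirrors what REG_SET_FIELD() expands to:

  #include <stdint.h>
  #include <stdio.h>

  #define MASTER_ID_SHIFT  0x0
  #define MASTER_ID_MASK   0x00000007u
  #define MASTER_EN_SHIFT  0x3
  #define MASTER_EN_MASK   0x00000008u
  #define SLAVE0_ID_SHIFT  0x4
  #define SLAVE0_ID_MASK   0x00000070u
  #define SLAVE0_EN_SHIFT  0x7
  #define SLAVE0_EN_MASK   0x00000080u

  /* mirrors REG_SET_FIELD(): clear the field, then OR in the value */
  static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift,
                            uint32_t val)
  {
          return (reg & ~mask) | ((val << shift) & mask);
  }

  int main(void)
  {
          uint32_t cfg = 0;

          cfg = set_field(cfg, MASTER_ID_MASK, MASTER_ID_SHIFT, 0); /* instance 0 */
          cfg = set_field(cfg, MASTER_EN_MASK, MASTER_EN_SHIFT, 1);
          cfg = set_field(cfg, SLAVE0_ID_MASK, SLAVE0_ID_SHIFT, 1); /* instance 1 */
          cfg = set_field(cfg, SLAVE0_EN_MASK, SLAVE0_EN_SHIFT, 1);

          printf("VPEC_COLLABORATE_CFG = 0x%08x\n", cfg); /* 0x00000098 */
          return 0;
  }

With master id 0 and slave0 id 1 both enabled, the value works out to
0x00000098.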

v2: squash in updates (Alex)

Signed-off-by: Lang Yu <Lang.Yu@xxxxxxx>
Signed-off-by: Alex Deucher <alexander.deucher@xxxxxxx>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c |  32 ++-
 drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c   | 281 ++++++++++++++++--------
 2 files changed, 207 insertions(+), 106 deletions(-)

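Below the --- marker (so git am drops it): a minimal standalone sketch
of the doorbell layout the patch assumes. Each instance i claims a
4-slot doorbell range starting at doorbell_index + i * 4 (programmed
via vpe_doorbell_range() in vpe_v6_1_ring_start()), which is why the
collaborate-mode slave kick in vpe_ring_set_wptr() lands on
doorbell_index + 4. The base index and the two-instance count are
illustrative assumptions:

  #include <stdio.h>

  int main(void)
  {
          const unsigned int doorbell_index = 0x1a0; /* hypothetical base */
          const unsigned int num_instances = 2;      /* master + slave0 on 6.1.1 */
          unsigned int i;

          for (i = 0; i < num_instances; i++)
                  printf("instance %u: doorbells 0x%x..0x%x\n",
                         i, doorbell_index + i * 4, doorbell_index + i * 4 + 3);
          return 0;
  }
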
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
index eea2487d2ea24..70c5cc80ecdc0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
@@ -297,6 +297,10 @@ static int vpe_early_init(void *handle)
 	case IP_VERSION(6, 1, 0):
 		vpe_v6_1_set_funcs(vpe);
 		break;
+	case IP_VERSION(6, 1, 1):
+		vpe_v6_1_set_funcs(vpe);
+		vpe->collaborate_mode = true;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -304,6 +308,8 @@ static int vpe_early_init(void *handle)
 	vpe_set_ring_funcs(adev);
 	vpe_set_regs(vpe);
 
+	dev_info(adev->dev, "VPE: collaborate mode %s\n", vpe->collaborate_mode ? "true" : "false");
+
 	return 0;
 }
 
@@ -493,8 +499,6 @@ static void vpe_ring_emit_fence(struct amdgpu_ring *ring, uint64_t addr,
 {
 	int i = 0;
 
-	vpe_ring_emit_pred_exec(ring, 0, 10);
-
 	do {
 		/* write the fence */
 		amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_FENCE, 0));
@@ -705,16 +709,22 @@ static void vpe_ring_set_wptr(struct amdgpu_ring *ring)
 			upper_32_bits(ring->wptr << 2));
 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, ring->wptr << 2);
 		WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
+		if (vpe->collaborate_mode)
+			WDOORBELL64(ring->doorbell_index + 4, ring->wptr << 2);
 	} else {
-		dev_dbg(adev->dev, "Not using doorbell, \
-			regVPEC_QUEUE0_RB_WPTR == 0x%08x, \
-			regVPEC_QUEUE0_RB_WPTR_HI == 0x%08x\n",
-			lower_32_bits(ring->wptr << 2),
-			upper_32_bits(ring->wptr << 2));
-		WREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_wptr_lo),
-		       lower_32_bits(ring->wptr << 2));
-		WREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_wptr_hi),
-		       upper_32_bits(ring->wptr << 2));
+		int i;
+
+		for (i = 0; i < vpe->num_instances; i++) {
+			dev_dbg(adev->dev,
+				"Not using doorbell, regVPEC_QUEUE0_RB_WPTR == 0x%08x, "
+				"regVPEC_QUEUE0_RB_WPTR_HI == 0x%08x\n",
+				lower_32_bits(ring->wptr << 2),
+				upper_32_bits(ring->wptr << 2));
+			WREG32(vpe_get_reg_offset(vpe, i, vpe->regs.queue0_rb_wptr_lo),
+			       lower_32_bits(ring->wptr << 2));
+			WREG32(vpe_get_reg_offset(vpe, i, vpe->regs.queue0_rb_wptr_hi),
+			       upper_32_bits(ring->wptr << 2));
+		}
 	}
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c b/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c
index d20060a51e052..769eb8f7bb3c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c
@@ -33,14 +33,38 @@
 #include "vpe/vpe_6_1_0_sh_mask.h"
 
 MODULE_FIRMWARE("amdgpu/vpe_6_1_0.bin");
+MODULE_FIRMWARE("amdgpu/vpe_6_1_1.bin");
 
 #define VPE_THREAD1_UCODE_OFFSET	0x8000
 
+#define regVPEC_COLLABORATE_CNTL                                                0x0013
+#define regVPEC_COLLABORATE_CNTL_BASE_IDX                                       0
+#define VPEC_COLLABORATE_CNTL__COLLABORATE_MODE_EN__SHIFT                       0x0
+#define VPEC_COLLABORATE_CNTL__COLLABORATE_MODE_EN_MASK                         0x00000001L
+
+#define regVPEC_COLLABORATE_CFG                                                 0x0014
+#define regVPEC_COLLABORATE_CFG_BASE_IDX                                        0
+#define VPEC_COLLABORATE_CFG__MASTER_ID__SHIFT                                  0x0
+#define VPEC_COLLABORATE_CFG__MASTER_EN__SHIFT                                  0x3
+#define VPEC_COLLABORATE_CFG__SLAVE0_ID__SHIFT                                  0x4
+#define VPEC_COLLABORATE_CFG__SLAVE0_EN__SHIFT                                  0x7
+#define VPEC_COLLABORATE_CFG__MASTER_ID_MASK                                    0x00000007L
+#define VPEC_COLLABORATE_CFG__MASTER_EN_MASK                                    0x00000008L
+#define VPEC_COLLABORATE_CFG__SLAVE0_ID_MASK                                    0x00000070L
+#define VPEC_COLLABORATE_CFG__SLAVE0_EN_MASK                                    0x00000080L
+
+#define regVPEC_CNTL_6_1_1                                                      0x0016
+#define regVPEC_CNTL_6_1_1_BASE_IDX                                             0
+#define regVPEC_QUEUE_RESET_REQ_6_1_1                                           0x002c
+#define regVPEC_QUEUE_RESET_REQ_6_1_1_BASE_IDX                                  0
+#define regVPEC_PUB_DUMMY2_6_1_1                                                0x004c
+#define regVPEC_PUB_DUMMY2_6_1_1_BASE_IDX                                       0
+
 static uint32_t vpe_v6_1_get_reg_offset(struct amdgpu_vpe *vpe, uint32_t inst, uint32_t offset)
 {
 	uint32_t base;
 
-	base = vpe->ring.adev->reg_offset[VPE_HWIP][0][0];
+	base = vpe->ring.adev->reg_offset[VPE_HWIP][inst][0];
 
 	return base + offset;
 }
@@ -48,12 +72,14 @@ static uint32_t vpe_v6_1_get_reg_offset(struct amdgpu_vpe *vpe, uint32_t inst, u
 static void vpe_v6_1_halt(struct amdgpu_vpe *vpe, bool halt)
 {
 	struct amdgpu_device *adev = vpe->ring.adev;
-	uint32_t f32_cntl;
+	uint32_t i, f32_cntl;
 
-	f32_cntl = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_F32_CNTL));
-	f32_cntl = REG_SET_FIELD(f32_cntl, VPEC_F32_CNTL, HALT, halt ? 1 : 0);
-	f32_cntl = REG_SET_FIELD(f32_cntl, VPEC_F32_CNTL, TH1_RESET, halt ? 1 : 0);
-	WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_F32_CNTL), f32_cntl);
+	for (i = 0; i < vpe->num_instances; i++) {
+		f32_cntl = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_F32_CNTL));
+		f32_cntl = REG_SET_FIELD(f32_cntl, VPEC_F32_CNTL, HALT, halt ? 1 : 0);
+		f32_cntl = REG_SET_FIELD(f32_cntl, VPEC_F32_CNTL, TH1_RESET, halt ? 1 : 0);
+		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_F32_CNTL), f32_cntl);
+	}
 }
 
 static int vpe_v6_1_irq_init(struct amdgpu_vpe *vpe)
@@ -70,20 +96,58 @@ static int vpe_v6_1_irq_init(struct amdgpu_vpe *vpe)
 	return 0;
 }
 
+static void vpe_v6_1_set_collaborate_mode(struct amdgpu_vpe *vpe, bool enable)
+{
+	struct amdgpu_device *adev = vpe->ring.adev;
+	uint32_t vpe_colla_cntl, vpe_colla_cfg, i;
+
+	if (!vpe->collaborate_mode)
+		return;
+
+	for (i = 0; i < vpe->num_instances; i++) {
+		vpe_colla_cntl = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_COLLABORATE_CNTL));
+		vpe_colla_cntl = REG_SET_FIELD(vpe_colla_cntl, VPEC_COLLABORATE_CNTL,
+					       COLLABORATE_MODE_EN, enable ? 1 : 0);
+		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_COLLABORATE_CNTL), vpe_colla_cntl);
+
+		vpe_colla_cfg = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_COLLABORATE_CFG));
+		vpe_colla_cfg = REG_SET_FIELD(vpe_colla_cfg, VPEC_COLLABORATE_CFG, MASTER_ID, 0);
+		vpe_colla_cfg = REG_SET_FIELD(vpe_colla_cfg, VPEC_COLLABORATE_CFG, MASTER_EN, enable ? 1 : 0);
+		vpe_colla_cfg = REG_SET_FIELD(vpe_colla_cfg, VPEC_COLLABORATE_CFG, SLAVE0_ID, 1);
+		vpe_colla_cfg = REG_SET_FIELD(vpe_colla_cfg, VPEC_COLLABORATE_CFG, SLAVE0_EN, enable ? 1 : 0);
+		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_COLLABORATE_CFG), vpe_colla_cfg);
+	}
+}
+
 static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe)
 {
 	struct amdgpu_device *adev = vpe->ring.adev;
 	const struct vpe_firmware_header_v1_0 *vpe_hdr;
 	const __le32 *data;
 	uint32_t ucode_offset[2], ucode_size[2];
-	uint32_t i, size_dw;
+	uint32_t i, j, size_dw;
 	uint32_t ret;
 
-	// disable UMSCH_INT_ENABLE
-	ret = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_CNTL));
-	ret = REG_SET_FIELD(ret, VPEC_CNTL, UMSCH_INT_ENABLE, 0);
-	WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_CNTL), ret);
+	/* disable UMSCH_INT_ENABLE */
+	for (j = 0; j < vpe->num_instances; j++) {
+
+		if (amdgpu_ip_version(adev, VPE_HWIP, 0) == IP_VERSION(6, 1, 1))
+			ret = RREG32(vpe_get_reg_offset(vpe, j, regVPEC_CNTL_6_1_1));
+		else
+			ret = RREG32(vpe_get_reg_offset(vpe, j, regVPEC_CNTL));
+
+		ret = REG_SET_FIELD(ret, VPEC_CNTL, UMSCH_INT_ENABLE, 0);
 
+		if (amdgpu_ip_version(adev, VPE_HWIP, 0) == IP_VERSION(6, 1, 1))
+			WREG32(vpe_get_reg_offset(vpe, j, regVPEC_CNTL_6_1_1), ret);
+		else
+			WREG32(vpe_get_reg_offset(vpe, j, regVPEC_CNTL), ret);
+	}
+
+	/*
+	 * For VPE 6.1.1, we still only need to program the master's offset;
+	 * the PSP will apply it to the slave as well. Instance 0 is the master.
+	 */
 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 		uint32_t f32_offset, f32_cntl;
 
@@ -96,8 +160,7 @@ static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe)
 		adev->vpe.cmdbuf_cpu_addr[1] = f32_cntl;
 
 		amdgpu_vpe_psp_update_sram(adev);
-
-		/* Config DPM */
+		vpe_v6_1_set_collaborate_mode(vpe, true);
 		amdgpu_vpe_configure_dpm(vpe);
 
 		return 0;
@@ -114,25 +177,26 @@ static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe)
 
 	vpe_v6_1_halt(vpe, true);
 
-	for (i = 0; i < 2; i++) {
-		if (i > 0)
-			WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_UCODE_ADDR), VPE_THREAD1_UCODE_OFFSET);
-		else
-			WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_UCODE_ADDR), 0);
-
-		data = (const __le32 *)(adev->vpe.fw->data + ucode_offset[i]);
-		size_dw = ucode_size[i] / sizeof(__le32);
-
-		while (size_dw--) {
-			if (amdgpu_emu_mode && size_dw % 500 == 0)
-				msleep(1);
-			WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_UCODE_DATA), le32_to_cpup(data++));
+	for (j = 0; j < vpe->num_instances; j++) {
+		for (i = 0; i < 2; i++) {
+			if (i > 0)
+				WREG32(vpe_get_reg_offset(vpe, j, regVPEC_UCODE_ADDR), VPE_THREAD1_UCODE_OFFSET);
+			else
+				WREG32(vpe_get_reg_offset(vpe, j, regVPEC_UCODE_ADDR), 0);
+
+			data = (const __le32 *)(adev->vpe.fw->data + ucode_offset[i]);
+			size_dw = ucode_size[i] / sizeof(__le32);
+
+			while (size_dw--) {
+				if (amdgpu_emu_mode && size_dw % 500 == 0)
+					msleep(1);
+				WREG32(vpe_get_reg_offset(vpe, j, regVPEC_UCODE_DATA), le32_to_cpup(data++));
+			}
 		}
-
 	}
 
 	vpe_v6_1_halt(vpe, false);
-	/* Config DPM */
+	vpe_v6_1_set_collaborate_mode(vpe, true);
 	amdgpu_vpe_configure_dpm(vpe);
 
 	return 0;
@@ -142,68 +206,68 @@ static int vpe_v6_1_ring_start(struct amdgpu_vpe *vpe)
 {
 	struct amdgpu_ring *ring = &vpe->ring;
 	struct amdgpu_device *adev = ring->adev;
-	uint32_t rb_bufsz, rb_cntl;
-	uint32_t ib_cntl;
 	uint32_t doorbell, doorbell_offset;
+	uint32_t rb_bufsz, rb_cntl;
+	uint32_t ib_cntl, i;
 	int ret;
 
-	rb_bufsz = order_base_2(ring->ring_size / 4);
-	rb_cntl = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_CNTL));
-	rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RB_SIZE, rb_bufsz);
-	rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RB_PRIV, 1);
-	rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RB_VMID, 0);
-	WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_CNTL), rb_cntl);
-
-	WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_RPTR), 0);
-	WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_RPTR_HI), 0);
-	WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_WPTR), 0);
-	WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_WPTR_HI), 0);
-
-	WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_RPTR_ADDR_LO),
-	       lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
-	WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_RPTR_ADDR_HI),
-	       upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
-
-	WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_BASE), ring->gpu_addr >> 8);
-	WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_BASE_HI), ring->gpu_addr >> 40);
-
-	ring->wptr = 0;
-
-	/* before programing wptr to a less value, need set minor_ptr_update first */
-	WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_MINOR_PTR_UPDATE), 1);
-
-	WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr) << 2);
-	WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
-
-	/* set minor_ptr_update to 0 after wptr programed */
-	WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_MINOR_PTR_UPDATE), 0);
-
-	doorbell = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_DOORBELL));
-	doorbell_offset = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_DOORBELL_OFFSET));
-
-	doorbell = REG_SET_FIELD(doorbell, VPEC_QUEUE0_DOORBELL, ENABLE, ring->use_doorbell ? 1 : 0);
-	doorbell_offset = REG_SET_FIELD(doorbell_offset, VPEC_QUEUE0_DOORBELL_OFFSET, OFFSET, ring->doorbell_index);
-
-	WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_DOORBELL), doorbell);
-	WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_DOORBELL_OFFSET), doorbell_offset);
-
-	adev->nbio.funcs->vpe_doorbell_range(adev, 0, ring->use_doorbell, ring->doorbell_index, 2);
-
-	rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
-	rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RB_ENABLE, 1);
-	WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_CNTL), rb_cntl);
-
-	ib_cntl = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_IB_CNTL));
-	ib_cntl = REG_SET_FIELD(ib_cntl, VPEC_QUEUE0_IB_CNTL, IB_ENABLE, 1);
-	WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_IB_CNTL), ib_cntl);
-
-	ring->sched.ready = true;
+	for (i = 0; i < vpe->num_instances; i++) {
+		/* Set ring buffer size in dwords */
+		rb_bufsz = order_base_2(ring->ring_size / 4);
+		rb_cntl = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_CNTL));
+		rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RB_SIZE, rb_bufsz);
+		rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RB_PRIV, 1);
+		rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RB_VMID, 0);
+		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_CNTL), rb_cntl);
+
+		/* Initialize the ring buffer's read and write pointers */
+		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_RPTR), 0);
+		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_RPTR_HI), 0);
+		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_WPTR), 0);
+		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_WPTR_HI), 0);
+
+		/* set the wb address whether it's enabled or not */
+		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_RPTR_ADDR_LO),
+			lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
+		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_RPTR_ADDR_HI),
+			upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
+
+		rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
+
+		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_BASE), ring->gpu_addr >> 8);
+		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_BASE_HI), ring->gpu_addr >> 40);
+
+		ring->wptr = 0;
+
+		/* before programming wptr to a smaller value, minor_ptr_update must be set first */
+		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_MINOR_PTR_UPDATE), 1);
+		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr) << 2);
+		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
+		/* set minor_ptr_update to 0 after wptr is programmed */
+		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_MINOR_PTR_UPDATE), 0);
+
+		doorbell_offset = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_DOORBELL_OFFSET));
+		doorbell_offset = REG_SET_FIELD(doorbell_offset, VPEC_QUEUE0_DOORBELL_OFFSET, OFFSET, ring->doorbell_index + i * 4);
+		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_DOORBELL_OFFSET), doorbell_offset);
+
+		doorbell = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_DOORBELL));
+		doorbell = REG_SET_FIELD(doorbell, VPEC_QUEUE0_DOORBELL, ENABLE, ring->use_doorbell ? 1 : 0);
+		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_DOORBELL), doorbell);
+
+		adev->nbio.funcs->vpe_doorbell_range(adev, i, ring->use_doorbell, ring->doorbell_index + i * 4, 4);
+
+		rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
+		rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RB_ENABLE, 1);
+		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_CNTL), rb_cntl);
+
+		ib_cntl = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_IB_CNTL));
+		ib_cntl = REG_SET_FIELD(ib_cntl, VPEC_QUEUE0_IB_CNTL, IB_ENABLE, 1);
+		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_IB_CNTL), ib_cntl);
+	}
 
 	ret = amdgpu_ring_test_helper(ring);
-	if (ret) {
-		ring->sched.ready = false;
+	if (ret)
 		return ret;
-	}
 
 	return 0;
 }
@@ -211,17 +275,30 @@ static int vpe_v6_1_ring_start(struct amdgpu_vpe *vpe)
 static int vpe_v_6_1_ring_stop(struct amdgpu_vpe *vpe)
 {
 	struct amdgpu_device *adev = vpe->ring.adev;
-	uint32_t queue_reset;
+	uint32_t queue_reset, i;
 	int ret;
 
-	queue_reset = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE_RESET_REQ));
-	queue_reset = REG_SET_FIELD(queue_reset, VPEC_QUEUE_RESET_REQ, QUEUE0_RESET, 1);
-	WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE_RESET_REQ), queue_reset);
+	for (i = 0; i < vpe->num_instances; i++) {
+		if (amdgpu_ip_version(adev, VPE_HWIP, 0) == IP_VERSION(6, 1, 1))
+			queue_reset = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE_RESET_REQ_6_1_1));
+		else
+			queue_reset = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE_RESET_REQ));
+
+		queue_reset = REG_SET_FIELD(queue_reset, VPEC_QUEUE_RESET_REQ, QUEUE0_RESET, 1);
+
+		if (amdgpu_ip_version(adev, VPE_HWIP, 0) == IP_VERSION(6, 1, 1)) {
+			WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE_RESET_REQ_6_1_1), queue_reset);
+			ret = SOC15_WAIT_ON_RREG(VPE, i, regVPEC_QUEUE_RESET_REQ_6_1_1, 0,
+						 VPEC_QUEUE_RESET_REQ__QUEUE0_RESET_MASK);
+		} else {
+			WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE_RESET_REQ), queue_reset);
+			ret = SOC15_WAIT_ON_RREG(VPE, i, regVPEC_QUEUE_RESET_REQ, 0,
+						 VPEC_QUEUE_RESET_REQ__QUEUE0_RESET_MASK);
+		}
 
-	ret = SOC15_WAIT_ON_RREG(VPE, 0, regVPEC_QUEUE_RESET_REQ, 0,
-				 VPEC_QUEUE_RESET_REQ__QUEUE0_RESET_MASK);
-	if (ret)
-		dev_err(adev->dev, "VPE queue reset failed\n");
+		if (ret)
+			dev_err(adev->dev, "VPE queue reset failed\n");
+	}
 
 	vpe->ring.sched.ready = false;
 
@@ -236,10 +313,18 @@ static int vpe_v6_1_set_trap_irq_state(struct amdgpu_device *adev,
 	struct amdgpu_vpe *vpe = &adev->vpe;
 	uint32_t vpe_cntl;
 
-	vpe_cntl = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_CNTL));
+	if (amdgpu_ip_version(adev, VPE_HWIP, 0) == IP_VERSION(6, 1, 1))
+		vpe_cntl = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_CNTL_6_1_1));
+	else
+		vpe_cntl = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_CNTL));
+
 	vpe_cntl = REG_SET_FIELD(vpe_cntl, VPEC_CNTL, TRAP_ENABLE,
 				 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
-	WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_CNTL), vpe_cntl);
+
+	if (amdgpu_ip_version(adev, VPE_HWIP, 0) == IP_VERSION(6, 1, 1))
+		WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_CNTL_6_1_1), vpe_cntl);
+	else
+		WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_CNTL), vpe_cntl);
 
 	return 0;
 }
@@ -264,13 +349,19 @@ static int vpe_v6_1_process_trap_irq(struct amdgpu_device *adev,
 
 static int vpe_v6_1_set_regs(struct amdgpu_vpe *vpe)
 {
+	struct amdgpu_device *adev = container_of(vpe, struct amdgpu_device, vpe);
+
 	vpe->regs.queue0_rb_rptr_lo = regVPEC_QUEUE0_RB_RPTR;
 	vpe->regs.queue0_rb_rptr_hi = regVPEC_QUEUE0_RB_RPTR_HI;
 	vpe->regs.queue0_rb_wptr_lo = regVPEC_QUEUE0_RB_WPTR;
 	vpe->regs.queue0_rb_wptr_hi = regVPEC_QUEUE0_RB_WPTR_HI;
 	vpe->regs.queue0_preempt = regVPEC_QUEUE0_PREEMPT;
 
-	vpe->regs.dpm_enable = regVPEC_PUB_DUMMY2;
+	if (amdgpu_ip_version(adev, VPE_HWIP, 0) == IP_VERSION(6, 1, 1))
+		vpe->regs.dpm_enable = regVPEC_PUB_DUMMY2_6_1_1;
+	else
+		vpe->regs.dpm_enable = regVPEC_PUB_DUMMY2;
+
 	vpe->regs.dpm_pratio = regVPEC_QUEUE6_DUMMY4;
 	vpe->regs.dpm_request_interval = regVPEC_QUEUE5_DUMMY3;
 	vpe->regs.dpm_decision_threshold = regVPEC_QUEUE5_DUMMY4;
-- 
2.44.0