Create a HIQ ring on gfx9 and gfx10 when KFD is not available so that
the driver has its own scheduler interface queue; when KFD is present
it creates and owns the HIQ itself. Allocate an MQD for the HIQ ring
in amdgpu_gfx_mqd_sw_init(), set up the ring and its interrupt source
in sw_init, program and enable the queue during CP resume, and disable
it again in hw_fini.

Signed-off-by: Nirmoy Das <nirmoy.das@xxxxxxx>
---
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 14 ++++-
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 78 ++++++++++++++++++++++++
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 81 +++++++++++++++++++++++++
3 files changed, 172 insertions(+), 1 deletion(-)
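
Notes:

The struct amdgpu_hiq container and the amdgpu_gfx_hiq_*() helpers used
below are not defined in this patch; they are assumed to come from an
earlier patch in this series, together with the AMDGPU_RING_TYPE_HIQ ring
type and the new adev->gfx.hiq member. A minimal sketch of those
prerequisites, inferred purely from the call sites in this patch (names
and exact signatures may differ in the actual series, presumably
mirroring how the KIQ helpers are structured):

	/* hypothetical sketch, reconstructed from the call sites below */
	struct amdgpu_hiq {
		struct amdgpu_ring	ring;	/* single driver-owned HIQ ring */
		struct amdgpu_irq_src	irq;	/* interrupt source for that ring */
	};

	/* allocate HIQ backing buffers; hpd_size mirrors the KIQ helper */
	int amdgpu_gfx_hiq_init(struct amdgpu_device *adev, unsigned int hpd_size);

	/* create the HIQ ring and hook up its interrupt source */
	int amdgpu_gfx_hiq_init_ring(struct amdgpu_device *adev,
				     struct amdgpu_ring *ring,
				     struct amdgpu_irq_src *irq);

	/* map/unmap the HIQ on the CP, presumably via the KIQ as for KCQs;
	 * the enable return value is not checked at the call sites below
	 */
	int amdgpu_gfx_enable_hiq(struct amdgpu_device *adev);
	int amdgpu_gfx_disable_hiq(struct amdgpu_device *adev);
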
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 053a1119ebfe..837f76550242 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -519,7 +519,7 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
AMDGPU_GEM_DOMAIN_VRAM, &ring->mqd_obj,
&ring->mqd_gpu_addr, &ring->mqd_ptr);
if (r) {
- dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
+ dev_warn(adev->dev, "failed to create KIQ ring mqd obj (%d)", r);
return r;
}
@@ -569,6 +569,18 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
}
}
+ /* create MQD for HIQ */
+ ring = &adev->gfx.hiq.ring;
+ if (!ring->mqd_obj) {
+ r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM, &ring->mqd_obj,
+ &ring->mqd_gpu_addr, &ring->mqd_ptr);
+ if (r) {
+ dev_warn(adev->dev, "failed to create HIQ ring mqd obj (%d)", r);
+ return r;
+ }
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 538130c453a6..9532f013128f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -4794,6 +4794,7 @@ static int gfx_v10_0_sw_init(void *handle)
{
int i, j, k, r, ring_id = 0;
struct amdgpu_kiq *kiq;
+ struct amdgpu_hiq *hiq;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
switch (adev->ip_versions[GC_HWIP][0]) {
@@ -4923,6 +4924,18 @@ static int gfx_v10_0_sw_init(void *handle)
if (r)
return r;
+ if (!adev->kfd.dev) {
+ r = amdgpu_gfx_hiq_init(adev, GFX10_MEC_HPD_SIZE);
+ if (r) {
+ DRM_ERROR("Failed to init HIQ BOs!\n");
+ return r;
+ }
+
+ hiq = &adev->gfx.hiq;
+ r = amdgpu_gfx_hiq_init_ring(adev, &hiq->ring, &hiq->irq);
+ if (r)
+ return r;
+ }
r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v10_compute_mqd));
if (r)
return r;
@@ -7215,6 +7228,55 @@ static int gfx_v10_0_kcq_resume(struct amdgpu_device *adev)
return r;
}
+static int gfx_v10_0_hiq_init_queue(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct v10_compute_mqd *mqd = ring->mqd_ptr;
+
+ /* on a GPU reset reuse the existing MQD; otherwise program it from scratch */
+ if (amdgpu_in_reset(adev)) {
+ /* reset ring buffer */
+ ring->wptr = 0;
+ amdgpu_ring_clear_ring(ring);
+ } else {
+ memset((void *)mqd, 0, sizeof(*mqd));
+ mutex_lock(&adev->srbm_mutex);
+ nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+ gfx_v10_0_compute_mqd_init(ring);
+ nv_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ }
+
+ return 0;
+}
+
+static int gfx_v10_0_hiq_resume(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ int r;
+
+ ring = &adev->gfx.hiq.ring;
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0))
+ return r;
+
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+ if (unlikely(r != 0)) {
+ amdgpu_bo_unreserve(ring->mqd_obj); /* don't leak the reservation */
+ return r;
+ }
+
+ gfx_v10_0_hiq_init_queue(ring);
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ ring->sched.ready = true;
+
+ amdgpu_gfx_enable_hiq(adev);
+ return 0;
+}
+
static int gfx_v10_0_cp_resume(struct amdgpu_device *adev)
{
int r, i;
@@ -7252,6 +7314,12 @@ static int gfx_v10_0_cp_resume(struct amdgpu_device *adev)
return r;
}
+ if (!adev->kfd.dev) {
+ r = gfx_v10_0_hiq_resume(adev);
+ if (r)
+ return r;
+ }
+
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
ring = &adev->gfx.gfx_ring[i];
r = amdgpu_ring_test_helper(ring);
@@ -7557,6 +7625,11 @@ static int gfx_v10_0_hw_fini(void *handle)
#endif
if (amdgpu_gfx_disable_kcq(adev))
DRM_ERROR("KCQ disable failed\n");
+
+ if (!adev->kfd.dev) {
+ if (amdgpu_gfx_disable_hiq(adev))
+ DRM_ERROR("HIQ disable failed\n");
+ }
}
if (amdgpu_sriov_vf(adev)) {
@@ -9470,11 +9543,16 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
};
+static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_hiq = {
+ .type = AMDGPU_RING_TYPE_HIQ,
+};
+
static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
adev->gfx.kiq.ring.funcs = &gfx_v10_0_ring_funcs_kiq;
+ adev->gfx.hiq.ring.funcs = &gfx_v10_0_ring_funcs_hiq;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
adev->gfx.gfx_ring[i].funcs = &gfx_v10_0_ring_funcs_gfx;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 2b29e42bde62..9653ea8743d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2309,6 +2309,7 @@ static int gfx_v9_0_sw_init(void *handle)
int i, j, k, r, ring_id;
struct amdgpu_ring *ring;
struct amdgpu_kiq *kiq;
+ struct amdgpu_hiq *hiq;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
switch (adev->ip_versions[GC_HWIP][0]) {
@@ -2428,6 +2429,19 @@ static int gfx_v9_0_sw_init(void *handle)
if (r)
return r;
+ if (!adev->kfd.dev) {
+ r = amdgpu_gfx_hiq_init(adev, GFX9_MEC_HPD_SIZE);
+ if (r) {
+ DRM_ERROR("Failed to init HIQ BOs!\n");
+ return r;
+ }
+
+ hiq = &adev->gfx.hiq;
+ r = amdgpu_gfx_hiq_init_ring(adev, &hiq->ring, &hiq->irq);
+ if (r)
+ return r;
+ }
+
/* create MQD for all compute queues as well as KIQ for SRIOV case */
r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
if (r)
@@ -3911,6 +3925,57 @@ static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
return r;
}
+static int gfx_v9_0_hiq_init_queue(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct v9_mqd *mqd = ring->mqd_ptr;
+
+ /* on a GPU reset reuse the existing MQD; otherwise program it from scratch */
+ if (amdgpu_in_reset(adev)) {
+ /* reset ring buffer */
+ ring->wptr = 0;
+ amdgpu_ring_clear_ring(ring);
+ } else {
+ memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
+ ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
+ ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
+ mutex_lock(&adev->srbm_mutex);
+ soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+ gfx_v9_0_mqd_init(ring);
+ soc15_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ }
+
+ return 0;
+}
+
+static int gfx_v9_0_hiq_resume(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ int r;
+
+ ring = &adev->gfx.hiq.ring;
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0))
+ return r;
+
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+ if (unlikely(r != 0)) {
+ amdgpu_bo_unreserve(ring->mqd_obj); /* don't leak the reservation */
+ return r;
+ }
+
+ gfx_v9_0_hiq_init_queue(ring);
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ ring->sched.ready = true;
+
+ amdgpu_gfx_enable_hiq(adev);
+ return 0;
+}
+
static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
{
int r, i;
@@ -3946,6 +4011,12 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
if (r)
return r;
+ if (!adev->kfd.dev) {
+ r = gfx_v9_0_hiq_resume(adev);
+ if (r)
+ return r;
+ }
+
if (adev->gfx.num_gfx_rings) {
ring = &adev->gfx.gfx_ring[0];
r = amdgpu_ring_test_helper(ring);
@@ -4027,6 +4098,11 @@ static int gfx_v9_0_hw_fini(void *handle)
/* disable KCQ to avoid CPC touch memory not valid anymore */
amdgpu_gfx_disable_kcq(adev);
+ if (!adev->kfd.dev) {
+ if (amdgpu_gfx_disable_hiq(adev))
+ DRM_ERROR("HIQ disable failed\n");
+ }
+
if (amdgpu_sriov_vf(adev)) {
gfx_v9_0_cp_gfx_enable(adev, false);
/* must disable polling for SRIOV when hw finished, otherwise
@@ -6986,11 +7062,16 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
};
+static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_hiq = {
+ .type = AMDGPU_RING_TYPE_HIQ,
+};
+
static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq;
+ adev->gfx.hiq.ring.funcs = &gfx_v9_0_ring_funcs_hiq;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;
--
2.31.1