Re: [PATCH 1/2] drm/amdgpu: cleanup drm_gpu_scheduler array creation

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 




On 3/10/20 12:41 PM, Christian König wrote:
Hi Nirmoy,

you can stick with that for now.

In the long term we should make the priority a parameter of amdgpu_ring_init(). And then amdgpu_ring_init() can gather the rings by priority and type.

That in turn would make amdgpu_ring_init_sched() and amdgpu_ring_init_compute_sched() superfluous.


Yes that would be even better.


But fixing the bug with the NULL pointer dereference should come first, everything else is just cleanup and has lower urgency.


I will swap these two patches.


Thanks,

Nirmoy


Regards,
Christian.

Am 10.03.20 um 12:39 schrieb Nirmoy:
Hi Christian,


I think we still need amdgpu_ring.has_high_prio bool. I was thinking of using

amdgpu_gfx_is_high_priority_compute_queue() to see if a ring is set to high priority

but then I realized we don't support high priority gfx queues on gfx7 and older.


Regards,

Nirmoy

On 3/10/20 12:27 PM, Nirmoy Das wrote:
Move initialization of struct drm_gpu_scheduler array,
amdgpu_ctx_init_sched() to amdgpu_ring.c.

Signed-off-by: Nirmoy Das <nirmoy.das@xxxxxxx>
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c    | 68 -------------------
  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h    |  3 -
  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  2 +-
  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c   | 77 ++++++++++++++++++++++
  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h   |  2 +
  5 files changed, 80 insertions(+), 72 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 3b2370ad1e47..06d151c0fe4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -661,71 +661,3 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
      idr_destroy(&mgr->ctx_handles);
      mutex_destroy(&mgr->lock);
  }
-
-
-static void amdgpu_ctx_init_compute_sched(struct amdgpu_device *adev)
-{
-    int num_compute_sched_normal = 0;
-    int num_compute_sched_high = AMDGPU_MAX_COMPUTE_RINGS - 1;
-    int i;
-
-    /* use one drm sched array, gfx.compute_sched to store both high and
-     * normal priority drm compute schedulers */
-    for (i = 0; i < adev->gfx.num_compute_rings; i++) {
-        if (!adev->gfx.compute_ring[i].has_high_prio)
- adev->gfx.compute_sched[num_compute_sched_normal++] =
-                &adev->gfx.compute_ring[i].sched;
-        else
- adev->gfx.compute_sched[num_compute_sched_high--] =
-                &adev->gfx.compute_ring[i].sched;
-    }
-
-    /* compute ring only has two priority for now */
-    i = AMDGPU_GFX_PIPE_PRIO_NORMAL;
-    adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
-    adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
-
-    i = AMDGPU_GFX_PIPE_PRIO_HIGH;
-    adev->gfx.compute_prio_sched[i] =
- &adev->gfx.compute_sched[num_compute_sched_high - 1];
-    adev->gfx.num_compute_sched[i] =
-        adev->gfx.num_compute_rings - num_compute_sched_normal;
-}
-
-void amdgpu_ctx_init_sched(struct amdgpu_device *adev)
-{
-    int i, j;
-
-    amdgpu_ctx_init_compute_sched(adev);
-    for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
-        adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched;
-        adev->gfx.num_gfx_sched++;
-    }
-
-    for (i = 0; i < adev->sdma.num_instances; i++) {
-        adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;
-        adev->sdma.num_sdma_sched++;
-    }
-
-    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
-        if (adev->vcn.harvest_config & (1 << i))
-            continue;
- adev->vcn.vcn_dec_sched[adev->vcn.num_vcn_dec_sched++] =
-            &adev->vcn.inst[i].ring_dec.sched;
-    }
-
-    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
-        if (adev->vcn.harvest_config & (1 << i))
-            continue;
-        for (j = 0; j < adev->vcn.num_enc_rings; ++j)
- adev->vcn.vcn_enc_sched[adev->vcn.num_vcn_enc_sched++] =
-                &adev->vcn.inst[i].ring_enc[j].sched;
-    }
-
-    for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
-        if (adev->jpeg.harvest_config & (1 << i))
-            continue;
- adev->jpeg.jpeg_sched[adev->jpeg.num_jpeg_sched++] =
-            &adev->jpeg.inst[i].ring_dec.sched;
-    }
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index de490f183af2..f54e10314661 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -88,7 +88,4 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
 long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout);
 void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
-void amdgpu_ctx_init_sched(struct amdgpu_device *adev);
-
-
  #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 572eb6ea8eab..b2a99f9fc223 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3092,7 +3092,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
              adev->gfx.config.max_cu_per_sh,
              adev->gfx.cu_info.number);
 
-    amdgpu_ctx_init_sched(adev);
+    amdgpu_ring_init_sched(adev);
        adev->accel_working = true;
  diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index a7e1d0425ed0..99875dd633e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -454,3 +454,80 @@ int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
      ring->sched.ready = !r;
      return r;
  }
+
+static void amdgpu_ring_init_compute_sched(struct amdgpu_device *adev)
+{
+    int num_compute_sched_normal = 0;
+    int num_compute_sched_high = AMDGPU_MAX_COMPUTE_RINGS - 1;
+    int i;
+
+    /* use one drm sched array, gfx.compute_sched to store both high and */
+    /* normal priority drm compute schedulers */
+    for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+        if (!adev->gfx.compute_ring[i].has_high_prio)
+ adev->gfx.compute_sched[num_compute_sched_normal++] =
+                &adev->gfx.compute_ring[i].sched;
+        else
+ adev->gfx.compute_sched[num_compute_sched_high--] =
+                &adev->gfx.compute_ring[i].sched;
+    }
+
+    /* compute ring only has two priorities for now */
+    i = AMDGPU_GFX_PIPE_PRIO_NORMAL;
+    adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
+    adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
+
+    i = AMDGPU_GFX_PIPE_PRIO_HIGH;
+    adev->gfx.compute_prio_sched[i] =
+ &adev->gfx.compute_sched[num_compute_sched_high - 1];
+    adev->gfx.num_compute_sched[i] =
+        adev->gfx.num_compute_rings - num_compute_sched_normal;
+}
+
+/**
+ * amdgpu_ring_init_sched - populate array of drm scheds for each HW IP
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Populate an array of struct drm_gpu_schedulers for each HW IP which
+ * can be used by amdgpu_ctx_get_entity() to initialize an entity.
+ *
+ */
+
+void amdgpu_ring_init_sched(struct amdgpu_device *adev)
+{
+    int i, j;
+
+    amdgpu_ring_init_compute_sched(adev);
+    for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+        adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched;
+        adev->gfx.num_gfx_sched++;
+    }
+
+    for (i = 0; i < adev->sdma.num_instances; i++) {
+        adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;
+        adev->sdma.num_sdma_sched++;
+    }
+
+    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+        if (adev->vcn.harvest_config & (1 << i))
+            continue;
+ adev->vcn.vcn_dec_sched[adev->vcn.num_vcn_dec_sched++] =
+            &adev->vcn.inst[i].ring_dec.sched;
+    }
+
+    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+        if (adev->vcn.harvest_config & (1 << i))
+            continue;
+        for (j = 0; j < adev->vcn.num_enc_rings; ++j)
+ adev->vcn.vcn_enc_sched[adev->vcn.num_vcn_enc_sched++] =
+                &adev->vcn.inst[i].ring_enc[j].sched;
+    }
+
+    for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+        if (adev->jpeg.harvest_config & (1 << i))
+            continue;
+ adev->jpeg.jpeg_sched[adev->jpeg.num_jpeg_sched++] =
+            &adev->jpeg.inst[i].ring_dec.sched;
+    }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 9a443013d70d..4ccd056d4353 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -326,4 +326,6 @@ int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
                   struct amdgpu_ring *ring);
  void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring);
 
+void amdgpu_ring_init_sched(struct amdgpu_device *adev);
+
  #endif

_______________________________________________
amd-gfx mailing list
amd-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/amd-gfx




[Index of Archives]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]

  Powered by Linux