From: "Jesse.zhang@xxxxxxx" <Jesse.zhang@xxxxxxx> This patch updates the SDMA scheduler mask handling to include the page queue if it exists. The scheduler mask is calculated based on the number of SDMA instances and the presence of the page queue. The mask is updated to reflect the state of both the SDMA gfx ring and the page queue. Changes: - Add handling for the SDMA page queue in `amdgpu_debugfs_sdma_sched_mask_set`. - Update scheduler mask calculations to include the page queue. - Modify `amdgpu_debugfs_sdma_sched_mask_get` to return the correct mask value. This change is necessary to verify multiple queues (SDMA gfx queue + page queue) and ensure proper scheduling and state management for SDMA instances. Signed-off-by: Jesse Zhang <jesse.zhang@xxxxxxx> --- drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c | 55 +++++++++++++++++++++--- 1 file changed, 48 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c index b9f0c78a6d77..8de214a8ba6d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c @@ -356,23 +356,44 @@ int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev) static int amdgpu_debugfs_sdma_sched_mask_set(void *data, u64 val) { struct amdgpu_device *adev = (struct amdgpu_device *)data; - u32 i; + u32 i, num_ring; u64 mask = 0; - struct amdgpu_ring *ring; + struct amdgpu_ring *ring, *page = NULL; if (!adev) return -ENODEV; - mask = BIT_ULL(adev->sdma.num_instances) - 1; + /* Determine the number of rings per SDMA instance + * (1 for sdma gfx ring, 2 if page queue exists) + */ + if (adev->sdma.has_page_queue) + num_ring = 2; + else + num_ring = 1; + + /* Calculate the maximum possible mask value + * based on the number of SDMA instances and rings + */ + mask = BIT_ULL(adev->sdma.num_instances * num_ring) - 1; + if ((val & mask) == 0) return -EINVAL; for (i = 0; i < adev->sdma.num_instances; ++i) { ring = &adev->sdma.instance[i].ring; - if (val & BIT_ULL(i)) + if (adev->sdma.has_page_queue) + page = &adev->sdma.instance[i].page; + if (val & BIT_ULL(i * num_ring)) ring->sched.ready = true; else ring->sched.ready = false; + + if (page) { + if (val & BIT_ULL(i * num_ring + 1)) + page->sched.ready = true; + else + page->sched.ready = false; + } } /* publish sched.ready flag update effective immediately across smp */ smp_rmb(); @@ -382,16 +403,36 @@ static int amdgpu_debugfs_sdma_sched_mask_set(void *data, u64 val) static int amdgpu_debugfs_sdma_sched_mask_get(void *data, u64 *val) { struct amdgpu_device *adev = (struct amdgpu_device *)data; - u32 i; + u32 i, num_ring; u64 mask = 0; - struct amdgpu_ring *ring; + struct amdgpu_ring *ring, *page = NULL; if (!adev) return -ENODEV; + + /* Determine the number of rings per SDMA instance + * (1 for sdma gfx ring, 2 if page queue exists) + */ + if (adev->sdma.has_page_queue) + num_ring = 2; + else + num_ring = 1; + for (i = 0; i < adev->sdma.num_instances; ++i) { ring = &adev->sdma.instance[i].ring; + if (adev->sdma.has_page_queue) + page = &adev->sdma.instance[i].page; + if (ring->sched.ready) - mask |= 1 << i; + mask |= 1 << (i * num_ring); + else + mask &= ~(1 << (i * num_ring)); + + if (page && page->sched.ready) { + mask |= 1 << (i * num_ring + 1); + } else { + mask &= ~(1 << (i * num_ring + 1)); + } } *val = mask; -- 2.25.1