On Fri, Sep 8, 2023 at 12:20 PM Shashank Sharma <shashank.sharma@xxxxxxx> wrote:
>
> This patch adds new functions to map/unmap a usermode queue into
> the FW, using the MES ring. As soon as this mapping is done, the
> queue would be considered ready to accept the workload.
>
> V1: Addressed review comments from Alex on the RFC patch series
>     - Map/Unmap should be IP specific.
> V2:
>     Addressed review comments from Christian:
>     - Fix the wptr_mc_addr calculation (moved into another patch)
>     Addressed review comments from Alex:
>     - Do not add fptrs for map/unmap
> V3: Integration with doorbell manager
> V4: Rebase
> V5: Use gfx_v11_0 for function names (Alex)
> V6: Removed queue->proc/gang/fw_ctx_address variables and doing the
>     address calculations locally to keep the queue structure GEN
>     independent (Alex)
>
> Cc: Alex Deucher <alexander.deucher@xxxxxxx>
> Cc: Christian Koenig <christian.koenig@xxxxxxx>
> Signed-off-by: Shashank Sharma <shashank.sharma@xxxxxxx>
> ---
>  drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 72 ++++++++++++++++++++++++++
>  1 file changed, 72 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
> index 8ffb5dee72a9..e266674e0d44 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
> @@ -6427,6 +6427,67 @@ const struct amdgpu_ip_block_version gfx_v11_0_ip_block =
>  	.funcs = &gfx_v11_0_ip_funcs,
>  };
>
> +static void gfx_v11_0_userq_unmap(struct amdgpu_userq_mgr *uq_mgr,
> +				  struct amdgpu_usermode_queue *queue)
> +{
> +	struct amdgpu_device *adev = uq_mgr->adev;
> +	struct mes_remove_queue_input queue_input;
> +	struct amdgpu_userq_obj *ctx = &queue->fw_obj;
> +	int r;
> +
> +	memset(&queue_input, 0x0, sizeof(struct mes_remove_queue_input));
> +	queue_input.doorbell_offset = queue->doorbell_index;
> +	queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
> +
> +	amdgpu_mes_lock(&adev->mes);
> +	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
> +	amdgpu_mes_unlock(&adev->mes);
> +	if (r)
> +		DRM_ERROR("Failed to unmap queue in HW, err (%d)\n", r);
> +}
> +
> +static int gfx_v11_0_userq_map(struct amdgpu_userq_mgr *uq_mgr,
> +			       struct amdgpu_usermode_queue *queue,
> +			       struct amdgpu_mqd_prop *userq_props)
> +{
> +	struct amdgpu_device *adev = uq_mgr->adev;
> +	struct amdgpu_userq_obj *ctx = &queue->fw_obj;
> +	struct mes_add_queue_input queue_input;
> +	int r;
> +
> +	memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input));
> +
> +	queue_input.process_va_start = 0;
> +	queue_input.process_va_end = (adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
> +	queue_input.process_quantum = 100000; /* 10ms */
> +	queue_input.gang_quantum = 10000; /* 1ms */
> +	queue_input.paging = false;
> +
> +	queue_input.process_context_addr = ctx->gpu_addr;
> +	queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
> +	queue_input.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
> +	queue_input.gang_global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;

I can't remember, did we have a plan for priority handling?
Compositors would want high priority queues for example.
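
Something along these lines is roughly what I have in mind (untested
sketch; queue->priority and the AMDGPU_USERQ_PRIORITY_* values are
hypothetical and not part of this series, the MES priority levels and
capable() already exist):

static int gfx_v11_0_userq_priority(struct amdgpu_usermode_queue *queue)
{
	/* queue->priority would be a new field filled from the create IOCTL */
	switch (queue->priority) {
	case AMDGPU_USERQ_PRIORITY_HIGH:	/* hypothetical UAPI value */
		/* gate high priority behind a capability check so that only
		 * privileged clients (e.g. compositors) can request it
		 */
		if (!capable(CAP_SYS_NICE))
			return AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
		return AMDGPU_MES_PRIORITY_LEVEL_HIGH;
	case AMDGPU_USERQ_PRIORITY_LOW:	/* hypothetical UAPI value */
		return AMDGPU_MES_PRIORITY_LEVEL_LOW;
	default:
		return AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	}
}

gfx_v11_0_userq_map() could then feed that into gang_global_priority_level
instead of hardcoding AMDGPU_MES_PRIORITY_LEVEL_NORMAL.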

Alex

> +
> +	queue_input.process_id = queue->vm->pasid;
> +	queue_input.queue_type = queue->queue_type;
> +	queue_input.mqd_addr = queue->mqd.gpu_addr;
> +	queue_input.wptr_addr = userq_props->wptr_gpu_addr;
> +	queue_input.queue_size = userq_props->queue_size >> 2;
> +	queue_input.doorbell_offset = userq_props->doorbell_index;
> +	queue_input.page_table_base_addr = amdgpu_gmc_pd_addr(queue->vm->root.bo);
> +
> +	amdgpu_mes_lock(&adev->mes);
> +	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
> +	amdgpu_mes_unlock(&adev->mes);
> +	if (r) {
> +		DRM_ERROR("Failed to map queue in HW, err (%d)\n", r);
> +		return r;
> +	}
> +
> +	DRM_DEBUG_DRIVER("Queue (doorbell:%d) mapped successfully\n", userq_props->doorbell_index);
> +	return 0;
> +}
> +
>  static void gfx_v11_0_userq_destroy_ctx_space(struct amdgpu_userq_mgr *uq_mgr,
>  					      struct amdgpu_usermode_queue *queue)
>  {
> @@ -6540,8 +6601,18 @@ static int gfx_v11_0_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
>  		goto free_mqd;
>  	}
>
> +	/* Map userqueue into FW using MES */
> +	r = gfx_v11_0_userq_map(uq_mgr, queue, &userq_props);
> +	if (r) {
> +		DRM_ERROR("Failed to init MQD\n");
> +		goto free_ctx;
> +	}
> +
>  	return 0;
>
> +free_ctx:
> +	gfx_v11_0_userq_destroy_ctx_space(uq_mgr, queue);
> +
>  free_mqd:
>  	amdgpu_bo_free_kernel(&queue->mqd.obj, &queue->mqd.gpu_addr, &queue->mqd.cpu_ptr);
>  	return r;
> @@ -6552,6 +6623,7 @@ gfx_v11_0_userq_mqd_destroy(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_userm
>  {
>  	struct amdgpu_userq_obj *mqd = &queue->mqd;
>
> +	gfx_v11_0_userq_unmap(uq_mgr, queue);
>  	gfx_v11_0_userq_destroy_ctx_space(uq_mgr, queue);
>  	amdgpu_bo_free_kernel(&mqd->obj, &mqd->gpu_addr, &mqd->cpu_ptr);
>  }
> --
> 2.42.0
>