From: Michal Wajdeczko <michal.wajdeczko@xxxxxxxxx>

We want to stop using guc.send_mutex while sending CTB messages,
so we have to start protecting access to the CTB send descriptor.

For completeness, also protect the CTB receive descriptor.

Add a spinlock to struct intel_guc_ct_buffer and start using it.

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@xxxxxxxxx>
Signed-off-by: Matthew Brost <matthew.brost@xxxxxxxxx>
Reviewed-by: Matthew Brost <matthew.brost@xxxxxxxxx>
---
 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c | 14 ++++++++++++--
 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h |  2 ++
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index cf1fb09ef766..80976fe40fbf 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -89,6 +89,8 @@ static void ct_incoming_request_worker_func(struct work_struct *w);
  */
 void intel_guc_ct_init_early(struct intel_guc_ct *ct)
 {
+	spin_lock_init(&ct->ctbs.send.lock);
+	spin_lock_init(&ct->ctbs.recv.lock);
 	spin_lock_init(&ct->requests.lock);
 	INIT_LIST_HEAD(&ct->requests.pending);
 	INIT_LIST_HEAD(&ct->requests.incoming);
@@ -476,17 +478,22 @@ static int ct_send(struct intel_guc_ct *ct,
 	GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
 	GEM_BUG_ON(!response_buf && response_buf_size);
 
+	spin_lock_irqsave(&ct->ctbs.send.lock, flags);
+
 	fence = ct_get_next_fence(ct);
 	request.fence = fence;
 	request.status = 0;
 	request.response_len = response_buf_size;
 	request.response_buf = response_buf;
 
-	spin_lock_irqsave(&ct->requests.lock, flags);
+	spin_lock(&ct->requests.lock);
 	list_add_tail(&request.link, &ct->requests.pending);
-	spin_unlock_irqrestore(&ct->requests.lock, flags);
+	spin_unlock(&ct->requests.lock);
 
 	err = ct_write(ct, action, len, fence);
+
+	spin_unlock_irqrestore(&ct->ctbs.send.lock, flags);
+
 	if (unlikely(err))
 		goto unlink;
 
@@ -822,6 +829,7 @@ static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
 void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
 {
 	u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
+	unsigned long flags;
 	int err = 0;
 
 	if (unlikely(!ct->enabled)) {
@@ -830,7 +838,9 @@ void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
 	}
 
 	do {
+		spin_lock_irqsave(&ct->ctbs.recv.lock, flags);
 		err = ct_read(ct, msg);
+		spin_unlock_irqrestore(&ct->ctbs.recv.lock, flags);
 		if (err)
 			break;
 
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
index fc9486779e87..bc52dc479a14 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
@@ -27,11 +27,13 @@ struct intel_guc;
  * record (command transport buffer descriptor) and the actual buffer which
  * holds the commands.
  *
+ * @lock: protects access to the commands buffer and buffer descriptor
  * @desc: pointer to the buffer descriptor
  * @cmds: pointer to the commands buffer
  * @size: size of the commands buffer
  */
 struct intel_guc_ct_buffer {
+	spinlock_t lock;
	struct guc_ct_buffer_desc *desc;
	u32 *cmds;
	u32 size;
-- 
2.28.0
_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
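
For readers less familiar with the CT code, the lock nesting that ct_send()
ends up with after this patch can be sketched outside the driver. Below is a
rough, self-contained userspace analogue using pthread spinlocks; the names
(send_lock, requests_lock, ct_send_sketch) are placeholders, the pending list
is reduced to a singly linked list, and the irqsave semantics of the real
kernel spinlocks are not modelled.

/*
 * Userspace sketch of the nesting ct_send() uses after this patch: an
 * outer "send" lock serializing fence allocation and the CTB write, and
 * an inner "requests" lock guarding only the pending list.
 * Illustration only, not the i915 code.  Build with: cc -pthread
 */
#include <pthread.h>
#include <stdio.h>

struct request {
	unsigned int fence;
	struct request *next;
};

static pthread_spinlock_t send_lock;     /* stands in for ct->ctbs.send.lock */
static pthread_spinlock_t requests_lock; /* stands in for ct->requests.lock */
static struct request *pending;
static unsigned int next_fence;

static void ct_send_sketch(struct request *rq)
{
	/* Outer lock: fence allocation and the buffer write must not interleave. */
	pthread_spin_lock(&send_lock);

	rq->fence = ++next_fence;

	/* Inner lock: only the pending list needs it, so it nests inside. */
	pthread_spin_lock(&requests_lock);
	rq->next = pending;
	pending = rq;
	pthread_spin_unlock(&requests_lock);

	printf("wrote message with fence %u\n", rq->fence);

	pthread_spin_unlock(&send_lock);
}

int main(void)
{
	struct request rq = { 0 };

	pthread_spin_init(&send_lock, PTHREAD_PROCESS_PRIVATE);
	pthread_spin_init(&requests_lock, PTHREAD_PROCESS_PRIVATE);

	ct_send_sketch(&rq);
	return 0;
}

The point of the nesting is that fence allocation and the descriptor write
stay serialized under the outer send lock, while the pending-request list
keeps its own inner lock, which the response-handling path can still take
without touching the send lock.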