From: Allen Pais <allen.lkml@xxxxxxxxx>

In preparation for unconditionally passing the struct tasklet_struct
pointer to all tasklet callbacks, switch to using the new
tasklet_setup() and from_tasklet() to pass the tasklet pointer
explicitly and remove the .data field.

Signed-off-by: Romain Perier <romain.perier@xxxxxxxxx>
Signed-off-by: Allen Pais <allen.lkml@xxxxxxxxx>
---
 drivers/gpu/drm/i915/gt/intel_lrc.c           | 31 ++++++++++---------
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c |  8 +++--
 2 files changed, 22 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 24322ef08aa4..c45e42b9f239 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -3130,9 +3130,10 @@ static bool preempt_timeout(const struct intel_engine_cs *const engine)
  * Check the unread Context Status Buffers and manage the submission of new
  * contexts to the ELSP accordingly.
  */
-static void execlists_submission_tasklet(unsigned long data)
+static void execlists_submission_tasklet(struct tasklet_struct *t)
 {
-	struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+	struct intel_engine_cs * const engine = from_tasklet(engine, t,
+							     execlists.tasklet);
 	bool timeout = preempt_timeout(engine);
 
 	process_csb(engine);
@@ -4306,9 +4307,10 @@ static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
 	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
 
-static void nop_submission_tasklet(unsigned long data)
+static void nop_submission_tasklet(struct tasklet_struct *t)
 {
-	struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+	struct intel_engine_cs * const engine = from_tasklet(engine, t,
+							     execlists.tasklet);
 
 	/* The driver is wedged; don't process any more events. */
 	WRITE_ONCE(engine->execlists.queue_priority_hint, INT_MIN);
@@ -4391,7 +4393,8 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
 	execlists->queue = RB_ROOT_CACHED;
 
 	GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
-	execlists->tasklet.func = nop_submission_tasklet;
+	execlists->tasklet.func = (void (*)(unsigned long))
+				  nop_submission_tasklet;
 
 	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
@@ -4986,7 +4989,8 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
 {
 	engine->submit_request = execlists_submit_request;
 	engine->schedule = i915_schedule;
-	engine->execlists.tasklet.func = execlists_submission_tasklet;
+	engine->execlists.tasklet.func = (void (*)(unsigned long))
+					 execlists_submission_tasklet;
 
 	engine->reset.prepare = execlists_reset_prepare;
 	engine->reset.rewind = execlists_reset_rewind;
@@ -5113,8 +5117,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
 	struct intel_uncore *uncore = engine->uncore;
 	u32 base = engine->mmio_base;
 
-	tasklet_init(&engine->execlists.tasklet,
-		     execlists_submission_tasklet, (unsigned long)engine);
+	tasklet_setup(&engine->execlists.tasklet, execlists_submission_tasklet);
 	timer_setup(&engine->execlists.timer, execlists_timeslice, 0);
 	timer_setup(&engine->execlists.preempt, execlists_preempt, 0);
 
@@ -5509,9 +5512,10 @@ static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
 	return mask;
 }
 
-static void virtual_submission_tasklet(unsigned long data)
+static void virtual_submission_tasklet(struct tasklet_struct *t)
 {
-	struct virtual_engine * const ve = (struct virtual_engine *)data;
+	struct virtual_engine * ve = from_tasklet(ve, t,
+						  base.execlists.tasklet);
 	const int prio = READ_ONCE(ve->base.execlists.queue_priority_hint);
 	intel_engine_mask_t mask;
 	unsigned int n;
@@ -5724,9 +5728,8 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
 
 	INIT_LIST_HEAD(virtual_queue(ve));
 	ve->base.execlists.queue_priority_hint = INT_MIN;
-	tasklet_init(&ve->base.execlists.tasklet,
-		     virtual_submission_tasklet,
-		     (unsigned long)ve);
+	tasklet_setup(&ve->base.execlists.tasklet,
+		      virtual_submission_tasklet);
 
 	intel_context_init(&ve->context, &ve->base);
 
@@ -5748,7 +5751,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
 		 * layering if we handle cloning of the requests and
 		 * submitting a copy into each backend.
 		 */
-		if (sibling->execlists.tasklet.func !=
+		if (sibling->execlists.tasklet.func != (void (*)(unsigned long))
 		    execlists_submission_tasklet) {
 			err = -ENODEV;
 			goto err_put;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index fdfeb4b9b0f5..3013ff54431c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -344,9 +344,10 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
 	execlists->active = execlists->inflight;
 }
 
-static void guc_submission_tasklet(unsigned long data)
+static void guc_submission_tasklet(struct tasklet_struct *t)
 {
-	struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+	struct intel_engine_cs * const engine = from_tasklet(engine, t,
+							     execlists.tasklet);
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct i915_request **port, *rq;
 	unsigned long flags;
@@ -591,7 +592,8 @@ static void guc_set_default_submission(struct intel_engine_cs *engine)
 	 */
 	intel_execlists_set_default_submission(engine);
 
-	engine->execlists.tasklet.func = guc_submission_tasklet;
+	engine->execlists.tasklet.func =
+		(void (*)(unsigned long))guc_submission_tasklet;
 
 	/* do not use execlists park/unpark */
 	engine->park = engine->unpark = NULL;
-- 
2.17.1
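
For reference, here is a minimal sketch of the tasklet_setup()/from_tasklet()
pattern this conversion relies on, assuming a tree that already carries both
helpers in <linux/interrupt.h>. struct foo_engine, foo_tasklet_cb() and
foo_engine_create() are made-up names used purely for illustration; they are
not i915 code.

#include <linux/interrupt.h>
#include <linux/slab.h>

struct foo_engine {
	struct tasklet_struct tasklet;
	int pending;
};

/* New-style callback: receives the tasklet and recovers its container. */
static void foo_tasklet_cb(struct tasklet_struct *t)
{
	struct foo_engine *engine = from_tasklet(engine, t, tasklet);

	engine->pending = 0;
}

static struct foo_engine *foo_engine_create(void)
{
	struct foo_engine *engine = kzalloc(sizeof(*engine), GFP_KERNEL);

	if (!engine)
		return NULL;

	/*
	 * Replaces tasklet_init(&engine->tasklet, cb, (unsigned long)engine);
	 * no .data cookie is needed because the callback is handed the
	 * tasklet itself and uses container_of() via from_tasklet().
	 */
	tasklet_setup(&engine->tasklet, foo_tasklet_cb);
	return engine;
}

The (void (*)(unsigned long)) casts in the diff are there because i915 swaps
submission backends by assigning tasklet.func directly, and that member still
has the old unsigned long prototype at this stage of the conversion.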