From: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx>

If we make GEM contexts keep a reference to i915_drm_client for the whole
of their lifetime, we can consolidate the current task pid and name usage
by getting it from the client.

v2: Don't bother supporting selftests contexts from debugfs. (Chris)
v3 (Lucas): Finish constructing ctx before adding it to the list
v4 (Ram): Rebase.
v5: Trivial rebase for proto ctx changes.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx>
Reviewed-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Reviewed-by: Aravind Iddamsetty <aravind.iddamsetty@xxxxxxxxx>
Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/gem/i915_gem_context.c   | 13 +++++++----
 .../gpu/drm/i915/gem/i915_gem_context_types.h | 13 +++--------
 drivers/gpu/drm/i915/i915_gpu_error.c         | 22 +++++++++++--------
 3 files changed, 25 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index ae9d4e087a92..5a38cb163f04 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -989,10 +989,11 @@ void i915_gem_context_release(struct kref *ref)
 	trace_i915_context_free(ctx);
 	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
 
+	if (ctx->client)
+		i915_drm_client_put(ctx->client);
+
 	mutex_destroy(&ctx->engines_mutex);
 	mutex_destroy(&ctx->lut_mutex);
-
-	put_pid(ctx->pid);
 	mutex_destroy(&ctx->mutex);
 
 	kfree_rcu(ctx, rcu);
@@ -1436,9 +1437,13 @@ static void gem_context_register(struct i915_gem_context *ctx,
 
 	ctx->file_priv = fpriv;
 
-	ctx->pid = get_task_pid(current, PIDTYPE_PID);
+	ctx->client = i915_drm_client_get(fpriv->client);
+
+	rcu_read_lock();
 	snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
-		 current->comm, pid_nr(ctx->pid));
+		 i915_drm_client_name(ctx->client),
+		 pid_nr(i915_drm_client_pid(ctx->client)));
+	rcu_read_unlock();
 
 	/* And finally expose ourselves to userspace via the idr */
 	old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 94c03a97cb77..d28678385d16 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -264,19 +264,12 @@ struct i915_gem_context {
 	 */
 	struct i915_address_space __rcu *vm;
 
-	/**
-	 * @pid: process id of creator
-	 *
-	 * Note that who created the context may not be the principle user,
-	 * as the context may be shared across a local socket. However,
-	 * that should only affect the default context, all contexts created
-	 * explicitly by the client are expected to be isolated.
-	 */
-	struct pid *pid;
-
 	/** @link: place with &drm_i915_private.context_list */
 	struct list_head link;
 
+	/** @client: struct i915_drm_client */
+	struct i915_drm_client *client;
+
 	/**
 	 * @ref: reference count
 	 *
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index a2c58b54a592..b1f17477d0cb 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1235,7 +1235,9 @@ static void record_request(const struct i915_request *request,
 
 		ctx = rcu_dereference(request->context->gem_context);
 		if (ctx)
-			erq->pid = pid_nr(ctx->pid);
+			erq->pid = I915_SELFTEST_ONLY(!ctx->client) ?
+				   0 :
+				   pid_nr(i915_drm_client_pid(ctx->client));
 	}
 	rcu_read_unlock();
 }
@@ -1256,23 +1258,25 @@ static bool record_context(struct i915_gem_context_coredump *e,
 			   const struct i915_request *rq)
 {
 	struct i915_gem_context *ctx;
-	struct task_struct *task;
 	bool simulated;
 
 	rcu_read_lock();
+
 	ctx = rcu_dereference(rq->context->gem_context);
 	if (ctx && !kref_get_unless_zero(&ctx->ref))
 		ctx = NULL;
-	rcu_read_unlock();
-	if (!ctx)
+	if (!ctx) {
+		rcu_read_unlock();
 		return true;
+	}
 
-	rcu_read_lock();
-	task = pid_task(ctx->pid, PIDTYPE_PID);
-	if (task) {
-		strcpy(e->comm, task->comm);
-		e->pid = task->pid;
+	if (I915_SELFTEST_ONLY(!ctx->client)) {
+		strcpy(e->comm, "[kernel]");
+	} else {
+		strcpy(e->comm, i915_drm_client_name(ctx->client));
+		e->pid = pid_nr(i915_drm_client_pid(ctx->client));
 	}
+
 	rcu_read_unlock();
 
 	e->sched_attr = ctx->sched;
-- 
2.30.2
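For context, the i915_drm_client_name()/i915_drm_client_pid() helpers used
above are not defined in this patch; they come from the i915_drm_client
infrastructure presumably introduced earlier in the series. Below is a rough,
self-contained sketch of what RCU-protected accessors of this kind can look
like. The struct layout and field names are illustrative assumptions, not the
series' actual code, but they show why the callers bracket the lookups with
rcu_read_lock()/rcu_read_unlock().

/*
 * Illustrative sketch only (assumed layout, not taken from the series):
 * an i915_drm_client holding RCU-protected pid/name snapshots.  The
 * accessors return pointers that are only stable inside an RCU read-side
 * critical section, hence the rcu_read_lock()/rcu_read_unlock() pairs in
 * gem_context_register() and record_context() above.
 */
#include <linux/kref.h>
#include <linux/pid.h>
#include <linux/rcupdate.h>

struct i915_drm_client {
	struct kref kref;
	struct pid __rcu *pid;	/* assumed field: owning task's pid */
	char __rcu *name;	/* assumed field: owning task's comm */
};

static inline const char *i915_drm_client_name(struct i915_drm_client *client)
{
	/* Caller must hold rcu_read_lock() while using the returned string. */
	return rcu_dereference(client->name);
}

static inline struct pid *i915_drm_client_pid(struct i915_drm_client *client)
{
	/* Caller must hold rcu_read_lock() while using the returned pid. */
	return rcu_dereference(client->pid);
}

Under this assumed layout, i915_drm_client_get()/i915_drm_client_put() would
be thin kref_get()/kref_put() wrappers, which is what allows a GEM context to
safely hold the client reference for its whole lifetime and drop it in
i915_gem_context_release() as done in the patch.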