+void intel_context_pin_unlock(struct intel_context *ce);
 struct intel_context *
 __intel_context_insert(struct i915_gem_context *ctx,
@@ -63,7 +59,18 @@ void
 __intel_context_remove(struct intel_context *ce);
 
 struct intel_context *
-intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine);
+intel_context_instance(struct i915_gem_context *ctx,
+		       struct intel_engine_cs *engine);
+
+int __intel_context_do_pin(struct intel_context *ce);
+
+static inline int intel_context_pin(struct intel_context *ce)
+{
+	if (likely(atomic_inc_not_zero(&ce->pin_count)))
+		return 0;
+
+	return __intel_context_do_pin(ce);
+}
 
 static inline void __intel_context_pin(struct intel_context *ce)
 {
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index d378b276e476..f6828c0276eb 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -697,11 +697,17 @@ static int pin_context(struct i915_gem_context *ctx,
 		       struct intel_context **out)
 {
 	struct intel_context *ce;
+	int err;
 
-	ce = intel_context_pin(ctx, engine);
+	ce = intel_context_instance(ctx, engine);
 	if (IS_ERR(ce))
 		return PTR_ERR(ce);
 
+	err = intel_context_pin(ce);
+	intel_context_put(ce);
+	if (err)
+		return err;
+
 	*out = ce;
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index d9f68c89dff4..a79d9909d171 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -239,6 +239,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
 				    int id)
 {
 	struct mock_engine *engine;
+	int err;
 
 	GEM_BUG_ON(id >= I915_NUM_ENGINES);
 
@@ -278,10 +279,15 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
 	INIT_LIST_HEAD(&engine->hw_queue);
 
 	engine->base.kernel_context =
-		intel_context_pin(i915->kernel_context, &engine->base);
+		intel_context_instance(i915->kernel_context, &engine->base);
 	if (IS_ERR(engine->base.kernel_context))
 		goto err_breadcrumbs;
 
+	err = intel_context_pin(engine->base.kernel_context);
+	intel_context_put(engine->base.kernel_context);
+	if (err)
+		goto err_breadcrumbs;
+
 	return &engine->base;
 
 err_breadcrumbs:
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 40d9f549a0cd..606fc2713240 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1188,12 +1188,17 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 		INIT_LIST_HEAD(&s->workload_q_head[i]);
 		s->shadow[i] = ERR_PTR(-EINVAL);
 
-		ce = intel_context_pin(ctx, engine);
+		ce = intel_context_instance(ctx, engine);
 		if (IS_ERR(ce)) {
 			ret = PTR_ERR(ce);
 			goto out_shadow_ctx;
 		}
 
+		ret = intel_context_pin(ce);
+		intel_context_put(ce);
+		if (ret)
+			goto out_shadow_ctx;
+
 		s->shadow[i] = ce;
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 35732c2ae17f..c700cbc2f594 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -2101,14 +2101,19 @@ static int eb_pin_context(struct i915_execbuffer *eb,
 	if (err)
 		return err;
 
+	ce = intel_context_instance(eb->gem_context, engine);
+	if (IS_ERR(ce))
+		return PTR_ERR(ce);
+
 	/*
 	 * Pinning the contexts may generate requests in order to acquire
 	 * GGTT space, so do this first before we reserve a seqno for
 	 * ourselves.
 	 */
-	ce = intel_context_pin(eb->gem_context, engine);
-	if (IS_ERR(ce))
-		return PTR_ERR(ce);
+	err = intel_context_pin(ce);
+	intel_context_put(ce);
+	if (err)
+		return err;
 
 	eb->engine = engine;
 	eb->context = ce;
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 328a740e72cb..afaeabe5e531 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1205,11 +1205,17 @@ static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
 {
 	struct intel_engine_cs *engine = i915->engine[RCS0];
 	struct intel_context *ce;
-	int ret;
+	int err;
 
-	ret = i915_mutex_lock_interruptible(&i915->drm);
-	if (ret)
-		return ERR_PTR(ret);
+	ce = intel_context_instance(ctx, engine);
+	if (IS_ERR(ce))
+		return ce;
+
+	err = i915_mutex_lock_interruptible(&i915->drm);
+	if (err) {
+		intel_context_put(ce);
+		return ERR_PTR(err);
+	}
 
 	/*
 	 * As the ID is the gtt offset of the context's vma we
@@ -1217,10 +1223,11 @@ static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
 	 *
 	 * NB: implied RCS engine...
 	 */
-	ce = intel_context_pin(ctx, engine);
+	err = intel_context_pin(ce);
 	mutex_unlock(&i915->drm.struct_mutex);
-	if (IS_ERR(ce))
-		return ce;
+	intel_context_put(ce);
+	if (err)
+		return ERR_PTR(err);
 
 	i915->perf.oa.pinned_ctx = ce;
 
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 7932d1209247..8abd891d9287 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -753,6 +753,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	struct drm_i915_private *i915 = engine->i915;
 	struct intel_context *ce;
 	struct i915_request *rq;
+	int err;
 
 	/*
 	 * Preempt contexts are reserved for exclusive use to inject a
@@ -766,13 +767,21 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	 * GGTT space, so do this first before we reserve a seqno for
 	 * ourselves.
 	 */
-	ce = intel_context_pin(ctx, engine);
+	ce = intel_context_instance(ctx, engine);
 	if (IS_ERR(ce))
 		return ERR_CAST(ce);
 
+	err = intel_context_pin(ce);
+	if (err) {
+		rq = ERR_PTR(err);
+		goto err_put;
+	}
+
 	rq = i915_request_create(ce);
 	intel_context_unpin(ce);
+err_put:
+	intel_context_put(ce);
 
 	return rq;
 }
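
For review context: once intel_context_pin() no longer performs the lookup itself, every call site touched above converges on the same shape -- look up and reference the intel_context with intel_context_instance(), pin it, then drop the lookup reference because the pin holds its own. A minimal sketch of that caller pattern, modelled on pin_context() in intel_engine_cs.c above (the function name and the out parameter here are illustrative only, not part of the patch):

static int example_pin(struct i915_gem_context *ctx,
		       struct intel_engine_cs *engine,
		       struct intel_context **out)
{
	struct intel_context *ce;
	int err;

	ce = intel_context_instance(ctx, engine);	/* returns a referenced context */
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = intel_context_pin(ce);	/* fast path: atomic_inc_not_zero(&ce->pin_count) */
	intel_context_put(ce);		/* the pin keeps the context alive */
	if (err)
		return err;

	*out = ce;	/* release later with intel_context_unpin() */
	return 0;
}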