Some clients, such as mesa, may only emit minimal incremental batches
that rely on the logical context state from previous batches. They know
that recovery is impossible after a hang, as their required GPU state is
lost, and that each in-flight and subsequent batch will hang (resetting
the context image back to default, perpetuating the problem).

To avoid getting into that state in the first place, we can allow
clients to opt out of automatic recovery and elect to ban any guilty
context following a hang. This prevents the continual stream of hangs
and allows the client to recreate their context and rebuild the state
from scratch.

v2: Prefer calling it recoverable rather than unrecoverable.

References: https://lists.freedesktop.org/archives/mesa-dev/2019-February/215431.html
Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Cc: Kenneth Graunke <kenneth@xxxxxxxxxxxxx>
Cc: Mika Kuoppala <mika.kuoppala@xxxxxxxxx>
Reviewed-by: Mika Kuoppala <mika.kuoppala@xxxxxxxxxxxxxxx>
Acked-by: Kenneth Graunke <kenneth@xxxxxxxxxxxxx> # for mesa
---
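As an illustration, a minimal userspace sketch of how a client such as
mesa might opt out of automatic recovery once it has created a context.
It assumes an open DRM fd, libdrm's drmIoctl() wrapper and a uapi header
carrying the new I915_CONTEXT_PARAM_RECOVERABLE define; the helper name
is made up for the example.

#include <string.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

/* Mark the context unrecoverable: any hang bans it rather than resets it. */
static int context_opt_out_of_recovery(int fd, __u32 ctx_id)
{
	struct drm_i915_gem_context_param p;

	memset(&p, 0, sizeof(p));	/* size must be 0 for this param */
	p.ctx_id = ctx_id;
	p.param = I915_CONTEXT_PARAM_RECOVERABLE;
	p.value = 0;			/* false: ban on hang, do not recover */

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}

The same ioctl with value = 1, or DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM,
restores or queries the default (recoverable) behaviour.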
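And a sketch of the recovery path such a client is then expected to
take: once a hang has banned the context, further submissions on it are
rejected (current kernels return -EIO from execbuf), so the client drops
the banned context, creates a fresh one and rebuilds its state from
scratch. The helper name and error handling are illustrative only.

#include <errno.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

/* Throw away a banned context and create a fresh one to rebuild state into. */
static int replace_banned_context(int fd, __u32 *ctx_id)
{
	struct drm_i915_gem_context_destroy destroy;
	struct drm_i915_gem_context_create create;

	memset(&destroy, 0, sizeof(destroy));
	destroy.ctx_id = *ctx_id;
	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);

	memset(&create, 0, sizeof(create));
	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create))
		return -errno;

	*ctx_id = create.ctx_id;
	/* Caller re-applies context params (e.g. RECOVERABLE=0) and re-emits state. */
	return 0;
}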
 drivers/gpu/drm/i915/i915_gem_context.c | 15 +++++++++++++++
 drivers/gpu/drm/i915/i915_gem_context.h | 16 ++++++++++++++++
 drivers/gpu/drm/i915/i915_reset.c       |  3 ++-
 include/uapi/drm/i915_drm.h             | 20 ++++++++++++++++++++
 4 files changed, 53 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 280813a4bf82..da21c843fed8 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -401,6 +401,8 @@ __create_hw_context(struct drm_i915_private *dev_priv,
 	ctx->remap_slice = ALL_L3_SLICES(dev_priv);
 
 	i915_gem_context_set_bannable(ctx);
+	i915_gem_context_set_recoverable(ctx);
+
 	ctx->ring_size = 4 * PAGE_SIZE;
 	ctx->desc_template =
 		default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);
@@ -951,6 +953,10 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
 		args->size = 0;
 		args->value = i915_gem_context_is_bannable(ctx);
 		break;
+	case I915_CONTEXT_PARAM_RECOVERABLE:
+		args->size = 0;
+		args->value = i915_gem_context_is_recoverable(ctx);
+		break;
 	case I915_CONTEXT_PARAM_PRIORITY:
 		args->size = 0;
 		args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
@@ -1285,6 +1291,15 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 			i915_gem_context_clear_bannable(ctx);
 		break;
 
+	case I915_CONTEXT_PARAM_RECOVERABLE:
+		if (args->size)
+			ret = -EINVAL;
+		else if (args->value)
+			i915_gem_context_set_recoverable(ctx);
+		else
+			i915_gem_context_clear_recoverable(ctx);
+		break;
+
 	case I915_CONTEXT_PARAM_PRIORITY:
 		{
 			s64 priority = args->value;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index ca150a764c24..071108d34ae0 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -134,6 +134,7 @@ struct i915_gem_context {
 #define UCONTEXT_NO_ZEROMAP		0
 #define UCONTEXT_NO_ERROR_CAPTURE	1
 #define UCONTEXT_BANNABLE		2
+#define UCONTEXT_RECOVERABLE		3
 
 	/**
 	 * @flags: small set of booleans
@@ -270,6 +271,21 @@ static inline void i915_gem_context_clear_bannable(struct i915_gem_context *ctx)
 	clear_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
 }
 
+static inline bool i915_gem_context_is_recoverable(const struct i915_gem_context *ctx)
+{
+	return test_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
+}
+
+static inline void i915_gem_context_set_recoverable(struct i915_gem_context *ctx)
+{
+	set_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
+}
+
+static inline void i915_gem_context_clear_recoverable(struct i915_gem_context *ctx)
+{
+	clear_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
+}
+
 static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
 {
 	return test_bit(CONTEXT_BANNED, &ctx->flags);
diff --git a/drivers/gpu/drm/i915/i915_reset.c b/drivers/gpu/drm/i915/i915_reset.c
index 5a067a4b3d5d..1911e00d2581 100644
--- a/drivers/gpu/drm/i915/i915_reset.c
+++ b/drivers/gpu/drm/i915/i915_reset.c
@@ -66,7 +66,8 @@ static bool context_mark_guilty(struct i915_gem_context *ctx)
 
 	bannable = i915_gem_context_is_bannable(ctx);
 	score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
-	banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
+	banned = (!i915_gem_context_is_recoverable(ctx) ||
+		  score >= CONTEXT_SCORE_BAN_THRESHOLD);
 
 	/* Cool contexts don't accumulate client ban score */
 	if (!bannable)
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 397810fa2d33..c890b7992d5c 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -1491,6 +1491,26 @@ struct drm_i915_gem_context_param {
 	 * drm_i915_gem_context_param_sseu.
 	 */
 #define I915_CONTEXT_PARAM_SSEU		0x7
+
+/*
+ * Not all clients may want to attempt automatic recovery of a context after
+ * a hang (for example, some clients may only submit very small incremental
+ * batches relying on known logical state of previous batches which will never
+ * recover correctly and each attempt will hang), and so would prefer that
+ * the context is forever banned instead.
+ *
+ * If set to false (0), after a reset, subsequent (and in flight) rendering
+ * from this context is discarded, and the client will need to create a new
+ * context to use instead.
+ *
+ * If set to true (1), the kernel will automatically attempt to recover the
+ * context by skipping the hanging batch and executing the next batch starting
+ * from the default context state (discarding the incomplete logical context
+ * state lost due to the reset).
+ *
+ * On creation, all new contexts are marked as recoverable.
+ */
+#define I915_CONTEXT_PARAM_RECOVERABLE	0x8
+
 	__u64 value;
 };
 
-- 
2.20.1