Currently guc_mmio_reg_add() relies on having enough memory available
in the array to add a new slot. It uses
`GEM_BUG_ON(count >= regset->size)` to protect against going above
that threshold.

In order to allow guc_mmio_reg_add() to handle the memory allocation
by itself, it must be able to return an error in case of failure.
Adjust the return code so this error can be propagated to the callers
of guc_mmio_reg_add() and guc_mmio_regset_init().

No intended change in behavior.

Cc: Matt Roper <matthew.d.roper@xxxxxxxxx>
Cc: John Harrison <John.C.Harrison@xxxxxxxxx>
Cc: Matthew Brost <matthew.brost@xxxxxxxxx>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@xxxxxxxxx>
Signed-off-by: Lucas De Marchi <lucas.demarchi@xxxxxxxxx>
---
 drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c | 31 +++++++++++++---------
 1 file changed, 18 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index 1f6a3d4d9431..21e975d371e6 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -244,8 +244,8 @@ static int guc_mmio_reg_cmp(const void *a, const void *b)
 	return (int)ra->offset - (int)rb->offset;
 }
 
-static void guc_mmio_reg_add(struct temp_regset *regset,
-			     u32 offset, u32 flags)
+static long __must_check guc_mmio_reg_add(struct temp_regset *regset,
+					  u32 offset, u32 flags)
 {
 	u32 count = regset->used;
 	struct guc_mmio_reg reg = {
@@ -264,7 +264,7 @@ static void guc_mmio_reg_add(struct temp_regset *regset,
 	 */
 	if (bsearch(&reg, regset->registers, count,
 		    sizeof(reg), guc_mmio_reg_cmp))
-		return;
+		return 0;
 
 	slot = &regset->registers[count];
 	regset->used++;
@@ -277,6 +277,8 @@ static void guc_mmio_reg_add(struct temp_regset *regset,
 
 		swap(slot[1], slot[0]);
 	}
+
+	return 0;
 }
 
 #define GUC_MMIO_REG_ADD(regset, reg, masked) \
@@ -284,32 +286,35 @@ static void guc_mmio_reg_add(struct temp_regset *regset,
 			  i915_mmio_reg_offset((reg)),		\
 			  (masked) ? GUC_REGSET_MASKED : 0)
 
-static void guc_mmio_regset_init(struct temp_regset *regset,
-				 struct intel_engine_cs *engine)
+static int guc_mmio_regset_init(struct temp_regset *regset,
+				struct intel_engine_cs *engine)
 {
 	const u32 base = engine->mmio_base;
 	struct i915_wa_list *wal = &engine->wa_list;
 	struct i915_wa *wa;
 	unsigned int i;
+	int ret = 0;
 
 	regset->used = 0;
 
-	GUC_MMIO_REG_ADD(regset, RING_MODE_GEN7(base), true);
-	GUC_MMIO_REG_ADD(regset, RING_HWS_PGA(base), false);
-	GUC_MMIO_REG_ADD(regset, RING_IMR(base), false);
+	ret |= GUC_MMIO_REG_ADD(regset, RING_MODE_GEN7(base), true);
+	ret |= GUC_MMIO_REG_ADD(regset, RING_HWS_PGA(base), false);
+	ret |= GUC_MMIO_REG_ADD(regset, RING_IMR(base), false);
 
 	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
-		GUC_MMIO_REG_ADD(regset, wa->reg, wa->masked_reg);
+		ret |= GUC_MMIO_REG_ADD(regset, wa->reg, wa->masked_reg);
 
 	/* Be extra paranoid and include all whitelist registers. */
 	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++)
-		GUC_MMIO_REG_ADD(regset,
-				 RING_FORCE_TO_NONPRIV(base, i),
-				 false);
+		ret |= GUC_MMIO_REG_ADD(regset,
+					RING_FORCE_TO_NONPRIV(base, i),
+					false);
 
 	/* add in local MOCS registers */
 	for (i = 0; i < GEN9_LNCFCMOCS_REG_COUNT; i++)
-		GUC_MMIO_REG_ADD(regset, GEN9_LNCFCMOCS(i), false);
+		ret |= GUC_MMIO_REG_ADD(regset, GEN9_LNCFCMOCS(i), false);
+
+	return ret ? -1 : 0;
 }
 
 static int guc_mmio_reg_state_query(struct intel_guc *guc)
-- 
2.35.1
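
For illustration only, here is a rough sketch of how guc_mmio_reg_add() could use the
new return value in a follow-up to handle the memory allocation by itself; the
krealloc_array()-based growth and the doubling policy are assumptions made for this
sketch, not necessarily what the actual follow-up does:

static long __must_check guc_mmio_reg_add(struct temp_regset *regset,
					  u32 offset, u32 flags)
{
	/* Sketch: grow the backing array on demand instead of GEM_BUG_ON() */
	if (regset->used == regset->size) {
		size_t size = regset->size ? regset->size * 2 : 16;
		struct guc_mmio_reg *regs;

		regs = krealloc_array(regset->registers, size,
				      sizeof(*regs), GFP_KERNEL);
		if (!regs)
			return -ENOMEM; /* propagated by guc_mmio_regset_init() */

		regset->registers = regs;
		regset->size = size;
	}

	/* ... insert and sort the new register as before, return 0 ... */

	return 0;
}

With something like the above in place, guc_mmio_regset_init() already propagates the
failure to its caller through the int return introduced by this patch.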