In order to prepare for different types of workaround lists, parametrize the list to which the workaround register is added. Signed-off-by: Mika Kuoppala <mika.kuoppala@xxxxxxxxx> --- drivers/gpu/drm/i915/i915_drv.h | 20 +++++----- drivers/gpu/drm/i915/intel_ringbuffer.c | 65 +++++++++++++++++---------------- 2 files changed, 44 insertions(+), 41 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 5a04948..0ed790c 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -3519,23 +3519,25 @@ static inline void i915_trace_irq_get(struct intel_engine_cs *ring, } /* Workaround register lists */ -#define WA_REG(addr, mask, val) do { \ +#define WA_REG_LRI(addr, mask, val) do { \ const int r = intel_wa_add(&dev_priv->lri_workarounds, \ (addr), (mask), (val)); \ WARN_ON(r); \ } while (0) -#define WA_SET_BIT_MASKED(addr, mask) \ - WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask)) +#define WA_SET_BIT_MASKED(t, addr, mask) \ + WA_REG_##t(addr, (mask), _MASKED_BIT_ENABLE(mask)) -#define WA_CLR_BIT_MASKED(addr, mask) \ - WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask)) +#define WA_CLR_BIT_MASKED(t, addr, mask) \ + WA_REG_##t(addr, (mask), _MASKED_BIT_DISABLE(mask)) -#define WA_SET_FIELD_MASKED(addr, mask, value) \ - WA_REG(addr, mask, _MASKED_FIELD(mask, value)) +#define WA_SET_FIELD_MASKED(t, addr, mask, value) \ + WA_REG_##t(addr, mask, _MASKED_FIELD(mask, value)) -#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ((addr)) | (mask)) -#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ((addr)) & ~(mask)) +#define WA_SET_BIT(t, addr, mask) \ + WA_REG_##t(addr, mask, I915_READ((addr)) | (mask)) +#define WA_CLR_BIT(t, addr, mask) \ + WA_REG_##t(addr, mask, I915_READ((addr)) & ~(mask)) int intel_wa_add(struct i915_workarounds *w, const u32 addr, const u32 mask, const u32 val); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 29ae97e..c9d3489e 
100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -785,13 +785,13 @@ static int gen8_init_workarounds(struct intel_engine_cs *ring) struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; - WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING); + WA_SET_BIT_MASKED(LRI, INSTPM, INSTPM_FORCE_ORDERING); /* WaDisableAsyncFlipPerfMode:bdw,chv */ - WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE); + WA_SET_BIT_MASKED(LRI, MI_MODE, ASYNC_FLIP_PERF_DISABLE); /* WaDisablePartialInstShootdown:bdw,chv */ - WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, + WA_SET_BIT_MASKED(LRI, GEN8_ROW_CHICKEN, PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE); /* Use Force Non-Coherent whenever executing a 3D context. This is a @@ -800,7 +800,7 @@ static int gen8_init_workarounds(struct intel_engine_cs *ring) */ /* WaForceEnableNonCoherent:bdw,chv */ /* WaHdcDisableFetchWhenMasked:bdw,chv */ - WA_SET_BIT_MASKED(HDC_CHICKEN0, + WA_SET_BIT_MASKED(LRI, HDC_CHICKEN0, HDC_DONOT_FETCH_MEM_WHEN_MASKED | HDC_FORCE_NON_COHERENT); @@ -812,10 +812,10 @@ static int gen8_init_workarounds(struct intel_engine_cs *ring) * * This optimization is off by default for BDW and CHV; turn it on. */ - WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE); + WA_CLR_BIT_MASKED(LRI, CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE); /* Wa4x4STCOptimizationDisable:bdw,chv */ - WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE); + WA_SET_BIT_MASKED(LRI, CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE); /* * BSpec recommends 8x4 when MSAA is used, @@ -825,7 +825,7 @@ static int gen8_init_workarounds(struct intel_engine_cs *ring) * disable bit, which we don't touch here, but it's good * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). 
*/ - WA_SET_FIELD_MASKED(GEN7_GT_MODE, + WA_SET_FIELD_MASKED(LRI, GEN7_GT_MODE, GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4); @@ -843,16 +843,16 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring) return ret; /* WaDisableThreadStallDopClockGating:bdw (pre-production) */ - WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE); + WA_SET_BIT_MASKED(LRI, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE); /* WaDisableDopClockGating:bdw */ - WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, + WA_SET_BIT_MASKED(LRI, GEN7_ROW_CHICKEN2, DOP_CLOCK_GATING_DISABLE); - WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, + WA_SET_BIT_MASKED(LRI, HALF_SLICE_CHICKEN3, GEN8_SAMPLER_POWER_BYPASS_DIS); - WA_SET_BIT_MASKED(HDC_CHICKEN0, + WA_SET_BIT_MASKED(LRI, HDC_CHICKEN0, /* WaForceContextSaveRestoreNonCoherent:bdw */ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT | /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */ @@ -872,10 +872,10 @@ static int chv_init_workarounds(struct intel_engine_cs *ring) return ret; /* WaDisableThreadStallDopClockGating:chv */ - WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE); + WA_SET_BIT_MASKED(LRI, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE); /* Improve HiZ throughput on CHV. 
*/ - WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X); + WA_SET_BIT_MASKED(LRI, HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X); return 0; } @@ -887,25 +887,25 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring) uint32_t tmp; /* WaDisablePartialInstShootdown:skl,bxt */ - WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, + WA_SET_BIT_MASKED(LRI, GEN8_ROW_CHICKEN, PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE); /* Syncing dependencies between camera and graphics:skl,bxt */ - WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, + WA_SET_BIT_MASKED(LRI, HALF_SLICE_CHICKEN3, GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC); if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) == SKL_REVID_A0 || INTEL_REVID(dev) == SKL_REVID_B0)) || (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) { /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */ - WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5, + WA_CLR_BIT_MASKED(LRI, GEN9_HALF_SLICE_CHICKEN5, GEN9_DG_MIRROR_FIX_ENABLE); } if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) || (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) { /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */ - WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1, + WA_SET_BIT_MASKED(LRI, GEN7_COMMON_SLICE_CHICKEN1, GEN9_RHWO_OPTIMIZATION_DISABLE); /* * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set @@ -917,23 +917,24 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring) if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) >= SKL_REVID_C0) || IS_BROXTON(dev)) { /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */ - WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7, + WA_SET_BIT_MASKED(LRI, GEN9_HALF_SLICE_CHICKEN7, GEN9_ENABLE_YV12_BUGFIX); } /* Wa4x4STCOptimizationDisable:skl,bxt */ /* WaDisablePartialResolveInVc:skl,bxt */ - WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE | - GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE)); + WA_SET_BIT_MASKED(LRI, CACHE_MODE_1, + (GEN8_4x4_STC_OPTIMIZATION_DISABLE | + GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE)); /* 
WaCcsTlbPrefetchDisable:skl,bxt */ - WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5, + WA_CLR_BIT_MASKED(LRI, GEN9_HALF_SLICE_CHICKEN5, GEN9_CCS_TLB_PREFETCH_ENABLE); /* WaDisableMaskBasedCammingInRCC:skl,bxt */ if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) == SKL_REVID_C0) || (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) - WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0, + WA_SET_BIT_MASKED(LRI, SLICE_ECO_CHICKEN0, PIXEL_MASK_CAMMING_DISABLE); /* WaForceContextSaveRestoreNonCoherent:skl,bxt */ @@ -941,17 +942,17 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring) if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) == SKL_REVID_F0) || (IS_BROXTON(dev) && INTEL_REVID(dev) >= BXT_REVID_B0)) tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE; - WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp); + WA_SET_BIT_MASKED(LRI, HDC_CHICKEN0, tmp); /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */ if (IS_SKYLAKE(dev) || (IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_B0)) { - WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, + WA_SET_BIT_MASKED(LRI, HALF_SLICE_CHICKEN3, GEN8_SAMPLER_POWER_BYPASS_DIS); } /* WaDisableSTUnitPowerOptimization:skl,bxt */ - WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE); + WA_SET_BIT_MASKED(LRI, HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE); return 0; } @@ -987,7 +988,7 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *ring) return 0; /* Tune IZ hashing. 
See intel_device_info_runtime_init() */ - WA_SET_FIELD_MASKED(GEN7_GT_MODE, + WA_SET_FIELD_MASKED(LRI, GEN7_GT_MODE, GEN9_IZ_HASHING_MASK(2) | GEN9_IZ_HASHING_MASK(1) | GEN9_IZ_HASHING_MASK(0), @@ -1011,7 +1012,7 @@ static int skl_init_workarounds(struct intel_engine_cs *ring) /* WaDisablePowerCompilerClockGating:skl */ if (INTEL_REVID(dev) == SKL_REVID_B0) - WA_SET_BIT_MASKED(HIZ_CHICKEN, + WA_SET_BIT_MASKED(LRI, HIZ_CHICKEN, BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE); if (INTEL_REVID(dev) <= SKL_REVID_D0) { @@ -1021,20 +1022,20 @@ static int skl_init_workarounds(struct intel_engine_cs *ring) * a TLB invalidation occurs during a PSD flush. */ /* WaForceEnableNonCoherent:skl */ - WA_SET_BIT_MASKED(HDC_CHICKEN0, + WA_SET_BIT_MASKED(LRI, HDC_CHICKEN0, HDC_FORCE_NON_COHERENT); } if (INTEL_REVID(dev) == SKL_REVID_C0 || INTEL_REVID(dev) == SKL_REVID_D0) /* WaBarrierPerformanceFixDisable:skl */ - WA_SET_BIT_MASKED(HDC_CHICKEN0, + WA_SET_BIT_MASKED(LRI, HDC_CHICKEN0, HDC_FENCE_DEST_SLM_DISABLE | HDC_BARRIER_PERFORMANCE_DISABLE); /* WaDisableSbeCacheDispatchPortSharing:skl */ if (INTEL_REVID(dev) <= SKL_REVID_F0) { - WA_SET_BIT_MASKED( + WA_SET_BIT_MASKED(LRI, GEN7_HALF_SLICE_CHICKEN1, GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); } @@ -1053,12 +1054,12 @@ static int bxt_init_workarounds(struct intel_engine_cs *ring) return ret; /* WaDisableThreadStallDopClockGating:bxt */ - WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, + WA_SET_BIT_MASKED(LRI, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE); /* WaDisableSbeCacheDispatchPortSharing:bxt */ if (INTEL_REVID(dev) <= BXT_REVID_B0) { - WA_SET_BIT_MASKED( + WA_SET_BIT_MASKED(LRI, GEN7_HALF_SLICE_CHICKEN1, GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); } -- 2.1.4 _______________________________________________ Intel-gfx mailing list Intel-gfx@xxxxxxxxxxxxxxxxxxxxx http://lists.freedesktop.org/mailman/listinfo/intel-gfx