BXT requires accesses to the GTT (i.e. PTE updates) to be serialized when
IOMMU is enabled. This patch guarantees this by wrapping all updates in
stop_machine and using a flushing read to ensure that the GTT writes have
reached their destination before restarting.

Signed-off-by: Jon Bloomfield <jon.bloomfield@xxxxxxxxx>
Signed-off-by: John Harrison <john.C.Harrison@xxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 106 ++++++++++++++++++++++++++++++++++++
 1 file changed, 106 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 7c769d7..6360d92 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2191,6 +2191,100 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
 		gen8_set_pte(&gtt_base[i], scratch_pte);
 }
 
+#ifdef CONFIG_INTEL_IOMMU
+struct insert_page {
+	struct i915_address_space *vm;
+	dma_addr_t addr;
+	u64 offset;
+	enum i915_cache_level level;
+};
+
+static int gen8_ggtt_insert_page__cb(void *_arg)
+{
+	struct insert_page *arg = _arg;
+
+	struct drm_i915_private *dev_priv = arg->vm->i915;
+
+	gen8_ggtt_insert_page(arg->vm, arg->addr,
+			      arg->offset, arg->level, 0);
+
+	POSTING_READ(GFX_FLSH_CNTL_GEN6);
+
+	return 0;
+}
+
+static void gen8_ggtt_insert_page__BKL(struct i915_address_space *vm,
+				       dma_addr_t addr,
+				       u64 offset,
+				       enum i915_cache_level level,
+				       u32 unused)
+{
+	struct insert_page arg = { vm, addr, offset, level };
+
+	stop_machine(gen8_ggtt_insert_page__cb, &arg, NULL);
+}
+
+
+struct insert_entries {
+	struct i915_address_space *vm;
+	struct sg_table *st;
+	u64 start;
+	enum i915_cache_level level;
+};
+
+static int gen8_ggtt_insert_entries__cb(void *_arg)
+{
+	struct insert_entries *arg = _arg;
+
+	struct drm_i915_private *dev_priv = arg->vm->i915;
+
+	gen8_ggtt_insert_entries(arg->vm, arg->st,
+				 arg->start, arg->level, 0);
+
+	POSTING_READ(GFX_FLSH_CNTL_GEN6);
+
+	return 0;
+}
+
+static void gen8_ggtt_insert_entries__BKL(struct i915_address_space *vm,
+					  struct sg_table *st,
+					  u64 start,
+					  enum i915_cache_level level,
+					  u32 unused)
+{
+	struct insert_entries arg = { vm, st, start, level };
+
+	stop_machine(gen8_ggtt_insert_entries__cb, &arg, NULL);
+}
+
+struct clear_range {
+	struct i915_address_space *vm;
+	u64 start;
+	u64 length;
+};
+
+static int gen8_ggtt_clear_range__cb(void *_arg)
+{
+	struct clear_range *arg = _arg;
+
+	struct drm_i915_private *dev_priv = arg->vm->i915;
+
+	gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
+
+	POSTING_READ(GFX_FLSH_CNTL_GEN6);
+
+	return 0;
+}
+
+static void gen8_ggtt_clear_range__BKL(struct i915_address_space *vm,
+				       u64 start,
+				       u64 length)
+{
+	struct clear_range arg = { vm, start, length };
+	stop_machine(gen8_ggtt_clear_range__cb, &arg, NULL);
+}
+#endif
+
 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
 				  u64 start, u64 length)
 {
@@ -2789,6 +2883,18 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 
 	ggtt->base.insert_entries = gen8_ggtt_insert_entries;
 
+#ifdef CONFIG_INTEL_IOMMU
+	/* Serialize GTT updates on BXT if VT-d is on. */
+	if (IS_BROXTON(dev_priv) && intel_iommu_gfx_mapped) {
+		ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
+		ggtt->base.insert_page = gen8_ggtt_insert_page__BKL;
+		if (!USES_FULL_PPGTT(dev_priv) ||
+		    intel_scanout_needs_vtd_wa(dev_priv)) {
+			ggtt->base.clear_range = gen8_ggtt_clear_range__BKL;
+		}
+	}
+#endif
+
 	ggtt->invalidate = gen6_ggtt_invalidate;
 
 	return ggtt_probe_common(ggtt, size);
-- 
2.7.4
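
For readers unfamiliar with the mechanism the patch relies on, the serialization
scheme reduces to the following self-contained sketch: pack the arguments into a
struct, hand a callback to stop_machine() so the GTT write runs while every other
CPU is parked with interrupts disabled, and read back from the device so the write
is posted before the machine is released. The names here (struct demo_args,
demo_update__cb, demo_update_serialized) and the writeq()/readq() accessors are
illustrative stand-ins only; they are not part of the i915 driver or of this patch.

#include <linux/io.h>
#include <linux/stop_machine.h>
#include <linux/types.h>

struct demo_args {
	void __iomem *pte;	/* hypothetical GTT PTE slot */
	u64 val;		/* PTE value to write */
};

static int demo_update__cb(void *_arg)
{
	struct demo_args *arg = _arg;

	/* Runs while all other CPUs are held by stop_machine(), so no
	 * concurrent GTT access can overlap this update.
	 */
	writeq(arg->val, arg->pte);

	/* Flushing (posting) read, standing in for the patch's
	 * POSTING_READ(GFX_FLSH_CNTL_GEN6): makes sure the write has
	 * reached the device before the other CPUs are released.
	 */
	readq(arg->pte);

	return 0;
}

static void demo_update_serialized(void __iomem *pte, u64 val)
{
	struct demo_args arg = { pte, val };

	/* NULL cpumask: the callback may run on any online CPU. */
	stop_machine(demo_update__cb, &arg, NULL);
}

stop_machine() is a heavy hammer (it stalls every online CPU for the duration of
the callback), which is why the patch installs the __BKL variants only on BXT with
VT-d enabled rather than unconditionally.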