Restricting the fence to the end of the previous tile-row breaks access
to the final portion of the object. On gen2/3 we employed lazy fencing
to pad out the fence with a scratch page to provide access to the tail,
and now that we also pad out the object on gen4+ we can apply the same
fix.

Fixes: af1a7301c7cf ("drm/i915: Only fence tiled region of object.")
Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_gem_fence_reg.c | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index a1a0c7104576..6de527df6cdc 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -78,20 +78,17 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *fence,
 	val = 0;
 	if (vma) {
 		unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
-		bool is_y_tiled = tiling == I915_TILING_Y;
 		unsigned int stride = i915_gem_object_get_stride(vma->obj);
-		u32 row_size = stride * (is_y_tiled ? 32 : 8);
-		u32 size = rounddown((u32)vma->fence_size, row_size);

 		GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
 		GEM_BUG_ON(vma->node.start & 4095);
 		GEM_BUG_ON(vma->fence_size & 4095);
 		GEM_BUG_ON(stride & 127);

-		val = (vma->node.start + size - 4096) << 32;
+		val = (vma->node.start + vma->fence_size - 4096) << 32;
 		val |= vma->node.start;
 		val |= (u64)((stride / 128) - 1) << fence_pitch_shift;
-		if (is_y_tiled)
+		if (tiling == I915_TILING_Y)
 			val |= BIT(I965_FENCE_TILING_Y_SHIFT);
 		val |= I965_FENCE_REG_VALID;
 	}
-- 
2.11.0
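
For reference, a standalone userspace sketch of the arithmetic the patch
changes: rounding the fence extent down to a whole Y-tile row leaves the
tail of the object outside the fenced range whenever fence_size is not a
multiple of the row size, whereas using the full (padded) fence_size
covers it. The concrete values below (node_start, a 640-byte stride, a
6-page fence_size) are made up for illustration and are not taken from
the patch.

/* Hypothetical illustration only, not part of the patch. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE   4096u
#define Y_TILE_ROWS 32u   /* rows per Y tile on gen4+ */

static uint32_t rounddown_u32(uint32_t x, uint32_t y)
{
	return x - (x % y);
}

int main(void)
{
	/* Assumed example values: any 128-byte-aligned stride and
	 * page-aligned fence_size would do. */
	uint64_t node_start = 0x100000;           /* GTT offset of the vma */
	uint32_t stride = 640;                    /* bytes per pixel row */
	uint32_t fence_size = 6 * PAGE_SIZE;      /* 24576 bytes, page aligned */
	uint32_t row_size = stride * Y_TILE_ROWS; /* 20480 bytes per tile row */

	/* Old code: truncate to the end of the previous complete tile row. */
	uint32_t truncated = rounddown_u32(fence_size, row_size);
	uint64_t old_end = node_start + truncated - PAGE_SIZE;

	/* New code: fence the whole (padded) fence_size. */
	uint64_t new_end = node_start + fence_size - PAGE_SIZE;

	printf("old upper bound: 0x%llx\n", (unsigned long long)old_end);
	printf("new upper bound: 0x%llx\n", (unsigned long long)new_end);
	printf("tail left unfenced by old code: %u bytes\n",
	       (unsigned)(fence_size - truncated));
	return 0;
}

With these numbers the old code stops the fence one tile row (4096 bytes)
short of the end of the object, which is exactly the tail access the
commit message describes.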