In order to reuse the partial view for selftesting, extract the common
function for computing the view.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@xxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_gem.c | 49 +++++++++++++++++++++++++----------------
 1 file changed, 30 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 51c009303c77..5c5cf31ef40f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1757,6 +1757,33 @@ int i915_gem_mmap_gtt_version(void)
 	return 1;
 }
 
+static inline struct i915_ggtt_view
+compute_partial_view(struct drm_i915_gem_object *obj,
+		     struct vm_area_struct *area,
+		     pgoff_t page_offset,
+		     unsigned int chunk)
+{
+	struct i915_ggtt_view view;
+
+	if (i915_gem_object_is_tiled(obj))
+		chunk = roundup(chunk, tile_row_pages(obj));
+
+	view.type = I915_GGTT_VIEW_PARTIAL;
+	view.partial.offset_size = rounddown(page_offset, chunk);
+	view.partial.offset_size =
+		(view.partial.offset_size << INTEL_PARTIAL_SIZE_BITS) |
+		(min_t(unsigned int, chunk,
+		       vma_pages(area) - view.partial.offset_size) - 1);
+
+	/* If the partial covers the entire object, just create a
+	 * normal VMA.
+	 */
+	if (chunk >= obj->base.size >> PAGE_SHIFT)
+		view.type = I915_GGTT_VIEW_NORMAL;
+
+	return view;
+}
+
 /**
  * i915_gem_fault - fault a page into the GTT
  * @area: CPU VMA in question
@@ -1833,26 +1860,10 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
 	/* Now pin it into the GTT as needed */
 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
 	if (IS_ERR(vma)) {
-		struct i915_ggtt_view view;
-		unsigned int chunk;
-
 		/* Use a partial view if it is bigger than available space */
-		chunk = MIN_CHUNK_PAGES;
-		if (i915_gem_object_is_tiled(obj))
-			chunk = roundup(chunk, tile_row_pages(obj));
-
-		view.type = I915_GGTT_VIEW_PARTIAL;
-		view.partial.offset_size = rounddown(page_offset, chunk);
-		view.partial.offset_size =
-			(view.partial.offset_size << INTEL_PARTIAL_SIZE_BITS) |
-			(min_t(unsigned int, chunk,
-			       vma_pages(area) - view.partial.offset_size) - 1);
-
-		/* If the partial covers the entire object, just create a
-		 * normal VMA.
-		 */
-		if (chunk >= obj->base.size >> PAGE_SHIFT)
-			view.type = I915_GGTT_VIEW_NORMAL;
+		struct i915_ggtt_view view =
+			compute_partial_view(obj, area,
+					     page_offset, MIN_CHUNK_PAGES);
 
 		/* Userspace is now writing through an untracked VMA, abandon
 		 * all hope that the hardware is able to track future writes.
-- 
2.11.0
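
A side note on the packed view.partial.offset_size field used above: the
helper stores the chunk-aligned page offset in the high bits and the size
in pages, biased by one, in the low INTEL_PARTIAL_SIZE_BITS bits. The
standalone sketch below mirrors that encoding outside the kernel so the
arithmetic can be compiled and checked in isolation; PARTIAL_SIZE_BITS,
pack_partial() and the numbers in main() are placeholders chosen for
illustration, not i915 definitions, and the kernel does the same steps
with rounddown() and min_t().

/*
 * Standalone sketch of the offset/size packing done by compute_partial_view().
 * Not i915 code: the names and the 12-bit size field below are assumptions
 * made only to demonstrate the encoding.
 */
#include <assert.h>
#include <stdio.h>

#define PARTIAL_SIZE_BITS 12	/* placeholder width for the size field */
#define PARTIAL_SIZE_MASK ((1UL << PARTIAL_SIZE_BITS) - 1)

/* Pack a chunk-aligned page offset and a page count (stored as count - 1)
 * into a single value, mirroring view.partial.offset_size in the patch. */
static unsigned long pack_partial(unsigned long page_offset,
				  unsigned long chunk,
				  unsigned long vma_pages)
{
	unsigned long offset = page_offset - (page_offset % chunk); /* rounddown */
	unsigned long size = chunk;

	if (size > vma_pages - offset)	/* clamp to the end of the mapping */
		size = vma_pages - offset;

	return (offset << PARTIAL_SIZE_BITS) | (size - 1);
}

int main(void)
{
	/* Fault at page 70 of a 256-page mapping with a 64-page chunk. */
	unsigned long v = pack_partial(70, 64, 256);
	unsigned long offset = v >> PARTIAL_SIZE_BITS;
	unsigned long size = (v & PARTIAL_SIZE_MASK) + 1;

	printf("offset=%lu pages, size=%lu pages\n", offset, size);
	assert(offset == 64 && size == 64);
	return 0;
}

Decoding simply reverses the shift and re-adds the size bias, as main()
shows; the fault offset rounds down to a chunk boundary, and the size is
clamped so the partial view never runs past the end of the mapping.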