On Tue, May 16, 2017 at 09:29:33AM +0100, Matthew Auld wrote: > In preparation for supporting huge gtt pages for the ppgtt, we introduce > a gtt_page_size member for gem objects. We fill in the gtt page size by > scanning the sg table to determine the max page size which satisfies the > alignment for each sg entry. > > Signed-off-by: Matthew Auld <matthew.auld@xxxxxxxxx> > Cc: Joonas Lahtinen <joonas.lahtinen@xxxxxxxxxxxxxxx> > Cc: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx> > Cc: Daniel Vetter <daniel@xxxxxxxx> > --- > drivers/gpu/drm/i915/i915_drv.h | 2 ++ > drivers/gpu/drm/i915/i915_gem.c | 23 +++++++++++++++++++++++ > drivers/gpu/drm/i915/i915_gem_object.h | 2 ++ > 3 files changed, 27 insertions(+) > > diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h > index e18f11f77f35..a7a108d18a2d 100644 > --- a/drivers/gpu/drm/i915/i915_drv.h > +++ b/drivers/gpu/drm/i915/i915_drv.h > @@ -2843,6 +2843,8 @@ intel_info(const struct drm_i915_private *dev_priv) > #define USES_PPGTT(dev_priv) (i915.enable_ppgtt) > #define USES_FULL_PPGTT(dev_priv) (i915.enable_ppgtt >= 2) > #define USES_FULL_48BIT_PPGTT(dev_priv) (i915.enable_ppgtt == 3) > +#define HAS_PAGE_SIZE(dev_priv, page_size) \ > + ((dev_priv)->info.page_size_mask & (page_size)) > > #define HAS_OVERLAY(dev_priv) ((dev_priv)->info.has_overlay) > #define OVERLAY_NEEDS_PHYSICAL(dev_priv) \ > diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c > index 0c1cbe98c994..6a5e864d7710 100644 > --- a/drivers/gpu/drm/i915/i915_gem.c > +++ b/drivers/gpu/drm/i915/i915_gem.c > @@ -2294,6 +2294,8 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, > if (!IS_ERR(pages)) > obj->ops->put_pages(obj, pages); > > + obj->gtt_page_size = 0; > + > unlock: > mutex_unlock(&obj->mm.lock); > } > @@ -2473,6 +2475,13 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) > void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, > struct sg_table *pages) > { > + 
struct drm_i915_private *i915 = to_i915(obj->base.dev); > + unsigned long supported_page_sizes = INTEL_INFO(i915)->page_size_mask; > + struct scatterlist *sg; > + unsigned int sg_mask = 0; > + unsigned int i; > + unsigned int bit; > + > lockdep_assert_held(&obj->mm.lock); > > obj->mm.get_page.sg_pos = pages->sgl; > @@ -2486,6 +2495,20 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, > struct sg_table *pages) > { > __i915_gem_object_pin_pages(obj); > obj->mm.quirked = true; > } > + > + for_each_sg(pages->sgl, sg, pages->nents, i) > + sg_mask |= sg->length; > + > + GEM_BUG_ON(!sg_mask); > + > + for_each_set_bit(bit, &supported_page_sizes, BITS_PER_LONG) { > + if (!IS_ALIGNED(sg_mask, 1 << bit)) > + break; > + > + obj->gtt_page_size = 1 << bit; > + } Not here. This is the synchronous part, and we really do not want to loop again. However, we have just looped over the sg table and can compute this mask inline in the asynchronous portion of get_pages. -Chris -- Chris Wilson, Intel Open Source Technology Centre _______________________________________________ Intel-gfx mailing list Intel-gfx@xxxxxxxxxxxxxxxxxxxxx https://lists.freedesktop.org/mailman/listinfo/intel-gfx