On 2017.06.27 15:54:29 +0100, Matthew Auld wrote:
> In preparation for supporting huge gtt pages for the ppgtt, we introduce
> page size members for gem objects. We fill in the page sizes by
> scanning the sg table.
>
> v2: pass the sg_mask to set_pages
>
> v3: calculate the sg_mask inline with populating the sg_table where
> possible, and pass to set_pages along with the pages.
>
> Signed-off-by: Matthew Auld <matthew.auld@xxxxxxxxx>
> Cc: Joonas Lahtinen <joonas.lahtinen@xxxxxxxxxxxxxxx>
> Cc: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
> Cc: Daniel Vetter <daniel@xxxxxxxx>
> ---
>  drivers/gpu/drm/i915/i915_drv.h                  |  5 ++-
>  drivers/gpu/drm/i915/i915_gem.c                  | 43 ++++++++++++++++++++----
>  drivers/gpu/drm/i915/i915_gem_dmabuf.c           | 17 ++++++++--
>  drivers/gpu/drm/i915/i915_gem_internal.c         |  5 ++-
>  drivers/gpu/drm/i915/i915_gem_object.h           | 20 ++++++++++-
>  drivers/gpu/drm/i915/i915_gem_stolen.c           | 13 ++++---
>  drivers/gpu/drm/i915/i915_gem_userptr.c          | 26 ++++++++++----
>  drivers/gpu/drm/i915/selftests/huge_gem_object.c |  4 ++-
>  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c    |  3 +-
>  9 files changed, 110 insertions(+), 26 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index fe225cb9b622..0539f210622f 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -2951,6 +2951,8 @@ intel_info(const struct drm_i915_private *dev_priv)
>  #define USES_PPGTT(dev_priv)		(i915.enable_ppgtt)
>  #define USES_FULL_PPGTT(dev_priv)	(i915.enable_ppgtt >= 2)
>  #define USES_FULL_48BIT_PPGTT(dev_priv)	(i915.enable_ppgtt == 3)
> +#define HAS_PAGE_SIZE(dev_priv, page_size) \
> +	((dev_priv)->info.page_size_mask & (page_size))
>  
>  #define HAS_OVERLAY(dev_priv)		 ((dev_priv)->info.has_overlay)
>  #define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
> @@ -3335,7 +3337,8 @@ i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
>  				unsigned long n);
>  
>  void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
> -				 struct sg_table *pages);
> +				 struct sg_table *pages,
> +				 unsigned int sg_mask);
>  int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
>  
>  static inline int __must_check
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 80539a821004..4d29a5cfa0c4 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -163,7 +163,8 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
>  }
>  
>  static struct sg_table *
> -i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
> +i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj,
> +			       unsigned int *sg_mask)
>  {
>  	struct address_space *mapping = obj->base.filp->f_mapping;
>  	drm_dma_handle_t *phys;
> @@ -223,6 +224,8 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
>  	sg->offset = 0;
>  	sg->length = obj->base.size;
>  
> +	*sg_mask = sg->length;
> +
>  	sg_dma_address(sg) = phys->busaddr;
>  	sg_dma_len(sg) = obj->base.size;
>  
> @@ -2314,6 +2317,8 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
>  	if (!IS_ERR(pages))
>  		obj->ops->put_pages(obj, pages);
>  
> +	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
> +
>  unlock:
>  	mutex_unlock(&obj->mm.lock);
>  }
> @@ -2345,7 +2350,8 @@ static bool i915_sg_trim(struct sg_table *orig_st)
>  }
>  
>  static struct sg_table *
> -i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
> +i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
> +			      unsigned int *sg_mask)
>  {
>  	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
>  	const unsigned long page_count = obj->base.size / PAGE_SIZE;
> @@ -2392,6 +2398,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
>  
>  	sg = st->sgl;
>  	st->nents = 0;
> +	*sg_mask = 0;
>  	for (i = 0; i < page_count; i++) {
>  		const unsigned int shrink[] = {
>  			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
> @@ -2443,8 +2450,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
>  		if (!i ||
>  		    sg->length >= max_segment ||
>  		    page_to_pfn(page) != last_pfn + 1) {
> -			if (i)
> +			if (i) {
> +				*sg_mask |= sg->length;
>  				sg = sg_next(sg);
> +			}
>  			st->nents++;
>  			sg_set_page(sg, page, PAGE_SIZE, 0);
>  		} else {
> @@ -2455,8 +2464,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
>  		/* Check that the i965g/gm workaround works. */
>  		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
>  	}
> -	if (sg) /* loop terminated early; short sg table */
> +	if (sg) { /* loop terminated early; short sg table */
> +		*sg_mask |= sg->length;
>  		sg_mark_end(sg);
> +	}
>  
>  	/* Trim unused sg entries to avoid wasting memory. */
>  	i915_sg_trim(st);
> @@ -2510,8 +2521,13 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
>  }
>  
>  void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
> -				 struct sg_table *pages)
> +				 struct sg_table *pages,
> +				 unsigned int sg_mask)
>  {
> +	struct drm_i915_private *i915 = to_i915(obj->base.dev);
> +	unsigned long supported_page_sizes = INTEL_INFO(i915)->page_size_mask;
> +	unsigned int bit;
> +
>  	lockdep_assert_held(&obj->mm.lock);
>  
>  	obj->mm.get_page.sg_pos = pages->sgl;
> @@ -2525,11 +2541,24 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
>  		__i915_gem_object_pin_pages(obj);
>  		obj->mm.quirked = true;
>  	}
> +
> +	GEM_BUG_ON(!sg_mask);
> +
> +	obj->mm.page_sizes.phys = sg_mask;
> +
> +	obj->mm.page_sizes.sg = 0;
> +	for_each_set_bit(bit, &supported_page_sizes, BITS_PER_LONG) {
> +		if (obj->mm.page_sizes.phys & ~0u << bit)
> +			obj->mm.page_sizes.sg |= BIT(bit);
> +	}
> +
> +	GEM_BUG_ON(!HAS_PAGE_SIZE(i915, obj->mm.page_sizes.sg));

We need to fall back to the default supported page size when vGPU is
active (intel_vgpu_active() is true). The GVT GTT handling can't support
huge page entries yet, so we need to check whether the hypervisor MM can
support huge guest pages, or else do the emulation in GVT.

-- 
Open Source Technology Center, Intel ltd.

$gpg --keyserver wwwkeys.pgp.net --recv-keys 4D781827
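For reference, the suggested fallback could look something like the
fragment below, slotted into the __i915_gem_object_set_pages() hunk quoted
above, right after the loop that derives obj->mm.page_sizes.sg. This is
only a sketch of the idea, not part of the patch: the exact placement and
the use of I915_GTT_PAGE_SIZE_4K as the "default" page size are
assumptions.

	obj->mm.page_sizes.sg = 0;
	for_each_set_bit(bit, &supported_page_sizes, BITS_PER_LONG) {
		if (obj->mm.page_sizes.phys & ~0u << bit)
			obj->mm.page_sizes.sg |= BIT(bit);
	}

	/*
	 * Sketch of the suggested vGPU fallback (placement and the
	 * I915_GTT_PAGE_SIZE_4K constant are assumptions): GVT-g cannot
	 * shadow huge GTT entries yet, so restrict the sg page sizes to
	 * 4K whenever we run as a vGPU guest.
	 */
	if (intel_vgpu_active(i915))
		obj->mm.page_sizes.sg &= I915_GTT_PAGE_SIZE_4K;

	GEM_BUG_ON(!HAS_PAGE_SIZE(i915, obj->mm.page_sizes.sg));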