On Mon, Oct 10, 2016 at 11:27:00PM +0100, Chris Wilson wrote:
> commit 1625e7e549c5 ("drm/i915: make compact dma scatter lists creation
> work with SWIOTLB backend") took a heavy handed approach to undo the
> scatterlist compaction in the face of SWIOTLB. (The compaction hit a bug
> whereby we tried to pass a segment larger than SWIOTLB could handle.) We
> can be a little more intelligent and try compacting the scatterlist up
> to the maximum SWIOTLB segment size (when using SWIOTLB).

Won't this increase the usage of the SWIOTLB bounce buffer?

> v2: Tidy sg_mark_end() and cpp
>
> Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
> CC: Imre Deak <imre.deak@xxxxxxxxx>
> CC: Daniel Vetter <daniel.vetter@xxxxxxxx>
> Cc: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxxxxxxxx>
> ---
>  drivers/gpu/drm/i915/i915_gem.c | 30 ++++++++++++++++++------------
>  1 file changed, 18 insertions(+), 12 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index dff8d05d80ee..50fd611926cb 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -2201,6 +2201,15 @@ unlock:
>          mutex_unlock(&obj->mm.lock);
>  }
>
> +static unsigned long swiotlb_max_size(void)
> +{
> +#if IS_ENABLED(CONFIG_SWIOTLB)
> +        return swiotlb_nr_tbl() << IO_TLB_SHIFT;
> +#else
> +        return 0;
> +#endif
> +}
> +
>  static struct sg_table *
>  i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
>  {
> @@ -2212,6 +2221,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
>          struct sgt_iter sgt_iter;
>          struct page *page;
>          unsigned long last_pfn = 0;     /* suppress gcc warning */
> +        unsigned long max_segment;
>          int ret;
>          gfp_t gfp;
>
> @@ -2222,6 +2232,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
>          GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
>          GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
>
> +        max_segment = swiotlb_max_size();
> +        if (!max_segment)
> +                max_segment = obj->base.size;
> +
>          st = kmalloc(sizeof(*st), GFP_KERNEL);
>          if (st == NULL)
>                  return ERR_PTR(-ENOMEM);
> @@ -2263,15 +2277,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
>                                  goto err_pages;
>                          }
>                  }
> -#ifdef CONFIG_SWIOTLB
> -                if (swiotlb_nr_tbl()) {
> -                        st->nents++;
> -                        sg_set_page(sg, page, PAGE_SIZE, 0);
> -                        sg = sg_next(sg);
> -                        continue;
> -                }
> -#endif
> -                if (!i || page_to_pfn(page) != last_pfn + 1) {
> +                if (!i ||
> +                    sg->length >= max_segment ||
> +                    page_to_pfn(page) != last_pfn + 1) {
>                          if (i)
>                                  sg = sg_next(sg);
>                          st->nents++;
> @@ -2284,9 +2292,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
>                  /* Check that the i965g/gm workaround works. */
>                  WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
>          }
> -#ifdef CONFIG_SWIOTLB
> -        if (!swiotlb_nr_tbl())
> -#endif
> +        if (sg) /* loop terminated early; short sg table */
>                  sg_mark_end(sg);
>
>          ret = i915_gem_gtt_prepare_pages(obj, st);
> --
> 2.9.3
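
For readers who want to see the coalescing rule in isolation, below is a minimal, self-contained userspace sketch of the logic the @@ -2263 hunk introduces: merge physically contiguous pages into one segment until that segment would reach max_segment, then start a new segment. It is an illustration under stated assumptions, not the kernel code itself; the names (coalesce, struct segment) and the two-page cap are invented for the example, while the real function does the same bookkeeping on a struct scatterlist chain.

/*
 * Hypothetical userspace model of the coalescing rule in
 * i915_gem_object_get_pages_gtt(): start a new segment when the page
 * is the first one, when the current segment has already reached
 * max_segment, or when the page is not physically contiguous with the
 * previous one; otherwise extend the current segment.
 */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL

struct segment {
        unsigned long first_pfn;        /* first page frame in the segment */
        unsigned long length;           /* segment length in bytes */
};

static size_t coalesce(const unsigned long *pfns, size_t n,
                       unsigned long max_segment, struct segment *out)
{
        size_t nsegs = 0;
        unsigned long last_pfn = 0;

        for (size_t i = 0; i < n; i++) {
                if (!i ||
                    out[nsegs - 1].length >= max_segment ||
                    pfns[i] != last_pfn + 1) {
                        /* open a new segment */
                        out[nsegs].first_pfn = pfns[i];
                        out[nsegs].length = PAGE_SIZE;
                        nsegs++;
                } else {
                        /* contiguous and under the cap: extend in place */
                        out[nsegs - 1].length += PAGE_SIZE;
                }
                last_pfn = pfns[i];
        }
        return nsegs;
}

int main(void)
{
        /* Six pages: five contiguous, one discontiguous. */
        unsigned long pfns[] = { 100, 101, 102, 103, 104, 200 };
        struct segment segs[6];
        /* Pretend the SWIOTLB cap is two pages (invented for the demo). */
        size_t n = coalesce(pfns, 6, 2 * PAGE_SIZE, segs);

        for (size_t i = 0; i < n; i++)
                printf("seg %zu: pfn %lu, %lu bytes\n",
                       i, segs[i].first_pfn, segs[i].length);
        return 0;
}

With max_segment clamped to the SWIOTLB slab size (swiotlb_nr_tbl() << IO_TLB_SHIFT in the patch), no coalesced segment can exceed what SWIOTLB is able to map, which is the failure mode commit 1625e7e549c5 worked around by disabling compaction entirely.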