On Sat, Feb 09, 2013 at 05:27:36PM +0200, Imre Deak wrote:
> So far we created a sparse dma scatter list for gem objects, where each
> scatter list entry represented only a single page. In the future we'll
> have to handle compact scatter lists too where each entry can consist of
> multiple pages, for example for objects imported through PRIME.
>
> The previous patches have already fixed up all other places where the
> i915 driver _walked_ these lists. Here we have the corresponding fix to
> _create_ compact lists. It's not a performance or memory footprint
> improvement, but it helps to better exercise the new logic.
>
> Reference: http://www.spinics.net/lists/dri-devel/msg33917.html
> Signed-off-by: Imre Deak <imre.deak@intel.com>

Just a quick question: Have you checked with printks or so that we indeed
create such coalesced sg entries every once in a while? (A rough, untested
sketch of the kind of check I mean is at the bottom of this mail.)
-Daniel

> ---
>  drivers/gpu/drm/i915/i915_gem.c | 31 ++++++++++++++++++++++---------
>  1 file changed, 22 insertions(+), 9 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 4a199e0..1028dd5 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -1623,9 +1623,8 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
>  static void
>  i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
>  {
> -    int page_count = obj->base.size / PAGE_SIZE;
> -    struct scatterlist *sg;
> -    int ret, i;
> +    struct drm_sg_iter sg_iter;
> +    int ret;
>
>      BUG_ON(obj->madv == __I915_MADV_PURGED);
>
> @@ -1645,8 +1644,8 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
>      if (obj->madv == I915_MADV_DONTNEED)
>          obj->dirty = 0;
>
> -    for_each_sg(obj->pages->sgl, sg, page_count, i) {
> -        struct page *page = sg_page(sg);
> +    drm_for_each_sg_page(&sg_iter, obj->pages->sgl, 0) {
> +        struct page *page = sg_iter.page;
>
>          if (obj->dirty)
>              set_page_dirty(page);
> @@ -1740,7 +1739,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
>      struct address_space *mapping;
>      struct sg_table *st;
>      struct scatterlist *sg;
> +    struct drm_sg_iter sg_iter;
>      struct page *page;
> +    unsigned long last_pfn = 0;    /* suppress gcc warning */
>      gfp_t gfp;
>
>      /* Assert that the object is not currently in any GPU domain. As it
> @@ -1770,7 +1771,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
>      gfp = mapping_gfp_mask(mapping);
>      gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
>      gfp &= ~(__GFP_IO | __GFP_WAIT);
> -    for_each_sg(st->sgl, sg, page_count, i) {
> +    sg = st->sgl;
> +    st->nents = 0;
> +    for (i = 0; i < page_count; i++) {
>          page = shmem_read_mapping_page_gfp(mapping, i, gfp);
>          if (IS_ERR(page)) {
>              i915_gem_purge(dev_priv, page_count);
> @@ -1793,9 +1796,18 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
>              gfp &= ~(__GFP_IO | __GFP_WAIT);
>          }
>
> -        sg_set_page(sg, page, PAGE_SIZE, 0);
> +        if (!i || page_to_pfn(page) != last_pfn + 1) {
> +            if (i)
> +                sg = sg_next(sg);
> +            st->nents++;
> +            sg_set_page(sg, page, PAGE_SIZE, 0);
> +        } else {
> +            sg->length += PAGE_SIZE;
> +        }
> +        last_pfn = page_to_pfn(page);
>      }
>
> +    sg_mark_end(sg);
>      obj->pages = st;
>
>      if (i915_gem_object_needs_bit17_swizzle(obj))
> @@ -1804,8 +1816,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
>      return 0;
>
>  err_pages:
> -    for_each_sg(st->sgl, sg, i, page_count)
> -        page_cache_release(sg_page(sg));
> +    sg_mark_end(sg);
> +    drm_for_each_sg_page(&sg_iter, st->sgl, 0)
> +        page_cache_release(sg_iter.page);
>      sg_free_table(st);
>      kfree(st);
>      return PTR_ERR(page);
> --
> 1.7.10.4
>
> _______________________________________________
> dri-devel mailing list
> dri-devel@lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/dri-devel
-- 
Daniel Vetter
Software Engineer, Intel Corporation
+41 (0) 79 365 57 48 - http://blog.ffwll.ch
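
PS: To be concrete, the kind of throwaway check I have in mind would look
roughly like the sketch below. Completely untested, and the helper name and
the debug message are invented for illustration; it only relies on
for_each_sg() and the st->nents bookkeeping the patch already maintains.

/*
 * Untested debug sketch (not part of the patch): after the sg table has
 * been built in i915_gem_object_get_pages_gtt(), walk it and count how
 * many entries ended up covering more than one page, i.e. were actually
 * coalesced from physically contiguous pages.
 */
static void i915_gem_debug_count_coalesced(struct sg_table *st)
{
    struct scatterlist *sg;
    unsigned int i, coalesced = 0;

    for_each_sg(st->sgl, sg, st->nents, i)
        if (sg->length > PAGE_SIZE)
            coalesced++;

    printk(KERN_DEBUG "i915: %u of %u sg entries coalesced\n",
           coalesced, st->nents);
}

Calling this just before the "obj->pages = st;" assignment on a few real
workloads should report a nonzero count at least occasionally if the new
coalescing path is really being exercised.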