On Sat, 2013-02-09 at 23:51 +0100, Daniel Vetter wrote:
> On Sat, Feb 09, 2013 at 05:27:35PM +0200, Imre Deak wrote:
> > So far the assumption was that each dma scatter list entry contains only
> > a single page. This might not hold in the future, when we'll introduce
> > compact scatter lists, so prepare for this everywhere in the i915 code
> > where we walk such a list.
> > 
> > We'll fix the place _creating_ these lists separately in the next patch
> > to help the reviewing/bisectability.
> > 
> > Reference: http://www.spinics.net/lists/dri-devel/msg33917.html
> > Signed-off-by: Imre Deak <imre.deak at intel.com>
> 
> Since we now have such a nice macro to loop over sg pages ... Care to also
> convert the two existing (correct) loops in i915_gem_gtt.c and intel-gtt.c
> in a follow-up patch?

Ok, will do.

--Imre

> -Daniel
> 
> > ---
> >  drivers/gpu/drm/i915/i915_drv.h        | 17 ++++++-----------
> >  drivers/gpu/drm/i915/i915_gem.c        | 22 ++++++----------------
> >  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 13 +++++++------
> >  drivers/gpu/drm/i915/i915_gem_tiling.c | 18 ++++++++++--------
> >  4 files changed, 29 insertions(+), 41 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> > index 08c5def..0462428 100644
> > --- a/drivers/gpu/drm/i915/i915_drv.h
> > +++ b/drivers/gpu/drm/i915/i915_drv.h
> > @@ -1531,17 +1531,12 @@ void i915_gem_lastclose(struct drm_device *dev);
> >  int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
> >  static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
> >  {
> > -        struct scatterlist *sg = obj->pages->sgl;
> > -        int nents = obj->pages->nents;
> > -        while (nents > SG_MAX_SINGLE_ALLOC) {
> > -                if (n < SG_MAX_SINGLE_ALLOC - 1)
> > -                        break;
> > -
> > -                sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1);
> > -                n -= SG_MAX_SINGLE_ALLOC - 1;
> > -                nents -= SG_MAX_SINGLE_ALLOC - 1;
> > -        }
> > -        return sg_page(sg+n);
> > +        struct drm_sg_iter sg_iter;
> > +
> > +        drm_for_each_sg_page(&sg_iter, obj->pages->sgl, n << PAGE_SHIFT)
> > +                return sg_iter.page;
> > +
> > +        return NULL;
> >  }
> >  static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
> >  {
> > diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> > index d746177..4a199e0 100644
> > --- a/drivers/gpu/drm/i915/i915_gem.c
> > +++ b/drivers/gpu/drm/i915/i915_gem.c
> > @@ -411,8 +411,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
> >          int obj_do_bit17_swizzling, page_do_bit17_swizzling;
> >          int prefaulted = 0;
> >          int needs_clflush = 0;
> > -        struct scatterlist *sg;
> > -        int i;
> > +        struct drm_sg_iter sg_iter;
> > 
> >          user_data = (char __user *) (uintptr_t) args->data_ptr;
> >          remain = args->size;
> > @@ -441,11 +440,8 @@ i915_gem_shmem_pread(struct drm_device *dev,
> > 
> >          offset = args->offset;
> > 
> > -        for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
> > -                struct page *page;
> > -
> > -                if (i < offset >> PAGE_SHIFT)
> > -                        continue;
> > +        drm_for_each_sg_page(&sg_iter, obj->pages->sgl, offset) {
> > +                struct page *page = sg_iter.page;
> > 
> >                  if (remain <= 0)
> >                          break;
> > @@ -460,7 +456,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
> >                  if ((shmem_page_offset + page_length) > PAGE_SIZE)
> >                          page_length = PAGE_SIZE - shmem_page_offset;
> > 
> > -                page = sg_page(sg);
> >                  page_do_bit17_swizzling = obj_do_bit17_swizzling &&
> >                          (page_to_phys(page) & (1 << 17)) != 0;
> > 
> > @@ -732,8 +727,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
> >          int hit_slowpath = 0;
> >          int needs_clflush_after = 0;
> >          int needs_clflush_before = 0;
> > -        int i;
> > -        struct scatterlist *sg;
> > +        struct drm_sg_iter sg_iter;
> > 
> >          user_data = (char __user *) (uintptr_t) args->data_ptr;
> >          remain = args->size;
> > @@ -768,13 +762,10 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
> > 
> >          offset = args->offset;
> >          obj->dirty = 1;
> > 
> > -        for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
> > -                struct page *page;
> > +        drm_for_each_sg_page(&sg_iter, obj->pages->sgl, offset) {
> > +                struct page *page = sg_iter.page;
> >                  int partial_cacheline_write;
> > 
> > -                if (i < offset >> PAGE_SHIFT)
> > -                        continue;
> > -
> >                  if (remain <= 0)
> >                          break;
> > 
> > @@ -796,7 +787,6 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
> >                          ((shmem_page_offset | page_length)
> >                                  & (boot_cpu_data.x86_clflush_size - 1));
> > 
> > -                page = sg_page(sg);
> >                  page_do_bit17_swizzling = obj_do_bit17_swizzling &&
> >                          (page_to_phys(page) & (1 << 17)) != 0;
> > 
> > diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
> > index 6a5af68..ac98792 100644
> > --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
> > +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
> > @@ -62,7 +62,7 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
> >          src = obj->pages->sgl;
> >          dst = st->sgl;
> >          for (i = 0; i < obj->pages->nents; i++) {
> > -                sg_set_page(dst, sg_page(src), PAGE_SIZE, 0);
> > +                sg_set_page(dst, sg_page(src), src->length, 0);
> >                  dst = sg_next(dst);
> >                  src = sg_next(src);
> >          }
> > @@ -105,7 +105,7 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
> >  {
> >          struct drm_i915_gem_object *obj = dma_buf->priv;
> >          struct drm_device *dev = obj->base.dev;
> > -        struct scatterlist *sg;
> > +        struct drm_sg_iter sg_iter;
> >          struct page **pages;
> >          int ret, i;
> > 
> > @@ -124,14 +124,15 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
> > 
> >          ret = -ENOMEM;
> > 
> > -        pages = drm_malloc_ab(obj->pages->nents, sizeof(struct page *));
> > +        pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
> >          if (pages == NULL)
> >                  goto error;
> > 
> > -        for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i)
> > -                pages[i] = sg_page(sg);
> > +        i = 0;
> > +        drm_for_each_sg_page(&sg_iter, obj->pages->sgl, 0)
> > +                pages[i++] = sg_iter.page;
> > 
> > -        obj->dma_buf_vmapping = vmap(pages, obj->pages->nents, 0, PAGE_KERNEL);
> > +        obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
> >          drm_free_large(pages);
> > 
> >          if (!obj->dma_buf_vmapping)
> > diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
> > index abcba2f..834ed70 100644
> > --- a/drivers/gpu/drm/i915/i915_gem_tiling.c
> > +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
> > @@ -473,28 +473,29 @@ i915_gem_swizzle_page(struct page *page)
> >  void
> >  i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
> >  {
> > -        struct scatterlist *sg;
> > -        int page_count = obj->base.size >> PAGE_SHIFT;
> > +        struct drm_sg_iter sg_iter;
> >          int i;
> > 
> >          if (obj->bit_17 == NULL)
> >                  return;
> > 
> > -        for_each_sg(obj->pages->sgl, sg, page_count, i) {
> > -                struct page *page = sg_page(sg);
> > +        i = 0;
> > +        drm_for_each_sg_page(&sg_iter, obj->pages->sgl, 0) {
> > +                struct page *page = sg_iter.page;
> >                  char new_bit_17 = page_to_phys(page) >> 17;
> >                  if ((new_bit_17 & 0x1) !=
> >                      (test_bit(i, obj->bit_17) != 0)) {
> >                          i915_gem_swizzle_page(page);
> >                          set_page_dirty(page);
> >                  }
> > +                i++;
> >          }
> >  }
> > 
> >  void
> >  i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
> >  {
> > -        struct scatterlist *sg;
> > +        struct drm_sg_iter sg_iter;
> >          int page_count = obj->base.size >> PAGE_SHIFT;
> >          int i;
> > 
> > @@ -508,11 +509,12 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
> >                  }
> >          }
> > 
> > -        for_each_sg(obj->pages->sgl, sg, page_count, i) {
> > -                struct page *page = sg_page(sg);
> > -                if (page_to_phys(page) & (1 << 17))
> > +        i = 0;
> > +        drm_for_each_sg_page(&sg_iter, obj->pages->sgl, 0) {
> > +                if (page_to_phys(sg_iter.page) & (1 << 17))
> >                          __set_bit(i, obj->bit_17);
> >                  else
> >                          __clear_bit(i, obj->bit_17);
> > +                i++;
> >          }
> >  }
> > -- 
> > 1.7.10.4
> > 
> > _______________________________________________
> > Intel-gfx mailing list
> > Intel-gfx at lists.freedesktop.org
> > http://lists.freedesktop.org/mailman/listinfo/intel-gfx
> 
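
For context: struct drm_sg_iter and drm_for_each_sg_page() appear to come
from an earlier patch in this series and are not shown here. The snippet
below is only a rough sketch of the idea that iterator relies on, not its
actual implementation: once scatterlist entries are coalesced, a single
entry can cover several contiguous pages, so a page walk has to step
through each entry with nth_page() instead of assuming one page per entry.
The helper name walk_sg_pages() is made up for illustration, and it assumes
each entry's length is page-aligned, as it is for the shmem-backed GEM
objects walked in the patch.

#include <linux/scatterlist.h>
#include <linux/mm.h>

/*
 * Illustrative sketch only -- not the drm_sg_iter implementation from this
 * series. Visits every page backing @st, even when a single scatterlist
 * entry covers several contiguous pages. Assumes sg->length is a multiple
 * of PAGE_SIZE.
 */
static void walk_sg_pages(struct sg_table *st,
                          void (*fn)(struct page *page, void *data),
                          void *data)
{
        struct scatterlist *sg;
        unsigned int i, j;

        for_each_sg(st->sgl, sg, st->nents, i)
                for (j = 0; j < (sg->length >> PAGE_SHIFT); j++)
                        /* step page by page inside one coalesced entry */
                        fn(nth_page(sg_page(sg), j), data);
}

The converted loops above presumably end up doing this same two-level walk
via the iterator, keeping their own page index where the bit-17 code needs
one.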