From: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx>

As for the partial view, just simplifies the loop and enables entry
coalescing.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 42 +++++++++++++------------------------
 1 file changed, 14 insertions(+), 28 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index f0c70e7c8daa..655c31182c68 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -3464,11 +3464,10 @@ i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
 	return vma;
 }
 
-static struct scatterlist *
+static void
 rotate_pages(const dma_addr_t *in, unsigned int offset,
-	     unsigned int width, unsigned int height,
-	     unsigned int stride,
-	     struct sg_table *st, struct scatterlist *sg)
+	     unsigned int width, unsigned int height, unsigned int stride,
+	     struct i915_sg_create_state *state)
 {
 	unsigned int column, row;
 	unsigned int src_idx;
@@ -3476,34 +3475,27 @@ rotate_pages(const dma_addr_t *in, unsigned int offset,
 	for (column = 0; column < width; column++) {
 		src_idx = stride * (height - 1) + column;
 		for (row = 0; row < height; row++) {
-			st->nents++;
 			/* We don't need the pages, but need to initialize
 			 * the entries so the sg list can be happily traversed.
 			 * The only thing we need are DMA addresses.
 			 */
-			sg_set_page(sg, NULL, PAGE_SIZE, 0);
-			sg_dma_address(sg) = in[offset + src_idx];
-			sg_dma_len(sg) = PAGE_SIZE;
-			sg = sg_next(sg);
+			i915_sg_add_dma(state, in[offset + src_idx]);
 			src_idx -= stride;
 		}
 	}
-
-	return sg;
 }
 
 static struct sg_table *
 intel_rotate_fb_obj_pages(const struct intel_rotation_info *rot_info,
 			  struct drm_i915_gem_object *obj)
 {
+	struct i915_sg_create_state *state;
 	const size_t n_pages = obj->base.size / PAGE_SIZE;
 	unsigned int size = intel_rotation_info_size(rot_info);
 	struct sgt_iter sgt_iter;
 	dma_addr_t dma_addr;
 	unsigned long i;
 	dma_addr_t *page_addr_list;
-	struct sg_table *st;
-	struct scatterlist *sg;
 	int ret = -ENOMEM;
 
 	/* Allocate a temporary list of source pages for random access. */
@@ -3513,14 +3505,12 @@ intel_rotate_fb_obj_pages(const struct intel_rotation_info *rot_info,
 	if (!page_addr_list)
 		return ERR_PTR(ret);
 
-	/* Allocate target SG list. */
-	st = kmalloc(sizeof(*st), GFP_KERNEL);
-	if (!st)
+	/* Begin creation of the target SG list. */
+	state = i915_sg_create(size);
+	if (IS_ERR(state)) {
+		ret = PTR_ERR(state);
 		goto err_st_alloc;
-
-	ret = sg_alloc_table(st, size, GFP_KERNEL);
-	if (ret)
-		goto err_sg_alloc;
+	}
 
 	/* Populate source page list from the object. */
 	i = 0;
@@ -3528,13 +3518,11 @@ intel_rotate_fb_obj_pages(const struct intel_rotation_info *rot_info,
 	for_each_sgt_dma(dma_addr, sgt_iter, obj->pages)
 		page_addr_list[i++] = dma_addr;
 	GEM_BUG_ON(i != n_pages);
 
-	st->nents = 0;
-	sg = st->sgl;
 	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
-		sg = rotate_pages(page_addr_list, rot_info->plane[i].offset,
-				  rot_info->plane[i].width, rot_info->plane[i].height,
-				  rot_info->plane[i].stride, st, sg);
+		rotate_pages(page_addr_list, rot_info->plane[i].offset,
+			     rot_info->plane[i].width, rot_info->plane[i].height,
+			     rot_info->plane[i].stride, state);
 	}
 
 	DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages)\n",
@@ -3542,10 +3530,8 @@ intel_rotate_fb_obj_pages(const struct intel_rotation_info *rot_info,
 		      obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
 
 	drm_free_large(page_addr_list);
 
-	return st;
+	return i915_sg_complete(state);
 
-err_sg_alloc:
-	kfree(st);
 err_st_alloc:
 	drm_free_large(page_addr_list);
-- 
2.7.4

_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx