The pointer passed to zlib_deflate() for compression could point to IO-mapped
memory and end up being dereferenced directly. Currently the IO-mapped memory
is copied to a temporary buffer, which is then handed to zlib_deflate(), only
when the platform supports a fast copy using non-temporal instructions. If the
platform lacks that support, the IO-mapped memory is used directly. Directly
dereferencing IO memory makes the driver non-portable outside x86 and should
be avoided.

With this patch, IO memory is always copied to a temporary buffer, irrespective
of platform support for the fast copy, and the i915_has_memcpy_from_wc() check
is removed. drm_memcpy_from_wc_vaddr() is now used for the copy instead of
i915_memcpy_from_wc() for two reasons:

- i915_memcpy_from_wc() will be deprecated.
- drm_memcpy_from_wc_vaddr() does not fail when the fast copy is unsupported;
  it falls back to copying with memcpy_fromio().

Signed-off-by: Balasubramani Vivekanandan <balasubramani.vivekanandan@xxxxxxxxx>
Acked-by: Nirmoy Das <nirmoy.das@xxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_gpu_error.c | 45 +++++++++++++++------------
 1 file changed, 25 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 0512c66fa4f3..9cafacb4ceb6 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -262,9 +262,12 @@ static bool compress_init(struct i915_vma_compress *c)
 		return false;
 	}
 
-	c->tmp = NULL;
-	if (i915_has_memcpy_from_wc())
-		c->tmp = pool_alloc(&c->pool, ALLOW_FAIL);
+	c->tmp = pool_alloc(&c->pool, ALLOW_FAIL);
+	if (!c->tmp) {
+		kfree(zstream->workspace);
+		pool_fini(&c->pool);
+		return false;
+	}
 
 	return true;
 }
@@ -296,15 +299,17 @@ static void *compress_next_page(struct i915_vma_compress *c,
 }
 
 static int compress_page(struct i915_vma_compress *c,
-			 void *src,
-			 struct i915_vma_coredump *dst,
-			 bool wc)
+			 struct iosys_map *src,
+			 struct i915_vma_coredump *dst)
 {
 	struct z_stream_s *zstream = &c->zstream;
 
-	zstream->next_in = src;
-	if (wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
+	if (src->is_iomem) {
+		drm_memcpy_from_wc_vaddr(c->tmp, src, 0, PAGE_SIZE);
 		zstream->next_in = c->tmp;
+	} else {
+		zstream->next_in = src->vaddr;
+	}
 	zstream->avail_in = PAGE_SIZE;
 
 	do {
@@ -393,9 +398,8 @@ static bool compress_start(struct i915_vma_compress *c)
 }
 
 static int compress_page(struct i915_vma_compress *c,
-			 void *src,
-			 struct i915_vma_coredump *dst,
-			 bool wc)
+			 struct iosys_map *src,
+			 struct i915_vma_coredump *dst)
 {
 	void *ptr;
 
@@ -403,8 +407,7 @@ static int compress_page(struct i915_vma_compress *c,
 	if (!ptr)
 		return -ENOMEM;
 
-	if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
-		memcpy(ptr, src, PAGE_SIZE);
+	drm_memcpy_from_wc_vaddr(ptr, src, 0, PAGE_SIZE);
 
 	list_add_tail(&virt_to_page(ptr)->lru, &dst->page_list);
 	cond_resched();
@@ -1092,6 +1095,7 @@ i915_vma_coredump_create(const struct intel_gt *gt,
 	if (drm_mm_node_allocated(&ggtt->error_capture)) {
 		void __iomem *s;
 		dma_addr_t dma;
+		struct iosys_map src;
 
 		for_each_sgt_daddr(dma, iter, vma_res->bi.pages) {
 			mutex_lock(&ggtt->error_mutex);
@@ -1100,9 +1104,8 @@ i915_vma_coredump_create(const struct intel_gt *gt,
 			mb();
 
 			s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
-			ret = compress_page(compress,
-					    (void __force *)s, dst,
-					    true);
+			iosys_map_set_vaddr_iomem(&src, s);
+			ret = compress_page(compress, &src, dst);
 			io_mapping_unmap(s);
 
 			mb();
@@ -1114,6 +1117,7 @@ i915_vma_coredump_create(const struct intel_gt *gt,
 	} else if (vma_res->bi.lmem) {
 		struct intel_memory_region *mem = vma_res->mr;
 		dma_addr_t dma;
+		struct iosys_map src;
 
 		for_each_sgt_daddr(dma, iter, vma_res->bi.pages) {
 			void __iomem *s;
@@ -1121,15 +1125,15 @@ i915_vma_coredump_create(const struct intel_gt *gt,
 			s = io_mapping_map_wc(&mem->iomap,
 					      dma - mem->region.start,
 					      PAGE_SIZE);
-			ret = compress_page(compress,
-					    (void __force *)s, dst,
-					    true);
+			iosys_map_set_vaddr_iomem(&src, s);
+			ret = compress_page(compress, &src, dst);
 			io_mapping_unmap(s);
 			if (ret)
 				break;
 		}
 	} else {
 		struct page *page;
+		struct iosys_map src;
 
 		for_each_sgt_page(page, iter, vma_res->bi.pages) {
 			void *s;
@@ -1137,7 +1141,8 @@ i915_vma_coredump_create(const struct intel_gt *gt,
 			drm_clflush_pages(&page, 1);
 
 			s = kmap(page);
-			ret = compress_page(compress, s, dst, false);
+			iosys_map_set_vaddr(&src, s);
+			ret = compress_page(compress, &src, dst);
 			kunmap(page);
 
 			drm_clflush_pages(&page, 1);
-- 
2.25.1
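For readers less familiar with struct iosys_map, the sketch below illustrates
the copy policy the patch adopts in compress_page(): the source page is
described by an iosys_map, and IO-mapped memory is never dereferenced directly
but always bounced through a regular-memory buffer. This is a simplified,
standalone illustration, not the patch's code; the open-coded memcpy_fromio()
stands in for drm_memcpy_from_wc_vaddr(), which additionally attempts the
non-temporal fast copy before falling back.

#include <linux/iosys-map.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/string.h>

/*
 * Simplified illustration only: copy one page described by an iosys_map
 * into a regular-memory buffer without ever dereferencing IO memory
 * directly.  The patch itself uses drm_memcpy_from_wc_vaddr(), which
 * tries a non-temporal fast copy first and falls back to memcpy_fromio().
 */
static void copy_src_page(void *tmp, const struct iosys_map *src)
{
	if (src->is_iomem)
		memcpy_fromio(tmp, src->vaddr_iomem, PAGE_SIZE);
	else
		memcpy(tmp, src->vaddr, PAGE_SIZE);
}

On the caller side this mirrors how the patch wraps the mappings with
iosys_map_set_vaddr_iomem() (for the GGTT and lmem IO mappings) and
iosys_map_set_vaddr() (for kmap'd pages) before calling compress_page().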