LMEM backed buffer objects do not have struct page information, and are
not WB compatible. Currently the cpu access and vmap interfaces only
support struct page backed objects.

Update the dma-buf interfaces begin/end_cpu_access and vmap/vunmap to be
LMEM aware.

Signed-off-by: Michael J. Ruhl <michael.j.ruhl@xxxxxxxxx>
---
 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 402c989cc23d..988778cc8539 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -155,7 +155,10 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
 
-	return i915_gem_object_pin_map(obj, I915_MAP_WB);
+	if (i915_gem_object_has_struct_page(obj))
+		return i915_gem_object_pin_map(obj, I915_MAP_WB);
+	else
+		return i915_gem_object_pin_map(obj, I915_MAP_WC);
 }
 
 static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
@@ -201,7 +204,11 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire
 	if (err)
 		goto out;
 
-	err = i915_gem_object_set_to_cpu_domain(obj, write);
+	if (i915_gem_object_has_struct_page(obj))
+		err = i915_gem_object_set_to_cpu_domain(obj, write);
+	else
+		err = i915_gem_object_set_to_wc_domain(obj, write);
+
 	i915_gem_object_unlock(obj);
 
 out:
@@ -222,7 +229,9 @@ static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direct
 	if (err)
 		goto out;
 
-	err = i915_gem_object_set_to_gtt_domain(obj, false);
+	if (i915_gem_object_has_struct_page(obj))
+		err = i915_gem_object_set_to_gtt_domain(obj, false);
+
 	i915_gem_object_unlock(obj);
 
 out:
-- 
2.21.0
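
For readers skimming the change, here is a minimal illustrative sketch (not part
of this patch) of how the WB/WC selection in i915_gem_dmabuf_vmap() could be
expressed as a small helper. i915_gem_object_has_struct_page(),
i915_gem_object_pin_map(), dma_buf_to_obj() and the I915_MAP_* values are taken
from the diff above; the helper name i915_gem_dmabuf_map_type() is hypothetical:

/*
 * Hypothetical helper (illustration only): choose a CPU mapping type
 * for a GEM object.  LMEM backed objects have no struct page backing
 * and are not WB compatible, so use a write-combined mapping for them.
 */
static enum i915_map_type i915_gem_dmabuf_map_type(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_has_struct_page(obj))
		return I915_MAP_WB;

	return I915_MAP_WC;
}

static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	return i915_gem_object_pin_map(obj, i915_gem_dmabuf_map_type(obj));
}

The same struct-page predicate drives the domain choice in begin_cpu_access
(CPU domain for struct page backed objects, WC domain otherwise), so a single
helper would keep the two paths from drifting apart.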