If tbo.mem.bus.caching is ttm_cached, the buffer is intended to be mapped
as cached from the CPU side. Map it with ioremap_cache. This wasn't
necessary before, as device memory was never mapped as cached from the
CPU side. It becomes necessary for Aldebaran, as its device memory is
mapped cached from the CPU.

Signed-off-by: Oak Zeng <Oak.Zeng@xxxxxxx>
Reviewed-by: Christian König <Christian.Koenig@xxxxxxx>
---
 drivers/gpu/drm/ttm/ttm_bo_util.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 031e581..8c65a13 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -91,6 +91,8 @@ static int ttm_resource_ioremap(struct ttm_device *bdev,
 
 		if (mem->bus.caching == ttm_write_combined)
 			addr = ioremap_wc(mem->bus.offset, bus_size);
+		else if (mem->bus.caching == ttm_cached)
+			addr = ioremap_cache(mem->bus.offset, bus_size);
 		else
 			addr = ioremap(mem->bus.offset, bus_size);
 		if (!addr) {
@@ -372,6 +374,9 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
 		if (mem->bus.caching == ttm_write_combined)
 			map->virtual = ioremap_wc(bo->mem.bus.offset + offset,
 						  size);
+		else if (mem->bus.caching == ttm_cached)
+			map->virtual = ioremap_cache(bo->mem.bus.offset + offset,
+						     size);
 		else
 			map->virtual = ioremap(bo->mem.bus.offset + offset,
 					       size);
@@ -490,6 +495,9 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
 		else if (mem->bus.caching == ttm_write_combined)
 			vaddr_iomem = ioremap_wc(mem->bus.offset,
 						 bo->base.size);
+		else if (mem->bus.caching == ttm_cached)
+			vaddr_iomem = ioremap_cache(mem->bus.offset,
+						    bo->base.size);
 		else
 			vaddr_iomem = ioremap(mem->bus.offset,
 					      bo->base.size);
-- 
2.7.4

_______________________________________________
dri-devel mailing list
dri-devel@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/dri-devel
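
All three hunks open-code the same three-way selection between ioremap
variants. As a minimal sketch only (the helper name ttm_ioremap_bus is
made up here and is not introduced by this patch or present in the TTM
API), the added logic boils down to:

	/*
	 * Hypothetical helper, for illustration only -- not part of this
	 * patch. It captures the selection that each hunk above performs
	 * at its own call site.
	 */
	#include <linux/io.h>
	#include <drm/ttm/ttm_caching.h>

	static void __iomem *ttm_ioremap_bus(phys_addr_t offset, size_t size,
					     enum ttm_caching caching)
	{
		if (caching == ttm_write_combined)
			return ioremap_wc(offset, size);    /* write-combined */
		else if (caching == ttm_cached)
			return ioremap_cache(offset, size); /* CPU-cached, the new case */

		return ioremap(offset, size);               /* uncached default */
	}

Note that plain ioremap() remains the fallback, so resources that are not
marked ttm_cached keep their existing uncached mapping; only ttm_cached
resources (such as Aldebaran device memory) take the new ioremap_cache path.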