From: Thomas Hellstrom <thellstrom@xxxxxxxxxx>

With TTM pages allocated out of the DMA pool, use force_dma_unencrypted()
to set up the correct page protection. Previously the page protection was
unconditionally set to encrypted, which only works with SME encryption on
devices with a large enough DMA mask.

Tested with vmwgfx and SEV-ES. Screen garbage without this patch and
normal functionality with it.

Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: "H. Peter Anvin" <hpa@xxxxxxxxx>
Cc: Heiko Carstens <heiko.carstens@xxxxxxxxxx>
Cc: Christian Borntraeger <borntraeger@xxxxxxxxxx>
Cc: Tom Lendacky <thomas.lendacky@xxxxxxx>
Cc: Christian König <christian.koenig@xxxxxxx>
Signed-off-by: Thomas Hellstrom <thellstrom@xxxxxxxxxx>
---
 drivers/gpu/drm/ttm/ttm_bo_util.c        | 17 +++++++++++++----
 drivers/gpu/drm/ttm/ttm_bo_vm.c          | 21 ++++++++++-----------
 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c |  4 ++++
 drivers/gpu/drm/vmwgfx/vmwgfx_blit.c     |  6 ++++--
 include/drm/ttm/ttm_bo_driver.h          |  8 +++++---
 include/drm/ttm/ttm_tt.h                 |  1 +
 6 files changed, 37 insertions(+), 20 deletions(-)
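
For reviewers, a condensed sketch of the ttm_io_prot() flow this patch
creates. ttm_io_prot_sketch() is an invented name, the per-architecture
#ifdef blocks of the real function are collapsed into a single
pgprot_noncached() line, and the headers named in the comment are
assumptions to check against your tree; this is a sketch of the idea,
not code from the patch:

/*
 * Illustration only. Types and helpers assumed to come from
 * <drm/ttm/ttm_bo_driver.h>, <drm/ttm/ttm_tt.h> and
 * <linux/mem_encrypt.h>.
 */
pgprot_t ttm_io_prot_sketch(u32 caching_flags, u32 tt_page_flags,
			    pgprot_t tmp)
{
	/*
	 * Cached mappings no longer return early; they fall through
	 * to the encryption fixup below.
	 */
	if (!(caching_flags & TTM_PL_FLAG_CACHED))
		tmp = pgprot_noncached(tmp); /* arch-specific in reality */

	/*
	 * New with this patch: pages allocated unencrypted because
	 * force_dma_unencrypted() was true at populate time must also
	 * be mapped unencrypted. On x86, pgprot_decrypted() clears the
	 * SME/SEV encryption mask from the protection bits.
	 */
	if (tt_page_flags & TTM_PAGE_FLAG_DECRYPTED)
		tmp = pgprot_decrypted(tmp);

	return tmp;
}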
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index fe81c565e7ef..d5ad8f03b63f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -419,11 +419,13 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 		page = i * dir + add;
 		if (old_iomap == NULL) {
 			pgprot_t prot = ttm_io_prot(old_mem->placement,
+						    ttm->page_flags,
 						    PAGE_KERNEL);
 			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
 						   prot);
 		} else if (new_iomap == NULL) {
 			pgprot_t prot = ttm_io_prot(new_mem->placement,
+						    ttm->page_flags,
 						    PAGE_KERNEL);
 			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
 						   prot);
@@ -526,11 +528,11 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	return 0;
 }
 
-pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
+pgprot_t ttm_io_prot(u32 caching_flags, u32 tt_page_flags, pgprot_t tmp)
 {
 	/* Cached mappings need no adjustment */
 	if (caching_flags & TTM_PL_FLAG_CACHED)
-		return tmp;
+		goto check_encryption;
 
 #if defined(__i386__) || defined(__x86_64__)
 	if (caching_flags & TTM_PL_FLAG_WC)
@@ -548,6 +550,11 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
 #if defined(__sparc__)
 	tmp = pgprot_noncached(tmp);
 #endif
+
+check_encryption:
+	if (tt_page_flags & TTM_PAGE_FLAG_DECRYPTED)
+		tmp = pgprot_decrypted(tmp);
+
 	return tmp;
 }
 EXPORT_SYMBOL(ttm_io_prot);
@@ -594,7 +601,8 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
 	if (ret)
 		return ret;
 
-	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
+	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED) &&
+	    !(ttm->page_flags & TTM_PAGE_FLAG_DECRYPTED)) {
 		/*
 		 * We're mapping a single page, and the desired
 		 * page protection is consistent with the bo.
@@ -608,7 +616,8 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
 		 * We need to use vmap to get the desired page protection
 		 * or to make the buffer object look contiguous.
 		 */
-		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
+		prot = ttm_io_prot(mem->placement, ttm->page_flags,
+				   PAGE_KERNEL);
 		map->bo_kmap_type = ttm_bo_map_vmap;
 		map->virtual = vmap(ttm->pages + start_page, num_pages,
 				    0, prot);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 76eedb963693..194d8d618d23 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -226,12 +226,7 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 	 * by mmap_sem in write mode.
 	 */
 	cvma = *vma;
-	cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
-
-	if (bo->mem.bus.is_iomem) {
-		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
-						cvma.vm_page_prot);
-	} else {
+	if (!bo->mem.bus.is_iomem) {
 		struct ttm_operation_ctx ctx = {
 			.interruptible = false,
 			.no_wait_gpu = false,
@@ -240,14 +235,18 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 		};
 
 		ttm = bo->ttm;
-		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
-						cvma.vm_page_prot);
-
-		/* Allocate all page at once, most common usage */
-		if (ttm_tt_populate(ttm, &ctx)) {
+		if (ttm_tt_populate(bo->ttm, &ctx)) {
 			ret = VM_FAULT_OOM;
 			goto out_io_unlock;
 		}
+		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
+						ttm->page_flags,
+						cvma.vm_page_prot);
+	} else {
+		/* Iomem should not be marked encrypted */
+		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
+						TTM_PAGE_FLAG_DECRYPTED,
+						cvma.vm_page_prot);
 	}
 
 	/*
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 7d78e6deac89..9b15df8ecd49 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -48,6 +48,7 @@
 #include <linux/atomic.h>
 #include <linux/device.h>
 #include <linux/kthread.h>
+#include <linux/dma-direct.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_page_alloc.h>
 #include <drm/ttm/ttm_set_memory.h>
@@ -984,6 +985,9 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
 	}
 
 	ttm->state = tt_unbound;
+	if (force_dma_unencrypted(dev))
+		ttm->page_flags |= TTM_PAGE_FLAG_DECRYPTED;
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ttm_dma_populate);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index bb46ca0c458f..d3ced89a37e9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -483,8 +483,10 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
 	d.src_pages = src->ttm->pages;
 	d.dst_num_pages = dst->num_pages;
 	d.src_num_pages = src->num_pages;
-	d.dst_prot = ttm_io_prot(dst->mem.placement, PAGE_KERNEL);
-	d.src_prot = ttm_io_prot(src->mem.placement, PAGE_KERNEL);
+	d.dst_prot = ttm_io_prot(dst->mem.placement, dst->ttm->page_flags,
+				 PAGE_KERNEL);
+	d.src_prot = ttm_io_prot(src->mem.placement, src->ttm->page_flags,
+				 PAGE_KERNEL);
 	d.diff = diff;
 
 	for (j = 0; j < h; ++j) {
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 6f536caea368..68ead1bd3042 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -893,13 +893,15 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);
 
 /**
  * ttm_io_prot
  *
- * @c_state: Caching state.
+ * @caching_flags: The caching flags of the map.
+ * @tt_page_flags: The tt_page_flags of the map, TTM_PAGE_FLAG_*
  * @tmp: Page protection flag for a normal, cached mapping.
  *
  * Utility function that returns the pgprot_t that should be used for
- * setting up a PTE with the caching model indicated by @c_state.
+ * setting up a PTE with the caching model indicated by @caching_flags
+ * and the encryption state indicated by @tt_page_flags.
  */
-pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
+pgprot_t ttm_io_prot(u32 caching_flags, u32 tt_page_flags, pgprot_t tmp);
 
 extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
index c0e928abf592..45cc26355513 100644
--- a/include/drm/ttm/ttm_tt.h
+++ b/include/drm/ttm/ttm_tt.h
@@ -41,6 +41,7 @@ struct ttm_operation_ctx;
 #define TTM_PAGE_FLAG_DMA32	(1 << 7)
 #define TTM_PAGE_FLAG_SG	(1 << 8)
 #define TTM_PAGE_FLAG_NO_RETRY	(1 << 9)
+#define TTM_PAGE_FLAG_DECRYPTED	(1 << 10)
 
 enum ttm_caching_state {
	tt_uncached,
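
Also for reviewers: the calling pattern the widened ttm_io_prot()
signature imposes, as a hypothetical helper. example_map_tt() is
invented here for illustration and simply mirrors the vmap() path of
ttm_bo_kmap_ttm() in the first file above:

/* Illustration only -- not part of this patch. */
static void *example_map_tt(struct ttm_buffer_object *bo)
{
	struct ttm_tt *ttm = bo->ttm;
	pgprot_t prot = ttm_io_prot(bo->mem.placement, ttm->page_flags,
				    PAGE_KERNEL);

	/* Forwarding ttm->page_flags is what yields a decrypted PTE
	 * when the pages came from the DMA pool on an SEV system.
	 */
	return vmap(ttm->pages, bo->num_pages, 0, prot);
}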
-- 
2.20.1