From: Thomas Hellstrom <thellstrom@xxxxxxxxxx>

With SEV encryption, all DMA memory must be marked decrypted (AKA
"shared") for devices to be able to read it. In the future we might
want to be able to switch normal (encrypted) memory to decrypted in
exactly the same way as we handle caching states, and that would
require additional memory pools. But for now, rely on memory allocated
with dma_alloc_coherent(), which is already decrypted when SEV is
enabled. Set up the page protection accordingly.

Drivers must detect that SEV is enabled and switch to the DMA page
pool if they don't want to bounce DMA through the SWIOTLB and
implement proper syncing (a sketch of that driver-side switch follows
the diffstat below).

Tested with vmwgfx and SEV-ES: screen garbage without this patch and
normal functionality with it.

Cc: Christian König <christian.koenig@xxxxxxx>
Cc: Thomas Lendacky <thomas.lendacky@xxxxxxx>
Signed-off-by: Thomas Hellstrom <thellstrom@xxxxxxxxxx>
---
 drivers/gpu/drm/ttm/ttm_bo_util.c        | 17 +++++++++++++----
 drivers/gpu/drm/ttm/ttm_bo_vm.c          | 17 +++++++----------
 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c |  3 +++
 drivers/gpu/drm/vmwgfx/vmwgfx_blit.c     |  6 ++++--
 include/drm/ttm/ttm_bo_driver.h          |  8 +++++---
 include/drm/ttm/ttm_tt.h                 |  1 +
 6 files changed, 33 insertions(+), 19 deletions(-)
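
[Note for reviewers, not part of the commit: below is a minimal,
illustrative sketch of the driver-side policy described above. The
struct my_tt, my_ttm_populate() and the ->dev member are hypothetical
names invented for the example; sev_active(), ttm_dma_populate() and
ttm_pool_populate() are the existing interfaces this patch builds on.]

#include <linux/device.h>
#include <linux/mem_encrypt.h>		/* sev_active() */
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_page_alloc.h>	/* ttm_{pool,dma}_populate() */

/* Hypothetical driver-private tt, embedding the ttm_dma_tt TTM sees. */
struct my_tt {
	struct ttm_dma_tt dma_ttm;
	struct device *dev;
};

static int my_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	struct my_tt *tt = container_of(ttm, struct my_tt, dma_ttm.ttm);

	if (ttm->state != tt_unpopulated)
		return 0;

	/*
	 * With SEV active, pages from the ordinary pool are encrypted, so
	 * device DMA to them would bounce through the SWIOTLB. Populate
	 * from the coherent pool instead: ttm_dma_populate() hands back
	 * dma_alloc_coherent() memory, which is already decrypted, and
	 * (with this patch) sets TTM_PAGE_FLAG_DECRYPTED so that
	 * ttm_io_prot() generates decrypted PTEs for CPU mappings.
	 */
	if (sev_active())
		return ttm_dma_populate(&tt->dma_ttm, tt->dev, ctx);

	return ttm_pool_populate(ttm, ctx);
}

[A driver that keeps the ordinary pool under SEV gets encrypted pages
and must then live with SWIOTLB bouncing plus explicit DMA syncs,
which is exactly what the coherent path above avoids.]
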
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 82ea26a49959..66d401935e0f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -419,11 +419,13 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 		page = i * dir + add;
 		if (old_iomap == NULL) {
 			pgprot_t prot = ttm_io_prot(old_mem->placement,
+						    ttm->page_flags,
 						    PAGE_KERNEL);
 			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
 						   prot);
 		} else if (new_iomap == NULL) {
 			pgprot_t prot = ttm_io_prot(new_mem->placement,
+						    ttm->page_flags,
 						    PAGE_KERNEL);
 			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
 						   prot);
@@ -525,11 +527,11 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	return 0;
 }
 
-pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
+pgprot_t ttm_io_prot(u32 caching_flags, u32 tt_page_flags, pgprot_t tmp)
 {
 	/* Cached mappings need no adjustment */
 	if (caching_flags & TTM_PL_FLAG_CACHED)
-		return tmp;
+		goto check_encryption;
 
 #if defined(__i386__) || defined(__x86_64__)
 	if (caching_flags & TTM_PL_FLAG_WC)
@@ -547,6 +549,11 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
 #if defined(__sparc__)
 	tmp = pgprot_noncached(tmp);
 #endif
+
+check_encryption:
+	if (tt_page_flags & TTM_PAGE_FLAG_DECRYPTED)
+		tmp = pgprot_decrypted(tmp);
+
 	return tmp;
 }
 EXPORT_SYMBOL(ttm_io_prot);
@@ -593,7 +600,8 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
 	if (ret)
 		return ret;
 
-	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
+	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED) &&
+	    !(ttm->page_flags & TTM_PAGE_FLAG_DECRYPTED)) {
 		/*
 		 * We're mapping a single page, and the desired
 		 * page protection is consistent with the bo.
@@ -607,7 +615,8 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
 		 * We need to use vmap to get the desired page protection
 		 * or to make the buffer object look contiguous.
 		 */
-		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
+		prot = ttm_io_prot(mem->placement, ttm->page_flags,
+				   PAGE_KERNEL);
 		map->bo_kmap_type = ttm_bo_map_vmap;
 		map->virtual = vmap(ttm->pages + start_page, num_pages,
 				    0, prot);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index e93b1ad7828f..a7426f48c21d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -229,12 +229,7 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 	 * by mmap_sem in write mode.
 	 */
 	cvma = *vma;
-	cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
-
-	if (bo->mem.bus.is_iomem) {
-		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
-						cvma.vm_page_prot);
-	} else {
+	if (!bo->mem.bus.is_iomem) {
 		struct ttm_operation_ctx ctx = {
 			.interruptible = false,
 			.no_wait_gpu = false,
@@ -244,13 +239,15 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 
 		ttm = bo->ttm;
 		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
-						cvma.vm_page_prot);
-
-		/* Allocate all page at once, most common usage */
-		if (ttm_tt_populate(ttm, &ctx)) {
+						ttm->page_flags, cvma.vm_page_prot);
+		if (ttm_tt_populate(bo->ttm, &ctx)) {
 			ret = VM_FAULT_OOM;
 			goto out_io_unlock;
 		}
+	} else {
+		/* Iomem should not be marked encrypted */
+		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
+						TTM_PAGE_FLAG_DECRYPTED, cvma.vm_page_prot);
 	}
 
 	/*
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 7d78e6deac89..c7e223c4f26c 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -984,6 +984,9 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
 	}
 
 	ttm->state = tt_unbound;
+	if (sev_active())
+		ttm->page_flags |= TTM_PAGE_FLAG_DECRYPTED;
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ttm_dma_populate);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index bb46ca0c458f..d3ced89a37e9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -483,8 +483,10 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
 	d.src_pages = src->ttm->pages;
 	d.dst_num_pages = dst->num_pages;
 	d.src_num_pages = src->num_pages;
-	d.dst_prot = ttm_io_prot(dst->mem.placement, PAGE_KERNEL);
-	d.src_prot = ttm_io_prot(src->mem.placement, PAGE_KERNEL);
+	d.dst_prot = ttm_io_prot(dst->mem.placement, dst->ttm->page_flags,
+				 PAGE_KERNEL);
+	d.src_prot = ttm_io_prot(src->mem.placement, src->ttm->page_flags,
+				 PAGE_KERNEL);
 	d.diff = diff;
 
 	for (j = 0; j < h; ++j) {
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 6f536caea368..68ead1bd3042 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -893,13 +893,15 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);
 
 /**
  * ttm_io_prot
  *
- * @c_state: Caching state.
+ * @caching_flags: The caching flags of the map.
+ * @tt_page_flags: The tt_page_flags of the map, TTM_PAGE_FLAG_*
  * @tmp: Page protection flag for a normal, cached mapping.
 *
 * Utility function that returns the pgprot_t that should be used for
- * setting up a PTE with the caching model indicated by @c_state.
+ * setting up a PTE with the caching model indicated by @caching_flags,
+ * and encryption state indicated by @tt_page_flags.
 */
-pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
+pgprot_t ttm_io_prot(u32 caching_flags, u32 tt_page_flags, pgprot_t tmp);
 
 extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
index c0e928abf592..45cc26355513 100644
--- a/include/drm/ttm/ttm_tt.h
+++ b/include/drm/ttm/ttm_tt.h
@@ -41,6 +41,7 @@ struct ttm_operation_ctx;
 #define TTM_PAGE_FLAG_DMA32	(1 << 7)
 #define TTM_PAGE_FLAG_SG	(1 << 8)
 #define TTM_PAGE_FLAG_NO_RETRY	(1 << 9)
+#define TTM_PAGE_FLAG_DECRYPTED	(1 << 10)
 
 enum ttm_caching_state {
 	tt_uncached,
-- 
2.20.1