From: Tianyu Lan <ltykernel@xxxxxxxxx> Sent: Tuesday, November 23, 2021 6:31 AM
>
> In Isolation VM with AMD SEV, bounce buffer needs to be accessed via
> extra address space which is above shared_gpa_boundary (E.G 39 bit
> address line) reported by Hyper-V CPUID ISOLATION_CONFIG. The access
> physical address will be original physical address + shared_gpa_boundary.
> The shared_gpa_boundary in the AMD SEV SNP spec is called virtual top of
> memory(vTOM). Memory addresses below vTOM are automatically treated as
> private while memory above vTOM is treated as shared.
>
> Expose swiotlb_unencrypted_base for platforms to set unencrypted
> memory base offset and platform calls swiotlb_update_mem_attributes()
> to remap swiotlb mem to unencrypted address space. memremap() can
> not be called in the early stage and so put remapping code into
> swiotlb_update_mem_attributes(). Store remap address and use it to copy
> data from/to swiotlb bounce buffer.
>
> Signed-off-by: Tianyu Lan <Tianyu.Lan@xxxxxxxxxxxxx>
> ---
> Change since v1:
> 	* Rework comment in the swiotlb_init_io_tlb_mem()
> 	* Make swiotlb_init_io_tlb_mem() back to return void.
> ---
>  include/linux/swiotlb.h |  6 ++++++
>  kernel/dma/swiotlb.c    | 53 +++++++++++++++++++++++++++++++++++++----
>  2 files changed, 54 insertions(+), 5 deletions(-)
>
> diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
> index 569272871375..f6c3638255d5 100644
> --- a/include/linux/swiotlb.h
> +++ b/include/linux/swiotlb.h
> @@ -73,6 +73,9 @@ extern enum swiotlb_force swiotlb_force;
>   * @end:	The end address of the swiotlb memory pool. Used to do a quick
>   *		range check to see if the memory was in fact allocated by this
>   *		API.
> + * @vaddr:	The vaddr of the swiotlb memory pool. The swiotlb memory pool
> + *		may be remapped in the memory encrypted case and store virtual
> + *		address for bounce buffer operation.
>   * @nslabs:	The number of IO TLB blocks (in groups of 64) between @start and
>   *		@end. For default swiotlb, this is command line adjustable via
>   *		setup_io_tlb_npages.
> @@ -92,6 +95,7 @@ extern enum swiotlb_force swiotlb_force;
>  struct io_tlb_mem {
>  	phys_addr_t start;
>  	phys_addr_t end;
> +	void *vaddr;
>  	unsigned long nslabs;
>  	unsigned long used;
>  	unsigned int index;
> @@ -186,4 +190,6 @@ static inline bool is_swiotlb_for_alloc(struct device *dev)
>  }
>  #endif /* CONFIG_DMA_RESTRICTED_POOL */
>
> +extern phys_addr_t swiotlb_unencrypted_base;
> +
>  #endif /* __LINUX_SWIOTLB_H */
> diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
> index 8e840fbbed7c..c303fdeba82f 100644
> --- a/kernel/dma/swiotlb.c
> +++ b/kernel/dma/swiotlb.c
> @@ -50,6 +50,7 @@
>  #include <asm/io.h>
>  #include <asm/dma.h>
>
> +#include <linux/io.h>
>  #include <linux/init.h>
>  #include <linux/memblock.h>
>  #include <linux/iommu-helper.h>
> @@ -72,6 +73,8 @@ enum swiotlb_force swiotlb_force;
>
>  struct io_tlb_mem io_tlb_default_mem;
>
> +phys_addr_t swiotlb_unencrypted_base;
> +
>  /*
>   * Max segment that we can provide which (if pages are contingous) will
>   * not be bounced (unless SWIOTLB_FORCE is set).
> @@ -155,6 +158,31 @@ static inline unsigned long nr_slots(u64 val)
>  	return DIV_ROUND_UP(val, IO_TLB_SIZE);
>  }
>
> +/*
> + * Remap swioltb memory in the unencrypted physical address space
> + * when swiotlb_unencrypted_base is set. (e.g. for Hyper-V AMD SEV-SNP
> + * Isolation VMs).
> + */
> +void *swiotlb_mem_remap(struct io_tlb_mem *mem, unsigned long bytes)
> +{
> +	void *vaddr;
> +
> +	if (swiotlb_unencrypted_base) {
> +		phys_addr_t paddr = mem->start + swiotlb_unencrypted_base;
> +
> +		vaddr = memremap(paddr, bytes, MEMREMAP_WB);
> +		if (!vaddr) {
> +			pr_err("Failed to map the unencrypted memory %llx size %lx.\n",
> +			       paddr, bytes);
> +			return NULL;
> +		}
> +
> +		return vaddr;
> +	}
> +
> +	return phys_to_virt(mem->start);
> +}
> +
>  /*
>   * Early SWIOTLB allocation may be too early to allow an architecture to
>   * perform the desired operations. This function allows the architecture to
> @@ -172,7 +200,14 @@ void __init swiotlb_update_mem_attributes(void)
>  	vaddr = phys_to_virt(mem->start);
>  	bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT);
>  	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
> -	memset(vaddr, 0, bytes);
> +
> +	mem->vaddr = swiotlb_mem_remap(mem, bytes);
> +	if (!mem->vaddr) {
> +		pr_err("Fail to remap swiotlb mem.\n");
> +		return;
> +	}
> +
> +	memset(mem->vaddr, 0, bytes);
>  }

In the error case, do you want to leave mem->vaddr as NULL?  Or is it
better to leave it as the virtual address of mem->start?  Your code
leaves it as NULL.

The interaction between swiotlb_update_mem_attributes() and the helper
function swiotlb_mem_remap() seems kind of clunky.  phys_to_virt()
gets called twice, for example, and two error messages are printed.
The code would be more straightforward by just putting the helper
function inline:

	mem->vaddr = phys_to_virt(mem->start);
	bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT);
	set_memory_decrypted((unsigned long)(mem->vaddr), bytes >> PAGE_SHIFT);

	if (swiotlb_unencrypted_base) {
		phys_addr_t paddr = mem->start + swiotlb_unencrypted_base;

		mem->vaddr = memremap(paddr, bytes, MEMREMAP_WB);
		if (!mem->vaddr) {
			pr_err("Failed to map the unencrypted memory %llx size %lx.\n",
			       paddr, bytes);
			return;
		}
	}

	memset(mem->vaddr, 0, bytes);

(This version also leaves mem->vaddr as NULL in the error case.)

>
>  static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
> @@ -196,7 +231,18 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
>  		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
>  		mem->slots[i].alloc_size = 0;
>  	}
> +
> +	/*
> +	 * If swiotlb_unencrypted_base is set, the bounce buffer memory will
> +	 * be remapped and cleared in swiotlb_update_mem_attributes.
> +	 */
> +	if (swiotlb_unencrypted_base)
> +		return;
> +
> +	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);

Prior to this patch, and here in the new version as well, the return
value from set_memory_decrypted() is ignored in several places in this
file.  As previously discussed, swiotlb_init_io_tlb_mem() is a void
function, so there's no place to return an error.  Is that OK?
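If it ever becomes worth handling, one option that doesn't require
changing the void function signature is to warn and bail out before
touching the memory.  A minimal sketch, purely illustrative and not
something this patch needs to adopt:

	/*
	 * set_memory_decrypted() returns 0 on success.  If it fails,
	 * warn and skip the memset, since the buffer would still be
	 * encrypted and writing to it would be unsafe.
	 */
	if (WARN_ON(set_memory_decrypted((unsigned long)vaddr,
					 bytes >> PAGE_SHIFT)))
		return;

But that just silently leaves a broken swiotlb, so it's debatable
whether it is any better than the current behavior.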
>  	memset(vaddr, 0, bytes);
> +	mem->vaddr = vaddr;
> +	return;
>  }
>
>  int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
> @@ -318,7 +364,6 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
>  	if (!mem->slots)
>  		return -ENOMEM;
>
> -	set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
>  	swiotlb_init_io_tlb_mem(mem, virt_to_phys(tlb), nslabs, true);
>
>  	swiotlb_print_info();
> @@ -371,7 +416,7 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
>  	phys_addr_t orig_addr = mem->slots[index].orig_addr;
>  	size_t alloc_size = mem->slots[index].alloc_size;
>  	unsigned long pfn = PFN_DOWN(orig_addr);
> -	unsigned char *vaddr = phys_to_virt(tlb_addr);
> +	unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start;
>  	unsigned int tlb_offset, orig_addr_offset;
>
>  	if (orig_addr == INVALID_PHYS_ADDR)
> @@ -806,8 +851,6 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
>  		return -ENOMEM;
>  	}
>
> -	set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
> -			     rmem->size >> PAGE_SHIFT);
>  	swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, false);
>  	mem->force_bounce = true;
>  	mem->for_alloc = true;
> --
> 2.25.1
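One more note for other reviewers: the consumer side of
swiotlb_unencrypted_base is not in this patch.  My understanding is
that the platform just sets the global during early init, before
swiotlb_update_mem_attributes() runs.  A rough sketch of what that
presumably looks like on the Hyper-V side -- the exact init hook and
guard here are my assumption, not a quote from the series:

	/*
	 * Hypothetical Hyper-V init path: point swiotlb at the shared
	 * (unencrypted) alias above vTOM before the bounce buffer is
	 * remapped.
	 */
	if (hv_is_isolation_supported())
		swiotlb_unencrypted_base = ms_hyperv.shared_gpa_boundary;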