RE: [PATCH V5 09/12] x86/Swiotlb: Add Swiotlb bounce buffer remap function for HV IVM

From: Tianyu Lan <ltykernel@xxxxxxxxx> Sent: Tuesday, September 14, 2021 6:39 AM
> 
> In an Isolation VM with AMD SEV, the bounce buffer must be accessed
> via an extra address space above the shared_gpa_boundary (e.g. the
> 39-bit address line) reported by the Hyper-V CPUID ISOLATION_CONFIG
> leaf. The physical address used for access is the original physical
> address plus shared_gpa_boundary. In the AMD SEV-SNP spec the
> shared_gpa_boundary is called the virtual top of memory (vTOM).
> Memory addresses below vTOM are automatically treated as private,
> while memory above vTOM is treated as shared.
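
To make the arithmetic concrete, with a 39-bit vTOM the shared alias of
a bounce buffer page would be computed roughly as follows (page_pa is a
hypothetical private physical address; values are illustrative only):

	/* boundary reported by the Hyper-V CPUID ISOLATION_CONFIG leaf */
	u64 shared_gpa_boundary = 1ULL << 39;	/* example: 39-bit vTOM */

	/* alias of the private page that is treated as shared */
	phys_addr_t shared_pa = page_pa + shared_gpa_boundary;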
> 
> Expose swiotlb_unencrypted_base so that platforms can set the base
> offset of the unencrypted memory. When it is set, call memremap() to
> map the bounce buffer in the swiotlb code, store the mapped address,
> and use that address to copy data to/from the swiotlb bounce buffer.
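
For context, the platform side is expected to set this base before the
swiotlb is initialized. A rough sketch of what the Hyper-V side might
look like (assuming ms_hyperv.shared_gpa_boundary has been populated
from the ISOLATION_CONFIG leaf; not necessarily the exact code in this
series):

	if (hv_is_isolation_supported())
		swiotlb_unencrypted_base = ms_hyperv.shared_gpa_boundary;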
> 
> Signed-off-by: Tianyu Lan <Tianyu.Lan@xxxxxxxxxxxxx>
> ---
> Changes since v4:
> 	* Expose swiotlb_unencrypted_base to set the unencrypted
> 	  memory offset.
> 	* Use memremap() to map the bounce buffer if
> 	  swiotlb_unencrypted_base is set.
> 
> Changes since v1:
> 	* Make swiotlb_init_io_tlb_mem() return an error code and
> 	  return that error when dma_map_decrypted() fails.
> ---
>  include/linux/swiotlb.h |  6 ++++++
>  kernel/dma/swiotlb.c    | 41 +++++++++++++++++++++++++++++++++++------
>  2 files changed, 41 insertions(+), 6 deletions(-)
> 
> diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
> index b0cb2a9973f4..4998ed44ae3d 100644
> --- a/include/linux/swiotlb.h
> +++ b/include/linux/swiotlb.h
> @@ -72,6 +72,9 @@ extern enum swiotlb_force swiotlb_force;
>   * @end:	The end address of the swiotlb memory pool. Used to do a quick
>   *		range check to see if the memory was in fact allocated by this
>   *		API.
> + * @vaddr:	The virtual address of the swiotlb memory pool. The memory
> + *		pool may be remapped in the memory-encrypted case, and this
> + *		is the address used for bounce buffer operations.
>   * @nslabs:	The number of IO TLB blocks (in groups of 64) between @start and
>   *		@end. For default swiotlb, this is command line adjustable via
>   *		setup_io_tlb_npages.
> @@ -91,6 +94,7 @@ extern enum swiotlb_force swiotlb_force;
>  struct io_tlb_mem {
>  	phys_addr_t start;
>  	phys_addr_t end;
> +	void *vaddr;
>  	unsigned long nslabs;
>  	unsigned long used;
>  	unsigned int index;
> @@ -185,4 +189,6 @@ static inline bool is_swiotlb_for_alloc(struct device *dev)
>  }
>  #endif /* CONFIG_DMA_RESTRICTED_POOL */
> 
> +extern phys_addr_t swiotlb_unencrypted_base;
> +
>  #endif /* __LINUX_SWIOTLB_H */
> diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
> index 87c40517e822..9e30cc4bd872 100644
> --- a/kernel/dma/swiotlb.c
> +++ b/kernel/dma/swiotlb.c
> @@ -50,6 +50,7 @@
>  #include <asm/io.h>
>  #include <asm/dma.h>
> 
> +#include <linux/io.h>
>  #include <linux/init.h>
>  #include <linux/memblock.h>
>  #include <linux/iommu-helper.h>
> @@ -72,6 +73,8 @@ enum swiotlb_force swiotlb_force;
> 
>  struct io_tlb_mem io_tlb_default_mem;
> 
> +phys_addr_t swiotlb_unencrypted_base;
> +
>  /*
>  * Max segment that we can provide which (if pages are contiguous) will
>   * not be bounced (unless SWIOTLB_FORCE is set).
> @@ -175,7 +178,7 @@ void __init swiotlb_update_mem_attributes(void)
>  	memset(vaddr, 0, bytes);
>  }
> 
> -static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
> +static int swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
>  				    unsigned long nslabs, bool late_alloc)
>  {
>  	void *vaddr = phys_to_virt(start);
> @@ -196,13 +199,34 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
>  		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
>  		mem->slots[i].alloc_size = 0;
>  	}
> +
> +	if (set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT))
> +		return -EFAULT;
> +
> +	/*
> +	 * Map memory in the unencrypted physical address space when requested
> +	 * (e.g. for Hyper-V AMD SEV-SNP Isolation VMs).
> +	 */
> +	if (swiotlb_unencrypted_base) {
> +		phys_addr_t paddr = __pa(vaddr) + swiotlb_unencrypted_base;

Nit:  Use "start" instead of "__pa(vaddr)" since "start" is already the needed
physical address.
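
In other words:

	phys_addr_t paddr = start + swiotlb_unencrypted_base;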

> +
> +		vaddr = memremap(paddr, bytes, MEMREMAP_WB);
> +		if (!vaddr) {
> +			pr_err("Failed to map the unencrypted memory.\n");
> +			return -ENOMEM;
> +		}
> +	}
> +
>  	memset(vaddr, 0, bytes);
> +	mem->vaddr = vaddr;
> +	return 0;
>  }
> 
>  int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
>  {
>  	struct io_tlb_mem *mem = &io_tlb_default_mem;
>  	size_t alloc_size;
> +	int ret;
> 
>  	if (swiotlb_force == SWIOTLB_NO_FORCE)
>  		return 0;
> @@ -217,7 +241,11 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
>  		panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
>  		      __func__, alloc_size, PAGE_SIZE);
> 
> -	swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false);
> +	ret = swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false);
> +	if (ret) {
> +		memblock_free(__pa(mem->slots), alloc_size);
> +		return ret;
> +	}
> 
>  	if (verbose)
>  		swiotlb_print_info();
> @@ -304,7 +332,7 @@ int
>  swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
>  {
>  	struct io_tlb_mem *mem = &io_tlb_default_mem;
> -	unsigned long bytes = nslabs << IO_TLB_SHIFT;
> +	int ret;
> 
>  	if (swiotlb_force == SWIOTLB_NO_FORCE)
>  		return 0;
> @@ -318,8 +346,9 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
>  	if (!mem->slots)
>  		return -ENOMEM;
> 
> -	set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
> -	swiotlb_init_io_tlb_mem(mem, virt_to_phys(tlb), nslabs, true);
> +	ret = swiotlb_init_io_tlb_mem(mem, virt_to_phys(tlb), nslabs, true);
> +	if (ret)

Before returning the error, free the pages obtained from the earlier call
to __get_free_pages()?
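
Something like the following, mirroring the allocation size (untested,
and assuming the allocation still uses get_order(array_size(...))):

	if (ret) {
		free_pages((unsigned long)mem->slots,
			   get_order(array_size(sizeof(*mem->slots), nslabs)));
		mem->slots = NULL;
		return ret;
	}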

> +		return ret;
> 
>  	swiotlb_print_info();
>  	swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT);
> @@ -371,7 +400,7 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
>  	phys_addr_t orig_addr = mem->slots[index].orig_addr;
>  	size_t alloc_size = mem->slots[index].alloc_size;
>  	unsigned long pfn = PFN_DOWN(orig_addr);
> -	unsigned char *vaddr = phys_to_virt(tlb_addr);
> +	unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start;
>  	unsigned int tlb_offset, orig_addr_offset;
> 
>  	if (orig_addr == INVALID_PHYS_ADDR)
> --
> 2.25.1



