From: Tom Lendacky <thomas.lendacky@xxxxxxx> In order for memory pages to be properly mapped when SEV is active, we need to use the PAGE_KERNEL protection attribute as the base protection. This will ensure that memory mapping of, e.g. ACPI tables, receives the proper mapping attributes. Signed-off-by: Tom Lendacky <thomas.lendacky@xxxxxxx> --- arch/x86/mm/ioremap.c | 8 ++++++++ include/linux/mm.h | 1 + kernel/resource.c | 40 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 49 insertions(+) diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index c400ab5..481c999 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -151,7 +151,15 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr, pcm = new_pcm; } + /* + * If the page being mapped is in memory and SEV is active then + * make sure the memory encryption attribute is enabled in the + * resulting mapping. + */ prot = PAGE_KERNEL_IO; + if (sev_active() && page_is_mem(pfn)) + prot = __pgprot(pgprot_val(prot) | _PAGE_ENC); + switch (pcm) { case _PAGE_CACHE_MODE_UC: default: diff --git a/include/linux/mm.h b/include/linux/mm.h index b84615b..825df27 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -445,6 +445,7 @@ static inline int get_page_unless_zero(struct page *page) } extern int page_is_ram(unsigned long pfn); +extern int page_is_mem(unsigned long pfn); enum { REGION_INTERSECTS, diff --git a/kernel/resource.c b/kernel/resource.c index 9b5f044..db56ba3 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -518,6 +518,46 @@ int __weak page_is_ram(unsigned long pfn) } EXPORT_SYMBOL_GPL(page_is_ram); +/* + * This function returns true if the target memory is marked as + * IORESOURCE_MEM and IORESOURCE_BUSY and described as other than + * IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES). 
+ */ +static int walk_mem_range(unsigned long start_pfn, unsigned long nr_pages) +{ + struct resource res; + unsigned long pfn, end_pfn; + u64 orig_end; + int ret = -1; + + res.start = (u64) start_pfn << PAGE_SHIFT; + res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1; + res.flags = IORESOURCE_MEM | IORESOURCE_BUSY; + orig_end = res.end; + while ((res.start < res.end) && + (find_next_iomem_res(&res, IORES_DESC_NONE, true) >= 0)) { + pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT; + end_pfn = (res.end + 1) >> PAGE_SHIFT; + if (end_pfn > pfn) + ret = (res.desc != IORES_DESC_NONE) ? 1 : 0; + if (ret) + break; + res.start = res.end + 1; + res.end = orig_end; + } + return ret; +} + +/* + * This generic page_is_mem() returns true if the specified address is + * registered as memory in the iomem_resource list. + */ +int __weak page_is_mem(unsigned long pfn) +{ + return walk_mem_range(pfn, 1) == 1; +} +EXPORT_SYMBOL_GPL(page_is_mem); + /** * region_intersects() - determine intersection of region with known resources * @start: region start address