From: Brijesh Singh <brijesh.singh@xxxxxxx>

snp_lookup_rmpentry() can be used by the host to read the RMP entry for
a given page. The RMP entry format is documented in the AMD PPR, see
https://bugzilla.kernel.org/attachment.cgi?id=296015.

Co-developed-by: Ashish Kalra <ashish.kalra@xxxxxxx>
Signed-off-by: Ashish Kalra <ashish.kalra@xxxxxxx>
Signed-off-by: Brijesh Singh <brijesh.singh@xxxxxxx>
[mdr: separate 'assigned' indicator from return code]
Signed-off-by: Michael Roth <michael.roth@xxxxxxx>
---
 arch/x86/include/asm/sev-common.h |  4 +++
 arch/x86/include/asm/sev-host.h   | 22 +++++++++++++
 arch/x86/virt/svm/sev.c           | 53 +++++++++++++++++++++++++++++++
 3 files changed, 79 insertions(+)
 create mode 100644 arch/x86/include/asm/sev-host.h

diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h
index b463fcbd4b90..1e6fb93d8ab0 100644
--- a/arch/x86/include/asm/sev-common.h
+++ b/arch/x86/include/asm/sev-common.h
@@ -173,4 +173,8 @@ struct snp_psc_desc {
 #define GHCB_ERR_INVALID_INPUT		5
 #define GHCB_ERR_INVALID_EVENT		6
 
+/* RMP page size */
+#define RMP_PG_SIZE_4K			0
+#define RMP_TO_X86_PG_LEVEL(level)	(((level) == RMP_PG_SIZE_4K) ? PG_LEVEL_4K : PG_LEVEL_2M)
+
 #endif
diff --git a/arch/x86/include/asm/sev-host.h b/arch/x86/include/asm/sev-host.h
new file mode 100644
index 000000000000..4c487ce8457f
--- /dev/null
+++ b/arch/x86/include/asm/sev-host.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AMD SVM-SEV Host Support.
+ *
+ * Copyright (C) 2023 Advanced Micro Devices, Inc.
+ *
+ * Author: Ashish Kalra <ashish.kalra@xxxxxxx>
+ *
+ */
+
+#ifndef __ASM_X86_SEV_HOST_H
+#define __ASM_X86_SEV_HOST_H
+
+#include <asm/sev-common.h>
+
+#ifdef CONFIG_KVM_AMD_SEV
+int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level);
+#else
+static inline int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level) { return -ENXIO; }
+#endif
+
+#endif
diff --git a/arch/x86/virt/svm/sev.c b/arch/x86/virt/svm/sev.c
index 8b9ed72489e4..7d3802605376 100644
--- a/arch/x86/virt/svm/sev.c
+++ b/arch/x86/virt/svm/sev.c
@@ -53,6 +53,9 @@ struct rmpentry {
  */
 #define RMPTABLE_CPU_BOOKKEEPING_SZ	0x4000
 
+/* Mask to apply to a PFN to get the first PFN of a 2MB page */
+#define PFN_PMD_MASK	(~((1ULL << (PMD_SHIFT - PAGE_SHIFT)) - 1))
+
 static struct rmpentry *rmptable_start __ro_after_init;
 static u64 rmptable_max_pfn __ro_after_init;
 
@@ -237,3 +240,53 @@ static int __init snp_rmptable_init(void)
  * the page(s) used for DMA are hypervisor owned.
  */
 fs_initcall(snp_rmptable_init);
+
+static int rmptable_entry(u64 pfn, struct rmpentry *entry)
+{
+	if (WARN_ON_ONCE(pfn > rmptable_max_pfn))
+		return -EFAULT;
+
+	*entry = rmptable_start[pfn];
+
+	return 0;
+}
+
+static int __snp_lookup_rmpentry(u64 pfn, struct rmpentry *entry, int *level)
+{
+	struct rmpentry large_entry;
+	int ret;
+
+	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+		return -ENXIO;
+
+	ret = rmptable_entry(pfn, entry);
+	if (ret)
+		return ret;
+
+	/*
+	 * Find the authoritative RMP entry for a PFN. This can be either a 4K
+	 * RMP entry or a special large RMP entry that is authoritative for a
+	 * whole 2M area.
+	 */
+	ret = rmptable_entry(pfn & PFN_PMD_MASK, &large_entry);
+	if (ret)
+		return ret;
+
+	*level = RMP_TO_X86_PG_LEVEL(large_entry.pagesize);
+
+	return 0;
+}
+
+int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level)
+{
+	struct rmpentry e;
+	int ret;
+
+	ret = __snp_lookup_rmpentry(pfn, &e, level);
+	if (ret)
+		return ret;
+
+	*assigned = !!e.assigned;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snp_lookup_rmpentry);
--
2.25.1
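
A minimal usage sketch of the new interface (illustrative only, not part
of the series): the helper name rmp_debug_report() and its pfn argument
are made up for the example, and it assumes CONFIG_KVM_AMD_SEV=y so the
real snp_lookup_rmpentry() implementation is available.

  #include <linux/kernel.h>
  #include <asm/pgtable_types.h>
  #include <asm/sev-host.h>

  /* Hypothetical helper: log the RMP state of a single PFN. */
  static int rmp_debug_report(u64 pfn)
  {
  	bool assigned;
  	int level, ret;

  	/* Returns -ENXIO without SNP, -EFAULT if pfn is beyond the RMP table. */
  	ret = snp_lookup_rmpentry(pfn, &assigned, &level);
  	if (ret)
  		return ret;

  	pr_debug("pfn 0x%llx: %s, RMP coverage at %s level\n", pfn,
  		 assigned ? "assigned" : "hypervisor-owned",
  		 level == PG_LEVEL_2M ? "2M" : "4K");
  	return 0;
  }

The 'assigned' output tells the caller whether the page is owned by a
guest or firmware rather than the hypervisor, while 'level' reports
whether the authoritative RMP entry covers a 4K page or a whole 2M
region, per RMP_TO_X86_PG_LEVEL() above.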