Extend sev_smoke_test to also run a minimal SEV-SNP smoke test that
initializes and sets up the private memory regions required to run a
simple SEV-SNP guest. Like its SEV-ES smoke test counterpart, this test
does not yet support GHCB or ucall, so it uses the GHCB MSR protocol to
trigger an exit of type KVM_EXIT_SYSTEM_EVENT.

Also, decouple policy from VM type and require callers to pass both
explicitly, so that the VM type is no longer inferred from the policy.

Signed-off-by: Pratik R. Sampat <pratikrajesh.sampat@xxxxxxx>
---
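Note for reviewers: below is a minimal usage sketch of the helpers this
patch adds; it is illustrative only and not part of the diff. The
smoke_snp() wrapper is hypothetical, but every function it calls exists
in the selftest library once this patch is applied.

	static void smoke_snp(void *guest_code)
	{
		struct kvm_vcpu *vcpu;
		struct kvm_vm *vm;

		/*
		 * SNP_POLICY sets the SMT and reserved must-be-one bits;
		 * main() clears SNP_POLICY_SMT when SMT is disabled.
		 */
		vm = vm_sev_create_with_one_vcpu(KVM_X86_SNP_VM, guest_code, &vcpu);
		vm_sev_launch(vm, SNP_POLICY, NULL);	/* NULL: skip measurement */

		/* The guest exits via a GHCB MSR protocol termination request. */
		vcpu_run(vcpu);
		TEST_ASSERT(vcpu->run->exit_reason == KVM_EXIT_SYSTEM_EVENT,
			    "Wanted SYSTEM_EVENT, got %s",
			    exit_reason_str(vcpu->run->exit_reason));

		kvm_vm_free(vm);
	}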
 .../selftests/kvm/include/x86_64/processor.h  |   1 +
 .../selftests/kvm/include/x86_64/sev.h        |  54 +++++++-
 tools/testing/selftests/kvm/lib/kvm_util.c    |   8 +-
 .../selftests/kvm/lib/x86_64/processor.c      |   6 +-
 tools/testing/selftests/kvm/lib/x86_64/sev.c  | 116 +++++++++++++++++-
 .../selftests/kvm/x86_64/sev_smoke_test.c     |  70 +++++++++--
 6 files changed, 233 insertions(+), 22 deletions(-)

diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index a0c1440017bb..8acf06cc66cb 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -195,6 +195,7 @@ struct kvm_x86_cpu_feature {
 #define X86_FEATURE_VGIF		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 16)
 #define X86_FEATURE_SEV			KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 1)
 #define X86_FEATURE_SEV_ES		KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 3)
+#define X86_FEATURE_SNP			KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 4)
 
 /*
  * KVM defined paravirt features.
diff --git a/tools/testing/selftests/kvm/include/x86_64/sev.h b/tools/testing/selftests/kvm/include/x86_64/sev.h
index 3998152cc081..658c3cca208d 100644
--- a/tools/testing/selftests/kvm/include/x86_64/sev.h
+++ b/tools/testing/selftests/kvm/include/x86_64/sev.h
@@ -22,8 +22,21 @@ enum sev_guest_state {
 	SEV_GUEST_STATE_RUNNING,
 };
 
+/* Minimum firmware version required for SEV-SNP support */
+#define SNP_FW_REQ_VER_MAJOR	1
+#define SNP_FW_REQ_VER_MINOR	51
+#define SNP_POLICY_MINOR_BIT	0
+#define SNP_POLICY_MAJOR_BIT	8
+
 #define SEV_POLICY_NO_DBG	(1UL << 0)
 #define SEV_POLICY_ES		(1UL << 2)
+#define SNP_POLICY_SMT		(1ULL << 16)
+#define SNP_POLICY_RSVD_MBO	(1ULL << 17)
+#define SNP_POLICY_DBG		(1ULL << 19)
+#define SNP_POLICY		(SNP_POLICY_SMT | SNP_POLICY_RSVD_MBO)
+
+#define SNP_FW_VER_MAJOR(maj)	((uint8_t)(maj) << SNP_POLICY_MAJOR_BIT)
+#define SNP_FW_VER_MINOR(min)	((uint8_t)(min) << SNP_POLICY_MINOR_BIT)
 
 #define GHCB_MSR_TERM_REQ	0x100
 
@@ -32,14 +45,22 @@ int __sev_vm_launch_start(struct kvm_vm *vm, uint32_t policy);
 int __sev_vm_launch_update(struct kvm_vm *vm, uint32_t policy);
 int __sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement);
 int __sev_vm_launch_finish(struct kvm_vm *vm);
+int __snp_vm_launch_start(struct kvm_vm *vm, uint64_t policy, uint8_t flags);
+int __snp_vm_launch_update(struct kvm_vm *vm, uint8_t page_type);
+int __snp_vm_launch_finish(struct kvm_vm *vm, uint16_t flags);
 
 void sev_vm_launch(struct kvm_vm *vm, uint32_t policy);
 void sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement);
 void sev_vm_launch_finish(struct kvm_vm *vm);
+void snp_vm_launch_start(struct kvm_vm *vm, uint64_t policy);
+void snp_vm_launch_update(struct kvm_vm *vm);
+void snp_vm_launch_finish(struct kvm_vm *vm);
+
+bool is_kvm_snp_supported(void);
 
 struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
 					   struct kvm_vcpu **cpu);
-void vm_sev_launch(struct kvm_vm *vm, uint32_t policy, uint8_t *measurement);
+void vm_sev_launch(struct kvm_vm *vm, uint64_t policy, uint8_t *measurement);
 
 kvm_static_assert(SEV_RET_SUCCESS == 0);
@@ -74,8 +95,18 @@ kvm_static_assert(SEV_RET_SUCCESS == 0);
 	__TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, vm);		\
 })
 
+/* Ensure the policy is within bounds for SEV and SEV-ES */
+#define ASSERT_SEV_POLICY(type, policy)					\
+({									\
+	if (type == KVM_X86_SEV_VM || type == KVM_X86_SEV_ES_VM) {	\
+		TEST_ASSERT(policy <= ((uint32_t)~0U),			\
+			    "Policy beyond bounds for SEV");		\
+	}								\
+})
+
 void sev_vm_init(struct kvm_vm *vm);
 void sev_es_vm_init(struct kvm_vm *vm);
+void snp_vm_init(struct kvm_vm *vm);
 
 static inline void sev_register_encrypted_memory(struct kvm_vm *vm,
 						 struct userspace_mem_region *region)
@@ -99,6 +130,19 @@ static inline int __sev_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa,
 	return __vm_sev_ioctl(vm, KVM_SEV_LAUNCH_UPDATE_DATA, &update_data);
 }
 
+static inline int __snp_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa,
+					   uint64_t hva, uint64_t size, uint8_t type)
+{
+	struct kvm_sev_snp_launch_update update_data = {
+		.uaddr = hva,
+		.gfn_start = gpa >> PAGE_SHIFT,
+		.len = size,
+		.type = type,
+	};
+
+	return __vm_sev_ioctl(vm, KVM_SEV_SNP_LAUNCH_UPDATE, &update_data);
+}
+
 static inline void sev_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa,
 					  uint64_t hva, uint64_t size)
 {
@@ -107,4 +151,12 @@ static inline void sev_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa,
 	TEST_ASSERT_VM_VCPU_IOCTL(!ret, KVM_SEV_LAUNCH_UPDATE_DATA, ret, vm);
 }
 
+static inline void snp_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa,
+					  uint64_t hva, uint64_t size, uint8_t type)
+{
+	int ret = __snp_launch_update_data(vm, gpa, hva, size, type);
+
+	TEST_ASSERT_VM_VCPU_IOCTL(!ret, KVM_SEV_SNP_LAUNCH_UPDATE, ret, vm);
+}
+
 #endif /* SELFTEST_KVM_SEV_H */
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 56b170b725b3..9cc4dfc72329 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -413,14 +413,18 @@ struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
 					 nr_extra_pages);
 	struct userspace_mem_region *slot0;
 	struct kvm_vm *vm;
-	int i;
+	int i, flags = 0;
 
 	pr_debug("%s: mode='%s' type='%d', pages='%ld'\n", __func__,
 		 vm_guest_mode_string(shape.mode), shape.type, nr_pages);
 
 	vm = ____vm_create(shape);
 
-	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0, 0, nr_pages, 0);
+	if (shape.type == KVM_X86_SNP_VM)
+		flags |= KVM_MEM_GUEST_MEMFD;
+
+	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0, 0, nr_pages, flags);
+
 	for (i = 0; i < NR_MEM_REGIONS; i++)
 		vm->memslots[i] = 0;
 
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index 153739f2e201..670fb09c43d0 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -629,7 +629,8 @@ void kvm_arch_vm_post_create(struct kvm_vm *vm)
 	sync_global_to_guest(vm, host_cpu_is_amd);
 	sync_global_to_guest(vm, is_forced_emulation_enabled);
 
-	if (vm->type == KVM_X86_SEV_VM || vm->type == KVM_X86_SEV_ES_VM) {
+	if (vm->type == KVM_X86_SEV_VM || vm->type == KVM_X86_SEV_ES_VM ||
+	    vm->type == KVM_X86_SNP_VM) {
 		struct kvm_sev_init init = { 0 };
 
 		vm_sev_ioctl(vm, KVM_SEV_INIT2, &init);
@@ -1138,7 +1139,8 @@ void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits)
 
 void kvm_init_vm_address_properties(struct kvm_vm *vm)
 {
-	if (vm->type == KVM_X86_SEV_VM || vm->type == KVM_X86_SEV_ES_VM) {
+	if (vm->type == KVM_X86_SEV_VM || vm->type == KVM_X86_SEV_ES_VM ||
+	    vm->type == KVM_X86_SNP_VM) {
 		vm->arch.sev_fd = open_sev_dev_path_or_exit();
 		vm->arch.c_bit = BIT_ULL(this_cpu_property(X86_PROPERTY_SEV_C_BIT));
 		vm->gpa_tag_mask = vm->arch.c_bit;
diff --git a/tools/testing/selftests/kvm/lib/x86_64/sev.c b/tools/testing/selftests/kvm/lib/x86_64/sev.c
index 290eb0bd495e..75c1463b98d4 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/sev.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/sev.c
@@ -14,7 +14,8 @@
  * and find the first range, but that's correct because the condition
  * expression would cause us to quit the loop.
  */
-static int encrypt_region(struct kvm_vm *vm, struct userspace_mem_region *region)
+static int encrypt_region(struct kvm_vm *vm, struct userspace_mem_region *region,
+			  uint8_t page_type)
 {
 	const struct sparsebit *protected_phy_pages = region->protected_phy_pages;
 	const vm_paddr_t gpa_base = region->region.guest_phys_addr;
@@ -25,12 +26,23 @@ static int encrypt_region(struct kvm_vm *vm, struct userspace_mem_region *regio
 	if (!sparsebit_any_set(protected_phy_pages))
 		return 0;
 
-	sev_register_encrypted_memory(vm, region);
+	if (vm->type == KVM_X86_SEV_VM || vm->type == KVM_X86_SEV_ES_VM)
+		sev_register_encrypted_memory(vm, region);
 
 	sparsebit_for_each_set_range(protected_phy_pages, i, j) {
 		const uint64_t size = (j - i + 1) * vm->page_size;
 		const uint64_t offset = (i - lowest_page_in_region) * vm->page_size;
 
+		if (vm->type == KVM_X86_SNP_VM) {
+			vm_mem_set_private(vm, gpa_base + offset, size);
+			ret = __snp_launch_update_data(vm, gpa_base + offset,
+						       (uint64_t)addr_gpa2hva(vm, gpa_base + offset),
+						       size, page_type);
+			if (ret)
+				return ret;
+			continue;
+		}
+
 		ret = __sev_launch_update_data(vm, gpa_base + offset,
 					       (uint64_t)addr_gpa2hva(vm, gpa_base + offset),
 					       size);
@@ -68,6 +80,14 @@ void sev_es_vm_init(struct kvm_vm *vm)
 	}
 }
 
+void snp_vm_init(struct kvm_vm *vm)
+{
+	struct kvm_sev_init init = { 0 };
+
+	assert(vm->type == KVM_X86_SNP_VM);
+	vm_sev_ioctl(vm, KVM_SEV_INIT2, &init);
+}
+
 int __sev_vm_launch_start(struct kvm_vm *vm, uint32_t policy)
 {
 	struct kvm_sev_launch_start launch_start = {
@@ -83,7 +103,7 @@ int __sev_vm_launch_update(struct kvm_vm *vm, uint32_t policy)
 	int ctr;
 
 	hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) {
-		int ret = encrypt_region(vm, region);
+		int ret = encrypt_region(vm, region, 0);
 
 		if (ret)
 			return ret;
@@ -112,6 +132,41 @@ int __sev_vm_launch_finish(struct kvm_vm *vm)
 	return __vm_sev_ioctl(vm, KVM_SEV_LAUNCH_FINISH, NULL);
 }
 
+int __snp_vm_launch_start(struct kvm_vm *vm, uint64_t policy, uint8_t flags)
+{
+	struct kvm_sev_snp_launch_start launch_start = {
+		.policy = policy,
+		.flags = flags,
+	};
+
+	return __vm_sev_ioctl(vm, KVM_SEV_SNP_LAUNCH_START, &launch_start);
+}
+
+int __snp_vm_launch_update(struct kvm_vm *vm, uint8_t page_type)
+{
+	struct userspace_mem_region *region;
+	int ctr, ret;
+
+	hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) {
+		ret = encrypt_region(vm, region, page_type);
+		if (ret)
+			return ret;
+	}
+
+	vm->arch.is_pt_protected = true;
+
+	return 0;
+}
+
+int __snp_vm_launch_finish(struct kvm_vm *vm, uint16_t flags)
+{
+	struct kvm_sev_snp_launch_finish launch_finish = {
+		.flags = flags,
+	};
+
+	return __vm_sev_ioctl(vm, KVM_SEV_SNP_LAUNCH_FINISH, &launch_finish);
+}
+
 void sev_vm_launch(struct kvm_vm *vm, uint32_t policy)
 {
 	struct kvm_sev_guest_status status;
@@ -158,6 +213,45 @@ void sev_vm_launch_finish(struct kvm_vm *vm)
 	TEST_ASSERT_EQ(status.state, SEV_GUEST_STATE_RUNNING);
 }
 
+void snp_vm_launch_start(struct kvm_vm *vm, uint64_t policy)
+{
+	int ret = __snp_vm_launch_start(vm, policy, 0);
+
+	TEST_ASSERT_VM_VCPU_IOCTL(!ret, KVM_SEV_SNP_LAUNCH_START, ret, vm);
+}
+
+void snp_vm_launch_update(struct kvm_vm *vm)
+{
+	int ret = __snp_vm_launch_update(vm, KVM_SEV_SNP_PAGE_TYPE_NORMAL);
+
+	TEST_ASSERT_VM_VCPU_IOCTL(!ret, KVM_SEV_SNP_LAUNCH_UPDATE, ret, vm);
+}
+
+void snp_vm_launch_finish(struct kvm_vm *vm)
+{
+	int ret = __snp_vm_launch_finish(vm, 0);
+
+	TEST_ASSERT_VM_VCPU_IOCTL(!ret, KVM_SEV_SNP_LAUNCH_FINISH, ret, vm);
+}
+
+bool is_kvm_snp_supported(void)
+{
+	int sev_fd = open_sev_dev_path_or_exit();
+	struct sev_user_data_status sev_status;
+
+	struct sev_issue_cmd arg = {
+		.cmd = SEV_PLATFORM_STATUS,
+		.data = (unsigned long)&sev_status,
+	};
+
+	kvm_ioctl(sev_fd, SEV_ISSUE_CMD, &arg);
+	close(sev_fd);
+
+	return sev_status.api_major > SNP_FW_REQ_VER_MAJOR ||
+	       (sev_status.api_major == SNP_FW_REQ_VER_MAJOR &&
+		sev_status.api_minor >= SNP_FW_REQ_VER_MINOR);
+}
+
 struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
 					   struct kvm_vcpu **cpu)
 {
@@ -174,8 +268,22 @@ struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
 	return vm;
 }
 
-void vm_sev_launch(struct kvm_vm *vm, uint32_t policy, uint8_t *measurement)
+void vm_sev_launch(struct kvm_vm *vm, uint64_t policy, uint8_t *measurement)
 {
+	if (vm->type == KVM_X86_SNP_VM) {
+		vm_enable_cap(vm, KVM_CAP_EXIT_HYPERCALL, (1 << KVM_HC_MAP_GPA_RANGE));
+
+		snp_vm_launch_start(vm, policy);
+
+		snp_vm_launch_update(vm);
+
+		snp_vm_launch_finish(vm);
+
+		return;
+	}
+
+	ASSERT_SEV_POLICY(vm->type, policy);
+
 	sev_vm_launch(vm, policy);
 
 	if (!measurement)
diff --git a/tools/testing/selftests/kvm/x86_64/sev_smoke_test.c b/tools/testing/selftests/kvm/x86_64/sev_smoke_test.c
index 04f24d5f0987..0b65b59b9b40 100644
--- a/tools/testing/selftests/kvm/x86_64/sev_smoke_test.c
+++ b/tools/testing/selftests/kvm/x86_64/sev_smoke_test.c
@@ -16,6 +16,30 @@
 
 #define XFEATURE_MASK_X87_AVX (XFEATURE_MASK_FP | XFEATURE_MASK_SSE | XFEATURE_MASK_YMM)
 
+static bool is_smt_active(void)
+{
+	bool active;
+	FILE *f;
+
+	f = fopen("/sys/devices/system/cpu/smt/active", "r");
+	if (!f)
+		return false;
+
+	active = fgetc(f) == '1';
+	fclose(f);
+	return active;
+}
+
+static void guest_snp_code(void)
+{
+	GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ENABLED);
+	GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ES_ENABLED);
+	GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_SNP_ENABLED);
+
+	wrmsr(MSR_AMD64_SEV_ES_GHCB, GHCB_MSR_TERM_REQ);
+	__asm__ __volatile__("rep; vmmcall");
+}
+
 static void guest_sev_es_code(void)
 {
 	/* TODO: Check CPUID after GHCB-based hypercall support is added. */
@@ -61,7 +85,7 @@ static void compare_xsave(u8 *from_host, u8 *from_guest)
 	abort();
 }
 
-static void test_sync_vmsa(uint32_t policy)
+static void test_sync_vmsa(uint32_t type, uint64_t policy)
 {
 	struct kvm_vcpu *vcpu;
 	struct kvm_vm *vm;
@@ -77,7 +101,10 @@ static void test_sync_vmsa(uint32_t policy)
 		.xcrs[0].value = XFEATURE_MASK_X87_AVX,
 	};
 
-	vm = vm_sev_create_with_one_vcpu(KVM_X86_SEV_ES_VM, guest_code_xsave, &vcpu);
+	TEST_ASSERT(type != KVM_X86_SEV_VM,
+		    "sync_vmsa only supported for SEV-ES and SNP VM types");
+
+	vm = vm_sev_create_with_one_vcpu(type, guest_code_xsave, &vcpu);
 	gva = vm_vaddr_alloc_shared(vm, PAGE_SIZE, KVM_UTIL_MIN_VADDR,
 				    MEM_REGION_TEST_DATA);
 	hva = addr_gva2hva(vm, gva);
@@ -99,7 +126,7 @@ static void test_sync_vmsa(uint32_t policy)
 	    : "ymm4", "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)");
 	vcpu_xsave_set(vcpu, &xsave);
 
-	vm_sev_launch(vm, SEV_POLICY_ES | policy, NULL);
+	vm_sev_launch(vm, policy, NULL);
 
 	/* This page is shared, so make it decrypted.  */
 	memset(hva, 0, 4096);
@@ -118,14 +145,12 @@ static void test_sync_vmsa(uint32_t policy)
 	kvm_vm_free(vm);
 }
 
-static void test_sev(void *guest_code, uint64_t policy)
+static void test_sev(void *guest_code, uint32_t type, uint64_t policy)
 {
 	struct kvm_vcpu *vcpu;
 	struct kvm_vm *vm;
 	struct ucall uc;
 
-	uint32_t type = policy & SEV_POLICY_ES ? KVM_X86_SEV_ES_VM : KVM_X86_SEV_VM;
-
 	vm = vm_sev_create_with_one_vcpu(type, guest_code, &vcpu);
 
 	/* TODO: Validate the measurement is as expected. */
@@ -134,7 +159,7 @@ static void test_sev(void *guest_code, uint64_t policy)
 	for (;;) {
 		vcpu_run(vcpu);
 
-		if (policy & SEV_POLICY_ES) {
+		if (vm->type == KVM_X86_SEV_ES_VM || vm->type == KVM_X86_SNP_VM) {
 			TEST_ASSERT(vcpu->run->exit_reason == KVM_EXIT_SYSTEM_EVENT,
 				    "Wanted SYSTEM_EVENT, got %s",
 				    exit_reason_str(vcpu->run->exit_reason));
@@ -188,19 +213,38 @@ int main(int argc, char *argv[])
 {
 	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SEV));
 
-	test_sev(guest_sev_code, SEV_POLICY_NO_DBG);
-	test_sev(guest_sev_code, 0);
+	test_sev(guest_sev_code, KVM_X86_SEV_VM, SEV_POLICY_NO_DBG);
+	test_sev(guest_sev_code, KVM_X86_SEV_VM, 0);
 
 	if (kvm_cpu_has(X86_FEATURE_SEV_ES)) {
-		test_sev(guest_sev_es_code, SEV_POLICY_ES | SEV_POLICY_NO_DBG);
-		test_sev(guest_sev_es_code, SEV_POLICY_ES);
+		test_sev(guest_sev_es_code, KVM_X86_SEV_ES_VM, SEV_POLICY_ES | SEV_POLICY_NO_DBG);
+		test_sev(guest_sev_es_code, KVM_X86_SEV_ES_VM, SEV_POLICY_ES);
 
 		test_sev_es_shutdown();
 
 		if (kvm_has_cap(KVM_CAP_XCRS) &&
 		    (xgetbv(0) & XFEATURE_MASK_X87_AVX) == XFEATURE_MASK_X87_AVX) {
-			test_sync_vmsa(0);
-			test_sync_vmsa(SEV_POLICY_NO_DBG);
+			test_sync_vmsa(KVM_X86_SEV_ES_VM, SEV_POLICY_ES);
+			test_sync_vmsa(KVM_X86_SEV_ES_VM, SEV_POLICY_ES | SEV_POLICY_NO_DBG);
+		}
+	}
+
+	if (kvm_cpu_has(X86_FEATURE_SNP) && is_kvm_snp_supported()) {
+		uint64_t snp_policy = SNP_POLICY;
+
+		if (unlikely(!is_smt_active()))
+			snp_policy &= ~SNP_POLICY_SMT;
+
+		test_sev(guest_snp_code, KVM_X86_SNP_VM, snp_policy);
+		/* Test the minimum required firmware level. */
+		test_sev(guest_snp_code, KVM_X86_SNP_VM,
+			 snp_policy |
+			 SNP_FW_VER_MAJOR(SNP_FW_REQ_VER_MAJOR) |
+			 SNP_FW_VER_MINOR(SNP_FW_REQ_VER_MINOR));
+
+		if (kvm_has_cap(KVM_CAP_XCRS) &&
+		    (xgetbv(0) & XFEATURE_MASK_X87_AVX) == XFEATURE_MASK_X87_AVX) {
+			test_sync_vmsa(KVM_X86_SNP_VM, snp_policy);
 		}
 	}
 
-- 
2.34.1