On Mon, Dec 18, 2023, Peter Gonda wrote:
> Add kvm_vm.protected metadata. Protected VMs memory, potentially
> register and other state may not be accessible to KVM. This combined
> with a new protected_phy_pages bitmap will allow the selftests to check
> if a given pages is accessible.
> 
> Cc: Paolo Bonzini <pbonzini@xxxxxxxxxx>
> Cc: Sean Christopherson <seanjc@xxxxxxxxxx>
> Cc: Vishal Annapurve <vannapurve@xxxxxxxxxx>
> Cc: Ackerley Tng <ackerleytng@xxxxxxxxxx>
> cc: Andrew Jones <andrew.jones@xxxxxxxxx>
> Cc: Tom Lendacky <thomas.lendacky@xxxxxxx>
> Cc: Michael Roth <michael.roth@xxxxxxx>
> Originally-by: Michael Roth <michael.roth@xxxxxxx>
> Signed-off-by: Peter Gonda <pgonda@xxxxxxxxxx>
> ---
>  .../selftests/kvm/include/kvm_util_base.h | 15 +++++++++++++--
>  tools/testing/selftests/kvm/lib/kvm_util.c | 16 +++++++++++++---
>  2 files changed, 26 insertions(+), 5 deletions(-)
> 
> diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
> index ca99cc41685d..71c0ed6a1197 100644
> --- a/tools/testing/selftests/kvm/include/kvm_util_base.h
> +++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
> @@ -88,6 +88,7 @@ _Static_assert(NUM_VM_SUBTYPES < 256);
>  struct userspace_mem_region {
>          struct kvm_userspace_memory_region region;
>          struct sparsebit *unused_phy_pages;
> +        struct sparsebit *protected_phy_pages;
>          int fd;
>          off_t offset;
>          enum vm_mem_backing_src_type backing_src_type;
> @@ -155,6 +156,9 @@ struct kvm_vm {
>          vm_vaddr_t handlers;
>          uint32_t dirty_ring_size;
>  
> +        /* VM protection enabled: SEV, etc*/
> +        bool protected;

Yet another bool is unnecessary, just add an arch hook.  That way it's
impossible to have a discrepancy where vm->arch says a VM is protected,
but vm->protected says it's not.
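
Completely untested, and the name below is purely a strawman, but I'm
thinking something along these lines:

        /*
         * kvm_util_base.h: implemented per-arch, keyed off whatever
         * VM-scoped state the arch already tracks, e.g. SEV on x86.
         */
        bool vm_arch_has_protected_memory(struct kvm_vm *vm);

with a weak default in common code so that architectures without
protected VM support don't have to provide an implementation:

        /* lib/kvm_util.c: overridden by archs that support protected VMs. */
        __weak bool vm_arch_has_protected_memory(struct kvm_vm *vm)
        {
                return false;
        }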
> @@ -1040,6 +1041,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
>  
>          region->backing_src_type = src_type;
>          region->unused_phy_pages = sparsebit_alloc();
> +        region->protected_phy_pages = sparsebit_alloc();

There's zero reason to allocate protected_phy_pages if the VM doesn't
support protected memory.

>          sparsebit_set_num(region->unused_phy_pages,
>                            guest_paddr >> vm->page_shift, npages);
>          region->region.slot = slot;
> @@ -1829,6 +1831,10 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
>                          region->host_mem);
>                  fprintf(stream, "%*sunused_phy_pages: ", indent + 2, "");
>                  sparsebit_dump(stream, region->unused_phy_pages, 0);
> +                if (vm->protected) {

And this should check region->protected_phy_pages, not vm->protected.

> +                        fprintf(stream, "%*sprotected_phy_pages: ", indent + 2, "");
> +                        sparsebit_dump(stream, region->protected_phy_pages, 0);
> +                }
>          }
>          fprintf(stream, "%*sMapped Virtual Pages:\n", indent, "");
>          sparsebit_dump(stream, vm->vpages_mapped, indent + 2);
> @@ -1941,8 +1947,9 @@ const char *exit_reason_str(unsigned int exit_reason)
>   * and their base address is returned. A TEST_ASSERT failure occurs if
>   * not enough pages are available at or above paddr_min.
>   */
> -vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
> -                              vm_paddr_t paddr_min, uint32_t memslot)
> +vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
> +                                vm_paddr_t paddr_min, uint32_t memslot,
> +                                bool protected)
>  {
>          struct userspace_mem_region *region;
>          sparsebit_idx_t pg, base;
> @@ -1975,8 +1982,11 @@ vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
>                  abort();
>          }

And here, assert that:

        TEST_ASSERT(!protected || region->protected_phy_pages,
                    "Region doesn't support protected memory");

> -        for (pg = base; pg < base + num; ++pg)
> +        for (pg = base; pg < base + num; ++pg) {
>                  sparsebit_clear(region->unused_phy_pages, pg);
> +                if (protected)
> +                        sparsebit_set(region->protected_phy_pages, pg);
> +        }
>  
>          return base * vm->page_size;
>  }
> -- 
> 2.43.0.472.g3155946c3a-goog
> 
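
All in, I'm hoping this shakes out to something like the below (again
untested, and relying on the hypothetical vm_arch_has_protected_memory()
from above):

        /*
         * vm_userspace_mem_region_add(): allocate the bitmap only when
         * the VM can actually use it.
         */
        if (vm_arch_has_protected_memory(vm))
                region->protected_phy_pages = sparsebit_alloc();

        /* vm_dump(): key off the region, not the VM. */
        if (region->protected_phy_pages) {
                fprintf(stream, "%*sprotected_phy_pages: ", indent + 2, "");
                sparsebit_dump(stream, region->protected_phy_pages, 0);
        }

        /*
         * __vm_phy_pages_alloc(): reject protected allocations from a
         * region that was never set up for them.
         */
        TEST_ASSERT(!protected || region->protected_phy_pages,
                    "Region doesn't support protected memory");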