From: Isaku Yamahata <isaku.yamahata@xxxxxxxxx> Punch hole implies the region is zeroed out. Add a test to verify that the punched region reads back as zero. Opportunistically remove the unused member, pattern, in guest_run_test(). Signed-off-by: Isaku Yamahata <isaku.yamahata@xxxxxxxxx> --- .../kvm/x86_64/private_mem_conversions_test.c | 26 ++++++++++++++----- 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/private_mem_conversions_test.c b/tools/testing/selftests/kvm/x86_64/private_mem_conversions_test.c index 50541246d6fd..c05c725645af 100644 --- a/tools/testing/selftests/kvm/x86_64/private_mem_conversions_test.c +++ b/tools/testing/selftests/kvm/x86_64/private_mem_conversions_test.c @@ -85,9 +85,10 @@ static void guest_sync_private(uint64_t gpa, uint64_t size, uint8_t pattern) /* Arbitrary values, KVM doesn't care about the attribute flags. */ #define MAP_GPA_SHARED BIT(0) #define MAP_GPA_DO_FALLOCATE BIT(1) +#define MAP_GPA_FALLOCATE_ONLY BIT(2) static void guest_map_mem(uint64_t gpa, uint64_t size, bool map_shared, - bool do_fallocate) + bool do_fallocate, bool fallocate_only) { uint64_t flags = 0; @@ -95,17 +96,24 @@ static void guest_map_mem(uint64_t gpa, uint64_t size, bool map_shared, flags |= MAP_GPA_SHARED; if (do_fallocate) flags |= MAP_GPA_DO_FALLOCATE; + if (fallocate_only) + flags |= MAP_GPA_FALLOCATE_ONLY; kvm_hypercall_map_gpa_range(gpa, size, flags); } static void guest_map_shared(uint64_t gpa, uint64_t size, bool do_fallocate) { - guest_map_mem(gpa, size, true, do_fallocate); + guest_map_mem(gpa, size, true, do_fallocate, false); } static void guest_map_private(uint64_t gpa, uint64_t size, bool do_fallocate) { - guest_map_mem(gpa, size, false, do_fallocate); + guest_map_mem(gpa, size, false, do_fallocate, false); +} + +static void guest_punch_hole_private(uint64_t gpa, uint64_t size) +{ + guest_map_mem(gpa, size, true, true, true); } static void guest_run_test(uint64_t base_gpa, bool do_fallocate) @@ -113,7 +121,6 @@ static
void guest_run_test(uint64_t base_gpa, bool do_fallocate) struct { uint64_t offset; uint64_t size; - uint8_t pattern; } stages[] = { GUEST_STAGE(0, PAGE_SIZE), GUEST_STAGE(0, SZ_2M), @@ -156,6 +163,10 @@ static void guest_run_test(uint64_t base_gpa, bool do_fallocate) if (size > PAGE_SIZE) { memset((void *)gpa, p2, PAGE_SIZE); + + /* Test if punch hole results in zeroing page. */ + guest_punch_hole_private(gpa, PAGE_SIZE); + memcmp_g(gpa, 0, PAGE_SIZE); goto skip; } @@ -229,6 +240,7 @@ static void handle_exit_hypercall(struct kvm_vcpu *vcpu) uint64_t size = run->hypercall.args[1] * PAGE_SIZE; bool map_shared = run->hypercall.args[2] & MAP_GPA_SHARED; bool do_fallocate = run->hypercall.args[2] & MAP_GPA_DO_FALLOCATE; + bool fallocate_only = run->hypercall.args[2] & MAP_GPA_FALLOCATE_ONLY; struct kvm_vm *vm = vcpu->vm; TEST_ASSERT(run->hypercall.nr == KVM_HC_MAP_GPA_RANGE, @@ -238,8 +250,10 @@ static void handle_exit_hypercall(struct kvm_vcpu *vcpu) if (do_fallocate) vm_guest_mem_fallocate(vm, gpa, size, map_shared); - vm_set_memory_attributes(vm, gpa, size, - map_shared ? 0 : KVM_MEMORY_ATTRIBUTE_PRIVATE); + if (!fallocate_only) + vm_set_memory_attributes(vm, gpa, size, + map_shared ? + 0 : KVM_MEMORY_ATTRIBUTE_PRIVATE); run->hypercall.ret = 0; } -- 2.25.1