Run the exact number of guest loops required in mmu_stress_test instead
of looping indefinitely, in anticipation of adding more stages that run
different code (e.g. reads instead of writes).

Reviewed-by: James Houghton <jthoughton@xxxxxxxxxx>
Reviewed-by: Andrew Jones <ajones@xxxxxxxxxxxxxxxx>
Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
 tools/testing/selftests/kvm/mmu_stress_test.c | 25 ++++++++++++++-----
 1 file changed, 19 insertions(+), 6 deletions(-)

diff --git a/tools/testing/selftests/kvm/mmu_stress_test.c b/tools/testing/selftests/kvm/mmu_stress_test.c
index 656a837c7f49..c6bf18cb7c89 100644
--- a/tools/testing/selftests/kvm/mmu_stress_test.c
+++ b/tools/testing/selftests/kvm/mmu_stress_test.c
@@ -20,12 +20,15 @@ static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride)
 {
 	uint64_t gpa;
+	int i;
 
-	for (;;) {
+	for (i = 0; i < 2; i++) {
 		for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
 			vcpu_arch_put_guest(*((volatile uint64_t *)gpa), gpa);
-		GUEST_SYNC(0);
+		GUEST_SYNC(i);
 	}
+
+	GUEST_ASSERT(0);
 }
 
 struct vcpu_info {
@@ -52,10 +55,18 @@ static void rendezvous_with_boss(void)
 	}
 }
 
-static void run_vcpu(struct kvm_vcpu *vcpu)
+static void assert_sync_stage(struct kvm_vcpu *vcpu, int stage)
+{
+	struct ucall uc;
+
+	TEST_ASSERT_EQ(get_ucall(vcpu, &uc), UCALL_SYNC);
+	TEST_ASSERT_EQ(uc.args[1], stage);
+}
+
+static void run_vcpu(struct kvm_vcpu *vcpu, int stage)
 {
 	vcpu_run(vcpu);
-	TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_SYNC);
+	assert_sync_stage(vcpu, stage);
 }
 
 static void *vcpu_worker(void *data)
@@ -69,7 +80,8 @@ static void *vcpu_worker(void *data)
 
 	rendezvous_with_boss();
 
-	run_vcpu(vcpu);
+	/* Stage 0, write all of guest memory. */
+	run_vcpu(vcpu, 0);
 	rendezvous_with_boss();
 #ifdef __x86_64__
 	vcpu_sregs_get(vcpu, &sregs);
@@ -79,7 +91,8 @@ static void *vcpu_worker(void *data)
 #endif
 	rendezvous_with_boss();
 
-	run_vcpu(vcpu);
+	/* Stage 1, re-write all of guest memory. */
+	run_vcpu(vcpu, 1);
 	rendezvous_with_boss();
 
 	return NULL;
-- 
2.47.0.338.g60cca15819-goog
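
To illustrate where the bounded loop is headed, here is a minimal sketch of how a
future read stage could slot into guest_code(). The stage numbering, the bare
volatile read, and the matching host-side run_vcpu(vcpu, 2) call are assumptions
for illustration only, not code from this series.

static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride)
{
	uint64_t gpa;
	int i;

	/* Stages 0 and 1: write all of guest memory (as in this patch). */
	for (i = 0; i < 2; i++) {
		for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
			vcpu_arch_put_guest(*((volatile uint64_t *)gpa), gpa);
		GUEST_SYNC(i);
	}

	/* Hypothetical stage 2: read back all of guest memory. */
	for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
		*((volatile uint64_t *)gpa);
	GUEST_SYNC(2);

	GUEST_ASSERT(0);
}

The vCPU worker would then need a matching run_vcpu(vcpu, 2) between rendezvous
points so the host-side check in assert_sync_stage() stays in lockstep with the
guest's stage counter.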