[PATCH v1 07/18] KVM: selftests/max_guest_memory_test: vcpu-related code consolidation

Remove the unnecessary allocation of the vcpu and thread arrays, and use
the helper functions to create and join the vcpu threads.

As the vcpu thread's start routine (i.e. vcpu_worker) takes the kvm_vcpu
as its argument, turn vcpu_info into the vcpu thread's private data
(vcpu->private_data), so that the per-vcpu gpa range still reaches the
start routine.

Signed-off-by: Wei Wang <wei.w.wang@xxxxxxxxx>
---
 .../selftests/kvm/max_guest_memory_test.c     | 53 +++++++------------
 1 file changed, 20 insertions(+), 33 deletions(-)
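
Note for reviewers: a rough sketch of the resulting flow is below. It
relies on the vm_vcpu_threads_create()/vm_vcpu_threads_join() helpers,
the vm_iterate_over_vcpus() iterator and the vcpu->private_data field
introduced earlier in this series; the exact signatures shown here are
illustrative rather than authoritative.

	/* Per-thread state, allocated by the helper at thread creation. */
	struct vcpu_thread_data {
		uint64_t start_gpa;
		uint64_t end_gpa;
	};

	static void *vcpu_worker(void *data)
	{
		/* The pthread argument is the vcpu itself... */
		struct kvm_vcpu *vcpu = data;
		/* ...and per-thread state hangs off vcpu->private_data. */
		struct vcpu_thread_data *td = vcpu->private_data;

		/* ... run the vcpu over [td->start_gpa, td->end_gpa) ... */
		return NULL;
	}

	/* Create one thread per vcpu, each with its own private data. */
	vm_vcpu_threads_create(vm, vcpu_worker,
			       sizeof(struct vcpu_thread_data));

	/* Fill in each thread's gpa range before releasing the vcpus. */
	vm_iterate_over_vcpus(vm, vcpu, i) {
		struct vcpu_thread_data *td = vcpu->private_data;

		td->start_gpa = start_gpa + i * nr_bytes;
		td->end_gpa = td->start_gpa + nr_bytes;
	}

	/* ... */
	vm_vcpu_threads_join(vm);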

diff --git a/tools/testing/selftests/kvm/max_guest_memory_test.c b/tools/testing/selftests/kvm/max_guest_memory_test.c
index 9a6e4f3ad6b5..2d9c83e36e65 100644
--- a/tools/testing/selftests/kvm/max_guest_memory_test.c
+++ b/tools/testing/selftests/kvm/max_guest_memory_test.c
@@ -3,7 +3,6 @@
 
 #include <stdio.h>
 #include <stdlib.h>
-#include <pthread.h>
 #include <semaphore.h>
 #include <sys/types.h>
 #include <signal.h>
@@ -27,8 +26,7 @@ static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride)
 	GUEST_DONE();
 }
 
-struct vcpu_info {
-	struct kvm_vcpu *vcpu;
+struct vcpu_thread_data {
 	uint64_t start_gpa;
 	uint64_t end_gpa;
 };
@@ -59,13 +57,15 @@ static void run_vcpu(struct kvm_vcpu *vcpu)
 
 static void *vcpu_worker(void *data)
 {
-	struct vcpu_info *info = data;
-	struct kvm_vcpu *vcpu = info->vcpu;
+	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
+	struct vcpu_thread_data *thread_data =
+		(struct vcpu_thread_data *)vcpu->private_data;
 	struct kvm_vm *vm = vcpu->vm;
 	struct kvm_sregs sregs;
 	struct kvm_regs regs;
 
-	vcpu_args_set(vcpu, 3, info->start_gpa, info->end_gpa, vm->page_size);
+	vcpu_args_set(vcpu, 3, thread_data->start_gpa,
+		      thread_data->end_gpa, vm->page_size);
 
 	/* Snapshot regs before the first run. */
 	vcpu_regs_get(vcpu, &regs);
@@ -88,31 +88,24 @@ static void *vcpu_worker(void *data)
 	return NULL;
 }
 
-static pthread_t *spawn_workers(struct kvm_vm *vm, struct kvm_vcpu **vcpus,
-				uint64_t start_gpa, uint64_t end_gpa)
+static void vm_vcpu_threads_data_init(struct kvm_vm *vm,
+				     uint64_t start_gpa, uint64_t end_gpa)
 {
-	struct vcpu_info *info;
+	struct kvm_vcpu *vcpu;
+	struct vcpu_thread_data *thread_data;
 	uint64_t gpa, nr_bytes;
-	pthread_t *threads;
 	int i;
 
-	threads = malloc(nr_vcpus * sizeof(*threads));
-	TEST_ASSERT(threads, "Failed to allocate vCPU threads");
-
-	info = malloc(nr_vcpus * sizeof(*info));
-	TEST_ASSERT(info, "Failed to allocate vCPU gpa ranges");
-
 	nr_bytes = ((end_gpa - start_gpa) / nr_vcpus) &
 			~((uint64_t)vm->page_size - 1);
 	TEST_ASSERT(nr_bytes, "C'mon, no way you have %d CPUs", nr_vcpus);
 
-	for (i = 0, gpa = start_gpa; i < nr_vcpus; i++, gpa += nr_bytes) {
-		info[i].vcpu = vcpus[i];
-		info[i].start_gpa = gpa;
-		info[i].end_gpa = gpa + nr_bytes;
-		pthread_create(&threads[i], NULL, vcpu_worker, &info[i]);
+	vm_iterate_over_vcpus(vm, vcpu, i) {
+		thread_data = (struct vcpu_thread_data *)vcpu->private_data;
+		gpa = start_gpa + i * nr_bytes;
+		thread_data->start_gpa = gpa;
+		thread_data->end_gpa = gpa + nr_bytes;
 	}
-	return threads;
 }
 
 static void rendezvous_with_vcpus(struct timespec *time, const char *name)
@@ -170,8 +163,6 @@ int main(int argc, char *argv[])
 	uint64_t max_gpa, gpa, slot_size, max_mem, i;
 	int max_slots, slot, opt, fd;
 	bool hugepages = false;
-	struct kvm_vcpu **vcpus;
-	pthread_t *threads;
 	struct kvm_vm *vm;
 	void *mem;
 
@@ -214,10 +205,7 @@ int main(int argc, char *argv[])
 		}
 	}
 
-	vcpus = malloc(nr_vcpus * sizeof(*vcpus));
-	TEST_ASSERT(vcpus, "Failed to allocate vCPU array");
-
-	vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus);
+	vm = vm_create_with_vcpus(nr_vcpus, guest_code, NULL);
 
 	max_gpa = vm->max_gfn << vm->page_shift;
 	TEST_ASSERT(max_gpa > (4 * slot_size), "MAXPHYADDR <4gb ");
@@ -254,10 +242,10 @@ int main(int argc, char *argv[])
 	}
 
 	atomic_set(&rendezvous, nr_vcpus + 1);
-	threads = spawn_workers(vm, vcpus, start_gpa, gpa);
 
-	free(vcpus);
-	vcpus = NULL;
+	vm_vcpu_threads_create(vm, vcpu_worker,
+			       sizeof(struct vcpu_thread_data));
+	vm_vcpu_threads_data_init(vm, start_gpa, gpa);
 
 	pr_info("Running with %lugb of guest memory and %u vCPUs\n",
 		(gpa - start_gpa) / size_1gb, nr_vcpus);
@@ -287,8 +275,7 @@ int main(int argc, char *argv[])
 	munmap(mem, slot_size / 2);
 
 	/* Sanity check that the vCPUs actually ran. */
-	for (i = 0; i < nr_vcpus; i++)
-		pthread_join(threads[i], NULL);
+	vm_vcpu_threads_join(vm);
 
 	/*
 	 * Deliberately exit without deleting the remaining memslots or closing
-- 
2.27.0