[PATCH] selftests/rseq: take large C-state exit latency into consideration

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Currently, the migration worker delays 1-10 us, assuming that one
KVM_RUN iteration only takes a few microseconds.  But if C-state exit
latencies are large enough, for example, hundreds or even thousands
of microseconds on server CPUs, it may happen that it's not able to
bring the target CPU out of C-state before the migration worker starts
to migrate it to the next CPU.

If the system workload is light, most CPUs could be at a certain level
of C-state, and the vCPU thread may waste milliseconds before it can
actually migrate to a new CPU.

Thus, the test may be inefficient on such systems, and in some cases it
may fail the migration/KVM_RUN ratio sanity check.

Since we are not able to turn off the cpuidle sub-system at run time,
this patch creates an idle thread on every CPU to prevent them from
entering C-states.

Additionally, it seems reasonable to randomize the length of the
usleep() delay, rather than delaying in a fixed pattern.

Signed-off-by: Zide Chen <zide.chen@xxxxxxxxx>
---
 tools/testing/selftests/kvm/rseq_test.c | 76 ++++++++++++++++++++++---
 1 file changed, 69 insertions(+), 7 deletions(-)

diff --git a/tools/testing/selftests/kvm/rseq_test.c b/tools/testing/selftests/kvm/rseq_test.c
index 28f97fb52044..d6e8b851d29e 100644
--- a/tools/testing/selftests/kvm/rseq_test.c
+++ b/tools/testing/selftests/kvm/rseq_test.c
@@ -11,6 +11,7 @@
 #include <syscall.h>
 #include <sys/ioctl.h>
 #include <sys/sysinfo.h>
+#include <sys/resource.h>
 #include <asm/barrier.h>
 #include <linux/atomic.h>
 #include <linux/rseq.h>
@@ -29,9 +30,10 @@
 #define NR_TASK_MIGRATIONS 100000
 
 static pthread_t migration_thread;
+static pthread_t *idle_threads;
 static cpu_set_t possible_mask;
-static int min_cpu, max_cpu;
-static bool done;
+static int min_cpu, max_cpu, nproc;
+static volatile bool done;
 
 static atomic_t seq_cnt;
 
@@ -150,7 +152,7 @@ static void *migration_worker(void *__rseq_tid)
 		 * Use usleep() for simplicity and to avoid unnecessary kernel
 		 * dependencies.
 		 */
-		usleep((i % 10) + 1);
+		usleep((rand() % 10) + 1);
 	}
 	done = true;
 	return NULL;
@@ -158,7 +160,7 @@ static void *migration_worker(void *__rseq_tid)
 
 static void calc_min_max_cpu(void)
 {
-	int i, cnt, nproc;
+	int i, cnt;
 
 	TEST_REQUIRE(CPU_COUNT(&possible_mask) >= 2);
 
@@ -186,6 +188,61 @@ static void calc_min_max_cpu(void)
 		       "Only one usable CPU, task migration not possible");
 }
 
+static void *idle_thread_fn(void *__idle_cpu)
+{
+	int r, cpu = (int)(unsigned long)__idle_cpu;
+	cpu_set_t allowed_mask;
+
+	CPU_ZERO(&allowed_mask);
+	CPU_SET(cpu, &allowed_mask);
+
+	r = sched_setaffinity(0, sizeof(allowed_mask), &allowed_mask);
+	TEST_ASSERT(!r, "sched_setaffinity failed, errno = %d (%s)",
+		errno, strerror(errno));
+
+	/* lowest priority, trying to prevent it from entering C-states */
+	r = setpriority(PRIO_PROCESS, 0, 19);
+	TEST_ASSERT(!r, "setpriority failed, errno = %d (%s)",
+		errno, strerror(errno));
+
+	while(!done);
+
+	return NULL;
+}
+
+static void spawn_threads(void)
+{
+	int cpu;
+
+	/* Run a dummy thread on every CPU */
+	for (cpu = min_cpu; cpu <= max_cpu; cpu++) {
+		if (!CPU_ISSET(cpu, &possible_mask))
+			continue;
+
+		pthread_create(&idle_threads[cpu], NULL, idle_thread_fn,
+			       (void *)(unsigned long)cpu);
+	}
+
+	pthread_create(&migration_thread, NULL, migration_worker,
+		       (void *)(unsigned long)syscall(SYS_gettid));
+}
+
+static void join_threads(void)
+{
+	int cpu;
+
+	pthread_join(migration_thread, NULL);
+
+	for (cpu = min_cpu; cpu <= max_cpu; cpu++) {
+		if (!CPU_ISSET(cpu, &possible_mask))
+			continue;
+
+		pthread_join(idle_threads[cpu], NULL);
+	}
+
+	free(idle_threads);
+}
+
 int main(int argc, char *argv[])
 {
 	int r, i, snapshot;
@@ -199,6 +256,12 @@ int main(int argc, char *argv[])
 
 	calc_min_max_cpu();
 
+	srand(time(NULL));
+
+	idle_threads = malloc(sizeof(pthread_t) * nproc);
+	TEST_ASSERT(idle_threads, "malloc failed, errno = %d (%s)", errno,
+		    strerror(errno));
+
 	r = rseq_register_current_thread();
 	TEST_ASSERT(!r, "rseq_register_current_thread failed, errno = %d (%s)",
 		    errno, strerror(errno));
@@ -210,8 +273,7 @@ int main(int argc, char *argv[])
 	 */
 	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 
-	pthread_create(&migration_thread, NULL, migration_worker,
-		       (void *)(unsigned long)syscall(SYS_gettid));
+	spawn_threads();
 
 	for (i = 0; !done; i++) {
 		vcpu_run(vcpu);
@@ -258,7 +320,7 @@ int main(int argc, char *argv[])
 	TEST_ASSERT(i > (NR_TASK_MIGRATIONS / 2),
 		    "Only performed %d KVM_RUNs, task stalled too much?", i);
 
-	pthread_join(migration_thread, NULL);
+	join_threads();
 
 	kvm_vm_free(vm);
 
-- 
2.34.1





[Index of Archives]     [Linux Wireless]     [Linux Kernel]     [ATH6KL]     [Linux Bluetooth]     [Linux Netdev]     [Kernel Newbies]     [Share Photos]     [IDE]     [Security]     [Git]     [Netfilter]     [Bugtraq]     [Yosemite News]     [MIPS Linux]     [ARM Linux]     [Linux Security]     [Linux RAID]     [Linux ATA RAID]     [Samba]     [Device Mapper]

  Powered by Linux