[RFC 4/6] percpu-refcount-torture: Extend test with runtime mode switches

Extend the test so that test threads exercise runtime switching of
refs from managed mode to the other reinitable active modes (atomic
and percpu) and back to managed mode. Also update the test-only
percpu_ref_test_flush_release_work() helper to poll the manager's
scan cursor under percpu_ref_manage_lock while waiting for release
work to drain.

Signed-off-by: Neeraj Upadhyay <Neeraj.Upadhyay@xxxxxxx>
---
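Notes:

Each test thread now periodically walks its ref through the full
cycle of reinitable modes. A condensed, annotated sketch of that
cycle, as performed by percpu_ref_test_thread() below (ref, ref_idx
and do_switch are the thread-local variables added by this patch):

	mutex_lock(&ref_switch_mutexes[ref_idx]);  /* serialize switches on this ref */
	percpu_ref_switch_to_unmanaged(ref);       /* leave managed mode */
	udelay(delay_us);                          /* let users run on the unmanaged ref */
	percpu_ref_switch_to_atomic_sync(ref);     /* atomic mode; waits for completion */
	if (do_switch & 1)                         /* randomly exercise percpu reinit */
		percpu_ref_switch_to_percpu(ref);
	udelay(delay_us);
	percpu_ref_switch_to_managed(ref);         /* hand the ref back to the manager */
	mutex_unlock(&ref_switch_mutexes[ref_idx]);

The percpu-refcount.c hunk makes the flush helper re-read
next_percpu_ref_node under percpu_ref_manage_lock and stop once the
scan cursor reaches the list head, instead of waiting on the
last_percpu_ref_node pointer.
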
 lib/percpu-refcount-torture.c | 41 +++++++++++++++++++++++++++++++++--
 lib/percpu-refcount.c         | 12 +++++++++-
 2 files changed, 50 insertions(+), 3 deletions(-)

diff --git a/lib/percpu-refcount-torture.c b/lib/percpu-refcount-torture.c
index 686f5a228b40..cb2700b16517 100644
--- a/lib/percpu-refcount-torture.c
+++ b/lib/percpu-refcount-torture.c
@@ -3,6 +3,7 @@
 #include <linux/jiffies.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
+#include <linux/mutex.h>
 #include <linux/percpu-refcount.h>
 #include <linux/torture.h>
 
@@ -59,6 +60,7 @@ static struct task_struct **busted_late_release_tasks;
 
 static struct percpu_ref *refs;
 static long *num_per_ref_users;
+static struct mutex *ref_switch_mutexes;
 
 static atomic_t running;
 static atomic_t *ref_running;
@@ -97,19 +99,36 @@ static int percpu_ref_manager_thread(void *data)
 static int percpu_ref_test_thread(void *data)
 {
 	struct percpu_ref *ref = (struct percpu_ref *)data;
+	DEFINE_TORTURE_RANDOM(rand);
+	int ref_idx = ref - refs;
+	int do_switch;
 	int i = 0;
 
 	percpu_ref_get(ref);
 
 	do {
 		percpu_ref_get(ref);
+		/* Random byte; zero (~1/256) skips switching, bit 0 picks percpu reinit */
+		do_switch = (torture_random(&rand) & 0xff);
 		udelay(delay_us);
+		if (do_switch) {
+			mutex_lock(&ref_switch_mutexes[ref_idx]);
+			percpu_ref_switch_to_unmanaged(ref);
+			udelay(delay_us);
+			percpu_ref_switch_to_atomic_sync(ref);
+			if (do_switch & 1)
+				percpu_ref_switch_to_percpu(ref);
+			udelay(delay_us);
+			percpu_ref_switch_to_managed(ref);
+			mutex_unlock(&ref_switch_mutexes[ref_idx]);
+			udelay(delay_us);
+		}
 		percpu_ref_put(ref);
 		stutter_wait("percpu_ref_test_thread");
 		i++;
 	} while (i < niterations);
 
-	atomic_dec(&ref_running[ref - refs]);
+	atomic_dec(&ref_running[ref_idx]);
 	/* Order ref release with ref_running[ref_idx] == 0 */
 	smp_mb();
 	percpu_ref_put(ref);
@@ -213,6 +232,13 @@ static void percpu_ref_test_cleanup(void)
 	kfree(num_per_ref_users);
 	num_per_ref_users = NULL;
 
+	if (ref_switch_mutexes) {
+		for (i = 0; i < nrefs; i++)
+			mutex_destroy(&ref_switch_mutexes[i]);
+		kfree(ref_switch_mutexes);
+		ref_switch_mutexes = NULL;
+	}
+
 	if (refs) {
 		for (i = 0; i < nrefs; i++)
 			percpu_ref_exit(&refs[i]);
@@ -251,7 +277,8 @@ static int __init percpu_ref_torture_init(void)
 		goto init_err;
 	}
 	for (i = 0; i < nrefs; i++) {
-		flags = torture_random(trsp) & 1 ? PERCPU_REF_INIT_ATOMIC : PERCPU_REF_REL_MANAGED;
+		flags = (torture_random(trsp) & 1) ? PERCPU_REF_INIT_ATOMIC :
+							PERCPU_REF_REL_MANAGED;
 		err = percpu_ref_init(&refs[i], percpu_ref_test_release,
 				      flags, GFP_KERNEL);
 		if (err)
@@ -269,6 +296,16 @@ static int __init percpu_ref_torture_init(void)
 	for (i = 0; i < nrefs; i++)
 		num_per_ref_users[i] = 0;
 
+	ref_switch_mutexes = kcalloc(nrefs, sizeof(ref_switch_mutexes[0]), GFP_KERNEL);
+	if (!ref_switch_mutexes) {
+		TOROUT_ERRSTRING("out of memory");
+		err = -ENOMEM;
+		goto init_err;
+	}
+
+	for (i = 0; i < nrefs; i++)
+		mutex_init(&ref_switch_mutexes[i]);
+
 	ref_user_tasks = kcalloc(nusers, sizeof(ref_user_tasks[0]), GFP_KERNEL);
 	if (!ref_user_tasks) {
 		TOROUT_ERRSTRING("out of memory");
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index b79e36905aa4..4e0a453bd51f 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -723,6 +723,7 @@ EXPORT_SYMBOL_GPL(percpu_ref_test_is_percpu);
 void percpu_ref_test_flush_release_work(void)
 {
 	int max_flush = READ_ONCE(max_scan_count);
+	struct list_head *next;
 	int max_count = 1000;
 
 	/* Complete any executing release work */
@@ -738,8 +739,17 @@ void percpu_ref_test_flush_release_work(void)
 	/* max scan count update visible to work */
 	smp_mb();
 	flush_delayed_work(&percpu_ref_release_work);
-	while (READ_ONCE(last_percpu_ref_node) != NULL && max_count--)
+
+	while (true) {
+		if (!max_count--)
+			break;
+		spin_lock(&percpu_ref_manage_lock);
+		next = next_percpu_ref_node;
+		spin_unlock(&percpu_ref_manage_lock);
+		if (list_is_head(next, &percpu_ref_manage_head))
+			break;
 		flush_delayed_work(&percpu_ref_release_work);
+	}
 	/* max scan count update visible to work */
 	smp_mb();
 	WRITE_ONCE(max_scan_count, max_flush);
-- 
2.34.1