[tip:sched/core] sched/core: Convert nohz_flags to atomic_t

Commit-ID:  a22e47a4e3f5a9e50a827c5d94705ace3b1eac0b
Gitweb:     https://git.kernel.org/tip/a22e47a4e3f5a9e50a827c5d94705ace3b1eac0b
Author:     Peter Zijlstra <peterz@xxxxxxxxxxxxx>
AuthorDate: Thu, 21 Dec 2017 10:01:24 +0100
Committer:  Ingo Molnar <mingo@xxxxxxxxxx>
CommitDate: Fri, 9 Mar 2018 07:59:16 +0100

sched/core: Convert nohz_flags to atomic_t

Using atomic_t allows us to use the more flexible bitops provided
there. Also, it's smaller.

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Mike Galbraith <efault@xxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: linux-kernel@xxxxxxxxxxxxxxx
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
---
 kernel/sched/core.c  |  6 +++---
 kernel/sched/fair.c  | 23 +++++++++++++++--------
 kernel/sched/sched.h | 11 ++++++-----
 3 files changed, 24 insertions(+), 16 deletions(-)
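
For readers less familiar with the idiom the patch switches to, here is a
minimal userspace sketch of the same pattern using C11 <stdatomic.h>. This
is not the kernel API: the kernel's atomic_fetch_or()/atomic_andnot() map
onto the atomic_fetch_or()/atomic_fetch_and(~mask) calls below, and the
flag names simply mirror the patch.

	#include <stdatomic.h>
	#include <stdio.h>

	#define NOHZ_TICK_STOPPED	(1U << 0)
	#define NOHZ_BALANCE_KICK	(1U << 1)

	/* was: unsigned long flags + test_bit()/set_bit()/clear_bit() */
	static atomic_uint nohz_flags;

	static int kick_needed(void)
	{
		/* Old code: test_and_set_bit(NOHZ_BALANCE_KICK, ...).
		 * New code: fetch the previous value and OR in the flag
		 * in a single atomic read-modify-write. */
		unsigned int flags = atomic_fetch_or(&nohz_flags, NOHZ_BALANCE_KICK);

		if (flags & NOHZ_BALANCE_KICK)
			return 0;	/* kick already pending, nothing to do */
		return 1;		/* caller would send the reschedule IPI */
	}

	static void balance_done(void)
	{
		/* Old code: clear_bit(); new code: atomic_andnot(),
		 * i.e. AND with the complement of the mask. */
		atomic_fetch_and(&nohz_flags, ~NOHZ_BALANCE_KICK);
	}

	int main(void)
	{
		printf("first kick needed: %d\n", kick_needed());	/* 1 */
		printf("second kick needed: %d\n", kick_needed());	/* 0 */
		balance_done();
		printf("kick after clear: %d\n", kick_needed());	/* 1 again */
		return 0;
	}
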

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4f5eeb63ab5b..96ad1c003d74 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -583,7 +583,7 @@ static inline bool got_nohz_idle_kick(void)
 {
 	int cpu = smp_processor_id();
 
-	if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
+	if (!(atomic_read(nohz_flags(cpu)) & NOHZ_BALANCE_KICK))
 		return false;
 
 	if (idle_cpu(cpu) && !need_resched())
@@ -593,7 +593,7 @@ static inline bool got_nohz_idle_kick(void)
 	 * We can't run Idle Load Balance on this CPU for this time so we
 	 * cancel it and clear NOHZ_BALANCE_KICK
 	 */
-	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
+	atomic_andnot(NOHZ_BALANCE_KICK, nohz_flags(cpu));
 	return false;
 }
 
@@ -6074,7 +6074,7 @@ void __init sched_init(void)
 		rq_attach_root(rq, &def_root_domain);
 #ifdef CONFIG_NO_HZ_COMMON
 		rq->last_load_update_tick = jiffies;
-		rq->nohz_flags = 0;
+		atomic_set(&rq->nohz_flags, 0);
 #endif
 #endif /* CONFIG_SMP */
 		hrtick_rq_init(rq);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 097db34d5ba2..5d150478dd58 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9072,6 +9072,7 @@ static inline int find_new_ilb(void)
  */
 static void nohz_balancer_kick(void)
 {
+	unsigned int flags;
 	int ilb_cpu;
 
 	nohz.next_balance++;
@@ -9081,7 +9082,8 @@ static void nohz_balancer_kick(void)
 	if (ilb_cpu >= nr_cpu_ids)
 		return;
 
-	if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
+	flags = atomic_fetch_or(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu));
+	if (flags & NOHZ_BALANCE_KICK)
 		return;
 	/*
 	 * Use smp_send_reschedule() instead of resched_cpu().
@@ -9095,7 +9097,9 @@ static void nohz_balancer_kick(void)
 
 void nohz_balance_exit_idle(unsigned int cpu)
 {
-	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
+	unsigned int flags = atomic_read(nohz_flags(cpu));
+
+	if (unlikely(flags & NOHZ_TICK_STOPPED)) {
 		/*
 		 * Completely isolated CPUs don't ever set, so we must test.
 		 */
@@ -9103,7 +9107,8 @@ void nohz_balance_exit_idle(unsigned int cpu)
 			cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
 			atomic_dec(&nohz.nr_cpus);
 		}
-		clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
+
+		atomic_andnot(NOHZ_TICK_STOPPED, nohz_flags(cpu));
 	}
 }
 
@@ -9155,7 +9160,7 @@ void nohz_balance_enter_idle(int cpu)
 	if (!housekeeping_cpu(cpu, HK_FLAG_SCHED))
 		return;
 
-	if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
+	if (atomic_read(nohz_flags(cpu)) & NOHZ_TICK_STOPPED)
 		return;
 
 	/* If we're a completely isolated CPU, we don't play: */
@@ -9164,7 +9169,7 @@ void nohz_balance_enter_idle(int cpu)
 
 	cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
 	atomic_inc(&nohz.nr_cpus);
-	set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
+	atomic_or(NOHZ_TICK_STOPPED, nohz_flags(cpu));
 }
 #endif
 
@@ -9302,8 +9307,10 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
 	unsigned long next_balance = jiffies + 60*HZ;
 	int update_next_balance = 0;
 
-	if (idle != CPU_IDLE ||
-	    !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
+	if (!(atomic_read(nohz_flags(this_cpu)) & NOHZ_BALANCE_KICK))
+		return;
+
+	if (idle != CPU_IDLE)
 		goto end;
 
 	for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
@@ -9349,7 +9356,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
 	if (likely(update_next_balance))
 		nohz.next_balance = next_balance;
 end:
-	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
+	atomic_andnot(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
 }
 
 /*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 23ba4dd76ac4..d98e761b962f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -763,7 +763,7 @@ struct rq {
 #ifdef CONFIG_SMP
 	unsigned long		last_load_update_tick;
 #endif /* CONFIG_SMP */
-	unsigned long		nohz_flags;
+	atomic_t nohz_flags;
 #endif /* CONFIG_NO_HZ_COMMON */
 
 	/* capture load from *all* tasks on this CPU: */
@@ -2034,10 +2034,11 @@ extern void cfs_bandwidth_usage_inc(void);
 extern void cfs_bandwidth_usage_dec(void);
 
 #ifdef CONFIG_NO_HZ_COMMON
-enum rq_nohz_flag_bits {
-	NOHZ_TICK_STOPPED,
-	NOHZ_BALANCE_KICK,
-};
+#define NOHZ_TICK_STOPPED_BIT	0
+#define NOHZ_BALANCE_KICK_BIT	1
+
+#define NOHZ_TICK_STOPPED	BIT(NOHZ_TICK_STOPPED_BIT)
+#define NOHZ_BALANCE_KICK	BIT(NOHZ_BALANCE_KICK_BIT)
 
 #define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
 