- sched-store-weighted-load-on-up.patch removed from -mm tree

The patch titled
     sched: store weighted load on up
has been removed from the -mm tree.  Its filename is
     sched-store-weighted-load-on-up.patch
This patch was dropped because it was folded into sched-implement-smpnice.patch

------------------------------------------------------
Subject: sched: store weighted load on up
From: Con Kolivas <kernel@xxxxxxxxxxx>


Modify the smp nice code to store load_weight on uniprocessor builds as well,
so that relative niceness on a single CPU can be assessed.  Minor cleanups,
and uninline set_load_weight().

Signed-off-by: Con Kolivas <kernel@xxxxxxxxxxx>
Cc: Peter Williams <pwil3058@xxxxxxxxxxxxxx>
Acked-by: Ingo Molnar <mingo@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxx>
---

 include/linux/sched.h |    4 ++--
 kernel/sched.c        |   24 ++++++------------------
 2 files changed, 8 insertions(+), 20 deletions(-)

diff -puN include/linux/sched.h~sched-store-weighted-load-on-up include/linux/sched.h
--- devel/include/linux/sched.h~sched-store-weighted-load-on-up	2006-06-09 15:22:29.000000000 -0700
+++ devel-akpm/include/linux/sched.h	2006-06-09 15:22:29.000000000 -0700
@@ -591,9 +591,9 @@ enum idle_type
 /*
  * sched-domains (multiprocessor balancing) declarations:
  */
-#ifdef CONFIG_SMP
 #define SCHED_LOAD_SCALE	128UL	/* increase resolution of load */
 
+#ifdef CONFIG_SMP
 #define SD_LOAD_BALANCE		1	/* Do load balancing on this domain. */
 #define SD_BALANCE_NEWIDLE	2	/* Balance when about to become idle */
 #define SD_BALANCE_EXEC		4	/* Balance on exec */
@@ -750,8 +750,8 @@ struct task_struct {
 #ifdef __ARCH_WANT_UNLOCKED_CTXSW
 	int oncpu;
 #endif
-	int load_weight;	/* for load balancing purposes */
 #endif
+	int load_weight;	/* for niceness load balancing purposes */
 	int prio, static_prio;
 	struct list_head run_list;
 	prio_array_t *array;
diff -puN kernel/sched.c~sched-store-weighted-load-on-up kernel/sched.c
--- devel/kernel/sched.c~sched-store-weighted-load-on-up	2006-06-09 15:22:29.000000000 -0700
+++ devel-akpm/kernel/sched.c	2006-06-09 15:22:29.000000000 -0700
@@ -169,12 +169,12 @@
  */
 
 #define SCALE_PRIO(x, prio) \
-	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO/2), MIN_TIMESLICE)
+	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)
 
 static unsigned int static_prio_timeslice(int static_prio)
 {
 	if (static_prio < NICE_TO_PRIO(0))
-		return SCALE_PRIO(DEF_TIMESLICE*4, static_prio);
+		return SCALE_PRIO(DEF_TIMESLICE * 4, static_prio);
 	else
 		return SCALE_PRIO(DEF_TIMESLICE, static_prio);
 }
@@ -214,8 +214,8 @@ struct runqueue {
 	 * remote CPUs use both these fields when doing load calculation.
 	 */
 	unsigned long nr_running;
-#ifdef CONFIG_SMP
 	unsigned long raw_weighted_load;
+#ifdef CONFIG_SMP
 	unsigned long cpu_load[3];
 #endif
 	unsigned long long nr_switches;
@@ -694,7 +694,6 @@ static int effective_prio(task_t *p)
 	return prio;
 }
 
-#ifdef CONFIG_SMP
 /*
  * To aid in avoiding the subversion of "niceness" due to uneven distribution
  * of tasks with abnormal "nice" values across CPUs the contribution that
@@ -717,9 +716,10 @@ static int effective_prio(task_t *p)
 #define RTPRIO_TO_LOAD_WEIGHT(rp) \
 	(PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp))
 
-static inline void set_load_weight(task_t *p)
+static void set_load_weight(task_t *p)
 {
 	if (rt_task(p)) {
+#ifdef CONFIG_SMP
 		if (p == task_rq(p)->migration_thread)
 			/*
 			 * The migration thread does the actual balancing.
@@ -728,6 +728,7 @@ static inline void set_load_weight(task_
 			 */
 			p->load_weight = 0;
 		else
+#endif
 			p->load_weight = RTPRIO_TO_LOAD_WEIGHT(p->rt_priority);
 	} else
 		p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio);
@@ -742,19 +743,6 @@ static inline void dec_raw_weighted_load
 {
 	rq->raw_weighted_load -= p->load_weight;
 }
-#else
-static inline void set_load_weight(task_t *p)
-{
-}
-
-static inline void inc_raw_weighted_load(runqueue_t *rq, const task_t *p)
-{
-}
-
-static inline void dec_raw_weighted_load(runqueue_t *rq, const task_t *p)
-{
-}
-#endif
 
 static inline void inc_nr_running(task_t *p, runqueue_t *rq)
 {
_
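For readers following the weighting scheme, below is a minimal standalone
sketch (ordinary userspace C, not kernel code) of how a task's nice value
turns into the load_weight that gets added to a runqueue's
raw_weighted_load.  The scale_prio()/static_prio_timeslice() parts mirror
the hunk above; the LOAD_WEIGHT() and PRIO_TO_LOAD_WEIGHT() definitions
live in sched-implement-smpnice.patch and are *assumed* here to scale the
task's timeslice so that a nice-0 task contributes exactly
SCHED_LOAD_SCALE.  The timeslice constants are illustrative millisecond
values rather than the kernel's HZ-derived ones.

/* weight_sketch.c -- standalone illustration, not kernel code. */
#include <stdio.h>

/* Illustrative millisecond values; the kernel derives these from HZ. */
#define MIN_TIMESLICE		5
#define DEF_TIMESLICE		100

#define SCHED_LOAD_SCALE	128UL	/* as in the include/linux/sched.h hunk */

#define MAX_RT_PRIO		100
#define MAX_USER_PRIO		40
#define MAX_PRIO		(MAX_RT_PRIO + MAX_USER_PRIO)
#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)

/* Mirrors SCALE_PRIO() / static_prio_timeslice() from the patch above. */
static unsigned int scale_prio(unsigned int x, int prio)
{
	unsigned int ts = x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2);
	return ts > MIN_TIMESLICE ? ts : MIN_TIMESLICE;
}

static unsigned int static_prio_timeslice(int static_prio)
{
	if (static_prio < NICE_TO_PRIO(0))
		return scale_prio(DEF_TIMESLICE * 4, static_prio);
	return scale_prio(DEF_TIMESLICE, static_prio);
}

/*
 * Assumed definitions (they come from sched-implement-smpnice.patch and
 * are not visible in this hunk): a task's weight is its timeslice scaled
 * so that a nice-0 task contributes SCHED_LOAD_SCALE to raw_weighted_load.
 */
#define LOAD_WEIGHT(lp)		(((lp) * SCHED_LOAD_SCALE) / DEF_TIMESLICE)
#define PRIO_TO_LOAD_WEIGHT(prio)	LOAD_WEIGHT(static_prio_timeslice(prio))

int main(void)
{
	int nice;

	for (nice = -20; nice <= 19; nice++)
		printf("nice %3d  static_prio %3d  load_weight %4lu\n",
		       nice, NICE_TO_PRIO(nice),
		       PRIO_TO_LOAD_WEIGHT(NICE_TO_PRIO(nice)));
	return 0;
}

Under these assumptions a nice -20 task weighs 1024, a nice 0 task 128
(SCHED_LOAD_SCALE) and a nice 19 task 6 -- the relative-niceness
information that, with this patch, is now kept on uniprocessor builds too.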

Patches currently in -mm which might be from kernel@xxxxxxxxxxx are

origin.patch
sched-fix-smt-nice-lock-contention-and-optimization.patch
sched-fix-interactive-ceiling-code.patch
sched-implement-smpnice.patch
sched-store-weighted-load-on-up.patch
sched-add-discrete-weighted-cpu-load-function.patch
sched-prevent-high-load-weight-tasks-suppressing-balancing.patch
sched-improve-stability-of-smpnice-load-balancing.patch
sched-mc-smt-power-savings-sched-policy.patch
sched-uninline-task_rq_lock.patch
sched-add-above-background-load-function.patch
mm-implement-swap-prefetching.patch
swap_prefetch-vs-zoned-counters.patch

