Commit-ID:  317d359df95dd0cb7653d09b7fc513770590cf85
Gitweb:     https://git.kernel.org/tip/317d359df95dd0cb7653d09b7fc513770590cf85
Author:     Peter Zijlstra <peterz@xxxxxxxxxxxxx>
AuthorDate: Thu, 5 Apr 2018 10:05:21 +0200
Committer:  Ingo Molnar <mingo@xxxxxxxxxx>
CommitDate: Thu, 5 Apr 2018 10:56:16 +0200

sched/core: Force proper alignment of 'struct util_est'

For some as yet not understood reason, Tony gets unaligned access
traps on IA64 because of:

  struct util_est ue = READ_ONCE(p->se.avg.util_est);

and:

  WRITE_ONCE(p->se.avg.util_est, ue);

introduced by commit:

  d519329f72a6 ("sched/fair: Update util_est only on util_avg updates")

Normally those two fields should end up on an 8-byte aligned location,
but UP and RANDSTRUCT can mess that up, so enforce the alignment
explicitly.

Also make the alignment on sched_avg unconditional, as it is really
about data locality, not false-sharing.

With or without this patch the layout for sched_avg on an
ia64-defconfig build looks like:

  $ pahole -EC sched_avg ia64-defconfig/kernel/sched/core.o
  die__process_function: tag not supported (INVALID)!
  struct sched_avg {
          /* typedef u64 */ long long unsigned int last_update_time;  /*     0     8 */
          /* typedef u64 */ long long unsigned int load_sum;          /*     8     8 */
          /* typedef u64 */ long long unsigned int runnable_load_sum; /*    16     8 */
          /* typedef u32 */ unsigned int           util_sum;          /*    24     4 */
          /* typedef u32 */ unsigned int           period_contrib;    /*    28     4 */
          long unsigned int                        load_avg;          /*    32     8 */
          long unsigned int                        runnable_load_avg; /*    40     8 */
          long unsigned int                        util_avg;          /*    48     8 */
          struct util_est {
                  unsigned int                     enqueued;          /*    56     4 */
                  unsigned int                     ewma;              /*    60     4 */
          } util_est;                                                 /*    56     8 */
          /* --- cacheline 1 boundary (64 bytes) --- */

          /* size: 64, cachelines: 1, members: 9 */
  };

Reported-and-Tested-by: Tony Luck <tony.luck@xxxxxxxxx>
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Cc: Arnaldo Carvalho de Melo <acme@xxxxxxxxxx>
Cc: Frederic Weisbecker <frederic@xxxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Mel Gorman <mgorman@xxxxxxxxxxxxxxxxxxx>
Cc: Norbert Manthey <nmanthey@xxxxxxxxx>
Cc: Patrick Bellasi <patrick.bellasi@xxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Tony <tony.luck@xxxxxxxxx>
Cc: Vincent Guittot <vincent.guittot@xxxxxxxxxx>
Fixes: d519329f72a6 ("sched/fair: Update util_est only on util_avg updates")
Link: http://lkml.kernel.org/r/20180405080521.GG4129@xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
---
 include/linux/sched.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index f228c6033832..b3d697f3b573 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -300,7 +300,7 @@ struct util_est {
 	unsigned int			enqueued;
 	unsigned int			ewma;
 #define UTIL_EST_WEIGHT_SHIFT		2
-};
+} __attribute__((__aligned__(sizeof(u64))));
 
 /*
  * The load_avg/util_avg accumulates an infinite geometric series
@@ -364,7 +364,7 @@ struct sched_avg {
 	unsigned long			runnable_load_avg;
 	unsigned long			util_avg;
 	struct util_est			util_est;
-};
+} ____cacheline_aligned;
 
 struct sched_statistics {
#ifdef CONFIG_SCHEDSTATS
@@ -435,7 +435,7 @@ struct sched_entity {
 	 * Put into separate cache line so it does not
 	 * collide with read-mostly values above.
 	 */
-	struct sched_avg		avg ____cacheline_aligned_in_smp;
+	struct sched_avg		avg;
 #endif
 };
 
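As background for the first hunk: READ_ONCE()/WRITE_ONCE() on the
8-byte util_est pair compile down to a single 64-bit load/store, and
IA64 traps when such an access is not 8-byte aligned. Below is a
minimal, standalone userspace sketch of what the attribute buys; this
is not kernel code, and the util_est_*/mock_avg_* types with their
12 bytes of leading members are hypothetical stand-ins chosen purely
to demonstrate the layout difference:

  /* Build with: gcc -std=c11 -Wall align_sketch.c */
  #include <stdalign.h>
  #include <stddef.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Before the patch: alignment is whatever the members need (4 bytes). */
  struct util_est_plain {
          unsigned int enqueued;
          unsigned int ewma;
  };

  /* After the patch: force natural 64-bit alignment, mirroring
   * __attribute__((__aligned__(sizeof(u64)))) from the diff above. */
  struct util_est_forced {
          unsigned int enqueued;
          unsigned int ewma;
  } __attribute__((__aligned__(sizeof(uint64_t))));

  /* Hypothetical containers with 12 leading bytes, so the plain
   * variant lands at offset 12, which is not 8-byte aligned. */
  struct mock_avg_plain  { unsigned int a, b, c; struct util_est_plain  ue; };
  struct mock_avg_forced { unsigned int a, b, c; struct util_est_forced ue; };

  int main(void)
  {
          /* The attribute turns the 8-byte guarantee into a compile-time
           * property instead of a layout accident. */
          _Static_assert(alignof(struct util_est_forced) == sizeof(uint64_t),
                         "util_est must be naturally aligned for 64-bit access");

          printf("plain:  align=%zu offset=%zu\n",
                 alignof(struct util_est_plain),
                 offsetof(struct mock_avg_plain, ue));   /* 4, 12 */
          printf("forced: align=%zu offset=%zu\n",
                 alignof(struct util_est_forced),
                 offsetof(struct mock_avg_forced, ue));  /* 8, 16 */
          return 0;
  }

Under GCC or Clang this prints align=4/offset=12 for the plain variant
versus align=8/offset=16 for the forced one: with the attribute the
compiler must pad the container so the pair always sits on an 8-byte
boundary, no matter what the preceding members (or a RANDSTRUCT
reordering of them) would otherwise do to its offset.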