4.9-stable review patch. If anyone has any objections, please let me know.

------------------

From: Eric Dumazet <edumazet@xxxxxxxxxx>

While under a frags DDoS I noticed unfortunate false sharing between
@nelems and @params.automatic_shrinking.

Move @nelems to the end of struct rhashtable so that the first cache
line is shared between all cpus, because it is almost never dirtied.

Signed-off-by: Eric Dumazet <edumazet@xxxxxxxxxx>
Signed-off-by: David S. Miller <davem@xxxxxxxxxxxxx>
(cherry picked from commit e5d672a0780d9e7118caad4c171ec88b8299398d)
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 include/linux/rhashtable.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -138,7 +138,6 @@ struct rhashtable_params {
 /**
  * struct rhashtable - Hash table handle
  * @tbl: Bucket table
- * @nelems: Number of elements in table
  * @key_len: Key length for hashfn
  * @elasticity: Maximum chain length before rehash
  * @p: Configuration parameters
@@ -146,10 +145,10 @@ struct rhashtable_params {
  * @run_work: Deferred worker to expand/shrink asynchronously
  * @mutex: Mutex to protect current/future table swapping
  * @lock: Spin lock to protect walker list
+ * @nelems: Number of elements in table
  */
 struct rhashtable {
 	struct bucket_table __rcu *tbl;
-	atomic_t nelems;
 	unsigned int key_len;
 	unsigned int elasticity;
 	struct rhashtable_params p;
@@ -157,6 +156,7 @@ struct rhashtable {
 	struct work_struct run_work;
 	struct mutex mutex;
 	spinlock_t lock;
+	atomic_t nelems;
 };
 
 /**
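
For illustration only, not part of the patch: a minimal user-space C sketch
of the layout principle the commit applies. The struct names and fields here
are hypothetical stand-ins for struct rhashtable, and C11 atomic_uint stands
in for the kernel's atomic_t; offsetof() shows how the write-hot counter
either shares or leaves the first 64-byte cache line.

/*
 * Sketch (assumptions: 64-bit ABI, 64-byte cache lines, hypothetical
 * struct names). A counter that is written on every insert/delete,
 * placed next to read-mostly configuration fields, dirties their cache
 * line on every update; moving it to the end keeps that line clean.
 */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

/* Before: the hot counter shares the first cache line with read-mostly fields. */
struct table_before {
	void *tbl;               /* read-mostly: current bucket table */
	atomic_uint nelems;      /* write-hot: bumped on every insert/delete */
	unsigned int key_len;    /* read-mostly configuration */
	unsigned int elasticity; /* read-mostly configuration */
};

/* After: read-mostly fields first, the hot counter pushed to the end. */
struct table_after {
	void *tbl;
	unsigned int key_len;
	unsigned int elasticity;
	atomic_uint nelems;
};

int main(void)
{
	/* On a typical 64-bit ABI the "before" layout puts nelems at
	 * offset 8, inside the same cache line as the read-mostly
	 * fields; the "after" layout moves it past them. */
	printf("before: nelems at %zu, key_len at %zu\n",
	       offsetof(struct table_before, nelems),
	       offsetof(struct table_before, key_len));
	printf("after:  nelems at %zu, key_len at %zu\n",
	       offsetof(struct table_after, nelems),
	       offsetof(struct table_after, key_len));
	return 0;
}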