From: Alex Belits <abelits@xxxxxxxxxxx>

With the existing implementation of store_rps_map(), packets are queued
in the receive path on the backlog queues of other CPUs irrespective of
whether those CPUs are isolated or not. This could add a latency
overhead to any RT workload that is running on the same CPU.

This patch ensures that store_rps_map() only uses available
housekeeping CPUs for storing the rps_map.

Signed-off-by: Alex Belits <abelits@xxxxxxxxxxx>
Signed-off-by: Nitesh Narayan Lal <nitesh@xxxxxxxxxx>
---
 net/core/net-sysfs.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index e353b822bb15..16e433287191 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -11,6 +11,7 @@
 #include <linux/if_arp.h>
 #include <linux/slab.h>
 #include <linux/sched/signal.h>
+#include <linux/sched/isolation.h>
 #include <linux/nsproxy.h>
 #include <net/sock.h>
 #include <net/net_namespace.h>
@@ -741,7 +742,7 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
 {
 	struct rps_map *old_map, *map;
 	cpumask_var_t mask;
-	int err, cpu, i;
+	int err, cpu, i, hk_flags;
 	static DEFINE_MUTEX(rps_map_mutex);
 
 	if (!capable(CAP_NET_ADMIN))
@@ -756,6 +757,13 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
 		return err;
 	}
 
+	hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
+	cpumask_and(mask, mask, housekeeping_cpumask(hk_flags));
+	if (cpumask_weight(mask) == 0) {
+		free_cpumask_var(mask);
+		return -EINVAL;
+	}
+
 	map = kzalloc(max_t(unsigned int,
 			    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
 		      GFP_KERNEL);
-- 
2.18.4
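
For readers less familiar with the isolation API: the new hunk intersects the
user-supplied RPS CPU mask with the housekeeping cpumask and rejects the sysfs
write when nothing is left. Below is a minimal userspace sketch of that same
check, using the glibc CPU_* macros as stand-ins for the kernel cpumask
helpers; the restrict_to_housekeeping() helper and the example mask contents
are made up for illustration and are not part of this patch.

/*
 * Userspace sketch only; the kernel obtains the housekeeping mask from
 * housekeeping_cpumask(HK_FLAG_DOMAIN | HK_FLAG_WQ).
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

/*
 * Keep only the housekeeping CPUs from the requested mask; fail
 * (analogous to returning -EINVAL in store_rps_map()) if nothing remains.
 */
static int restrict_to_housekeeping(cpu_set_t *requested,
				    cpu_set_t *housekeeping)
{
	cpu_set_t result;

	CPU_AND(&result, requested, housekeeping);
	if (CPU_COUNT(&result) == 0)
		return -1;

	*requested = result;
	return 0;
}

int main(void)
{
	cpu_set_t requested, housekeeping;

	CPU_ZERO(&requested);
	CPU_ZERO(&housekeeping);

	/* Example: CPUs 0-1 are housekeeping, the user asks for CPUs 2-3. */
	CPU_SET(0, &housekeeping);
	CPU_SET(1, &housekeeping);
	CPU_SET(2, &requested);
	CPU_SET(3, &requested);

	if (restrict_to_housekeeping(&requested, &housekeeping))
		printf("rejected: no housekeeping CPU in the requested mask\n");
	else
		printf("accepted: %d CPU(s) remain\n", CPU_COUNT(&requested));

	return 0;
}

Note that, as in the patch, isolated CPUs present in the requested mask are
silently dropped; the write only fails when no housekeeping CPU remains.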