There are various mechanisms besides regular workqueue selection that choose which CPUs jobs run on. CPU isolation normally does not prevent those jobs from running on isolated CPUs. When task isolation is enabled, those jobs should be limited to housekeeping CPUs.

Signed-off-by: Alex Belits <abelits@xxxxxxxxxxx>
---
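Reviewer note: the hunks below all apply the same pattern, sketched here as a
minimal illustration. The helper name example_run_on_housekeeping_cpu() is
hypothetical, and HK_FLAG_DOMAIN/HK_FLAG_WQ together with CONFIG_TASK_ISOLATION
are assumed from earlier patches in this series; the actual changes simply
narrow the candidate cpumask with housekeeping_cpumask() before a CPU is
chosen.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/sched/isolation.h>
#include <linux/workqueue.h>

/* Hypothetical helper: run a one-off job on a housekeeping CPU. */
static long example_run_on_housekeeping_cpu(long (*fn)(void *), void *arg)
{
	const struct cpumask *mask;
	int cpu;

#ifdef CONFIG_TASK_ISOLATION
	/* Limit candidates to CPUs that still take housekeeping work. */
	mask = housekeeping_cpumask(HK_FLAG_DOMAIN | HK_FLAG_WQ);
#else
	mask = cpu_online_mask;
#endif
	/* Pick any candidate that is currently online. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
	if (cpu >= nr_cpu_ids)
		return -ENODEV;

	return work_on_cpu(cpu, fn, arg);
}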
 drivers/pci/pci-driver.c |  9 +++++++
 lib/cpumask.c            | 53 +++++++++++++++++++++++++---------------
 net/core/net-sysfs.c     |  9 +++++++
 3 files changed, 51 insertions(+), 20 deletions(-)

diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 0454ca0e4e3f..cb872cdd1782 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -12,6 +12,7 @@
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/sched/isolation.h>
 #include <linux/cpu.h>
 #include <linux/pm_runtime.h>
 #include <linux/suspend.h>
@@ -332,6 +333,9 @@ static bool pci_physfn_is_probed(struct pci_dev *dev)
 static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
 			  const struct pci_device_id *id)
 {
+#ifdef CONFIG_TASK_ISOLATION
+	int hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
+#endif
 	int error, node, cpu;
 	struct drv_dev_and_id ddi = { drv, dev, id };
 
@@ -353,7 +357,12 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
 	    pci_physfn_is_probed(dev))
 		cpu = nr_cpu_ids;
 	else
+#ifdef CONFIG_TASK_ISOLATION
+		cpu = cpumask_any_and(cpumask_of_node(node),
+				      housekeeping_cpumask(hk_flags));
+#else
 		cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
+#endif
 
 	if (cpu < nr_cpu_ids)
 		error = work_on_cpu(cpu, local_pci_probe, &ddi);
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 0cb672eb107c..dcbc30a47600 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -6,6 +6,7 @@
 #include <linux/export.h>
 #include <linux/memblock.h>
 #include <linux/numa.h>
+#include <linux/sched/isolation.h>
 
 /**
  * cpumask_next - get the next cpu in a cpumask
@@ -205,28 +206,40 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
  */
 unsigned int cpumask_local_spread(unsigned int i, int node)
 {
-	int cpu;
+	const struct cpumask *mask;
+	int cpu, m, n;
+
+#ifdef CONFIG_TASK_ISOLATION
+	mask = housekeeping_cpumask(HK_FLAG_DOMAIN);
+	m = cpumask_weight(mask);
+#else
+	mask = cpu_online_mask;
+	m = num_online_cpus();
+#endif
 
 	/* Wrap: we always want a cpu. */
-	i %= num_online_cpus();
-
-	if (node == NUMA_NO_NODE) {
-		for_each_cpu(cpu, cpu_online_mask)
-			if (i-- == 0)
-				return cpu;
-	} else {
-		/* NUMA first. */
-		for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
-			if (i-- == 0)
-				return cpu;
-
-		for_each_cpu(cpu, cpu_online_mask) {
-			/* Skip NUMA nodes, done above. */
-			if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
-				continue;
-
-			if (i-- == 0)
-				return cpu;
+	n = i % m;
+
+	while (m-- > 0) {
+		if (node == NUMA_NO_NODE) {
+			for_each_cpu(cpu, mask)
+				if (n-- == 0)
+					return cpu;
+		} else {
+			/* NUMA first. */
+			for_each_cpu_and(cpu, cpumask_of_node(node), mask)
+				if (n-- == 0)
+					return cpu;
+
+			for_each_cpu(cpu, mask) {
+				/* Skip NUMA nodes, done above. */
+				if (cpumask_test_cpu(cpu,
+						     cpumask_of_node(node)))
+					continue;
+
+				if (n-- == 0)
+					return cpu;
+			}
 		}
 	}
 	BUG();
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 4c826b8bf9b1..253758f102d9 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -11,6 +11,7 @@
 #include <linux/if_arp.h>
 #include <linux/slab.h>
 #include <linux/sched/signal.h>
+#include <linux/sched/isolation.h>
 #include <linux/nsproxy.h>
 #include <net/sock.h>
 #include <net/net_namespace.h>
@@ -725,6 +726,14 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
 		return err;
 	}
 
+#ifdef CONFIG_TASK_ISOLATION
+	cpumask_and(mask, mask, housekeeping_cpumask(HK_FLAG_DOMAIN));
+	if (cpumask_weight(mask) == 0) {
+		free_cpumask_var(mask);
+		return -EINVAL;
+	}
+#endif
+
 	map = kzalloc(max_t(unsigned int,
 			    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
 		      GFP_KERNEL);
-- 
2.20.1