group_cpus_evenly() distributes all present CPUs into groups. This
ignores the isolcpus configuration and assigns isolated CPUs to the
groups.

Make group_cpus_evenly() aware of the isolcpus configuration and use
the housekeeping CPU mask as the base for distributing the available
CPUs into groups.

Reviewed-by: Christoph Hellwig <hch@xxxxxx>
Reviewed-by: Hannes Reinecke <hare@xxxxxxx>
Reviewed-by: Sagi Grimberg <sagi@xxxxxxxxxxx>
Signed-off-by: Daniel Wagner <wagi@xxxxxxxxxx>
---
 lib/group_cpus.c | 77 ++++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 75 insertions(+), 2 deletions(-)

diff --git a/lib/group_cpus.c b/lib/group_cpus.c
index 73da83ca2c45347a3a443d42d4f16801a47effd5..927e4ed634d0d9ca14235c977fc53d6f5f649396 100644
--- a/lib/group_cpus.c
+++ b/lib/group_cpus.c
@@ -8,6 +8,7 @@
 #include <linux/cpu.h>
 #include <linux/sort.h>
 #include <linux/group_cpus.h>
+#include <linux/sched/isolation.h>
 
 #ifdef CONFIG_SMP
 
@@ -330,7 +331,7 @@ static int __group_cpus_evenly(unsigned int startgrp, unsigned int numgrps,
 }
 
 /**
- * group_cpus_evenly - Group all CPUs evenly per NUMA/CPU locality
+ * group_possible_cpus_evenly - Group all CPUs evenly per NUMA/CPU locality
  * @numgrps: number of groups
  *
  * Return: cpumask array if successful, NULL otherwise. And each element
@@ -345,7 +346,7 @@ static int __group_cpus_evenly(unsigned int startgrp, unsigned int numgrps,
  * We guarantee in the resulted grouping that all CPUs are covered, and
  * no same CPU is assigned to multiple groups
  */
-struct cpumask *group_cpus_evenly(unsigned int *numgrps)
+static struct cpumask *group_possible_cpus_evenly(unsigned int *numgrps)
 {
 	unsigned int curgrp = 0, nr_present = 0, nr_others = 0, nr_grps;
 	cpumask_var_t *node_to_cpumask;
@@ -426,6 +427,78 @@ struct cpumask *group_cpus_evenly(unsigned int *numgrps)
 	*numgrps = nr_present + nr_others;
 	return masks;
 }
+
+/**
+ * group_mask_cpus_evenly - Group all CPUs evenly per NUMA/CPU locality
+ * @numgrps: number of groups
+ * @cpu_mask: CPUs to consider for the grouping
+ *
+ * Return: cpumask array if successful, NULL otherwise. And each element
+ * includes CPUs assigned to this group.
+ *
+ * Try to put close CPUs from viewpoint of CPU and NUMA locality into
+ * same group. Only the CPUs in @cpu_mask are distributed on the groups.
+ */
+static struct cpumask *group_mask_cpus_evenly(unsigned int *numgrps,
+					      const struct cpumask *cpu_mask)
+{
+	cpumask_var_t *node_to_cpumask;
+	cpumask_var_t nmsk;
+	unsigned int nr_grps;
+	int ret = -ENOMEM;
+	struct cpumask *masks = NULL;
+
+	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
+		return NULL;
+
+	node_to_cpumask = alloc_node_to_cpumask();
+	if (!node_to_cpumask)
+		goto fail_nmsk;
+
+	nr_grps = *numgrps;
+	masks = kcalloc(nr_grps, sizeof(*masks), GFP_KERNEL);
+	if (!masks)
+		goto fail_node_to_cpumask;
+
+	build_node_to_cpumask(node_to_cpumask);
+
+	ret = __group_cpus_evenly(0, nr_grps, node_to_cpumask, cpu_mask, nmsk,
+				  masks);
+
+fail_node_to_cpumask:
+	free_node_to_cpumask(node_to_cpumask);
+
+fail_nmsk:
+	free_cpumask_var(nmsk);
+	if (ret < 0) {
+		kfree(masks);
+		return NULL;
+	}
+	*numgrps = ret;
+	return masks;
+}
+
+/**
+ * group_cpus_evenly - Group all CPUs evenly per NUMA/CPU locality
+ * @numgrps: number of groups
+ *
+ * Return: cpumask array if successful, NULL otherwise.
+ *
+ * group_possible_cpus_evenly() is used for distributing the CPUs over
+ * all possible CPUs in the absence of the isolcpus command line
+ * argument. group_mask_cpus_evenly() is used when the isolcpus command
+ * line argument is used with the managed_irq option. In this case only
+ * the housekeeping CPUs are considered.
+ */
+struct cpumask *group_cpus_evenly(unsigned int *numgrps)
+{
+	if (housekeeping_enabled(HK_TYPE_MANAGED_IRQ)) {
+		return group_mask_cpus_evenly(numgrps,
+				housekeeping_cpumask(HK_TYPE_MANAGED_IRQ));
+	}
+
+	return group_possible_cpus_evenly(numgrps);
+}
 #else /* CONFIG_SMP */
 struct cpumask *group_cpus_evenly(unsigned int *numgrps)
 {
-- 
2.47.1
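
For reference, a minimal sketch of a hypothetical consumer (example_spread_queues() and its printout are made up for illustration; only the group_cpus_evenly() signature and array ownership follow from this patch). It shows the caller-visible contract: pass in the desired group count, get back a kcalloc()'d cpumask array plus the actual count, and free the array with kfree(). With isolcpus=managed_irq,<cpu-list> on the command line, the returned masks would now only contain housekeeping CPUs.

#include <linux/cpumask.h>
#include <linux/group_cpus.h>
#include <linux/printk.h>
#include <linux/slab.h>

/* Hypothetical consumer, not part of this patch. */
static int example_spread_queues(unsigned int nr_queues)
{
	unsigned int i, nr_grps = nr_queues;	/* in: wanted, out: granted */
	struct cpumask *masks;

	/*
	 * After this patch the groups are built from the housekeeping
	 * CPU mask when isolcpus=managed_irq,<cpu-list> is set.
	 */
	masks = group_cpus_evenly(&nr_grps);
	if (!masks)
		return -ENOMEM;

	for (i = 0; i < nr_grps; i++)
		pr_info("queue %u -> CPUs %*pbl\n",
			i, cpumask_pr_args(&masks[i]));

	kfree(masks);	/* caller owns the kcalloc()'d array */
	return 0;
}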