With the introduction of sched_numa_hop_mask() and
for_each_numa_hop_mask(), the affinity masks for queue vectors can be
set to prefer the CPUs closest to the NUMA node of the parent PCI
device.

Signed-off-by: Pawel Chmielewski <pawel.chmielewski@xxxxxxxxx>
---
 drivers/net/ethernet/intel/ice/ice_base.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index e864634d66bc..fd3550d15c9e 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -122,8 +122,6 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
 	if (vsi->type == ICE_VSI_VF)
 		goto out;
 	/* only set affinity_mask if the CPU is online */
-	if (cpu_online(v_idx))
-		cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
 
 	/* This will not be called in the driver load path because the netdev
 	 * will not be created yet. All other cases with register the NAPI
@@ -659,8 +657,10 @@ int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
  */
 int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
 {
+	cpumask_t *aff_mask, *last_aff_mask = cpu_none_mask;
 	struct device *dev = ice_pf_to_dev(vsi->back);
-	u16 v_idx;
+	int numa_node = dev->numa_node;
+	u16 v_idx, cpu = 0;
 	int err;
 
 	if (vsi->q_vectors[0]) {
@@ -674,6 +674,17 @@ int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
 			goto err_out;
 	}
 
+	v_idx = 0;
+	for_each_numa_hop_mask(aff_mask, numa_node) {
+		for_each_cpu_andnot(cpu, aff_mask, last_aff_mask)
+			if (v_idx < vsi->num_q_vectors) {
+				if (cpu_online(cpu))
+					cpumask_set_cpu(cpu, &vsi->q_vectors[v_idx]->affinity_mask);
+				v_idx++;
+			}
+		last_aff_mask = aff_mask;
+	}
+
 	return 0;
 
 err_out:
-- 
2.37.3
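
For context, the hop-by-hop spreading pattern the patch relies on has
the general shape sketched below. This is a minimal sketch rather than
driver code: example_spread_affinity() and its parameters are
hypothetical, and it assumes the for_each_numa_hop_mask() and
for_each_cpu_andnot() iterators, with sched_numa_hop_mask() called
under rcu_read_lock() as its kernel-doc requires.

#include <linux/cpumask.h>
#include <linux/rcupdate.h>
#include <linux/topology.h>

/* Hypothetical helper, not part of this patch: distribute one CPU per
 * entry of @masks, preferring CPUs nearest to @node.
 */
static void example_spread_affinity(struct cpumask *masks,
				    unsigned int num_vectors, int node)
{
	const struct cpumask *hop_mask, *prev = cpu_none_mask;
	unsigned int cpu, v_idx = 0;

	/* sched_numa_hop_mask(), which the iterator calls, dereferences
	 * RCU-protected data, so hold the RCU read lock across the walk.
	 */
	rcu_read_lock();
	for_each_numa_hop_mask(hop_mask, node) {
		/* Each hop mask is a superset of the previous one, so
		 * subtracting the previous mask visits every CPU at most
		 * once, nearest hop first.
		 */
		for_each_cpu_andnot(cpu, hop_mask, prev) {
			if (v_idx >= num_vectors)
				goto out;
			cpumask_set_cpu(cpu, &masks[v_idx++]);
		}
		prev = hop_mask;
	}
out:
	rcu_read_unlock();
}

Because the masks are cumulative per hop, the andnot against the
previous mask is what keeps each CPU from being assigned twice, and the
walk can stop as soon as every vector has a CPU.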