@@ -121,9 +121,6 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
 	if (vsi->type == ICE_VSI_VF)
 		goto out;
-	/* only set affinity_mask if the CPU is online */
-	if (cpu_online(v_idx))
-		cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
 
 	/* This will not be called in the driver load path because the netdev
 	 * will not be created yet. All other cases with register the NAPI
@@ -659,8 +656,10 @@ int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
  */
 int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
 {
+	cpumask_t *aff_mask, *last_aff_mask = cpu_none_mask;
 	struct device *dev = ice_pf_to_dev(vsi->back);
-	u16 v_idx;
+	int numa_node = dev->numa_node;
+	u16 v_idx, cpu = 0;
 	int err;
 
 	if (vsi->q_vectors[0]) {
@@ -674,6 +673,23 @@ int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
 			goto err_out;
 	}
 
+	v_idx = 0;
+
+	for_each_numa_hop_mask(aff_mask, numa_node) {
+		for_each_cpu_andnot(cpu, aff_mask, last_aff_mask) {
+			if (v_idx >= vsi->num_q_vectors)
+				goto out;
+
+			if (cpu_online(cpu)) {
+				cpumask_set_cpu(cpu, &vsi->q_vectors[v_idx]->affinity_mask);
+				v_idx++;
+			}
+		}
+
+		last_aff_mask = aff_mask;
+	}
+
+out:
 	return 0;
 
 err_out:
With the introduction of sched_numa_hop_mask() and
for_each_numa_hop_mask(), the affinity masks for queue vectors can be
conveniently set by preferring the CPUs that are closest to the NUMA
node of the parent PCI device.

Signed-off-by: Pawel Chmielewski <pawel.chmielewski@intel.com>
---
Changes since v1:
* Removed obsolete comment
* Inverted the condition for escaping the loop
* Increment v_idx only when the CPU is available
---
 drivers/net/ethernet/intel/ice/ice_base.c | 24 +++++++++++++++++++----
 1 file changed, 20 insertions(+), 4 deletions(-)
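For illustration, the assignment pattern added in ice_vsi_alloc_q_vectors()
can be sketched as a self-contained helper. This is a minimal kernel-style
sketch, not part of the patch: spread_affinity(), masks and num_masks are
hypothetical stand-ins for the driver's q_vector bookkeeping, while
for_each_numa_hop_mask() and for_each_cpu_andnot() are the real iterators
from <linux/topology.h> and <linux/cpumask.h>.

#include <linux/cpumask.h>
#include <linux/topology.h>

/* Hypothetical helper: spread @num_masks affinity masks over the online
 * CPUs, preferring the CPUs closest to @numa_node.
 */
static void spread_affinity(cpumask_t *masks, u16 num_masks, int numa_node)
{
	const struct cpumask *aff_mask, *last_aff_mask = cpu_none_mask;
	u16 v_idx = 0;
	unsigned int cpu;

	for_each_numa_hop_mask(aff_mask, numa_node) {
		/* Visit only the CPUs not already seen on a closer hop */
		for_each_cpu_andnot(cpu, aff_mask, last_aff_mask) {
			if (v_idx >= num_masks)
				return;

			/* Skip CPUs that are present but offline */
			if (cpu_online(cpu))
				cpumask_set_cpu(cpu, &masks[v_idx++]);
		}

		last_aff_mask = aff_mask;
	}
}

Each hop mask returned by for_each_numa_hop_mask() is a superset of the
previous one (all CPUs within N hops of the node), so taking the andnot
against the previous hop's mask visits every CPU at most once, nearest
first.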