RE: [EXT] [PATCH V5 net-next] net: mana: Assigning IRQ affinity on HT cores

>-----Original Message-----
>From: Suman Ghosh <sumang@xxxxxxxxxxx>
>Sent: Tuesday, December 12, 2023 11:48 PM
>To: Souradeep Chakrabarti <schakrabarti@xxxxxxxxxxxxxxxxxxx>; KY Srinivasan
><kys@xxxxxxxxxxxxx>; Haiyang Zhang <haiyangz@xxxxxxxxxxxxx>;
>wei.liu@xxxxxxxxxx; Dexuan Cui <decui@xxxxxxxxxxxxx>; davem@xxxxxxxxxxxxx;
>edumazet@xxxxxxxxxx; kuba@xxxxxxxxxx; pabeni@xxxxxxxxxx; Long Li
><longli@xxxxxxxxxxxxx>; yury.norov@xxxxxxxxx; leon@xxxxxxxxxx;
>cai.huoqing@xxxxxxxxx; ssengar@xxxxxxxxxxxxxxxxxxx; vkuznets@xxxxxxxxxx;
>tglx@xxxxxxxxxxxxx; linux-hyperv@xxxxxxxxxxxxxxx; netdev@xxxxxxxxxxxxxxx; linux-
>kernel@xxxxxxxxxxxxxxx; linux-rdma@xxxxxxxxxxxxxxx
>Cc: Souradeep Chakrabarti <schakrabarti@xxxxxxxxxxxxx>; Paul Rosswurm
><paulros@xxxxxxxxxxxxx>
>Subject: [EXTERNAL] RE: [EXT] [PATCH V5 net-next] net: mana: Assigning IRQ
>affinity on HT cores
>
>
>Hi Souradeep,
>
>Please find a couple of comments inline.
>
>>+
>>+      if (!zalloc_cpumask_var(&curr, GFP_KERNEL)) {
>>+              err = -ENOMEM;
>>+              return err;
>>+      }
>>+      if (!zalloc_cpumask_var(&cpus, GFP_KERNEL)) {
>[Suman] memory leak here, should free 'curr'.
This will be taken care of in the next version.
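For reference, a rough sketch of one way to avoid the leak (same helpers as
above; the exact shape of the error path in V6 may differ):

	if (!zalloc_cpumask_var(&curr, GFP_KERNEL))
		return -ENOMEM;
	if (!zalloc_cpumask_var(&cpus, GFP_KERNEL)) {
		/* free the first mask before bailing out */
		free_cpumask_var(curr);
		return -ENOMEM;
	}
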
>>+              err = -ENOMEM;
>>+              return err;
>>+      }
>>+
>>+      rcu_read_lock();
>>+      for_each_numa_hop_mask(next, next_node) {
>>+              cpumask_andnot(curr, next, prev);
>>+              for (w = cpumask_weight(curr), cnt = 0; cnt < w; ) {
>>+                      cpumask_copy(cpus, curr);
>>+                      for_each_cpu(cpu, cpus) {
>>+                              irq_set_affinity_and_hint(irqs[i], topology_sibling_cpumask(cpu));
>>+                              if (++i == nvec)
>>+                                      goto done;
>>+                              cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
>>+                              ++cnt;
>>+                      }
>>+              }
>>+              prev = next;
>>+      }
>>+done:
>>+      rcu_read_unlock();
>>+      free_cpumask_var(curr);
>>+      free_cpumask_var(cpus);
>>+      return err;
>>+}
>>+
>> static int mana_gd_setup_irqs(struct pci_dev *pdev)
>> {
>>-      unsigned int max_queues_per_port = num_online_cpus();
>>       struct gdma_context *gc = pci_get_drvdata(pdev);
>>+      unsigned int max_queues_per_port;
>>       struct gdma_irq_context *gic;
>>       unsigned int max_irqs, cpu;
>>-      int nvec, irq;
>>+      int start_irq_index = 1;
>>+      int nvec, *irqs, irq;
>>       int err, i = 0, j;
>>
>>+      cpus_read_lock();
>>+      max_queues_per_port = num_online_cpus();
>>       if (max_queues_per_port > MANA_MAX_NUM_QUEUES)
>>               max_queues_per_port = MANA_MAX_NUM_QUEUES;
>>
>>@@ -1261,6 +1302,14 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
>>       nvec = pci_alloc_irq_vectors(pdev, 2, max_irqs, PCI_IRQ_MSIX);
>>       if (nvec < 0)
>[Suman] cpus_read_unlock()?
Thanks for pointing this out; it will be taken care of in V6.
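As a rough sketch, the early-return path could drop the CPU hotplug lock
before returning (exact placement may differ in V6):

	nvec = pci_alloc_irq_vectors(pdev, 2, max_irqs, PCI_IRQ_MSIX);
	if (nvec < 0) {
		cpus_read_unlock();
		return nvec;
	}
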
>>               return nvec;
>>+      if (nvec <= num_online_cpus())
>>+              start_irq_index = 0;
>>+
>>+      irqs = kmalloc_array((nvec - start_irq_index), sizeof(int),
>>+                           GFP_KERNEL);
>>+      if (!irqs) {
>>+              err = -ENOMEM;
>>+              goto free_irq_vector;
>>+      }
>>
>>       gc->irq_contexts = kcalloc(nvec, sizeof(struct gdma_irq_context),
>>                                  GFP_KERNEL);
>>@@ -1287,21 +1336,44 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
>>                       goto free_irq;
>>               }
>>
>>-              err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
>>-              if (err)
>>-                      goto free_irq;
>>-
>>-              cpu = cpumask_local_spread(i, gc->numa_node);
>>-              irq_set_affinity_and_hint(irq, cpumask_of(cpu));
>>+              if (!i) {
>>+                      err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
>>+                      if (err)
>>+                              goto free_irq;
>>+
>>+                      /* If number of IRQ is one extra than number of online CPUs,
>>+                       * then we need to assign IRQ0 (hwc irq) and IRQ1 to
>>+                       * same CPU.
>>+                       * Else we will use different CPUs for IRQ0 and IRQ1.
>>+                       * Also we are using cpumask_local_spread instead of
>>+                       * cpumask_first for the node, because the node can be
>>+                       * mem only.
>>+                       */
>>+                      if (start_irq_index) {
>>+                              cpu = cpumask_local_spread(i, gc->numa_node);
>>+                              irq_set_affinity_and_hint(irq, cpumask_of(cpu));
>>+                      } else {
>>+                              irqs[start_irq_index] = irq;
>>+                      }
>>+              } else {
>>+                      irqs[i - start_irq_index] = irq;
>>+                      err = request_irq(irqs[i - start_irq_index], mana_gd_intr, 0,
>>+                                        gic->name, gic);
>>+                      if (err)
>>+                              goto free_irq;
>>+              }
>>       }
>>
>>+      err = irq_setup(irqs, (nvec - start_irq_index), gc->numa_node);
>>+      if (err)
>>+              goto free_irq;
>>       err = mana_gd_alloc_res_map(nvec, &gc->msix_resource);
>>       if (err)
>>               goto free_irq;
>>
>>       gc->max_num_msix = nvec;
>>       gc->num_msix_usable = nvec;
>>-
>>+      cpus_read_unlock();
>>       return 0;
>>
>> free_irq:
>>@@ -1314,8 +1386,10 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
>>       }
>>
>>       kfree(gc->irq_contexts);
>>+      kfree(irqs);
>>       gc->irq_contexts = NULL;
>> free_irq_vector:
>>+      cpus_read_unlock();
>>       pci_free_irq_vectors(pdev);
>>       return err;
>> }
>>--
>>2.34.1
>>





