On Wed, Mar 19, 2025 at 11:56:53AM +0100, Thomas Gleixner wrote:
> Split the lock protected functionality of msi_capability_init() out into a
> helper function and use guard(msi_descs_lock) to replace the lock/unlock
> pair.
>
> No functional change intended.
>
> Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>

Acked-by: Bjorn Helgaas <bhelgaas@xxxxxxxxxx>

> ---
> V4: Split out from the previous combo patch
> ---
>  drivers/pci/msi/msi.c |   68 ++++++++++++++++++++++++++------------------
>  1 file changed, 36 insertions(+), 32 deletions(-)
>
> --- a/drivers/pci/msi/msi.c
> +++ b/drivers/pci/msi/msi.c
> @@ -336,38 +336,13 @@ static int msi_verify_entries(struct pci
>  	return !entry ? 0 : -EIO;
>  }
>  
> -/**
> - * msi_capability_init - configure device's MSI capability structure
> - * @dev: pointer to the pci_dev data structure of MSI device function
> - * @nvec: number of interrupts to allocate
> - * @affd: description of automatic IRQ affinity assignments (may be %NULL)
> - *
> - * Setup the MSI capability structure of the device with the requested
> - * number of interrupts. A return value of zero indicates the successful
> - * setup of an entry with the new MSI IRQ. A negative return value indicates
> - * an error, and a positive return value indicates the number of interrupts
> - * which could have been allocated.
> - */
> -static int msi_capability_init(struct pci_dev *dev, int nvec,
> -			       struct irq_affinity *affd)
> +static int __msi_capability_init(struct pci_dev *dev, int nvec, struct irq_affinity_desc *masks)
>  {
> +	int ret = msi_setup_msi_desc(dev, nvec, masks);
>  	struct msi_desc *entry, desc;
> -	int ret;
> -
> -	/* Reject multi-MSI early on irq domain enabled architectures */
> -	if (nvec > 1 && !pci_msi_domain_supports(dev, MSI_FLAG_MULTI_PCI_MSI, ALLOW_LEGACY))
> -		return 1;
> -
> -	/* Disable MSI during setup in the hardware to erase stale state */
> -	pci_msi_set_enable(dev, 0);
>  
> -	struct irq_affinity_desc *masks __free(kfree) =
> -		affd ? irq_create_affinity_masks(nvec, affd) : NULL;
> -
> -	msi_lock_descs(&dev->dev);
> -	ret = msi_setup_msi_desc(dev, nvec, masks);
>  	if (ret)
> -		goto unlock;
> +		return ret;
>  
>  	/* All MSIs are unmasked by default; mask them all */
>  	entry = msi_first_desc(&dev->dev, MSI_DESC_ALL);
> @@ -395,16 +370,45 @@ static int msi_capability_init(struct pc
>  
>  	pcibios_free_irq(dev);
>  	dev->irq = entry->irq;
> -	goto unlock;
> -
> +	return 0;
>  err:
>  	pci_msi_unmask(&desc, msi_multi_mask(&desc));
>  	pci_free_msi_irqs(dev);
> -unlock:
> -	msi_unlock_descs(&dev->dev);
>  	return ret;
>  }
>  
> +/**
> + * msi_capability_init - configure device's MSI capability structure
> + * @dev: pointer to the pci_dev data structure of MSI device function
> + * @nvec: number of interrupts to allocate
> + * @affd: description of automatic IRQ affinity assignments (may be %NULL)
> + *
> + * Setup the MSI capability structure of the device with the requested
> + * number of interrupts. A return value of zero indicates the successful
> + * setup of an entry with the new MSI IRQ. A negative return value indicates
> + * an error, and a positive return value indicates the number of interrupts
> + * which could have been allocated.
> + */
> +static int msi_capability_init(struct pci_dev *dev, int nvec,
> +			       struct irq_affinity *affd)
> +{
> +	/* Reject multi-MSI early on irq domain enabled architectures */
> +	if (nvec > 1 && !pci_msi_domain_supports(dev, MSI_FLAG_MULTI_PCI_MSI, ALLOW_LEGACY))
> +		return 1;
> +
> +	/*
> +	 * Disable MSI during setup in the hardware, but mark it enabled
> +	 * so that setup code can evaluate it.
> +	 */
> +	pci_msi_set_enable(dev, 0);
> +
> +	struct irq_affinity_desc *masks __free(kfree) =
> +		affd ? irq_create_affinity_masks(nvec, affd) : NULL;
> +
> +	guard(msi_descs_lock)(&dev->dev);
> +	return __msi_capability_init(dev, nvec, masks);
> +}
> +
>  int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
>  			   struct irq_affinity *affd)
>  {
> 
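
A side note for readers who have not run into the <linux/cleanup.h> guards
yet: guard(<class>)(args) acquires the lock and releases it automatically
when the enclosing scope ends, which is why the goto/unlock exit paths above
can become plain returns. A minimal sketch of the same pattern with the
generic mutex guard from <linux/mutex.h> (illustrative only, not part of
this patch; foo_mutex and foo_do_thing() are made-up names):

	#include <linux/cleanup.h>
	#include <linux/errno.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(foo_mutex);

	static int foo_do_thing(int arg)
	{
		/* mutex_lock() here, mutex_unlock() on every return below */
		guard(mutex)(&foo_mutex);

		if (arg < 0)
			return -EINVAL;	/* no goto unlock needed */

		return 0;
	}

The msi_descs_lock guard class used in msi_capability_init() is not shown in
this hunk; presumably it is a DEFINE_LOCK_GUARD_1() wrapper around
msi_lock_descs()/msi_unlock_descs() added elsewhere in the series.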