summaryrefslogtreecommitdiff
path: root/kernel/irq/msi.c
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2021-12-06 23:51:45 +0100
committerThomas Gleixner <tglx@linutronix.de>2021-12-16 22:22:20 +0100
commit495c66aca3da704e063fa373fdbe371e71d3f4ee (patch)
tree874527c4d03ee27cc49400b3b46dcfc119d091a8 /kernel/irq/msi.c
parentef8dd01538ea2553ab101ddce6a85a321406d9c0 (diff)
genirq/msi: Convert to new functions
Use the new iterator functions and add locking where required. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Tested-by: Michael Kelley <mikelley@microsoft.com> Tested-by: Nishanth Menon <nm@ti.com> Reviewed-by: Jason Gunthorpe <jgg@nvidia.com> Link: https://lore.kernel.org/r/20211206210749.063705667@linutronix.de
Diffstat (limited to 'kernel/irq/msi.c')
-rw-r--r--kernel/irq/msi.c23
1 file changed, 14 insertions(+), 9 deletions(-)
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index bbe36e20a986..745434efb557 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -320,6 +320,7 @@ EXPORT_SYMBOL_GPL(msi_next_desc);
unsigned int msi_get_virq(struct device *dev, unsigned int index)
{
struct msi_desc *desc;
+ unsigned int ret = 0;
bool pcimsi;
if (!dev->msi.data)
@@ -327,11 +328,12 @@ unsigned int msi_get_virq(struct device *dev, unsigned int index)
pcimsi = dev_is_pci(dev) ? to_pci_dev(dev)->msi_enabled : false;
- for_each_msi_entry(desc, dev) {
+ msi_lock_descs(dev);
+ msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
/* PCI-MSI has only one descriptor for multiple interrupts. */
if (pcimsi) {
- if (desc->irq && index < desc->nvec_used)
- return desc->irq + index;
+ if (index < desc->nvec_used)
+ ret = desc->irq + index;
break;
}
@@ -339,10 +341,13 @@ unsigned int msi_get_virq(struct device *dev, unsigned int index)
* PCI-MSIX and platform MSI use a descriptor per
* interrupt.
*/
- if (desc->msi_index == index)
- return desc->irq;
+ if (desc->msi_index == index) {
+ ret = desc->irq;
+ break;
+ }
}
- return 0;
+ msi_unlock_descs(dev);
+ return ret;
}
EXPORT_SYMBOL_GPL(msi_get_virq);
@@ -373,7 +378,7 @@ static const struct attribute_group **msi_populate_sysfs(struct device *dev)
int i;
/* Determine how many msi entries we have */
- for_each_msi_entry(entry, dev)
+ msi_for_each_desc(entry, dev, MSI_DESC_ALL)
num_msi += entry->nvec_used;
if (!num_msi)
return NULL;
@@ -383,7 +388,7 @@ static const struct attribute_group **msi_populate_sysfs(struct device *dev)
if (!msi_attrs)
return ERR_PTR(-ENOMEM);
- for_each_msi_entry(entry, dev) {
+ msi_for_each_desc(entry, dev, MSI_DESC_ALL) {
for (i = 0; i < entry->nvec_used; i++) {
msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
if (!msi_dev_attr)
@@ -803,7 +808,7 @@ static bool msi_check_reservation_mode(struct irq_domain *domain,
* Checking the first MSI descriptor is sufficient. MSIX supports
* masking and MSI does so when the can_mask attribute is set.
*/
- desc = first_msi_entry(dev);
+ desc = msi_first_desc(dev, MSI_DESC_ALL);
return desc->pci.msi_attrib.is_msix || desc->pci.msi_attrib.can_mask;
}