Diffstat (limited to 'drivers/irqchip')
-rw-r--r--  drivers/irqchip/irq-gic-v2m.c             6
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c         16
-rw-r--r--  drivers/irqchip/irq-gic-v3.c             21
-rw-r--r--  drivers/irqchip/irq-loongarch-cpu.c       6
-rw-r--r--  drivers/irqchip/irq-mbigen.c             20
-rw-r--r--  drivers/irqchip/irq-meson-gpio.c         14
-rw-r--r--  drivers/irqchip/irq-msi-lib.c             5
-rw-r--r--  drivers/irqchip/irq-pic32-evic.c          6
-rw-r--r--  drivers/irqchip/irq-riscv-aplic-main.c    4
-rw-r--r--  drivers/irqchip/irq-riscv-aplic-msi.c    32
-rw-r--r--  drivers/irqchip/irq-sifive-plic.c       115
-rw-r--r--  drivers/irqchip/irq-sun6i-r.c             2
-rw-r--r--  drivers/irqchip/irq-xilinx-intc.c         2
13 files changed, 161 insertions, 88 deletions
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index 51af63c046ed..be35c5349986 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -407,12 +407,12 @@ static int __init gicv2m_of_init(struct fwnode_handle *parent_handle,
ret = gicv2m_init_one(&child->fwnode, spi_start, nr_spis,
&res, 0);
- if (ret) {
- of_node_put(child);
+ if (ret)
break;
- }
}
+ if (ret && child)
+ of_node_put(child);
if (!ret)
ret = gicv2m_allocate_domains(parent);
if (ret)
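
The gic-v2m change above moves the child-node reference drop out of the loop so it happens exactly once when the loop exits early. A minimal userspace sketch of that get/break/put pattern, with hypothetical node_get()/node_put() refcount helpers standing in for the OF API:

#include <stdio.h>

struct node { int refcount; int id; };

static void node_get(struct node *n) { n->refcount++; }
static void node_put(struct node *n) { n->refcount--; }

/* Pretend that processing node 2 fails. */
static int process(struct node *n) { return n->id == 2 ? -1 : 0; }

int main(void)
{
        struct node nodes[] = { {0, 0}, {0, 1}, {0, 2}, {0, 3} };
        struct node *child = NULL;
        int i, ret = 0;

        for (i = 0; i < 4; i++) {
                child = &nodes[i];
                node_get(child);        /* iterator holds a reference */
                ret = process(child);
                if (ret)
                        break;          /* keep holding it; drop it once below */
                node_put(child);
        }
        if (ret && child)
                node_put(child);        /* single release on the error path */

        printf("ret=%d, node 2 refcount=%d\n", ret, nodes[2].refcount);
        return 0;
}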
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 9b34596b3542..fdec478ba5e7 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1330,12 +1330,6 @@ static void its_send_vmovp(struct its_vpe *vpe)
}
/*
- * Protect against concurrent updates of the mapping state on
- * individual VMs.
- */
- guard(raw_spinlock_irqsave)(&vpe->its_vm->vmapp_lock);
-
- /*
* Yet another marvel of the architecture. If using the
* its_list "feature", we need to make sure that all ITSs
* receive all VMOVP commands in the same order. The only way
@@ -3824,7 +3818,14 @@ static int its_vpe_set_affinity(struct irq_data *d,
* protect us, and that we must ensure nobody samples vpe->col_idx
* during the update, hence the lock below which must also be
* taken on any vLPI handling path that evaluates vpe->col_idx.
+ *
+ * Finally, we must protect ourselves against concurrent updates of
+ * the mapping state on this VM should the ITS list be in use (see
+ * the shortcut in its_send_vmovp() otherwise).
*/
+ if (its_list_map)
+ raw_spin_lock(&vpe->its_vm->vmapp_lock);
+
from = vpe_to_cpuid_lock(vpe, &flags);
table_mask = gic_data_rdist_cpu(from)->vpe_table_mask;
@@ -3854,6 +3855,9 @@ out:
irq_data_update_effective_affinity(d, cpumask_of(cpu));
vpe_to_cpuid_unlock(vpe, flags);
+ if (its_list_map)
+ raw_spin_unlock(&vpe->its_vm->vmapp_lock);
+
return IRQ_SET_MASK_OK_DONE;
}
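
The hunk above drops a guard(raw_spinlock_irqsave)() scoped lock from its_send_vmovp() and instead takes vmapp_lock explicitly, and only when its_list_map is set, in the affinity path. For readers unfamiliar with guard(), here is a userspace sketch of the scope-bound unlock idea; it uses the compiler cleanup attribute that the kernel's <linux/cleanup.h> machinery is built on, with a pthread mutex standing in for the raw spinlock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void unlock_cleanup(pthread_mutex_t **l)
{
        pthread_mutex_unlock(*l);
}

/* Hypothetical mini-guard: lock now, unlock when the variable goes out of scope. */
#define guard_lock(l) \
        pthread_mutex_t *__g __attribute__((cleanup(unlock_cleanup))) = (l); \
        pthread_mutex_lock(__g)

static int update(int v)
{
        guard_lock(&lock);      /* dropped automatically at every return */

        if (v < 0)
                return -1;      /* early return: still unlocked on exit */
        printf("updated to %d\n", v);
        return 0;
}

int main(void)
{
        update(3);
        update(-1);
        return 0;
}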
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index c19083bfb943..74f21e03d4a3 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1154,14 +1154,8 @@ static void gic_update_rdist_properties(void)
gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : "");
}
-static void gic_cpu_sys_reg_init(void)
+static void gic_cpu_sys_reg_enable(void)
{
- int i, cpu = smp_processor_id();
- u64 mpidr = gic_cpu_to_affinity(cpu);
- u64 need_rss = MPIDR_RS(mpidr);
- bool group0;
- u32 pribits;
-
/*
* Need to check that the SRE bit has actually been set. If
* not, it means that SRE is disabled at EL2. We're going to
@@ -1172,6 +1166,16 @@ static void gic_cpu_sys_reg_init(void)
if (!gic_enable_sre())
pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
+}
+
+static void gic_cpu_sys_reg_init(void)
+{
+ int i, cpu = smp_processor_id();
+ u64 mpidr = gic_cpu_to_affinity(cpu);
+ u64 need_rss = MPIDR_RS(mpidr);
+ bool group0;
+ u32 pribits;
+
pribits = gic_get_pribits();
group0 = gic_has_group0();
@@ -1333,6 +1337,7 @@ static int gic_check_rdist(unsigned int cpu)
static int gic_starting_cpu(unsigned int cpu)
{
+ gic_cpu_sys_reg_enable();
gic_cpu_init();
if (gic_dist_supports_lpis())
@@ -1498,6 +1503,7 @@ static int gic_cpu_pm_notifier(struct notifier_block *self,
if (cmd == CPU_PM_EXIT) {
if (gic_dist_security_disabled())
gic_enable_redist(true);
+ gic_cpu_sys_reg_enable();
gic_cpu_sys_reg_init();
} else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
gic_write_grpen1(0);
@@ -2070,6 +2076,7 @@ static int __init gic_init_bases(phys_addr_t dist_phys_base,
gic_update_rdist_properties();
+ gic_cpu_sys_reg_enable();
gic_prio_init();
gic_dist_init();
gic_cpu_init();
diff --git a/drivers/irqchip/irq-loongarch-cpu.c b/drivers/irqchip/irq-loongarch-cpu.c
index 9d8f2c406043..b35903a06902 100644
--- a/drivers/irqchip/irq-loongarch-cpu.c
+++ b/drivers/irqchip/irq-loongarch-cpu.c
@@ -18,11 +18,13 @@ struct fwnode_handle *cpuintc_handle;
static u32 lpic_gsi_to_irq(u32 gsi)
{
+ int irq = 0;
+
/* Only pch irqdomain transferring is required for LoongArch. */
if (gsi >= GSI_MIN_PCH_IRQ && gsi <= GSI_MAX_PCH_IRQ)
- return acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH);
+ irq = acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH);
- return 0;
+ return (irq > 0) ? irq : 0;
}
static struct fwnode_handle *lpic_get_gsi_domain_id(u32 gsi)
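
The lpic_gsi_to_irq() change guards against acpi_register_gsi() returning a negative errno, which must not escape as if it were a valid IRQ number. A small sketch of that clamping, with a made-up stub in place of the ACPI call and an illustrative GSI window:

#include <stdio.h>

/* Hypothetical stand-in: fails with -EINVAL (-22) for one GSI, else maps to 64. */
static int register_gsi_stub(unsigned int gsi)
{
        return gsi == 19 ? -22 : 64;
}

static unsigned int gsi_to_irq(unsigned int gsi)
{
        int irq = 0;

        if (gsi >= 16 && gsi <= 23)             /* pretend PCH GSI window */
                irq = register_gsi_stub(gsi);

        return (irq > 0) ? irq : 0;             /* 0 means "no mapping" */
}

int main(void)
{
        printf("gsi 18 -> %u, gsi 19 -> %u, gsi 40 -> %u\n",
               gsi_to_irq(18), gsi_to_irq(19), gsi_to_irq(40));
        return 0;
}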
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index 093fd42893a7..53cc08387588 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -64,6 +64,20 @@ struct mbigen_device {
void __iomem *base;
};
+static inline unsigned int get_mbigen_node_offset(unsigned int nid)
+{
+ unsigned int offset = nid * MBIGEN_NODE_OFFSET;
+
+ /*
+ * To avoid touching the clear registers in an unexpected way, we need to
+ * skip over the clear register space when accessing mbigen node 10 and above.
+ */
+ if (nid >= (REG_MBIGEN_CLEAR_OFFSET / MBIGEN_NODE_OFFSET))
+ offset += MBIGEN_NODE_OFFSET;
+
+ return offset;
+}
+
static inline unsigned int get_mbigen_vec_reg(irq_hw_number_t hwirq)
{
unsigned int nid, pin;
@@ -72,8 +86,7 @@ static inline unsigned int get_mbigen_vec_reg(irq_hw_number_t hwirq)
nid = hwirq / IRQS_PER_MBIGEN_NODE + 1;
pin = hwirq % IRQS_PER_MBIGEN_NODE;
- return pin * 4 + nid * MBIGEN_NODE_OFFSET
- + REG_MBIGEN_VEC_OFFSET;
+ return pin * 4 + get_mbigen_node_offset(nid) + REG_MBIGEN_VEC_OFFSET;
}
static inline void get_mbigen_type_reg(irq_hw_number_t hwirq,
@@ -88,8 +101,7 @@ static inline void get_mbigen_type_reg(irq_hw_number_t hwirq,
*mask = 1 << (irq_ofst % 32);
ofst = irq_ofst / 32 * 4;
- *addr = ofst + nid * MBIGEN_NODE_OFFSET
- + REG_MBIGEN_TYPE_OFFSET;
+ *addr = ofst + get_mbigen_node_offset(nid) + REG_MBIGEN_TYPE_OFFSET;
}
static inline void get_mbigen_clear_reg(irq_hw_number_t hwirq,
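
The new get_mbigen_node_offset() exists because the clear-register bank sits in the middle of the per-node register space, so nodes 10 and above have to hop over it. A worked example; the constants (0x1000 per node, clear bank at 0xa000, vector bank at 0x200) are my recollection of the driver's defines, not values taken from the hunk itself:

#include <stdio.h>

#define MBIGEN_NODE_OFFSET      0x1000
#define REG_MBIGEN_CLEAR_OFFSET 0xa000
#define REG_MBIGEN_VEC_OFFSET   0x200

static unsigned int get_mbigen_node_offset(unsigned int nid)
{
        unsigned int offset = nid * MBIGEN_NODE_OFFSET;

        /* Nodes at or beyond the clear bank skip one node-sized stride. */
        if (nid >= (REG_MBIGEN_CLEAR_OFFSET / MBIGEN_NODE_OFFSET))
                offset += MBIGEN_NODE_OFFSET;

        return offset;
}

int main(void)
{
        unsigned int nid;

        for (nid = 9; nid <= 11; nid++)
                printf("nid %2u -> vec base 0x%05x\n",
                       nid, get_mbigen_node_offset(nid) + REG_MBIGEN_VEC_OFFSET);
        return 0;
}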
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
index 27e30ce41db3..cd789fa51519 100644
--- a/drivers/irqchip/irq-meson-gpio.c
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -178,7 +178,7 @@ struct meson_gpio_irq_controller {
void __iomem *base;
u32 channel_irqs[MAX_NUM_CHANNEL];
DECLARE_BITMAP(channel_map, MAX_NUM_CHANNEL);
- spinlock_t lock;
+ raw_spinlock_t lock;
};
static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl,
@@ -187,14 +187,14 @@ static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl,
unsigned long flags;
u32 tmp;
- spin_lock_irqsave(&ctl->lock, flags);
+ raw_spin_lock_irqsave(&ctl->lock, flags);
tmp = readl_relaxed(ctl->base + reg);
tmp &= ~mask;
tmp |= val;
writel_relaxed(tmp, ctl->base + reg);
- spin_unlock_irqrestore(&ctl->lock, flags);
+ raw_spin_unlock_irqrestore(&ctl->lock, flags);
}
static void meson_gpio_irq_init_dummy(struct meson_gpio_irq_controller *ctl)
@@ -244,12 +244,12 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
unsigned long flags;
unsigned int idx;
- spin_lock_irqsave(&ctl->lock, flags);
+ raw_spin_lock_irqsave(&ctl->lock, flags);
/* Find a free channel */
idx = find_first_zero_bit(ctl->channel_map, ctl->params->nr_channels);
if (idx >= ctl->params->nr_channels) {
- spin_unlock_irqrestore(&ctl->lock, flags);
+ raw_spin_unlock_irqrestore(&ctl->lock, flags);
pr_err("No channel available\n");
return -ENOSPC;
}
@@ -257,7 +257,7 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
/* Mark the channel as used */
set_bit(idx, ctl->channel_map);
- spin_unlock_irqrestore(&ctl->lock, flags);
+ raw_spin_unlock_irqrestore(&ctl->lock, flags);
/*
* Setup the mux of the channel to route the signal of the pad
@@ -567,7 +567,7 @@ static int meson_gpio_irq_of_init(struct device_node *node, struct device_node *
if (!ctl)
return -ENOMEM;
- spin_lock_init(&ctl->lock);
+ raw_spin_lock_init(&ctl->lock);
ctl->base = of_iomap(node, 0);
if (!ctl->base) {
diff --git a/drivers/irqchip/irq-msi-lib.c b/drivers/irqchip/irq-msi-lib.c
index b5b90003311a..d8e29fc0d406 100644
--- a/drivers/irqchip/irq-msi-lib.c
+++ b/drivers/irqchip/irq-msi-lib.c
@@ -128,6 +128,9 @@ int msi_lib_irq_domain_select(struct irq_domain *d, struct irq_fwspec *fwspec,
const struct msi_parent_ops *ops = d->msi_parent_ops;
u32 busmask = BIT(bus_token);
+ if (!ops)
+ return 0;
+
if (fwspec->fwnode != d->fwnode || fwspec->param_count != 0)
return 0;
@@ -135,6 +138,6 @@ int msi_lib_irq_domain_select(struct irq_domain *d, struct irq_fwspec *fwspec,
if (bus_token == ops->bus_select_token)
return 1;
- return ops && !!(ops->bus_select_mask & busmask);
+ return !!(ops->bus_select_mask & busmask);
}
EXPORT_SYMBOL_GPL(msi_lib_irq_domain_select);
diff --git a/drivers/irqchip/irq-pic32-evic.c b/drivers/irqchip/irq-pic32-evic.c
index 5d6b8e025bb8..eb6ca516a166 100644
--- a/drivers/irqchip/irq-pic32-evic.c
+++ b/drivers/irqchip/irq-pic32-evic.c
@@ -161,9 +161,9 @@ static int pic32_irq_domain_map(struct irq_domain *d, unsigned int virq,
return ret;
}
-int pic32_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq, unsigned int *out_type)
+static int pic32_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
struct evic_chip_data *priv = d->host_data;
diff --git a/drivers/irqchip/irq-riscv-aplic-main.c b/drivers/irqchip/irq-riscv-aplic-main.c
index 28dd175b5764..981fad6fb8f7 100644
--- a/drivers/irqchip/irq-riscv-aplic-main.c
+++ b/drivers/irqchip/irq-riscv-aplic-main.c
@@ -175,9 +175,9 @@ static int aplic_probe(struct platform_device *pdev)
/* Map the MMIO registers */
regs = devm_platform_ioremap_resource(pdev, 0);
- if (!regs) {
+ if (IS_ERR(regs)) {
dev_err(dev, "failed map MMIO registers\n");
- return -ENOMEM;
+ return PTR_ERR(regs);
}
/*
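
The aplic_probe() fix works because devm_platform_ioremap_resource() reports failure as an ERR_PTR-encoded errno, never NULL, so the old !regs test could not fire. A simplified userspace restatement of the <linux/err.h> convention, with fake_ioremap() as a stand-in for the real helper:

#include <stdio.h>

#define MAX_ERRNO    4095
#define ERR_PTR(err) ((void *)(long)(err))
#define PTR_ERR(ptr) ((long)(ptr))
#define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *fake_ioremap(int fail)
{
        return fail ? ERR_PTR(-12) : (void *)0x1000UL;  /* -ENOMEM on failure */
}

int main(void)
{
        void *regs = fake_ioremap(1);

        if (!regs)
                printf("NULL check: never reached\n");
        if (IS_ERR(regs))
                printf("IS_ERR check caught error %ld\n", PTR_ERR(regs));
        return 0;
}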
diff --git a/drivers/irqchip/irq-riscv-aplic-msi.c b/drivers/irqchip/irq-riscv-aplic-msi.c
index 028444af48bd..d7773f76e5d0 100644
--- a/drivers/irqchip/irq-riscv-aplic-msi.c
+++ b/drivers/irqchip/irq-riscv-aplic-msi.c
@@ -32,15 +32,10 @@ static void aplic_msi_irq_unmask(struct irq_data *d)
aplic_irq_unmask(d);
}
-static void aplic_msi_irq_eoi(struct irq_data *d)
+static void aplic_msi_irq_retrigger_level(struct irq_data *d)
{
struct aplic_priv *priv = irq_data_get_irq_chip_data(d);
- /*
- * EOI handling is required only for level-triggered interrupts
- * when APLIC is in MSI mode.
- */
-
switch (irqd_get_trigger_type(d)) {
case IRQ_TYPE_LEVEL_LOW:
case IRQ_TYPE_LEVEL_HIGH:
@@ -59,6 +54,29 @@ static void aplic_msi_irq_eoi(struct irq_data *d)
}
}
+static void aplic_msi_irq_eoi(struct irq_data *d)
+{
+ /*
+ * EOI handling is required only for level-triggered interrupts
+ * when APLIC is in MSI mode.
+ */
+ aplic_msi_irq_retrigger_level(d);
+}
+
+static int aplic_msi_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ int rc = aplic_irq_set_type(d, type);
+
+ if (rc)
+ return rc;
+ /*
+ * Updating sourcecfg register for level-triggered interrupts
+ * requires interrupt retriggering when APLIC is in MSI mode.
+ */
+ aplic_msi_irq_retrigger_level(d);
+ return 0;
+}
+
static void aplic_msi_write_msg(struct irq_data *d, struct msi_msg *msg)
{
unsigned int group_index, hart_index, guest_index, val;
@@ -130,7 +148,7 @@ static const struct msi_domain_template aplic_msi_template = {
.name = "APLIC-MSI",
.irq_mask = aplic_msi_irq_mask,
.irq_unmask = aplic_msi_irq_unmask,
- .irq_set_type = aplic_irq_set_type,
+ .irq_set_type = aplic_msi_irq_set_type,
.irq_eoi = aplic_msi_irq_eoi,
#ifdef CONFIG_SMP
.irq_set_affinity = irq_chip_set_affinity_parent,
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 9e22f7e378f5..4d9ea718086d 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -3,6 +3,7 @@
* Copyright (C) 2017 SiFive
* Copyright (C) 2018 Christoph Hellwig
*/
+#define pr_fmt(fmt) "riscv-plic: " fmt
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -63,7 +64,7 @@
#define PLIC_QUIRK_EDGE_INTERRUPT 0
struct plic_priv {
- struct device *dev;
+ struct fwnode_handle *fwnode;
struct cpumask lmask;
struct irq_domain *irqdomain;
void __iomem *regs;
@@ -378,8 +379,8 @@ static void plic_handle_irq(struct irq_desc *desc)
int err = generic_handle_domain_irq(handler->priv->irqdomain,
hwirq);
if (unlikely(err)) {
- dev_warn_ratelimited(handler->priv->dev,
- "can't find mapping for hwirq %lu\n", hwirq);
+ pr_warn_ratelimited("%pfwP: can't find mapping for hwirq %lu\n",
+ handler->priv->fwnode, hwirq);
}
}
@@ -408,7 +409,8 @@ static int plic_starting_cpu(unsigned int cpu)
enable_percpu_irq(plic_parent_irq,
irq_get_trigger_type(plic_parent_irq));
else
- dev_warn(handler->priv->dev, "cpu%d: parent irq not available\n", cpu);
+ pr_warn("%pfwP: cpu%d: parent irq not available\n",
+ handler->priv->fwnode, cpu);
plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);
return 0;
@@ -424,38 +426,36 @@ static const struct of_device_id plic_match[] = {
{}
};
-static int plic_parse_nr_irqs_and_contexts(struct platform_device *pdev,
+static int plic_parse_nr_irqs_and_contexts(struct fwnode_handle *fwnode,
u32 *nr_irqs, u32 *nr_contexts)
{
- struct device *dev = &pdev->dev;
int rc;
/*
* Currently, only OF fwnode is supported so extend this
* function for ACPI support.
*/
- if (!is_of_node(dev->fwnode))
+ if (!is_of_node(fwnode))
return -EINVAL;
- rc = of_property_read_u32(to_of_node(dev->fwnode), "riscv,ndev", nr_irqs);
+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,ndev", nr_irqs);
if (rc) {
- dev_err(dev, "riscv,ndev property not available\n");
+ pr_err("%pfwP: riscv,ndev property not available\n", fwnode);
return rc;
}
- *nr_contexts = of_irq_count(to_of_node(dev->fwnode));
+ *nr_contexts = of_irq_count(to_of_node(fwnode));
if (WARN_ON(!(*nr_contexts))) {
- dev_err(dev, "no PLIC context available\n");
+ pr_err("%pfwP: no PLIC context available\n", fwnode);
return -EINVAL;
}
return 0;
}
-static int plic_parse_context_parent(struct platform_device *pdev, u32 context,
+static int plic_parse_context_parent(struct fwnode_handle *fwnode, u32 context,
u32 *parent_hwirq, int *parent_cpu)
{
- struct device *dev = &pdev->dev;
struct of_phandle_args parent;
unsigned long hartid;
int rc;
@@ -464,10 +464,10 @@ static int plic_parse_context_parent(struct platform_device *pdev, u32 context,
* Currently, only OF fwnode is supported so extend this
* function for ACPI support.
*/
- if (!is_of_node(dev->fwnode))
+ if (!is_of_node(fwnode))
return -EINVAL;
- rc = of_irq_parse_one(to_of_node(dev->fwnode), context, &parent);
+ rc = of_irq_parse_one(to_of_node(fwnode), context, &parent);
if (rc)
return rc;
@@ -480,48 +480,55 @@ static int plic_parse_context_parent(struct platform_device *pdev, u32 context,
return 0;
}
-static int plic_probe(struct platform_device *pdev)
+static int plic_probe(struct fwnode_handle *fwnode)
{
int error = 0, nr_contexts, nr_handlers = 0, cpu, i;
- struct device *dev = &pdev->dev;
unsigned long plic_quirks = 0;
struct plic_handler *handler;
u32 nr_irqs, parent_hwirq;
struct plic_priv *priv;
irq_hw_number_t hwirq;
+ void __iomem *regs;
- if (is_of_node(dev->fwnode)) {
+ if (is_of_node(fwnode)) {
const struct of_device_id *id;
- id = of_match_node(plic_match, to_of_node(dev->fwnode));
+ id = of_match_node(plic_match, to_of_node(fwnode));
if (id)
plic_quirks = (unsigned long)id->data;
+
+ regs = of_iomap(to_of_node(fwnode), 0);
+ if (!regs)
+ return -ENOMEM;
+ } else {
+ return -ENODEV;
}
- error = plic_parse_nr_irqs_and_contexts(pdev, &nr_irqs, &nr_contexts);
+ error = plic_parse_nr_irqs_and_contexts(fwnode, &nr_irqs, &nr_contexts);
if (error)
- return error;
+ goto fail_free_regs;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ error = -ENOMEM;
+ goto fail_free_regs;
+ }
- priv->dev = dev;
+ priv->fwnode = fwnode;
priv->plic_quirks = plic_quirks;
priv->nr_irqs = nr_irqs;
+ priv->regs = regs;
- priv->regs = devm_platform_ioremap_resource(pdev, 0);
- if (WARN_ON(!priv->regs))
- return -EIO;
-
- priv->prio_save = devm_bitmap_zalloc(dev, nr_irqs, GFP_KERNEL);
- if (!priv->prio_save)
- return -ENOMEM;
+ priv->prio_save = bitmap_zalloc(nr_irqs, GFP_KERNEL);
+ if (!priv->prio_save) {
+ error = -ENOMEM;
+ goto fail_free_priv;
+ }
for (i = 0; i < nr_contexts; i++) {
- error = plic_parse_context_parent(pdev, i, &parent_hwirq, &cpu);
+ error = plic_parse_context_parent(fwnode, i, &parent_hwirq, &cpu);
if (error) {
- dev_warn(dev, "hwirq for context%d not found\n", i);
+ pr_warn("%pfwP: hwirq for context%d not found\n", fwnode, i);
continue;
}
@@ -543,7 +550,7 @@ static int plic_probe(struct platform_device *pdev)
}
if (cpu < 0) {
- dev_warn(dev, "Invalid cpuid for context %d\n", i);
+ pr_warn("%pfwP: Invalid cpuid for context %d\n", fwnode, i);
continue;
}
@@ -554,7 +561,7 @@ static int plic_probe(struct platform_device *pdev)
*/
handler = per_cpu_ptr(&plic_handlers, cpu);
if (handler->present) {
- dev_warn(dev, "handler already present for context %d.\n", i);
+ pr_warn("%pfwP: handler already present for context %d.\n", fwnode, i);
plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
goto done;
}
@@ -568,8 +575,8 @@ static int plic_probe(struct platform_device *pdev)
i * CONTEXT_ENABLE_SIZE;
handler->priv = priv;
- handler->enable_save = devm_kcalloc(dev, DIV_ROUND_UP(nr_irqs, 32),
- sizeof(*handler->enable_save), GFP_KERNEL);
+ handler->enable_save = kcalloc(DIV_ROUND_UP(nr_irqs, 32),
+ sizeof(*handler->enable_save), GFP_KERNEL);
if (!handler->enable_save)
goto fail_cleanup_contexts;
done:
@@ -581,7 +588,7 @@ done:
nr_handlers++;
}
- priv->irqdomain = irq_domain_add_linear(to_of_node(dev->fwnode), nr_irqs + 1,
+ priv->irqdomain = irq_domain_add_linear(to_of_node(fwnode), nr_irqs + 1,
&plic_irqdomain_ops, priv);
if (WARN_ON(!priv->irqdomain))
goto fail_cleanup_contexts;
@@ -619,13 +626,13 @@ done:
}
}
- dev_info(dev, "mapped %d interrupts with %d handlers for %d contexts.\n",
- nr_irqs, nr_handlers, nr_contexts);
+ pr_info("%pfwP: mapped %d interrupts with %d handlers for %d contexts.\n",
+ fwnode, nr_irqs, nr_handlers, nr_contexts);
return 0;
fail_cleanup_contexts:
for (i = 0; i < nr_contexts; i++) {
- if (plic_parse_context_parent(pdev, i, &parent_hwirq, &cpu))
+ if (plic_parse_context_parent(fwnode, i, &parent_hwirq, &cpu))
continue;
if (parent_hwirq != RV_IRQ_EXT || cpu < 0)
continue;
@@ -634,17 +641,37 @@ fail_cleanup_contexts:
handler->present = false;
handler->hart_base = NULL;
handler->enable_base = NULL;
+ kfree(handler->enable_save);
handler->enable_save = NULL;
handler->priv = NULL;
}
- return -ENOMEM;
+ bitmap_free(priv->prio_save);
+fail_free_priv:
+ kfree(priv);
+fail_free_regs:
+ iounmap(regs);
+ return error;
+}
+
+static int plic_platform_probe(struct platform_device *pdev)
+{
+ return plic_probe(pdev->dev.fwnode);
}
static struct platform_driver plic_driver = {
.driver = {
.name = "riscv-plic",
.of_match_table = plic_match,
+ .suppress_bind_attrs = true,
},
- .probe = plic_probe,
+ .probe = plic_platform_probe,
};
builtin_platform_driver(plic_driver);
+
+static int __init plic_early_probe(struct device_node *node,
+ struct device_node *parent)
+{
+ return plic_probe(&node->fwnode);
+}
+
+IRQCHIP_DECLARE(riscv, "allwinner,sun20i-d1-plic", plic_early_probe);
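
The PLIC rework above decouples probing from the platform device so the same plic_probe() can run both from the platform driver and early via IRQCHIP_DECLARE(); without a struct device there is no devm_* cleanup, hence the explicit kzalloc/kfree pairs and the fail_free_regs/fail_free_priv unwind labels. A tiny userspace sketch of that goto-unwind shape, with malloc standing in for the kernel allocators:

#include <stdio.h>
#include <stdlib.h>

static int fake_probe(void)
{
        int error;
        char *regs, *priv, *prio_save;

        regs = malloc(64);                      /* of_iomap() stand-in */
        if (!regs)
                return -12;                     /* -ENOMEM */

        priv = malloc(128);                     /* kzalloc(sizeof(*priv)) */
        if (!priv) {
                error = -12;
                goto fail_free_regs;
        }

        prio_save = malloc(32);                 /* bitmap_zalloc() */
        if (!prio_save) {
                error = -12;
                goto fail_free_priv;
        }

        printf("probe ok\n");
        free(prio_save);                        /* a real probe keeps these; */
        free(priv);                             /* freed here only so the demo */
        free(regs);                             /* exits leak-free */
        return 0;

fail_free_priv:
        free(priv);
fail_free_regs:
        free(regs);
        return error;
}

int main(void)
{
        return fake_probe() ? 1 : 0;
}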
diff --git a/drivers/irqchip/irq-sun6i-r.c b/drivers/irqchip/irq-sun6i-r.c
index a01e44049415..99958d470d62 100644
--- a/drivers/irqchip/irq-sun6i-r.c
+++ b/drivers/irqchip/irq-sun6i-r.c
@@ -270,7 +270,7 @@ static const struct irq_domain_ops sun6i_r_intc_domain_ops = {
static int sun6i_r_intc_suspend(void)
{
- u32 buf[BITS_TO_U32(max(SUN6I_NR_TOP_LEVEL_IRQS, SUN6I_NR_MUX_BITS))];
+ u32 buf[BITS_TO_U32(MAX(SUN6I_NR_TOP_LEVEL_IRQS, SUN6I_NR_MUX_BITS))];
int i;
/* Wake IRQs are enabled during system sleep and shutdown. */
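
The one-liner above swaps the kernel's type-checking max() for the plain ternary MAX() because the buffer size must be an integer constant expression; max() expands to machinery that is not one, so at best the array becomes a VLA. A small sketch of the constant-expression version, with illustrative values rather than the driver's real constants:

#include <stdio.h>

#define MAX(a, b)      ((a) > (b) ? (a) : (b))
#define BITS_TO_U32(n) (((n) + 31) / 32)

#define NR_TOP_LEVEL_IRQS 64
#define NR_MUX_BITS       96

int main(void)
{
        /* MAX() stays a constant expression, so this is a fixed-size array. */
        unsigned int buf[BITS_TO_U32(MAX(NR_TOP_LEVEL_IRQS, NR_MUX_BITS))];

        printf("buf holds %zu words\n", sizeof(buf) / sizeof(buf[0]));
        return 0;
}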
diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c
index 238d3d344949..7e08714d507f 100644
--- a/drivers/irqchip/irq-xilinx-intc.c
+++ b/drivers/irqchip/irq-xilinx-intc.c
@@ -189,7 +189,7 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
irqc->intr_mask = 0;
}
- if (irqc->intr_mask >> irqc->nr_irq)
+ if ((u64)irqc->intr_mask >> irqc->nr_irq)
pr_warn("irq-xilinx: mismatch in kind-of-intr param\n");
pr_info("irq-xilinx: %pOF: num_irq=%d, edge=0x%x\n",
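
The cast in the xilinx hunk matters because intr_mask is a 32-bit value and nr_irq can legitimately be 32: shifting a 32-bit integer by 32 bits is undefined behaviour in C, so the unwidened test was unreliable. A quick sketch of the well-defined form the patch switches to:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t intr_mask = 0xffffffffu;
        unsigned int nr_irq = 32;

        /* Well defined: the shift happens on a 64-bit value, yielding 0 here. */
        if ((uint64_t)intr_mask >> nr_irq)
                printf("mismatch in kind-of-intr param\n");
        else
                printf("intr_mask consistent with %u interrupts\n", nr_irq);

        return 0;
}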