author    Linus Torvalds <torvalds@linux-foundation.org>  2020-12-15 15:03:31 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2020-12-15 15:03:31 -0800
commit    2cffa11e2aa76a0560c890f057858b68fe744d03
tree      8e9eadb4267e6c00ce7b04cec687f7c995b0c985  /kernel/irq
parent    5b200f578960a9635918a0ed41be3d8dc90186bf
parent    3c41e57a1e168d879e923c5583adeae47eec9f64
Merge tag 'irq-core-2020-12-15' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq updates from Thomas Gleixner:
 "Generic interrupt and irqchips subsystem updates. Unusually, there is
  not a single completely new irq chip driver, just new DT bindings and
  extensions of existing drivers to accommodate new variants!

  Core:

   - Consolidation and robustness changes for irq time accounting

   - Cleanup and consolidation of irq stats

   - Remove the fasteoi IPI flow which has been proved useless

   - Provide an interface for converting legacy interrupt mechanisms
     into irqdomains

  Drivers:

   - Preliminary support for managed interrupts on platform devices

   - Correctly identify allocation of MSIs proxied by another device

   - Generalise the Ocelot support to new SoCs

   - Improve GICv4.1 vcpu entry, matching the corresponding KVM
     optimisation

   - Work around spurious interrupts on Qualcomm PDC

   - Random fixes and cleanups"

* tag 'irq-core-2020-12-15' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (54 commits)
  irqchip/qcom-pdc: Fix phantom irq when changing between rising/falling
  driver core: platform: Add devm_platform_get_irqs_affinity()
  ACPI: Drop acpi_dev_irqresource_disabled()
  resource: Add irqresource_disabled()
  genirq/affinity: Add irq_update_affinity_desc()
  irqchip/gic-v3-its: Flag device allocation as proxied if behind a PCI bridge
  irqchip/gic-v3-its: Tag ITS device as shared if allocating for a proxy device
  platform-msi: Track shared domain allocation
  irqchip/ti-sci-intr: Fix freeing of irqs
  irqchip/ti-sci-inta: Fix printing of inta id on probe success
  drivers/irqchip: Remove EZChip NPS interrupt controller
  Revert "genirq: Add fasteoi IPI flow"
  irqchip/hip04: Make IPIs use handle_percpu_devid_irq()
  irqchip/bcm2836: Make IPIs use handle_percpu_devid_irq()
  irqchip/armada-370-xp: Make IPIs use handle_percpu_devid_irq()
  irqchip/gic, gic-v3: Make SGIs use handle_percpu_devid_irq()
  irqchip/ocelot: Add support for Jaguar2 platforms
  irqchip/ocelot: Add support for Serval platforms
  irqchip/ocelot: Add support for Luton platforms
  irqchip/ocelot: prepare to support more SoC
  ...
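A rough sketch of how a platform driver might use the devm_platform_get_irqs_affinity() helper mentioned above under "managed interrupts on platform devices"; the probe function, vector counts and debug print are hypothetical, and the exact signature should be checked against the driver core patch in this series:

	#include <linux/interrupt.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		/* Keep one non-queue vector, spread the remaining ones across CPUs */
		struct irq_affinity affd = { .pre_vectors = 1 };
		int nvec, i, *irqs;

		nvec = devm_platform_get_irqs_affinity(pdev, &affd, 2, 16, &irqs);
		if (nvec < 0)
			return nvec;

		/* irqs[] now holds managed, affinity-spread Linux irq numbers */
		for (i = 0; i < nvec; i++)
			dev_dbg(&pdev->dev, "vector %d -> irq %d\n", i, irqs[i]);

		return 0;
	}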
Diffstat (limited to 'kernel/irq')
 -rw-r--r--  kernel/irq/Kconfig         |  5
 -rw-r--r--  kernel/irq/chip.c          | 29
 -rw-r--r--  kernel/irq/generic-chip.c  |  2
 -rw-r--r--  kernel/irq/irqdesc.c       | 51
 -rw-r--r--  kernel/irq/irqdomain.c     | 32
 -rw-r--r--  kernel/irq/manage.c        | 70
 6 files changed, 97 insertions, 92 deletions
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 164a031cfdb6..d79ef2493a28 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -26,11 +26,6 @@ config GENERIC_IRQ_SHOW_LEVEL
config GENERIC_IRQ_EFFECTIVE_AFF_MASK
bool
-# Facility to allocate a hardware interrupt. This is legacy support
-# and should not be used in new code. Use irq domains instead.
-config GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
- bool
-
# Support for delayed migration from interrupt context
config GENERIC_PENDING_IRQ
bool
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index b9b9618e1aca..6d89e33fe3aa 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -61,7 +61,7 @@ int irq_set_chip(unsigned int irq, struct irq_chip *chip)
EXPORT_SYMBOL(irq_set_chip);
/**
- * irq_set_type - set the irq trigger type for an irq
+ * irq_set_irq_type - set the irq trigger type for an irq
* @irq: irq number
* @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
*/
@@ -945,33 +945,6 @@ void handle_percpu_devid_irq(struct irq_desc *desc)
}
/**
- * handle_percpu_devid_fasteoi_ipi - Per CPU local IPI handler with per cpu
- * dev ids
- * @desc: the interrupt description structure for this irq
- *
- * The biggest difference with the IRQ version is that the interrupt is
- * EOIed early, as the IPI could result in a context switch, and we need to
- * make sure the IPI can fire again. We also assume that the arch code has
- * registered an action. If not, we are positively doomed.
- */
-void handle_percpu_devid_fasteoi_ipi(struct irq_desc *desc)
-{
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct irqaction *action = desc->action;
- unsigned int irq = irq_desc_get_irq(desc);
- irqreturn_t res;
-
- __kstat_incr_irqs_this_cpu(desc);
-
- if (chip->irq_eoi)
- chip->irq_eoi(&desc->irq_data);
-
- trace_irq_handler_entry(irq, action);
- res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
- trace_irq_handler_exit(irq, action, res);
-}
-
-/**
* handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu
* dev ids
* @desc: the interrupt description structure for this irq
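The hunk above removes the reverted fasteoi IPI flow; as the shortlog shows, the affected irqchip drivers now route their IPIs/SGIs through the regular per-CPU devid flow instead. A minimal sketch of what that wiring looks like in an irqchip's irqdomain mapping path (the chip and domain names are placeholders, not code from this diff):

	/* In the irqchip's irqdomain ->map()/->alloc() path for IPI/SGI hwirqs */
	irq_set_percpu_devid(virq);
	irq_domain_set_info(domain, virq, hwirq, &example_ipi_chip,
			    domain->host_data, handle_percpu_devid_irq,
			    NULL, NULL);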
diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
index e2999a070a99..a23ac2bbf433 100644
--- a/kernel/irq/generic-chip.c
+++ b/kernel/irq/generic-chip.c
@@ -269,7 +269,7 @@ irq_gc_init_mask_cache(struct irq_chip_generic *gc, enum irq_gc_flags flags)
}
/**
- * __irq_alloc_domain_generic_chip - Allocate generic chips for an irq domain
+ * __irq_alloc_domain_generic_chips - Allocate generic chips for an irq domain
* @d: irq domain for which to allocate chips
* @irqs_per_chip: Number of interrupts each chip handles (max 32)
* @num_ct: Number of irq_chip_type instances associated with this
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 1a7723604399..e810eb9906ea 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -810,57 +810,6 @@ unlock:
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);
-#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
-/**
- * irq_alloc_hwirqs - Allocate an irq descriptor and initialize the hardware
- * @cnt: number of interrupts to allocate
- * @node: node on which to allocate
- *
- * Returns an interrupt number > 0 or 0, if the allocation fails.
- */
-unsigned int irq_alloc_hwirqs(int cnt, int node)
-{
- int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL, NULL);
-
- if (irq < 0)
- return 0;
-
- for (i = irq; cnt > 0; i++, cnt--) {
- if (arch_setup_hwirq(i, node))
- goto err;
- irq_clear_status_flags(i, _IRQ_NOREQUEST);
- }
- return irq;
-
-err:
- for (i--; i >= irq; i--) {
- irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
- arch_teardown_hwirq(i);
- }
- irq_free_descs(irq, cnt);
- return 0;
-}
-EXPORT_SYMBOL_GPL(irq_alloc_hwirqs);
-
-/**
- * irq_free_hwirqs - Free irq descriptor and cleanup the hardware
- * @from: Free from irq number
- * @cnt: number of interrupts to free
- *
- */
-void irq_free_hwirqs(unsigned int from, int cnt)
-{
- int i, j;
-
- for (i = from, j = cnt; j > 0; i++, j--) {
- irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
- arch_teardown_hwirq(i);
- }
- irq_free_descs(from, cnt);
-}
-EXPORT_SYMBOL_GPL(irq_free_hwirqs);
-#endif
-
/**
* irq_get_next_irq - get next allocated irq number
* @offset: where to start the search
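The removal above (together with the GENERIC_IRQ_LEGACY_ALLOC_HWIRQ option dropped from Kconfig earlier in this diff) retires the legacy hwirq allocators; new code is expected to go through irqdomains instead. A hedged sketch of the irqdomain equivalent, assuming a hypothetical controller with 32 hwirqs, its own my_domain_ops and a device_node np:

	struct irq_domain *domain;
	unsigned int virq;

	/* One linear domain instead of ad-hoc hwirq allocation */
	domain = irq_domain_add_linear(np, 32, &my_domain_ops, priv);
	if (!domain)
		return -ENOMEM;

	/* Map hwirq 5 to a Linux irq number on demand */
	virq = irq_create_mapping(domain, 5);
	if (!virq)
		return -EINVAL;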
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index a21acac9a71a..6aacd342cd14 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -360,16 +360,27 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
const struct irq_domain_ops *ops,
void *host_data)
{
+ return irq_domain_create_legacy(of_node_to_fwnode(of_node), size,
+ first_irq, first_hwirq, ops, host_data);
+}
+EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
+
+struct irq_domain *irq_domain_create_legacy(struct fwnode_handle *fwnode,
+ unsigned int size,
+ unsigned int first_irq,
+ irq_hw_number_t first_hwirq,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
struct irq_domain *domain;
- domain = __irq_domain_add(of_node_to_fwnode(of_node), first_hwirq + size,
- first_hwirq + size, 0, ops, host_data);
+ domain = __irq_domain_add(fwnode, first_hwirq + size, first_hwirq + size, 0, ops, host_data);
if (domain)
irq_domain_associate_many(domain, first_irq, first_hwirq, size);
return domain;
}
-EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
+EXPORT_SYMBOL_GPL(irq_domain_create_legacy);
/**
* irq_find_matching_fwspec() - Locates a domain for a given fwspec
@@ -494,7 +505,7 @@ static void irq_domain_set_mapping(struct irq_domain *domain,
}
}
-void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
+static void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
{
struct irq_data *irq_data = irq_get_irq_data(irq);
irq_hw_number_t hwirq;
@@ -749,7 +760,7 @@ static void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args,
{
int i;
- fwspec->fwnode = np ? &np->fwnode : NULL;
+ fwspec->fwnode = of_node_to_fwnode(np);
fwspec->param_count = count;
for (i = 0; i < count; i++)
@@ -1382,8 +1393,15 @@ static void irq_domain_free_irqs_hierarchy(struct irq_domain *domain,
unsigned int irq_base,
unsigned int nr_irqs)
{
- if (domain->ops->free)
- domain->ops->free(domain, irq_base, nr_irqs);
+ unsigned int i;
+
+ if (!domain->ops->free)
+ return;
+
+ for (i = 0; i < nr_irqs; i++) {
+ if (irq_domain_get_irq_data(domain, irq_base + i))
+ domain->ops->free(domain, irq_base + i, 1);
+ }
}
int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
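The first hunk above adds irq_domain_create_legacy(), a fwnode-based counterpart to irq_domain_add_legacy(), so firmware interfaces that only carry a struct fwnode_handle can register a legacy range as well. A usage sketch (FIRST_IRQ, fwnode, my_domain_ops and priv are placeholders, not taken from this diff):

	/* 16 pre-assigned Linux irqs starting at FIRST_IRQ, mapping hwirqs 0..15 */
	struct irq_domain *d;

	d = irq_domain_create_legacy(fwnode, 16, FIRST_IRQ, 0,
				     &my_domain_ops, priv);
	if (!d)
		return -ENOMEM;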
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index c460e0496006..c826ba4141fe 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -371,6 +371,76 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
return ret;
}
+/**
+ * irq_update_affinity_desc - Update affinity management for an interrupt
+ * @irq: The interrupt number to update
+ * @affinity: Pointer to the affinity descriptor
+ *
+ * This interface can be used to configure the affinity management of
+ * interrupts which have been allocated already.
+ *
+ * There are certain limitations on when it may be used - attempts to use it
+ * for when the kernel is configured for generic IRQ reservation mode (in
+ * config GENERIC_IRQ_RESERVATION_MODE) will fail, as it may conflict with
+ * managed/non-managed interrupt accounting. In addition, attempts to use it on
+ * an interrupt which is already started or which has already been configured
+ * as managed will also fail, as these mean invalid init state or double init.
+ */
+int irq_update_affinity_desc(unsigned int irq,
+ struct irq_affinity_desc *affinity)
+{
+ struct irq_desc *desc;
+ unsigned long flags;
+ bool activated;
+ int ret = 0;
+
+ /*
+ * Supporting this with the reservation scheme used by x86 needs
+ * some more thought. Fail it for now.
+ */
+ if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
+ return -EOPNOTSUPP;
+
+ desc = irq_get_desc_buslock(irq, &flags, 0);
+ if (!desc)
+ return -EINVAL;
+
+ /* Requires the interrupt to be shut down */
+ if (irqd_is_started(&desc->irq_data)) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ /* Interrupts which are already managed cannot be modified */
+ if (irqd_affinity_is_managed(&desc->irq_data)) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ /*
+ * Deactivate the interrupt. That's required to undo
+ * anything an earlier activation has established.
+ */
+ activated = irqd_is_activated(&desc->irq_data);
+ if (activated)
+ irq_domain_deactivate_irq(&desc->irq_data);
+
+ if (affinity->is_managed) {
+ irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED);
+ irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN);
+ }
+
+ cpumask_copy(desc->irq_common_data.affinity, &affinity->mask);
+
+ /* Restore the activation state */
+ if (activated)
+ irq_domain_activate_irq(&desc->irq_data, false);
+
+out_unlock:
+ irq_put_desc_busunlock(desc, flags);
+ return ret;
+}
+
int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
{
struct irq_desc *desc = irq_to_desc(irq);
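To illustrate the interface added in this hunk: a minimal sketch of a caller that turns an already-allocated, not yet started interrupt into a managed one pinned to a chosen mask (the irq number and target CPU are assumptions, not part of this diff):

	struct irq_affinity_desc affd = { .is_managed = 1 };
	int ret;

	cpumask_copy(&affd.mask, cpumask_of(2));	/* assumed target: CPU 2 */

	ret = irq_update_affinity_desc(irq, &affd);
	if (ret)
		pr_warn("irq %u: affinity descriptor update failed (%d)\n", irq, ret);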