author    Linus Torvalds <torvalds@linux-foundation.org>  2015-11-03 14:40:01 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-11-03 14:40:01 -0800
commit    6aa2fdb87cf01d7746955c600cbac352dc04d451 (patch)
tree      75ba04b2579fafb103dfa049289e7e6b7b3d5bb9 /kernel
parent    7b2a4306f9e7d64bb408a6df3bb419500578068a (diff)
parent    d9e4ad5badf4ccbfddee208c898fb8fd0c8836b1 (diff)
Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq updates from Thomas Gleixner:
 "The irq department delivers:

   - Rework the irqdomain core infrastructure to accommodate ACPI based
     systems.  This is required to support ARM64 without creating
     artificial device tree nodes.

   - Sanitize the ACPI based ARM GIC initialization by making use of
     the new firmware independent irqdomain core

   - Further improvements to the generic MSI management

   - Generalize the irq migration on CPU hotplug

   - Improvements to the threaded interrupt infrastructure

   - Allow the migration of "chained" low level interrupt handlers

   - Allow optional force masking of interrupts in disable_irq[_nosync]

   - Support for two new interrupt chips - Sigh!

   - A larger set of errata fixes for ARM gicv3

   - The usual pile of fixes, updates, improvements and cleanups all
     over the place"

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (71 commits)
  Document that IRQ_NONE should be returned when IRQ not actually handled
  PCI/MSI: Allow the MSI domain to be device-specific
  PCI: Add per-device MSI domain hook
  of/irq: Use the msi-map property to provide device-specific MSI domain
  of/irq: Split of_msi_map_rid to reuse msi-map lookup
  irqchip/gic-v3-its: Parse new version of msi-parent property
  PCI/MSI: Use of_msi_get_domain instead of open-coded "msi-parent" parsing
  of/irq: Use of_msi_get_domain instead of open-coded "msi-parent" parsing
  of/irq: Add support code for multi-parent version of "msi-parent"
  irqchip/gic-v3-its: Add handling of PCI requester id.
  PCI/MSI: Add helper function pci_msi_domain_get_msi_rid().
  of/irq: Add new function of_msi_map_rid()
  Docs: dt: Add PCI MSI map bindings
  irqchip/gic-v2m: Add support for multiple MSI frames
  irqchip/gic-v3: Fix translation of LPIs after conversion to irq_fwspec
  irqchip/mxs: Add Alphascale ASM9260 support
  irqchip/mxs: Prepare driver for hardware with different offsets
  irqchip/mxs: Panic if ioremap or domain creation fails
  irqdomain: Documentation updates
  irqdomain/msi: Use fwnode instead of of_node
  ...
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/irq/Kconfig        4
-rw-r--r--  kernel/irq/Makefile       1
-rw-r--r--  kernel/irq/chip.c        28
-rw-r--r--  kernel/irq/cpuhotplug.c  82
-rw-r--r--  kernel/irq/handle.c       7
-rw-r--r--  kernel/irq/internals.h    4
-rw-r--r--  kernel/irq/irqdomain.c  177
-rw-r--r--  kernel/irq/manage.c     221
-rw-r--r--  kernel/irq/msi.c          8
-rw-r--r--  kernel/irq/proc.c         2
-rw-r--r--  kernel/irq/settings.h    12
11 files changed, 426 insertions, 120 deletions
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 9a76e3beda54..3b48dab80164 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -30,6 +30,10 @@ config GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
config GENERIC_PENDING_IRQ
bool
+# Support for migrating irqs off a cpu before the cpu goes offline.
+config GENERIC_IRQ_MIGRATION
+ bool
+
# Alpha specific irq affinity mechanism
config AUTO_IRQ_AFFINITY
bool
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index d12123526e2b..2fc9cbdf35b6 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -5,5 +5,6 @@ obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
obj-$(CONFIG_IRQ_DOMAIN) += irqdomain.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
+obj-$(CONFIG_GENERIC_IRQ_MIGRATION) += cpuhotplug.o
obj-$(CONFIG_PM_SLEEP) += pm.o
obj-$(CONFIG_GENERIC_MSI_IRQ) += msi.o
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index e28169dd1c36..15206453b12a 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -21,6 +21,20 @@
#include "internals.h"
+static irqreturn_t bad_chained_irq(int irq, void *dev_id)
+{
+ WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
+ return IRQ_NONE;
+}
+
+/*
+ * Chained handlers should never call the action on their IRQ. This
+ * default action emits a warning if that ever happens.
+ */
+struct irqaction chained_action = {
+ .handler = bad_chained_irq,
+};
+
/**
* irq_set_chip - set the irq chip for an irq
* @irq: irq number
@@ -227,6 +241,13 @@ void irq_enable(struct irq_desc *desc)
* disabled. If an interrupt happens, then the interrupt flow
* handler masks the line at the hardware level and marks it
* pending.
+ *
+ * If the interrupt chip does not implement the irq_disable callback,
+ * a driver can disable the lazy approach for a particular irq line by
+ * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
+ * be used for devices which cannot disable the interrupt at the
+ * device level under certain circumstances and have to use
+ * disable_irq[_nosync] instead.
*/
void irq_disable(struct irq_desc *desc)
{
@@ -234,6 +255,8 @@ void irq_disable(struct irq_desc *desc)
if (desc->irq_data.chip->irq_disable) {
desc->irq_data.chip->irq_disable(&desc->irq_data);
irq_state_set_masked(desc);
+ } else if (irq_settings_disable_unlazy(desc)) {
+ mask_irq(desc);
}
}
@@ -669,7 +692,7 @@ void handle_percpu_irq(struct irq_desc *desc)
if (chip->irq_ack)
chip->irq_ack(&desc->irq_data);
- handle_irq_event_percpu(desc, desc->action);
+ handle_irq_event_percpu(desc);
if (chip->irq_eoi)
chip->irq_eoi(&desc->irq_data);
@@ -746,6 +769,8 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
if (desc->irq_data.chip != &no_irq_chip)
mask_ack_irq(desc);
irq_state_set_disabled(desc);
+ if (is_chained)
+ desc->action = NULL;
desc->depth = 1;
}
desc->handle_irq = handle;
@@ -755,6 +780,7 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
irq_settings_set_noprobe(desc);
irq_settings_set_norequest(desc);
irq_settings_set_nothread(desc);
+ desc->action = &chained_action;
irq_startup(desc, true);
}
}
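
A note on the IRQ_DISABLE_UNLAZY flag documented in the irq_disable() comment above: a driver opts out of lazy disabling per irq line before requesting the interrupt. A minimal sketch under that assumption; the foo_* names and probe flow are illustrative, only irq_set_status_flags() and the flag itself come from this series.

#include <linux/interrupt.h>
#include <linux/irq.h>

/* Sketch (hypothetical driver): force a hard mask at the irq chip on
 * disable_irq[_nosync]() for a device that cannot mask the interrupt
 * at the device level.
 */
static int foo_setup_irq(unsigned int irq, irq_handler_t handler, void *dev)
{
	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
	return request_irq(irq, handler, 0, "foo", dev);
}
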
diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
new file mode 100644
index 000000000000..80f4f4e56fed
--- /dev/null
+++ b/kernel/irq/cpuhotplug.c
@@ -0,0 +1,82 @@
+/*
+ * Generic cpu hotunplug interrupt migration code copied from the
+ * arch/arm implementation
+ *
+ * Copyright (C) Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/interrupt.h>
+#include <linux/ratelimit.h>
+#include <linux/irq.h>
+
+#include "internals.h"
+
+static bool migrate_one_irq(struct irq_desc *desc)
+{
+ struct irq_data *d = irq_desc_get_irq_data(desc);
+ const struct cpumask *affinity = d->common->affinity;
+ struct irq_chip *c;
+ bool ret = false;
+
+ /*
+ * If this is a per-CPU interrupt, or the affinity does not
+ * include this CPU, then we have nothing to do.
+ */
+ if (irqd_is_per_cpu(d) ||
+ !cpumask_test_cpu(smp_processor_id(), affinity))
+ return false;
+
+ if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+ affinity = cpu_online_mask;
+ ret = true;
+ }
+
+ c = irq_data_get_irq_chip(d);
+ if (!c->irq_set_affinity) {
+ pr_warn_ratelimited("IRQ%u: unable to set affinity\n", d->irq);
+ } else {
+ int r = irq_do_set_affinity(d, affinity, false);
+ if (r)
+ pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
+ d->irq, r);
+ }
+
+ return ret;
+}
+
+/**
+ * irq_migrate_all_off_this_cpu - Migrate irqs away from offline cpu
+ *
+ * The current CPU has been marked offline. Migrate IRQs off this CPU.
+ * If the affinity settings do not allow other CPUs, force them onto any
+ * available CPU.
+ *
+ * Note: we must iterate over all IRQs, whether they have an attached
+ * action structure or not, as we need to get chained interrupts too.
+ */
+void irq_migrate_all_off_this_cpu(void)
+{
+ unsigned int irq;
+ struct irq_desc *desc;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ for_each_active_irq(irq) {
+ bool affinity_broken;
+
+ desc = irq_to_desc(irq);
+ raw_spin_lock(&desc->lock);
+ affinity_broken = migrate_one_irq(desc);
+ raw_spin_unlock(&desc->lock);
+
+ if (affinity_broken)
+ pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
+ irq, smp_processor_id());
+ }
+
+ local_irq_restore(flags);
+}
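
irq_migrate_all_off_this_cpu() is meant to be called from an architecture's CPU offline path, with the arch selecting GENERIC_IRQ_MIGRATION in its Kconfig. A sketch of such a hook, modeled loosely on the arch/arm code this file was copied from; the surrounding arch details are assumptions, not part of this patch.

#include <linux/cpumask.h>
#include <linux/irq.h>
#include <linux/smp.h>

/* Sketch: arch-side CPU offline hook (details vary per architecture). */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	set_cpu_online(cpu, false);

	/* Route all interrupts currently targeting this CPU elsewhere. */
	irq_migrate_all_off_this_cpu();

	return 0;
}
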
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index e25a83b67cce..a302cf9a2126 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -132,11 +132,11 @@ void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
wake_up_process(action->thread);
}
-irqreturn_t
-handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
+irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
{
irqreturn_t retval = IRQ_NONE;
unsigned int flags = 0, irq = desc->irq_data.irq;
+ struct irqaction *action = desc->action;
do {
irqreturn_t res;
@@ -184,14 +184,13 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
irqreturn_t handle_irq_event(struct irq_desc *desc)
{
- struct irqaction *action = desc->action;
irqreturn_t ret;
desc->istate &= ~IRQS_PENDING;
irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
raw_spin_unlock(&desc->lock);
- ret = handle_irq_event_percpu(desc, action);
+ ret = handle_irq_event_percpu(desc);
raw_spin_lock(&desc->lock);
irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 5ef0c2dbe930..05c2188271b8 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -18,6 +18,8 @@
extern bool noirqdebug;
+extern struct irqaction chained_action;
+
/*
* Bits used by threaded handlers:
* IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
@@ -81,7 +83,7 @@ extern void irq_mark_irq(unsigned int irq);
extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
-irqreturn_t handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action);
+irqreturn_t handle_irq_event_percpu(struct irq_desc *desc);
irqreturn_t handle_irq_event(struct irq_desc *desc);
/* Resending of interrupts: */
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index dc9d27c0c158..22aa9612ef7c 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -27,6 +27,57 @@ static int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
irq_hw_number_t hwirq, int node);
static void irq_domain_check_hierarchy(struct irq_domain *domain);
+struct irqchip_fwid {
+ struct fwnode_handle fwnode;
+ char *name;
+ void *data;
+};
+
+/**
+ * irq_domain_alloc_fwnode - Allocate a fwnode_handle suitable for
+ * identifying an irq domain
+ * @data: optional user-provided data
+ *
+ * Allocate a struct irqchip_fwid, and return a pointer to the embedded
+ * fwnode_handle (or NULL on failure).
+ */
+struct fwnode_handle *irq_domain_alloc_fwnode(void *data)
+{
+ struct irqchip_fwid *fwid;
+ char *name;
+
+ fwid = kzalloc(sizeof(*fwid), GFP_KERNEL);
+ name = kasprintf(GFP_KERNEL, "irqchip@%p", data);
+
+ if (!fwid || !name) {
+ kfree(fwid);
+ kfree(name);
+ return NULL;
+ }
+
+ fwid->name = name;
+ fwid->data = data;
+ fwid->fwnode.type = FWNODE_IRQCHIP;
+ return &fwid->fwnode;
+}
+
+/**
+ * irq_domain_free_fwnode - Free a non-OF-backed fwnode_handle
+ *
+ * Free a fwnode_handle allocated with irq_domain_alloc_fwnode.
+ */
+void irq_domain_free_fwnode(struct fwnode_handle *fwnode)
+{
+ struct irqchip_fwid *fwid;
+
+ if (WARN_ON(fwnode->type != FWNODE_IRQCHIP))
+ return;
+
+ fwid = container_of(fwnode, struct irqchip_fwid, fwnode);
+ kfree(fwid->name);
+ kfree(fwid);
+}
+
/**
* __irq_domain_add() - Allocate a new irq_domain data structure
* @of_node: optional device-tree node of the interrupt controller
@@ -40,23 +91,28 @@ static void irq_domain_check_hierarchy(struct irq_domain *domain);
* Allocates and initializes an irq_domain structure.
* Returns pointer to IRQ domain, or NULL on failure.
*/
-struct irq_domain *__irq_domain_add(struct device_node *of_node, int size,
+struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
irq_hw_number_t hwirq_max, int direct_max,
const struct irq_domain_ops *ops,
void *host_data)
{
struct irq_domain *domain;
+ struct device_node *of_node;
+
+ of_node = to_of_node(fwnode);
domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size),
GFP_KERNEL, of_node_to_nid(of_node));
if (WARN_ON(!domain))
return NULL;
+ of_node_get(of_node);
+
/* Fill structure */
INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
domain->ops = ops;
domain->host_data = host_data;
- domain->of_node = of_node_get(of_node);
+ domain->fwnode = fwnode;
domain->hwirq_max = hwirq_max;
domain->revmap_size = size;
domain->revmap_direct_max_irq = direct_max;
@@ -102,7 +158,7 @@ void irq_domain_remove(struct irq_domain *domain)
pr_debug("Removed domain %s\n", domain->name);
- of_node_put(domain->of_node);
+ of_node_put(irq_domain_get_of_node(domain));
kfree(domain);
}
EXPORT_SYMBOL_GPL(irq_domain_remove);
@@ -133,7 +189,7 @@ struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
{
struct irq_domain *domain;
- domain = __irq_domain_add(of_node, size, size, 0, ops, host_data);
+ domain = __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data);
if (!domain)
return NULL;
@@ -177,7 +233,7 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
{
struct irq_domain *domain;
- domain = __irq_domain_add(of_node, first_hwirq + size,
+ domain = __irq_domain_add(of_node_to_fwnode(of_node), first_hwirq + size,
first_hwirq + size, 0, ops, host_data);
if (domain)
irq_domain_associate_many(domain, first_irq, first_hwirq, size);
@@ -187,12 +243,12 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
/**
- * irq_find_matching_host() - Locates a domain for a given device node
- * @node: device-tree node of the interrupt controller
+ * irq_find_matching_fwnode() - Locates a domain for a given fwnode
+ * @fwnode: FW descriptor of the interrupt controller
* @bus_token: domain-specific data
*/
-struct irq_domain *irq_find_matching_host(struct device_node *node,
- enum irq_domain_bus_token bus_token)
+struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
+ enum irq_domain_bus_token bus_token)
{
struct irq_domain *h, *found = NULL;
int rc;
@@ -209,9 +265,9 @@ struct irq_domain *irq_find_matching_host(struct device_node *node,
mutex_lock(&irq_domain_mutex);
list_for_each_entry(h, &irq_domain_list, link) {
if (h->ops->match)
- rc = h->ops->match(h, node, bus_token);
+ rc = h->ops->match(h, to_of_node(fwnode), bus_token);
else
- rc = ((h->of_node != NULL) && (h->of_node == node) &&
+ rc = ((fwnode != NULL) && (h->fwnode == fwnode) &&
((bus_token == DOMAIN_BUS_ANY) ||
(h->bus_token == bus_token)));
@@ -223,7 +279,7 @@ struct irq_domain *irq_find_matching_host(struct device_node *node,
mutex_unlock(&irq_domain_mutex);
return found;
}
-EXPORT_SYMBOL_GPL(irq_find_matching_host);
+EXPORT_SYMBOL_GPL(irq_find_matching_fwnode);
/**
* irq_set_default_host() - Set a "default" irq domain
@@ -336,10 +392,12 @@ EXPORT_SYMBOL_GPL(irq_domain_associate);
void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
irq_hw_number_t hwirq_base, int count)
{
+ struct device_node *of_node;
int i;
+ of_node = irq_domain_get_of_node(domain);
pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
- of_node_full_name(domain->of_node), irq_base, (int)hwirq_base, count);
+ of_node_full_name(of_node), irq_base, (int)hwirq_base, count);
for (i = 0; i < count; i++) {
irq_domain_associate(domain, irq_base + i, hwirq_base + i);
@@ -359,12 +417,14 @@ EXPORT_SYMBOL_GPL(irq_domain_associate_many);
*/
unsigned int irq_create_direct_mapping(struct irq_domain *domain)
{
+ struct device_node *of_node;
unsigned int virq;
if (domain == NULL)
domain = irq_default_domain;
- virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
+ of_node = irq_domain_get_of_node(domain);
+ virq = irq_alloc_desc_from(1, of_node_to_nid(of_node));
if (!virq) {
pr_debug("create_direct virq allocation failed\n");
return 0;
@@ -399,6 +459,7 @@ EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
unsigned int irq_create_mapping(struct irq_domain *domain,
irq_hw_number_t hwirq)
{
+ struct device_node *of_node;
int virq;
pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
@@ -412,6 +473,8 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
}
pr_debug("-> using domain @%p\n", domain);
+ of_node = irq_domain_get_of_node(domain);
+
/* Check if mapping already exists */
virq = irq_find_mapping(domain, hwirq);
if (virq) {
@@ -420,8 +483,7 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
}
/* Allocate a virtual interrupt number */
- virq = irq_domain_alloc_descs(-1, 1, hwirq,
- of_node_to_nid(domain->of_node));
+ virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node));
if (virq <= 0) {
pr_debug("-> virq allocation failed\n");
return 0;
@@ -433,7 +495,7 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
}
pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
- hwirq, of_node_full_name(domain->of_node), virq);
+ hwirq, of_node_full_name(of_node), virq);
return virq;
}
@@ -460,10 +522,12 @@ EXPORT_SYMBOL_GPL(irq_create_mapping);
int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
irq_hw_number_t hwirq_base, int count)
{
+ struct device_node *of_node;
int ret;
+ of_node = irq_domain_get_of_node(domain);
ret = irq_alloc_descs(irq_base, irq_base, count,
- of_node_to_nid(domain->of_node));
+ of_node_to_nid(of_node));
if (unlikely(ret < 0))
return ret;
@@ -472,28 +536,56 @@ int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
}
EXPORT_SYMBOL_GPL(irq_create_strict_mappings);
-unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)
+static int irq_domain_translate(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ irq_hw_number_t *hwirq, unsigned int *type)
+{
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+ if (d->ops->translate)
+ return d->ops->translate(d, fwspec, hwirq, type);
+#endif
+ if (d->ops->xlate)
+ return d->ops->xlate(d, to_of_node(fwspec->fwnode),
+ fwspec->param, fwspec->param_count,
+ hwirq, type);
+
+ /* If domain has no translation, then we assume interrupt line */
+ *hwirq = fwspec->param[0];
+ return 0;
+}
+
+static void of_phandle_args_to_fwspec(struct of_phandle_args *irq_data,
+ struct irq_fwspec *fwspec)
+{
+ int i;
+
+ fwspec->fwnode = irq_data->np ? &irq_data->np->fwnode : NULL;
+ fwspec->param_count = irq_data->args_count;
+
+ for (i = 0; i < irq_data->args_count; i++)
+ fwspec->param[i] = irq_data->args[i];
+}
+
+unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
{
struct irq_domain *domain;
irq_hw_number_t hwirq;
unsigned int type = IRQ_TYPE_NONE;
int virq;
- domain = irq_data->np ? irq_find_host(irq_data->np) : irq_default_domain;
+ if (fwspec->fwnode)
+ domain = irq_find_matching_fwnode(fwspec->fwnode, DOMAIN_BUS_ANY);
+ else
+ domain = irq_default_domain;
+
if (!domain) {
pr_warn("no irq domain found for %s !\n",
- of_node_full_name(irq_data->np));
+ of_node_full_name(to_of_node(fwspec->fwnode)));
return 0;
}
- /* If domain has no translation, then we assume interrupt line */
- if (domain->ops->xlate == NULL)
- hwirq = irq_data->args[0];
- else {
- if (domain->ops->xlate(domain, irq_data->np, irq_data->args,
- irq_data->args_count, &hwirq, &type))
- return 0;
- }
+ if (irq_domain_translate(domain, fwspec, &hwirq, &type))
+ return 0;
if (irq_domain_is_hierarchy(domain)) {
/*
@@ -504,7 +596,7 @@ unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)
if (virq)
return virq;
- virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, irq_data);
+ virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec);
if (virq <= 0)
return 0;
} else {
@@ -520,6 +612,15 @@ unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)
irq_set_irq_type(virq, type);
return virq;
}
+EXPORT_SYMBOL_GPL(irq_create_fwspec_mapping);
+
+unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)
+{
+ struct irq_fwspec fwspec;
+
+ of_phandle_args_to_fwspec(irq_data, &fwspec);
+ return irq_create_fwspec_mapping(&fwspec);
+}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);
/**
@@ -590,14 +691,16 @@ static int virq_debug_show(struct seq_file *m, void *private)
"name", "mapped", "linear-max", "direct-max", "devtree-node");
mutex_lock(&irq_domain_mutex);
list_for_each_entry(domain, &irq_domain_list, link) {
+ struct device_node *of_node;
int count = 0;
+ of_node = irq_domain_get_of_node(domain);
radix_tree_for_each_slot(slot, &domain->revmap_tree, &iter, 0)
count++;
seq_printf(m, "%c%-16s %6u %10u %10u %s\n",
domain == irq_default_domain ? '*' : ' ', domain->name,
domain->revmap_size + count, domain->revmap_size,
domain->revmap_direct_max_irq,
- domain->of_node ? of_node_full_name(domain->of_node) : "");
+ of_node ? of_node_full_name(of_node) : "");
}
mutex_unlock(&irq_domain_mutex);
@@ -751,11 +854,11 @@ static int irq_domain_alloc_descs(int virq, unsigned int cnt,
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/**
- * irq_domain_add_hierarchy - Add a irqdomain into the hierarchy
+ * irq_domain_create_hierarchy - Add an irqdomain into the hierarchy
* @parent: Parent irq domain to associate with the new domain
* @flags: Irq domain flags associated to the domain
* @size: Size of the domain. See below
- * @node: Optional device-tree node of the interrupt controller
+ * @fwnode: Optional fwnode of the interrupt controller
* @ops: Pointer to the interrupt domain callbacks
* @host_data: Controller private data pointer
*
@@ -765,19 +868,19 @@ static int irq_domain_alloc_descs(int virq, unsigned int cnt,
* domain flags are set.
* Returns pointer to IRQ domain, or NULL on failure.
*/
-struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent,
+struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
unsigned int flags,
unsigned int size,
- struct device_node *node,
+ struct fwnode_handle *fwnode,
const struct irq_domain_ops *ops,
void *host_data)
{
struct irq_domain *domain;
if (size)
- domain = irq_domain_add_linear(node, size, ops, host_data);
+ domain = irq_domain_create_linear(fwnode, size, ops, host_data);
else
- domain = irq_domain_add_tree(node, ops, host_data);
+ domain = irq_domain_create_tree(fwnode, ops, host_data);
if (domain) {
domain->parent = parent;
domain->flags |= flags;
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index f9a59f6cabd2..a71175ff98d5 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -258,37 +258,6 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
-/**
- * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
- * @irq: interrupt number to set affinity
- * @vcpu_info: vCPU specific data
- *
- * This function uses the vCPU specific data to set the vCPU
- * affinity for an irq. The vCPU specific data is passed from
- * outside, such as KVM. One example code path is as below:
- * KVM -> IOMMU -> irq_set_vcpu_affinity().
- */
-int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
-{
- unsigned long flags;
- struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
- struct irq_data *data;
- struct irq_chip *chip;
- int ret = -ENOSYS;
-
- if (!desc)
- return -EINVAL;
-
- data = irq_desc_get_irq_data(desc);
- chip = irq_data_get_irq_chip(data);
- if (chip && chip->irq_set_vcpu_affinity)
- ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
- irq_put_desc_unlock(desc, flags);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
-
static void irq_affinity_notify(struct work_struct *work)
{
struct irq_affinity_notify *notify =
@@ -424,6 +393,37 @@ setup_affinity(struct irq_desc *desc, struct cpumask *mask)
}
#endif
+/**
+ * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
+ * @irq: interrupt number to set affinity
+ * @vcpu_info: vCPU specific data
+ *
+ * This function uses the vCPU specific data to set the vCPU
+ * affinity for an irq. The vCPU specific data is passed from
+ * outside, such as KVM. One example code path is as below:
+ * KVM -> IOMMU -> irq_set_vcpu_affinity().
+ */
+int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
+{
+ unsigned long flags;
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
+ struct irq_data *data;
+ struct irq_chip *chip;
+ int ret = -ENOSYS;
+
+ if (!desc)
+ return -EINVAL;
+
+ data = irq_desc_get_irq_data(desc);
+ chip = irq_data_get_irq_chip(data);
+ if (chip && chip->irq_set_vcpu_affinity)
+ ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
+ irq_put_desc_unlock(desc, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
+
void __disable_irq(struct irq_desc *desc)
{
if (!desc->depth++)
@@ -730,6 +730,12 @@ static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
return IRQ_NONE;
}
+static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
+{
+ WARN(1, "Secondary action handler called for irq %d\n", irq);
+ return IRQ_NONE;
+}
+
static int irq_wait_for_interrupt(struct irqaction *action)
{
set_current_state(TASK_INTERRUPTIBLE);
@@ -756,7 +762,8 @@ static int irq_wait_for_interrupt(struct irqaction *action)
static void irq_finalize_oneshot(struct irq_desc *desc,
struct irqaction *action)
{
- if (!(desc->istate & IRQS_ONESHOT))
+ if (!(desc->istate & IRQS_ONESHOT) ||
+ action->handler == irq_forced_secondary_handler)
return;
again:
chip_bus_lock(desc);
@@ -910,6 +917,18 @@ static void irq_thread_dtor(struct callback_head *unused)
irq_finalize_oneshot(desc, action);
}
+static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
+{
+ struct irqaction *secondary = action->secondary;
+
+ if (WARN_ON_ONCE(!secondary))
+ return;
+
+ raw_spin_lock_irq(&desc->lock);
+ __irq_wake_thread(desc, secondary);
+ raw_spin_unlock_irq(&desc->lock);
+}
+
/*
* Interrupt handler thread
*/
@@ -940,6 +959,8 @@ static int irq_thread(void *data)
action_ret = handler_fn(desc, action);
if (action_ret == IRQ_HANDLED)
atomic_inc(&desc->threads_handled);
+ if (action_ret == IRQ_WAKE_THREAD)
+ irq_wake_secondary(desc, action);
wake_threads_waitq(desc);
}
@@ -984,20 +1005,36 @@ void irq_wake_thread(unsigned int irq, void *dev_id)
}
EXPORT_SYMBOL_GPL(irq_wake_thread);
-static void irq_setup_forced_threading(struct irqaction *new)
+static int irq_setup_forced_threading(struct irqaction *new)
{
if (!force_irqthreads)
- return;
+ return 0;
if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
- return;
+ return 0;
new->flags |= IRQF_ONESHOT;
- if (!new->thread_fn) {
- set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
- new->thread_fn = new->handler;
- new->handler = irq_default_primary_handler;
+ /*
+ * Handle the case where we have a real primary handler and a
+ * thread handler. We force-thread both by creating a secondary
+ * action for the thread handler.
+ */
+ if (new->handler != irq_default_primary_handler && new->thread_fn) {
+ /* Allocate the secondary action */
+ new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
+ if (!new->secondary)
+ return -ENOMEM;
+ new->secondary->handler = irq_forced_secondary_handler;
+ new->secondary->thread_fn = new->thread_fn;
+ new->secondary->dev_id = new->dev_id;
+ new->secondary->irq = new->irq;
+ new->secondary->name = new->name;
}
+ /* Deal with the primary handler */
+ set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
+ new->thread_fn = new->handler;
+ new->handler = irq_default_primary_handler;
+ return 0;
}
static int irq_request_resources(struct irq_desc *desc)
@@ -1017,6 +1054,48 @@ static void irq_release_resources(struct irq_desc *desc)
c->irq_release_resources(d);
}
+static int
+setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
+{
+ struct task_struct *t;
+ struct sched_param param = {
+ .sched_priority = MAX_USER_RT_PRIO/2,
+ };
+
+ if (!secondary) {
+ t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
+ new->name);
+ } else {
+ t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
+ new->name);
+ param.sched_priority -= 1;
+ }
+
+ if (IS_ERR(t))
+ return PTR_ERR(t);
+
+ sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
+
+ /*
+ * We keep the reference to the task struct even if
+ * the thread dies, so that the interrupt code never
+ * references an already freed task_struct.
+ */
+ get_task_struct(t);
+ new->thread = t;
+ /*
+ * Tell the thread to set its affinity. This is
+ * important for shared interrupt handlers as we do
+ * not invoke setup_affinity() for the secondary
+ * handlers as everything is already set up. Even for
+ * interrupts marked with IRQF_NO_BALANCE this is
+ * correct as we want the thread to move to the cpu(s)
+ * on which the requesting code placed the interrupt.
+ */
+ set_bit(IRQTF_AFFINITY, &new->thread_flags);
+ return 0;
+}
+
/*
* Internal function to register an irqaction - typically used to
* allocate special interrupts that are part of the architecture.
@@ -1037,6 +1116,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
if (!try_module_get(desc->owner))
return -ENODEV;
+ new->irq = irq;
+
/*
* Check whether the interrupt nests into another interrupt
* thread.
@@ -1054,8 +1135,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
*/
new->handler = irq_nested_primary_handler;
} else {
- if (irq_settings_can_thread(desc))
- irq_setup_forced_threading(new);
+ if (irq_settings_can_thread(desc)) {
+ ret = irq_setup_forced_threading(new);
+ if (ret)
+ goto out_mput;
+ }
}
/*
@@ -1064,37 +1148,14 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
* thread.
*/
if (new->thread_fn && !nested) {
- struct task_struct *t;
- static const struct sched_param param = {
- .sched_priority = MAX_USER_RT_PRIO/2,
- };
-
- t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
- new->name);
- if (IS_ERR(t)) {
- ret = PTR_ERR(t);
+ ret = setup_irq_thread(new, irq, false);
+ if (ret)
goto out_mput;
+ if (new->secondary) {
+ ret = setup_irq_thread(new->secondary, irq, true);
+ if (ret)
+ goto out_thread;
}
-
- sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
-
- /*
- * We keep the reference to the task struct even if
- * the thread dies to avoid that the interrupt code
- * references an already freed task_struct.
- */
- get_task_struct(t);
- new->thread = t;
- /*
- * Tell the thread to set its affinity. This is
- * important for shared interrupt handlers as we do
- * not invoke setup_affinity() for the secondary
- * handlers as everything is already set up. Even for
- * interrupts marked with IRQF_NO_BALANCE this is
- * correct as we want the thread to move to the cpu(s)
- * on which the requesting code placed the interrupt.
- */
- set_bit(IRQTF_AFFINITY, &new->thread_flags);
}
if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
@@ -1267,7 +1328,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
irq, nmsk, omsk);
}
- new->irq = irq;
*old_ptr = new;
irq_pm_install_action(desc, new);
@@ -1293,6 +1353,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
*/
if (new->thread)
wake_up_process(new->thread);
+ if (new->secondary)
+ wake_up_process(new->secondary->thread);
register_irq_proc(irq, desc);
new->dir = NULL;
@@ -1323,6 +1385,13 @@ out_thread:
kthread_stop(t);
put_task_struct(t);
}
+ if (new->secondary && new->secondary->thread) {
+ struct task_struct *t = new->secondary->thread;
+
+ new->secondary->thread = NULL;
+ kthread_stop(t);
+ put_task_struct(t);
+ }
out_mput:
module_put(desc->owner);
return ret;
@@ -1394,6 +1463,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
/* If this was the last handler, shut down the IRQ line: */
if (!desc->action) {
+ irq_settings_clr_disable_unlazy(desc);
irq_shutdown(desc);
irq_release_resources(desc);
}
@@ -1430,9 +1500,14 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
if (action->thread) {
kthread_stop(action->thread);
put_task_struct(action->thread);
+ if (action->secondary && action->secondary->thread) {
+ kthread_stop(action->secondary->thread);
+ put_task_struct(action->secondary->thread);
+ }
}
module_put(desc->owner);
+ kfree(action->secondary);
return action;
}
@@ -1576,8 +1651,10 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
retval = __setup_irq(irq, desc, action);
chip_bus_sync_unlock(desc);
- if (retval)
+ if (retval) {
+ kfree(action->secondary);
kfree(action);
+ }
#ifdef CONFIG_DEBUG_SHIRQ_FIXME
if (!retval && (irqflags & IRQF_SHARED)) {
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index be9149f62eb8..6b0c0b74a2a1 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -235,11 +235,11 @@ static void msi_domain_update_chip_ops(struct msi_domain_info *info)
/**
* msi_create_irq_domain - Create a MSI interrupt domain
- * @of_node: Optional device-tree node of the interrupt controller
+ * @fwnode: Optional fwnode of the interrupt controller
* @info: MSI domain info
* @parent: Parent irq domain
*/
-struct irq_domain *msi_create_irq_domain(struct device_node *node,
+struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
struct msi_domain_info *info,
struct irq_domain *parent)
{
@@ -248,8 +248,8 @@ struct irq_domain *msi_create_irq_domain(struct device_node *node,
if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
msi_domain_update_chip_ops(info);
- return irq_domain_add_hierarchy(parent, 0, 0, node, &msi_domain_ops,
- info);
+ return irq_domain_create_hierarchy(parent, 0, 0, fwnode,
+ &msi_domain_ops, info);
}
/**
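
With msi_create_irq_domain() taking a fwnode, MSI domains can now be stacked on firmware-agnostic parents. A rough sketch; foo_msi_chip and the flag selection are assumptions, not part of this patch.

#include <linux/irqdomain.h>
#include <linux/msi.h>

static struct irq_chip foo_msi_chip = {
	.name = "foo-MSI",
};

static struct msi_domain_info foo_msi_info = {
	.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
	.chip  = &foo_msi_chip,
};

/* Sketch: create an MSI domain on top of a (possibly ACPI-described)
 * parent domain identified by fwnode.
 */
static struct irq_domain *foo_msi_init(struct fwnode_handle *fwnode,
				       struct irq_domain *parent)
{
	return msi_create_irq_domain(fwnode, &foo_msi_info, parent);
}
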
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index a50ddc9417ff..a916cf144b65 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -475,7 +475,7 @@ int show_interrupts(struct seq_file *p, void *v)
for_each_online_cpu(j)
any_count |= kstat_irqs_cpu(i, j);
action = desc->action;
- if (!action && !any_count)
+ if ((!action || action == &chained_action) && !any_count)
goto out;
seq_printf(p, "%*d: ", prec, i);
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
index 3320b84cc60f..320579d89091 100644
--- a/kernel/irq/settings.h
+++ b/kernel/irq/settings.h
@@ -15,6 +15,7 @@ enum {
_IRQ_NESTED_THREAD = IRQ_NESTED_THREAD,
_IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID,
_IRQ_IS_POLLED = IRQ_IS_POLLED,
+ _IRQ_DISABLE_UNLAZY = IRQ_DISABLE_UNLAZY,
_IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
};
@@ -28,6 +29,7 @@ enum {
#define IRQ_NESTED_THREAD GOT_YOU_MORON
#define IRQ_PER_CPU_DEVID GOT_YOU_MORON
#define IRQ_IS_POLLED GOT_YOU_MORON
+#define IRQ_DISABLE_UNLAZY GOT_YOU_MORON
#undef IRQF_MODIFY_MASK
#define IRQF_MODIFY_MASK GOT_YOU_MORON
@@ -154,3 +156,13 @@ static inline bool irq_settings_is_polled(struct irq_desc *desc)
{
return desc->status_use_accessors & _IRQ_IS_POLLED;
}
+
+static inline bool irq_settings_disable_unlazy(struct irq_desc *desc)
+{
+ return desc->status_use_accessors & _IRQ_DISABLE_UNLAZY;
+}
+
+static inline void irq_settings_clr_disable_unlazy(struct irq_desc *desc)
+{
+ desc->status_use_accessors &= ~_IRQ_DISABLE_UNLAZY;
+}