Diffstat (limited to 'arch/um/kernel/irq.c')
-rw-r--r--  arch/um/kernel/irq.c | 849 ++++++++++++++++++++++++++-----------
1 file changed, 554 insertions(+), 295 deletions(-)
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index 36e12f0cefd5..f4b13f15a9c1 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -1,6 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
+ * Copyright (C) 2017 - Cambridge Greys Ltd
+ * Copyright (C) 2011 - 2014 Cisco Systems Inc
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
* Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
* Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
*/
@@ -16,245 +18,424 @@
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
+#include <irq_user.h>
+#include <irq_kern.h>
+#include <linux/time-internal.h>
+
+DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
+
+#define irq_stats(x) (&per_cpu(irq_stat, x))
+
+/*
+ * When epoll triggers, we do not know why it did so;
+ * we can also have different IRQs for read and write.
+ * This is why we keep a small irq_reg array for each fd -
+ * one entry per IRQ type.
+ */
+struct irq_reg {
+ void *id;
+ int irq;
+ /* it's cheaper to store this than to query it */
+ int events;
+ bool active;
+ bool pending;
+ bool wakeup;
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+ bool pending_event;
+ void (*timetravel_handler)(int, int, void *,
+ struct time_travel_event *);
+ struct time_travel_event event;
+#endif
+};
+
+struct irq_entry {
+ struct list_head list;
+ int fd;
+ struct irq_reg reg[NUM_IRQ_TYPES];
+ bool suspended;
+ bool sigio_workaround;
+};
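
For orientation: each monitored fd gets exactly one irq_entry, and any read and write IRQs registered against that fd occupy its reg[] slots. A minimal sketch of picking the slot for a given type (illustrative helper, not part of the patch, built only on the structures above):

    /* Illustrative only: an fd's read/write IRQs share one irq_entry */
    static int irq_for_type(struct irq_entry *entry, enum um_irq_type t)
    {
            /* reg[t].events == 0 means no IRQ of this type is registered */
            return entry->reg[t].events ? entry->reg[t].irq : -1;
    }
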
+
+static DEFINE_RAW_SPINLOCK(irq_lock);
+static LIST_HEAD(active_fds);
+static DECLARE_BITMAP(irqs_allocated, UM_LAST_SIGNAL_IRQ);
+static bool irqs_suspended;
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+static bool irqs_pending;
+#endif
+
+static void irq_io_loop(struct irq_reg *irq, struct uml_pt_regs *regs)
+{
/*
- * This list is accessed under irq_lock, except in sigio_handler,
- * where it is safe from being modified. IRQ handlers won't change it -
- * if an IRQ source has vanished, it will be freed by free_irqs just
- * before returning from sigio_handler. That will process a separate
- * list of irqs to free, with its own locking, coming back here to
- * remove list elements, taking the irq_lock to do so.
+ * irq->active guards against reentry
+ * irq->pending accumulates pending requests
+ * if pending is raised the irq_handler is re-run
+ * until pending is cleared
*/
-static struct irq_fd *active_fds = NULL;
-static struct irq_fd **last_irq_ptr = &active_fds;
+ if (irq->active) {
+ irq->active = false;
-extern void free_irqs(void);
+ do {
+ irq->pending = false;
+ do_IRQ(irq->irq, regs);
+ } while (irq->pending);
+
+ irq->active = true;
+ } else {
+ irq->pending = true;
+ }
+}
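
irq_io_loop() above is a lock-free coalescing dispatcher: active doubles as an "idle" flag (true while nobody is inside the handler) and pending records requests that arrive mid-run, so the loop re-runs do_IRQ() until nothing is outstanding. The same pattern in isolation, with generic hypothetical names (assuming signal-style re-entry on a single CPU, as in UML):

    struct coalesced {
            bool active;    /* true: nobody is inside the handler */
            bool pending;   /* a request arrived while handling */
    };

    static void dispatch(struct coalesced *c, void (*handle)(void))
    {
            if (!c->active) {       /* re-entered: record and leave */
                    c->pending = true;
                    return;
            }
            c->active = false;
            do {
                    c->pending = false;
                    handle();       /* re-entry here sets c->pending */
            } while (c->pending);
            c->active = true;
    }
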
-void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+static void irq_event_handler(struct time_travel_event *ev)
{
- struct irq_fd *irq_fd;
- int n;
+ struct irq_reg *reg = container_of(ev, struct irq_reg, event);
- if (smp_sigio_handler())
+ /* do nothing if suspended; just cause a wakeup and mark as pending */
+ if (irqs_suspended) {
+ irqs_pending = true;
+ reg->pending_event = true;
return;
+ }
- while (1) {
- n = os_waiting_for_events(active_fds);
- if (n <= 0) {
- if (n == -EINTR)
- continue;
- else break;
- }
+ generic_handle_irq(reg->irq);
+}
+
+static bool irq_do_timetravel_handler(struct irq_entry *entry,
+ enum um_irq_type t)
+{
+ struct irq_reg *reg = &entry->reg[t];
+
+ if (!reg->timetravel_handler)
+ return false;
+
+ /*
+ * Handle all messages - we might get multiple even while
+ * interrupts are already suspended, due to suspend order
+ * etc. Note that time_travel_add_irq_event() will not add
+ * an event twice; if one is pending already, "first wins".
+ */
+ reg->timetravel_handler(reg->irq, entry->fd, reg->id, &reg->event);
- for (irq_fd = active_fds; irq_fd != NULL;
- irq_fd = irq_fd->next) {
- if (irq_fd->current_events != 0) {
- irq_fd->current_events = 0;
- do_IRQ(irq_fd->irq, regs);
+ if (!reg->event.pending)
+ return false;
+
+ return true;
+}
+
+static void irq_do_pending_events(bool timetravel_handlers_only)
+{
+ struct irq_entry *entry;
+
+ if (!irqs_pending || timetravel_handlers_only)
+ return;
+
+ irqs_pending = false;
+
+ list_for_each_entry(entry, &active_fds, list) {
+ enum um_irq_type t;
+
+ for (t = 0; t < NUM_IRQ_TYPES; t++) {
+ struct irq_reg *reg = &entry->reg[t];
+
+ /*
+ * Any timetravel_handler was invoked already, just
+ * directly run the IRQ.
+ */
+ if (reg->pending_event) {
+ irq_enter();
+ generic_handle_irq(reg->irq);
+ irq_exit();
+ reg->pending_event = false;
}
}
}
-
- free_irqs();
+}
+#else
+static bool irq_do_timetravel_handler(struct irq_entry *entry,
+ enum um_irq_type t)
+{
+ return false;
}
-static DEFINE_SPINLOCK(irq_lock);
+static void irq_do_pending_events(bool timetravel_handlers_only)
+{
+}
+#endif
-static int activate_fd(int irq, int fd, int type, void *dev_id)
+static void sigio_reg_handler(int idx, struct irq_entry *entry, enum um_irq_type t,
+ struct uml_pt_regs *regs,
+ bool timetravel_handlers_only)
{
- struct pollfd *tmp_pfd;
- struct irq_fd *new_fd, *irq_fd;
- unsigned long flags;
- int events, err, n;
+ struct irq_reg *reg = &entry->reg[t];
- err = os_set_fd_async(fd);
- if (err < 0)
- goto out;
+ if (!reg->events)
+ return;
- err = -ENOMEM;
- new_fd = kmalloc(sizeof(struct irq_fd), GFP_KERNEL);
- if (new_fd == NULL)
- goto out;
+ if (os_epoll_triggered(idx, reg->events) <= 0)
+ return;
- if (type == IRQ_READ)
- events = UM_POLLIN | UM_POLLPRI;
- else events = UM_POLLOUT;
- *new_fd = ((struct irq_fd) { .next = NULL,
- .id = dev_id,
- .fd = fd,
- .type = type,
- .irq = irq,
- .events = events,
- .current_events = 0 } );
-
- err = -EBUSY;
- spin_lock_irqsave(&irq_lock, flags);
- for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) {
- if ((irq_fd->fd == fd) && (irq_fd->type == type)) {
- printk(KERN_ERR "Registering fd %d twice\n", fd);
- printk(KERN_ERR "Irqs : %d, %d\n", irq_fd->irq, irq);
- printk(KERN_ERR "Ids : 0x%p, 0x%p\n", irq_fd->id,
- dev_id);
- goto out_unlock;
- }
+ if (irq_do_timetravel_handler(entry, t))
+ return;
+
+ /*
+ * If we're called to only run time-travel handlers then don't
+ * actually proceed but mark sigio as pending (if applicable).
+ * For suspend/resume, timetravel_handlers_only may be true
+ * despite time-travel not being configured and used.
+ */
+ if (timetravel_handlers_only) {
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+ reg->pending_event = true;
+ irqs_pending = true;
+ mark_sigio_pending();
+#endif
+ return;
}
- if (type == IRQ_WRITE)
- fd = -1;
+ irq_io_loop(reg, regs);
+}
- tmp_pfd = NULL;
- n = 0;
+static void _sigio_handler(struct uml_pt_regs *regs,
+ bool timetravel_handlers_only)
+{
+ struct irq_entry *irq_entry;
+ int n, i;
- while (1) {
- n = os_create_pollfd(fd, events, tmp_pfd, n);
- if (n == 0)
- break;
-
- /*
- * n > 0
- * It means we couldn't put new pollfd to current pollfds
- * and tmp_fds is NULL or too small for new pollfds array.
- * Needed size is equal to n as minimum.
- *
- * Here we have to drop the lock in order to call
- * kmalloc, which might sleep.
- * If something else came in and changed the pollfds array
- * so we will not be able to put new pollfd struct to pollfds
- * then we free the buffer tmp_fds and try again.
- */
- spin_unlock_irqrestore(&irq_lock, flags);
- kfree(tmp_pfd);
+ if (timetravel_handlers_only && !um_irq_timetravel_handler_used())
+ return;
- tmp_pfd = kmalloc(n, GFP_KERNEL);
- if (tmp_pfd == NULL)
- goto out_kfree;
+ /* Flush out pending events that were ignored due to time-travel. */
+ if (!irqs_suspended)
+ irq_do_pending_events(timetravel_handlers_only);
- spin_lock_irqsave(&irq_lock, flags);
- }
+ while (1) {
+ /*
+ * This is now lockless - epoll keeps back-references to the irqs
+ * which have triggered it, so there is no need to walk the irq
+ * list and lock it every time. We avoid locking by turning off
+ * IO for a specific fd by executing os_del_epoll_fd(fd) before
+ * we do any changes to the actual data structures.
+ */
+ n = os_waiting_for_events_epoll();
- *last_irq_ptr = new_fd;
- last_irq_ptr = &new_fd->next;
+ if (n <= 0) {
+ if (n == -EINTR)
+ continue;
+ else
+ break;
+ }
- spin_unlock_irqrestore(&irq_lock, flags);
+ for (i = 0; i < n; i++) {
+ enum um_irq_type t;
- /*
- * This calls activate_fd, so it has to be outside the critical
- * section.
- */
- maybe_sigio_broken(fd, (type == IRQ_READ));
+ irq_entry = os_epoll_get_data_pointer(i);
- return 0;
+ for (t = 0; t < NUM_IRQ_TYPES; t++)
+ sigio_reg_handler(i, irq_entry, t, regs,
+ timetravel_handlers_only);
+ }
+ }
- out_unlock:
- spin_unlock_irqrestore(&irq_lock, flags);
- out_kfree:
- kfree(new_fd);
- out:
- return err;
+ if (!timetravel_handlers_only)
+ free_irqs();
}
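
The lockless dispatch works because epoll itself stores a per-fd cookie: the os_* helpers register the irq_entry pointer as the epoll data, and os_epoll_get_data_pointer() hands it straight back after the wait. Roughly what happens on the host side (a sketch of the idea, not the actual os layer implementation):

    #include <sys/epoll.h>

    static int epoll_fd;    /* created once, cf. os_setup_epoll() */

    /* Sketch: register or update an fd, with the entry as cookie */
    static int sketch_add_fd(int events, int fd, void *irq_entry)
    {
            struct epoll_event ev = {
                    .events = events,
                    .data.ptr = irq_entry, /* returned by epoll_wait() */
            };

            if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, fd, &ev) == 0)
                    return 0;
            /* fd already registered: modify instead of add */
            return epoll_ctl(epoll_fd, EPOLL_CTL_MOD, fd, &ev);
    }
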
-static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
+void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs,
+ void *mc)
{
- unsigned long flags;
-
- spin_lock_irqsave(&irq_lock, flags);
- os_free_irq_by_cb(test, arg, active_fds, &last_irq_ptr);
- spin_unlock_irqrestore(&irq_lock, flags);
+ preempt_disable();
+ _sigio_handler(regs, irqs_suspended);
+ preempt_enable();
}
-struct irq_and_dev {
- int irq;
- void *dev;
-};
-
-static int same_irq_and_dev(struct irq_fd *irq, void *d)
+static struct irq_entry *get_irq_entry_by_fd(int fd)
{
- struct irq_and_dev *data = d;
+ struct irq_entry *walk;
- return ((irq->irq == data->irq) && (irq->id == data->dev));
+ lockdep_assert_held(&irq_lock);
+
+ list_for_each_entry(walk, &active_fds, list) {
+ if (walk->fd == fd)
+ return walk;
+ }
+
+ return NULL;
}
-static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
+static void remove_irq_entry(struct irq_entry *to_free, bool remove)
{
- struct irq_and_dev data = ((struct irq_and_dev) { .irq = irq,
- .dev = dev });
+ if (!to_free)
+ return;
- free_irq_by_cb(same_irq_and_dev, &data);
+ if (remove)
+ os_del_epoll_fd(to_free->fd);
+ list_del(&to_free->list);
}
-static int same_fd(struct irq_fd *irq, void *fd)
+static bool update_irq_entry(struct irq_entry *entry)
{
- return (irq->fd == *((int *)fd));
+ enum um_irq_type i;
+ int events = 0;
+
+ for (i = 0; i < NUM_IRQ_TYPES; i++)
+ events |= entry->reg[i].events;
+
+ if (events) {
+ /* will modify (instead of add) if needed */
+ os_add_epoll_fd(events, entry->fd, entry);
+ return true;
+ }
+
+ os_del_epoll_fd(entry->fd);
+ return false;
}
-void free_irq_by_fd(int fd)
+static struct irq_entry *update_or_remove_irq_entry(struct irq_entry *entry)
{
- free_irq_by_cb(same_fd, &fd);
+ if (update_irq_entry(entry))
+ return NULL;
+ remove_irq_entry(entry, false);
+ return entry;
}
-/* Must be called with irq_lock held */
-static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
+static int activate_fd(int irq, int fd, enum um_irq_type type, void *dev_id,
+ void (*timetravel_handler)(int, int, void *,
+ struct time_travel_event *))
{
- struct irq_fd *irq;
- int i = 0;
- int fdi;
+ struct irq_entry *irq_entry, *to_free = NULL;
+ int err, events = os_event_mask(type);
+ unsigned long flags;
- for (irq = active_fds; irq != NULL; irq = irq->next) {
- if ((irq->fd == fd) && (irq->irq == irqnum))
- break;
- i++;
- }
- if (irq == NULL) {
- printk(KERN_ERR "find_irq_by_fd doesn't have descriptor %d\n",
- fd);
+ err = os_set_fd_async(fd);
+ if (err < 0)
goto out;
+
+ raw_spin_lock_irqsave(&irq_lock, flags);
+ irq_entry = get_irq_entry_by_fd(fd);
+ if (irq_entry) {
+already:
+ /* cannot register the same FD twice with the same type */
+ if (WARN_ON(irq_entry->reg[type].events)) {
+ err = -EALREADY;
+ goto out_unlock;
+ }
+
+ /* temporarily disable to avoid IRQ-side locking */
+ os_del_epoll_fd(fd);
+ } else {
+ struct irq_entry *new;
+
+ /* don't restore interrupts */
+ raw_spin_unlock(&irq_lock);
+ new = kzalloc(sizeof(*irq_entry), GFP_ATOMIC);
+ if (!new) {
+ local_irq_restore(flags);
+ return -ENOMEM;
+ }
+ raw_spin_lock(&irq_lock);
+ irq_entry = get_irq_entry_by_fd(fd);
+ if (irq_entry) {
+ to_free = new;
+ goto already;
+ }
+ irq_entry = new;
+ irq_entry->fd = fd;
+ list_add_tail(&irq_entry->list, &active_fds);
+ maybe_sigio_broken(fd);
}
- fdi = os_get_pollfd(i);
- if ((fdi != -1) && (fdi != fd)) {
- printk(KERN_ERR "find_irq_by_fd - mismatch between active_fds "
- "and pollfds, fd %d vs %d, need %d\n", irq->fd,
- fdi, fd);
- irq = NULL;
- goto out;
+
+ irq_entry->reg[type].id = dev_id;
+ irq_entry->reg[type].irq = irq;
+ irq_entry->reg[type].active = true;
+ irq_entry->reg[type].events = events;
+
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+ if (um_irq_timetravel_handler_used()) {
+ irq_entry->reg[type].timetravel_handler = timetravel_handler;
+ irq_entry->reg[type].event.fn = irq_event_handler;
}
- *index_out = i;
- out:
- return irq;
+#endif
+
+ WARN_ON(!update_irq_entry(irq_entry));
+ err = 0;
+out_unlock:
+ raw_spin_unlock_irqrestore(&irq_lock, flags);
+out:
+ kfree(to_free);
+ return err;
+}
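
activate_fd() also illustrates the usual drop-lock/allocate/recheck dance: the allocation happens outside the raw spinlock (still with interrupts off, hence GFP_ATOMIC), and after re-taking the lock the fd lookup is repeated because another context may have inserted the same fd in the meantime. The pattern in isolation, with generic hypothetical names (obj_lock, obj_lookup, obj_insert are placeholders):

    static struct obj *get_or_create(int key)
    {
            struct obj *found, *new = NULL;
            unsigned long flags;

            raw_spin_lock_irqsave(&obj_lock, flags);
            found = obj_lookup(key);
            if (!found) {
                    raw_spin_unlock(&obj_lock);  /* IRQs stay off */
                    new = kzalloc(sizeof(*new), GFP_ATOMIC);
                    raw_spin_lock(&obj_lock);
                    found = obj_lookup(key);     /* re-check: raced? */
                    if (!found && new) {
                            obj_insert(new, key);
                            found = new;
                            new = NULL;          /* ownership passed */
                    }
            }
            raw_spin_unlock_irqrestore(&obj_lock, flags);
            kfree(new);     /* no-op unless we lost the race */
            return found;
    }
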
+
+/*
+ * Remove the entry or entries for a specific FD. If you don't
+ * want to remove all the possible entries, use um_free_irq()
+ * or deactivate_fd() instead.
+ */
+void free_irq_by_fd(int fd)
+{
+ struct irq_entry *to_free;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&irq_lock, flags);
+ to_free = get_irq_entry_by_fd(fd);
+ remove_irq_entry(to_free, true);
+ raw_spin_unlock_irqrestore(&irq_lock, flags);
+ kfree(to_free);
}
+EXPORT_SYMBOL(free_irq_by_fd);
-void reactivate_fd(int fd, int irqnum)
+static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
- struct irq_fd *irq;
+ struct irq_entry *entry, *to_free = NULL;
unsigned long flags;
- int i;
- spin_lock_irqsave(&irq_lock, flags);
- irq = find_irq_by_fd(fd, irqnum, &i);
- if (irq == NULL) {
- spin_unlock_irqrestore(&irq_lock, flags);
- return;
- }
- os_set_pollfd(i, irq->fd);
- spin_unlock_irqrestore(&irq_lock, flags);
+ raw_spin_lock_irqsave(&irq_lock, flags);
+ list_for_each_entry(entry, &active_fds, list) {
+ enum um_irq_type i;
- add_sigio_fd(fd);
+ for (i = 0; i < NUM_IRQ_TYPES; i++) {
+ struct irq_reg *reg = &entry->reg[i];
+
+ if (!reg->events)
+ continue;
+ if (reg->irq != irq)
+ continue;
+ if (reg->id != dev)
+ continue;
+
+ os_del_epoll_fd(entry->fd);
+ reg->events = 0;
+ to_free = update_or_remove_irq_entry(entry);
+ goto out;
+ }
+ }
+out:
+ raw_spin_unlock_irqrestore(&irq_lock, flags);
+ kfree(to_free);
}
void deactivate_fd(int fd, int irqnum)
{
- struct irq_fd *irq;
+ struct irq_entry *entry;
unsigned long flags;
- int i;
+ enum um_irq_type i;
- spin_lock_irqsave(&irq_lock, flags);
- irq = find_irq_by_fd(fd, irqnum, &i);
- if (irq == NULL) {
- spin_unlock_irqrestore(&irq_lock, flags);
- return;
+ os_del_epoll_fd(fd);
+
+ raw_spin_lock_irqsave(&irq_lock, flags);
+ entry = get_irq_entry_by_fd(fd);
+ if (!entry)
+ goto out;
+
+ for (i = 0; i < NUM_IRQ_TYPES; i++) {
+ if (!entry->reg[i].events)
+ continue;
+ if (entry->reg[i].irq == irqnum)
+ entry->reg[i].events = 0;
}
- os_set_pollfd(i, -1);
- spin_unlock_irqrestore(&irq_lock, flags);
+ entry = update_or_remove_irq_entry(entry);
+out:
+ raw_spin_unlock_irqrestore(&irq_lock, flags);
+ kfree(entry);
ignore_sigio_fd(fd);
}
@@ -268,17 +449,18 @@ EXPORT_SYMBOL(deactivate_fd);
*/
int deactivate_all_fds(void)
{
- struct irq_fd *irq;
- int err;
+ struct irq_entry *entry;
- for (irq = active_fds; irq != NULL; irq = irq->next) {
- err = os_clear_fd_async(irq->fd);
- if (err)
- return err;
- }
- /* If there is a signal already queued, after unblocking ignore it */
+ /*
+ * Stop IO. The IRQ loop has no lock, so this is our only way
+ * of making sure we are safe to dispose of all IRQ handlers.
+ */
os_set_ioignore();
+ /* we can no longer call kfree() here so just deactivate */
+ list_for_each_entry(entry, &active_fds, list)
+ os_del_epoll_fd(entry->fd);
+ os_close_epoll_fd();
return 0;
}
@@ -297,31 +479,180 @@ unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
return 1;
}
-void um_free_irq(unsigned int irq, void *dev)
+void um_free_irq(int irq, void *dev)
{
+ if (WARN(irq < 0 || irq > UM_LAST_SIGNAL_IRQ,
+ "freeing invalid irq %d", irq))
+ return;
+
free_irq_by_irq_and_dev(irq, dev);
free_irq(irq, dev);
+ clear_bit(irq, irqs_allocated);
}
EXPORT_SYMBOL(um_free_irq);
-int um_request_irq(unsigned int irq, int fd, int type,
- irq_handler_t handler,
- unsigned long irqflags, const char * devname,
- void *dev_id)
+static int
+_um_request_irq(int irq, int fd, enum um_irq_type type,
+ irq_handler_t handler, unsigned long irqflags,
+ const char *devname, void *dev_id,
+ void (*timetravel_handler)(int, int, void *,
+ struct time_travel_event *))
{
int err;
+ if (irq == UM_IRQ_ALLOC) {
+ int i;
+
+ for (i = UM_FIRST_DYN_IRQ; i < NR_IRQS; i++) {
+ if (!test_and_set_bit(i, irqs_allocated)) {
+ irq = i;
+ break;
+ }
+ }
+ }
+
+ if (irq < 0)
+ return -ENOSPC;
+
if (fd != -1) {
- err = activate_fd(irq, fd, type, dev_id);
+ err = activate_fd(irq, fd, type, dev_id, timetravel_handler);
if (err)
- return err;
+ goto error;
}
- return request_irq(irq, handler, irqflags, devname, dev_id);
+ err = request_irq(irq, handler, irqflags, devname, dev_id);
+ if (err < 0)
+ goto error;
+
+ return irq;
+error:
+ clear_bit(irq, irqs_allocated);
+ return err;
}
+int um_request_irq(int irq, int fd, enum um_irq_type type,
+ irq_handler_t handler, unsigned long irqflags,
+ const char *devname, void *dev_id)
+{
+ return _um_request_irq(irq, fd, type, handler, irqflags,
+ devname, dev_id, NULL);
+}
EXPORT_SYMBOL(um_request_irq);
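
Callers pass either a fixed IRQ number or UM_IRQ_ALLOC to have a free line picked from the irqs_allocated bitmap; on success the (possibly just allocated) IRQ number is returned. A hedged usage sketch (the fd, handler, and device names are made up):

    /* Hypothetical driver: dynamically allocate a read IRQ for an fd */
    int irq = um_request_irq(UM_IRQ_ALLOC, fd, IRQ_READ, my_handler,
                             0, "my-dev", my_dev);
    if (irq < 0)
            return irq;     /* -ENOSPC, or an activate/request error */
    /* ... on teardown, release both the line and the bitmap bit */
    um_free_irq(irq, my_dev);
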
-EXPORT_SYMBOL(reactivate_fd);
+
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+int um_request_irq_tt(int irq, int fd, enum um_irq_type type,
+ irq_handler_t handler, unsigned long irqflags,
+ const char *devname, void *dev_id,
+ void (*timetravel_handler)(int, int, void *,
+ struct time_travel_event *))
+{
+ return _um_request_irq(irq, fd, type, handler, irqflags,
+ devname, dev_id, timetravel_handler);
+}
+EXPORT_SYMBOL(um_request_irq_tt);
+
+void sigio_run_timetravel_handlers(void)
+{
+ _sigio_handler(NULL, true);
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+void um_irqs_suspend(void)
+{
+ struct irq_entry *entry;
+ unsigned long flags;
+
+ irqs_suspended = true;
+
+ raw_spin_lock_irqsave(&irq_lock, flags);
+ list_for_each_entry(entry, &active_fds, list) {
+ enum um_irq_type t;
+ bool clear = true;
+
+ for (t = 0; t < NUM_IRQ_TYPES; t++) {
+ if (!entry->reg[t].events)
+ continue;
+
+ /*
+ * For the SIGIO_WRITE_IRQ, which is used to handle the
+ * SIGIO workaround thread, we need special handling:
+ * enable wake for it itself, but below we tell it about
+ * any FDs that should be suspended.
+ */
+ if (entry->reg[t].wakeup ||
+ entry->reg[t].irq == SIGIO_WRITE_IRQ
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+ || entry->reg[t].timetravel_handler
+#endif
+ ) {
+ clear = false;
+ break;
+ }
+ }
+
+ if (clear) {
+ entry->suspended = true;
+ os_clear_fd_async(entry->fd);
+ entry->sigio_workaround =
+ !__ignore_sigio_fd(entry->fd);
+ }
+ }
+ raw_spin_unlock_irqrestore(&irq_lock, flags);
+}
+
+void um_irqs_resume(void)
+{
+ struct irq_entry *entry;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&irq_lock, flags);
+ list_for_each_entry(entry, &active_fds, list) {
+ if (entry->suspended) {
+ int err = os_set_fd_async(entry->fd);
+
+ WARN(err < 0, "os_set_fd_async returned %d\n", err);
+ entry->suspended = false;
+
+ if (entry->sigio_workaround) {
+ err = __add_sigio_fd(entry->fd);
+ WARN(err < 0, "add_sigio_returned %d\n", err);
+ }
+ }
+ }
+ raw_spin_unlock_irqrestore(&irq_lock, flags);
+
+ irqs_suspended = false;
+ send_sigio_to_self();
+}
+
+static int normal_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+ struct irq_entry *entry;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&irq_lock, flags);
+ list_for_each_entry(entry, &active_fds, list) {
+ enum um_irq_type t;
+
+ for (t = 0; t < NUM_IRQ_TYPES; t++) {
+ if (!entry->reg[t].events)
+ continue;
+
+ if (entry->reg[t].irq != d->irq)
+ continue;
+ entry->reg[t].wakeup = on;
+ goto unlock;
+ }
+ }
+unlock:
+ raw_spin_unlock_irqrestore(&irq_lock, flags);
+ return 0;
+}
+#else
+#define normal_irq_set_wake NULL
+#endif
/*
* irq_chip must define at least enable/disable and ack when
@@ -331,139 +662,67 @@ static void dummy(struct irq_data *d)
{
}
-/* This is used for everything else than the timer. */
+/* This is used for everything other than the timer. */
static struct irq_chip normal_irq_type = {
.name = "SIGIO",
.irq_disable = dummy,
.irq_enable = dummy,
.irq_ack = dummy,
+ .irq_mask = dummy,
+ .irq_unmask = dummy,
+ .irq_set_wake = normal_irq_set_wake,
};
-static struct irq_chip SIGVTALRM_irq_type = {
- .name = "SIGVTALRM",
+static struct irq_chip alarm_irq_type = {
+ .name = "SIGALRM",
.irq_disable = dummy,
.irq_enable = dummy,
.irq_ack = dummy,
+ .irq_mask = dummy,
+ .irq_unmask = dummy,
};
void __init init_IRQ(void)
{
int i;
- irq_set_chip_and_handler(TIMER_IRQ, &SIGVTALRM_irq_type, handle_edge_irq);
+ irq_set_chip_and_handler(TIMER_IRQ, &alarm_irq_type, handle_percpu_irq);
- for (i = 1; i < NR_IRQS; i++)
+ for (i = 1; i < UM_LAST_SIGNAL_IRQ; i++)
irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
+ /* Initialize the epoll loop */
+ os_setup_epoll();
}
-/*
- * IRQ stack entry and exit:
- *
- * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
- * and switch over to the IRQ stack after some preparation. We use
- * sigaltstack to receive signals on a separate stack from the start.
- * These two functions make sure the rest of the kernel won't be too
- * upset by being on a different stack. The IRQ stack has a
- * thread_info structure at the bottom so that current et al continue
- * to work.
- *
- * to_irq_stack copies the current task's thread_info to the IRQ stack
- * thread_info and sets the tasks's stack to point to the IRQ stack.
- *
- * from_irq_stack copies the thread_info struct back (flags may have
- * been modified) and resets the task's stack pointer.
- *
- * Tricky bits -
- *
- * What happens when two signals race each other? UML doesn't block
- * signals with sigprocmask, SA_DEFER, or sa_mask, so a second signal
- * could arrive while a previous one is still setting up the
- * thread_info.
- *
- * There are three cases -
- * The first interrupt on the stack - sets up the thread_info and
- * handles the interrupt
- * A nested interrupt interrupting the copying of the thread_info -
- * can't handle the interrupt, as the stack is in an unknown state
- * A nested interrupt not interrupting the copying of the
- * thread_info - doesn't do any setup, just handles the interrupt
- *
- * The first job is to figure out whether we interrupted stack setup.
- * This is done by xchging the signal mask with thread_info->pending.
- * If the value that comes back is zero, then there is no setup in
- * progress, and the interrupt can be handled. If the value is
- * non-zero, then there is stack setup in progress. In order to have
- * the interrupt handled, we leave our signal in the mask, and it will
- * be handled by the upper handler after it has set up the stack.
- *
- * Next is to figure out whether we are the outer handler or a nested
- * one. As part of setting up the stack, thread_info->real_thread is
- * set to non-NULL (and is reset to NULL on exit). This is the
- * nesting indicator. If it is non-NULL, then the stack is already
- * set up and the handler can run.
- */
-
-static unsigned long pending_mask;
-
-unsigned long to_irq_stack(unsigned long *mask_out)
+int __init arch_probe_nr_irqs(void)
{
- struct thread_info *ti;
- unsigned long mask, old;
- int nested;
-
- mask = xchg(&pending_mask, *mask_out);
- if (mask != 0) {
- /*
- * If any interrupts come in at this point, we want to
- * make sure that their bits aren't lost by our
- * putting our bit in. So, this loop accumulates bits
- * until xchg returns the same value that we put in.
- * When that happens, there were no new interrupts,
- * and pending_mask contains a bit for each interrupt
- * that came in.
- */
- old = *mask_out;
- do {
- old |= mask;
- mask = xchg(&pending_mask, old);
- } while (mask != old);
- return 1;
- }
-
- ti = current_thread_info();
- nested = (ti->real_thread != NULL);
- if (!nested) {
- struct task_struct *task;
- struct thread_info *tti;
-
- task = cpu_tasks[ti->cpu].task;
- tti = task_thread_info(task);
-
- *ti = *tti;
- ti->real_thread = tti;
- task->stack = ti;
- }
-
- mask = xchg(&pending_mask, 0);
- *mask_out |= mask | nested;
- return 0;
+ return NR_IRQS;
}
-unsigned long from_irq_stack(int nested)
+void sigchld_handler(int sig, struct siginfo *unused_si,
+ struct uml_pt_regs *regs, void *mc)
{
- struct thread_info *ti, *to;
- unsigned long mask;
+ do_IRQ(SIGCHLD_IRQ, regs);
+}
- ti = current_thread_info();
+/*
+ * /proc/interrupts printing for arch-specific interrupts
+ */
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+#if IS_ENABLED(CONFIG_SMP)
+ int cpu;
- pending_mask = 1;
+ seq_printf(p, "%*s: ", prec, "RES");
+ for_each_online_cpu(cpu)
+ seq_printf(p, "%10u ", irq_stats(cpu)->irq_resched_count);
+ seq_puts(p, " Rescheduling interrupts\n");
- to = ti->real_thread;
- current->stack = to;
- ti->real_thread = NULL;
- *to = *ti;
+ seq_printf(p, "%*s: ", prec, "CAL");
+ for_each_online_cpu(cpu)
+ seq_printf(p, "%10u ", irq_stats(cpu)->irq_call_count);
+ seq_puts(p, " Function call interrupts\n");
+#endif
- mask = xchg(&pending_mask, 0);
- return mask & ~1;
+ return 0;
}
-
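
With CONFIG_SMP enabled, those two counters render in /proc/interrupts roughly as follows (illustrative values, one column per online CPU):

    RES:        123        Rescheduling interrupts
    CAL:         45        Function call interrupts
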