author    Johannes Berg <johannes.berg@intel.com>    2020-12-02 20:58:07 +0100
committer Richard Weinberger <richard@nod.at>        2020-12-13 22:22:49 +0100
commit    a374b7cb1ea648a27ceaa2dea19aa967725e938b (patch)
tree      3caccdb36c1cc214f10a2f2d2e267acee1f101a6 /arch/um/kernel
parent    92dcd3d31843fbe1a95d880dc912e1f6beac6632 (diff)
um: Support suspend to RAM
With all the previous bits in place, we can now also support suspend to RAM, in the sense that everything is suspended, not just most processes (including userspace) as in s2idle.

Since um_idle_sleep() now waits forever, we can simply call that to "suspend" the system. As before, you can wake it up using SIGUSR1, since we're just in a pause() call that only needs to return.

In order to implement selective resume from certain devices, and not have just any device interrupt wake up the system, suspend interrupts by removing SIGIO notification (O_ASYNC) from all the FDs that are not supposed to wake up the system. At the same time, swap out the SIGIO handler so we don't actually handle SIGIO as an interrupt; since we're in pause(), the mere act of receiving SIGIO wakes us up. Then, after things have been restored enough, re-set O_ASYNC on all previously suspended FDs, reinstall the proper SIGIO handler, and send SIGIO to self to process anything that might now be pending.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
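To illustrate the mechanism described above, here is a standalone host-side C sketch built only on POSIX fcntl()/pause()/SIGIO semantics. It is not the code added by this patch (UML's own os_set_fd_async()/os_clear_fd_async() helpers do the real work), and the demo_* names are hypothetical:

#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static void demo_sigio_noop(int sig)
{
	/* do nothing: merely receiving SIGIO makes pause() return */
}

/* roughly the effect of os_clear_fd_async(): stop SIGIO notification */
static int demo_clear_fd_async(int fd)
{
	int flags = fcntl(fd, F_GETFL);

	return flags < 0 ? -1 : fcntl(fd, F_SETFL, flags & ~O_ASYNC);
}

/* roughly the effect of os_set_fd_async(): re-enable SIGIO notification */
static int demo_set_fd_async(int fd)
{
	int flags = fcntl(fd, F_GETFL);

	return flags < 0 ? -1 : fcntl(fd, F_SETFL, flags | O_ASYNC);
}

static void demo_suspend(int non_wake_fd)
{
	struct sigaction sa = { 0 };

	/* swap in a no-op SIGIO handler, as sigio_handler_suspend does */
	sa.sa_handler = demo_sigio_noop;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGIO, &sa, NULL);

	/* drop SIGIO notification from FDs that must not wake us */
	demo_clear_fd_async(non_wake_fd);

	/* "suspend": sleep until SIGIO (from a wake-up FD) or SIGUSR1 arrives */
	pause();

	/*
	 * resume: restore notification; in UML the real sigio_handler is
	 * reinstalled before SIGIO is re-raised to pick up anything pending
	 */
	demo_set_fd_async(non_wake_fd);
	raise(SIGIO);
}

In the patch itself this sequence is driven by um_irqs_suspend()/um_irqs_resume() in irq.c and by um_suspend_enter() calling um_idle_sleep(), as shown in the diff below.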
Diffstat (limited to 'arch/um/kernel')
-rw-r--r--   arch/um/kernel/irq.c       |  88
-rw-r--r--   arch/um/kernel/process.c   |   2
-rw-r--r--   arch/um/kernel/um_arch.c   |  42
3 files changed, 130 insertions(+), 2 deletions(-)
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index 482269580b79..ea43312cbfd3 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -20,6 +20,7 @@
#include <os.h>
#include <irq_user.h>
#include <irq_kern.h>
+#include <as-layout.h>
extern void free_irqs(void);
@@ -36,12 +37,14 @@ struct irq_reg {
int events;
bool active;
bool pending;
+ bool wakeup;
};
struct irq_entry {
struct list_head list;
int fd;
struct irq_reg reg[NUM_IRQ_TYPES];
+ bool suspended;
};
static DEFINE_SPINLOCK(irq_lock);
@@ -70,6 +73,11 @@ static void irq_io_loop(struct irq_reg *irq, struct uml_pt_regs *regs)
}
}
+void sigio_handler_suspend(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
+{
+ /* nothing */
+}
+
void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
struct irq_entry *irq_entry;
@@ -365,9 +373,86 @@ error:
clear_bit(irq, irqs_allocated);
return err;
}
-
EXPORT_SYMBOL(um_request_irq);
+#ifdef CONFIG_PM_SLEEP
+void um_irqs_suspend(void)
+{
+ struct irq_entry *entry;
+ unsigned long flags;
+
+ sig_info[SIGIO] = sigio_handler_suspend;
+
+ spin_lock_irqsave(&irq_lock, flags);
+ list_for_each_entry(entry, &active_fds, list) {
+ enum um_irq_type t;
+ bool wake = false;
+
+ for (t = 0; t < NUM_IRQ_TYPES; t++) {
+ if (!entry->reg[t].events)
+ continue;
+
+ if (entry->reg[t].wakeup) {
+ wake = true;
+ break;
+ }
+ }
+
+ if (!wake) {
+ entry->suspended = true;
+ os_clear_fd_async(entry->fd);
+ }
+ }
+ spin_unlock_irqrestore(&irq_lock, flags);
+}
+
+void um_irqs_resume(void)
+{
+ struct irq_entry *entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&irq_lock, flags);
+ list_for_each_entry(entry, &active_fds, list) {
+ if (entry->suspended) {
+ int err = os_set_fd_async(entry->fd);
+
+ WARN(err < 0, "os_set_fd_async returned %d\n", err);
+ entry->suspended = false;
+ }
+ }
+ spin_unlock_irqrestore(&irq_lock, flags);
+
+ sig_info[SIGIO] = sigio_handler;
+ send_sigio_to_self();
+}
+
+static int normal_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+ struct irq_entry *entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&irq_lock, flags);
+ list_for_each_entry(entry, &active_fds, list) {
+ enum um_irq_type t;
+
+ for (t = 0; t < NUM_IRQ_TYPES; t++) {
+ if (!entry->reg[t].events)
+ continue;
+
+ if (entry->reg[t].irq != d->irq)
+ continue;
+ entry->reg[t].wakeup = on;
+ goto unlock;
+ }
+ }
+unlock:
+ spin_unlock_irqrestore(&irq_lock, flags);
+ return 0;
+}
+#else
+#define normal_irq_set_wake NULL
+#endif
+
/*
* irq_chip must define at least enable/disable and ack when
* the edge handler is used.
@@ -384,6 +469,7 @@ static struct irq_chip normal_irq_type = {
.irq_ack = dummy,
.irq_mask = dummy,
.irq_unmask = dummy,
+ .irq_set_wake = normal_irq_set_wake,
};
static struct irq_chip alarm_irq_type = {
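The new .irq_set_wake hook above is not called directly by drivers; it is reached through the generic enable_irq_wake()/disable_irq_wake() helpers of the IRQ core. A minimal, hypothetical driver-side sketch (the uml_demo_* names are not part of this patch):

#include <linux/interrupt.h>

/* Mark an already-requested UML IRQ as a wake-up source so that its FD
 * keeps O_ASYNC across um_irqs_suspend(). Illustration only. */
static int uml_demo_enable_wake(unsigned int irq)
{
	/* ends up in normal_irq_set_wake(d, 1) via the IRQ core */
	return enable_irq_wake(irq);
}

static void uml_demo_disable_wake(unsigned int irq)
{
	/* normal_irq_set_wake(d, 0): the FD is suspended again next time */
	disable_irq_wake(irq);
}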
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index a85c48ac2b27..81d508daf67c 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -203,7 +203,7 @@ void initial_thread_cb(void (*proc)(void *), void *arg)
kmalloc_ok = save_kmalloc_ok;
}
-static void um_idle_sleep(void)
+void um_idle_sleep(void)
{
if (time_travel_mode != TT_MODE_OFF)
time_travel_sleep();
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 237a8d73a096..9c7e6d7ea1b3 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -385,6 +385,45 @@ void uml_pm_wake(void)
pm_system_wakeup();
}
+static int um_suspend_valid(suspend_state_t state)
+{
+ return state == PM_SUSPEND_MEM;
+}
+
+static int um_suspend_prepare(void)
+{
+ um_irqs_suspend();
+ return 0;
+}
+
+static int um_suspend_enter(suspend_state_t state)
+{
+ if (WARN_ON(state != PM_SUSPEND_MEM))
+ return -EINVAL;
+
+ /*
+ * This is identical to the idle sleep, but we've just
+ * (during suspend) turned off all interrupt sources
+ * except for the ones we want, so now we can only wake
+ * up on something we actually want to wake up on. All
+ * timing has also been suspended.
+ */
+ um_idle_sleep();
+ return 0;
+}
+
+static void um_suspend_finish(void)
+{
+ um_irqs_resume();
+}
+
+const struct platform_suspend_ops um_suspend_ops = {
+ .valid = um_suspend_valid,
+ .prepare = um_suspend_prepare,
+ .enter = um_suspend_enter,
+ .finish = um_suspend_finish,
+};
+
static int init_pm_wake_signal(void)
{
/*
@@ -397,6 +436,9 @@ static int init_pm_wake_signal(void)
*/
if (time_travel_mode != TT_MODE_EXTERNAL)
register_pm_wake_signal();
+
+ suspend_set_ops(&um_suspend_ops);
+
return 0;
}