Diffstat (limited to 'kernel/power')
-rw-r--r--  kernel/power/Kconfig          71
-rw-r--r--  kernel/power/Makefile          6
-rw-r--r--  kernel/power/autosleep.c       2
-rw-r--r--  kernel/power/energy_model.c  756
-rw-r--r--  kernel/power/hibernate.c     454
-rw-r--r--  kernel/power/main.c          315
-rw-r--r--  kernel/power/power.h          61
-rw-r--r--  kernel/power/poweroff.c        2
-rw-r--r--  kernel/power/process.c        48
-rw-r--r--  kernel/power/qos.c            22
-rw-r--r--  kernel/power/snapshot.c      409
-rw-r--r--  kernel/power/suspend.c        69
-rw-r--r--  kernel/power/suspend_test.c   12
-rw-r--r--  kernel/power/swap.c          405
-rw-r--r--  kernel/power/user.c           83
-rw-r--r--  kernel/power/wakelock.c       11
16 files changed, 1960 insertions, 766 deletions
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index a7320f07689d..afce8130d8b9 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -39,9 +39,9 @@ config HIBERNATION
bool "Hibernation (aka 'suspend to disk')"
depends on SWAP && ARCH_HIBERNATION_POSSIBLE
select HIBERNATE_CALLBACKS
- select LZO_COMPRESS
- select LZO_DECOMPRESS
select CRC32
+ select CRYPTO
+ select CRYPTO_LZO
help
Enable the suspend to disk (STD) functionality, which is usually
called "hibernation" in user interfaces. STD checkpoints the
@@ -92,33 +92,54 @@ config HIBERNATION_SNAPSHOT_DEV
If in doubt, say Y.
+choice
+ prompt "Default compressor"
+ default HIBERNATION_COMP_LZO
+ depends on HIBERNATION
+
+config HIBERNATION_COMP_LZO
+ bool "lzo"
+ depends on CRYPTO_LZO
+
+config HIBERNATION_COMP_LZ4
+ bool "lz4"
+ depends on CRYPTO_LZ4
+
+endchoice
+
+config HIBERNATION_DEF_COMP
+ string
+ default "lzo" if HIBERNATION_COMP_LZO
+ default "lz4" if HIBERNATION_COMP_LZ4
+ help
+ Default compressor to be used for hibernation.
+
config PM_STD_PARTITION
string "Default resume partition"
depends on HIBERNATION
default ""
help
The default resume partition is the partition that the suspend-
- to-disk implementation will look for a suspended disk image.
+ to-disk implementation will look for a suspended disk image.
- The partition specified here will be different for almost every user.
+ The partition specified here will be different for almost every user.
It should be a valid swap partition (at least for now) that is turned
- on before suspending.
+ on before suspending.
The partition specified can be overridden by specifying:
- resume=/dev/<other device>
+ resume=/dev/<other device>
- which will set the resume partition to the device specified.
+ which will set the resume partition to the device specified.
Note there is currently not a way to specify which device to save the
- suspended image to. It will simply pick the first available swap
+ suspended image to. It will simply pick the first available swap
device.
config PM_SLEEP
def_bool y
depends on SUSPEND || HIBERNATE_CALLBACKS
select PM
- select SRCU
config PM_SLEEP_SMP
def_bool y
@@ -139,15 +160,33 @@ config PM_SLEEP_SMP_NONZERO_CPU
config PM_AUTOSLEEP
bool "Opportunistic sleep"
depends on PM_SLEEP
- default n
help
Allow the kernel to trigger a system transition into a global sleep
state automatically whenever there are no active wakeup sources.
+config PM_USERSPACE_AUTOSLEEP
+ bool "Userspace opportunistic sleep"
+ depends on PM_SLEEP
+ help
+ Notify kernel of aggressive userspace autosleep power management policy.
+
+ This option changes the behavior of various sleep-sensitive code to deal
+ with frequent userspace-initiated transitions into a global sleep state.
+
+ Saying Y here disables code paths that most users really should keep
+ enabled. In particular, only enable this if it is very common to be
+ asleep/awake for very short periods of time (<= 2 seconds).
+
+ Only platforms, such as Android, that implement opportunistic sleep from
+ a userspace power manager service should enable this option, not
+ other machines. Therefore, you should say N here, unless you are
+ extremely certain that this is what you want. The option otherwise has
+ bad, undesirable effects, and should not be enabled just for fun.
+
+
config PM_WAKELOCKS
bool "User space wakeup sources interface"
depends on PM_SLEEP
- default n
help
Allow user space to create, activate and deactivate wakeup source
objects with the help of a sysfs-based interface.
@@ -293,7 +332,6 @@ config PM_GENERIC_DOMAINS
config WQ_POWER_EFFICIENT_DEFAULT
bool "Enable workqueue power-efficient mode by default"
depends on PM
- default n
help
Per-cpu workqueues are generally preferred because they show
better performance thanks to cache locality; unfortunately,
@@ -322,15 +360,14 @@ config CPU_PM
bool
config ENERGY_MODEL
- bool "Energy Model for CPUs"
+ bool "Energy Model for devices with DVFS (CPUs, GPUs, etc)"
depends on SMP
depends on CPU_FREQ
- default n
help
Several subsystems (thermal and/or the task scheduler for example)
- can leverage information about the energy consumed by CPUs to make
- smarter decisions. This config option enables the framework from
- which subsystems can access the energy models.
+ can leverage information about the energy consumed by devices to
+ make smarter decisions. This config option enables the framework
+ from which subsystems can access the energy models.
The exact usage of the energy model is subsystem-dependent.
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index 5899260a8bef..874ad834dc8d 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -1,6 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
-ccflags-$(CONFIG_PM_DEBUG) := -DDEBUG
+ifeq ($(CONFIG_DYNAMIC_DEBUG), y)
+CFLAGS_swap.o := -DDEBUG
+CFLAGS_snapshot.o := -DDEBUG
+CFLAGS_energy_model.o := -DDEBUG
+endif
KASAN_SANITIZE_snapshot.o := n
diff --git a/kernel/power/autosleep.c b/kernel/power/autosleep.c
index 9af5a50d3489..b29c8aca7486 100644
--- a/kernel/power/autosleep.c
+++ b/kernel/power/autosleep.c
@@ -54,7 +54,7 @@ static void try_to_suspend(struct work_struct *work)
goto out;
/*
- * If the wakeup occured for an unknown reason, wait to prevent the
+ * If the wakeup occurred for an unknown reason, wait to prevent the
* system from trying to suspend and waking up in a tight loop.
*/
if (final_count == initial_count)
diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c
index 0a9326f5f421..9e1c9aa399ea 100644
--- a/kernel/power/energy_model.c
+++ b/kernel/power/energy_model.c
@@ -1,44 +1,101 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Energy Model of CPUs
+ * Energy Model of devices
*
- * Copyright (c) 2018, Arm ltd.
+ * Copyright (c) 2018-2021, Arm ltd.
* Written by: Quentin Perret, Arm ltd.
+ * Improvements provided by: Lukasz Luba, Arm ltd.
*/
#define pr_fmt(fmt) "energy_model: " fmt
#include <linux/cpu.h>
+#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/debugfs.h>
#include <linux/energy_model.h>
#include <linux/sched/topology.h>
#include <linux/slab.h>
-/* Mapping of each CPU to the performance domain to which it belongs. */
-static DEFINE_PER_CPU(struct em_perf_domain *, em_data);
-
/*
* Mutex serializing the registrations of performance domains and letting
* callbacks defined by drivers sleep.
*/
static DEFINE_MUTEX(em_pd_mutex);
+static void em_cpufreq_update_efficiencies(struct device *dev,
+ struct em_perf_state *table);
+static void em_check_capacity_update(void);
+static void em_update_workfn(struct work_struct *work);
+static DECLARE_DELAYED_WORK(em_update_work, em_update_workfn);
+
+static bool _is_cpu_device(struct device *dev)
+{
+ return (dev->bus == &cpu_subsys);
+}
+
#ifdef CONFIG_DEBUG_FS
static struct dentry *rootdir;
-static void em_debug_create_cs(struct em_cap_state *cs, struct dentry *pd)
+struct em_dbg_info {
+ struct em_perf_domain *pd;
+ int ps_id;
+};
+
+#define DEFINE_EM_DBG_SHOW(name, fname) \
+static int em_debug_##fname##_show(struct seq_file *s, void *unused) \
+{ \
+ struct em_dbg_info *em_dbg = s->private; \
+ struct em_perf_state *table; \
+ unsigned long val; \
+ \
+ rcu_read_lock(); \
+ table = em_perf_state_from_pd(em_dbg->pd); \
+ val = table[em_dbg->ps_id].name; \
+ rcu_read_unlock(); \
+ \
+ seq_printf(s, "%lu\n", val); \
+ return 0; \
+} \
+DEFINE_SHOW_ATTRIBUTE(em_debug_##fname)
+
+DEFINE_EM_DBG_SHOW(frequency, frequency);
+DEFINE_EM_DBG_SHOW(power, power);
+DEFINE_EM_DBG_SHOW(cost, cost);
+DEFINE_EM_DBG_SHOW(performance, performance);
+DEFINE_EM_DBG_SHOW(flags, inefficiency);
+
+static void em_debug_create_ps(struct em_perf_domain *em_pd,
+ struct em_dbg_info *em_dbg, int i,
+ struct dentry *pd)
{
+ struct em_perf_state *table;
+ unsigned long freq;
struct dentry *d;
char name[24];
- snprintf(name, sizeof(name), "cs:%lu", cs->frequency);
+ em_dbg[i].pd = em_pd;
+ em_dbg[i].ps_id = i;
+
+ rcu_read_lock();
+ table = em_perf_state_from_pd(em_pd);
+ freq = table[i].frequency;
+ rcu_read_unlock();
- /* Create per-cs directory */
+ snprintf(name, sizeof(name), "ps:%lu", freq);
+
+ /* Create per-ps directory */
d = debugfs_create_dir(name, pd);
- debugfs_create_ulong("frequency", 0444, d, &cs->frequency);
- debugfs_create_ulong("power", 0444, d, &cs->power);
- debugfs_create_ulong("cost", 0444, d, &cs->cost);
+ debugfs_create_file("frequency", 0444, d, &em_dbg[i],
+ &em_debug_frequency_fops);
+ debugfs_create_file("power", 0444, d, &em_dbg[i],
+ &em_debug_power_fops);
+ debugfs_create_file("cost", 0444, d, &em_dbg[i],
+ &em_debug_cost_fops);
+ debugfs_create_file("performance", 0444, d, &em_dbg[i],
+ &em_debug_performance_fops);
+ debugfs_create_file("inefficient", 0444, d, &em_dbg[i],
+ &em_debug_inefficiency_fops);
}
static int em_debug_cpus_show(struct seq_file *s, void *unused)
@@ -49,22 +106,46 @@ static int em_debug_cpus_show(struct seq_file *s, void *unused)
}
DEFINE_SHOW_ATTRIBUTE(em_debug_cpus);
-static void em_debug_create_pd(struct em_perf_domain *pd, int cpu)
+static int em_debug_flags_show(struct seq_file *s, void *unused)
+{
+ struct em_perf_domain *pd = s->private;
+
+ seq_printf(s, "%#lx\n", pd->flags);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(em_debug_flags);
+
+static void em_debug_create_pd(struct device *dev)
{
+ struct em_dbg_info *em_dbg;
struct dentry *d;
- char name[8];
int i;
- snprintf(name, sizeof(name), "pd%d", cpu);
-
/* Create the directory of the performance domain */
- d = debugfs_create_dir(name, rootdir);
+ d = debugfs_create_dir(dev_name(dev), rootdir);
+
+ if (_is_cpu_device(dev))
+ debugfs_create_file("cpus", 0444, d, dev->em_pd->cpus,
+ &em_debug_cpus_fops);
+
+ debugfs_create_file("flags", 0444, d, dev->em_pd,
+ &em_debug_flags_fops);
+
+ em_dbg = devm_kcalloc(dev, dev->em_pd->nr_perf_states,
+ sizeof(*em_dbg), GFP_KERNEL);
+ if (!em_dbg)
+ return;
- debugfs_create_file("cpus", 0444, d, pd->cpus, &em_debug_cpus_fops);
+ /* Create a sub-directory for each performance state */
+ for (i = 0; i < dev->em_pd->nr_perf_states; i++)
+ em_debug_create_ps(dev->em_pd, em_dbg, i, d);
- /* Create a sub-directory for each capacity state */
- for (i = 0; i < pd->nr_cap_states; i++)
- em_debug_create_cs(&pd->table[i], d);
+}
+
+static void em_debug_remove_pd(struct device *dev)
+{
+ debugfs_lookup_and_remove(dev_name(dev), rootdir);
}
static int __init em_debug_init(void)
@@ -74,136 +155,415 @@ static int __init em_debug_init(void)
return 0;
}
-core_initcall(em_debug_init);
+fs_initcall(em_debug_init);
#else /* CONFIG_DEBUG_FS */
-static void em_debug_create_pd(struct em_perf_domain *pd, int cpu) {}
+static void em_debug_create_pd(struct device *dev) {}
+static void em_debug_remove_pd(struct device *dev) {}
#endif
-static struct em_perf_domain *em_create_pd(cpumask_t *span, int nr_states,
- struct em_data_callback *cb)
+
+static void em_destroy_table_rcu(struct rcu_head *rp)
{
- unsigned long opp_eff, prev_opp_eff = ULONG_MAX;
- unsigned long power, freq, prev_freq = 0;
- int i, ret, cpu = cpumask_first(span);
- struct em_cap_state *table;
- struct em_perf_domain *pd;
- u64 fmax;
+ struct em_perf_table __rcu *table;
- if (!cb->active_power)
- return NULL;
+ table = container_of(rp, struct em_perf_table, rcu);
+ kfree(table);
+}
- pd = kzalloc(sizeof(*pd) + cpumask_size(), GFP_KERNEL);
- if (!pd)
- return NULL;
+static void em_release_table_kref(struct kref *kref)
+{
+ struct em_perf_table __rcu *table;
+
+ /* It was the last owner of this table so we can free */
+ table = container_of(kref, struct em_perf_table, kref);
+
+ call_rcu(&table->rcu, em_destroy_table_rcu);
+}
+
+/**
+ * em_table_free() - Handles safe free of the EM table when needed
+ * @table : EM table which is going to be freed
+ *
+ * No return values.
+ */
+void em_table_free(struct em_perf_table __rcu *table)
+{
+ kref_put(&table->kref, em_release_table_kref);
+}
+
+/**
+ * em_table_alloc() - Allocate a new EM table
+ * @pd : EM performance domain for which this must be done
+ *
+ * Allocate a new EM table and initialize its kref to indicate that it
+ * has a user.
+ * Returns allocated table or NULL.
+ */
+struct em_perf_table __rcu *em_table_alloc(struct em_perf_domain *pd)
+{
+ struct em_perf_table __rcu *table;
+ int table_size;
+
+ table_size = sizeof(struct em_perf_state) * pd->nr_perf_states;
- table = kcalloc(nr_states, sizeof(*table), GFP_KERNEL);
+ table = kzalloc(sizeof(*table) + table_size, GFP_KERNEL);
if (!table)
- goto free_pd;
+ return NULL;
+
+ kref_init(&table->kref);
+
+ return table;
+}
+
+static void em_init_performance(struct device *dev, struct em_perf_domain *pd,
+ struct em_perf_state *table, int nr_states)
+{
+ u64 fmax, max_cap;
+ int i, cpu;
+
+ /* This is needed only for CPUs and EAS; skip other devices */
+ if (!_is_cpu_device(dev))
+ return;
+
+ cpu = cpumask_first(em_span_cpus(pd));
+
+ /*
+ * Calculate the performance value for each frequency with
+ * linear relationship. The final CPU capacity might not be ready at
+ * boot time, but the EM will be updated a bit later with the correct one.
+ */
+ fmax = (u64) table[nr_states - 1].frequency;
+ max_cap = (u64) arch_scale_cpu_capacity(cpu);
+ for (i = 0; i < nr_states; i++)
+ table[i].performance = div64_u64(max_cap * table[i].frequency,
+ fmax);
+}
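As a worked example of the linear mapping above (illustrative numbers only, not from the patch): with arch_scale_cpu_capacity() returning 1024 and a top frequency of 2000000 kHz, the 1000000 kHz state gets performance = 1024 * 1000000 / 2000000 = 512, while the top state gets the full 1024.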
+
+static int em_compute_costs(struct device *dev, struct em_perf_state *table,
+ struct em_data_callback *cb, int nr_states,
+ unsigned long flags)
+{
+ unsigned long prev_cost = ULONG_MAX;
+ int i, ret;
+
+ /* Compute the cost of each performance state. */
+ for (i = nr_states - 1; i >= 0; i--) {
+ unsigned long power_res, cost;
+
+ if ((flags & EM_PERF_DOMAIN_ARTIFICIAL) && cb->get_cost) {
+ ret = cb->get_cost(dev, table[i].frequency, &cost);
+ if (ret || !cost || cost > EM_MAX_POWER) {
+ dev_err(dev, "EM: invalid cost %lu %d\n",
+ cost, ret);
+ return -EINVAL;
+ }
+ } else {
+ /* increase resolution of 'cost' precision */
+ power_res = table[i].power * 10;
+ cost = power_res / table[i].performance;
+ }
+
+ table[i].cost = cost;
+
+ if (table[i].cost >= prev_cost) {
+ table[i].flags = EM_PERF_STATE_INEFFICIENT;
+ dev_dbg(dev, "EM: OPP:%lu is inefficient\n",
+ table[i].frequency);
+ } else {
+ prev_cost = table[i].cost;
+ }
+ }
+
+ return 0;
+}
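As a worked example of the cost and inefficiency logic above, in the non-artificial case (illustrative numbers only): walking from the highest state down, a top state with power 1000 and performance 1024 gets cost = 1000 * 10 / 1024 = 9; a lower state with power 600 and performance 512 gets cost = 600 * 10 / 512 = 11, and since 11 >= 9 it is flagged EM_PERF_STATE_INEFFICIENT.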
+
+/**
+ * em_dev_compute_costs() - Calculate cost values for new runtime EM table
+ * @dev : Device for which the EM table is to be updated
+ * @table : The new EM table that is going to get the costs calculated
+ * @nr_states : Number of performance states
+ *
+ * Calculate the em_perf_state::cost values for new runtime EM table. The
+ * values are used for EAS during task placement. It also calculates and sets
+ * the efficiency flag for each performance state. When the function finishes
+ * successfully, the EM table is ready to be updated and used by EAS.
+ *
+ * Return 0 on success or a proper error in case of failure.
+ */
+int em_dev_compute_costs(struct device *dev, struct em_perf_state *table,
+ int nr_states)
+{
+ return em_compute_costs(dev, table, NULL, nr_states, 0);
+}
+
+/**
+ * em_dev_update_perf_domain() - Update runtime EM table for a device
+ * @dev : Device for which the EM is to be updated
+ * @new_table : The new EM table that is going to be used from now
+ *
+ * Update EM runtime modifiable table for the @dev using the provided @table.
+ *
+ * This function uses a mutex to serialize writers, so it must not be called
+ * from a non-sleeping context.
+ *
+ * Return 0 on success or an error code on failure.
+ */
+int em_dev_update_perf_domain(struct device *dev,
+ struct em_perf_table __rcu *new_table)
+{
+ struct em_perf_table __rcu *old_table;
+ struct em_perf_domain *pd;
+
+ if (!dev)
+ return -EINVAL;
+
+ /* Serialize update/unregister or concurrent updates */
+ mutex_lock(&em_pd_mutex);
+
+ if (!dev->em_pd) {
+ mutex_unlock(&em_pd_mutex);
+ return -EINVAL;
+ }
+ pd = dev->em_pd;
+
+ kref_get(&new_table->kref);
+
+ old_table = pd->em_table;
+ rcu_assign_pointer(pd->em_table, new_table);
+
+ em_cpufreq_update_efficiencies(dev, new_table->state);
+
+ em_table_free(old_table);
+
+ mutex_unlock(&em_pd_mutex);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(em_dev_update_perf_domain);
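A minimal sketch of how a driver might publish new power values using the helpers above (the my_* names and the new_power array are placeholders; the same allocate/copy/recompute/update/free pattern is used by em_adjust_new_capacity() further down in this file):

static int my_update_em(struct device *dev, unsigned long *new_power)
{
	struct em_perf_domain *pd = dev->em_pd;
	struct em_perf_table __rcu *new_table;
	struct em_perf_state *old_ps;
	int i, ret;

	if (!pd)
		return -EINVAL;

	new_table = em_table_alloc(pd);
	if (!new_table)
		return -ENOMEM;

	/* Seed the new table from the currently published one */
	rcu_read_lock();
	old_ps = em_perf_state_from_pd(pd);
	memcpy(new_table->state, old_ps,
	       sizeof(*old_ps) * pd->nr_perf_states);
	rcu_read_unlock();

	for (i = 0; i < pd->nr_perf_states; i++)
		new_table->state[i].power = new_power[i];

	ret = em_dev_compute_costs(dev, new_table->state, pd->nr_perf_states);
	if (!ret)
		ret = em_dev_update_perf_domain(dev, new_table);

	/* Drop the updater's reference; the EM core now holds its own */
	em_table_free(new_table);
	return ret;
}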
- /* Build the list of capacity states for this performance domain */
+static int em_create_perf_table(struct device *dev, struct em_perf_domain *pd,
+ struct em_perf_state *table,
+ struct em_data_callback *cb,
+ unsigned long flags)
+{
+ unsigned long power, freq, prev_freq = 0;
+ int nr_states = pd->nr_perf_states;
+ int i, ret;
+
+ /* Build the list of performance states for this performance domain */
for (i = 0, freq = 0; i < nr_states; i++, freq++) {
/*
* active_power() is a driver callback which ceils 'freq' to
- * lowest capacity state of 'cpu' above 'freq' and updates
+ * lowest performance state of 'dev' above 'freq' and updates
* 'power' and 'freq' accordingly.
*/
- ret = cb->active_power(&power, &freq, cpu);
+ ret = cb->active_power(dev, &power, &freq);
if (ret) {
- pr_err("pd%d: invalid cap. state: %d\n", cpu, ret);
- goto free_cs_table;
+ dev_err(dev, "EM: invalid perf. state: %d\n",
+ ret);
+ return -EINVAL;
}
/*
* We expect the driver callback to increase the frequency for
- * higher capacity states.
+ * higher performance states.
*/
if (freq <= prev_freq) {
- pr_err("pd%d: non-increasing freq: %lu\n", cpu, freq);
- goto free_cs_table;
+ dev_err(dev, "EM: non-increasing freq: %lu\n",
+ freq);
+ return -EINVAL;
}
/*
* The power returned by active_state() is expected to be
- * positive, in milli-watts and to fit into 16 bits.
+ * positive and be in range.
*/
- if (!power || power > EM_CPU_MAX_POWER) {
- pr_err("pd%d: invalid power: %lu\n", cpu, power);
- goto free_cs_table;
+ if (!power || power > EM_MAX_POWER) {
+ dev_err(dev, "EM: invalid power: %lu\n",
+ power);
+ return -EINVAL;
}
table[i].power = power;
table[i].frequency = prev_freq = freq;
-
- /*
- * The hertz/watts efficiency ratio should decrease as the
- * frequency grows on sane platforms. But this isn't always
- * true in practice so warn the user if a higher OPP is more
- * power efficient than a lower one.
- */
- opp_eff = freq / power;
- if (opp_eff >= prev_opp_eff)
- pr_warn("pd%d: hertz/watts ratio non-monotonically decreasing: em_cap_state %d >= em_cap_state%d\n",
- cpu, i, i - 1);
- prev_opp_eff = opp_eff;
}
- /* Compute the cost of each capacity_state. */
- fmax = (u64) table[nr_states - 1].frequency;
- for (i = 0; i < nr_states; i++) {
- table[i].cost = div64_u64(fmax * table[i].power,
- table[i].frequency);
+ em_init_performance(dev, pd, table, nr_states);
+
+ ret = em_compute_costs(dev, table, cb, nr_states, flags);
+ if (ret)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int em_create_pd(struct device *dev, int nr_states,
+ struct em_data_callback *cb, cpumask_t *cpus,
+ unsigned long flags)
+{
+ struct em_perf_table __rcu *em_table;
+ struct em_perf_domain *pd;
+ struct device *cpu_dev;
+ int cpu, ret, num_cpus;
+
+ if (_is_cpu_device(dev)) {
+ num_cpus = cpumask_weight(cpus);
+
+ /* Prevent the max possible energy calculation from overflowing */
+ if (num_cpus > EM_MAX_NUM_CPUS) {
+ dev_err(dev, "EM: too many CPUs, overflow possible\n");
+ return -EINVAL;
+ }
+
+ pd = kzalloc(sizeof(*pd) + cpumask_size(), GFP_KERNEL);
+ if (!pd)
+ return -ENOMEM;
+
+ cpumask_copy(em_span_cpus(pd), cpus);
+ } else {
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return -ENOMEM;
}
- pd->table = table;
- pd->nr_cap_states = nr_states;
- cpumask_copy(to_cpumask(pd->cpus), span);
+ pd->nr_perf_states = nr_states;
+
+ em_table = em_table_alloc(pd);
+ if (!em_table)
+ goto free_pd;
- em_debug_create_pd(pd, cpu);
+ ret = em_create_perf_table(dev, pd, em_table->state, cb, flags);
+ if (ret)
+ goto free_pd_table;
- return pd;
+ rcu_assign_pointer(pd->em_table, em_table);
-free_cs_table:
- kfree(table);
+ if (_is_cpu_device(dev))
+ for_each_cpu(cpu, cpus) {
+ cpu_dev = get_cpu_device(cpu);
+ cpu_dev->em_pd = pd;
+ }
+
+ dev->em_pd = pd;
+
+ return 0;
+
+free_pd_table:
+ kfree(em_table);
free_pd:
kfree(pd);
+ return -EINVAL;
+}
+
+static void
+em_cpufreq_update_efficiencies(struct device *dev, struct em_perf_state *table)
+{
+ struct em_perf_domain *pd = dev->em_pd;
+ struct cpufreq_policy *policy;
+ int found = 0;
+ int i, cpu;
+
+ if (!_is_cpu_device(dev))
+ return;
+
+ /* Try to get a CPU which is active and in this PD */
+ cpu = cpumask_first_and(em_span_cpus(pd), cpu_active_mask);
+ if (cpu >= nr_cpu_ids) {
+ dev_warn(dev, "EM: No online CPU for CPUFreq policy\n");
+ return;
+ }
+
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy) {
+ dev_warn(dev, "EM: Access to CPUFreq policy failed\n");
+ return;
+ }
+
+ for (i = 0; i < pd->nr_perf_states; i++) {
+ if (!(table[i].flags & EM_PERF_STATE_INEFFICIENT))
+ continue;
+
+ if (!cpufreq_table_set_inefficient(policy, table[i].frequency))
+ found++;
+ }
+
+ cpufreq_cpu_put(policy);
+
+ if (!found)
+ return;
+
+ /*
+ * Efficiencies have been installed in CPUFreq, inefficient frequencies
+ * will be skipped. The EM can do the same.
+ */
+ pd->flags |= EM_PERF_DOMAIN_SKIP_INEFFICIENCIES;
+}
+
+/**
+ * em_pd_get() - Return the performance domain for a device
+ * @dev : Device to find the performance domain for
+ *
+ * Returns the performance domain to which @dev belongs, or NULL if it doesn't
+ * exist.
+ */
+struct em_perf_domain *em_pd_get(struct device *dev)
+{
+ if (IS_ERR_OR_NULL(dev))
+ return NULL;
- return NULL;
+ return dev->em_pd;
}
+EXPORT_SYMBOL_GPL(em_pd_get);
/**
* em_cpu_get() - Return the performance domain for a CPU
* @cpu : CPU to find the performance domain for
*
- * Return: the performance domain to which 'cpu' belongs, or NULL if it doesn't
+ * Returns the performance domain to which @cpu belongs, or NULL if it doesn't
* exist.
*/
struct em_perf_domain *em_cpu_get(int cpu)
{
- return READ_ONCE(per_cpu(em_data, cpu));
+ struct device *cpu_dev;
+
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev)
+ return NULL;
+
+ return em_pd_get(cpu_dev);
}
EXPORT_SYMBOL_GPL(em_cpu_get);
/**
- * em_register_perf_domain() - Register the Energy Model of a performance domain
- * @span : Mask of CPUs in the performance domain
- * @nr_states : Number of capacity states to register
+ * em_dev_register_perf_domain() - Register the Energy Model (EM) for a device
+ * @dev : Device for which the EM is to register
+ * @nr_states : Number of performance states to register
* @cb : Callback functions providing the data of the Energy Model
+ * @cpus : Pointer to cpumask_t, which in case of a CPU device is
+ * obligatory. It can be taken from e.g. 'policy->cpus'. For other
+ * type of devices this should be set to NULL.
+ * @microwatts : Flag indicating that the power values are in micro-Watts or
+ * in some other scale. It must be set properly.
*
* Create Energy Model tables for a performance domain using the callbacks
* defined in cb.
*
+ * It is important to set @microwatts to the correct value. Some kernel
+ * sub-systems might rely on this flag and check if all devices in the EM are
+ * using the same scale.
+ *
* If multiple clients register the same performance domain, all but the first
* registration will be ignored.
*
* Return 0 on success
*/
-int em_register_perf_domain(cpumask_t *span, unsigned int nr_states,
- struct em_data_callback *cb)
+int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
+ struct em_data_callback *cb, cpumask_t *cpus,
+ bool microwatts)
{
unsigned long cap, prev_cap = 0;
- struct em_perf_domain *pd;
- int cpu, ret = 0;
+ unsigned long flags = 0;
+ int cpu, ret;
- if (!span || !nr_states || !cb)
+ if (!dev || !nr_states || !cb)
return -EINVAL;
/*
@@ -212,47 +572,219 @@ int em_register_perf_domain(cpumask_t *span, unsigned int nr_states,
*/
mutex_lock(&em_pd_mutex);
- for_each_cpu(cpu, span) {
- /* Make sure we don't register again an existing domain. */
- if (READ_ONCE(per_cpu(em_data, cpu))) {
- ret = -EEXIST;
- goto unlock;
- }
+ if (dev->em_pd) {
+ ret = -EEXIST;
+ goto unlock;
+ }
- /*
- * All CPUs of a domain must have the same micro-architecture
- * since they all share the same table.
- */
- cap = arch_scale_cpu_capacity(cpu);
- if (prev_cap && prev_cap != cap) {
- pr_err("CPUs of %*pbl must have the same capacity\n",
- cpumask_pr_args(span));
+ if (_is_cpu_device(dev)) {
+ if (!cpus) {
+ dev_err(dev, "EM: invalid CPU mask\n");
ret = -EINVAL;
goto unlock;
}
- prev_cap = cap;
+
+ for_each_cpu(cpu, cpus) {
+ if (em_cpu_get(cpu)) {
+ dev_err(dev, "EM: exists for CPU%d\n", cpu);
+ ret = -EEXIST;
+ goto unlock;
+ }
+ /*
+ * All CPUs of a domain must have the same
+ * micro-architecture since they all share the same
+ * table.
+ */
+ cap = arch_scale_cpu_capacity(cpu);
+ if (prev_cap && prev_cap != cap) {
+ dev_err(dev, "EM: CPUs of %*pbl must have the same capacity\n",
+ cpumask_pr_args(cpus));
+
+ ret = -EINVAL;
+ goto unlock;
+ }
+ prev_cap = cap;
+ }
}
- /* Create the performance domain and add it to the Energy Model. */
- pd = em_create_pd(span, nr_states, cb);
- if (!pd) {
+ if (microwatts)
+ flags |= EM_PERF_DOMAIN_MICROWATTS;
+ else if (cb->get_cost)
+ flags |= EM_PERF_DOMAIN_ARTIFICIAL;
+
+ /*
+ * EM only supports uW (exception is artificial EM).
+ * Therefore, check and force the drivers to provide
+ * power in uW.
+ */
+ if (!microwatts && !(flags & EM_PERF_DOMAIN_ARTIFICIAL)) {
+ dev_err(dev, "EM: only supports uW power values\n");
ret = -EINVAL;
goto unlock;
}
- for_each_cpu(cpu, span) {
- /*
- * The per-cpu array can be read concurrently from em_cpu_get().
- * The barrier enforces the ordering needed to make sure readers
- * can only access well formed em_perf_domain structs.
- */
- smp_store_release(per_cpu_ptr(&em_data, cpu), pd);
- }
+ ret = em_create_pd(dev, nr_states, cb, cpus, flags);
+ if (ret)
+ goto unlock;
+
+ dev->em_pd->flags |= flags;
+
+ em_cpufreq_update_efficiencies(dev, dev->em_pd->em_table->state);
+
+ em_debug_create_pd(dev);
+ dev_info(dev, "EM: created perf domain\n");
- pr_debug("Created perf domain %*pbl\n", cpumask_pr_args(span));
unlock:
mutex_unlock(&em_pd_mutex);
+ if (_is_cpu_device(dev))
+ em_check_capacity_update();
+
return ret;
}
-EXPORT_SYMBOL_GPL(em_register_perf_domain);
+EXPORT_SYMBOL_GPL(em_dev_register_perf_domain);
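For context, a minimal sketch of a driver-side registration with the new interface (the my_* names, the OPP numbers and the probe context are illustrative placeholders, not part of this patch):

#include <linux/cpufreq.h>
#include <linux/energy_model.h>

/* Illustrative operating points for the sketch: {frequency [kHz], power [uW]} */
static const unsigned long my_freq[]  = { 500000, 1000000, 1500000 };
static const unsigned long my_power[] = {  60000,  150000,  320000 };

/* Ceil *freq to the lowest listed frequency at or above it and report its power */
static int my_active_power(struct device *dev, unsigned long *power,
			   unsigned long *freq)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(my_freq); i++) {
		if (my_freq[i] >= *freq) {
			*freq = my_freq[i];
			*power = my_power[i];
			return 0;
		}
	}
	return -EINVAL;
}

static struct em_data_callback my_em_cb = {
	.active_power = my_active_power,
};

/* For a CPU device pass the policy's cpumask; for other devices pass NULL */
static int my_register_em(struct device *cpu_dev, struct cpufreq_policy *policy)
{
	return em_dev_register_perf_domain(cpu_dev, ARRAY_SIZE(my_freq),
					   &my_em_cb, policy->cpus, true);
}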
+
+/**
+ * em_dev_unregister_perf_domain() - Unregister Energy Model (EM) for a device
+ * @dev : Device for which the EM is registered
+ *
+ * Unregister the EM for the specified @dev (but not a CPU device).
+ */
+void em_dev_unregister_perf_domain(struct device *dev)
+{
+ if (IS_ERR_OR_NULL(dev) || !dev->em_pd)
+ return;
+
+ if (_is_cpu_device(dev))
+ return;
+
+ /*
+ * The mutex separates all register/unregister requests and protects
+ * from potential clean-up/setup issues in the debugfs directories.
+ * The debugfs directory name is the same as device's name.
+ */
+ mutex_lock(&em_pd_mutex);
+ em_debug_remove_pd(dev);
+
+ em_table_free(dev->em_pd->em_table);
+
+ kfree(dev->em_pd);
+ dev->em_pd = NULL;
+ mutex_unlock(&em_pd_mutex);
+}
+EXPORT_SYMBOL_GPL(em_dev_unregister_perf_domain);
+
+/*
+ * Adjustment of CPU performance values after boot, when all CPU capacities
+ * are correctly calculated.
+ */
+static void em_adjust_new_capacity(struct device *dev,
+ struct em_perf_domain *pd,
+ u64 max_cap)
+{
+ struct em_perf_table __rcu *em_table;
+ struct em_perf_state *ps, *new_ps;
+ int ret, ps_size;
+
+ em_table = em_table_alloc(pd);
+ if (!em_table) {
+ dev_warn(dev, "EM: allocation failed\n");
+ return;
+ }
+
+ new_ps = em_table->state;
+
+ rcu_read_lock();
+ ps = em_perf_state_from_pd(pd);
+ /* Initialize data based on old table */
+ ps_size = sizeof(struct em_perf_state) * pd->nr_perf_states;
+ memcpy(new_ps, ps, ps_size);
+
+ rcu_read_unlock();
+
+ em_init_performance(dev, pd, new_ps, pd->nr_perf_states);
+ ret = em_compute_costs(dev, new_ps, NULL, pd->nr_perf_states,
+ pd->flags);
+ if (ret) {
+ dev_warn(dev, "EM: compute costs failed\n");
+ return;
+ }
+
+ ret = em_dev_update_perf_domain(dev, em_table);
+ if (ret)
+ dev_warn(dev, "EM: update failed %d\n", ret);
+
+ /*
+ * This is a one-time update, so give up the ownership in this updater.
+ * The EM framework has incremented the usage counter and from now
+ * will keep the reference (then free the memory when needed).
+ */
+ em_table_free(em_table);
+}
+
+static void em_check_capacity_update(void)
+{
+ cpumask_var_t cpu_done_mask;
+ struct em_perf_state *table;
+ struct em_perf_domain *pd;
+ unsigned long cpu_capacity;
+ int cpu;
+
+ if (!zalloc_cpumask_var(&cpu_done_mask, GFP_KERNEL)) {
+ pr_warn("no free memory\n");
+ return;
+ }
+
+ /* Check if the CPU capacity has changed, then update the EM */
+ for_each_possible_cpu(cpu) {
+ struct cpufreq_policy *policy;
+ unsigned long em_max_perf;
+ struct device *dev;
+
+ if (cpumask_test_cpu(cpu, cpu_done_mask))
+ continue;
+
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy) {
+ pr_debug("Accessing cpu%d policy failed\n", cpu);
+ schedule_delayed_work(&em_update_work,
+ msecs_to_jiffies(1000));
+ break;
+ }
+ cpufreq_cpu_put(policy);
+
+ pd = em_cpu_get(cpu);
+ if (!pd || em_is_artificial(pd))
+ continue;
+
+ cpumask_or(cpu_done_mask, cpu_done_mask,
+ em_span_cpus(pd));
+
+ cpu_capacity = arch_scale_cpu_capacity(cpu);
+
+ rcu_read_lock();
+ table = em_perf_state_from_pd(pd);
+ em_max_perf = table[pd->nr_perf_states - 1].performance;
+ rcu_read_unlock();
+
+ /*
+ * Check if the CPU capacity has been adjusted during boot
+ * and trigger the update for new performance values.
+ */
+ if (em_max_perf == cpu_capacity)
+ continue;
+
+ pr_debug("updating cpu%d cpu_cap=%lu old capacity=%lu\n",
+ cpu, cpu_capacity, em_max_perf);
+
+ dev = get_cpu_device(cpu);
+ em_adjust_new_capacity(dev, pd, cpu_capacity);
+ }
+
+ free_cpumask_var(cpu_done_mask);
+}
+
+static void em_update_workfn(struct work_struct *work)
+{
+ em_check_capacity_update();
+}
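For completeness, a sketch of the reader side (hypothetical consumer code; it follows the RCU access pattern used by em_check_capacity_update() above, since the state table must be dereferenced via em_perf_state_from_pd() under rcu_read_lock()):

static unsigned long my_read_top_power(int cpu)
{
	struct em_perf_domain *pd = em_cpu_get(cpu);
	struct em_perf_state *table;
	unsigned long power;

	if (!pd)
		return 0;

	rcu_read_lock();
	table = em_perf_state_from_pd(pd);
	power = table[pd->nr_perf_states - 1].power;
	rcu_read_unlock();

	return power;
}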
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 02ec716a4927..43b1a82e800c 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -11,6 +11,7 @@
#define pr_fmt(fmt) "PM: hibernation: " fmt
+#include <linux/blkdev.h>
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/reboot.h>
@@ -28,9 +29,9 @@
#include <linux/gfp.h>
#include <linux/syscore_ops.h>
#include <linux/ctype.h>
-#include <linux/genhd.h>
#include <linux/ktime.h>
#include <linux/security.h>
+#include <linux/secretmem.h>
#include <trace/events/power.h>
#include "power.h"
@@ -46,6 +47,15 @@ dev_t swsusp_resume_device;
sector_t swsusp_resume_block;
__visible int in_suspend __nosavedata;
+static char hibernate_compressor[CRYPTO_MAX_ALG_NAME] = CONFIG_HIBERNATION_DEF_COMP;
+
+/*
+ * Compression/decompression algorithm to be used while saving/loading
+ * image to/from disk. This would later be used in 'kernel/power/swap.c'
+ * to allocate comp streams.
+ */
+char hib_comp_algo[CRYPTO_MAX_ALG_NAME];
+
enum {
HIBERNATION_INVALID,
HIBERNATION_PLATFORM,
@@ -81,7 +91,9 @@ void hibernate_release(void)
bool hibernation_available(void)
{
- return nohibernate == 0 && !security_locked_down(LOCKDOWN_HIBERNATION);
+ return nohibernate == 0 &&
+ !security_locked_down(LOCKDOWN_HIBERNATION) &&
+ !secretmem_active() && !cxl_mem_active();
}
/**
@@ -90,20 +102,24 @@ bool hibernation_available(void)
*/
void hibernation_set_ops(const struct platform_hibernation_ops *ops)
{
+ unsigned int sleep_flags;
+
if (ops && !(ops->begin && ops->end && ops->pre_snapshot
&& ops->prepare && ops->finish && ops->enter && ops->pre_restore
&& ops->restore_cleanup && ops->leave)) {
WARN_ON(1);
return;
}
- lock_system_sleep();
+
+ sleep_flags = lock_system_sleep();
+
hibernation_ops = ops;
if (ops)
hibernation_mode = HIBERNATION_PLATFORM;
else if (hibernation_mode == HIBERNATION_PLATFORM)
hibernation_mode = HIBERNATION_SHUTDOWN;
- unlock_system_sleep();
+ unlock_system_sleep(sleep_flags);
}
EXPORT_SYMBOL_GPL(hibernation_set_ops);
@@ -297,7 +313,7 @@ static int create_image(int platform_mode)
if (error || hibernation_test(TEST_PLATFORM))
goto Platform_finish;
- error = suspend_disable_secondary_cpus();
+ error = pm_sleep_disable_secondary_cpus();
if (error || hibernation_test(TEST_CPUS))
goto Enable_cpus;
@@ -326,7 +342,7 @@ static int create_image(int platform_mode)
if (!in_suspend) {
events_check_enabled = false;
- clear_free_pages();
+ clear_or_poison_free_pages();
}
platform_leave(platform_mode);
@@ -339,7 +355,7 @@ static int create_image(int platform_mode)
local_irq_enable();
Enable_cpus:
- suspend_enable_secondary_cpus();
+ pm_sleep_enable_secondary_cpus();
/* Allow architectures to do nosmt-specific post-resume dances */
if (!in_suspend)
@@ -463,6 +479,8 @@ static int resume_target_kernel(bool platform_mode)
if (error)
goto Cleanup;
+ cpuidle_pause();
+
error = hibernate_resume_nonboot_cpu_disable();
if (error)
goto Enable_cpus;
@@ -506,7 +524,7 @@ static int resume_target_kernel(bool platform_mode)
local_irq_enable();
Enable_cpus:
- suspend_enable_secondary_cpus();
+ pm_sleep_enable_secondary_cpus();
Cleanup:
platform_restore_cleanup(platform_mode);
@@ -584,7 +602,7 @@ int hibernation_platform_enter(void)
if (error)
goto Platform_finish;
- error = suspend_disable_secondary_cpus();
+ error = pm_sleep_disable_secondary_cpus();
if (error)
goto Enable_cpus;
@@ -606,7 +624,7 @@ int hibernation_platform_enter(void)
local_irq_enable();
Enable_cpus:
- suspend_enable_secondary_cpus();
+ pm_sleep_enable_secondary_cpus();
Platform_finish:
hibernation_ops->finish();
@@ -633,11 +651,11 @@ int hibernation_platform_enter(void)
*/
static void power_down(void)
{
-#ifdef CONFIG_SUSPEND
int error;
+#ifdef CONFIG_SUSPEND
if (hibernation_mode == HIBERNATION_SUSPEND) {
- error = suspend_devices_and_enter(PM_SUSPEND_MEM);
+ error = suspend_devices_and_enter(mem_sleep_current);
if (error) {
hibernation_mode = hibernation_ops ?
HIBERNATION_PLATFORM :
@@ -658,10 +676,16 @@ static void power_down(void)
kernel_restart(NULL);
break;
case HIBERNATION_PLATFORM:
- hibernation_platform_enter();
- /* Fall through */
+ error = hibernation_platform_enter();
+ if (error == -EAGAIN || error == -EBUSY) {
+ swsusp_unmark();
+ events_check_enabled = false;
+ pr_info("Wakeup event detected during hibernation, rolling back.\n");
+ return;
+ }
+ fallthrough;
case HIBERNATION_SHUTDOWN:
- if (pm_power_off)
+ if (kernel_can_power_off())
kernel_power_off();
break;
}
@@ -684,11 +708,13 @@ static int load_image_and_restore(void)
lock_device_hotplug();
error = create_basic_memory_bitmaps();
- if (error)
+ if (error) {
+ swsusp_close();
goto Unlock;
+ }
error = swsusp_read(&flags);
- swsusp_close(FMODE_READ);
+ swsusp_close();
if (!error)
error = hibernation_restore(flags & SF_PLATFORM_MODE);
@@ -701,20 +727,35 @@ static int load_image_and_restore(void)
return error;
}
+#define COMPRESSION_ALGO_LZO "lzo"
+#define COMPRESSION_ALGO_LZ4 "lz4"
+
/**
* hibernate - Carry out system hibernation, including saving the image.
*/
int hibernate(void)
{
- int error, nr_calls = 0;
bool snapshot_test = false;
+ unsigned int sleep_flags;
+ int error;
if (!hibernation_available()) {
pm_pr_dbg("Hibernation not available.\n");
return -EPERM;
}
- lock_system_sleep();
+ /*
+ * Query for the compression algorithm support if compression is enabled.
+ */
+ if (!nocompress) {
+ strscpy(hib_comp_algo, hibernate_compressor, sizeof(hib_comp_algo));
+ if (crypto_has_comp(hib_comp_algo, 0, 0) != 1) {
+ pr_err("%s compression is not available\n", hib_comp_algo);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ sleep_flags = lock_system_sleep();
/* The snapshot device should not be opened while we're running */
if (!hibernate_acquire()) {
error = -EBUSY;
@@ -723,11 +764,9 @@ int hibernate(void)
pr_info("hibernation entry\n");
pm_prepare_console();
- error = __pm_notifier_call_chain(PM_HIBERNATION_PREPARE, -1, &nr_calls);
- if (error) {
- nr_calls--;
- goto Exit;
- }
+ error = pm_notifier_call_chain_robust(PM_HIBERNATION_PREPARE, PM_POST_HIBERNATION);
+ if (error)
+ goto Restore;
ksys_sync_helper();
@@ -750,11 +789,24 @@ int hibernate(void)
if (hibernation_mode == HIBERNATION_PLATFORM)
flags |= SF_PLATFORM_MODE;
- if (nocompress)
+ if (nocompress) {
flags |= SF_NOCOMPRESS_MODE;
- else
+ } else {
flags |= SF_CRC32_MODE;
+ /*
+ * By default, LZO compression is enabled. Use SF_COMPRESSION_ALG_LZ4
+ * to override this behaviour and use LZ4.
+ *
+ * Refer to kernel/power/power.h for more details
+ */
+
+ if (!strcmp(hib_comp_algo, COMPRESSION_ALGO_LZ4))
+ flags |= SF_COMPRESSION_ALG_LZ4;
+ else
+ flags |= SF_COMPRESSION_ALG_LZO;
+ }
+
pm_pr_dbg("Writing hibernation image.\n");
error = swsusp_write(flags);
swsusp_free();
@@ -776,7 +828,7 @@ int hibernate(void)
unlock_device_hotplug();
if (snapshot_test) {
pm_pr_dbg("Checking hibernation image\n");
- error = swsusp_check();
+ error = swsusp_check(false);
if (!error)
error = load_image_and_restore();
}
@@ -785,61 +837,119 @@ int hibernate(void)
/* Don't bother checking whether freezer_test_done is true */
freezer_test_done = false;
Exit:
- __pm_notifier_call_chain(PM_POST_HIBERNATION, nr_calls, NULL);
+ pm_notifier_call_chain(PM_POST_HIBERNATION);
+ Restore:
pm_restore_console();
hibernate_release();
Unlock:
- unlock_system_sleep();
+ unlock_system_sleep(sleep_flags);
pr_info("hibernation exit\n");
return error;
}
-
/**
- * software_resume - Resume from a saved hibernation image.
- *
- * This routine is called as a late initcall, when all devices have been
- * discovered and initialized already.
+ * hibernate_quiet_exec - Execute a function with all devices frozen.
+ * @func: Function to execute.
+ * @data: Data pointer to pass to @func.
*
- * The image reading code is called to see if there is a hibernation image
- * available for reading. If that is the case, devices are quiesced and the
- * contents of memory is restored from the saved image.
- *
- * If this is successful, control reappears in the restored target kernel in
- * hibernation_snapshot() which returns to hibernate(). Otherwise, the routine
- * attempts to recover gracefully and make the kernel return to the normal mode
- * of operation.
+ * Return the @func return value or an error code if it cannot be executed.
*/
-static int software_resume(void)
+int hibernate_quiet_exec(int (*func)(void *data), void *data)
{
- int error, nr_calls = 0;
+ unsigned int sleep_flags;
+ int error;
- /*
- * If the user said "noresume".. bail out early.
- */
- if (noresume || !hibernation_available())
- return 0;
+ sleep_flags = lock_system_sleep();
- /*
- * name_to_dev_t() below takes a sysfs buffer mutex when sysfs
- * is configured into the kernel. Since the regular hibernate
- * trigger path is via sysfs which takes a buffer mutex before
- * calling hibernate functions (which take system_transition_mutex)
- * this can cause lockdep to complain about a possible ABBA deadlock
- * which cannot happen since we're in the boot code here and
- * sysfs can't be invoked yet. Therefore, we use a subclass
- * here to avoid lockdep complaining.
- */
- mutex_lock_nested(&system_transition_mutex, SINGLE_DEPTH_NESTING);
+ if (!hibernate_acquire()) {
+ error = -EBUSY;
+ goto unlock;
+ }
- if (swsusp_resume_device)
- goto Check_image;
+ pm_prepare_console();
- if (!strlen(resume_file)) {
- error = -ENOENT;
- goto Unlock;
- }
+ error = pm_notifier_call_chain_robust(PM_HIBERNATION_PREPARE, PM_POST_HIBERNATION);
+ if (error)
+ goto restore;
+
+ error = freeze_processes();
+ if (error)
+ goto exit;
+
+ lock_device_hotplug();
+
+ pm_suspend_clear_flags();
+
+ error = platform_begin(true);
+ if (error)
+ goto thaw;
+
+ error = freeze_kernel_threads();
+ if (error)
+ goto thaw;
+
+ error = dpm_prepare(PMSG_FREEZE);
+ if (error)
+ goto dpm_complete;
+
+ suspend_console();
+
+ error = dpm_suspend(PMSG_FREEZE);
+ if (error)
+ goto dpm_resume;
+
+ error = dpm_suspend_end(PMSG_FREEZE);
+ if (error)
+ goto dpm_resume;
+
+ error = platform_pre_snapshot(true);
+ if (error)
+ goto skip;
+
+ error = func(data);
+
+skip:
+ platform_finish(true);
+
+ dpm_resume_start(PMSG_THAW);
+
+dpm_resume:
+ dpm_resume(PMSG_THAW);
+
+ resume_console();
+
+dpm_complete:
+ dpm_complete(PMSG_THAW);
+
+ thaw_kernel_threads();
+
+thaw:
+ platform_end(true);
+
+ unlock_device_hotplug();
+
+ thaw_processes();
+
+exit:
+ pm_notifier_call_chain(PM_POST_HIBERNATION);
+
+restore:
+ pm_restore_console();
+
+ hibernate_release();
+
+unlock:
+ unlock_system_sleep(sleep_flags);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(hibernate_quiet_exec);
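A minimal sketch of a caller (the names and the payload are placeholders, not part of this patch); the callback runs with processes frozen and devices suspended, and its return value is propagated back:

static int my_quiet_op(void *data)
{
	int *result = data;

	/* Work that must run with devices quiesced; placeholder only */
	*result = 0;
	return 0;
}

static int my_do_quiet_work(void)
{
	int val;

	return hibernate_quiet_exec(my_quiet_op, &val);
}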
+
+static int __init find_resume_device(void)
+{
+ if (!strlen(resume_file))
+ return -ENOENT;
pm_pr_dbg("Checking hibernation image partition %s\n", resume_file);
@@ -850,61 +960,65 @@ static int software_resume(void)
}
/* Check if the device is there */
- swsusp_resume_device = name_to_dev_t(resume_file);
+ if (!early_lookup_bdev(resume_file, &swsusp_resume_device))
+ return 0;
/*
- * name_to_dev_t is ineffective to verify parition if resume_file is in
- * integer format. (e.g. major:minor)
+ * Some device discovery might still be in progress; we need to wait for
+ * this to finish.
*/
- if (isdigit(resume_file[0]) && resume_wait) {
- int partno;
- while (!get_gendisk(swsusp_resume_device, &partno))
+ wait_for_device_probe();
+ if (resume_wait) {
+ while (early_lookup_bdev(resume_file, &swsusp_resume_device))
msleep(10);
+ async_synchronize_full();
}
- if (!swsusp_resume_device) {
- /*
- * Some device discovery might still be in progress; we need
- * to wait for this to finish.
- */
- wait_for_device_probe();
-
- if (resume_wait) {
- while ((swsusp_resume_device = name_to_dev_t(resume_file)) == 0)
- msleep(10);
- async_synchronize_full();
- }
+ return early_lookup_bdev(resume_file, &swsusp_resume_device);
+}
- swsusp_resume_device = name_to_dev_t(resume_file);
- if (!swsusp_resume_device) {
- error = -ENODEV;
- goto Unlock;
- }
- }
+static int software_resume(void)
+{
+ int error;
- Check_image:
pm_pr_dbg("Hibernation image partition %d:%d present\n",
MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device));
pm_pr_dbg("Looking for hibernation image.\n");
- error = swsusp_check();
+
+ mutex_lock(&system_transition_mutex);
+ error = swsusp_check(true);
if (error)
goto Unlock;
+ /*
+ * Check if the hibernation image is compressed. If so, query for
+ * the algorithm support.
+ */
+ if (!(swsusp_header_flags & SF_NOCOMPRESS_MODE)) {
+ if (swsusp_header_flags & SF_COMPRESSION_ALG_LZ4)
+ strscpy(hib_comp_algo, COMPRESSION_ALGO_LZ4, sizeof(hib_comp_algo));
+ else
+ strscpy(hib_comp_algo, COMPRESSION_ALGO_LZO, sizeof(hib_comp_algo));
+ if (crypto_has_comp(hib_comp_algo, 0, 0) != 1) {
+ pr_err("%s compression is not available\n", hib_comp_algo);
+ error = -EOPNOTSUPP;
+ goto Unlock;
+ }
+ }
+
/* The snapshot device should not be opened while we're running */
if (!hibernate_acquire()) {
error = -EBUSY;
- swsusp_close(FMODE_READ);
+ swsusp_close();
goto Unlock;
}
pr_info("resume from hibernation\n");
pm_prepare_console();
- error = __pm_notifier_call_chain(PM_RESTORE_PREPARE, -1, &nr_calls);
- if (error) {
- nr_calls--;
- goto Close_Finish;
- }
+ error = pm_notifier_call_chain_robust(PM_RESTORE_PREPARE, PM_POST_RESTORE);
+ if (error)
+ goto Restore;
pm_pr_dbg("Preparing processes for hibernation restore.\n");
error = freeze_processes();
@@ -920,7 +1034,8 @@ static int software_resume(void)
error = load_image_and_restore();
thaw_processes();
Finish:
- __pm_notifier_call_chain(PM_POST_RESTORE, nr_calls, NULL);
+ pm_notifier_call_chain(PM_POST_RESTORE);
+ Restore:
pm_restore_console();
pr_info("resume failed (%d)\n", error);
hibernate_release();
@@ -930,11 +1045,43 @@ static int software_resume(void)
pm_pr_dbg("Hibernation image not present or could not be loaded.\n");
return error;
Close_Finish:
- swsusp_close(FMODE_READ);
+ swsusp_close();
goto Finish;
}
-late_initcall_sync(software_resume);
+/**
+ * software_resume_initcall - Resume from a saved hibernation image.
+ *
+ * This routine is called as a late initcall, when all devices have been
+ * discovered and initialized already.
+ *
+ * The image reading code is called to see if there is a hibernation image
+ * available for reading. If that is the case, devices are quiesced and the
+ * contents of memory is restored from the saved image.
+ *
+ * If this is successful, control reappears in the restored target kernel in
+ * hibernation_snapshot() which returns to hibernate(). Otherwise, the routine
+ * attempts to recover gracefully and make the kernel return to the normal mode
+ * of operation.
+ */
+static int __init software_resume_initcall(void)
+{
+ /*
+ * If the user said "noresume".. bail out early.
+ */
+ if (noresume || !hibernation_available())
+ return 0;
+
+ if (!swsusp_resume_device) {
+ int error = find_resume_device();
+
+ if (error)
+ return error;
+ }
+
+ return software_resume();
+}
+late_initcall_sync(software_resume_initcall);
static const char * const hibernation_modes[] = {
@@ -1011,11 +1158,12 @@ static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr,
static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t n)
{
+ int mode = HIBERNATION_INVALID;
+ unsigned int sleep_flags;
int error = 0;
- int i;
int len;
char *p;
- int mode = HIBERNATION_INVALID;
+ int i;
if (!hibernation_available())
return -EPERM;
@@ -1023,7 +1171,7 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
p = memchr(buf, '\n', n);
len = p ? p - buf : n;
- lock_system_sleep();
+ sleep_flags = lock_system_sleep();
for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) {
if (len == strlen(hibernation_modes[i])
&& !strncmp(buf, hibernation_modes[i], len)) {
@@ -1053,7 +1201,7 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
if (!error)
pm_pr_dbg("Hibernation mode set to '%s'\n",
hibernation_modes[mode]);
- unlock_system_sleep();
+ unlock_system_sleep(sleep_flags);
return error ? error : n;
}
@@ -1062,16 +1210,21 @@ power_attr(disk);
static ssize_t resume_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
- return sprintf(buf,"%d:%d\n", MAJOR(swsusp_resume_device),
+ return sprintf(buf, "%d:%d\n", MAJOR(swsusp_resume_device),
MINOR(swsusp_resume_device));
}
static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t n)
{
- dev_t res;
+ unsigned int sleep_flags;
int len = n;
char *name;
+ dev_t dev;
+ int error;
+
+ if (!hibernation_available())
+ return n;
if (len && buf[len-1] == '\n')
len--;
@@ -1079,14 +1232,32 @@ static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr,
if (!name)
return -ENOMEM;
- res = name_to_dev_t(name);
+ error = lookup_bdev(name, &dev);
+ if (error) {
+ unsigned maj, min, offset;
+ char *p, dummy;
+
+ error = 0;
+ if (sscanf(name, "%u:%u%c", &maj, &min, &dummy) == 2 ||
+ sscanf(name, "%u:%u:%u:%c", &maj, &min, &offset,
+ &dummy) == 3) {
+ dev = MKDEV(maj, min);
+ if (maj != MAJOR(dev) || min != MINOR(dev))
+ error = -EINVAL;
+ } else {
+ dev = new_decode_dev(simple_strtoul(name, &p, 16));
+ if (*p)
+ error = -EINVAL;
+ }
+ }
kfree(name);
- if (!res)
- return -EINVAL;
+ if (error)
+ return error;
+
+ sleep_flags = lock_system_sleep();
+ swsusp_resume_device = dev;
+ unlock_system_sleep(sleep_flags);
- lock_system_sleep();
- swsusp_resume_device = res;
- unlock_system_sleep();
pm_pr_dbg("Configured hibernation resume from disk to %u\n",
swsusp_resume_device);
noresume = 0;
@@ -1162,7 +1333,7 @@ static ssize_t reserved_size_store(struct kobject *kobj,
power_attr(reserved_size);
-static struct attribute * g[] = {
+static struct attribute *g[] = {
&disk_attr.attr,
&resume_offset_attr.attr,
&resume_attr.attr,
@@ -1190,7 +1361,7 @@ static int __init resume_setup(char *str)
if (noresume)
return 1;
- strncpy( resume_file, str, 255 );
+ strncpy(resume_file, str, 255);
return 1;
}
@@ -1240,7 +1411,7 @@ static int __init resumedelay_setup(char *str)
int rc = kstrtouint(str, 0, &resume_delay);
if (rc)
- return rc;
+ pr_warn("resumedelay: bad option string '%s'\n", str);
return 1;
}
@@ -1251,6 +1422,57 @@ static int __init nohibernate_setup(char *str)
return 1;
}
+static const char * const comp_alg_enabled[] = {
+#if IS_ENABLED(CONFIG_CRYPTO_LZO)
+ COMPRESSION_ALGO_LZO,
+#endif
+#if IS_ENABLED(CONFIG_CRYPTO_LZ4)
+ COMPRESSION_ALGO_LZ4,
+#endif
+};
+
+static int hibernate_compressor_param_set(const char *compressor,
+ const struct kernel_param *kp)
+{
+ unsigned int sleep_flags;
+ int index, ret;
+
+ sleep_flags = lock_system_sleep();
+
+ index = sysfs_match_string(comp_alg_enabled, compressor);
+ if (index >= 0) {
+ ret = param_set_copystring(comp_alg_enabled[index], kp);
+ if (!ret)
+ strscpy(hib_comp_algo, comp_alg_enabled[index],
+ sizeof(hib_comp_algo));
+ } else {
+ ret = index;
+ }
+
+ unlock_system_sleep(sleep_flags);
+
+ if (ret)
+ pr_debug("Cannot set specified compressor %s\n",
+ compressor);
+
+ return ret;
+}
+
+static const struct kernel_param_ops hibernate_compressor_param_ops = {
+ .set = hibernate_compressor_param_set,
+ .get = param_get_string,
+};
+
+static struct kparam_string hibernate_compressor_param_string = {
+ .maxlen = sizeof(hibernate_compressor),
+ .string = hibernate_compressor,
+};
+
+module_param_cb(compressor, &hibernate_compressor_param_ops,
+ &hibernate_compressor_param_string, 0644);
+MODULE_PARM_DESC(compressor,
+ "Compression algorithm to be used with hibernation");
+
__setup("noresume", noresume_setup);
__setup("resume_offset=", resume_offset_setup);
__setup("resume=", resume_setup);
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 40f86ec4ab30..a9e0693aaf69 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -6,6 +6,7 @@
* Copyright (c) 2003 Open Source Development Lab
*/
+#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/kobject.h>
#include <linux/string.h>
@@ -20,33 +21,47 @@
#include "power.h"
#ifdef CONFIG_PM_SLEEP
+/*
+ * The following functions are used by the suspend/hibernate code to temporarily
+ * change gfp_allowed_mask in order to avoid using I/O during memory allocations
+ * while devices are suspended. To avoid races with the suspend/hibernate code,
+ * they should always be called with system_transition_mutex held
+ * (gfp_allowed_mask also should only be modified with system_transition_mutex
+ * held, unless the suspend/hibernate code is guaranteed not to run in parallel
+ * with that modification).
+ */
+static gfp_t saved_gfp_mask;
+
+void pm_restore_gfp_mask(void)
+{
+ WARN_ON(!mutex_is_locked(&system_transition_mutex));
+ if (saved_gfp_mask) {
+ gfp_allowed_mask = saved_gfp_mask;
+ saved_gfp_mask = 0;
+ }
+}
-void lock_system_sleep(void)
+void pm_restrict_gfp_mask(void)
{
- current->flags |= PF_FREEZER_SKIP;
+ WARN_ON(!mutex_is_locked(&system_transition_mutex));
+ WARN_ON(saved_gfp_mask);
+ saved_gfp_mask = gfp_allowed_mask;
+ gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
+}
+
+unsigned int lock_system_sleep(void)
+{
+ unsigned int flags = current->flags;
+ current->flags |= PF_NOFREEZE;
mutex_lock(&system_transition_mutex);
+ return flags;
}
EXPORT_SYMBOL_GPL(lock_system_sleep);
-void unlock_system_sleep(void)
-{
- /*
- * Don't use freezer_count() because we don't want the call to
- * try_to_freeze() here.
- *
- * Reason:
- * Fundamentally, we just don't need it, because freezing condition
- * doesn't come into effect until we release the
- * system_transition_mutex lock, since the freezer always works with
- * system_transition_mutex held.
- *
- * More importantly, in the case of hibernation,
- * unlock_system_sleep() gets called in snapshot_read() and
- * snapshot_write() when the freezing condition is still in effect.
- * Which means, if we use try_to_freeze() here, it would make them
- * enter the refrigerator, thus causing hibernation to lockup.
- */
- current->flags &= ~PF_FREEZER_SKIP;
+void unlock_system_sleep(unsigned int flags)
+{
+ if (!(flags & PF_NOFREEZE))
+ current->flags &= ~PF_NOFREEZE;
mutex_unlock(&system_transition_mutex);
}
EXPORT_SYMBOL_GPL(unlock_system_sleep);
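The new calling convention saves the caller's PF_NOFREEZE state in the returned flags and restores it on unlock; every call site converted by this patch (for example hibernation_set_ops() above) follows the same minimal pattern:

	unsigned int sleep_flags;

	sleep_flags = lock_system_sleep();
	/* ... code that must not race with a system sleep transition ... */
	unlock_system_sleep(sleep_flags);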
@@ -80,18 +95,18 @@ int unregister_pm_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(unregister_pm_notifier);
-int __pm_notifier_call_chain(unsigned long val, int nr_to_call, int *nr_calls)
+int pm_notifier_call_chain_robust(unsigned long val_up, unsigned long val_down)
{
int ret;
- ret = __blocking_notifier_call_chain(&pm_chain_head, val, NULL,
- nr_to_call, nr_calls);
+ ret = blocking_notifier_call_chain_robust(&pm_chain_head, val_up, val_down, NULL);
return notifier_to_errno(ret);
}
+
int pm_notifier_call_chain(unsigned long val)
{
- return __pm_notifier_call_chain(val, -1, NULL);
+ return blocking_notifier_call_chain(&pm_chain_head, val, NULL);
}
/* If set, devices may be suspended and resumed asynchronously. */
@@ -127,7 +142,9 @@ static ssize_t mem_sleep_show(struct kobject *kobj, struct kobj_attribute *attr,
char *s = buf;
suspend_state_t i;
- for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
+ for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++) {
+ if (i >= PM_SUSPEND_MEM && cxl_mem_active())
+ continue;
if (mem_sleep_states[i]) {
const char *label = mem_sleep_states[i];
@@ -136,6 +153,7 @@ static ssize_t mem_sleep_show(struct kobject *kobj, struct kobj_attribute *attr,
else
s += sprintf(s, "%s ", label);
}
+ }
/* Convert the last space to a newline if needed. */
if (s != buf)
@@ -260,16 +278,17 @@ static ssize_t pm_test_show(struct kobject *kobj, struct kobj_attribute *attr,
static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t n)
{
+ unsigned int sleep_flags;
const char * const *s;
+ int error = -EINVAL;
int level;
char *p;
int len;
- int error = -EINVAL;
p = memchr(buf, '\n', n);
len = p ? p - buf : n;
- lock_system_sleep();
+ sleep_flags = lock_system_sleep();
level = TEST_FIRST;
for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++)
@@ -279,7 +298,7 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
break;
}
- unlock_system_sleep();
+ unlock_system_sleep(sleep_flags);
return error ? error : n;
}
@@ -287,44 +306,117 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
power_attr(pm_test);
#endif /* CONFIG_PM_SLEEP_DEBUG */
-static char *suspend_step_name(enum suspend_stat_step step)
-{
- switch (step) {
- case SUSPEND_FREEZE:
- return "freeze";
- case SUSPEND_PREPARE:
- return "prepare";
- case SUSPEND_SUSPEND:
- return "suspend";
- case SUSPEND_SUSPEND_NOIRQ:
- return "suspend_noirq";
- case SUSPEND_RESUME_NOIRQ:
- return "resume_noirq";
- case SUSPEND_RESUME:
- return "resume";
- default:
- return "";
+#define SUSPEND_NR_STEPS SUSPEND_RESUME
+#define REC_FAILED_NUM 2
+
+struct suspend_stats {
+ unsigned int step_failures[SUSPEND_NR_STEPS];
+ unsigned int success;
+ unsigned int fail;
+ int last_failed_dev;
+ char failed_devs[REC_FAILED_NUM][40];
+ int last_failed_errno;
+ int errno[REC_FAILED_NUM];
+ int last_failed_step;
+ u64 last_hw_sleep;
+ u64 total_hw_sleep;
+ u64 max_hw_sleep;
+ enum suspend_stat_step failed_steps[REC_FAILED_NUM];
+};
+
+static struct suspend_stats suspend_stats;
+static DEFINE_MUTEX(suspend_stats_lock);
+
+void dpm_save_failed_dev(const char *name)
+{
+ mutex_lock(&suspend_stats_lock);
+
+ strscpy(suspend_stats.failed_devs[suspend_stats.last_failed_dev],
+ name, sizeof(suspend_stats.failed_devs[0]));
+ suspend_stats.last_failed_dev++;
+ suspend_stats.last_failed_dev %= REC_FAILED_NUM;
+
+ mutex_unlock(&suspend_stats_lock);
+}
+
+void dpm_save_failed_step(enum suspend_stat_step step)
+{
+ suspend_stats.step_failures[step-1]++;
+ suspend_stats.failed_steps[suspend_stats.last_failed_step] = step;
+ suspend_stats.last_failed_step++;
+ suspend_stats.last_failed_step %= REC_FAILED_NUM;
+}
+
+void dpm_save_errno(int err)
+{
+ if (!err) {
+ suspend_stats.success++;
+ return;
}
+
+ suspend_stats.fail++;
+
+ suspend_stats.errno[suspend_stats.last_failed_errno] = err;
+ suspend_stats.last_failed_errno++;
+ suspend_stats.last_failed_errno %= REC_FAILED_NUM;
}
-#define suspend_attr(_name) \
+void pm_report_hw_sleep_time(u64 t)
+{
+ suspend_stats.last_hw_sleep = t;
+ suspend_stats.total_hw_sleep += t;
+}
+EXPORT_SYMBOL_GPL(pm_report_hw_sleep_time);
+
+void pm_report_max_hw_sleep(u64 t)
+{
+ suspend_stats.max_hw_sleep = t;
+}
+EXPORT_SYMBOL_GPL(pm_report_max_hw_sleep);
+
+static const char * const suspend_step_names[] = {
+ [SUSPEND_WORKING] = "",
+ [SUSPEND_FREEZE] = "freeze",
+ [SUSPEND_PREPARE] = "prepare",
+ [SUSPEND_SUSPEND] = "suspend",
+ [SUSPEND_SUSPEND_LATE] = "suspend_late",
+ [SUSPEND_SUSPEND_NOIRQ] = "suspend_noirq",
+ [SUSPEND_RESUME_NOIRQ] = "resume_noirq",
+ [SUSPEND_RESUME_EARLY] = "resume_early",
+ [SUSPEND_RESUME] = "resume",
+};
+
+#define suspend_attr(_name, format_str) \
+static ssize_t _name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+{ \
+ return sprintf(buf, format_str, suspend_stats._name); \
+} \
+static struct kobj_attribute _name = __ATTR_RO(_name)
+
+suspend_attr(success, "%u\n");
+suspend_attr(fail, "%u\n");
+suspend_attr(last_hw_sleep, "%llu\n");
+suspend_attr(total_hw_sleep, "%llu\n");
+suspend_attr(max_hw_sleep, "%llu\n");
+
+#define suspend_step_attr(_name, step) \
static ssize_t _name##_show(struct kobject *kobj, \
struct kobj_attribute *attr, char *buf) \
{ \
- return sprintf(buf, "%d\n", suspend_stats._name); \
+ return sprintf(buf, "%u\n", \
+ suspend_stats.step_failures[step-1]); \
} \
static struct kobj_attribute _name = __ATTR_RO(_name)
-suspend_attr(success);
-suspend_attr(fail);
-suspend_attr(failed_freeze);
-suspend_attr(failed_prepare);
-suspend_attr(failed_suspend);
-suspend_attr(failed_suspend_late);
-suspend_attr(failed_suspend_noirq);
-suspend_attr(failed_resume);
-suspend_attr(failed_resume_early);
-suspend_attr(failed_resume_noirq);
+suspend_step_attr(failed_freeze, SUSPEND_FREEZE);
+suspend_step_attr(failed_prepare, SUSPEND_PREPARE);
+suspend_step_attr(failed_suspend, SUSPEND_SUSPEND);
+suspend_step_attr(failed_suspend_late, SUSPEND_SUSPEND_LATE);
+suspend_step_attr(failed_suspend_noirq, SUSPEND_SUSPEND_NOIRQ);
+suspend_step_attr(failed_resume, SUSPEND_RESUME);
+suspend_step_attr(failed_resume_early, SUSPEND_RESUME_EARLY);
+suspend_step_attr(failed_resume_noirq, SUSPEND_RESUME_NOIRQ);
static ssize_t last_failed_dev_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
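For reference, a minimal sketch of what the suspend_attr() macro above expands to for suspend_attr(success, "%u\n") -- nothing here beyond the expansion of the hunk's own macro:

static ssize_t success_show(struct kobject *kobj,
                            struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%u\n", suspend_stats.success);
}
static struct kobj_attribute success = __ATTR_RO(success);

suspend_step_attr(failed_freeze, SUSPEND_FREEZE) expands the same way, except that its show routine prints suspend_stats.step_failures[SUSPEND_FREEZE - 1].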
@@ -357,16 +449,14 @@ static struct kobj_attribute last_failed_errno = __ATTR_RO(last_failed_errno);
static ssize_t last_failed_step_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- int index;
enum suspend_stat_step step;
- char *last_failed_step = NULL;
+ int index;
index = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;
index %= REC_FAILED_NUM;
step = suspend_stats.failed_steps[index];
- last_failed_step = suspend_step_name(step);
- return sprintf(buf, "%s\n", last_failed_step);
+ return sprintf(buf, "%s\n", suspend_step_names[step]);
}
static struct kobj_attribute last_failed_step = __ATTR_RO(last_failed_step);
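The writers above (dpm_save_failed_dev(), dpm_save_failed_step(), dpm_save_errno()) and the last_failed_*_show() readers share the same REC_FAILED_NUM-slot ring: a writer stores at the current index and advances it modulo REC_FAILED_NUM, so the most recent entry always sits one slot behind the write cursor. A small stand-alone sketch of that indexing, with REC_FAILED_NUM == 2 as in the hunk (the wrapper program itself is illustrative only):

#include <stdio.h>

#define REC_FAILED_NUM 2

static int errnos[REC_FAILED_NUM];
static int last_failed_errno;   /* next slot to write */

static void save_errno(int err)
{
        errnos[last_failed_errno] = err;
        last_failed_errno = (last_failed_errno + 1) % REC_FAILED_NUM;
}

int main(void)
{
        save_errno(-16);        /* e.g. -EBUSY */
        save_errno(-5);         /* e.g. -EIO   */

        /* the most recent entry sits one slot behind the write cursor */
        int last = (last_failed_errno + REC_FAILED_NUM - 1) % REC_FAILED_NUM;

        printf("%d\n", errnos[last]);   /* prints -5 */
        return 0;
}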
@@ -384,18 +474,37 @@ static struct attribute *suspend_attrs[] = {
&last_failed_dev.attr,
&last_failed_errno.attr,
&last_failed_step.attr,
+ &last_hw_sleep.attr,
+ &total_hw_sleep.attr,
+ &max_hw_sleep.attr,
NULL,
};
-static struct attribute_group suspend_attr_group = {
+static umode_t suspend_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
+{
+ if (attr != &last_hw_sleep.attr &&
+ attr != &total_hw_sleep.attr &&
+ attr != &max_hw_sleep.attr)
+ return 0444;
+
+#ifdef CONFIG_ACPI
+ if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)
+ return 0444;
+#endif
+ return 0;
+}
+
+static const struct attribute_group suspend_attr_group = {
.name = "suspend_stats",
.attrs = suspend_attrs,
+ .is_visible = suspend_attr_is_visible,
};
#ifdef CONFIG_DEBUG_FS
static int suspend_stats_show(struct seq_file *s, void *unused)
{
int i, index, last_dev, last_errno, last_step;
+ enum suspend_stat_step step;
last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
last_dev %= REC_FAILED_NUM;
@@ -403,47 +512,35 @@ static int suspend_stats_show(struct seq_file *s, void *unused)
last_errno %= REC_FAILED_NUM;
last_step = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;
last_step %= REC_FAILED_NUM;
- seq_printf(s, "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n"
- "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n",
- "success", suspend_stats.success,
- "fail", suspend_stats.fail,
- "failed_freeze", suspend_stats.failed_freeze,
- "failed_prepare", suspend_stats.failed_prepare,
- "failed_suspend", suspend_stats.failed_suspend,
- "failed_suspend_late",
- suspend_stats.failed_suspend_late,
- "failed_suspend_noirq",
- suspend_stats.failed_suspend_noirq,
- "failed_resume", suspend_stats.failed_resume,
- "failed_resume_early",
- suspend_stats.failed_resume_early,
- "failed_resume_noirq",
- suspend_stats.failed_resume_noirq);
+
+ seq_printf(s, "success: %u\nfail: %u\n",
+ suspend_stats.success, suspend_stats.fail);
+
+ for (step = SUSPEND_FREEZE; step <= SUSPEND_NR_STEPS; step++)
+ seq_printf(s, "failed_%s: %u\n", suspend_step_names[step],
+ suspend_stats.step_failures[step-1]);
+
seq_printf(s, "failures:\n last_failed_dev:\t%-s\n",
- suspend_stats.failed_devs[last_dev]);
+ suspend_stats.failed_devs[last_dev]);
for (i = 1; i < REC_FAILED_NUM; i++) {
index = last_dev + REC_FAILED_NUM - i;
index %= REC_FAILED_NUM;
- seq_printf(s, "\t\t\t%-s\n",
- suspend_stats.failed_devs[index]);
+ seq_printf(s, "\t\t\t%-s\n", suspend_stats.failed_devs[index]);
}
seq_printf(s, " last_failed_errno:\t%-d\n",
suspend_stats.errno[last_errno]);
for (i = 1; i < REC_FAILED_NUM; i++) {
index = last_errno + REC_FAILED_NUM - i;
index %= REC_FAILED_NUM;
- seq_printf(s, "\t\t\t%-d\n",
- suspend_stats.errno[index]);
+ seq_printf(s, "\t\t\t%-d\n", suspend_stats.errno[index]);
}
seq_printf(s, " last_failed_step:\t%-s\n",
- suspend_step_name(
- suspend_stats.failed_steps[last_step]));
+ suspend_step_names[suspend_stats.failed_steps[last_step]]);
for (i = 1; i < REC_FAILED_NUM; i++) {
index = last_step + REC_FAILED_NUM - i;
index %= REC_FAILED_NUM;
seq_printf(s, "\t\t\t%-s\n",
- suspend_step_name(
- suspend_stats.failed_steps[index]));
+ suspend_step_names[suspend_stats.failed_steps[index]]);
}
return 0;
@@ -504,13 +601,22 @@ static ssize_t pm_wakeup_irq_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
- return pm_wakeup_irq ? sprintf(buf, "%u\n", pm_wakeup_irq) : -ENODATA;
+ if (!pm_wakeup_irq())
+ return -ENODATA;
+
+ return sprintf(buf, "%u\n", pm_wakeup_irq());
}
power_attr_ro(pm_wakeup_irq);
bool pm_debug_messages_on __read_mostly;
+bool pm_debug_messages_should_print(void)
+{
+ return pm_debug_messages_on && pm_suspend_target_state != PM_SUSPEND_ON;
+}
+EXPORT_SYMBOL_GPL(pm_debug_messages_should_print);
+
static ssize_t pm_debug_messages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -542,42 +648,13 @@ static int __init pm_debug_messages_setup(char *str)
}
__setup("pm_debug_messages", pm_debug_messages_setup);
-/**
- * __pm_pr_dbg - Print a suspend debug message to the kernel log.
- * @defer: Whether or not to use printk_deferred() to print the message.
- * @fmt: Message format.
- *
- * The message will be emitted if enabled through the pm_debug_messages
- * sysfs attribute.
- */
-void __pm_pr_dbg(bool defer, const char *fmt, ...)
-{
- struct va_format vaf;
- va_list args;
-
- if (!pm_debug_messages_on)
- return;
-
- va_start(args, fmt);
-
- vaf.fmt = fmt;
- vaf.va = &args;
-
- if (defer)
- printk_deferred(KERN_DEBUG "PM: %pV", &vaf);
- else
- printk(KERN_DEBUG "PM: %pV", &vaf);
-
- va_end(args);
-}
-
#else /* !CONFIG_PM_SLEEP_DEBUG */
static inline void pm_print_times_init(void) {}
#endif /* CONFIG_PM_SLEEP_DEBUG */
struct kobject *power_kobj;
-/**
+/*
* state - control system sleep states.
*
* show() returns available sleep state labels, which may be "mem", "standby",
diff --git a/kernel/power/power.h b/kernel/power/power.h
index ba2094db6294..de0e6b1077f2 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -4,6 +4,9 @@
#include <linux/utsname.h>
#include <linux/freezer.h>
#include <linux/compiler.h>
+#include <linux/cpu.h>
+#include <linux/cpuidle.h>
+#include <linux/crypto.h>
struct swsusp_info {
struct new_utsname uts;
@@ -24,23 +27,18 @@ extern void __init hibernate_image_size_init(void);
/* Maximum size of architecture specific data in a hibernation header */
#define MAX_ARCH_HEADER_SIZE (sizeof(struct new_utsname) + 4)
-extern int arch_hibernation_header_save(void *addr, unsigned int max_size);
-extern int arch_hibernation_header_restore(void *addr);
-
static inline int init_header_complete(struct swsusp_info *info)
{
return arch_hibernation_header_save(info, MAX_ARCH_HEADER_SIZE);
}
-static inline char *check_image_kernel(struct swsusp_info *info)
+static inline const char *check_image_kernel(struct swsusp_info *info)
{
return arch_hibernation_header_restore(info) ?
"architecture specific data" : NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */
-extern int hibernate_resume_nonboot_cpu_disable(void);
-
/*
* Keep some memory free so that I/O operations can succeed without paging
* [Might this be more than 4 MB?]
@@ -57,6 +55,10 @@ asmlinkage int swsusp_save(void);
/* kernel/power/hibernate.c */
extern bool freezer_test_done;
+extern char hib_comp_algo[CRYPTO_MAX_ALG_NAME];
+
+/* kernel/power/swap.c */
+extern unsigned int swsusp_header_flags;
extern int hibernation_snapshot(int platform_mode);
extern int hibernation_restore(int platform_mode);
@@ -106,7 +108,7 @@ extern int create_basic_memory_bitmaps(void);
extern void free_basic_memory_bitmaps(void);
extern int hibernate_preallocate_memory(void);
-extern void clear_free_pages(void);
+extern void clear_or_poison_free_pages(void);
/**
* Auxiliary structure used for reading the snapshot image data and
@@ -151,7 +153,7 @@ extern unsigned int snapshot_additional_pages(struct zone *zone);
extern unsigned long snapshot_get_image_size(void);
extern int snapshot_read_next(struct snapshot_handle *handle);
extern int snapshot_write_next(struct snapshot_handle *handle);
-extern void snapshot_write_finalize(struct snapshot_handle *handle);
+int snapshot_write_finalize(struct snapshot_handle *handle);
extern int snapshot_image_loaded(struct snapshot_handle *handle);
extern bool hibernate_acquire(void);
@@ -165,18 +167,35 @@ extern int swsusp_swap_in_use(void);
 * Flags that can be passed from the hibernating kernel to the "boot" kernel in
* the image header.
*/
+#define SF_COMPRESSION_ALG_LZO 0 /* dummy, details given below */
#define SF_PLATFORM_MODE 1
#define SF_NOCOMPRESS_MODE 2
#define SF_CRC32_MODE 4
+#define SF_HW_SIG 8
+
+/*
+ * Bit to indicate the compression algorithm to be used (for LZ4). The same
+ * can be checked while saving/loading the image to/from disk to pick the
+ * corresponding algorithm.
+ *
+ * By default, LZO compression is enabled if SF_CRC32_MODE is set. Use
+ * SF_COMPRESSION_ALG_LZ4 to override this behaviour and use LZ4.
+ *
+ * SF_CRC32_MODE, SF_COMPRESSION_ALG_LZO (dummy) -> Compression, LZO
+ * SF_CRC32_MODE, SF_COMPRESSION_ALG_LZ4 -> Compression, LZ4
+ */
+#define SF_COMPRESSION_ALG_LZ4 16
/* kernel/power/hibernate.c */
-extern int swsusp_check(void);
+int swsusp_check(bool exclusive);
extern void swsusp_free(void);
extern int swsusp_read(unsigned int *flags_p);
extern int swsusp_write(unsigned int flags);
-extern void swsusp_close(fmode_t);
+void swsusp_close(void);
#ifdef CONFIG_SUSPEND
extern int swsusp_unmark(void);
+#else
+static inline int swsusp_unmark(void) { return 0; }
#endif
struct __kernel_old_timeval;
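A hedged sketch of how the restoring kernel could turn the header flags defined above into a compressor name; the function and its handling of the no-compression case are illustrative, only the SF_* semantics come from the comment block in this hunk (LZO is the default whenever SF_CRC32_MODE is set, SF_COMPRESSION_ALG_LZ4 selects LZ4 instead):

/* Illustrative only; not the in-tree resume path. */
static const char *select_comp_algo(unsigned int resume_flags)
{
        if (resume_flags & SF_NOCOMPRESS_MODE)
                return NULL;                    /* image not compressed */

        if (resume_flags & SF_COMPRESSION_ALG_LZ4)
                return "lz4";                   /* overrides the LZO default */

        return "lzo";                           /* default under SF_CRC32_MODE */
}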
@@ -210,9 +229,13 @@ static inline void suspend_test_finish(const char *label) {}
#ifdef CONFIG_PM_SLEEP
/* kernel/power/main.c */
-extern int __pm_notifier_call_chain(unsigned long val, int nr_to_call,
- int *nr_calls);
+extern int pm_notifier_call_chain_robust(unsigned long val_up, unsigned long val_down);
extern int pm_notifier_call_chain(unsigned long val);
+void pm_restrict_gfp_mask(void);
+void pm_restore_gfp_mask(void);
+#else
+static inline void pm_restrict_gfp_mask(void) {}
+static inline void pm_restore_gfp_mask(void) {}
#endif
#ifdef CONFIG_HIGHMEM
@@ -311,3 +334,17 @@ extern int pm_wake_lock(const char *buf);
extern int pm_wake_unlock(const char *buf);
#endif /* !CONFIG_PM_WAKELOCKS */
+
+static inline int pm_sleep_disable_secondary_cpus(void)
+{
+ cpuidle_pause();
+ return suspend_disable_secondary_cpus();
+}
+
+static inline void pm_sleep_enable_secondary_cpus(void)
+{
+ suspend_enable_secondary_cpus();
+ cpuidle_resume();
+}
+
+void dpm_save_errno(int err);
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
index 562aa0e450ed..1f306f158696 100644
--- a/kernel/power/poweroff.c
+++ b/kernel/power/poweroff.c
@@ -23,7 +23,7 @@ static void do_poweroff(struct work_struct *dummy)
static DECLARE_WORK(poweroff_work, do_poweroff);
-static void handle_poweroff(int key)
+static void handle_poweroff(u8 key)
{
/* run sysrq poweroff on boot cpu */
schedule_work_on(cpumask_first(cpu_online_mask), &poweroff_work);
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 4b6a54da7e65..cae81a87cc91 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -1,14 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * drivers/power/process.c - Functions for starting/stopping processes on
+ * drivers/power/process.c - Functions for starting/stopping processes on
* suspend transitions.
*
* Originally from swsusp.
*/
-
-#undef DEBUG
-
#include <linux/interrupt.h>
#include <linux/oom.h>
#include <linux/suspend.h>
@@ -30,6 +27,8 @@ unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;
static int try_to_freeze_tasks(bool user_only)
{
+ const char *what = user_only ? "user space processes" :
+ "remaining freezable tasks";
struct task_struct *g, *p;
unsigned long end_time;
unsigned int todo;
@@ -39,6 +38,8 @@ static int try_to_freeze_tasks(bool user_only)
bool wakeup = false;
int sleep_usecs = USEC_PER_MSEC;
+ pr_info("Freezing %s\n", what);
+
start = ktime_get_boottime();
end_time = jiffies + msecs_to_jiffies(freeze_timeout_msecs);
@@ -53,8 +54,7 @@ static int try_to_freeze_tasks(bool user_only)
if (p == current || !freeze_task(p))
continue;
- if (!freezer_should_skip(p))
- todo++;
+ todo++;
}
read_unlock(&tasklist_lock);
@@ -86,28 +86,26 @@ static int try_to_freeze_tasks(bool user_only)
elapsed_msecs = ktime_to_ms(elapsed);
if (todo) {
- pr_cont("\n");
- pr_err("Freezing of tasks %s after %d.%03d seconds "
- "(%d tasks refusing to freeze, wq_busy=%d):\n",
+ pr_err("Freezing %s %s after %d.%03d seconds "
+ "(%d tasks refusing to freeze, wq_busy=%d):\n", what,
wakeup ? "aborted" : "failed",
elapsed_msecs / 1000, elapsed_msecs % 1000,
todo - wq_busy, wq_busy);
if (wq_busy)
- show_workqueue_state();
+ show_freezable_workqueues();
if (!wakeup || pm_debug_messages_on) {
read_lock(&tasklist_lock);
for_each_process_thread(g, p) {
- if (p != current && !freezer_should_skip(p)
- && freezing(p) && !frozen(p))
+ if (p != current && freezing(p) && !frozen(p))
sched_show_task(p);
}
read_unlock(&tasklist_lock);
}
} else {
- pr_cont("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000,
- elapsed_msecs % 1000);
+ pr_info("Freezing %s completed (elapsed %d.%03d seconds)\n",
+ what, elapsed_msecs / 1000, elapsed_msecs % 1000);
}
return todo ? -EBUSY : 0;
@@ -132,21 +130,18 @@ int freeze_processes(void)
current->flags |= PF_SUSPEND_TASK;
if (!pm_freezing)
- atomic_inc(&system_freezing_cnt);
+ static_branch_inc(&freezer_active);
- pm_wakeup_clear(true);
- pr_info("Freezing user space processes ... ");
+ pm_wakeup_clear(0);
pm_freezing = true;
error = try_to_freeze_tasks(true);
- if (!error) {
+ if (!error)
__usermodehelper_set_disable_depth(UMH_DISABLED);
- pr_cont("done.");
- }
- pr_cont("\n");
+
BUG_ON(in_atomic());
/*
- * Now that the whole userspace is frozen we need to disbale
+ * Now that the whole userspace is frozen we need to disable
* the OOM killer to disallow any further interference with
* killable tasks. There is no guarantee oom victims will
* ever reach a point they go away we have to wait with a timeout.
@@ -171,14 +166,9 @@ int freeze_kernel_threads(void)
{
int error;
- pr_info("Freezing remaining freezable tasks ... ");
-
pm_nosig_freezing = true;
error = try_to_freeze_tasks(false);
- if (!error)
- pr_cont("done.");
- pr_cont("\n");
BUG_ON(in_atomic());
if (error)
@@ -193,7 +183,7 @@ void thaw_processes(void)
trace_suspend_resume(TPS("thaw_processes"), 0, true);
if (pm_freezing)
- atomic_dec(&system_freezing_cnt);
+ static_branch_dec(&freezer_active);
pm_freezing = false;
pm_nosig_freezing = false;
@@ -235,7 +225,7 @@ void thaw_kernel_threads(void)
read_lock(&tasklist_lock);
for_each_process_thread(g, p) {
- if (p->flags & (PF_KTHREAD | PF_WQ_WORKER))
+ if (p->flags & PF_KTHREAD)
__thaw_task(p);
}
read_unlock(&tasklist_lock);
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index db0bed2cae26..4244b069442e 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -119,7 +119,7 @@ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
* and add, then see if the aggregate has changed.
*/
plist_del(node, &c->list);
- /* fall through */
+ fallthrough;
case PM_QOS_ADD_REQ:
plist_node_init(node, new_value);
plist_add(node, &c->list);
@@ -188,7 +188,7 @@ bool pm_qos_update_flags(struct pm_qos_flags *pqf,
break;
case PM_QOS_UPDATE_REQ:
pm_qos_flags_remove_req(pqf, req);
- /* fall through */
+ fallthrough;
case PM_QOS_ADD_REQ:
req->flags = val;
INIT_LIST_HEAD(&req->node);
@@ -220,6 +220,11 @@ static struct pm_qos_constraints cpu_latency_constraints = {
.type = PM_QOS_MIN,
};
+static inline bool cpu_latency_qos_value_invalid(s32 value)
+{
+ return value < 0 && value != PM_QOS_DEFAULT_VALUE;
+}
+
/**
* cpu_latency_qos_limit - Return current system-wide CPU latency QoS limit.
*/
@@ -263,7 +268,7 @@ static void cpu_latency_qos_apply(struct pm_qos_request *req,
*/
void cpu_latency_qos_add_request(struct pm_qos_request *req, s32 value)
{
- if (!req)
+ if (!req || cpu_latency_qos_value_invalid(value))
return;
if (cpu_latency_qos_request_active(req)) {
@@ -289,7 +294,7 @@ EXPORT_SYMBOL_GPL(cpu_latency_qos_add_request);
*/
void cpu_latency_qos_update_request(struct pm_qos_request *req, s32 new_value)
{
- if (!req)
+ if (!req || cpu_latency_qos_value_invalid(new_value))
return;
if (!cpu_latency_qos_request_active(req)) {
@@ -426,6 +431,11 @@ late_initcall(cpu_latency_qos_init);
/* Definitions related to the frequency QoS below. */
+static inline bool freq_qos_value_invalid(s32 value)
+{
+ return value < 0 && value != PM_QOS_DEFAULT_VALUE;
+}
+
/**
* freq_constraints_init - Initialize frequency QoS constraints.
* @qos: Frequency QoS constraints to initialize.
@@ -531,7 +541,7 @@ int freq_qos_add_request(struct freq_constraints *qos,
{
int ret;
- if (IS_ERR_OR_NULL(qos) || !req)
+ if (IS_ERR_OR_NULL(qos) || !req || freq_qos_value_invalid(value))
return -EINVAL;
if (WARN(freq_qos_request_active(req),
@@ -563,7 +573,7 @@ EXPORT_SYMBOL_GPL(freq_qos_add_request);
*/
int freq_qos_update_request(struct freq_qos_request *req, s32 new_value)
{
- if (!req)
+ if (!req || freq_qos_value_invalid(new_value))
return -EINVAL;
if (WARN(!freq_qos_request_active(req),
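With the new cpu_latency_qos_value_invalid()/freq_qos_value_invalid() checks above, a negative value other than PM_QOS_DEFAULT_VALUE is now rejected before a request touches the constraint list. A usage sketch from a hypothetical driver (the driver names and the 20 us figure are made up; the cpu_latency_qos_*() calls are the interfaces touched in this hunk):

static struct pm_qos_request my_latency_req;    /* hypothetical driver state */

static void my_driver_start_streaming(void)
{
        /* cap CPU wakeup latency at 20 us while streaming */
        cpu_latency_qos_add_request(&my_latency_req, 20);
}

static void my_driver_stop_streaming(void)
{
        /* relax the constraint; PM_QOS_DEFAULT_VALUE is still accepted */
        cpu_latency_qos_update_request(&my_latency_req, PM_QOS_DEFAULT_VALUE);
        cpu_latency_qos_remove_request(&my_latency_req);
}

A call such as cpu_latency_qos_add_request(&my_latency_req, -20) would now return early without registering anything.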
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 881128b9351e..405eddbda4fc 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -58,24 +58,60 @@ static inline void hibernate_restore_protection_end(void)
hibernate_restore_protection_active = false;
}
-static inline void hibernate_restore_protect_page(void *page_address)
+static inline int __must_check hibernate_restore_protect_page(void *page_address)
{
if (hibernate_restore_protection_active)
- set_memory_ro((unsigned long)page_address, 1);
+ return set_memory_ro((unsigned long)page_address, 1);
+ return 0;
}
-static inline void hibernate_restore_unprotect_page(void *page_address)
+static inline int hibernate_restore_unprotect_page(void *page_address)
{
if (hibernate_restore_protection_active)
- set_memory_rw((unsigned long)page_address, 1);
+ return set_memory_rw((unsigned long)page_address, 1);
+ return 0;
}
#else
static inline void hibernate_restore_protection_begin(void) {}
static inline void hibernate_restore_protection_end(void) {}
-static inline void hibernate_restore_protect_page(void *page_address) {}
-static inline void hibernate_restore_unprotect_page(void *page_address) {}
+static inline int __must_check hibernate_restore_protect_page(void *page_address) {return 0; }
+static inline int hibernate_restore_unprotect_page(void *page_address) {return 0; }
#endif /* CONFIG_STRICT_KERNEL_RWX && CONFIG_ARCH_HAS_SET_MEMORY */
+
+/*
+ * The calls to set_direct_map_*() should not fail because remapping a page
+ * here means that we only update protection bits in an existing PTE.
+ * It is still worth having a warning here in case something changes and
+ * this is no longer the case.
+ */
+static inline void hibernate_map_page(struct page *page)
+{
+ if (IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
+ int ret = set_direct_map_default_noflush(page);
+
+ if (ret)
+ pr_warn_once("Failed to remap page\n");
+ } else {
+ debug_pagealloc_map_pages(page, 1);
+ }
+}
+
+static inline void hibernate_unmap_page(struct page *page)
+{
+ if (IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
+ unsigned long addr = (unsigned long)page_address(page);
+ int ret = set_direct_map_invalid_noflush(page);
+
+ if (ret)
+ pr_warn_once("Failed to remap page\n");
+
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+ } else {
+ debug_pagealloc_unmap_pages(page, 1);
+ }
+}
+
static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);
@@ -292,12 +328,12 @@ static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
return ret;
}
-/**
+/*
* Data types related to memory bitmaps.
*
- * Memory bitmap is a structure consiting of many linked lists of
+ * Memory bitmap is a structure consisting of many linked lists of
* objects. The main list's elements are of type struct zone_bitmap
- * and each of them corresonds to one zone. For each zone bitmap
+ * and each of them corresponds to one zone. For each zone bitmap
* object there is a list of objects of type struct bm_block that
 * represent each block of the bitmap in which information is stored.
*
@@ -364,12 +400,13 @@ struct mem_zone_bm_rtree {
unsigned int blocks; /* Number of Bitmap Blocks */
};
-/* strcut bm_position is used for browsing memory bitmaps */
+/* struct bm_position is used for browsing memory bitmaps */
struct bm_position {
struct mem_zone_bm_rtree *zone;
struct rtree_node *node;
unsigned long node_pfn;
+ unsigned long cur_pfn;
int node_bit;
};
@@ -393,6 +430,10 @@ struct memory_bitmap {
/**
* alloc_rtree_node - Allocate a new node and add it to the radix tree.
+ * @gfp_mask: GFP mask for the allocation.
+ * @safe_needed: Get pages not used before hibernation (restore only)
+ * @ca: Pointer to a linked list of pages ("a chain") to allocate from
+ * @list: Radix Tree node to add.
*
* This function is used to allocate inner nodes as well as the
 * leaf nodes of the radix tree. It also adds the node to the
@@ -551,6 +592,7 @@ static void memory_bm_position_reset(struct memory_bitmap *bm)
bm->cur.node = list_entry(bm->cur.zone->leaves.next,
struct rtree_node, list);
bm->cur.node_pfn = 0;
+ bm->cur.cur_pfn = BM_END_OF_MAP;
bm->cur.node_bit = 0;
}
@@ -735,7 +777,7 @@ zone_found:
*/
/*
- * If the zone we wish to scan is the the current zone and the
+ * If the zone we wish to scan is the current zone and the
* pfn falls into the current node then we do not need to walk
* the tree.
*/
@@ -761,6 +803,7 @@ node_found:
bm->cur.zone = zone;
bm->cur.node = node;
bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;
+ bm->cur.cur_pfn = pfn;
/* Set return values */
*addr = node->data;
@@ -812,6 +855,11 @@ static void memory_bm_clear_current(struct memory_bitmap *bm)
clear_bit(bit, bm->cur.node->data);
}
+static unsigned long memory_bm_get_current(struct memory_bitmap *bm)
+{
+ return bm->cur.cur_pfn;
+}
+
static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
void *addr;
@@ -868,7 +916,7 @@ static bool rtree_next_node(struct memory_bitmap *bm)
}
/**
- * memory_bm_rtree_next_pfn - Find the next set bit in a memory bitmap.
+ * memory_bm_next_pfn - Find the next set bit in a memory bitmap.
* @bm: Memory bitmap.
*
* Starting from the last returned position this function searches for the next
@@ -891,10 +939,12 @@ static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
if (bit < bits) {
pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
bm->cur.node_bit = bit + 1;
+ bm->cur.cur_pfn = pfn;
return pfn;
}
} while (rtree_next_node(bm));
+ bm->cur.cur_pfn = BM_END_OF_MAP;
return BM_END_OF_MAP;
}
@@ -944,8 +994,7 @@ static void memory_bm_recycle(struct memory_bitmap *bm)
* Register a range of page frames the contents of which should not be saved
* during hibernation (to be used in the early initialization code).
*/
-void __init __register_nosave_region(unsigned long start_pfn,
- unsigned long end_pfn, int use_kmalloc)
+void __init register_nosave_region(unsigned long start_pfn, unsigned long end_pfn)
{
struct nosave_region *region;
@@ -961,18 +1010,12 @@ void __init __register_nosave_region(unsigned long start_pfn,
goto Report;
}
}
- if (use_kmalloc) {
- /* During init, this shouldn't fail */
- region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
- BUG_ON(!region);
- } else {
- /* This allocation cannot fail */
- region = memblock_alloc(sizeof(struct nosave_region),
- SMP_CACHE_BYTES);
- if (!region)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- sizeof(struct nosave_region));
- }
+ /* This allocation cannot fail */
+ region = memblock_alloc(sizeof(struct nosave_region),
+ SMP_CACHE_BYTES);
+ if (!region)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(struct nosave_region));
region->start_pfn = start_pfn;
region->end_pfn = end_pfn;
list_add_tail(&region->list, &nosave_regions);
@@ -1078,7 +1121,7 @@ static void mark_nosave_pages(struct memory_bitmap *bm)
int create_basic_memory_bitmaps(void)
{
struct memory_bitmap *bm1, *bm2;
- int error = 0;
+ int error;
if (forbidden_pages_map && free_pages_map)
return 0;
@@ -1112,7 +1155,7 @@ int create_basic_memory_bitmaps(void)
Free_second_object:
kfree(bm2);
Free_first_bitmap:
- memory_bm_free(bm1, PG_UNSAFE_CLEAR);
+ memory_bm_free(bm1, PG_UNSAFE_CLEAR);
Free_first_object:
kfree(bm1);
return -ENOMEM;
@@ -1144,7 +1187,15 @@ void free_basic_memory_bitmaps(void)
pr_debug("Basic memory bitmaps freed\n");
}
-void clear_free_pages(void)
+static void clear_or_poison_free_page(struct page *page)
+{
+ if (page_poisoning_enabled_static())
+ __kernel_poison_pages(page, 1);
+ else if (want_init_on_free())
+ clear_highpage(page);
+}
+
+void clear_or_poison_free_pages(void)
{
struct memory_bitmap *bm = free_pages_map;
unsigned long pfn;
@@ -1152,12 +1203,12 @@ void clear_free_pages(void)
if (WARN_ON(!(free_pages_map)))
return;
- if (IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) || want_init_on_free()) {
+ if (page_poisoning_enabled() || want_init_on_free()) {
memory_bm_position_reset(bm);
pfn = memory_bm_next_pfn(bm);
while (pfn != BM_END_OF_MAP) {
if (pfn_valid(pfn))
- clear_highpage(pfn_to_page(pfn));
+ clear_or_poison_free_page(pfn_to_page(pfn));
pfn = memory_bm_next_pfn(bm);
}
@@ -1189,6 +1240,58 @@ unsigned int snapshot_additional_pages(struct zone *zone)
return 2 * rtree;
}
+/*
+ * Touch the watchdog for every WD_PAGE_COUNT pages.
+ */
+#define WD_PAGE_COUNT (128*1024)
+
+static void mark_free_pages(struct zone *zone)
+{
+ unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
+ unsigned long flags;
+ unsigned int order, t;
+ struct page *page;
+
+ if (zone_is_empty(zone))
+ return;
+
+ spin_lock_irqsave(&zone->lock, flags);
+
+ max_zone_pfn = zone_end_pfn(zone);
+ for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
+ if (pfn_valid(pfn)) {
+ page = pfn_to_page(pfn);
+
+ if (!--page_count) {
+ touch_nmi_watchdog();
+ page_count = WD_PAGE_COUNT;
+ }
+
+ if (page_zone(page) != zone)
+ continue;
+
+ if (!swsusp_page_is_forbidden(page))
+ swsusp_unset_page_free(page);
+ }
+
+ for_each_migratetype_order(order, t) {
+ list_for_each_entry(page,
+ &zone->free_area[order].free_list[t], buddy_list) {
+ unsigned long i;
+
+ pfn = page_to_pfn(page);
+ for (i = 0; i < (1UL << order); i++) {
+ if (!--page_count) {
+ touch_nmi_watchdog();
+ page_count = WD_PAGE_COUNT;
+ }
+ swsusp_set_page_free(pfn_to_page(pfn + i));
+ }
+ }
+ }
+ spin_unlock_irqrestore(&zone->lock, flags);
+}
+
#ifdef CONFIG_HIGHMEM
/**
* count_free_highmem_pages - Compute the total number of free highmem pages.
@@ -1332,14 +1435,19 @@ static unsigned int count_data_pages(void)
/*
* This is needed, because copy_page and memcpy are not usable for copying
- * task structs.
+ * task structs. Returns true if the page was filled with only zeros,
+ * otherwise false.
*/
-static inline void do_copy_page(long *dst, long *src)
+static inline bool do_copy_page(long *dst, long *src)
{
+ long z = 0;
int n;
- for (n = PAGE_SIZE / sizeof(long); n; n--)
+ for (n = PAGE_SIZE / sizeof(long); n; n--) {
+ z |= *src;
*dst++ = *src++;
+ }
+ return !z;
}
/**
@@ -1348,17 +1456,21 @@ static inline void do_copy_page(long *dst, long *src)
* Check if the page we are going to copy is marked as present in the kernel
* page tables. This always is the case if CONFIG_DEBUG_PAGEALLOC or
* CONFIG_ARCH_HAS_SET_DIRECT_MAP is not set. In that case kernel_page_present()
- * always returns 'true'.
+ * always returns 'true'. Returns true if the page was entirely composed of
+ * zeros, false otherwise.
*/
-static void safe_copy_page(void *dst, struct page *s_page)
+static bool safe_copy_page(void *dst, struct page *s_page)
{
+ bool zeros_only;
+
if (kernel_page_present(s_page)) {
- do_copy_page(dst, page_address(s_page));
+ zeros_only = do_copy_page(dst, page_address(s_page));
} else {
- kernel_map_pages(s_page, 1, 1);
- do_copy_page(dst, page_address(s_page));
- kernel_map_pages(s_page, 1, 0);
+ hibernate_map_page(s_page);
+ zeros_only = do_copy_page(dst, page_address(s_page));
+ hibernate_unmap_page(s_page);
}
+ return zeros_only;
}
#ifdef CONFIG_HIGHMEM
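do_copy_page() above ORs every source word into an accumulator while copying, so a single pass both copies the page and reports whether it was all zeros. The same idea in a tiny stand-alone form (the helper name is illustrative):

#include <stdbool.h>
#include <stddef.h>

/* Copy n longs from src to dst; return true if src was entirely zero. */
static bool copy_and_check_zero(long *dst, const long *src, size_t n)
{
        long acc = 0;

        while (n--) {
                acc |= *src;
                *dst++ = *src++;
        }
        return acc == 0;
}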
@@ -1368,49 +1480,59 @@ static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn
saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}
-static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
+static bool copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
struct page *s_page, *d_page;
void *src, *dst;
+ bool zeros_only;
s_page = pfn_to_page(src_pfn);
d_page = pfn_to_page(dst_pfn);
if (PageHighMem(s_page)) {
- src = kmap_atomic(s_page);
- dst = kmap_atomic(d_page);
- do_copy_page(dst, src);
- kunmap_atomic(dst);
- kunmap_atomic(src);
+ src = kmap_local_page(s_page);
+ dst = kmap_local_page(d_page);
+ zeros_only = do_copy_page(dst, src);
+ kunmap_local(dst);
+ kunmap_local(src);
} else {
if (PageHighMem(d_page)) {
/*
* The page pointed to by src may contain some kernel
* data modified by kmap_atomic()
*/
- safe_copy_page(buffer, s_page);
- dst = kmap_atomic(d_page);
+ zeros_only = safe_copy_page(buffer, s_page);
+ dst = kmap_local_page(d_page);
copy_page(dst, buffer);
- kunmap_atomic(dst);
+ kunmap_local(dst);
} else {
- safe_copy_page(page_address(d_page), s_page);
+ zeros_only = safe_copy_page(page_address(d_page), s_page);
}
}
+ return zeros_only;
}
#else
#define page_is_saveable(zone, pfn) saveable_page(zone, pfn)
-static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
+static inline int copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
- safe_copy_page(page_address(pfn_to_page(dst_pfn)),
+ return safe_copy_page(page_address(pfn_to_page(dst_pfn)),
pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */
-static void copy_data_pages(struct memory_bitmap *copy_bm,
- struct memory_bitmap *orig_bm)
+/*
+ * copy_data_pages() copies every saveable page into a page frame pulled from
+ * the copy_bm. A page that was entirely filled with zeros is not copied; it
+ * is only marked in the zero_bm.
+ *
+ * Returns the number of pages actually copied.
+ */
+static unsigned long copy_data_pages(struct memory_bitmap *copy_bm,
+ struct memory_bitmap *orig_bm,
+ struct memory_bitmap *zero_bm)
{
+ unsigned long copied_pages = 0;
struct zone *zone;
- unsigned long pfn;
+ unsigned long pfn, copy_pfn;
for_each_populated_zone(zone) {
unsigned long max_zone_pfn;
@@ -1423,18 +1545,29 @@ static void copy_data_pages(struct memory_bitmap *copy_bm,
}
memory_bm_position_reset(orig_bm);
memory_bm_position_reset(copy_bm);
+ copy_pfn = memory_bm_next_pfn(copy_bm);
for(;;) {
pfn = memory_bm_next_pfn(orig_bm);
if (unlikely(pfn == BM_END_OF_MAP))
break;
- copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
+ if (copy_data_page(copy_pfn, pfn)) {
+ memory_bm_set_bit(zero_bm, pfn);
+ /* Use this copy_pfn for a page that is not full of zeros */
+ continue;
+ }
+ copied_pages++;
+ copy_pfn = memory_bm_next_pfn(copy_bm);
}
+ return copied_pages;
}
/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
+/* Number of zero pages */
+static unsigned int nr_zero_pages;
+
/*
* Numbers of normal and highmem page frames allocated for hibernation image
* before suspending devices.
@@ -1455,10 +1588,13 @@ static struct memory_bitmap orig_bm;
*/
static struct memory_bitmap copy_bm;
+/* Memory bitmap which tracks which saveable pages were zero filled. */
+static struct memory_bitmap zero_bm;
+
/**
* swsusp_free - Free pages allocated for hibernation image.
*
- * Image pages are alocated before snapshot creation, so they need to be
+ * Image pages are allocated before snapshot creation, so they need to be
* released after resume.
*/
void swsusp_free(void)
@@ -1499,6 +1635,7 @@ loop:
out:
nr_copy_pages = 0;
nr_meta_pages = 0;
+ nr_zero_pages = 0;
restore_pblist = NULL;
buffer = NULL;
alloc_normal = 0;
@@ -1663,7 +1800,7 @@ static unsigned long minimum_image_size(unsigned long saveable)
{
unsigned long size;
- size = global_node_page_state(NR_SLAB_RECLAIMABLE)
+ size = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B)
+ global_node_page_state(NR_ACTIVE_ANON)
+ global_node_page_state(NR_INACTIVE_ANON)
+ global_node_page_state(NR_ACTIVE_FILE)
@@ -1684,8 +1821,8 @@ static unsigned long minimum_image_size(unsigned long saveable)
* /sys/power/reserved_size, respectively). To make this happen, we compute the
* total number of available page frames and allocate at least
*
- * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
- * + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
+ * ([page frames total] - PAGES_FOR_IO - [metadata pages]) / 2
+ * - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
*
* of them, which corresponds to the maximum size of a hibernation image.
*
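As a purely illustrative calculation (all figures below are made up, not taken from any particular machine): with 1,000,000 page frames in total, 1,024 pages set aside as PAGES_FOR_IO, 500 metadata pages, and a 64 MiB reserved_size on 4 KiB pages (DIV_ROUND_UP gives 16,384 pages), the formula above yields (1,000,000 - 1,024 - 500) / 2 - 2 * 16,384 = 499,238 - 32,768 = 466,470 preallocated page frames.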
@@ -1717,8 +1854,15 @@ int hibernate_preallocate_memory(void)
goto err_out;
}
+ error = memory_bm_create(&zero_bm, GFP_IMAGE, PG_ANY);
+ if (error) {
+ pr_err("Cannot allocate zero bitmap\n");
+ goto err_out;
+ }
+
alloc_normal = 0;
alloc_highmem = 0;
+ nr_zero_pages = 0;
/* Count the number of saveable data pages. */
save_highmem = count_highmem_pages();
@@ -1902,7 +2046,7 @@ static inline int get_highmem_buffer(int safe_needed)
}
/**
- * alloc_highmem_image_pages - Allocate some highmem pages for the image.
+ * alloc_highmem_pages - Allocate some highmem pages for the image.
*
* Try to allocate as many pages as needed, but if the number of free highmem
* pages is less than that, allocate them all.
@@ -1998,19 +2142,19 @@ asmlinkage __visible int swsusp_save(void)
* Kill them.
*/
drain_local_pages(NULL);
- copy_data_pages(&copy_bm, &orig_bm);
+ nr_copy_pages = copy_data_pages(&copy_bm, &orig_bm, &zero_bm);
/*
* End of critical section. From now on, we can write to memory,
* but we should not touch disk. This specially means we must _not_
* touch swap space! Except we must write out our image of course.
*/
-
nr_pages += nr_highmem;
- nr_copy_pages = nr_pages;
+ /* We don't actually copy the zero pages */
+ nr_zero_pages = nr_pages - nr_copy_pages;
nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
- pr_info("Image created (%d pages copied)\n", nr_pages);
+ pr_info("Image created (%d pages copied, %d zero pages)\n", nr_copy_pages, nr_zero_pages);
return 0;
}
@@ -2023,7 +2167,7 @@ static int init_header_complete(struct swsusp_info *info)
return 0;
}
-static char *check_image_kernel(struct swsusp_info *info)
+static const char *check_image_kernel(struct swsusp_info *info)
{
if (info->version_code != LINUX_VERSION_CODE)
return "kernel version";
@@ -2055,15 +2199,22 @@ static int init_header(struct swsusp_info *info)
return init_header_complete(info);
}
+#define ENCODED_PFN_ZERO_FLAG ((unsigned long)1 << (BITS_PER_LONG - 1))
+#define ENCODED_PFN_MASK (~ENCODED_PFN_ZERO_FLAG)
+
/**
* pack_pfns - Prepare PFNs for saving.
* @bm: Memory bitmap.
* @buf: Memory buffer to store the PFNs in.
+ * @zero_bm: Memory bitmap containing PFNs of zero pages.
*
* PFNs corresponding to set bits in @bm are stored in the area of memory
- * pointed to by @buf (1 page at a time).
+ * pointed to by @buf (1 page at a time). Pages which were filled with only
+ * zeros will have the highest bit set in the packed format to distinguish
+ * them from PFNs which will be contained in the image file.
*/
-static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
+static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm,
+ struct memory_bitmap *zero_bm)
{
int j;
@@ -2071,6 +2222,8 @@ static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
buf[j] = memory_bm_next_pfn(bm);
if (unlikely(buf[j] == BM_END_OF_MAP))
break;
+ if (memory_bm_test_bit(zero_bm, buf[j]))
+ buf[j] |= ENCODED_PFN_ZERO_FLAG;
}
}
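The packed format above steals the most significant bit of each stored PFN: pack_pfns() sets ENCODED_PFN_ZERO_FLAG for pages that were entirely zero, and unpack_orig_pfns() later masks it back off. A minimal round-trip sketch using the same two macros (the stand-alone program and the example PFN are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG          (8 * sizeof(unsigned long))
#define ENCODED_PFN_ZERO_FLAG  ((unsigned long)1 << (BITS_PER_LONG - 1))
#define ENCODED_PFN_MASK       (~ENCODED_PFN_ZERO_FLAG)

int main(void)
{
        unsigned long pfn = 0x12345;
        unsigned long packed = pfn | ENCODED_PFN_ZERO_FLAG;     /* page was all zeros */

        bool zero = !!(packed & ENCODED_PFN_ZERO_FLAG);
        unsigned long decoded = packed & ENCODED_PFN_MASK;

        printf("pfn=%#lx zero=%d\n", decoded, zero);            /* pfn=0x12345 zero=1 */
        return 0;
}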
@@ -2112,7 +2265,7 @@ int snapshot_read_next(struct snapshot_handle *handle)
memory_bm_position_reset(&copy_bm);
} else if (handle->cur <= nr_meta_pages) {
clear_page(buffer);
- pack_pfns(buffer, &orig_bm);
+ pack_pfns(buffer, &orig_bm, &zero_bm);
} else {
struct page *page;
@@ -2176,7 +2329,7 @@ static void mark_unsafe_pages(struct memory_bitmap *bm)
static int check_header(struct swsusp_info *info)
{
- char *reason;
+ const char *reason;
reason = check_image_kernel(info);
if (!reason && info->num_physpages != get_num_physpages())
@@ -2189,7 +2342,7 @@ static int check_header(struct swsusp_info *info)
}
/**
- * load header - Check the image header and copy the data from it.
+ * load_header - Check the image header and copy the data from it.
*/
static int load_header(struct swsusp_info *info)
{
@@ -2208,22 +2361,37 @@ static int load_header(struct swsusp_info *info)
* unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
* @bm: Memory bitmap.
* @buf: Area of memory containing the PFNs.
+ * @zero_bm: Memory bitmap with the zero PFNs marked.
*
* For each element of the array pointed to by @buf (1 page at a time), set the
- * corresponding bit in @bm.
+ * corresponding bit in @bm. If the page was originally populated with only
+ * zeros then a corresponding bit will also be set in @zero_bm.
*/
-static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
+static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm,
+ struct memory_bitmap *zero_bm)
{
+ unsigned long decoded_pfn;
+ bool zero;
int j;
for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
if (unlikely(buf[j] == BM_END_OF_MAP))
break;
- if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
- memory_bm_set_bit(bm, buf[j]);
- else
+ zero = !!(buf[j] & ENCODED_PFN_ZERO_FLAG);
+ decoded_pfn = buf[j] & ENCODED_PFN_MASK;
+ if (pfn_valid(decoded_pfn) && memory_bm_pfn_present(bm, decoded_pfn)) {
+ memory_bm_set_bit(bm, decoded_pfn);
+ if (zero) {
+ memory_bm_set_bit(zero_bm, decoded_pfn);
+ nr_zero_pages++;
+ }
+ } else {
+ if (!pfn_valid(decoded_pfn))
+ pr_err(FW_BUG "Memory map mismatch at 0x%llx after hibernation\n",
+ (unsigned long long)PFN_PHYS(decoded_pfn));
return -EFAULT;
+ }
}
return 0;
@@ -2284,7 +2452,7 @@ static struct memory_bitmap *safe_highmem_bm;
* (@nr_highmem_p points to the variable containing the number of highmem image
* pages). The pages that are "safe" (ie. will not be overwritten when the
* hibernation image is restored entirely) have the corresponding bits set in
- * @bm (it must be unitialized).
+ * @bm (it must be uninitialized).
*
* NOTE: This function should not be called if there are no highmem image pages.
*/
@@ -2379,8 +2547,9 @@ static void *get_highmem_page_buffer(struct page *page,
pbe->copy_page = tmp;
} else {
/* Copy of the page will be stored in normal memory */
- kaddr = safe_pages_list;
- safe_pages_list = safe_pages_list->next;
+ kaddr = __get_safe_page(ca->gfp_mask);
+ if (!kaddr)
+ return ERR_PTR(-ENOMEM);
pbe->copy_page = virt_to_page(kaddr);
}
pbe->next = highmem_pblist;
@@ -2441,8 +2610,9 @@ static inline void free_highmem_data(void) {}
/**
* prepare_image - Make room for loading hibernation image.
- * @new_bm: Unitialized memory bitmap structure.
+ * @new_bm: Uninitialized memory bitmap structure.
* @bm: Memory bitmap with unsafe pages marked.
+ * @zero_bm: Memory bitmap containing the zero pages.
*
* Use @bm to mark the pages that will be overwritten in the process of
* restoring the system memory state from the suspend image ("unsafe" pages)
@@ -2453,10 +2623,15 @@ static inline void free_highmem_data(void) {}
* pages will be used for just yet. Instead, we mark them all as allocated and
* create a lists of "safe" pages to be used later. On systems with high
* memory a list of "safe" highmem pages is created too.
+ *
+ * Because it was not known which pages were unsafe when @zero_bm was created,
+ * make a copy of it and recreate it within safe pages.
*/
-static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
+static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm,
+ struct memory_bitmap *zero_bm)
{
unsigned int nr_pages, nr_highmem;
+ struct memory_bitmap tmp;
struct linked_page *lp;
int error;
@@ -2473,6 +2648,24 @@ static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
duplicate_memory_bitmap(new_bm, bm);
memory_bm_free(bm, PG_UNSAFE_KEEP);
+
+ /* Make a copy of zero_bm so it can be created in safe pages */
+ error = memory_bm_create(&tmp, GFP_ATOMIC, PG_SAFE);
+ if (error)
+ goto Free;
+
+ duplicate_memory_bitmap(&tmp, zero_bm);
+ memory_bm_free(zero_bm, PG_UNSAFE_KEEP);
+
+ /* Recreate zero_bm in safe pages */
+ error = memory_bm_create(zero_bm, GFP_ATOMIC, PG_SAFE);
+ if (error)
+ goto Free;
+
+ duplicate_memory_bitmap(zero_bm, &tmp);
+ memory_bm_free(&tmp, PG_UNSAFE_CLEAR);
+ /* At this point zero_bm is in safe pages and it can be used for restoring. */
+
if (nr_highmem > 0) {
error = prepare_highmem_image(bm, &nr_highmem);
if (error)
@@ -2487,7 +2680,7 @@ static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
*
* nr_copy_pages cannot be less than allocated_unsafe_pages too.
*/
- nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
+ nr_pages = (nr_zero_pages + nr_copy_pages) - nr_highmem - allocated_unsafe_pages;
nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
while (nr_pages > 0) {
lp = get_image_page(GFP_ATOMIC, PG_SAFE);
@@ -2500,7 +2693,7 @@ static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
nr_pages--;
}
/* Preallocate memory for the image */
- nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
+ nr_pages = (nr_zero_pages + nr_copy_pages) - nr_highmem - allocated_unsafe_pages;
while (nr_pages > 0) {
lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
if (!lp) {
@@ -2560,8 +2753,9 @@ static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
return ERR_PTR(-ENOMEM);
}
pbe->orig_address = page_address(page);
- pbe->address = safe_pages_list;
- safe_pages_list = safe_pages_list->next;
+ pbe->address = __get_safe_page(ca->gfp_mask);
+ if (!pbe->address)
+ return ERR_PTR(-ENOMEM);
pbe->next = restore_pblist;
restore_pblist = pbe;
return pbe->address;
@@ -2586,14 +2780,13 @@ static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
int snapshot_write_next(struct snapshot_handle *handle)
{
static struct chain_allocator ca;
- int error = 0;
+ int error;
+next:
/* Check if we have already loaded the entire image */
- if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
+ if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages + nr_zero_pages)
return 0;
- handle->sync_read = 1;
-
if (!handle->cur) {
if (!buffer)
/* This makes the buffer be freed by swsusp_free() */
@@ -2614,35 +2807,50 @@ int snapshot_write_next(struct snapshot_handle *handle)
if (error)
return error;
+ error = memory_bm_create(&zero_bm, GFP_ATOMIC, PG_ANY);
+ if (error)
+ return error;
+
+ nr_zero_pages = 0;
+
hibernate_restore_protection_begin();
} else if (handle->cur <= nr_meta_pages + 1) {
- error = unpack_orig_pfns(buffer, &copy_bm);
+ error = unpack_orig_pfns(buffer, &copy_bm, &zero_bm);
if (error)
return error;
if (handle->cur == nr_meta_pages + 1) {
- error = prepare_image(&orig_bm, &copy_bm);
+ error = prepare_image(&orig_bm, &copy_bm, &zero_bm);
if (error)
return error;
chain_init(&ca, GFP_ATOMIC, PG_SAFE);
memory_bm_position_reset(&orig_bm);
+ memory_bm_position_reset(&zero_bm);
restore_pblist = NULL;
handle->buffer = get_buffer(&orig_bm, &ca);
- handle->sync_read = 0;
if (IS_ERR(handle->buffer))
return PTR_ERR(handle->buffer);
}
} else {
copy_last_highmem_page();
- hibernate_restore_protect_page(handle->buffer);
+ error = hibernate_restore_protect_page(handle->buffer);
+ if (error)
+ return error;
handle->buffer = get_buffer(&orig_bm, &ca);
if (IS_ERR(handle->buffer))
return PTR_ERR(handle->buffer);
- if (handle->buffer != buffer)
- handle->sync_read = 0;
}
+ handle->sync_read = (handle->buffer == buffer);
handle->cur++;
+
+	/* Zero pages were not included in the image; clear the buffer and move on. */
+ if (handle->cur > nr_meta_pages + 1 &&
+ memory_bm_test_bit(&zero_bm, memory_bm_get_current(&orig_bm))) {
+ memset(handle->buffer, 0, PAGE_SIZE);
+ goto next;
+ }
+
return PAGE_SIZE;
}
@@ -2654,21 +2862,24 @@ int snapshot_write_next(struct snapshot_handle *handle)
* stored in highmem. Additionally, it recycles bitmap memory that's not
* necessary any more.
*/
-void snapshot_write_finalize(struct snapshot_handle *handle)
+int snapshot_write_finalize(struct snapshot_handle *handle)
{
+ int error;
+
copy_last_highmem_page();
- hibernate_restore_protect_page(handle->buffer);
+ error = hibernate_restore_protect_page(handle->buffer);
/* Do that only if we have loaded the image entirely */
- if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
+ if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages + nr_zero_pages) {
memory_bm_recycle(&orig_bm);
free_highmem_data();
}
+ return error;
}
int snapshot_image_loaded(struct snapshot_handle *handle)
{
return !(!nr_copy_pages || !last_highmem_page_copied() ||
- handle->cur <= nr_meta_pages + nr_copy_pages);
+ handle->cur <= nr_meta_pages + nr_copy_pages + nr_zero_pages);
}
#ifdef CONFIG_HIGHMEM
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 8b1bb5ee7e5d..09f8397bae15 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -75,9 +75,11 @@ EXPORT_SYMBOL_GPL(pm_suspend_default_s2idle);
void s2idle_set_ops(const struct platform_s2idle_ops *ops)
{
- lock_system_sleep();
+ unsigned int sleep_flags;
+
+ sleep_flags = lock_system_sleep();
s2idle_ops = ops;
- unlock_system_sleep();
+ unlock_system_sleep(sleep_flags);
}
static void s2idle_begin(void)
@@ -96,8 +98,7 @@ static void s2idle_enter(void)
s2idle_state = S2IDLE_STATE_ENTER;
raw_spin_unlock_irq(&s2idle_lock);
- get_online_cpus();
- cpuidle_resume();
+ cpus_read_lock();
/* Push all the CPUs into the idle loop. */
wake_up_all_idle_cpus();
@@ -105,8 +106,13 @@ static void s2idle_enter(void)
swait_event_exclusive(s2idle_wait_head,
s2idle_state == S2IDLE_STATE_WAKE);
- cpuidle_pause();
- put_online_cpus();
+ /*
+ * Kick all CPUs to ensure that they resume their timers and restore
+ * consistent system state.
+ */
+ wake_up_all_idle_cpus();
+
+ cpus_read_unlock();
raw_spin_lock_irq(&s2idle_lock);
@@ -138,7 +144,8 @@ static void s2idle_loop(void)
break;
}
- pm_wakeup_clear(false);
+ if (s2idle_ops && s2idle_ops->check)
+ s2idle_ops->check();
s2idle_enter();
}
@@ -162,11 +169,13 @@ EXPORT_SYMBOL_GPL(s2idle_wake);
static bool valid_state(suspend_state_t state)
{
/*
- * PM_SUSPEND_STANDBY and PM_SUSPEND_MEM states need low level
- * support and need to be valid to the low level
- * implementation, no valid callback implies that none are valid.
+ * The PM_SUSPEND_STANDBY and PM_SUSPEND_MEM states require low-level
+ * support and need to be valid to the low-level implementation.
+ *
+ * No ->valid() or ->enter() callback implies that none are valid.
*/
- return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
+ return suspend_ops && suspend_ops->valid && suspend_ops->valid(state) &&
+ suspend_ops->enter;
}
void __init pm_states_init(void)
@@ -189,6 +198,7 @@ static int __init mem_sleep_default_setup(char *str)
if (mem_sleep_labels[state] &&
!strcmp(str, mem_sleep_labels[state])) {
mem_sleep_default = state;
+ mem_sleep_current = state;
break;
}
@@ -202,7 +212,9 @@ __setup("mem_sleep_default=", mem_sleep_default_setup);
*/
void suspend_set_ops(const struct platform_suspend_ops *ops)
{
- lock_system_sleep();
+ unsigned int sleep_flags;
+
+ sleep_flags = lock_system_sleep();
suspend_ops = ops;
@@ -218,12 +230,13 @@ void suspend_set_ops(const struct platform_suspend_ops *ops)
mem_sleep_current = PM_SUSPEND_MEM;
}
- unlock_system_sleep();
+ unlock_system_sleep(sleep_flags);
}
EXPORT_SYMBOL_GPL(suspend_set_ops);
/**
* suspend_valid_only_mem - Generic memory-only valid callback.
+ * @state: Target system sleep state.
*
* Platform drivers that implement mem suspend only and only need to check for
* that in their .valid() callback can use this instead of rolling their own
@@ -237,7 +250,8 @@ EXPORT_SYMBOL_GPL(suspend_valid_only_mem);
static bool sleep_state_supported(suspend_state_t state)
{
- return state == PM_SUSPEND_TO_IDLE || (suspend_ops && suspend_ops->enter);
+ return state == PM_SUSPEND_TO_IDLE ||
+ (valid_state(state) && !cxl_mem_active());
}
static int platform_suspend_prepare(suspend_state_t state)
@@ -335,6 +349,7 @@ static int suspend_test(int level)
/**
* suspend_prepare - Prepare for entering system sleep state.
+ * @state: Target system sleep state.
*
* Common code run for every system sleep state that can be entered (except for
* hibernation). Run suspend notifiers, allocate the "suspend" console and
@@ -342,18 +357,16 @@ static int suspend_test(int level)
*/
static int suspend_prepare(suspend_state_t state)
{
- int error, nr_calls = 0;
+ int error;
if (!sleep_state_supported(state))
return -EPERM;
pm_prepare_console();
- error = __pm_notifier_call_chain(PM_SUSPEND_PREPARE, -1, &nr_calls);
- if (error) {
- nr_calls--;
- goto Finish;
- }
+ error = pm_notifier_call_chain_robust(PM_SUSPEND_PREPARE, PM_POST_SUSPEND);
+ if (error)
+ goto Restore;
trace_suspend_resume(TPS("freeze_processes"), 0, true);
error = suspend_freeze_processes();
@@ -361,10 +374,9 @@ static int suspend_prepare(suspend_state_t state)
if (!error)
return 0;
- suspend_stats.failed_freeze++;
dpm_save_failed_step(SUSPEND_FREEZE);
- Finish:
- __pm_notifier_call_chain(PM_POST_SUSPEND, nr_calls, NULL);
+ pm_notifier_call_chain(PM_POST_SUSPEND);
+ Restore:
pm_restore_console();
return error;
}
@@ -422,7 +434,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
goto Platform_wake;
}
- error = suspend_disable_secondary_cpus();
+ error = pm_sleep_disable_secondary_cpus();
if (error || suspend_test(TEST_CPUS))
goto Enable_cpus;
@@ -452,7 +464,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
BUG_ON(irqs_disabled());
Enable_cpus:
- suspend_enable_secondary_cpus();
+ pm_sleep_enable_secondary_cpus();
Platform_wake:
platform_resume_noirq(state);
@@ -611,12 +623,7 @@ int pm_suspend(suspend_state_t state)
pr_info("suspend entry (%s)\n", mem_sleep_labels[state]);
error = enter_state(state);
- if (error) {
- suspend_stats.fail++;
- dpm_save_failed_errno(error);
- } else {
- suspend_stats.success++;
- }
+ dpm_save_errno(error);
pr_info("suspend exit\n");
return error;
}
diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
index e1ed58adb69e..d4856ec61570 100644
--- a/kernel/power/suspend_test.c
+++ b/kernel/power/suspend_test.c
@@ -129,7 +129,7 @@ static int __init has_wakealarm(struct device *dev, const void *data)
{
struct rtc_device *candidate = to_rtc_device(dev);
- if (!candidate->ops->set_alarm)
+ if (!test_bit(RTC_FEATURE_ALARM, candidate->features))
return 0;
if (!device_may_wakeup(candidate->dev.parent))
return 0;
@@ -157,22 +157,22 @@ static int __init setup_test_suspend(char *value)
value++;
suspend_type = strsep(&value, ",");
if (!suspend_type)
- return 0;
+ return 1;
repeat = strsep(&value, ",");
if (repeat) {
if (kstrtou32(repeat, 0, &test_repeat_count_max))
- return 0;
+ return 1;
}
for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
if (!strcmp(pm_labels[i], suspend_type)) {
test_state_label = pm_labels[i];
- return 0;
+ return 1;
}
printk(warn_bad_state, suspend_type);
- return 0;
+ return 1;
}
__setup("test_suspend", setup_test_suspend);
@@ -201,7 +201,7 @@ static int __init test_suspend(void)
}
/* RTCs have initialized by now too ... can we use one? */
- dev = class_find_device(rtc_class, NULL, NULL, has_wakealarm);
+ dev = class_find_device(&rtc_class, NULL, NULL, has_wakealarm);
if (dev) {
rtc = rtc_class_open(dev_name(dev));
put_device(dev);
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 01e2858b5fe3..5bc04bfe2db1 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -16,7 +16,6 @@
#include <linux/file.h>
#include <linux/delay.h>
#include <linux/bitops.h>
-#include <linux/genhd.h>
#include <linux/device.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
@@ -24,7 +23,6 @@
#include <linux/swapops.h>
#include <linux/pm.h>
#include <linux/slab.h>
-#include <linux/lzo.h>
#include <linux/vmalloc.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>
@@ -36,6 +34,8 @@
#define HIBERNATE_SIG "S1SUSPEND"
+u32 swsusp_hardware_signature;
+
/*
* When reading an {un,}compressed image, we may restore pages in place,
* in which case some architectures need these pages cleaning before they
@@ -87,7 +87,7 @@ struct swap_map_page_list {
struct swap_map_page_list *next;
};
-/**
+/*
* The swap_map_handle structure is used for handling swap in
* a file-alike way
*/
@@ -104,7 +104,8 @@ struct swap_map_handle {
struct swsusp_header {
char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
- sizeof(u32)];
+ sizeof(u32) - sizeof(u32)];
+ u32 hw_sig;
u32 crc32;
sector_t image;
unsigned int flags; /* Flags to pass to the "boot" kernel */
@@ -114,7 +115,7 @@ struct swsusp_header {
static struct swsusp_header *swsusp_header;
-/**
+/*
* The following functions are used for tracing the allocated
* swap pages, so that they can be freed in case of an error.
*/
@@ -168,7 +169,7 @@ static int swsusp_extents_insert(unsigned long swap_offset)
return 0;
}
-/**
+/*
* alloc_swapdev_block - allocate a swap page and register that it has
* been allocated, so that it can be freed in case of an error.
*/
@@ -187,7 +188,7 @@ sector_t alloc_swapdev_block(int swap)
return 0;
}
-/**
+/*
* free_all_swap_pages - free swap pages allocated for saving image data.
* It also frees the extents used to register which swap entries had been
* allocated.
@@ -220,12 +221,13 @@ int swsusp_swap_in_use(void)
*/
static unsigned short root_swap = 0xffff;
-static struct block_device *hib_resume_bdev;
+static struct file *hib_resume_bdev_file;
struct hib_bio_batch {
atomic_t count;
wait_queue_head_t wait;
blk_status_t error;
+ struct blk_plug plug;
};
static void hib_init_batch(struct hib_bio_batch *hb)
@@ -233,6 +235,12 @@ static void hib_init_batch(struct hib_bio_batch *hb)
atomic_set(&hb->count, 0);
init_waitqueue_head(&hb->wait);
hb->error = BLK_STS_OK;
+ blk_start_plug(&hb->plug);
+}
+
+static void hib_finish_batch(struct hib_bio_batch *hb)
+{
+ blk_finish_plug(&hb->plug);
}
static void hib_end_io(struct bio *bio)
@@ -260,17 +268,16 @@ static void hib_end_io(struct bio *bio)
bio_put(bio);
}
-static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr,
- struct hib_bio_batch *hb)
+static int hib_submit_io(blk_opf_t opf, pgoff_t page_off, void *addr,
+ struct hib_bio_batch *hb)
{
struct page *page = virt_to_page(addr);
struct bio *bio;
int error = 0;
- bio = bio_alloc(GFP_NOIO | __GFP_HIGH, 1);
+ bio = bio_alloc(file_bdev(hib_resume_bdev_file), 1, opf,
+ GFP_NOIO | __GFP_HIGH);
bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
- bio_set_dev(bio, hib_resume_bdev);
- bio_set_op_attrs(bio, op, op_flags);
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
pr_err("Adding page to bio failed at %llu\n",
@@ -292,8 +299,12 @@ static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr,
return error;
}
-static blk_status_t hib_wait_io(struct hib_bio_batch *hb)
+static int hib_wait_io(struct hib_bio_batch *hb)
{
+ /*
+ * We are relying on the behavior of blk_plug that a thread with
+ * a plug will flush the plug list before sleeping.
+ */
wait_event(hb->wait, atomic_read(&hb->count) == 0);
return blk_status_to_errno(hb->error);
}
@@ -301,22 +312,24 @@ static blk_status_t hib_wait_io(struct hib_bio_batch *hb)
/*
* Saving part
*/
-
static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
{
int error;
- hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block,
- swsusp_header, NULL);
+ hib_submit_io(REQ_OP_READ, swsusp_resume_block, swsusp_header, NULL);
if (!memcmp("SWAP-SPACE",swsusp_header->sig, 10) ||
!memcmp("SWAPSPACE2",swsusp_header->sig, 10)) {
memcpy(swsusp_header->orig_sig,swsusp_header->sig, 10);
memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
swsusp_header->image = handle->first_sector;
+ if (swsusp_hardware_signature) {
+ swsusp_header->hw_sig = swsusp_hardware_signature;
+ flags |= SF_HW_SIG;
+ }
swsusp_header->flags = flags;
if (flags & SF_CRC32_MODE)
swsusp_header->crc32 = handle->crc32;
- error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
+ error = hib_submit_io(REQ_OP_WRITE | REQ_SYNC,
swsusp_resume_block, swsusp_header, NULL);
} else {
pr_err("Swap header not found!\n");
@@ -325,6 +338,13 @@ static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
return error;
}
+/*
+ * Holds the swsusp_header flags. This is used in software_resume() in
+ * 'kernel/power/hibernate' to check if the image is compressed and, if so,
+ * to query for compression algorithm support.
+ */
+unsigned int swsusp_header_flags;
+
/**
* swsusp_swap_check - check if the resume device is a swap device
* and get its index (if so)
@@ -335,26 +355,23 @@ static int swsusp_swap_check(void)
{
int res;
- res = swap_type_of(swsusp_resume_device, swsusp_resume_block,
- &hib_resume_bdev);
+ if (swsusp_resume_device)
+ res = swap_type_of(swsusp_resume_device, swsusp_resume_block);
+ else
+ res = find_first_swap(&swsusp_resume_device);
if (res < 0)
return res;
-
root_swap = res;
- res = blkdev_get(hib_resume_bdev, FMODE_WRITE, NULL);
- if (res)
- return res;
- res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
+ hib_resume_bdev_file = bdev_file_open_by_dev(swsusp_resume_device,
+ BLK_OPEN_WRITE, NULL, NULL);
+ if (IS_ERR(hib_resume_bdev_file))
+ return PTR_ERR(hib_resume_bdev_file);
+
+ res = set_blocksize(file_bdev(hib_resume_bdev_file), PAGE_SIZE);
if (res < 0)
- blkdev_put(hib_resume_bdev, FMODE_WRITE);
+ fput(hib_resume_bdev_file);
- /*
- * Update the resume device to the one actually used,
- * so the test_resume mode can use it in case it is
- * invoked from hibernate() to test the snapshot.
- */
- swsusp_resume_device = hib_resume_bdev->bd_dev;
return res;
}
@@ -396,7 +413,7 @@ static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
} else {
src = buf;
}
- return hib_submit_io(REQ_OP_WRITE, REQ_SYNC, offset, src, hb);
+ return hib_submit_io(REQ_OP_WRITE | REQ_SYNC, offset, src, hb);
}
static void release_swap_writer(struct swap_map_handle *handle)
@@ -433,14 +450,14 @@ static int get_swap_writer(struct swap_map_handle *handle)
err_rel:
release_swap_writer(handle);
err_close:
- swsusp_close(FMODE_WRITE);
+ swsusp_close();
return ret;
}
static int swap_write_page(struct swap_map_handle *handle, void *buf,
struct hib_bio_batch *hb)
{
- int error = 0;
+ int error;
sector_t offset;
if (!handle->cur)
@@ -489,39 +506,44 @@ static int swap_writer_finish(struct swap_map_handle *handle,
unsigned int flags, int error)
{
if (!error) {
- flush_swap_writer(handle);
pr_info("S");
error = mark_swapfiles(handle, flags);
pr_cont("|\n");
+ flush_swap_writer(handle);
}
if (error)
free_all_swap_pages(root_swap);
release_swap_writer(handle);
- swsusp_close(FMODE_WRITE);
+ swsusp_close();
return error;
}
+/*
+ * Bytes we need for compressed data in the worst case. We assume (as a
+ * limitation) that this is the worst case across all the compression algorithms.
+ */
+#define bytes_worst_compress(x) ((x) + ((x) / 16) + 64 + 3 + 2)
+
/* We need to remember how much compressed data we need to read. */
-#define LZO_HEADER sizeof(size_t)
+#define CMP_HEADER sizeof(size_t)
/* Number of pages/bytes we'll compress at one time. */
-#define LZO_UNC_PAGES 32
-#define LZO_UNC_SIZE (LZO_UNC_PAGES * PAGE_SIZE)
+#define UNC_PAGES 32
+#define UNC_SIZE (UNC_PAGES * PAGE_SIZE)
-/* Number of pages/bytes we need for compressed data (worst case). */
-#define LZO_CMP_PAGES DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
- LZO_HEADER, PAGE_SIZE)
-#define LZO_CMP_SIZE (LZO_CMP_PAGES * PAGE_SIZE)
+/* Number of pages we need for compressed data (worst case). */
+#define CMP_PAGES DIV_ROUND_UP(bytes_worst_compress(UNC_SIZE) + \
+ CMP_HEADER, PAGE_SIZE)
+#define CMP_SIZE (CMP_PAGES * PAGE_SIZE)
/* Maximum number of threads for compression/decompression. */
-#define LZO_THREADS 3
+#define CMP_THREADS 3
/* Minimum/maximum number of pages for read buffering. */
-#define LZO_MIN_RD_PAGES 1024
-#define LZO_MAX_RD_PAGES 8192
-
+#define CMP_MIN_RD_PAGES 1024
+#define CMP_MAX_RD_PAGES 8192
/**
* save_image - save the suspend image data
@@ -561,6 +583,7 @@ static int save_image(struct swap_map_handle *handle,
nr_pages++;
}
err2 = hib_wait_io(&hb);
+ hib_finish_batch(&hb);
stop = ktime_get();
if (!ret)
ret = err2;
@@ -570,7 +593,7 @@ static int save_image(struct swap_map_handle *handle,
return ret;
}
-/**
+/*
* Structure used for CRC32.
*/
struct crc_data {
@@ -581,11 +604,11 @@ struct crc_data {
wait_queue_head_t go; /* start crc update */
wait_queue_head_t done; /* crc update done */
u32 *crc32; /* points to handle's crc32 */
- size_t *unc_len[LZO_THREADS]; /* uncompressed lengths */
- unsigned char *unc[LZO_THREADS]; /* uncompressed data */
+ size_t *unc_len[CMP_THREADS]; /* uncompressed lengths */
+ unsigned char *unc[CMP_THREADS]; /* uncompressed data */
};
-/**
+/*
* CRC32 update function that runs in its own thread.
*/
static int crc32_threadfn(void *data)
@@ -594,11 +617,11 @@ static int crc32_threadfn(void *data)
unsigned i;
while (1) {
- wait_event(d->go, atomic_read(&d->ready) ||
+ wait_event(d->go, atomic_read_acquire(&d->ready) ||
kthread_should_stop());
if (kthread_should_stop()) {
d->thr = NULL;
- atomic_set(&d->stop, 1);
+ atomic_set_release(&d->stop, 1);
wake_up(&d->done);
break;
}
@@ -607,16 +630,17 @@ static int crc32_threadfn(void *data)
for (i = 0; i < d->run_threads; i++)
*d->crc32 = crc32_le(*d->crc32,
d->unc[i], *d->unc_len[i]);
- atomic_set(&d->stop, 1);
+ atomic_set_release(&d->stop, 1);
wake_up(&d->done);
}
return 0;
}
-/**
- * Structure used for LZO data compression.
+/*
+ * Structure used for data compression.
*/
struct cmp_data {
struct task_struct *thr; /* thread */
+ struct crypto_comp *cc; /* crypto compressor stream */
atomic_t ready; /* ready to start flag */
atomic_t stop; /* ready to stop flag */
int ret; /* return code */
@@ -624,48 +648,55 @@ struct cmp_data {
wait_queue_head_t done; /* compression done */
size_t unc_len; /* uncompressed length */
size_t cmp_len; /* compressed length */
- unsigned char unc[LZO_UNC_SIZE]; /* uncompressed buffer */
- unsigned char cmp[LZO_CMP_SIZE]; /* compressed buffer */
- unsigned char wrk[LZO1X_1_MEM_COMPRESS]; /* compression workspace */
+ unsigned char unc[UNC_SIZE]; /* uncompressed buffer */
+ unsigned char cmp[CMP_SIZE]; /* compressed buffer */
};
-/**
+/* Indicates the image size after compression */
+static atomic_t compressed_size = ATOMIC_INIT(0);
+
+/*
* Compression function that runs in its own thread.
*/
-static int lzo_compress_threadfn(void *data)
+static int compress_threadfn(void *data)
{
struct cmp_data *d = data;
+ unsigned int cmp_len = 0;
while (1) {
- wait_event(d->go, atomic_read(&d->ready) ||
+ wait_event(d->go, atomic_read_acquire(&d->ready) ||
kthread_should_stop());
if (kthread_should_stop()) {
d->thr = NULL;
d->ret = -1;
- atomic_set(&d->stop, 1);
+ atomic_set_release(&d->stop, 1);
wake_up(&d->done);
break;
}
atomic_set(&d->ready, 0);
- d->ret = lzo1x_1_compress(d->unc, d->unc_len,
- d->cmp + LZO_HEADER, &d->cmp_len,
- d->wrk);
- atomic_set(&d->stop, 1);
+ cmp_len = CMP_SIZE - CMP_HEADER;
+ d->ret = crypto_comp_compress(d->cc, d->unc, d->unc_len,
+ d->cmp + CMP_HEADER,
+ &cmp_len);
+ d->cmp_len = cmp_len;
+
+ atomic_set(&compressed_size, atomic_read(&compressed_size) + d->cmp_len);
+ atomic_set_release(&d->stop, 1);
wake_up(&d->done);
}
return 0;
}
/**
- * save_image_lzo - Save the suspend image data compressed with LZO.
+ * save_compressed_image - Save the suspend image data after compression.
* @handle: Swap map handle to use for saving the image.
* @snapshot: Image to read data from.
* @nr_to_write: Number of pages to save.
*/
-static int save_image_lzo(struct swap_map_handle *handle,
- struct snapshot_handle *snapshot,
- unsigned int nr_to_write)
+static int save_compressed_image(struct swap_map_handle *handle,
+ struct snapshot_handle *snapshot,
+ unsigned int nr_to_write)
{
unsigned int m;
int ret = 0;
@@ -682,36 +713,35 @@ static int save_image_lzo(struct swap_map_handle *handle,
hib_init_batch(&hb);
+ atomic_set(&compressed_size, 0);
+
/*
* We'll limit the number of threads for compression to limit memory
* footprint.
*/
nr_threads = num_online_cpus() - 1;
- nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
+ nr_threads = clamp_val(nr_threads, 1, CMP_THREADS);
page = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH);
if (!page) {
- pr_err("Failed to allocate LZO page\n");
+ pr_err("Failed to allocate %s page\n", hib_comp_algo);
ret = -ENOMEM;
goto out_clean;
}
- data = vmalloc(array_size(nr_threads, sizeof(*data)));
+ data = vzalloc(array_size(nr_threads, sizeof(*data)));
if (!data) {
- pr_err("Failed to allocate LZO data\n");
+ pr_err("Failed to allocate %s data\n", hib_comp_algo);
ret = -ENOMEM;
goto out_clean;
}
- for (thr = 0; thr < nr_threads; thr++)
- memset(&data[thr], 0, offsetof(struct cmp_data, go));
- crc = kmalloc(sizeof(*crc), GFP_KERNEL);
+ crc = kzalloc(sizeof(*crc), GFP_KERNEL);
if (!crc) {
pr_err("Failed to allocate crc\n");
ret = -ENOMEM;
goto out_clean;
}
- memset(crc, 0, offsetof(struct crc_data, go));
/*
* Start the compression threads.
@@ -720,7 +750,14 @@ static int save_image_lzo(struct swap_map_handle *handle,
init_waitqueue_head(&data[thr].go);
init_waitqueue_head(&data[thr].done);
- data[thr].thr = kthread_run(lzo_compress_threadfn,
+ data[thr].cc = crypto_alloc_comp(hib_comp_algo, 0, 0);
+ if (IS_ERR_OR_NULL(data[thr].cc)) {
+ pr_err("Could not allocate comp stream %ld\n", PTR_ERR(data[thr].cc));
+ ret = -EFAULT;
+ goto out_clean;
+ }
+
+ data[thr].thr = kthread_run(compress_threadfn,
&data[thr],
"image_compress/%u", thr);
if (IS_ERR(data[thr].thr)) {
@@ -758,7 +795,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
*/
handle->reqd_free_pages = reqd_free_pages();
- pr_info("Using %u thread(s) for compression\n", nr_threads);
+ pr_info("Using %u thread(s) for %s compression\n", nr_threads, hib_comp_algo);
pr_info("Compressing and saving image data (%u pages)...\n",
nr_to_write);
m = nr_to_write / 10;
@@ -768,7 +805,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
start = ktime_get();
for (;;) {
for (thr = 0; thr < nr_threads; thr++) {
- for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
+ for (off = 0; off < UNC_SIZE; off += PAGE_SIZE) {
ret = snapshot_read_next(snapshot);
if (ret < 0)
goto out_finish;
@@ -789,7 +826,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
data[thr].unc_len = off;
- atomic_set(&data[thr].ready, 1);
+ atomic_set_release(&data[thr].ready, 1);
wake_up(&data[thr].go);
}
@@ -797,25 +834,25 @@ static int save_image_lzo(struct swap_map_handle *handle,
break;
crc->run_threads = thr;
- atomic_set(&crc->ready, 1);
+ atomic_set_release(&crc->ready, 1);
wake_up(&crc->go);
for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
wait_event(data[thr].done,
- atomic_read(&data[thr].stop));
+ atomic_read_acquire(&data[thr].stop));
atomic_set(&data[thr].stop, 0);
ret = data[thr].ret;
if (ret < 0) {
- pr_err("LZO compression failed\n");
+ pr_err("%s compression failed\n", hib_comp_algo);
goto out_finish;
}
if (unlikely(!data[thr].cmp_len ||
data[thr].cmp_len >
- lzo1x_worst_compress(data[thr].unc_len))) {
- pr_err("Invalid LZO compressed length\n");
+ bytes_worst_compress(data[thr].unc_len))) {
+ pr_err("Invalid %s compressed length\n", hib_comp_algo);
ret = -1;
goto out_finish;
}
@@ -831,7 +868,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
* read it.
*/
for (off = 0;
- off < LZO_HEADER + data[thr].cmp_len;
+ off < CMP_HEADER + data[thr].cmp_len;
off += PAGE_SIZE) {
memcpy(page, data[thr].cmp + off, PAGE_SIZE);
@@ -841,7 +878,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
}
}
- wait_event(crc->done, atomic_read(&crc->stop));
+ wait_event(crc->done, atomic_read_acquire(&crc->stop));
atomic_set(&crc->stop, 0);
}
@@ -853,16 +890,23 @@ out_finish:
if (!ret)
pr_info("Image saving done\n");
swsusp_show_speed(start, stop, nr_to_write, "Wrote");
+ pr_info("Image size after compression: %d kbytes\n",
+ (atomic_read(&compressed_size) / 1024));
+
out_clean:
+ hib_finish_batch(&hb);
if (crc) {
if (crc->thr)
kthread_stop(crc->thr);
kfree(crc);
}
if (data) {
- for (thr = 0; thr < nr_threads; thr++)
+ for (thr = 0; thr < nr_threads; thr++) {
if (data[thr].thr)
kthread_stop(data[thr].thr);
+ if (data[thr].cc)
+ crypto_free_comp(data[thr].cc);
+ }
vfree(data);
}
if (page) free_page((unsigned long)page);
@@ -874,7 +918,7 @@ out_clean:
* enough_swap - Make sure we have enough swap to save the image.
*
* Returns TRUE or FALSE after checking the total amount of swap
- * space avaiable from the resume partition.
+ * space available from the resume partition.
*/
static int enough_swap(unsigned int nr_pages)
@@ -932,16 +976,16 @@ int swsusp_write(unsigned int flags)
if (!error) {
error = (flags & SF_NOCOMPRESS_MODE) ?
save_image(&handle, &snapshot, pages - 1) :
- save_image_lzo(&handle, &snapshot, pages - 1);
+ save_compressed_image(&handle, &snapshot, pages - 1);
}
out_finish:
error = swap_writer_finish(&handle, flags, error);
return error;
}
-/**
+/*
* The following functions allow us to read data using a swap map
- * in a file-alike way
+ * in a file-like way.
*/
static void release_swap_reader(struct swap_map_handle *handle)
@@ -992,7 +1036,7 @@ static int get_swap_reader(struct swap_map_handle *handle,
return -ENOMEM;
}
- error = hib_submit_io(REQ_OP_READ, 0, offset, tmp->map, NULL);
+ error = hib_submit_io(REQ_OP_READ, offset, tmp->map, NULL);
if (error) {
release_swap_reader(handle);
return error;
@@ -1016,7 +1060,7 @@ static int swap_read_page(struct swap_map_handle *handle, void *buf,
offset = handle->cur->entries[handle->k];
if (!offset)
return -EFAULT;
- error = hib_submit_io(REQ_OP_READ, 0, offset, buf, hb);
+ error = hib_submit_io(REQ_OP_READ, offset, buf, hb);
if (error)
return error;
if (++handle->k >= MAP_PAGE_ENTRIES) {
@@ -1084,24 +1128,26 @@ static int load_image(struct swap_map_handle *handle,
nr_pages++;
}
err2 = hib_wait_io(&hb);
+ hib_finish_batch(&hb);
stop = ktime_get();
if (!ret)
ret = err2;
if (!ret) {
pr_info("Image loading done\n");
- snapshot_write_finalize(snapshot);
- if (!snapshot_image_loaded(snapshot))
+ ret = snapshot_write_finalize(snapshot);
+ if (!ret && !snapshot_image_loaded(snapshot))
ret = -ENODATA;
}
swsusp_show_speed(start, stop, nr_to_read, "Read");
return ret;
}
-/**
- * Structure used for LZO data decompression.
+/*
+ * Structure used for data decompression.
*/
struct dec_data {
struct task_struct *thr; /* thread */
+ struct crypto_comp *cc; /* crypto compressor stream */
atomic_t ready; /* ready to start flag */
atomic_t stop; /* ready to stop flag */
int ret; /* return code */
@@ -1109,51 +1155,54 @@ struct dec_data {
wait_queue_head_t done; /* decompression done */
size_t unc_len; /* uncompressed length */
size_t cmp_len; /* compressed length */
- unsigned char unc[LZO_UNC_SIZE]; /* uncompressed buffer */
- unsigned char cmp[LZO_CMP_SIZE]; /* compressed buffer */
+ unsigned char unc[UNC_SIZE]; /* uncompressed buffer */
+ unsigned char cmp[CMP_SIZE]; /* compressed buffer */
};
-/**
- * Deompression function that runs in its own thread.
+/*
+ * Decompression function that runs in its own thread.
*/
-static int lzo_decompress_threadfn(void *data)
+static int decompress_threadfn(void *data)
{
struct dec_data *d = data;
+ unsigned int unc_len = 0;
while (1) {
- wait_event(d->go, atomic_read(&d->ready) ||
+ wait_event(d->go, atomic_read_acquire(&d->ready) ||
kthread_should_stop());
if (kthread_should_stop()) {
d->thr = NULL;
d->ret = -1;
- atomic_set(&d->stop, 1);
+ atomic_set_release(&d->stop, 1);
wake_up(&d->done);
break;
}
atomic_set(&d->ready, 0);
- d->unc_len = LZO_UNC_SIZE;
- d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
- d->unc, &d->unc_len);
+ unc_len = UNC_SIZE;
+ d->ret = crypto_comp_decompress(d->cc, d->cmp + CMP_HEADER, d->cmp_len,
+ d->unc, &unc_len);
+ d->unc_len = unc_len;
+
if (clean_pages_on_decompress)
flush_icache_range((unsigned long)d->unc,
(unsigned long)d->unc + d->unc_len);
- atomic_set(&d->stop, 1);
+ atomic_set_release(&d->stop, 1);
wake_up(&d->done);
}
return 0;
}
/**
- * load_image_lzo - Load compressed image data and decompress them with LZO.
+ * load_compressed_image - Load compressed image data and decompress it.
* @handle: Swap map handle to use for loading data.
* @snapshot: Image to copy uncompressed data into.
* @nr_to_read: Number of pages to load.
*/
-static int load_image_lzo(struct swap_map_handle *handle,
- struct snapshot_handle *snapshot,
- unsigned int nr_to_read)
+static int load_compressed_image(struct swap_map_handle *handle,
+ struct snapshot_handle *snapshot,
+ unsigned int nr_to_read)
{
unsigned int m;
int ret = 0;
@@ -1178,31 +1227,28 @@ static int load_image_lzo(struct swap_map_handle *handle,
* footprint.
*/
nr_threads = num_online_cpus() - 1;
- nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
+ nr_threads = clamp_val(nr_threads, 1, CMP_THREADS);
- page = vmalloc(array_size(LZO_MAX_RD_PAGES, sizeof(*page)));
+ page = vmalloc(array_size(CMP_MAX_RD_PAGES, sizeof(*page)));
if (!page) {
- pr_err("Failed to allocate LZO page\n");
+ pr_err("Failed to allocate %s page\n", hib_comp_algo);
ret = -ENOMEM;
goto out_clean;
}
- data = vmalloc(array_size(nr_threads, sizeof(*data)));
+ data = vzalloc(array_size(nr_threads, sizeof(*data)));
if (!data) {
- pr_err("Failed to allocate LZO data\n");
+ pr_err("Failed to allocate %s data\n", hib_comp_algo);
ret = -ENOMEM;
goto out_clean;
}
- for (thr = 0; thr < nr_threads; thr++)
- memset(&data[thr], 0, offsetof(struct dec_data, go));
- crc = kmalloc(sizeof(*crc), GFP_KERNEL);
+ crc = kzalloc(sizeof(*crc), GFP_KERNEL);
if (!crc) {
pr_err("Failed to allocate crc\n");
ret = -ENOMEM;
goto out_clean;
}
- memset(crc, 0, offsetof(struct crc_data, go));
clean_pages_on_decompress = true;
@@ -1213,7 +1259,14 @@ static int load_image_lzo(struct swap_map_handle *handle,
init_waitqueue_head(&data[thr].go);
init_waitqueue_head(&data[thr].done);
- data[thr].thr = kthread_run(lzo_decompress_threadfn,
+ data[thr].cc = crypto_alloc_comp(hib_comp_algo, 0, 0);
+ if (IS_ERR_OR_NULL(data[thr].cc)) {
+ pr_err("Could not allocate comp stream %ld\n", PTR_ERR(data[thr].cc));
+ ret = -EFAULT;
+ goto out_clean;
+ }
+
+ data[thr].thr = kthread_run(decompress_threadfn,
&data[thr],
"image_decompress/%u", thr);
if (IS_ERR(data[thr].thr)) {
@@ -1254,18 +1307,18 @@ static int load_image_lzo(struct swap_map_handle *handle,
*/
if (low_free_pages() > snapshot_get_image_size())
read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
- read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);
+ read_pages = clamp_val(read_pages, CMP_MIN_RD_PAGES, CMP_MAX_RD_PAGES);
for (i = 0; i < read_pages; i++) {
- page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
+ page[i] = (void *)__get_free_page(i < CMP_PAGES ?
GFP_NOIO | __GFP_HIGH :
GFP_NOIO | __GFP_NOWARN |
__GFP_NORETRY);
if (!page[i]) {
- if (i < LZO_CMP_PAGES) {
+ if (i < CMP_PAGES) {
ring_size = i;
- pr_err("Failed to allocate LZO pages\n");
+ pr_err("Failed to allocate %s pages\n", hib_comp_algo);
ret = -ENOMEM;
goto out_clean;
} else {
@@ -1275,7 +1328,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
}
want = ring_size = i;
- pr_info("Using %u thread(s) for decompression\n", nr_threads);
+ pr_info("Using %u thread(s) for %s decompression\n", nr_threads, hib_comp_algo);
pr_info("Loading and decompressing image data (%u pages)...\n",
nr_to_read);
m = nr_to_read / 10;
@@ -1327,7 +1380,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
}
if (crc->run_threads) {
- wait_event(crc->done, atomic_read(&crc->stop));
+ wait_event(crc->done, atomic_read_acquire(&crc->stop));
atomic_set(&crc->stop, 0);
crc->run_threads = 0;
}
@@ -1336,13 +1389,13 @@ static int load_image_lzo(struct swap_map_handle *handle,
data[thr].cmp_len = *(size_t *)page[pg];
if (unlikely(!data[thr].cmp_len ||
data[thr].cmp_len >
- lzo1x_worst_compress(LZO_UNC_SIZE))) {
- pr_err("Invalid LZO compressed length\n");
+ bytes_worst_compress(UNC_SIZE))) {
+ pr_err("Invalid %s compressed length\n", hib_comp_algo);
ret = -1;
goto out_finish;
}
- need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER,
+ need = DIV_ROUND_UP(data[thr].cmp_len + CMP_HEADER,
PAGE_SIZE);
if (need > have) {
if (eof > 1) {
@@ -1353,7 +1406,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
}
for (off = 0;
- off < LZO_HEADER + data[thr].cmp_len;
+ off < CMP_HEADER + data[thr].cmp_len;
off += PAGE_SIZE) {
memcpy(data[thr].cmp + off,
page[pg], PAGE_SIZE);
@@ -1363,14 +1416,14 @@ static int load_image_lzo(struct swap_map_handle *handle,
pg = 0;
}
- atomic_set(&data[thr].ready, 1);
+ atomic_set_release(&data[thr].ready, 1);
wake_up(&data[thr].go);
}
/*
* Wait for more data while we are decompressing.
*/
- if (have < LZO_CMP_PAGES && asked) {
+ if (have < CMP_PAGES && asked) {
ret = hib_wait_io(&hb);
if (ret)
goto out_finish;
@@ -1382,20 +1435,20 @@ static int load_image_lzo(struct swap_map_handle *handle,
for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
wait_event(data[thr].done,
- atomic_read(&data[thr].stop));
+ atomic_read_acquire(&data[thr].stop));
atomic_set(&data[thr].stop, 0);
ret = data[thr].ret;
if (ret < 0) {
- pr_err("LZO decompression failed\n");
+ pr_err("%s decompression failed\n", hib_comp_algo);
goto out_finish;
}
if (unlikely(!data[thr].unc_len ||
- data[thr].unc_len > LZO_UNC_SIZE ||
- data[thr].unc_len & (PAGE_SIZE - 1))) {
- pr_err("Invalid LZO uncompressed length\n");
+ data[thr].unc_len > UNC_SIZE ||
+ data[thr].unc_len & (PAGE_SIZE - 1))) {
+ pr_err("Invalid %s uncompressed length\n", hib_comp_algo);
ret = -1;
goto out_finish;
}
@@ -1413,7 +1466,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
ret = snapshot_write_next(snapshot);
if (ret <= 0) {
crc->run_threads = thr + 1;
- atomic_set(&crc->ready, 1);
+ atomic_set_release(&crc->ready, 1);
wake_up(&crc->go);
goto out_finish;
}
@@ -1421,20 +1474,20 @@ static int load_image_lzo(struct swap_map_handle *handle,
}
crc->run_threads = thr;
- atomic_set(&crc->ready, 1);
+ atomic_set_release(&crc->ready, 1);
wake_up(&crc->go);
}
out_finish:
if (crc->run_threads) {
- wait_event(crc->done, atomic_read(&crc->stop));
+ wait_event(crc->done, atomic_read_acquire(&crc->stop));
atomic_set(&crc->stop, 0);
}
stop = ktime_get();
if (!ret) {
pr_info("Image loading done\n");
- snapshot_write_finalize(snapshot);
- if (!snapshot_image_loaded(snapshot))
+ ret = snapshot_write_finalize(snapshot);
+ if (!ret && !snapshot_image_loaded(snapshot))
ret = -ENODATA;
if (!ret) {
if (swsusp_header->flags & SF_CRC32_MODE) {
@@ -1447,6 +1500,7 @@ out_finish:
}
swsusp_show_speed(start, stop, nr_to_read, "Read");
out_clean:
+ hib_finish_batch(&hb);
for (i = 0; i < ring_size; i++)
free_page((unsigned long)page[i]);
if (crc) {
@@ -1455,9 +1509,12 @@ out_clean:
kfree(crc);
}
if (data) {
- for (thr = 0; thr < nr_threads; thr++)
+ for (thr = 0; thr < nr_threads; thr++) {
if (data[thr].thr)
kthread_stop(data[thr].thr);
+ if (data[thr].cc)
+ crypto_free_comp(data[thr].cc);
+ }
vfree(data);
}
vfree(page);
@@ -1491,7 +1548,7 @@ int swsusp_read(unsigned int *flags_p)
if (!error) {
error = (*flags_p & SF_NOCOMPRESS_MODE) ?
load_image(&handle, &snapshot, header->pages - 1) :
- load_image_lzo(&handle, &snapshot, header->pages - 1);
+ load_compressed_image(&handle, &snapshot, header->pages - 1);
}
swap_reader_finish(&handle);
end:
@@ -1502,42 +1559,52 @@ end:
return error;
}
+static void *swsusp_holder;
+
/**
- * swsusp_check - Check for swsusp signature in the resume device
+ * swsusp_check - Open the resume device and check for the swsusp signature.
+ * @exclusive: Open the resume device exclusively.
*/
-int swsusp_check(void)
+int swsusp_check(bool exclusive)
{
+ void *holder = exclusive ? &swsusp_holder : NULL;
int error;
- hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
- FMODE_READ, NULL);
- if (!IS_ERR(hib_resume_bdev)) {
- set_blocksize(hib_resume_bdev, PAGE_SIZE);
+ hib_resume_bdev_file = bdev_file_open_by_dev(swsusp_resume_device,
+ BLK_OPEN_READ, holder, NULL);
+ if (!IS_ERR(hib_resume_bdev_file)) {
+ set_blocksize(file_bdev(hib_resume_bdev_file), PAGE_SIZE);
clear_page(swsusp_header);
- error = hib_submit_io(REQ_OP_READ, 0,
- swsusp_resume_block,
+ error = hib_submit_io(REQ_OP_READ, swsusp_resume_block,
swsusp_header, NULL);
if (error)
goto put;
if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
+ swsusp_header_flags = swsusp_header->flags;
/* Reset swap signature now */
- error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
+ error = hib_submit_io(REQ_OP_WRITE | REQ_SYNC,
swsusp_resume_block,
swsusp_header, NULL);
} else {
error = -EINVAL;
}
+ if (!error && swsusp_header->flags & SF_HW_SIG &&
+ swsusp_header->hw_sig != swsusp_hardware_signature) {
+ pr_info("Suspend image hardware signature mismatch (%08x now %08x); aborting resume.\n",
+ swsusp_header->hw_sig, swsusp_hardware_signature);
+ error = -EINVAL;
+ }
put:
if (error)
- blkdev_put(hib_resume_bdev, FMODE_READ);
+ fput(hib_resume_bdev_file);
else
pr_debug("Image signature found, resuming\n");
} else {
- error = PTR_ERR(hib_resume_bdev);
+ error = PTR_ERR(hib_resume_bdev_file);
}
if (error)
@@ -1547,17 +1614,17 @@ put:
}
/**
- * swsusp_close - close swap device.
+ * swsusp_close - close resume device.
*/
-void swsusp_close(fmode_t mode)
+void swsusp_close(void)
{
- if (IS_ERR(hib_resume_bdev)) {
+ if (IS_ERR(hib_resume_bdev_file)) {
pr_debug("Image device not initialised\n");
return;
}
- blkdev_put(hib_resume_bdev, mode);
+ fput(hib_resume_bdev_file);
}
/**
@@ -1569,11 +1636,11 @@ int swsusp_unmark(void)
{
int error;
- hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block,
- swsusp_header, NULL);
+ hib_submit_io(REQ_OP_READ, swsusp_resume_block,
+ swsusp_header, NULL);
if (!memcmp(HIBERNATE_SIG,swsusp_header->sig, 10)) {
memcpy(swsusp_header->sig,swsusp_header->orig_sig, 10);
- error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
+ error = hib_submit_io(REQ_OP_WRITE | REQ_SYNC,
swsusp_resume_block,
swsusp_header, NULL);
} else {
diff --git a/kernel/power/user.c b/kernel/power/user.c
index d5eedc2baa2a..3aa41ba22129 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -26,6 +26,7 @@
#include "power.h"
+static bool need_wait;
static struct snapshot_data {
struct snapshot_handle handle;
@@ -35,23 +36,24 @@ static struct snapshot_data {
bool ready;
bool platform_support;
bool free_bitmaps;
- struct inode *bd_inode;
+ dev_t dev;
} snapshot_state;
-int is_hibernate_resume_dev(const struct inode *bd_inode)
+int is_hibernate_resume_dev(dev_t dev)
{
- return hibernation_available() && snapshot_state.bd_inode == bd_inode;
+ return hibernation_available() && snapshot_state.dev == dev;
}
static int snapshot_open(struct inode *inode, struct file *filp)
{
struct snapshot_data *data;
- int error, nr_calls = 0;
+ unsigned int sleep_flags;
+ int error;
if (!hibernation_available())
return -EPERM;
- lock_system_sleep();
+ sleep_flags = lock_system_sleep();
if (!hibernate_acquire()) {
error = -EBUSY;
@@ -69,31 +71,24 @@ static int snapshot_open(struct inode *inode, struct file *filp)
memset(&data->handle, 0, sizeof(struct snapshot_handle));
if ((filp->f_flags & O_ACCMODE) == O_RDONLY) {
/* Hibernating. The image device should be accessible. */
- data->swap = swsusp_resume_device ?
- swap_type_of(swsusp_resume_device, 0, NULL) : -1;
+ data->swap = swap_type_of(swsusp_resume_device, 0);
data->mode = O_RDONLY;
data->free_bitmaps = false;
- error = __pm_notifier_call_chain(PM_HIBERNATION_PREPARE, -1, &nr_calls);
- if (error)
- __pm_notifier_call_chain(PM_POST_HIBERNATION, --nr_calls, NULL);
+ error = pm_notifier_call_chain_robust(PM_HIBERNATION_PREPARE, PM_POST_HIBERNATION);
} else {
/*
* Resuming. We may need to wait for the image device to
* appear.
*/
- wait_for_device_probe();
+ need_wait = true;
data->swap = -1;
data->mode = O_WRONLY;
- error = __pm_notifier_call_chain(PM_RESTORE_PREPARE, -1, &nr_calls);
+ error = pm_notifier_call_chain_robust(PM_RESTORE_PREPARE, PM_POST_RESTORE);
if (!error) {
error = create_basic_memory_bitmaps();
data->free_bitmaps = !error;
- } else
- nr_calls--;
-
- if (error)
- __pm_notifier_call_chain(PM_POST_RESTORE, nr_calls, NULL);
+ }
}
if (error)
hibernate_release();
@@ -101,10 +96,10 @@ static int snapshot_open(struct inode *inode, struct file *filp)
data->frozen = false;
data->ready = false;
data->platform_support = false;
- data->bd_inode = NULL;
+ data->dev = 0;
Unlock:
- unlock_system_sleep();
+ unlock_system_sleep(sleep_flags);
return error;
}
@@ -112,12 +107,13 @@ static int snapshot_open(struct inode *inode, struct file *filp)
static int snapshot_release(struct inode *inode, struct file *filp)
{
struct snapshot_data *data;
+ unsigned int sleep_flags;
- lock_system_sleep();
+ sleep_flags = lock_system_sleep();
swsusp_free();
data = filp->private_data;
- data->bd_inode = NULL;
+ data->dev = 0;
free_all_swap_pages(data->swap);
if (data->frozen) {
pm_restore_gfp_mask();
@@ -130,7 +126,7 @@ static int snapshot_release(struct inode *inode, struct file *filp)
PM_POST_HIBERNATION : PM_POST_RESTORE);
hibernate_release();
- unlock_system_sleep();
+ unlock_system_sleep(sleep_flags);
return 0;
}
@@ -138,11 +134,12 @@ static int snapshot_release(struct inode *inode, struct file *filp)
static ssize_t snapshot_read(struct file *filp, char __user *buf,
size_t count, loff_t *offp)
{
+ loff_t pg_offp = *offp & ~PAGE_MASK;
struct snapshot_data *data;
+ unsigned int sleep_flags;
ssize_t res;
- loff_t pg_offp = *offp & ~PAGE_MASK;
- lock_system_sleep();
+ sleep_flags = lock_system_sleep();
data = filp->private_data;
if (!data->ready) {
@@ -163,7 +160,7 @@ static ssize_t snapshot_read(struct file *filp, char __user *buf,
*offp += res;
Unlock:
- unlock_system_sleep();
+ unlock_system_sleep(sleep_flags);
return res;
}
@@ -171,11 +168,17 @@ static ssize_t snapshot_read(struct file *filp, char __user *buf,
static ssize_t snapshot_write(struct file *filp, const char __user *buf,
size_t count, loff_t *offp)
{
+ loff_t pg_offp = *offp & ~PAGE_MASK;
struct snapshot_data *data;
+ unsigned long sleep_flags;
ssize_t res;
- loff_t pg_offp = *offp & ~PAGE_MASK;
- lock_system_sleep();
+ if (need_wait) {
+ wait_for_device_probe();
+ need_wait = false;
+ }
+
+ sleep_flags = lock_system_sleep();
data = filp->private_data;
@@ -184,7 +187,7 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf,
if (res <= 0)
goto unlock;
} else {
- res = PAGE_SIZE - pg_offp;
+ res = PAGE_SIZE;
}
if (!data_of(data->handle)) {
@@ -197,7 +200,7 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf,
if (res > 0)
*offp += res;
unlock:
- unlock_system_sleep();
+ unlock_system_sleep(sleep_flags);
return res;
}
@@ -210,7 +213,6 @@ struct compat_resume_swap_area {
static int snapshot_set_swap_area(struct snapshot_data *data,
void __user *argp)
{
- struct block_device *bdev;
sector_t offset;
dev_t swdev;
@@ -237,16 +239,10 @@ static int snapshot_set_swap_area(struct snapshot_data *data,
* User space encodes device types as two-byte values,
* so we need to recode them
*/
- if (!swdev) {
- data->swap = -1;
- return -EINVAL;
- }
- data->swap = swap_type_of(swdev, offset, &bdev);
+ data->swap = swap_type_of(swdev, offset);
if (data->swap < 0)
- return -ENODEV;
-
- data->bd_inode = bdev->bd_inode;
- bdput(bdev);
+ return swdev ? -ENODEV : -EINVAL;
+ data->dev = swdev;
return 0;
}
@@ -258,6 +254,11 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
loff_t size;
sector_t offset;
+ if (need_wait) {
+ wait_for_device_probe();
+ need_wait = false;
+ }
+
if (_IOC_TYPE(cmd) != SNAPSHOT_IOC_MAGIC)
return -ENOTTY;
if (_IOC_NR(cmd) > SNAPSHOT_IOC_MAXNR)
@@ -316,7 +317,9 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
break;
case SNAPSHOT_ATOMIC_RESTORE:
- snapshot_write_finalize(&data->handle);
+ error = snapshot_write_finalize(&data->handle);
+ if (error)
+ break;
if (data->mode != O_WRONLY || !data->frozen ||
!snapshot_image_loaded(&data->handle)) {
error = -EPERM;
diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c
index 105df4dfc783..52571dcad768 100644
--- a/kernel/power/wakelock.c
+++ b/kernel/power/wakelock.c
@@ -39,23 +39,20 @@ ssize_t pm_show_wakelocks(char *buf, bool show_active)
{
struct rb_node *node;
struct wakelock *wl;
- char *str = buf;
- char *end = buf + PAGE_SIZE;
+ int len = 0;
mutex_lock(&wakelocks_lock);
for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) {
wl = rb_entry(node, struct wakelock, node);
if (wl->ws->active == show_active)
- str += scnprintf(str, end - str, "%s ", wl->name);
+ len += sysfs_emit_at(buf, len, "%s ", wl->name);
}
- if (str > buf)
- str--;
- str += scnprintf(str, end - str, "\n");
+ len += sysfs_emit_at(buf, len, "\n");
mutex_unlock(&wakelocks_lock);
- return (str - buf);
+ return len;
}
#if CONFIG_PM_WAKELOCKS_LIMIT > 0