Diffstat (limited to 'mm/zswap.c')
 mm/zswap.c | 1117 ++++++++++++++++++++++++++++++++++-------------------------
 1 file changed, 598 insertions(+), 519 deletions(-)
diff --git a/mm/zswap.c b/mm/zswap.c
index a50e2986cd2f..5d0f8b13a958 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -25,7 +25,6 @@
#include <linux/scatterlist.h>
#include <linux/mempolicy.h>
#include <linux/mempool.h>
-#include <linux/zpool.h>
#include <crypto/acompress.h>
#include <linux/zswap.h>
#include <linux/mm_types.h>
@@ -35,6 +34,7 @@
#include <linux/pagemap.h>
#include <linux/workqueue.h>
#include <linux/list_lru.h>
+#include <linux/zsmalloc.h>
#include "swap.h"
#include "internal.h"
@@ -42,10 +42,10 @@
/*********************************
* statistics
**********************************/
-/* The number of compressed pages currently stored in zswap */
-atomic_t zswap_stored_pages = ATOMIC_INIT(0);
-/* The number of same-value filled pages currently stored in zswap */
-static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);
+/* The number of pages currently stored in zswap */
+atomic_long_t zswap_stored_pages = ATOMIC_LONG_INIT(0);
+/* The number of incompressible pages currently stored in zswap */
+static atomic_long_t zswap_stored_incompressible_pages = ATOMIC_LONG_INIT(0);
/*
* The statistics below are not protected from concurrent access for
@@ -64,6 +64,8 @@ static u64 zswap_reject_reclaim_fail;
static u64 zswap_reject_compress_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
+/* Load or writeback failed due to decompression failure */
+static u64 zswap_decompress_fail;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
@@ -83,6 +85,7 @@ static bool zswap_pool_reached_full;
static int zswap_setup(void);
/* Enable/disable zswap */
+static DEFINE_STATIC_KEY_MAYBE(CONFIG_ZSWAP_DEFAULT_ON, zswap_ever_enabled);
static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
static int zswap_enabled_param_set(const char *,
const struct kernel_param *);
@@ -104,16 +107,6 @@ static const struct kernel_param_ops zswap_compressor_param_ops = {
module_param_cb(compressor, &zswap_compressor_param_ops,
&zswap_compressor, 0644);
-/* Compressed storage zpool to use */
-static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
-static int zswap_zpool_param_set(const char *, const struct kernel_param *);
-static const struct kernel_param_ops zswap_zpool_param_ops = {
- .set = zswap_zpool_param_set,
- .get = param_get_charp,
- .free = param_free_charp,
-};
-module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);
-
/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);
@@ -123,19 +116,21 @@ static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
uint, 0644);
-/* Number of zpools in zswap_pool (empirically determined for scalability) */
-#define ZSWAP_NR_ZPOOLS 32
-
/* Enable/disable memory pressure-based shrinker. */
static bool zswap_shrinker_enabled = IS_ENABLED(
CONFIG_ZSWAP_SHRINKER_DEFAULT_ON);
module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644);
-bool is_zswap_enabled(void)
+bool zswap_is_enabled(void)
{
return zswap_enabled;
}
+bool zswap_never_enabled(void)
+{
+ return !static_branch_maybe(CONFIG_ZSWAP_DEFAULT_ON, &zswap_ever_enabled);
+}
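
zswap_ever_enabled is a one-way static branch: zswap_setup() flips it on when the
first pool is created (see the end of this patch) and it is never cleared, so
zswap_never_enabled() compiles down to a patched jump. A minimal sketch of the
intended fast-path use, mirroring the check zswap_load() gains later in this patch:

    /* Illustrative fast-path guard, as used in zswap_load() below. */
    if (zswap_never_enabled())
            return -ENOENT;     /* zswap was never on: skip the xarray lookup */
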
+
/*********************************
* data structures
**********************************/
@@ -156,7 +151,7 @@ struct crypto_acomp_ctx {
* needs to be verified that it's still valid in the tree.
*/
struct zswap_pool {
- struct zpool *zpools[ZSWAP_NR_ZPOOLS];
+ struct zs_pool *zs_pool;
struct crypto_acomp_ctx __percpu *acomp_ctx;
struct percpu_ref ref;
struct list_head list;
@@ -180,24 +175,24 @@ static struct shrinker *zswap_shrinker;
* This structure contains the metadata for tracking a single compressed
* page within zswap.
*
- * swpentry - associated swap entry, the offset indexes into the red-black tree
+ * swpentry - associated swap entry, the offset indexes into the xarray
* length - the length in bytes of the compressed page data. Needed during
- * decompression. For a same value filled page length is 0, and both
- * pool and lru are invalid and must be ignored.
+ * decompression.
+ * referenced - true if the entry recently entered the zswap pool. Unset by the
+ * writeback logic. The entry is only reclaimed by the writeback
+ * logic if referenced is unset. See comments in the shrinker
+ * section for context.
* pool - the zswap_pool the entry's data is in
- * handle - zpool allocation handle that stores the compressed page data
- * value - value of the same-value filled pages which have same content
+ * handle - zsmalloc allocation handle that stores the compressed page data
* objcg - the obj_cgroup that the compressed memory is charged to
* lru - handle to the pool's lru used to evict pages.
*/
struct zswap_entry {
swp_entry_t swpentry;
unsigned int length;
+ bool referenced;
struct zswap_pool *pool;
- union {
- unsigned long handle;
- unsigned long value;
- };
+ unsigned long handle;
struct obj_cgroup *objcg;
struct list_head lru;
};
@@ -209,7 +204,7 @@ static unsigned int nr_zswap_trees[MAX_SWAPFILES];
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);
-/* pool counter to provide unique names to zpool */
+/* pool counter to provide unique names to zsmalloc */
static atomic_t zswap_pools_count = ATOMIC_INIT(0);
enum zswap_init_type {
@@ -230,56 +225,41 @@ static bool zswap_has_pool;
* helpers and fwd declarations
**********************************/
+/* One zswap tree for every 64M of swap space */
+#define ZSWAP_ADDRESS_SPACE_SHIFT 14
+#define ZSWAP_ADDRESS_SPACE_PAGES (1 << ZSWAP_ADDRESS_SPACE_SHIFT)
static inline struct xarray *swap_zswap_tree(swp_entry_t swp)
{
return &zswap_trees[swp_type(swp)][swp_offset(swp)
- >> SWAP_ADDRESS_SPACE_SHIFT];
+ >> ZSWAP_ADDRESS_SPACE_SHIFT];
}
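
For context on the constants above: with 4 KiB pages, 1 << ZSWAP_ADDRESS_SPACE_SHIFT
= 16384 slots per xarray, i.e. one tree per 64 MiB of swap space. A sketch of the
sizing arithmetic (zswap_nr_trees_example() is a hypothetical helper; zswap_swapon()
later in this patch does the same computation):

    /* Hypothetical helper mirroring the DIV_ROUND_UP() in zswap_swapon(). */
    static unsigned int zswap_nr_trees_example(unsigned long nr_swap_pages)
    {
            /* 16384 pages * 4 KiB/page = 64 MiB of swap per xarray */
            return DIV_ROUND_UP(nr_swap_pages, ZSWAP_ADDRESS_SPACE_PAGES);
    }
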
-#define zswap_pool_debug(msg, p) \
- pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name, \
- zpool_get_type((p)->zpools[0]))
+#define zswap_pool_debug(msg, p) \
+ pr_debug("%s pool %s\n", msg, (p)->tfm_name)
/*********************************
* pool functions
**********************************/
static void __zswap_pool_empty(struct percpu_ref *ref);
-static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
+static struct zswap_pool *zswap_pool_create(char *compressor)
{
- int i;
struct zswap_pool *pool;
char name[38]; /* 'zswap' + 32 char (max) num + \0 */
- gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
- int ret;
+ int ret, cpu;
- if (!zswap_has_pool) {
- /* if either are unset, pool initialization failed, and we
- * need both params to be set correctly before trying to
- * create a pool.
- */
- if (!strcmp(type, ZSWAP_PARAM_UNSET))
- return NULL;
- if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
- return NULL;
- }
+ if (!zswap_has_pool && !strcmp(compressor, ZSWAP_PARAM_UNSET))
+ return NULL;
pool = kzalloc(sizeof(*pool), GFP_KERNEL);
if (!pool)
return NULL;
- for (i = 0; i < ZSWAP_NR_ZPOOLS; i++) {
- /* unique name for each pool specifically required by zsmalloc */
- snprintf(name, 38, "zswap%x",
- atomic_inc_return(&zswap_pools_count));
-
- pool->zpools[i] = zpool_create_pool(type, name, gfp);
- if (!pool->zpools[i]) {
- pr_err("%s zpool not available\n", type);
- goto error;
- }
- }
- pr_debug("using %s zpool\n", zpool_get_type(pool->zpools[0]));
+ /* unique name for each pool specifically required by zsmalloc */
+ snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
+ pool->zs_pool = zs_create_pool(name);
+ if (!pool->zs_pool)
+ goto error;
strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
@@ -289,6 +269,9 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
goto error;
}
+ for_each_possible_cpu(cpu)
+ mutex_init(&per_cpu_ptr(pool->acomp_ctx, cpu)->mutex);
+
ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
&pool->node);
if (ret)
@@ -312,65 +295,39 @@ ref_fail:
error:
if (pool->acomp_ctx)
free_percpu(pool->acomp_ctx);
- while (i--)
- zpool_destroy_pool(pool->zpools[i]);
+ if (pool->zs_pool)
+ zs_destroy_pool(pool->zs_pool);
kfree(pool);
return NULL;
}
static struct zswap_pool *__zswap_pool_create_fallback(void)
{
- bool has_comp, has_zpool;
-
- has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
- if (!has_comp && strcmp(zswap_compressor,
- CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
+ if (!crypto_has_acomp(zswap_compressor, 0, 0) &&
+ strcmp(zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
pr_err("compressor %s not available, using default %s\n",
zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
param_free_charp(&zswap_compressor);
zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
- has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
- }
- if (!has_comp) {
- pr_err("default compressor %s not available\n",
- zswap_compressor);
- param_free_charp(&zswap_compressor);
- zswap_compressor = ZSWAP_PARAM_UNSET;
- }
-
- has_zpool = zpool_has_pool(zswap_zpool_type);
- if (!has_zpool && strcmp(zswap_zpool_type,
- CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
- pr_err("zpool %s not available, using default %s\n",
- zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
- param_free_charp(&zswap_zpool_type);
- zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
- has_zpool = zpool_has_pool(zswap_zpool_type);
- }
- if (!has_zpool) {
- pr_err("default zpool %s not available\n",
- zswap_zpool_type);
- param_free_charp(&zswap_zpool_type);
- zswap_zpool_type = ZSWAP_PARAM_UNSET;
}
- if (!has_comp || !has_zpool)
+ /* Default compressor should be available. Kconfig bug? */
+ if (WARN_ON_ONCE(!crypto_has_acomp(zswap_compressor, 0, 0))) {
+ zswap_compressor = ZSWAP_PARAM_UNSET;
return NULL;
+ }
- return zswap_pool_create(zswap_zpool_type, zswap_compressor);
+ return zswap_pool_create(zswap_compressor);
}
static void zswap_pool_destroy(struct zswap_pool *pool)
{
- int i;
-
zswap_pool_debug("destroying", pool);
cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
free_percpu(pool->acomp_ctx);
- for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
- zpool_destroy_pool(pool->zpools[i]);
+ zs_destroy_pool(pool->zs_pool);
kfree(pool);
}
@@ -409,7 +366,7 @@ static void __zswap_pool_empty(struct percpu_ref *ref)
spin_unlock_bh(&zswap_pools_lock);
}
-static int __must_check zswap_pool_get(struct zswap_pool *pool)
+static int __must_check zswap_pool_tryget(struct zswap_pool *pool)
{
if (!pool)
return 0;
@@ -417,6 +374,12 @@ static int __must_check zswap_pool_get(struct zswap_pool *pool)
return percpu_ref_tryget(&pool->ref);
}
+/* The caller must already have a reference. */
+static void zswap_pool_get(struct zswap_pool *pool)
+{
+ percpu_ref_get(&pool->ref);
+}
+
static void zswap_pool_put(struct zswap_pool *pool)
{
percpu_ref_put(&pool->ref);
@@ -447,7 +410,7 @@ static struct zswap_pool *zswap_pool_current_get(void)
rcu_read_lock();
pool = __zswap_pool_current();
- if (!zswap_pool_get(pool))
+ if (!zswap_pool_tryget(pool))
pool = NULL;
rcu_read_unlock();
@@ -456,7 +419,7 @@ static struct zswap_pool *zswap_pool_current_get(void)
}
/* type and compressor must be null-terminated */
-static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
+static struct zswap_pool *zswap_pool_find_get(char *compressor)
{
struct zswap_pool *pool;
@@ -465,11 +428,8 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
list_for_each_entry_rcu(pool, &zswap_pools, list) {
if (strcmp(pool->tfm_name, compressor))
continue;
- /* all zpools share the same type */
- if (strcmp(zpool_get_type(pool->zpools[0]), type))
- continue;
/* if we can't get it, it's about to be destroyed */
- if (!zswap_pool_get(pool))
+ if (!zswap_pool_tryget(pool))
continue;
return pool;
}
@@ -493,12 +453,8 @@ unsigned long zswap_total_pages(void)
unsigned long total = 0;
rcu_read_lock();
- list_for_each_entry_rcu(pool, &zswap_pools, list) {
- int i;
-
- for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
- total += zpool_get_total_pages(pool->zpools[i]);
- }
+ list_for_each_entry_rcu(pool, &zswap_pools, list)
+ total += zs_get_total_pages(pool->zs_pool);
rcu_read_unlock();
return total;
@@ -523,33 +479,22 @@ static bool zswap_check_limits(void)
* param callbacks
**********************************/
-static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
-{
- /* no change required */
- if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
- return false;
- return true;
-}
-
-/* val must be a null-terminated string */
-static int __zswap_param_set(const char *val, const struct kernel_param *kp,
- char *type, char *compressor)
+static int zswap_compressor_param_set(const char *val, const struct kernel_param *kp)
{
struct zswap_pool *pool, *put_pool = NULL;
char *s = strstrip((char *)val);
+ bool create_pool = false;
int ret = 0;
- bool new_pool = false;
mutex_lock(&zswap_init_lock);
switch (zswap_init_state) {
case ZSWAP_UNINIT:
- /* if this is load-time (pre-init) param setting,
- * don't create a pool; that's done during init.
- */
+ /* Handled in zswap_setup() */
ret = param_set_charp(s, kp);
break;
case ZSWAP_INIT_SUCCEED:
- new_pool = zswap_pool_changed(s, kp);
+ if (!zswap_has_pool || strcmp(s, *(char **)kp->arg))
+ create_pool = true;
break;
case ZSWAP_INIT_FAILED:
pr_err("can't set param, initialization failed\n");
@@ -557,30 +502,17 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
}
mutex_unlock(&zswap_init_lock);
- /* no need to create a new pool, return directly */
- if (!new_pool)
+ if (!create_pool)
return ret;
- if (!type) {
- if (!zpool_has_pool(s)) {
- pr_err("zpool %s not available\n", s);
- return -ENOENT;
- }
- type = s;
- } else if (!compressor) {
- if (!crypto_has_acomp(s, 0, 0)) {
- pr_err("compressor %s not available\n", s);
- return -ENOENT;
- }
- compressor = s;
- } else {
- WARN_ON(1);
- return -EINVAL;
+ if (!crypto_has_acomp(s, 0, 0)) {
+ pr_err("compressor %s not available\n", s);
+ return -ENOENT;
}
spin_lock_bh(&zswap_pools_lock);
- pool = zswap_pool_find_get(type, compressor);
+ pool = zswap_pool_find_get(s);
if (pool) {
zswap_pool_debug("using existing", pool);
WARN_ON(pool == zswap_pool_current());
@@ -590,7 +522,7 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
spin_unlock_bh(&zswap_pools_lock);
if (!pool)
- pool = zswap_pool_create(type, compressor);
+ pool = zswap_pool_create(s);
else {
/*
* Restore the initial ref dropped by percpu_ref_kill()
@@ -615,7 +547,8 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
list_add_rcu(&pool->list, &zswap_pools);
zswap_has_pool = true;
} else if (pool) {
- /* add the possibly pre-existing pool to the end of the pools
+ /*
+ * Add the possibly pre-existing pool to the end of the pools
* list; if it's new (and empty) then it'll be removed and
* destroyed by the put after we drop the lock
*/
@@ -625,18 +558,8 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
spin_unlock_bh(&zswap_pools_lock);
- if (!zswap_has_pool && !pool) {
- /* if initial pool creation failed, and this pool creation also
- * failed, maybe both compressor and zpool params were bad.
- * Allow changing this param, so pool creation will succeed
- * when the other param is changed. We already verified this
- * param is ok in the zpool_has_pool() or crypto_has_acomp()
- * checks above.
- */
- ret = param_set_charp(s, kp);
- }
-
- /* drop the ref from either the old current pool,
+ /*
+ * Drop the ref from either the old current pool,
* or the new pool we failed to add
*/
if (put_pool)
@@ -645,18 +568,6 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
return ret;
}
-static int zswap_compressor_param_set(const char *val,
- const struct kernel_param *kp)
-{
- return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
-}
-
-static int zswap_zpool_param_set(const char *val,
- const struct kernel_param *kp)
-{
- return __zswap_param_set(val, kp, NULL, zswap_compressor);
-}
-
static int zswap_enabled_param_set(const char *val,
const struct kernel_param *kp)
{
@@ -710,20 +621,16 @@ static inline int entry_to_nid(struct zswap_entry *entry)
static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
{
- atomic_long_t *nr_zswap_protected;
- unsigned long lru_size, old, new;
int nid = entry_to_nid(entry);
struct mem_cgroup *memcg;
- struct lruvec *lruvec;
/*
* Note that it is safe to use rcu_read_lock() here, even in the face of
- * concurrent memcg offlining. Thanks to the memcg->kmemcg_id indirection
- * used in list_lru lookup, only two scenarios are possible:
+ * concurrent memcg offlining:
*
- * 1. list_lru_add() is called before memcg->kmemcg_id is updated. The
+ * 1. list_lru_add() is called before list_lru_one is dead. The
* new entry will be reparented to memcg's parent's list_lru.
- * 2. list_lru_add() is called after memcg->kmemcg_id is updated. The
+ * 2. list_lru_add() is called after list_lru_one is dead. The
* new entry will be added directly to memcg's parent's list_lru.
*
* Similar reasoning holds for list_lru_del().
@@ -732,19 +639,6 @@ static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
memcg = mem_cgroup_from_entry(entry);
/* will always succeed */
list_lru_add(list_lru, &entry->lru, nid, memcg);
-
- /* Update the protection area */
- lru_size = list_lru_count_one(list_lru, nid, memcg);
- lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
- nr_zswap_protected = &lruvec->zswap_lruvec_state.nr_zswap_protected;
- old = atomic_long_inc_return(nr_zswap_protected);
- /*
- * Decay to avoid overflow and adapt to changing workloads.
- * This is based on LRU reclaim cost decaying heuristics.
- */
- do {
- new = old > lru_size / 4 ? old / 2 : old;
- } while (!atomic_long_try_cmpxchg(nr_zswap_protected, &old, new));
rcu_read_unlock();
}
@@ -762,7 +656,7 @@ static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
void zswap_lruvec_state_init(struct lruvec *lruvec)
{
- atomic_long_set(&lruvec->zswap_lruvec_state.nr_zswap_protected, 0);
+ atomic_long_set(&lruvec->zswap_lruvec_state.nr_disk_swapins, 0);
}
void zswap_folio_swapin(struct folio *folio)
@@ -771,16 +665,29 @@ void zswap_folio_swapin(struct folio *folio)
if (folio) {
lruvec = folio_lruvec(folio);
- atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
+ atomic_long_inc(&lruvec->zswap_lruvec_state.nr_disk_swapins);
}
}
+/*
+ * This function should be called when a memcg is being offlined.
+ *
+ * Since the global shrinker shrink_worker() may hold a reference
+ * to the memcg, we must check and release the reference held in
+ * zswap_next_shrink.
+ *
+ * shrink_worker() must handle the case where this function releases
+ * the reference to the memcg being shrunk.
+ */
void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
{
/* lock out zswap shrinker walking memcg tree */
spin_lock(&zswap_shrink_lock);
- if (zswap_next_shrink == memcg)
- zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
+ if (zswap_next_shrink == memcg) {
+ do {
+ zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
+ } while (zswap_next_shrink && !mem_cgroup_online(zswap_next_shrink));
+ }
spin_unlock(&zswap_shrink_lock);
}
@@ -803,30 +710,23 @@ static void zswap_entry_cache_free(struct zswap_entry *entry)
kmem_cache_free(zswap_entry_cache, entry);
}
-static struct zpool *zswap_find_zpool(struct zswap_entry *entry)
-{
- return entry->pool->zpools[hash_ptr(entry, ilog2(ZSWAP_NR_ZPOOLS))];
-}
-
/*
- * Carries out the common pattern of freeing and entry's zpool allocation,
+ * Carries out the common pattern of freeing an entry's zsmalloc allocation,
* freeing the entry itself, and decrementing the number of stored pages.
*/
static void zswap_entry_free(struct zswap_entry *entry)
{
- if (!entry->length)
- atomic_dec(&zswap_same_filled_pages);
- else {
- zswap_lru_del(&zswap_list_lru, entry);
- zpool_free(zswap_find_zpool(entry), entry->handle);
- zswap_pool_put(entry->pool);
- }
+ zswap_lru_del(&zswap_list_lru, entry);
+ zs_free(entry->pool->zs_pool, entry->handle);
+ zswap_pool_put(entry->pool);
if (entry->objcg) {
obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
obj_cgroup_put(entry->objcg);
}
+ if (entry->length == PAGE_SIZE)
+ atomic_long_dec(&zswap_stored_incompressible_pages);
zswap_entry_cache_free(entry);
- atomic_dec(&zswap_stored_pages);
+ atomic_long_dec(&zswap_stored_pages);
}
/*********************************
@@ -836,36 +736,41 @@ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
- struct crypto_acomp *acomp;
- struct acomp_req *req;
+ struct crypto_acomp *acomp = NULL;
+ struct acomp_req *req = NULL;
+ u8 *buffer = NULL;
int ret;
- mutex_init(&acomp_ctx->mutex);
-
- acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
- if (!acomp_ctx->buffer)
- return -ENOMEM;
+ buffer = kmalloc_node(PAGE_SIZE, GFP_KERNEL, cpu_to_node(cpu));
+ if (!buffer) {
+ ret = -ENOMEM;
+ goto fail;
+ }
acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
if (IS_ERR(acomp)) {
pr_err("could not alloc crypto acomp %s : %ld\n",
pool->tfm_name, PTR_ERR(acomp));
ret = PTR_ERR(acomp);
- goto acomp_fail;
+ goto fail;
}
- acomp_ctx->acomp = acomp;
- acomp_ctx->is_sleepable = acomp_is_async(acomp);
- req = acomp_request_alloc(acomp_ctx->acomp);
+ req = acomp_request_alloc(acomp);
if (!req) {
pr_err("could not alloc crypto acomp_request %s\n",
pool->tfm_name);
ret = -ENOMEM;
- goto req_fail;
+ goto fail;
}
- acomp_ctx->req = req;
+ /*
+ * Only hold the mutex after completing allocations, otherwise we may
+ * recurse into zswap through reclaim and attempt to hold the mutex
+ * again resulting in a deadlock.
+ */
+ mutex_lock(&acomp_ctx->mutex);
crypto_init_wait(&acomp_ctx->wait);
+
/*
* if the backend of acomp is async zip, crypto_req_done() will wakeup
* crypto_wait_req(); if the backend of acomp is scomp, the callback
@@ -874,12 +779,17 @@ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &acomp_ctx->wait);
+ acomp_ctx->buffer = buffer;
+ acomp_ctx->acomp = acomp;
+ acomp_ctx->is_sleepable = acomp_is_async(acomp);
+ acomp_ctx->req = req;
+ mutex_unlock(&acomp_ctx->mutex);
return 0;
-req_fail:
- crypto_free_acomp(acomp_ctx->acomp);
-acomp_fail:
- kfree(acomp_ctx->buffer);
+fail:
+ if (acomp)
+ crypto_free_acomp(acomp);
+ kfree(buffer);
return ret;
}
@@ -887,44 +797,78 @@ static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
{
struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
+ struct acomp_req *req;
+ struct crypto_acomp *acomp;
+ u8 *buffer;
- if (!IS_ERR_OR_NULL(acomp_ctx)) {
- if (!IS_ERR_OR_NULL(acomp_ctx->req))
- acomp_request_free(acomp_ctx->req);
- if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
- crypto_free_acomp(acomp_ctx->acomp);
- kfree(acomp_ctx->buffer);
- }
+ if (IS_ERR_OR_NULL(acomp_ctx))
+ return 0;
+
+ mutex_lock(&acomp_ctx->mutex);
+ req = acomp_ctx->req;
+ acomp = acomp_ctx->acomp;
+ buffer = acomp_ctx->buffer;
+ acomp_ctx->req = NULL;
+ acomp_ctx->acomp = NULL;
+ acomp_ctx->buffer = NULL;
+ mutex_unlock(&acomp_ctx->mutex);
+
+ /*
+ * Do the actual freeing after releasing the mutex to avoid subtle
+ * locking dependencies causing deadlocks.
+ */
+ if (!IS_ERR_OR_NULL(req))
+ acomp_request_free(req);
+ if (!IS_ERR_OR_NULL(acomp))
+ crypto_free_acomp(acomp);
+ kfree(buffer);
return 0;
}
-static bool zswap_compress(struct folio *folio, struct zswap_entry *entry)
+static struct crypto_acomp_ctx *acomp_ctx_get_cpu_lock(struct zswap_pool *pool)
+{
+ struct crypto_acomp_ctx *acomp_ctx;
+
+ for (;;) {
+ acomp_ctx = raw_cpu_ptr(pool->acomp_ctx);
+ mutex_lock(&acomp_ctx->mutex);
+ if (likely(acomp_ctx->req))
+ return acomp_ctx;
+ /*
+ * It is possible that we were migrated to a different CPU after
+ * getting the per-CPU ctx but before the mutex was acquired. If
+ * the old CPU got offlined, zswap_cpu_comp_dead() could have
+ * already freed ctx->req (among other things) and set it to
+ * NULL. Just try again on the new CPU that we ended up on.
+ */
+ mutex_unlock(&acomp_ctx->mutex);
+ }
+}
+
+static void acomp_ctx_put_unlock(struct crypto_acomp_ctx *acomp_ctx)
+{
+ mutex_unlock(&acomp_ctx->mutex);
+}
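
The pair above brackets every (de)compression; the retry loop is what makes the
per-CPU context safe against CPU hotplug racing with migration. A condensed usage
sketch (hypothetical caller, same pattern as zswap_compress() and zswap_decompress()
below):

    /* Illustrative caller of the locking helpers above. */
    acomp_ctx = acomp_ctx_get_cpu_lock(pool);   /* re-tries if we migrated and
                                                 * the old CPU's ctx was freed */
    /* ... use acomp_ctx->req / acomp_ctx->buffer under the mutex ... */
    acomp_ctx_put_unlock(acomp_ctx);
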
+
+static bool zswap_compress(struct page *page, struct zswap_entry *entry,
+ struct zswap_pool *pool)
{
struct crypto_acomp_ctx *acomp_ctx;
struct scatterlist input, output;
int comp_ret = 0, alloc_ret = 0;
unsigned int dlen = PAGE_SIZE;
unsigned long handle;
- struct zpool *zpool;
- char *buf;
gfp_t gfp;
u8 *dst;
+ bool mapped = false;
- acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
-
- mutex_lock(&acomp_ctx->mutex);
-
+ acomp_ctx = acomp_ctx_get_cpu_lock(pool);
dst = acomp_ctx->buffer;
sg_init_table(&input, 1);
- sg_set_page(&input, &folio->page, PAGE_SIZE, 0);
+ sg_set_page(&input, page, PAGE_SIZE, 0);
- /*
- * We need PAGE_SIZE * 2 here since there maybe over-compression case,
- * and hardware-accelerators may won't check the dst buffer size, so
- * giving the dst buffer with enough length to avoid buffer overflow.
- */
- sg_init_one(&output, dst, PAGE_SIZE * 2);
+ sg_init_one(&output, dst, PAGE_SIZE);
acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);
/*
@@ -935,31 +879,46 @@ static bool zswap_compress(struct folio *folio, struct zswap_entry *entry)
* acomp instance, then get those requests done simultaneously. but in this
* case, zswap actually does store and load page by page, there is no
* existing method to send the second page before the first page is done
- * in one thread doing zwap.
+ * in one thread doing zswap.
* but in different threads running on different cpu, we have different
* acomp instance, so multiple threads can do (de)compression in parallel.
*/
comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
dlen = acomp_ctx->req->dlen;
- if (comp_ret)
- goto unlock;
- zpool = zswap_find_zpool(entry);
- gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
- if (zpool_malloc_support_movable(zpool))
- gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
- alloc_ret = zpool_malloc(zpool, dlen, gfp, &handle);
- if (alloc_ret)
- goto unlock;
+ /*
+ * If a page cannot be compressed into a size smaller than PAGE_SIZE,
+ * save the content as-is, without compression, to keep the LRU order
+ * of writebacks. If writeback is disabled, reject the page instead,
+ * since storing it would only add metadata overhead. swap_writeout()
+ * will put the page back on the active LRU list in that case.
+ */
+ if (comp_ret || !dlen || dlen >= PAGE_SIZE) {
+ if (!mem_cgroup_zswap_writeback_enabled(
+ folio_memcg(page_folio(page)))) {
+ comp_ret = comp_ret ? comp_ret : -EINVAL;
+ goto unlock;
+ }
+ comp_ret = 0;
+ dlen = PAGE_SIZE;
+ dst = kmap_local_page(page);
+ mapped = true;
+ }
- buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
- memcpy(buf, dst, dlen);
- zpool_unmap_handle(zpool, handle);
+ gfp = GFP_NOWAIT | __GFP_NORETRY | __GFP_HIGHMEM | __GFP_MOVABLE;
+ handle = zs_malloc(pool->zs_pool, dlen, gfp, page_to_nid(page));
+ if (IS_ERR_VALUE(handle)) {
+ alloc_ret = PTR_ERR((void *)handle);
+ goto unlock;
+ }
+ zs_obj_write(pool->zs_pool, handle, dst, dlen);
entry->handle = handle;
entry->length = dlen;
unlock:
+ if (mapped)
+ kunmap_local(dst);
if (comp_ret == -ENOSPC || alloc_ret == -ENOSPC)
zswap_reject_compress_poor++;
else if (comp_ret)
@@ -967,47 +926,60 @@ unlock:
else if (alloc_ret)
zswap_reject_alloc_fail++;
- mutex_unlock(&acomp_ctx->mutex);
+ acomp_ctx_put_unlock(acomp_ctx);
return comp_ret == 0 && alloc_ret == 0;
}
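
Note that dlen == PAGE_SIZE doubles as the "incompressible" marker used throughout
this patch (zswap_entry_free(), zswap_decompress(), zswap_store_page()). A sketch of
the invariant, with a hypothetical predicate for illustration:

    /* Invariant established by zswap_compress() (illustrative helper):
     * entry->length <  PAGE_SIZE: handle holds compressed data
     * entry->length == PAGE_SIZE: handle holds the raw page contents,
     *                             counted in zswap_stored_incompressible_pages
     */
    static inline bool zswap_entry_is_incompressible(struct zswap_entry *entry)
    {
            return entry->length == PAGE_SIZE;
    }
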
-static void zswap_decompress(struct zswap_entry *entry, struct page *page)
+static bool zswap_decompress(struct zswap_entry *entry, struct folio *folio)
{
- struct zpool *zpool = zswap_find_zpool(entry);
+ struct zswap_pool *pool = entry->pool;
struct scatterlist input, output;
struct crypto_acomp_ctx *acomp_ctx;
- u8 *src;
+ int decomp_ret = 0, dlen = PAGE_SIZE;
+ u8 *src, *obj;
- acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
- mutex_lock(&acomp_ctx->mutex);
+ acomp_ctx = acomp_ctx_get_cpu_lock(pool);
+ obj = zs_obj_read_begin(pool->zs_pool, entry->handle, acomp_ctx->buffer);
+
+ /* zswap entries of length PAGE_SIZE are not compressed. */
+ if (entry->length == PAGE_SIZE) {
+ memcpy_to_folio(folio, 0, obj, entry->length);
+ goto read_done;
+ }
- src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
/*
- * If zpool_map_handle is atomic, we cannot reliably utilize its mapped buffer
- * to do crypto_acomp_decompress() which might sleep. In such cases, we must
- * resort to copying the buffer to a temporary one.
- * Meanwhile, zpool_map_handle() might return a non-linearly mapped buffer,
- * such as a kmap address of high memory or even ever a vmap address.
- * However, sg_init_one is only equipped to handle linearly mapped low memory.
- * In such cases, we also must copy the buffer to a temporary and lowmem one.
+ * zs_obj_read_begin() might return a kmap address of highmem when
+ * acomp_ctx->buffer is not used. However, sg_init_one() does not
+ * handle highmem addresses, so copy the object to acomp_ctx->buffer.
*/
- if ((acomp_ctx->is_sleepable && !zpool_can_sleep_mapped(zpool)) ||
- !virt_addr_valid(src)) {
- memcpy(acomp_ctx->buffer, src, entry->length);
+ if (virt_addr_valid(obj)) {
+ src = obj;
+ } else {
+ WARN_ON_ONCE(obj == acomp_ctx->buffer);
+ memcpy(acomp_ctx->buffer, obj, entry->length);
src = acomp_ctx->buffer;
- zpool_unmap_handle(zpool, entry->handle);
}
sg_init_one(&input, src, entry->length);
sg_init_table(&output, 1);
- sg_set_page(&output, page, PAGE_SIZE, 0);
+ sg_set_folio(&output, folio, PAGE_SIZE, 0);
acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
- BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
- BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
- mutex_unlock(&acomp_ctx->mutex);
+ decomp_ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
+ dlen = acomp_ctx->req->dlen;
- if (src != acomp_ctx->buffer)
- zpool_unmap_handle(zpool, entry->handle);
+read_done:
+ zs_obj_read_end(pool->zs_pool, entry->handle, obj);
+ acomp_ctx_put_unlock(acomp_ctx);
+
+ if (!decomp_ret && dlen == PAGE_SIZE)
+ return true;
+
+ zswap_decompress_fail++;
+ pr_alert_ratelimited("Decompression error from zswap (%d:%lu %s %u->%d)\n",
+ swp_type(entry->swpentry),
+ swp_offset(entry->swpentry),
+ entry->pool->tfm_name, entry->length, dlen);
+ return false;
}
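
A condensed view of the buffer selection explained in the comment above (same names
as in zswap_decompress(); illustrative only):

    /* zs_obj_read_begin() may hand back a highmem kmap address, which
     * sg_init_one() cannot handle; bounce it through the lowmem buffer. */
    if (virt_addr_valid(obj))
            src = obj;                      /* linear address: use directly */
    else {
            memcpy(acomp_ctx->buffer, obj, entry->length);
            src = acomp_ctx->buffer;        /* copy to lowmem */
    }
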
/*********************************
@@ -1033,14 +1005,18 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
struct folio *folio;
struct mempolicy *mpol;
bool folio_was_allocated;
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_NONE,
- };
+ struct swap_info_struct *si;
+ int ret = 0;
/* try to allocate swap cache folio */
+ si = get_swap_device(swpentry);
+ if (!si)
+ return -EEXIST;
+
mpol = get_task_policy(current);
folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
- NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
+ NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
+ put_swap_device(si);
if (!folio)
return -ENOMEM;
@@ -1052,8 +1028,8 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
* and freed when invalidated by the concurrent shrinker anyway.
*/
if (!folio_was_allocated) {
- folio_put(folio);
- return -EEXIST;
+ ret = -EEXIST;
+ goto out;
}
/*
@@ -1066,18 +1042,21 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
* be dereferenced.
*/
tree = swap_zswap_tree(swpentry);
- if (entry != xa_cmpxchg(tree, offset, entry, NULL, GFP_KERNEL)) {
- delete_from_swap_cache(folio);
- folio_unlock(folio);
- folio_put(folio);
- return -ENOMEM;
+ if (entry != xa_load(tree, offset)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (!zswap_decompress(entry, folio)) {
+ ret = -EIO;
+ goto out;
}
- zswap_decompress(entry, &folio->page);
+ xa_erase(tree, offset);
count_vm_event(ZSWPWB);
if (entry->objcg)
- count_objcg_event(entry->objcg, ZSWPWB);
+ count_objcg_events(entry->objcg, ZSWPWB, 1);
zswap_entry_free(entry);
@@ -1088,17 +1067,44 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
folio_set_reclaim(folio);
/* start writeback */
- __swap_writepage(folio, &wbc);
- folio_put(folio);
+ __swap_writepage(folio, NULL);
- return 0;
+out:
+ if (ret && ret != -EEXIST) {
+ swap_cache_del_folio(folio);
+ folio_unlock(folio);
+ }
+ folio_put(folio);
+ return ret;
}
/*********************************
* shrinker functions
**********************************/
+/*
+ * The dynamic shrinker is modulated by the following factors:
+ *
+ * 1. Each zswap entry has a referenced bit, which the shrinker unsets (giving
+ * the entry a second chance) before rotating it in the LRU list. If the
+ * entry is considered again by the shrinker, with its referenced bit unset,
+ * it is written back. The writeback rate as a result is dynamically
+ * adjusted by the pool activities - if the pool is dominated by new entries
+ * (i.e. lots of recent zswapouts), these entries will be protected and
+ * the writeback rate will slow down. On the other hand, if the pool has a
+ * lot of stagnant entries, these entries will be reclaimed immediately,
+ * effectively increasing the writeback rate.
+ *
+ * 2. Swapins counter: If we observe swapins, it is a sign that we are
+ * overshrinking and should slow down. We maintain a swapins counter, which
+ * is consumed and subtracted from the number of eligible objects on the LRU
+ * in zswap_shrinker_count().
+ *
+ * 3. Compression ratio. The better the workload compresses, the smaller the gains we
+ * can expect from writeback. We scale down the number of objects available
+ * for reclaim by this ratio.
+ */
static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
- spinlock_t *lock, void *arg)
+ void *arg)
{
struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
bool *encountered_page_in_swapcache = (bool *)arg;
@@ -1107,12 +1113,22 @@ static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_o
int writeback_result;
/*
+ * Second chance algorithm: if the entry has its referenced bit set, give it
+ * a second chance. Only clear the referenced bit and rotate it in the
+ * zswap's LRU list.
+ */
+ if (entry->referenced) {
+ entry->referenced = false;
+ return LRU_ROTATE;
+ }
+
+ /*
* As soon as we drop the LRU lock, the entry can be freed by
* a concurrent invalidation. This means the following:
*
* 1. We extract the swp_entry_t to the stack, allowing
* zswap_writeback_entry() to pin the swap entry and
- * then validate the zwap entry against that swap entry's
+ * then validate the zswap entry against that swap entry's
* tree using pointer value comparison. Only when that
* is successful can the entry be dereferenced.
*
@@ -1142,9 +1158,9 @@ static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_o
/*
* It's safe to drop the lock here because we return either
- * LRU_REMOVED_RETRY or LRU_RETRY.
+ * LRU_REMOVED_RETRY, LRU_RETRY or LRU_STOP.
*/
- spin_unlock(lock);
+ spin_unlock(&l->lock);
writeback_result = zswap_writeback_entry(entry, swpentry);
@@ -1165,15 +1181,13 @@ static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_o
zswap_written_back_pages++;
}
- spin_lock(lock);
return ret;
}
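
The referenced bit above gives zswap a CLOCK-style second-chance policy. A toy model
of the walk, outside the kernel's list_lru machinery (hypothetical, for illustration):

    /* Each entry survives its first encounter with the shrinker and is
     * only written back on the second one, unless re-stored in between. */
    enum lru_status second_chance_model(struct zswap_entry *entry)
    {
            if (entry->referenced) {
                    entry->referenced = false;
                    return LRU_ROTATE;       /* spare once, rotate to tail */
            }
            return LRU_REMOVED_RETRY;        /* candidate for writeback */
    }
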
static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
struct shrink_control *sc)
{
- struct lruvec *lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
- unsigned long shrink_ret, nr_protected, lru_size;
+ unsigned long shrink_ret;
bool encountered_page_in_swapcache = false;
if (!zswap_shrinker_enabled ||
@@ -1182,25 +1196,6 @@ static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
return SHRINK_STOP;
}
- nr_protected =
- atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
- lru_size = list_lru_shrink_count(&zswap_list_lru, sc);
-
- /*
- * Abort if we are shrinking into the protected region.
- *
- * This short-circuiting is necessary because if we have too many multiple
- * concurrent reclaimers getting the freeable zswap object counts at the
- * same time (before any of them made reasonable progress), the total
- * number of reclaimed objects might be more than the number of unprotected
- * objects (i.e the reclaimers will reclaim into the protected area of the
- * zswap LRU).
- */
- if (nr_protected >= lru_size - sc->nr_to_scan) {
- sc->nr_scanned = 0;
- return SHRINK_STOP;
- }
-
shrink_ret = list_lru_shrink_walk(&zswap_list_lru, sc, &shrink_memcg_cb,
&encountered_page_in_swapcache);
@@ -1215,7 +1210,10 @@ static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
{
struct mem_cgroup *memcg = sc->memcg;
struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
- unsigned long nr_backing, nr_stored, nr_freeable, nr_protected;
+ atomic_long_t *nr_disk_swapins =
+ &lruvec->zswap_lruvec_state.nr_disk_swapins;
+ unsigned long nr_backing, nr_stored, nr_freeable, nr_disk_swapins_cur,
+ nr_remain;
if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
return 0;
@@ -1242,31 +1240,39 @@ static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
} else {
nr_backing = zswap_total_pages();
- nr_stored = atomic_read(&zswap_stored_pages);
+ nr_stored = atomic_long_read(&zswap_stored_pages);
}
if (!nr_stored)
return 0;
- nr_protected =
- atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
nr_freeable = list_lru_shrink_count(&zswap_list_lru, sc);
+ if (!nr_freeable)
+ return 0;
+
/*
- * Subtract the lru size by an estimate of the number of pages
- * that should be protected.
+ * Subtract from the lru size the number of pages that were recently swapped
+ * in from disk. The idea is that had we protected zswap's LRU by this
+ * many pages, these disk swapins would not have happened.
*/
- nr_freeable = nr_freeable > nr_protected ? nr_freeable - nr_protected : 0;
+ nr_disk_swapins_cur = atomic_long_read(nr_disk_swapins);
+ do {
+ if (nr_freeable >= nr_disk_swapins_cur)
+ nr_remain = 0;
+ else
+ nr_remain = nr_disk_swapins_cur - nr_freeable;
+ } while (!atomic_long_try_cmpxchg(
+ nr_disk_swapins, &nr_disk_swapins_cur, nr_remain));
+
+ nr_freeable -= nr_disk_swapins_cur - nr_remain;
+ if (!nr_freeable)
+ return 0;
/*
* Scale the number of freeable pages by the memory saving factor.
* This ensures that the better zswap compresses memory, the fewer
* pages we will evict to swap (as it will otherwise incur IO for
* relatively small memory saving).
- *
- * The memory saving factor calculated here takes same-filled pages into
- * account, but those are not freeable since they almost occupy no
- * space. Hence, we may scale nr_freeable down a little bit more than we
- * should if we have a lot of same-filled pages.
*/
return mult_frac(nr_freeable, nr_backing, nr_stored);
}
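
A worked example of the two adjustments above, under assumed numbers: say
nr_freeable = 1000, nr_disk_swapins = 200, nr_backing = 300 and nr_stored = 900
(i.e. a 3:1 compression ratio). Then:

    nr_freeable -= 200;     /* swapin penalty consumed: 1000 -> 800 */
    /* mult_frac(nr_freeable, nr_backing, nr_stored)
     *   = 800 * 300 / 900 = 266 objects reported as freeable */
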
@@ -1289,10 +1295,10 @@ static struct shrinker *zswap_alloc_shrinker(void)
static int shrink_memcg(struct mem_cgroup *memcg)
{
- int nid, shrunk = 0;
+ int nid, shrunk = 0, scanned = 0;
if (!mem_cgroup_zswap_writeback_enabled(memcg))
- return -EINVAL;
+ return -ENOENT;
/*
* Skip zombies because their LRUs are reparented and we would be
@@ -1306,63 +1312,94 @@ static int shrink_memcg(struct mem_cgroup *memcg)
shrunk += list_lru_walk_one(&zswap_list_lru, nid, memcg,
&shrink_memcg_cb, NULL, &nr_to_walk);
+ scanned += 1 - nr_to_walk;
}
+
+ if (!scanned)
+ return -ENOENT;
+
return shrunk ? 0 : -EAGAIN;
}
static void shrink_worker(struct work_struct *w)
{
struct mem_cgroup *memcg;
- int ret, failures = 0;
+ int ret, failures = 0, attempts = 0;
unsigned long thr;
/* Reclaim down to the accept threshold */
thr = zswap_accept_thr_pages();
- /* global reclaim will select cgroup in a round-robin fashion. */
+ /*
+ * Global reclaim will select a cgroup in a round-robin fashion from all
+ * online memcgs, but memcgs that have no pages in zswap and
+ * writeback-disabled memcgs (memory.zswap.writeback=0) are not
+ * candidates for shrinking.
+ *
+ * Shrinking will be aborted if we encounter the following
+ * MAX_RECLAIM_RETRIES times:
+ * - No writeback-candidate memcgs found in a memcg tree walk.
+ * - Shrinking a writeback-candidate memcg failed.
+ *
+ * We save iteration cursor memcg into zswap_next_shrink,
+ * which can be modified by the offline memcg cleaner
+ * zswap_memcg_offline_cleanup().
+ *
+ * Since the offline cleaner is called only once, we cannot leave an
+ * offline memcg reference in zswap_next_shrink.
+ * We can rely on the cleaner only if we get an online memcg under the lock.
+ *
+ * If we get an offline memcg, we cannot determine if the cleaner has
+ * already been called or will be called later. We must put back the
+ * reference before returning from this function. Otherwise, the
+ * offline memcg left in zswap_next_shrink will hold the reference
+ * until the next run of shrink_worker().
+ */
do {
- spin_lock(&zswap_shrink_lock);
- zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
- memcg = zswap_next_shrink;
-
/*
- * We need to retry if we have gone through a full round trip, or if we
- * got an offline memcg (or else we risk undoing the effect of the
- * zswap memcg offlining cleanup callback). This is not catastrophic
- * per se, but it will keep the now offlined memcg hostage for a while.
+ * Start shrinking from the next memcg after zswap_next_shrink.
+ * When the offline cleaner has already advanced the cursor,
+ * advancing the cursor here overlooks one memcg, but this
+ * should be negligibly rare.
*
- * Note that if we got an online memcg, we will keep the extra
- * reference in case the original reference obtained by mem_cgroup_iter
- * is dropped by the zswap memcg offlining callback, ensuring that the
- * memcg is not killed when we are reclaiming.
+ * If we get an online memcg, keep the extra reference in case
+ * the original one obtained by mem_cgroup_iter() is dropped by
+ * zswap_memcg_offline_cleanup() while we are shrinking the
+ * memcg.
*/
- if (!memcg) {
- spin_unlock(&zswap_shrink_lock);
- if (++failures == MAX_RECLAIM_RETRIES)
- break;
-
- goto resched;
- }
-
- if (!mem_cgroup_tryget_online(memcg)) {
- /* drop the reference from mem_cgroup_iter() */
- mem_cgroup_iter_break(NULL, memcg);
- zswap_next_shrink = NULL;
- spin_unlock(&zswap_shrink_lock);
+ spin_lock(&zswap_shrink_lock);
+ do {
+ memcg = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
+ zswap_next_shrink = memcg;
+ } while (memcg && !mem_cgroup_tryget_online(memcg));
+ spin_unlock(&zswap_shrink_lock);
- if (++failures == MAX_RECLAIM_RETRIES)
+ if (!memcg) {
+ /*
+ * Continue shrinking without incrementing failures if
+ * we found candidate memcgs in the last tree walk.
+ */
+ if (!attempts && ++failures == MAX_RECLAIM_RETRIES)
break;
+ attempts = 0;
goto resched;
}
- spin_unlock(&zswap_shrink_lock);
ret = shrink_memcg(memcg);
/* drop the extra reference */
mem_cgroup_put(memcg);
- if (ret == -EINVAL)
- break;
+ /*
+ * There are no writeback-candidate pages in the memcg.
+ * This is not an issue as long as we can find another memcg
+ * with pages in zswap. Skip this without incrementing attempts
+ * and failures.
+ */
+ if (ret == -ENOENT)
+ continue;
+ ++attempts;
+
if (ret && ++failures == MAX_RECLAIM_RETRIES)
break;
resched:
@@ -1371,96 +1408,121 @@ resched:
}
/*********************************
-* same-filled functions
+* main API
**********************************/
-static bool zswap_is_folio_same_filled(struct folio *folio, unsigned long *value)
+
+static bool zswap_store_page(struct page *page,
+ struct obj_cgroup *objcg,
+ struct zswap_pool *pool)
{
- unsigned long *page;
- unsigned long val;
- unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;
- bool ret = false;
+ swp_entry_t page_swpentry = page_swap_entry(page);
+ struct zswap_entry *entry, *old;
+
+ /* allocate entry */
+ entry = zswap_entry_cache_alloc(GFP_KERNEL, page_to_nid(page));
+ if (!entry) {
+ zswap_reject_kmemcache_fail++;
+ return false;
+ }
- page = kmap_local_folio(folio, 0);
- val = page[0];
+ if (!zswap_compress(page, entry, pool))
+ goto compress_failed;
- if (val != page[last_pos])
- goto out;
+ old = xa_store(swap_zswap_tree(page_swpentry),
+ swp_offset(page_swpentry),
+ entry, GFP_KERNEL);
+ if (xa_is_err(old)) {
+ int err = xa_err(old);
- for (pos = 1; pos < last_pos; pos++) {
- if (val != page[pos])
- goto out;
+ WARN_ONCE(err != -ENOMEM, "unexpected xarray error: %d\n", err);
+ zswap_reject_alloc_fail++;
+ goto store_failed;
}
- *value = val;
- ret = true;
-out:
- kunmap_local(page);
- return ret;
-}
+ /*
+ * We may have had an existing entry that became stale when
+ * the folio was redirtied and now the new version is being
+ * swapped out. Get rid of the old.
+ */
+ if (old)
+ zswap_entry_free(old);
-static void zswap_fill_page(void *ptr, unsigned long value)
-{
- unsigned long *page;
+ /*
+ * The entry is successfully compressed and stored in the tree; there is
+ * no further possibility of failure. Grab refs to the pool and objcg,
+ * charge zswap memory, and increment zswap_stored_pages.
+ * The opposite actions will be performed by zswap_entry_free()
+ * when the entry is removed from the tree.
+ */
+ zswap_pool_get(pool);
+ if (objcg) {
+ obj_cgroup_get(objcg);
+ obj_cgroup_charge_zswap(objcg, entry->length);
+ }
+ atomic_long_inc(&zswap_stored_pages);
+ if (entry->length == PAGE_SIZE)
+ atomic_long_inc(&zswap_stored_incompressible_pages);
+
+ /*
+ * We finish initializing the entry while it's already in the xarray.
+ * This is safe because:
+ *
+ * 1. Concurrent stores and invalidations are excluded by folio lock.
+ *
+ * 2. Writeback is excluded by the entry not being on the LRU yet.
+ * The publishing order matters to prevent writeback from seeing
+ * an incoherent entry.
+ */
+ entry->pool = pool;
+ entry->swpentry = page_swpentry;
+ entry->objcg = objcg;
+ entry->referenced = true;
+ if (entry->length) {
+ INIT_LIST_HEAD(&entry->lru);
+ zswap_lru_add(&zswap_list_lru, entry);
+ }
- page = (unsigned long *)ptr;
- memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
+ return true;
+
+store_failed:
+ zs_free(pool->zs_pool, entry->handle);
+compress_failed:
+ zswap_entry_cache_free(entry);
+ return false;
}
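
The publication order in zswap_store_page() is load-bearing: the entry becomes
visible in the xarray first (safe, because the folio lock excludes concurrent
stores and invalidations), its fields are then filled in, and only as the last
step does it become reachable by writeback via the LRU. Condensed (same names
as above, elisions marked):

    old = xa_store(tree, offset, entry, GFP_KERNEL); /* visible to loads  */
    /* ... refcounts, charging, stats ... */
    entry->pool = pool;                              /* safe: not on LRU  */
    entry->swpentry = page_swpentry;
    zswap_lru_add(&zswap_list_lru, entry);           /* last: reclaimable */
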
-/*********************************
-* main API
-**********************************/
bool zswap_store(struct folio *folio)
{
+ long nr_pages = folio_nr_pages(folio);
swp_entry_t swp = folio->swap;
- pgoff_t offset = swp_offset(swp);
- struct xarray *tree = swap_zswap_tree(swp);
- struct zswap_entry *entry, *old;
struct obj_cgroup *objcg = NULL;
struct mem_cgroup *memcg = NULL;
- unsigned long value;
+ struct zswap_pool *pool;
+ bool ret = false;
+ long index;
VM_WARN_ON_ONCE(!folio_test_locked(folio));
VM_WARN_ON_ONCE(!folio_test_swapcache(folio));
- /* Large folios aren't supported */
- if (folio_test_large(folio))
- return false;
-
if (!zswap_enabled)
goto check_old;
- /* Check cgroup limits */
objcg = get_obj_cgroup_from_folio(folio);
if (objcg && !obj_cgroup_may_zswap(objcg)) {
memcg = get_mem_cgroup_from_objcg(objcg);
if (shrink_memcg(memcg)) {
mem_cgroup_put(memcg);
- goto reject;
+ goto put_objcg;
}
mem_cgroup_put(memcg);
}
if (zswap_check_limits())
- goto reject;
-
- /* allocate entry */
- entry = zswap_entry_cache_alloc(GFP_KERNEL, folio_nid(folio));
- if (!entry) {
- zswap_reject_kmemcache_fail++;
- goto reject;
- }
-
- if (zswap_is_folio_same_filled(folio, &value)) {
- entry->length = 0;
- entry->value = value;
- atomic_inc(&zswap_same_filled_pages);
- goto store_entry;
- }
+ goto put_objcg;
- /* if entry is successfully added, it keeps the reference */
- entry->pool = zswap_pool_current_get();
- if (!entry->pool)
- goto freepage;
+ pool = zswap_pool_current_get();
+ if (!pool)
+ goto put_objcg;
if (objcg) {
memcg = get_mem_cgroup_from_objcg(objcg);
@@ -1471,94 +1533,108 @@ bool zswap_store(struct folio *folio)
mem_cgroup_put(memcg);
}
- if (!zswap_compress(folio, entry))
- goto put_pool;
-
-store_entry:
- entry->swpentry = swp;
- entry->objcg = objcg;
-
- old = xa_store(tree, offset, entry, GFP_KERNEL);
- if (xa_is_err(old)) {
- int err = xa_err(old);
+ for (index = 0; index < nr_pages; ++index) {
+ struct page *page = folio_page(folio, index);
- WARN_ONCE(err != -ENOMEM, "unexpected xarray error: %d\n", err);
- zswap_reject_alloc_fail++;
- goto store_failed;
- }
-
- /*
- * We may have had an existing entry that became stale when
- * the folio was redirtied and now the new version is being
- * swapped out. Get rid of the old.
- */
- if (old)
- zswap_entry_free(old);
-
- if (objcg) {
- obj_cgroup_charge_zswap(objcg, entry->length);
- count_objcg_event(objcg, ZSWPOUT);
+ if (!zswap_store_page(page, objcg, pool))
+ goto put_pool;
}
- /*
- * We finish initializing the entry while it's already in xarray.
- * This is safe because:
- *
- * 1. Concurrent stores and invalidations are excluded by folio lock.
- *
- * 2. Writeback is excluded by the entry not being on the LRU yet.
- * The publishing order matters to prevent writeback from seeing
- * an incoherent entry.
- */
- if (entry->length) {
- INIT_LIST_HEAD(&entry->lru);
- zswap_lru_add(&zswap_list_lru, entry);
- }
+ if (objcg)
+ count_objcg_events(objcg, ZSWPOUT, nr_pages);
- /* update stats */
- atomic_inc(&zswap_stored_pages);
- count_vm_event(ZSWPOUT);
+ count_vm_events(ZSWPOUT, nr_pages);
- return true;
+ ret = true;
-store_failed:
- if (!entry->length)
- atomic_dec(&zswap_same_filled_pages);
- else {
- zpool_free(zswap_find_zpool(entry), entry->handle);
put_pool:
- zswap_pool_put(entry->pool);
- }
-freepage:
- zswap_entry_cache_free(entry);
-reject:
+ zswap_pool_put(pool);
+put_objcg:
obj_cgroup_put(objcg);
- if (zswap_pool_reached_full)
+ if (!ret && zswap_pool_reached_full)
queue_work(shrink_wq, &zswap_shrink_work);
check_old:
/*
- * If the zswap store fails or zswap is disabled, we must invalidate the
- * possibly stale entry which was previously stored at this offset.
- * Otherwise, writeback could overwrite the new data in the swapfile.
+ * If the zswap store fails or zswap is disabled, we must invalidate
+ * the possibly stale entries which were previously stored at the
+ * offsets corresponding to each page of the folio. Otherwise,
+ * writeback could overwrite the new data in the swapfile.
*/
- entry = xa_erase(tree, offset);
- if (entry)
- zswap_entry_free(entry);
- return false;
+ if (!ret) {
+ unsigned type = swp_type(swp);
+ pgoff_t offset = swp_offset(swp);
+ struct zswap_entry *entry;
+ struct xarray *tree;
+
+ for (index = 0; index < nr_pages; ++index) {
+ tree = swap_zswap_tree(swp_entry(type, offset + index));
+ entry = xa_erase(tree, offset + index);
+ if (entry)
+ zswap_entry_free(entry);
+ }
+ }
+
+ return ret;
}
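
One detail of the cleanup loop above: it recomputes swap_zswap_tree() for every
offset rather than assuming all subpages of the folio share one xarray, since an
offset range could in principle straddle a ZSWAP_ADDRESS_SPACE_PAGES boundary.
For a hypothetical 4-page folio at offset 16382:

    /* Offsets 16382 and 16383 fall in tree 0; 16384 and 16385 in tree 1
     * (each xarray covers 1 << 14 = 16384 offsets). Whether or not swap
     * allocation alignment ever produces such a range, the per-page
     * lookup stays correct. */
    tree = swap_zswap_tree(swp_entry(type, offset + index));
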
-bool zswap_load(struct folio *folio)
+/**
+ * zswap_load() - load a folio from zswap
+ * @folio: folio to load
+ *
+ * Return: 0 on success, with the folio unlocked and marked up-to-date, or one
+ * of the following error codes:
+ *
+ * -EIO: if the swapped out content was in zswap, but could not be loaded
+ * into the page due to a decompression failure. The folio is unlocked, but
+ * NOT marked up-to-date, so that an IO error is emitted (e.g. do_swap_page()
+ * will SIGBUS).
+ *
+ * -EINVAL: if the swapped out content was in zswap, but the page belongs
+ * to a large folio, which is not supported by zswap. The folio is unlocked,
+ * but NOT marked up-to-date, so that an IO error is emitted (e.g.
+ * do_swap_page() will SIGBUS).
+ *
+ * -ENOENT: if the swapped out content was not in zswap. The folio remains
+ * locked on return.
+ */
+int zswap_load(struct folio *folio)
{
swp_entry_t swp = folio->swap;
pgoff_t offset = swp_offset(swp);
- struct page *page = &folio->page;
bool swapcache = folio_test_swapcache(folio);
struct xarray *tree = swap_zswap_tree(swp);
struct zswap_entry *entry;
- u8 *dst;
VM_WARN_ON_ONCE(!folio_test_locked(folio));
+ if (zswap_never_enabled())
+ return -ENOENT;
+
+ /*
+ * Large folios should not be swapped in while zswap is being used:
+ * zswap does not properly load them, and a large folio may only be
+ * partially present in zswap.
+ */
+ if (WARN_ON_ONCE(folio_test_large(folio))) {
+ folio_unlock(folio);
+ return -EINVAL;
+ }
+
+ entry = xa_load(tree, offset);
+ if (!entry)
+ return -ENOENT;
+
+ if (!zswap_decompress(entry, folio)) {
+ folio_unlock(folio);
+ return -EIO;
+ }
+
+ folio_mark_uptodate(folio);
+
+ count_vm_event(ZSWPIN);
+ if (entry->objcg)
+ count_objcg_events(entry->objcg, ZSWPIN, 1);
+
/*
* When reading into the swapcache, invalidate our entry. The
* swapcache can be the authoritative owner of the page and
@@ -1571,32 +1647,14 @@ bool zswap_load(struct folio *folio)
* files, which reads into a private page and may free it if
* the fault fails. We remain the primary owner of the entry.)
*/
- if (swapcache)
- entry = xa_erase(tree, offset);
- else
- entry = xa_load(tree, offset);
-
- if (!entry)
- return false;
-
- if (entry->length)
- zswap_decompress(entry, page);
- else {
- dst = kmap_local_page(page);
- zswap_fill_page(dst, entry->value);
- kunmap_local(dst);
- }
-
- count_vm_event(ZSWPIN);
- if (entry->objcg)
- count_objcg_event(entry->objcg, ZSWPIN);
-
if (swapcache) {
- zswap_entry_free(entry);
folio_mark_dirty(folio);
+ xa_erase(tree, offset);
+ zswap_entry_free(entry);
}
- return true;
+ folio_unlock(folio);
+ return 0;
}
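
The new return contract is easiest to read from the caller's side. A hedged sketch
of how a swap-in path might dispatch on it (hypothetical caller, mirroring the
kernel-doc above):

    /* Illustrative caller, based on the zswap_load() kernel-doc. */
    switch (zswap_load(folio)) {
    case 0:             /* loaded; folio unlocked and up-to-date */
            return;
    case -ENOENT:       /* not in zswap; folio still locked, read backing swap */
            break;
    default:            /* -EIO or -EINVAL: unlocked, not up-to-date -> SIGBUS */
            return;
    }
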
void zswap_invalidate(swp_entry_t swp)
@@ -1605,6 +1663,9 @@ void zswap_invalidate(swp_entry_t swp)
struct xarray *tree = swap_zswap_tree(swp);
struct zswap_entry *entry;
+ if (xa_empty(tree))
+ return;
+
entry = xa_erase(tree, offset);
if (entry)
zswap_entry_free(entry);
@@ -1615,7 +1676,7 @@ int zswap_swapon(int type, unsigned long nr_pages)
struct xarray *trees, *tree;
unsigned int nr, i;
- nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
+ nr = DIV_ROUND_UP(nr_pages, ZSWAP_ADDRESS_SPACE_PAGES);
trees = kvcalloc(nr, sizeof(*tree), GFP_KERNEL);
if (!trees) {
pr_err("alloc failed, zswap disabled for swap type %d\n", type);
@@ -1662,6 +1723,21 @@ static int debugfs_get_total_size(void *data, u64 *val)
}
DEFINE_DEBUGFS_ATTRIBUTE(total_size_fops, debugfs_get_total_size, NULL, "%llu\n");
+static int debugfs_get_stored_pages(void *data, u64 *val)
+{
+ *val = atomic_long_read(&zswap_stored_pages);
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(stored_pages_fops, debugfs_get_stored_pages, NULL, "%llu\n");
+
+static int debugfs_get_stored_incompressible_pages(void *data, u64 *val)
+{
+ *val = atomic_long_read(&zswap_stored_incompressible_pages);
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(stored_incompressible_pages_fops,
+ debugfs_get_stored_incompressible_pages, NULL, "%llu\n");
+
static int zswap_debugfs_init(void)
{
if (!debugfs_initialized())
@@ -1681,14 +1757,17 @@ static int zswap_debugfs_init(void)
zswap_debugfs_root, &zswap_reject_compress_fail);
debugfs_create_u64("reject_compress_poor", 0444,
zswap_debugfs_root, &zswap_reject_compress_poor);
+ debugfs_create_u64("decompress_fail", 0444,
+ zswap_debugfs_root, &zswap_decompress_fail);
debugfs_create_u64("written_back_pages", 0444,
zswap_debugfs_root, &zswap_written_back_pages);
debugfs_create_file("pool_total_size", 0444,
zswap_debugfs_root, NULL, &total_size_fops);
- debugfs_create_atomic_t("stored_pages", 0444,
- zswap_debugfs_root, &zswap_stored_pages);
- debugfs_create_atomic_t("same_filled_pages", 0444,
- zswap_debugfs_root, &zswap_same_filled_pages);
+ debugfs_create_file("stored_pages", 0444,
+ zswap_debugfs_root, NULL, &stored_pages_fops);
+ debugfs_create_file("stored_incompressible_pages", 0444,
+ zswap_debugfs_root, NULL,
+ &stored_incompressible_pages_fops);
return 0;
}
@@ -1736,10 +1815,10 @@ static int zswap_setup(void)
pool = __zswap_pool_create_fallback();
if (pool) {
- pr_info("loaded using pool %s/%s\n", pool->tfm_name,
- zpool_get_type(pool->zpools[0]));
+ pr_info("loaded using pool %s\n", pool->tfm_name);
list_add(&pool->list, &zswap_pools);
zswap_has_pool = true;
+ static_branch_enable(&zswap_ever_enabled);
} else {
pr_err("pool creation failed\n");
zswap_enabled = false;