Diffstat (limited to 'mm/zsmalloc.c')
 mm/zsmalloc.c | 1969 ++++++++++++++++++++----------------------------
 1 file changed, 780 insertions(+), 1189 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 9445bee6b014..5bf832f9c05c 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -1,3 +1,5 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + /* * zsmalloc memory allocator * @@ -11,57 +13,32 @@ * Released under the terms of GNU General Public License Version 2.0 */ -/* - * Following is how we use various fields and flags of underlying - * struct page(s) to form a zspage. - * - * Usage of struct page fields: - * page->private: points to zspage - * page->index: links together all component pages of a zspage - * For the huge page, this is always 0, so we use this field - * to store handle. - * page->page_type: first object offset in a subpage of zspage - * - * Usage of struct page flags: - * PG_private: identifies the first component page - * PG_owner_priv_1: identifies the huge component page - * - */ - #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* * lock ordering: * page_lock * pool->lock + * class->lock * zspage->lock */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> -#include <linux/bitops.h> #include <linux/errno.h> #include <linux/highmem.h> #include <linux/string.h> #include <linux/slab.h> -#include <linux/pgtable.h> -#include <asm/tlbflush.h> -#include <linux/cpumask.h> -#include <linux/cpu.h> -#include <linux/vmalloc.h> -#include <linux/preempt.h> #include <linux/spinlock.h> +#include <linux/sprintf.h> #include <linux/shrinker.h> #include <linux/types.h> #include <linux/debugfs.h> #include <linux/zsmalloc.h> -#include <linux/zpool.h> -#include <linux/migrate.h> -#include <linux/wait.h> -#include <linux/pagemap.h> #include <linux/fs.h> -#include <linux/local_lock.h> +#include <linux/workqueue.h> +#include "zpdesc.h" #define ZSPAGE_MAGIC 0x58 @@ -73,13 +50,6 @@ */ #define ZS_ALIGN 8 -/* - * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single) - * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N. - */ -#define ZS_MAX_ZSPAGE_ORDER 2 -#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER) - #define ZS_HANDLE_SIZE (sizeof(unsigned long)) /* @@ -113,17 +83,20 @@ * have room for two bit at least. */ #define OBJ_ALLOCATED_TAG 1 -#define OBJ_TAG_BITS 1 -#define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS) + +#define OBJ_TAG_BITS 1 +#define OBJ_TAG_MASK OBJ_ALLOCATED_TAG + +#define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS) #define OBJ_INDEX_MASK ((_AC(1, UL) << OBJ_INDEX_BITS) - 1) #define HUGE_BITS 1 -#define FULLNESS_BITS 2 +#define FULLNESS_BITS 4 #define CLASS_BITS 8 -#define ISOLATED_BITS 3 #define MAGIC_VAL_BITS 8 -#define MAX(a, b) ((a) >= (b) ? (a) : (b)) +#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(CONFIG_ZSMALLOC_CHAIN_SIZE, UL)) + /* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */ #define ZS_MIN_ALLOC_SIZE \ MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS)) @@ -147,51 +120,45 @@ #define ZS_SIZE_CLASSES (DIV_ROUND_UP(ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE, \ ZS_SIZE_CLASS_DELTA) + 1) +/* + * Pages are distinguished by the ratio of used memory (that is the ratio + * of ->inuse objects to all objects that page can store). For example, + * INUSE_RATIO_10 means that the ratio of used objects is > 0% and <= 10%. + * + * The number of fullness groups is not random. It allows us to keep + * difference between the least busy page in the group (minimum permitted + * number of ->inuse objects) and the most busy page (maximum permitted + * number of ->inuse objects) at a reasonable value. 
+ */ enum fullness_group { - ZS_EMPTY, - ZS_ALMOST_EMPTY, - ZS_ALMOST_FULL, - ZS_FULL, - NR_ZS_FULLNESS, + ZS_INUSE_RATIO_0, + ZS_INUSE_RATIO_10, + /* NOTE: 8 more fullness groups here */ + ZS_INUSE_RATIO_99 = 10, + ZS_INUSE_RATIO_100, + NR_FULLNESS_GROUPS, }; enum class_stat_type { - CLASS_EMPTY, - CLASS_ALMOST_EMPTY, - CLASS_ALMOST_FULL, - CLASS_FULL, - OBJ_ALLOCATED, - OBJ_USED, - NR_ZS_STAT_TYPE, + /* NOTE: stats for 12 fullness groups here: from inuse 0 to 100 */ + ZS_OBJS_ALLOCATED = NR_FULLNESS_GROUPS, + ZS_OBJS_INUSE, + NR_CLASS_STAT_TYPES, }; struct zs_size_stat { - unsigned long objs[NR_ZS_STAT_TYPE]; + unsigned long objs[NR_CLASS_STAT_TYPES]; }; #ifdef CONFIG_ZSMALLOC_STAT static struct dentry *zs_stat_root; #endif -/* - * We assign a page to ZS_ALMOST_EMPTY fullness group when: - * n <= N / f, where - * n = number of allocated objects - * N = total number of objects zspage can store - * f = fullness_threshold_frac - * - * Similarly, we assign zspage to: - * ZS_ALMOST_FULL when n > N / f - * ZS_EMPTY when n == 0 - * ZS_FULL when n == N - * - * (see: fix_fullness_group()) - */ -static const int fullness_threshold_frac = 4; static size_t huge_class_size; struct size_class { - struct list_head fullness_list[NR_ZS_FULLNESS]; + spinlock_t lock; + struct list_head fullness_list[NR_FULLNESS_GROUPS]; /* * Size of objects stored in this class. Must be multiple * of ZS_ALIGN. @@ -237,14 +204,7 @@ struct zs_pool { struct zs_pool_stats stats; /* Compact classes */ - struct shrinker shrinker; - -#ifdef CONFIG_ZPOOL - /* List tracking the zspages in LRU order by most recently added object */ - struct list_head lru; - struct zpool *zpool; - const struct zpool_ops *zpool_ops; -#endif + struct shrinker *shrinker; #ifdef CONFIG_ZSMALLOC_STAT struct dentry *stat_dentry; @@ -252,7 +212,48 @@ struct zs_pool { #ifdef CONFIG_COMPACTION struct work_struct free_work; #endif + /* protect zspage migration/compaction */ + rwlock_t lock; + atomic_t compaction_in_progress; +}; + +static inline void zpdesc_set_first(struct zpdesc *zpdesc) +{ + SetPagePrivate(zpdesc_page(zpdesc)); +} + +static inline void zpdesc_inc_zone_page_state(struct zpdesc *zpdesc) +{ + inc_zone_page_state(zpdesc_page(zpdesc), NR_ZSPAGES); +} + +static inline void zpdesc_dec_zone_page_state(struct zpdesc *zpdesc) +{ + dec_zone_page_state(zpdesc_page(zpdesc), NR_ZSPAGES); +} + +static inline struct zpdesc *alloc_zpdesc(gfp_t gfp, const int nid) +{ + struct page *page = alloc_pages_node(nid, gfp, 0); + + return page_zpdesc(page); +} + +static inline void free_zpdesc(struct zpdesc *zpdesc) +{ + struct page *page = zpdesc_page(zpdesc); + + /* PageZsmalloc is sticky until the page is freed to the buddy. 
*/ + __free_page(page); +} + +#define ZS_PAGE_UNLOCKED 0 +#define ZS_PAGE_WRLOCKED -1 + +struct zspage_lock { spinlock_t lock; + int cnt; + struct lockdep_map dep_map; }; struct zspage { @@ -260,32 +261,93 @@ struct zspage { unsigned int huge:HUGE_BITS; unsigned int fullness:FULLNESS_BITS; unsigned int class:CLASS_BITS + 1; - unsigned int isolated:ISOLATED_BITS; unsigned int magic:MAGIC_VAL_BITS; }; unsigned int inuse; unsigned int freeobj; - struct page *first_page; + struct zpdesc *first_zpdesc; struct list_head list; /* fullness list */ - -#ifdef CONFIG_ZPOOL - /* links the zspage to the lru list in the pool */ - struct list_head lru; - bool under_reclaim; - /* list of unfreed handles whose objects have been reclaimed */ - unsigned long *deferred_handles; -#endif - struct zs_pool *pool; - rwlock_t lock; + struct zspage_lock zsl; }; -struct mapping_area { - local_lock_t lock; - char *vm_buf; /* copy buffer for objects that span pages */ - char *vm_addr; /* address of kmap_atomic()'ed pages */ - enum zs_mapmode vm_mm; /* mapping mode */ -}; +static void zspage_lock_init(struct zspage *zspage) +{ + static struct lock_class_key __key; + struct zspage_lock *zsl = &zspage->zsl; + + lockdep_init_map(&zsl->dep_map, "zspage->lock", &__key, 0); + spin_lock_init(&zsl->lock); + zsl->cnt = ZS_PAGE_UNLOCKED; +} + +/* + * The zspage lock can be held from atomic contexts, but it needs to remain + * preemptible when held for reading because it remains held outside of those + * atomic contexts, otherwise we unnecessarily lose preemptibility. + * + * To achieve this, the following rules are enforced on readers and writers: + * + * - Writers are blocked by both writers and readers, while readers are only + * blocked by writers (i.e. normal rwlock semantics). + * + * - Writers are always atomic (to allow readers to spin waiting for them). + * + * - Writers always use trylock (as the lock may be held be sleeping readers). + * + * - Readers may spin on the lock (as they can only wait for atomic writers). + * + * - Readers may sleep while holding the lock (as writes only use trylock). 
+ */ +static void zspage_read_lock(struct zspage *zspage) +{ + struct zspage_lock *zsl = &zspage->zsl; + + rwsem_acquire_read(&zsl->dep_map, 0, 0, _RET_IP_); + + spin_lock(&zsl->lock); + zsl->cnt++; + spin_unlock(&zsl->lock); + + lock_acquired(&zsl->dep_map, _RET_IP_); +} + +static void zspage_read_unlock(struct zspage *zspage) +{ + struct zspage_lock *zsl = &zspage->zsl; + + rwsem_release(&zsl->dep_map, _RET_IP_); + + spin_lock(&zsl->lock); + zsl->cnt--; + spin_unlock(&zsl->lock); +} + +static __must_check bool zspage_write_trylock(struct zspage *zspage) +{ + struct zspage_lock *zsl = &zspage->zsl; + + spin_lock(&zsl->lock); + if (zsl->cnt == ZS_PAGE_UNLOCKED) { + zsl->cnt = ZS_PAGE_WRLOCKED; + rwsem_acquire(&zsl->dep_map, 0, 1, _RET_IP_); + lock_acquired(&zsl->dep_map, _RET_IP_); + return true; + } + + spin_unlock(&zsl->lock); + return false; +} + +static void zspage_write_unlock(struct zspage *zspage) +{ + struct zspage_lock *zsl = &zspage->zsl; + + rwsem_release(&zsl->dep_map, _RET_IP_); + + zsl->cnt = ZS_PAGE_UNLOCKED; + spin_unlock(&zsl->lock); +} /* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */ static void SetZsHugePage(struct zspage *zspage) @@ -298,21 +360,11 @@ static bool ZsHugePage(struct zspage *zspage) return zspage->huge; } -static void migrate_lock_init(struct zspage *zspage); -static void migrate_read_lock(struct zspage *zspage); -static void migrate_read_unlock(struct zspage *zspage); - #ifdef CONFIG_COMPACTION -static void migrate_write_lock(struct zspage *zspage); -static void migrate_write_lock_nested(struct zspage *zspage); -static void migrate_write_unlock(struct zspage *zspage); static void kick_deferred_free(struct zs_pool *pool); static void init_deferred_free(struct zs_pool *pool); static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage); #else -static void migrate_write_lock(struct zspage *zspage) {} -static void migrate_write_lock_nested(struct zspage *zspage) {} -static void migrate_write_unlock(struct zspage *zspage) {} static void kick_deferred_free(struct zs_pool *pool) {} static void init_deferred_free(struct zs_pool *pool) {} static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {} @@ -320,17 +372,27 @@ static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {} static int create_cache(struct zs_pool *pool) { - pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE, - 0, 0, NULL); + char *name; + + name = kasprintf(GFP_KERNEL, "zs_handle-%s", pool->name); + if (!name) + return -ENOMEM; + pool->handle_cachep = kmem_cache_create(name, ZS_HANDLE_SIZE, + 0, 0, NULL); + kfree(name); if (!pool->handle_cachep) - return 1; + return -EINVAL; - pool->zspage_cachep = kmem_cache_create("zspage", sizeof(struct zspage), - 0, 0, NULL); + name = kasprintf(GFP_KERNEL, "zspage-%s", pool->name); + if (!name) + return -ENOMEM; + pool->zspage_cachep = kmem_cache_create(name, sizeof(struct zspage), + 0, 0, NULL); + kfree(name); if (!pool->zspage_cachep) { kmem_cache_destroy(pool->handle_cachep); pool->handle_cachep = NULL; - return 1; + return -EINVAL; } return 0; @@ -364,160 +426,52 @@ static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage) kmem_cache_free(pool->zspage_cachep, zspage); } -/* pool->lock(which owns the handle) synchronizes races */ +/* class->lock(which owns the handle) synchronizes races */ static void record_obj(unsigned long handle, unsigned long obj) { *(unsigned long *)handle = obj; } -/* zpool driver */ - -#ifdef CONFIG_ZPOOL - -static void 
*zs_zpool_create(const char *name, gfp_t gfp, - const struct zpool_ops *zpool_ops, - struct zpool *zpool) -{ - /* - * Ignore global gfp flags: zs_malloc() may be invoked from - * different contexts and its caller must provide a valid - * gfp mask. - */ - struct zs_pool *pool = zs_create_pool(name); - - if (pool) { - pool->zpool = zpool; - pool->zpool_ops = zpool_ops; - } - - return pool; -} - -static void zs_zpool_destroy(void *pool) -{ - zs_destroy_pool(pool); -} - -static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp, - unsigned long *handle) -{ - *handle = zs_malloc(pool, size, gfp); - - if (IS_ERR_VALUE(*handle)) - return PTR_ERR((void *)*handle); - return 0; -} -static void zs_zpool_free(void *pool, unsigned long handle) -{ - zs_free(pool, handle); -} - -static int zs_reclaim_page(struct zs_pool *pool, unsigned int retries); - -static int zs_zpool_shrink(void *pool, unsigned int pages, - unsigned int *reclaimed) -{ - unsigned int total = 0; - int ret = -EINVAL; - - while (total < pages) { - ret = zs_reclaim_page(pool, 8); - if (ret < 0) - break; - total++; - } - - if (reclaimed) - *reclaimed = total; - - return ret; -} - -static void *zs_zpool_map(void *pool, unsigned long handle, - enum zpool_mapmode mm) +static inline bool __maybe_unused is_first_zpdesc(struct zpdesc *zpdesc) { - enum zs_mapmode zs_mm; - - switch (mm) { - case ZPOOL_MM_RO: - zs_mm = ZS_MM_RO; - break; - case ZPOOL_MM_WO: - zs_mm = ZS_MM_WO; - break; - case ZPOOL_MM_RW: - default: - zs_mm = ZS_MM_RW; - break; - } - - return zs_map_object(pool, handle, zs_mm); + return PagePrivate(zpdesc_page(zpdesc)); } -static void zs_zpool_unmap(void *pool, unsigned long handle) -{ - zs_unmap_object(pool, handle); -} - -static u64 zs_zpool_total_size(void *pool) -{ - return zs_get_total_pages(pool) << PAGE_SHIFT; -} - -static struct zpool_driver zs_zpool_driver = { - .type = "zsmalloc", - .owner = THIS_MODULE, - .create = zs_zpool_create, - .destroy = zs_zpool_destroy, - .malloc_support_movable = true, - .malloc = zs_zpool_malloc, - .free = zs_zpool_free, - .shrink = zs_zpool_shrink, - .map = zs_zpool_map, - .unmap = zs_zpool_unmap, - .total_size = zs_zpool_total_size, -}; -MODULE_ALIAS("zpool-zsmalloc"); -#endif /* CONFIG_ZPOOL */ - -/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */ -static DEFINE_PER_CPU(struct mapping_area, zs_map_area) = { - .lock = INIT_LOCAL_LOCK(lock), -}; - -static __maybe_unused int is_first_page(struct page *page) -{ - return PagePrivate(page); -} - -/* Protected by pool->lock */ +/* Protected by class->lock */ static inline int get_zspage_inuse(struct zspage *zspage) { return zspage->inuse; } - static inline void mod_zspage_inuse(struct zspage *zspage, int val) { zspage->inuse += val; } -static inline struct page *get_first_page(struct zspage *zspage) +static struct zpdesc *get_first_zpdesc(struct zspage *zspage) { - struct page *first_page = zspage->first_page; + struct zpdesc *first_zpdesc = zspage->first_zpdesc; - VM_BUG_ON_PAGE(!is_first_page(first_page), first_page); - return first_page; + VM_BUG_ON_PAGE(!is_first_zpdesc(first_zpdesc), zpdesc_page(first_zpdesc)); + return first_zpdesc; } -static inline unsigned int get_first_obj_offset(struct page *page) +#define FIRST_OBJ_PAGE_TYPE_MASK 0xffffff + +static inline unsigned int get_first_obj_offset(struct zpdesc *zpdesc) { - return page->page_type; + VM_WARN_ON_ONCE(!PageZsmalloc(zpdesc_page(zpdesc))); + return zpdesc->first_obj_offset & FIRST_OBJ_PAGE_TYPE_MASK; } -static inline void set_first_obj_offset(struct 
page *page, unsigned int offset) +static inline void set_first_obj_offset(struct zpdesc *zpdesc, unsigned int offset) { - page->page_type = offset; + /* With 24 bits available, we can support offsets into 16 MiB pages. */ + BUILD_BUG_ON(PAGE_SIZE > SZ_16M); + VM_WARN_ON_ONCE(!PageZsmalloc(zpdesc_page(zpdesc))); + VM_WARN_ON_ONCE(offset & ~FIRST_OBJ_PAGE_TYPE_MASK); + zpdesc->first_obj_offset &= ~FIRST_OBJ_PAGE_TYPE_MASK; + zpdesc->first_obj_offset |= offset & FIRST_OBJ_PAGE_TYPE_MASK; } static inline unsigned int get_freeobj(struct zspage *zspage) @@ -530,30 +484,12 @@ static inline void set_freeobj(struct zspage *zspage, unsigned int obj) zspage->freeobj = obj; } -static void get_zspage_mapping(struct zspage *zspage, - unsigned int *class_idx, - enum fullness_group *fullness) -{ - BUG_ON(zspage->magic != ZSPAGE_MAGIC); - - *fullness = zspage->fullness; - *class_idx = zspage->class; -} - static struct size_class *zspage_class(struct zs_pool *pool, - struct zspage *zspage) + struct zspage *zspage) { return pool->size_class[zspage->class]; } -static void set_zspage_mapping(struct zspage *zspage, - unsigned int class_idx, - enum fullness_group fullness) -{ - zspage->class = class_idx; - zspage->fullness = fullness; -} - /* * zsmalloc divides the pool into various size classes where each * class maintains a list of zspages where each zspage is divided @@ -572,23 +508,19 @@ static int get_size_class_index(int size) return min_t(int, ZS_SIZE_CLASSES - 1, idx); } -/* type can be of enum type class_stat_type or fullness_group */ -static inline void class_stat_inc(struct size_class *class, - int type, unsigned long cnt) +static inline void class_stat_add(struct size_class *class, int type, + unsigned long cnt) { class->stats.objs[type] += cnt; } -/* type can be of enum type class_stat_type or fullness_group */ -static inline void class_stat_dec(struct size_class *class, - int type, unsigned long cnt) +static inline void class_stat_sub(struct size_class *class, int type, + unsigned long cnt) { class->stats.objs[type] -= cnt; } -/* type can be of enum type class_stat_type or fullness_group */ -static inline unsigned long zs_stat_get(struct size_class *class, - int type) +static inline unsigned long class_stat_read(struct size_class *class, int type) { return class->stats.objs[type]; } @@ -614,47 +546,49 @@ static unsigned long zs_can_compact(struct size_class *class); static int zs_stats_size_show(struct seq_file *s, void *v) { - int i; + int i, fg; struct zs_pool *pool = s->private; struct size_class *class; int objs_per_zspage; - unsigned long class_almost_full, class_almost_empty; unsigned long obj_allocated, obj_used, pages_used, freeable; - unsigned long total_class_almost_full = 0, total_class_almost_empty = 0; unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0; unsigned long total_freeable = 0; + unsigned long inuse_totals[NR_FULLNESS_GROUPS] = {0, }; - seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s %8s\n", - "class", "size", "almost_full", "almost_empty", + seq_printf(s, " %5s %5s %9s %9s %9s %9s %9s %9s %9s %9s %9s %9s %9s %13s %10s %10s %16s %8s\n", + "class", "size", "10%", "20%", "30%", "40%", + "50%", "60%", "70%", "80%", "90%", "99%", "100%", "obj_allocated", "obj_used", "pages_used", "pages_per_zspage", "freeable"); for (i = 0; i < ZS_SIZE_CLASSES; i++) { + class = pool->size_class[i]; if (class->index != i) continue; - spin_lock(&pool->lock); - class_almost_full = zs_stat_get(class, CLASS_ALMOST_FULL); - class_almost_empty = zs_stat_get(class, 
CLASS_ALMOST_EMPTY); - obj_allocated = zs_stat_get(class, OBJ_ALLOCATED); - obj_used = zs_stat_get(class, OBJ_USED); + spin_lock(&class->lock); + + seq_printf(s, " %5u %5u ", i, class->size); + for (fg = ZS_INUSE_RATIO_10; fg < NR_FULLNESS_GROUPS; fg++) { + inuse_totals[fg] += class_stat_read(class, fg); + seq_printf(s, "%9lu ", class_stat_read(class, fg)); + } + + obj_allocated = class_stat_read(class, ZS_OBJS_ALLOCATED); + obj_used = class_stat_read(class, ZS_OBJS_INUSE); freeable = zs_can_compact(class); - spin_unlock(&pool->lock); + spin_unlock(&class->lock); objs_per_zspage = class->objs_per_zspage; pages_used = obj_allocated / objs_per_zspage * class->pages_per_zspage; - seq_printf(s, " %5u %5u %11lu %12lu %13lu" - " %10lu %10lu %16d %8lu\n", - i, class->size, class_almost_full, class_almost_empty, - obj_allocated, obj_used, pages_used, - class->pages_per_zspage, freeable); + seq_printf(s, "%13lu %10lu %10lu %16d %8lu\n", + obj_allocated, obj_used, pages_used, + class->pages_per_zspage, freeable); - total_class_almost_full += class_almost_full; - total_class_almost_empty += class_almost_empty; total_objs += obj_allocated; total_used_objs += obj_used; total_pages += pages_used; @@ -662,10 +596,14 @@ static int zs_stats_size_show(struct seq_file *s, void *v) } seq_puts(s, "\n"); - seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu %16s %8lu\n", - "Total", "", total_class_almost_full, - total_class_almost_empty, total_objs, - total_used_objs, total_pages, "", total_freeable); + seq_printf(s, " %5s %5s ", "Total", ""); + + for (fg = ZS_INUSE_RATIO_10; fg < NR_FULLNESS_GROUPS; fg++) + seq_printf(s, "%9lu ", inuse_totals[fg]); + + seq_printf(s, "%13lu %10lu %10lu %16s %8lu\n", + total_objs, total_used_objs, total_pages, "", + total_freeable); return 0; } @@ -710,30 +648,28 @@ static inline void zs_pool_stat_destroy(struct zs_pool *pool) /* * For each size class, zspages are divided into different groups - * depending on how "full" they are. This was done so that we could - * easily find empty or nearly empty zspages when we try to shrink - * the pool (not yet implemented). This function returns fullness + * depending on their usage ratio. This function returns fullness * status of the given page. */ -static enum fullness_group get_fullness_group(struct size_class *class, - struct zspage *zspage) +static int get_fullness_group(struct size_class *class, struct zspage *zspage) { - int inuse, objs_per_zspage; - enum fullness_group fg; + int inuse, objs_per_zspage, ratio; inuse = get_zspage_inuse(zspage); objs_per_zspage = class->objs_per_zspage; if (inuse == 0) - fg = ZS_EMPTY; - else if (inuse == objs_per_zspage) - fg = ZS_FULL; - else if (inuse <= 3 * objs_per_zspage / fullness_threshold_frac) - fg = ZS_ALMOST_EMPTY; - else - fg = ZS_ALMOST_FULL; + return ZS_INUSE_RATIO_0; + if (inuse == objs_per_zspage) + return ZS_INUSE_RATIO_100; - return fg; + ratio = 100 * inuse / objs_per_zspage; + /* + * Take integer division into consideration: a page with one inuse + * object out of 127 possible, will end up having 0 usage ratio, + * which is wrong as it belongs in ZS_INUSE_RATIO_10 fullness group. 
+ */ + return ratio / 10 + 1; } /* @@ -744,150 +680,97 @@ static enum fullness_group get_fullness_group(struct size_class *class, */ static void insert_zspage(struct size_class *class, struct zspage *zspage, - enum fullness_group fullness) + int fullness) { - struct zspage *head; - - class_stat_inc(class, fullness, 1); - head = list_first_entry_or_null(&class->fullness_list[fullness], - struct zspage, list); - /* - * We want to see more ZS_FULL pages and less almost empty/full. - * Put pages with higher ->inuse first. - */ - if (head && get_zspage_inuse(zspage) < get_zspage_inuse(head)) - list_add(&zspage->list, &head->list); - else - list_add(&zspage->list, &class->fullness_list[fullness]); + class_stat_add(class, fullness, 1); + list_add(&zspage->list, &class->fullness_list[fullness]); + zspage->fullness = fullness; } /* * This function removes the given zspage from the freelist identified * by <class, fullness_group>. */ -static void remove_zspage(struct size_class *class, - struct zspage *zspage, - enum fullness_group fullness) +static void remove_zspage(struct size_class *class, struct zspage *zspage) { + int fullness = zspage->fullness; + VM_BUG_ON(list_empty(&class->fullness_list[fullness])); list_del_init(&zspage->list); - class_stat_dec(class, fullness, 1); + class_stat_sub(class, fullness, 1); } /* * Each size class maintains zspages in different fullness groups depending * on the number of live objects they contain. When allocating or freeing - * objects, the fullness status of the page can change, say, from ALMOST_FULL - * to ALMOST_EMPTY when freeing an object. This function checks if such - * a status change has occurred for the given page and accordingly moves the - * page from the freelist of the old fullness group to that of the new - * fullness group. + * objects, the fullness status of the page can change, for instance, from + * INUSE_RATIO_80 to INUSE_RATIO_70 when freeing an object. This function + * checks if such a status change has occurred for the given page and + * accordingly moves the page from the list of the old fullness group to that + * of the new fullness group. */ -static enum fullness_group fix_fullness_group(struct size_class *class, - struct zspage *zspage) +static int fix_fullness_group(struct size_class *class, struct zspage *zspage) { - int class_idx; - enum fullness_group currfg, newfg; + int newfg; - get_zspage_mapping(zspage, &class_idx, &currfg); newfg = get_fullness_group(class, zspage); - if (newfg == currfg) + if (newfg == zspage->fullness) goto out; - remove_zspage(class, zspage, currfg); + remove_zspage(class, zspage); insert_zspage(class, zspage, newfg); - set_zspage_mapping(zspage, class_idx, newfg); out: return newfg; } -/* - * We have to decide on how many pages to link together - * to form a zspage for each size class. This is important - * to reduce wastage due to unusable space left at end of - * each zspage which is given as: - * wastage = Zp % class_size - * usage = Zp - wastage - * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ... - * - * For example, for size class of 3/8 * PAGE_SIZE, we should - * link together 3 PAGE_SIZE sized pages to form a zspage - * since then we can perfectly fit in 8 such objects. 
- */ -static int get_pages_per_zspage(int class_size) -{ - int i, max_usedpc = 0; - /* zspage order which gives maximum used size per KB */ - int max_usedpc_order = 1; - - for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) { - int zspage_size; - int waste, usedpc; - - zspage_size = i * PAGE_SIZE; - waste = zspage_size % class_size; - usedpc = (zspage_size - waste) * 100 / zspage_size; - - if (usedpc > max_usedpc) { - max_usedpc = usedpc; - max_usedpc_order = i; - } - } - - return max_usedpc_order; -} - -static struct zspage *get_zspage(struct page *page) +static struct zspage *get_zspage(struct zpdesc *zpdesc) { - struct zspage *zspage = (struct zspage *)page_private(page); + struct zspage *zspage = zpdesc->zspage; BUG_ON(zspage->magic != ZSPAGE_MAGIC); return zspage; } -static struct page *get_next_page(struct page *page) +static struct zpdesc *get_next_zpdesc(struct zpdesc *zpdesc) { - struct zspage *zspage = get_zspage(page); + struct zspage *zspage = get_zspage(zpdesc); if (unlikely(ZsHugePage(zspage))) return NULL; - return (struct page *)page->index; + return zpdesc->next; } /** - * obj_to_location - get (<page>, <obj_idx>) from encoded object value + * obj_to_location - get (<zpdesc>, <obj_idx>) from encoded object value * @obj: the encoded object value - * @page: page object resides in zspage + * @zpdesc: zpdesc object resides in zspage * @obj_idx: object index */ -static void obj_to_location(unsigned long obj, struct page **page, +static void obj_to_location(unsigned long obj, struct zpdesc **zpdesc, unsigned int *obj_idx) { - obj >>= OBJ_TAG_BITS; - *page = pfn_to_page(obj >> OBJ_INDEX_BITS); + *zpdesc = pfn_zpdesc(obj >> OBJ_INDEX_BITS); *obj_idx = (obj & OBJ_INDEX_MASK); } -static void obj_to_page(unsigned long obj, struct page **page) +static void obj_to_zpdesc(unsigned long obj, struct zpdesc **zpdesc) { - obj >>= OBJ_TAG_BITS; - *page = pfn_to_page(obj >> OBJ_INDEX_BITS); + *zpdesc = pfn_zpdesc(obj >> OBJ_INDEX_BITS); } /** - * location_to_obj - get obj value encoded from (<page>, <obj_idx>) - * @page: page object resides in zspage + * location_to_obj - get obj value encoded from (<zpdesc>, <obj_idx>) + * @zpdesc: zpdesc object resides in zspage * @obj_idx: object index */ -static unsigned long location_to_obj(struct page *page, unsigned int obj_idx) +static unsigned long location_to_obj(struct zpdesc *zpdesc, unsigned int obj_idx) { unsigned long obj; - obj = page_to_pfn(page) << OBJ_INDEX_BITS; + obj = zpdesc_pfn(zpdesc) << OBJ_INDEX_BITS; obj |= obj_idx & OBJ_INDEX_MASK; - obj <<= OBJ_TAG_BITS; return obj; } @@ -897,40 +780,43 @@ static unsigned long handle_to_obj(unsigned long handle) return *(unsigned long *)handle; } -static bool obj_allocated(struct page *page, void *obj, unsigned long *phandle) +static inline bool obj_allocated(struct zpdesc *zpdesc, void *obj, + unsigned long *phandle) { unsigned long handle; - struct zspage *zspage = get_zspage(page); + struct zspage *zspage = get_zspage(zpdesc); if (unlikely(ZsHugePage(zspage))) { - VM_BUG_ON_PAGE(!is_first_page(page), page); - handle = page->index; + VM_BUG_ON_PAGE(!is_first_zpdesc(zpdesc), zpdesc_page(zpdesc)); + handle = zpdesc->handle; } else handle = *(unsigned long *)obj; if (!(handle & OBJ_ALLOCATED_TAG)) return false; - *phandle = handle & ~OBJ_ALLOCATED_TAG; + /* Clear all tags before returning the handle */ + *phandle = handle & ~OBJ_TAG_MASK; return true; } -static void reset_page(struct page *page) +static void reset_zpdesc(struct zpdesc *zpdesc) { - __ClearPageMovable(page); + struct page *page = 
zpdesc_page(zpdesc); + ClearPagePrivate(page); - set_page_private(page, 0); - page_mapcount_reset(page); - page->index = 0; + zpdesc->zspage = NULL; + zpdesc->next = NULL; + /* PageZsmalloc is sticky until the page is freed to the buddy. */ } static int trylock_zspage(struct zspage *zspage) { - struct page *cursor, *fail; + struct zpdesc *cursor, *fail; - for (cursor = get_first_page(zspage); cursor != NULL; cursor = - get_next_page(cursor)) { - if (!trylock_page(cursor)) { + for (cursor = get_first_zpdesc(zspage); cursor != NULL; cursor = + get_next_zpdesc(cursor)) { + if (!zpdesc_trylock(cursor)) { fail = cursor; goto unlock; } @@ -938,65 +824,38 @@ static int trylock_zspage(struct zspage *zspage) return 1; unlock: - for (cursor = get_first_page(zspage); cursor != fail; cursor = - get_next_page(cursor)) - unlock_page(cursor); + for (cursor = get_first_zpdesc(zspage); cursor != fail; cursor = + get_next_zpdesc(cursor)) + zpdesc_unlock(cursor); return 0; } -#ifdef CONFIG_ZPOOL -/* - * Free all the deferred handles whose objects are freed in zs_free. - */ -static void free_handles(struct zs_pool *pool, struct zspage *zspage) -{ - unsigned long handle = (unsigned long)zspage->deferred_handles; - - while (handle) { - unsigned long nxt_handle = handle_to_obj(handle); - - cache_free_handle(pool, handle); - handle = nxt_handle; - } -} -#else -static inline void free_handles(struct zs_pool *pool, struct zspage *zspage) {} -#endif - static void __free_zspage(struct zs_pool *pool, struct size_class *class, struct zspage *zspage) { - struct page *page, *next; - enum fullness_group fg; - unsigned int class_idx; - - get_zspage_mapping(zspage, &class_idx, &fg); + struct zpdesc *zpdesc, *next; - assert_spin_locked(&pool->lock); + assert_spin_locked(&class->lock); VM_BUG_ON(get_zspage_inuse(zspage)); - VM_BUG_ON(fg != ZS_EMPTY); + VM_BUG_ON(zspage->fullness != ZS_INUSE_RATIO_0); - /* Free all deferred handles from zs_free */ - free_handles(pool, zspage); - - next = page = get_first_page(zspage); + next = zpdesc = get_first_zpdesc(zspage); do { - VM_BUG_ON_PAGE(!PageLocked(page), page); - next = get_next_page(page); - reset_page(page); - unlock_page(page); - dec_zone_page_state(page, NR_ZSPAGES); - put_page(page); - page = next; - } while (page != NULL); + VM_BUG_ON_PAGE(!zpdesc_is_locked(zpdesc), zpdesc_page(zpdesc)); + next = get_next_zpdesc(zpdesc); + reset_zpdesc(zpdesc); + zpdesc_unlock(zpdesc); + zpdesc_dec_zone_page_state(zpdesc); + zpdesc_put(zpdesc); + zpdesc = next; + } while (zpdesc != NULL); cache_free_zspage(pool, zspage); - class_stat_dec(class, OBJ_ALLOCATED, class->objs_per_zspage); - atomic_long_sub(class->pages_per_zspage, - &pool->pages_allocated); + class_stat_sub(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage); + atomic_long_sub(class->pages_per_zspage, &pool->pages_allocated); } static void free_zspage(struct zs_pool *pool, struct size_class *class, @@ -1015,10 +874,7 @@ static void free_zspage(struct zs_pool *pool, struct size_class *class, return; } - remove_zspage(class, zspage, ZS_EMPTY); -#ifdef CONFIG_ZPOOL - list_del(&zspage->lru); -#endif + remove_zspage(class, zspage); __free_zspage(pool, class, zspage); } @@ -1027,16 +883,16 @@ static void init_zspage(struct size_class *class, struct zspage *zspage) { unsigned int freeobj = 1; unsigned long off = 0; - struct page *page = get_first_page(zspage); + struct zpdesc *zpdesc = get_first_zpdesc(zspage); - while (page) { - struct page *next_page; + while (zpdesc) { + struct zpdesc *next_zpdesc; struct link_free *link; void *vaddr; 
- set_first_obj_offset(page, off); + set_first_obj_offset(zpdesc, off); - vaddr = kmap_atomic(page); + vaddr = kmap_local_zpdesc(zpdesc); link = (struct link_free *)vaddr + off / sizeof(*link); while ((off += class->size) < PAGE_SIZE) { @@ -1049,8 +905,8 @@ static void init_zspage(struct size_class *class, struct zspage *zspage) * page, which must point to the first object on the next * page (if present) */ - next_page = get_next_page(page); - if (next_page) { + next_zpdesc = get_next_zpdesc(zpdesc); + if (next_zpdesc) { link->next = freeobj++ << OBJ_TAG_BITS; } else { /* @@ -1059,50 +915,44 @@ static void init_zspage(struct size_class *class, struct zspage *zspage) */ link->next = -1UL << OBJ_TAG_BITS; } - kunmap_atomic(vaddr); - page = next_page; + kunmap_local(vaddr); + zpdesc = next_zpdesc; off %= PAGE_SIZE; } -#ifdef CONFIG_ZPOOL - INIT_LIST_HEAD(&zspage->lru); - zspage->under_reclaim = false; - zspage->deferred_handles = NULL; -#endif - set_freeobj(zspage, 0); } static void create_page_chain(struct size_class *class, struct zspage *zspage, - struct page *pages[]) + struct zpdesc *zpdescs[]) { int i; - struct page *page; - struct page *prev_page = NULL; - int nr_pages = class->pages_per_zspage; + struct zpdesc *zpdesc; + struct zpdesc *prev_zpdesc = NULL; + int nr_zpdescs = class->pages_per_zspage; /* * Allocate individual pages and link them together as: - * 1. all pages are linked together using page->index - * 2. each sub-page point to zspage using page->private + * 1. all pages are linked together using zpdesc->next + * 2. each sub-page point to zspage using zpdesc->zspage * - * we set PG_private to identify the first page (i.e. no other sub-page + * we set PG_private to identify the first zpdesc (i.e. no other zpdesc * has this flag set). 
*/ - for (i = 0; i < nr_pages; i++) { - page = pages[i]; - set_page_private(page, (unsigned long)zspage); - page->index = 0; + for (i = 0; i < nr_zpdescs; i++) { + zpdesc = zpdescs[i]; + zpdesc->zspage = zspage; + zpdesc->next = NULL; if (i == 0) { - zspage->first_page = page; - SetPagePrivate(page); + zspage->first_zpdesc = zpdesc; + zpdesc_set_first(zpdesc); if (unlikely(class->objs_per_zspage == 1 && class->pages_per_zspage == 1)) SetZsHugePage(zspage); } else { - prev_page->index = (unsigned long)page; + prev_zpdesc->next = zpdesc; } - prev_page = page; + prev_zpdesc = zpdesc; } } @@ -1110,39 +960,44 @@ static void create_page_chain(struct size_class *class, struct zspage *zspage, * Allocate a zspage for the given size class */ static struct zspage *alloc_zspage(struct zs_pool *pool, - struct size_class *class, - gfp_t gfp) + struct size_class *class, + gfp_t gfp, const int nid) { int i; - struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE]; + struct zpdesc *zpdescs[ZS_MAX_PAGES_PER_ZSPAGE]; struct zspage *zspage = cache_alloc_zspage(pool, gfp); if (!zspage) return NULL; + if (!IS_ENABLED(CONFIG_COMPACTION)) + gfp &= ~__GFP_MOVABLE; + zspage->magic = ZSPAGE_MAGIC; - migrate_lock_init(zspage); + zspage->pool = pool; + zspage->class = class->index; + zspage_lock_init(zspage); for (i = 0; i < class->pages_per_zspage; i++) { - struct page *page; + struct zpdesc *zpdesc; - page = alloc_page(gfp); - if (!page) { + zpdesc = alloc_zpdesc(gfp, nid); + if (!zpdesc) { while (--i >= 0) { - dec_zone_page_state(pages[i], NR_ZSPAGES); - __free_page(pages[i]); + zpdesc_dec_zone_page_state(zpdescs[i]); + free_zpdesc(zpdescs[i]); } cache_free_zspage(pool, zspage); return NULL; } + __zpdesc_set_zsmalloc(zpdesc); - inc_zone_page_state(page, NR_ZSPAGES); - pages[i] = page; + zpdesc_inc_zone_page_state(zpdesc); + zpdescs[i] = zpdesc; } - create_page_chain(class, zspage, pages); + create_page_chain(class, zspage, zpdescs); init_zspage(class, zspage); - zspage->pool = pool; return zspage; } @@ -1152,9 +1007,9 @@ static struct zspage *find_get_zspage(struct size_class *class) int i; struct zspage *zspage; - for (i = ZS_ALMOST_FULL; i >= ZS_EMPTY; i--) { + for (i = ZS_INUSE_RATIO_99; i >= ZS_INUSE_RATIO_0; i--) { zspage = list_first_entry_or_null(&class->fullness_list[i], - struct zspage, list); + struct zspage, list); if (zspage) break; } @@ -1162,103 +1017,6 @@ static struct zspage *find_get_zspage(struct size_class *class) return zspage; } -static inline int __zs_cpu_up(struct mapping_area *area) -{ - /* - * Make sure we don't leak memory if a cpu UP notification - * and zs_init() race and both call zs_cpu_up() on the same cpu - */ - if (area->vm_buf) - return 0; - area->vm_buf = kmalloc(ZS_MAX_ALLOC_SIZE, GFP_KERNEL); - if (!area->vm_buf) - return -ENOMEM; - return 0; -} - -static inline void __zs_cpu_down(struct mapping_area *area) -{ - kfree(area->vm_buf); - area->vm_buf = NULL; -} - -static void *__zs_map_object(struct mapping_area *area, - struct page *pages[2], int off, int size) -{ - int sizes[2]; - void *addr; - char *buf = area->vm_buf; - - /* disable page faults to match kmap_atomic() return conditions */ - pagefault_disable(); - - /* no read fastpath */ - if (area->vm_mm == ZS_MM_WO) - goto out; - - sizes[0] = PAGE_SIZE - off; - sizes[1] = size - sizes[0]; - - /* copy object to per-cpu buffer */ - addr = kmap_atomic(pages[0]); - memcpy(buf, addr + off, sizes[0]); - kunmap_atomic(addr); - addr = kmap_atomic(pages[1]); - memcpy(buf + sizes[0], addr, sizes[1]); - kunmap_atomic(addr); -out: - return 
area->vm_buf; -} - -static void __zs_unmap_object(struct mapping_area *area, - struct page *pages[2], int off, int size) -{ - int sizes[2]; - void *addr; - char *buf; - - /* no write fastpath */ - if (area->vm_mm == ZS_MM_RO) - goto out; - - buf = area->vm_buf; - buf = buf + ZS_HANDLE_SIZE; - size -= ZS_HANDLE_SIZE; - off += ZS_HANDLE_SIZE; - - sizes[0] = PAGE_SIZE - off; - sizes[1] = size - sizes[0]; - - /* copy per-cpu buffer to object */ - addr = kmap_atomic(pages[0]); - memcpy(addr + off, buf, sizes[0]); - kunmap_atomic(addr); - addr = kmap_atomic(pages[1]); - memcpy(addr, buf + sizes[0], sizes[1]); - kunmap_atomic(addr); - -out: - /* enable page faults to match kunmap_atomic() return conditions */ - pagefault_enable(); -} - -static int zs_cpu_prepare(unsigned int cpu) -{ - struct mapping_area *area; - - area = &per_cpu(zs_map_area, cpu); - return __zs_cpu_up(area); -} - -static int zs_cpu_dead(unsigned int cpu) -{ - struct mapping_area *area; - - area = &per_cpu(zs_map_area, cpu); - __zs_cpu_down(area); - return 0; -} - static bool can_merge(struct size_class *prev, int pages_per_zspage, int objs_per_zspage) { @@ -1274,6 +1032,11 @@ static bool zspage_full(struct size_class *class, struct zspage *zspage) return get_zspage_inuse(zspage) == class->objs_per_zspage; } +static bool zspage_empty(struct zspage *zspage) +{ + return get_zspage_inuse(zspage) == 0; +} + /** * zs_lookup_class_index() - Returns index of the zsmalloc &size_class * that hold objects of the provided size. @@ -1301,141 +1064,130 @@ unsigned long zs_get_total_pages(struct zs_pool *pool) } EXPORT_SYMBOL_GPL(zs_get_total_pages); -/** - * zs_map_object - get address of allocated object from handle. - * @pool: pool from which the object was allocated - * @handle: handle returned from zs_malloc - * @mm: mapping mode to use - * - * Before using an object allocated from zs_malloc, it must be mapped using - * this function. When done with the object, it must be unmapped using - * zs_unmap_object. - * - * Only one object can be mapped per cpu at a time. There is no protection - * against nested mappings. - * - * This function returns with preemption and page faults disabled. - */ -void *zs_map_object(struct zs_pool *pool, unsigned long handle, - enum zs_mapmode mm) +void *zs_obj_read_begin(struct zs_pool *pool, unsigned long handle, + void *local_copy) { struct zspage *zspage; - struct page *page; + struct zpdesc *zpdesc; unsigned long obj, off; unsigned int obj_idx; - struct size_class *class; - struct mapping_area *area; - struct page *pages[2]; - void *ret; - - /* - * Because we use per-cpu mapping areas shared among the - * pools/users, we can't allow mapping in interrupt context - * because it can corrupt another users mappings. - */ - BUG_ON(in_interrupt()); + void *addr; - /* It guarantees it can get zspage from handle safely */ - spin_lock(&pool->lock); + /* Guarantee we can get zspage from handle safely */ + read_lock(&pool->lock); obj = handle_to_obj(handle); - obj_to_location(obj, &page, &obj_idx); - zspage = get_zspage(page); - -#ifdef CONFIG_ZPOOL - /* - * Move the zspage to front of pool's LRU. - * - * Note that this is swap-specific, so by definition there are no ongoing - * accesses to the memory while the page is swapped out that would make - * it "hot". A new entry is hot, then ages to the tail until it gets either - * written back or swaps back in. - * - * Furthermore, map is also called during writeback. We must not put an - * isolated page on the LRU mid-reclaim. 
- * - * As a result, only update the LRU when the page is mapped for write - * when it's first instantiated. - * - * This is a deviation from the other backends, which perform this update - * in the allocation function (zbud_alloc, z3fold_alloc). - */ - if (mm == ZS_MM_WO) { - if (!list_empty(&zspage->lru)) - list_del(&zspage->lru); - list_add(&zspage->lru, &pool->lru); - } -#endif + obj_to_location(obj, &zpdesc, &obj_idx); + zspage = get_zspage(zpdesc); - /* - * migration cannot move any zpages in this zspage. Here, pool->lock - * is too heavy since callers would take some time until they calls - * zs_unmap_object API so delegate the locking from class to zspage - * which is smaller granularity. - */ - migrate_read_lock(zspage); - spin_unlock(&pool->lock); + /* Make sure migration doesn't move any pages in this zspage */ + zspage_read_lock(zspage); + read_unlock(&pool->lock); class = zspage_class(pool, zspage); - off = (class->size * obj_idx) & ~PAGE_MASK; + off = offset_in_page(class->size * obj_idx); - local_lock(&zs_map_area.lock); - area = this_cpu_ptr(&zs_map_area); - area->vm_mm = mm; if (off + class->size <= PAGE_SIZE) { /* this object is contained entirely within a page */ - area->vm_addr = kmap_atomic(page); - ret = area->vm_addr + off; - goto out; + addr = kmap_local_zpdesc(zpdesc); + addr += off; + } else { + size_t sizes[2]; + + /* this object spans two pages */ + sizes[0] = PAGE_SIZE - off; + sizes[1] = class->size - sizes[0]; + addr = local_copy; + + memcpy_from_page(addr, zpdesc_page(zpdesc), + off, sizes[0]); + zpdesc = get_next_zpdesc(zpdesc); + memcpy_from_page(addr + sizes[0], + zpdesc_page(zpdesc), + 0, sizes[1]); } - /* this object spans two pages */ - pages[0] = page; - pages[1] = get_next_page(page); - BUG_ON(!pages[1]); - - ret = __zs_map_object(area, pages, off, class->size); -out: - if (likely(!ZsHugePage(zspage))) - ret += ZS_HANDLE_SIZE; + if (!ZsHugePage(zspage)) + addr += ZS_HANDLE_SIZE; - return ret; + return addr; } -EXPORT_SYMBOL_GPL(zs_map_object); +EXPORT_SYMBOL_GPL(zs_obj_read_begin); -void zs_unmap_object(struct zs_pool *pool, unsigned long handle) +void zs_obj_read_end(struct zs_pool *pool, unsigned long handle, + void *handle_mem) { struct zspage *zspage; - struct page *page; + struct zpdesc *zpdesc; unsigned long obj, off; unsigned int obj_idx; - struct size_class *class; - struct mapping_area *area; obj = handle_to_obj(handle); - obj_to_location(obj, &page, &obj_idx); - zspage = get_zspage(page); + obj_to_location(obj, &zpdesc, &obj_idx); + zspage = get_zspage(zpdesc); class = zspage_class(pool, zspage); - off = (class->size * obj_idx) & ~PAGE_MASK; + off = offset_in_page(class->size * obj_idx); - area = this_cpu_ptr(&zs_map_area); - if (off + class->size <= PAGE_SIZE) - kunmap_atomic(area->vm_addr); - else { - struct page *pages[2]; + if (off + class->size <= PAGE_SIZE) { + if (!ZsHugePage(zspage)) + off += ZS_HANDLE_SIZE; + handle_mem -= off; + kunmap_local(handle_mem); + } - pages[0] = page; - pages[1] = get_next_page(page); - BUG_ON(!pages[1]); + zspage_read_unlock(zspage); +} +EXPORT_SYMBOL_GPL(zs_obj_read_end); - __zs_unmap_object(area, pages, off, class->size); +void zs_obj_write(struct zs_pool *pool, unsigned long handle, + void *handle_mem, size_t mem_len) +{ + struct zspage *zspage; + struct zpdesc *zpdesc; + unsigned long obj, off; + unsigned int obj_idx; + struct size_class *class; + + /* Guarantee we can get zspage from handle safely */ + read_lock(&pool->lock); + obj = handle_to_obj(handle); + obj_to_location(obj, &zpdesc, &obj_idx); 
+ zspage = get_zspage(zpdesc); + + /* Make sure migration doesn't move any pages in this zspage */ + zspage_read_lock(zspage); + read_unlock(&pool->lock); + + class = zspage_class(pool, zspage); + off = offset_in_page(class->size * obj_idx); + + if (!ZsHugePage(zspage)) + off += ZS_HANDLE_SIZE; + + if (off + mem_len <= PAGE_SIZE) { + /* this object is contained entirely within a page */ + void *dst = kmap_local_zpdesc(zpdesc); + + memcpy(dst + off, handle_mem, mem_len); + kunmap_local(dst); + } else { + /* this object spans two pages */ + size_t sizes[2]; + + sizes[0] = PAGE_SIZE - off; + sizes[1] = mem_len - sizes[0]; + + memcpy_to_page(zpdesc_page(zpdesc), off, + handle_mem, sizes[0]); + zpdesc = get_next_zpdesc(zpdesc); + memcpy_to_page(zpdesc_page(zpdesc), 0, + handle_mem + sizes[0], sizes[1]); } - local_unlock(&zs_map_area.lock); - migrate_read_unlock(zspage); + zspage_read_unlock(zspage); } -EXPORT_SYMBOL_GPL(zs_unmap_object); +EXPORT_SYMBOL_GPL(zs_obj_write); /** * zs_huge_class_size() - Returns the size (in bytes) of the first huge @@ -1459,41 +1211,40 @@ EXPORT_SYMBOL_GPL(zs_huge_class_size); static unsigned long obj_malloc(struct zs_pool *pool, struct zspage *zspage, unsigned long handle) { - int i, nr_page, offset; + int i, nr_zpdesc, offset; unsigned long obj; struct link_free *link; struct size_class *class; - struct page *m_page; + struct zpdesc *m_zpdesc; unsigned long m_offset; void *vaddr; class = pool->size_class[zspage->class]; - handle |= OBJ_ALLOCATED_TAG; obj = get_freeobj(zspage); offset = obj * class->size; - nr_page = offset >> PAGE_SHIFT; - m_offset = offset & ~PAGE_MASK; - m_page = get_first_page(zspage); + nr_zpdesc = offset >> PAGE_SHIFT; + m_offset = offset_in_page(offset); + m_zpdesc = get_first_zpdesc(zspage); - for (i = 0; i < nr_page; i++) - m_page = get_next_page(m_page); + for (i = 0; i < nr_zpdesc; i++) + m_zpdesc = get_next_zpdesc(m_zpdesc); - vaddr = kmap_atomic(m_page); + vaddr = kmap_local_zpdesc(m_zpdesc); link = (struct link_free *)vaddr + m_offset / sizeof(*link); set_freeobj(zspage, link->next >> OBJ_TAG_BITS); if (likely(!ZsHugePage(zspage))) /* record handle in the header of allocated chunk */ - link->handle = handle; + link->handle = handle | OBJ_ALLOCATED_TAG; else - /* record handle to page->index */ - zspage->first_page->index = handle; + zspage->first_zpdesc->handle = handle | OBJ_ALLOCATED_TAG; - kunmap_atomic(vaddr); + kunmap_local(vaddr); mod_zspage_inuse(zspage, 1); - obj = location_to_obj(m_page, obj); + obj = location_to_obj(m_zpdesc, obj); + record_obj(handle, obj); return obj; } @@ -1504,21 +1255,26 @@ static unsigned long obj_malloc(struct zs_pool *pool, * @pool: pool to allocate from * @size: size of block to allocate * @gfp: gfp flags when allocating object + * @nid: The preferred node id to allocate new zspage (if needed) * * On success, handle to the allocated object is returned, * otherwise an ERR_PTR(). * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail. 
*/ -unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp) +unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp, + const int nid) { - unsigned long handle, obj; + unsigned long handle; struct size_class *class; - enum fullness_group newfg; + int newfg; struct zspage *zspage; - if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE)) + if (unlikely(!size)) return (unsigned long)ERR_PTR(-EINVAL); + if (unlikely(size > ZS_MAX_ALLOC_SIZE)) + return (unsigned long)ERR_PTR(-ENOSPC); + handle = cache_alloc_handle(pool, gfp); if (!handle) return (unsigned long)ERR_PTR(-ENOMEM); @@ -1527,42 +1283,38 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp) size += ZS_HANDLE_SIZE; class = pool->size_class[get_size_class_index(size)]; - /* pool->lock effectively protects the zpage migration */ - spin_lock(&pool->lock); + /* class->lock effectively protects the zpage migration */ + spin_lock(&class->lock); zspage = find_get_zspage(class); if (likely(zspage)) { - obj = obj_malloc(pool, zspage, handle); + obj_malloc(pool, zspage, handle); /* Now move the zspage to another fullness group, if required */ fix_fullness_group(class, zspage); - record_obj(handle, obj); - class_stat_inc(class, OBJ_USED, 1); - spin_unlock(&pool->lock); + class_stat_add(class, ZS_OBJS_INUSE, 1); - return handle; + goto out; } - spin_unlock(&pool->lock); + spin_unlock(&class->lock); - zspage = alloc_zspage(pool, class, gfp); + zspage = alloc_zspage(pool, class, gfp, nid); if (!zspage) { cache_free_handle(pool, handle); return (unsigned long)ERR_PTR(-ENOMEM); } - spin_lock(&pool->lock); - obj = obj_malloc(pool, zspage, handle); + spin_lock(&class->lock); + obj_malloc(pool, zspage, handle); newfg = get_fullness_group(class, zspage); insert_zspage(class, zspage, newfg); - set_zspage_mapping(zspage, class->index, newfg); - record_obj(handle, obj); - atomic_long_add(class->pages_per_zspage, - &pool->pages_allocated); - class_stat_inc(class, OBJ_ALLOCATED, class->objs_per_zspage); - class_stat_inc(class, OBJ_USED, 1); + atomic_long_add(class->pages_per_zspage, &pool->pages_allocated); + class_stat_add(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage); + class_stat_add(class, ZS_OBJS_INUSE, 1); /* We completely set up zspage so mark them as movable */ SetZsPageMovable(pool, zspage); - spin_unlock(&pool->lock); +out: + spin_unlock(&class->lock); return handle; } @@ -1572,35 +1324,37 @@ static void obj_free(int class_size, unsigned long obj) { struct link_free *link; struct zspage *zspage; - struct page *f_page; + struct zpdesc *f_zpdesc; unsigned long f_offset; unsigned int f_objidx; void *vaddr; - obj_to_location(obj, &f_page, &f_objidx); - f_offset = (class_size * f_objidx) & ~PAGE_MASK; - zspage = get_zspage(f_page); - vaddr = kmap_atomic(f_page); + obj_to_location(obj, &f_zpdesc, &f_objidx); + f_offset = offset_in_page(class_size * f_objidx); + zspage = get_zspage(f_zpdesc); - /* Insert this object in containing zspage's freelist */ + vaddr = kmap_local_zpdesc(f_zpdesc); link = (struct link_free *)(vaddr + f_offset); + + /* Insert this object in containing zspage's freelist */ if (likely(!ZsHugePage(zspage))) link->next = get_freeobj(zspage) << OBJ_TAG_BITS; else - f_page->index = 0; - kunmap_atomic(vaddr); + f_zpdesc->handle = 0; set_freeobj(zspage, f_objidx); + + kunmap_local(vaddr); mod_zspage_inuse(zspage, -1); } void zs_free(struct zs_pool *pool, unsigned long handle) { struct zspage *zspage; - struct page *f_page; + struct zpdesc *f_zpdesc; unsigned long obj; struct size_class *class; - 
enum fullness_group fullness; + int fullness; if (IS_ERR_OR_NULL((void *)handle)) return; @@ -1609,35 +1363,22 @@ void zs_free(struct zs_pool *pool, unsigned long handle) * The pool->lock protects the race with zpage's migration * so it's safe to get the page from handle. */ - spin_lock(&pool->lock); + read_lock(&pool->lock); obj = handle_to_obj(handle); - obj_to_page(obj, &f_page); - zspage = get_zspage(f_page); + obj_to_zpdesc(obj, &f_zpdesc); + zspage = get_zspage(f_zpdesc); class = zspage_class(pool, zspage); + spin_lock(&class->lock); + read_unlock(&pool->lock); + class_stat_sub(class, ZS_OBJS_INUSE, 1); obj_free(class->size, obj); - class_stat_dec(class, OBJ_USED, 1); -#ifdef CONFIG_ZPOOL - if (zspage->under_reclaim) { - /* - * Reclaim needs the handles during writeback. It'll free - * them along with the zspage when it's done with them. - * - * Record current deferred handle at the memory location - * whose address is given by handle. - */ - record_obj(handle, (unsigned long)zspage->deferred_handles); - zspage->deferred_handles = (unsigned long *)handle; - spin_unlock(&pool->lock); - return; - } -#endif fullness = fix_fullness_group(class, zspage); - if (fullness == ZS_EMPTY) + if (fullness == ZS_INUSE_RATIO_0) free_zspage(pool, class, zspage); - spin_unlock(&pool->lock); + spin_unlock(&class->lock); cache_free_handle(pool, handle); } EXPORT_SYMBOL_GPL(zs_free); @@ -1645,7 +1386,7 @@ EXPORT_SYMBOL_GPL(zs_free); static void zs_object_copy(struct size_class *class, unsigned long dst, unsigned long src) { - struct page *s_page, *d_page; + struct zpdesc *s_zpdesc, *d_zpdesc; unsigned int s_objidx, d_objidx; unsigned long s_off, d_off; void *s_addr, *d_addr; @@ -1654,11 +1395,11 @@ static void zs_object_copy(struct size_class *class, unsigned long dst, s_size = d_size = class->size; - obj_to_location(src, &s_page, &s_objidx); - obj_to_location(dst, &d_page, &d_objidx); + obj_to_location(src, &s_zpdesc, &s_objidx); + obj_to_location(dst, &d_zpdesc, &d_objidx); - s_off = (class->size * s_objidx) & ~PAGE_MASK; - d_off = (class->size * d_objidx) & ~PAGE_MASK; + s_off = offset_in_page(class->size * s_objidx); + d_off = offset_in_page(class->size * d_objidx); if (s_off + class->size > PAGE_SIZE) s_size = PAGE_SIZE - s_off; @@ -1666,8 +1407,8 @@ static void zs_object_copy(struct size_class *class, unsigned long dst, if (d_off + class->size > PAGE_SIZE) d_size = PAGE_SIZE - d_off; - s_addr = kmap_atomic(s_page); - d_addr = kmap_atomic(d_page); + s_addr = kmap_local_zpdesc(s_zpdesc); + d_addr = kmap_local_zpdesc(d_zpdesc); while (1) { size = min(s_size, d_size); @@ -1683,33 +1424,33 @@ static void zs_object_copy(struct size_class *class, unsigned long dst, d_size -= size; /* - * Calling kunmap_atomic(d_addr) is necessary. kunmap_atomic() - * calls must occurs in reverse order of calls to kmap_atomic(). - * So, to call kunmap_atomic(s_addr) we should first call - * kunmap_atomic(d_addr). For more details see + * Calling kunmap_local(d_addr) is necessary. kunmap_local() + * calls must occurs in reverse order of calls to kmap_local_page(). + * So, to call kunmap_local(s_addr) we should first call + * kunmap_local(d_addr). For more details see * Documentation/mm/highmem.rst. 
*/ if (s_off >= PAGE_SIZE) { - kunmap_atomic(d_addr); - kunmap_atomic(s_addr); - s_page = get_next_page(s_page); - s_addr = kmap_atomic(s_page); - d_addr = kmap_atomic(d_page); + kunmap_local(d_addr); + kunmap_local(s_addr); + s_zpdesc = get_next_zpdesc(s_zpdesc); + s_addr = kmap_local_zpdesc(s_zpdesc); + d_addr = kmap_local_zpdesc(d_zpdesc); s_size = class->size - written; s_off = 0; } if (d_off >= PAGE_SIZE) { - kunmap_atomic(d_addr); - d_page = get_next_page(d_page); - d_addr = kmap_atomic(d_page); + kunmap_local(d_addr); + d_zpdesc = get_next_zpdesc(d_zpdesc); + d_addr = kmap_local_zpdesc(d_zpdesc); d_size = class->size - written; d_off = 0; } } - kunmap_atomic(d_addr); - kunmap_atomic(s_addr); + kunmap_local(d_addr); + kunmap_local(s_addr); } /* @@ -1717,99 +1458,93 @@ static void zs_object_copy(struct size_class *class, unsigned long dst, * return handle. */ static unsigned long find_alloced_obj(struct size_class *class, - struct page *page, int *obj_idx) + struct zpdesc *zpdesc, int *obj_idx) { unsigned int offset; int index = *obj_idx; unsigned long handle = 0; - void *addr = kmap_atomic(page); + void *addr = kmap_local_zpdesc(zpdesc); - offset = get_first_obj_offset(page); + offset = get_first_obj_offset(zpdesc); offset += class->size * index; while (offset < PAGE_SIZE) { - if (obj_allocated(page, addr + offset, &handle)) + if (obj_allocated(zpdesc, addr + offset, &handle)) break; offset += class->size; index++; } - kunmap_atomic(addr); + kunmap_local(addr); *obj_idx = index; return handle; } -struct zs_compact_control { - /* Source spage for migration which could be a subpage of zspage */ - struct page *s_page; - /* Destination page for migration which should be a first page - * of zspage. */ - struct page *d_page; - /* Starting object index within @s_page which used for live object - * in the subpage. 
*/ - int obj_idx; -}; - -static int migrate_zspage(struct zs_pool *pool, struct size_class *class, - struct zs_compact_control *cc) +static void migrate_zspage(struct zs_pool *pool, struct zspage *src_zspage, + struct zspage *dst_zspage) { unsigned long used_obj, free_obj; unsigned long handle; - struct page *s_page = cc->s_page; - struct page *d_page = cc->d_page; - int obj_idx = cc->obj_idx; - int ret = 0; + int obj_idx = 0; + struct zpdesc *s_zpdesc = get_first_zpdesc(src_zspage); + struct size_class *class = pool->size_class[src_zspage->class]; while (1) { - handle = find_alloced_obj(class, s_page, &obj_idx); + handle = find_alloced_obj(class, s_zpdesc, &obj_idx); if (!handle) { - s_page = get_next_page(s_page); - if (!s_page) + s_zpdesc = get_next_zpdesc(s_zpdesc); + if (!s_zpdesc) break; obj_idx = 0; continue; } - /* Stop if there is no more space */ - if (zspage_full(class, get_zspage(d_page))) { - ret = -ENOMEM; - break; - } - used_obj = handle_to_obj(handle); - free_obj = obj_malloc(pool, get_zspage(d_page), handle); + free_obj = obj_malloc(pool, dst_zspage, handle); zs_object_copy(class, free_obj, used_obj); obj_idx++; - record_obj(handle, free_obj); obj_free(class->size, used_obj); - } - /* Remember last position in this iteration */ - cc->s_page = s_page; - cc->obj_idx = obj_idx; + /* Stop if there is no more space */ + if (zspage_full(class, dst_zspage)) + break; - return ret; + /* Stop if there are no more objects to migrate */ + if (zspage_empty(src_zspage)) + break; + } } -static struct zspage *isolate_zspage(struct size_class *class, bool source) +static struct zspage *isolate_src_zspage(struct size_class *class) { - int i; struct zspage *zspage; - enum fullness_group fg[2] = {ZS_ALMOST_EMPTY, ZS_ALMOST_FULL}; + int fg; - if (!source) { - fg[0] = ZS_ALMOST_FULL; - fg[1] = ZS_ALMOST_EMPTY; + for (fg = ZS_INUSE_RATIO_10; fg <= ZS_INUSE_RATIO_99; fg++) { + zspage = list_first_entry_or_null(&class->fullness_list[fg], + struct zspage, list); + if (zspage) { + remove_zspage(class, zspage); + return zspage; + } } - for (i = 0; i < 2; i++) { - zspage = list_first_entry_or_null(&class->fullness_list[fg[i]], - struct zspage, list); + return zspage; +} + +static struct zspage *isolate_dst_zspage(struct size_class *class) +{ + struct zspage *zspage; + int fg; + + for (fg = ZS_INUSE_RATIO_99; fg >= ZS_INUSE_RATIO_10; fg--) { + zspage = list_first_entry_or_null(&class->fullness_list[fg], + struct zspage, list); if (zspage) { - remove_zspage(class, zspage, fg[i]); + remove_zspage(class, zspage); return zspage; } } @@ -1822,166 +1557,97 @@ static struct zspage *isolate_zspage(struct size_class *class, bool source) * @class: destination class * @zspage: target page * - * Return @zspage's fullness_group + * Return @zspage's fullness status */ -static enum fullness_group putback_zspage(struct size_class *class, - struct zspage *zspage) +static int putback_zspage(struct size_class *class, struct zspage *zspage) { - enum fullness_group fullness; + int fullness; fullness = get_fullness_group(class, zspage); insert_zspage(class, zspage, fullness); - set_zspage_mapping(zspage, class->index, fullness); return fullness; } -#if defined(CONFIG_ZPOOL) || defined(CONFIG_COMPACTION) +#ifdef CONFIG_COMPACTION /* * To prevent zspage destroy during migration, zspage freeing should * hold locks of all pages in the zspage. 
-#if defined(CONFIG_ZPOOL) || defined(CONFIG_COMPACTION)
+#ifdef CONFIG_COMPACTION
/*
 * To prevent a zspage from being destroyed during migration, the freeing
 * path must hold the lock of every page in the zspage.
 */
static void lock_zspage(struct zspage *zspage)
{
-	struct page *curr_page, *page;
+	struct zpdesc *curr_zpdesc, *zpdesc;

	/*
	 * Pages we haven't locked yet can be migrated off the list while we're
	 * trying to lock them, so we need to be careful and only attempt to
-	 * lock each page under migrate_read_lock(). Otherwise, the page we lock
+	 * lock each page under zspage_read_lock(). Otherwise, the page we lock
	 * may no longer belong to the zspage. This means that we may wait for
	 * the wrong page to unlock, so we must take a reference to the page
-	 * prior to waiting for it to unlock outside migrate_read_lock().
+	 * prior to waiting for it to unlock outside zspage_read_lock().
	 */
	while (1) {
-		migrate_read_lock(zspage);
-		page = get_first_page(zspage);
-		if (trylock_page(page))
+		zspage_read_lock(zspage);
+		zpdesc = get_first_zpdesc(zspage);
+		if (zpdesc_trylock(zpdesc))
			break;
-		get_page(page);
-		migrate_read_unlock(zspage);
-		wait_on_page_locked(page);
-		put_page(page);
+		zpdesc_get(zpdesc);
+		zspage_read_unlock(zspage);
+		zpdesc_wait_locked(zpdesc);
+		zpdesc_put(zpdesc);
	}

-	curr_page = page;
-	while ((page = get_next_page(curr_page))) {
-		if (trylock_page(page)) {
-			curr_page = page;
+	curr_zpdesc = zpdesc;
+	while ((zpdesc = get_next_zpdesc(curr_zpdesc))) {
+		if (zpdesc_trylock(zpdesc)) {
+			curr_zpdesc = zpdesc;
		} else {
-			get_page(page);
-			migrate_read_unlock(zspage);
-			wait_on_page_locked(page);
-			put_page(page);
-			migrate_read_lock(zspage);
+			zpdesc_get(zpdesc);
+			zspage_read_unlock(zspage);
+			zpdesc_wait_locked(zpdesc);
+			zpdesc_put(zpdesc);
+			zspage_read_lock(zspage);
		}
	}
-	migrate_read_unlock(zspage);
-}
-#endif /* defined(CONFIG_ZPOOL) || defined(CONFIG_COMPACTION) */
-
-#ifdef CONFIG_ZPOOL
-/*
- * Unlocks all the pages of the zspage.
- *
- * pool->lock must be held before this function is called
- * to prevent the underlying pages from migrating.
- */
-static void unlock_zspage(struct zspage *zspage)
-{
-	struct page *page = get_first_page(zspage);
-
-	do {
-		unlock_page(page);
-	} while ((page = get_next_page(page)) != NULL);
-}
-#endif /* CONFIG_ZPOOL */
-
-static void migrate_lock_init(struct zspage *zspage)
-{
-	rwlock_init(&zspage->lock);
-}
-
-static void migrate_read_lock(struct zspage *zspage) __acquires(&zspage->lock)
-{
-	read_lock(&zspage->lock);
-}
-
-static void migrate_read_unlock(struct zspage *zspage) __releases(&zspage->lock)
-{
-	read_unlock(&zspage->lock);
+	zspage_read_unlock(zspage);
}
+#endif /* CONFIG_COMPACTION */
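
lock_zspage() is an instance of a standard kernel pattern: never sleep on a page lock while holding the lock that keeps the page chain stable. Instead, pin the page, drop the chain lock, sleep, then re-take the lock and re-validate. Condensed (a sketch only; as the comment above notes, the real code must re-fetch the zpdesc from the chain after re-locking, because it may no longer belong to this zspage):

static void demo_lock_zpdesc(struct zspage *zspage, struct zpdesc *zpdesc)
{
	/* entered with zspage_read_lock() held */
	while (!zpdesc_trylock(zpdesc)) {
		zpdesc_get(zpdesc);		/* pin: the page may migrate away */
		zspage_read_unlock(zspage);	/* never sleep holding this lock */
		zpdesc_wait_locked(zpdesc);	/* sleep until the holder unlocks */
		zpdesc_put(zpdesc);		/* unpin */
		zspage_read_lock(zspage);	/* re-take; caller must re-validate */
	}
}
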

#ifdef CONFIG_COMPACTION
-static void migrate_write_lock(struct zspage *zspage)
-{
-	write_lock(&zspage->lock);
-}
-
-static void migrate_write_lock_nested(struct zspage *zspage)
-{
-	write_lock_nested(&zspage->lock, SINGLE_DEPTH_NESTING);
-}
-
-static void migrate_write_unlock(struct zspage *zspage)
-{
-	write_unlock(&zspage->lock);
-}
-
-/* Number of isolated subpage for *page migration* in this zspage */
-static void inc_zspage_isolation(struct zspage *zspage)
-{
-	zspage->isolated++;
-}
-
-static void dec_zspage_isolation(struct zspage *zspage)
-{
-	VM_BUG_ON(zspage->isolated == 0);
-	zspage->isolated--;
-}
-
-static const struct movable_operations zsmalloc_mops;
-
static void replace_sub_page(struct size_class *class, struct zspage *zspage,
-				struct page *newpage, struct page *oldpage)
+				struct zpdesc *newzpdesc, struct zpdesc *oldzpdesc)
{
-	struct page *page;
-	struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, };
+	struct zpdesc *zpdesc;
+	struct zpdesc *zpdescs[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, };
+	unsigned int first_obj_offset;
	int idx = 0;

-	page = get_first_page(zspage);
+	zpdesc = get_first_zpdesc(zspage);
	do {
-		if (page == oldpage)
-			pages[idx] = newpage;
+		if (zpdesc == oldzpdesc)
+			zpdescs[idx] = newzpdesc;
		else
-			pages[idx] = page;
+			zpdescs[idx] = zpdesc;
		idx++;
-	} while ((page = get_next_page(page)) != NULL);
+	} while ((zpdesc = get_next_zpdesc(zpdesc)) != NULL);

-	create_page_chain(class, zspage, pages);
-	set_first_obj_offset(newpage, get_first_obj_offset(oldpage));
+	create_page_chain(class, zspage, zpdescs);
+	first_obj_offset = get_first_obj_offset(oldzpdesc);
+	set_first_obj_offset(newzpdesc, first_obj_offset);
	if (unlikely(ZsHugePage(zspage)))
-		newpage->index = oldpage->index;
-	__SetPageMovable(newpage, &zsmalloc_mops);
+		newzpdesc->handle = oldzpdesc->handle;
+	__zpdesc_set_movable(newzpdesc);
}

static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
{
-	struct zspage *zspage;
-
	/*
-	 * Page is locked so zspage couldn't be destroyed. For detail, look at
-	 * lock_zspage in free_zspage.
+	 * Page is locked so zspage can't be destroyed concurrently
+	 * (see free_zspage()). But if the page was already destroyed
+	 * (see reset_zpdesc()), refuse isolation here.
	 */
-	VM_BUG_ON_PAGE(!PageMovable(page), page);
-	VM_BUG_ON_PAGE(PageIsolated(page), page);
-
-	zspage = get_zspage(page);
-	migrate_write_lock(zspage);
-	inc_zspage_isolation(zspage);
-	migrate_write_unlock(zspage);
-
-	return true;
+	return page_zpdesc(page)->zspage;
}

static int zs_page_migrate(struct page *newpage, struct page *page,
@@ -1990,7 +1656,9 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
	struct zs_pool *pool;
	struct size_class *class;
	struct zspage *zspage;
-	struct page *dummy;
+	struct zpdesc *dummy;
+	struct zpdesc *newzpdesc = page_zpdesc(newpage);
+	struct zpdesc *zpdesc = page_zpdesc(page);
	void *s_addr, *d_addr, *addr;
	unsigned int offset;
	unsigned long handle;
@@ -1998,88 +1666,88 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
	unsigned int obj_idx;

	/*
-	 * We cannot support the _NO_COPY case here, because copy needs to
-	 * happen under the zs lock, which does not work with
-	 * MIGRATE_SYNC_NO_COPY workflow.
+	 * TODO: nothing prevents a zspage from getting destroyed while
+	 * it is isolated for migration, as the page lock is temporarily
+	 * dropped after zs_page_isolate() succeeded: we should rework that
+	 * and defer destroying such pages once they are un-isolated (putback)
+	 * instead.
	 */
-	if (mode == MIGRATE_SYNC_NO_COPY)
-		return -EINVAL;
-
-	VM_BUG_ON_PAGE(!PageMovable(page), page);
-	VM_BUG_ON_PAGE(!PageIsolated(page), page);
+	if (!zpdesc->zspage)
+		return 0;

	/* The page is locked, so this pointer must remain valid */
-	zspage = get_zspage(page);
+	zspage = get_zspage(zpdesc);
	pool = zspage->pool;

	/*
-	 * The pool's lock protects the race between zpage migration
+	 * The pool migrate_lock protects the race between zpage migration
	 * and zs_free.
	 */
-	spin_lock(&pool->lock);
+	write_lock(&pool->lock);
	class = zspage_class(pool, zspage);

-	/* the migrate_write_lock protects zpage access via zs_map_object */
-	migrate_write_lock(zspage);
+	/*
+	 * the class lock protects zpage alloc/free in the zspage.
	 */
+	spin_lock(&class->lock);
+	/* the zspage write_lock protects zpage access via zs_obj_read/write() */
+	if (!zspage_write_trylock(zspage)) {
+		spin_unlock(&class->lock);
+		write_unlock(&pool->lock);
+		return -EINVAL;
+	}

-	offset = get_first_obj_offset(page);
-	s_addr = kmap_atomic(page);
+	/* We're committed, tell the world that this is a Zsmalloc page. */
+	__zpdesc_set_zsmalloc(newzpdesc);
+
+	offset = get_first_obj_offset(zpdesc);
+	s_addr = kmap_local_zpdesc(zpdesc);

	/*
	 * No user can access the objects in this zspage while it is being
	 * migrated, so it is safe to move them.
	 */
-	d_addr = kmap_atomic(newpage);
-	memcpy(d_addr, s_addr, PAGE_SIZE);
-	kunmap_atomic(d_addr);
+	d_addr = kmap_local_zpdesc(newzpdesc);
+	copy_page(d_addr, s_addr);
+	kunmap_local(d_addr);

	for (addr = s_addr + offset; addr < s_addr + PAGE_SIZE;
		addr += class->size) {
-		if (obj_allocated(page, addr, &handle)) {
+		if (obj_allocated(zpdesc, addr, &handle)) {

			old_obj = handle_to_obj(handle);
			obj_to_location(old_obj, &dummy, &obj_idx);
-			new_obj = (unsigned long)location_to_obj(newpage,
-								obj_idx);
+			new_obj = (unsigned long)location_to_obj(newzpdesc, obj_idx);
			record_obj(handle, new_obj);
		}
	}
-	kunmap_atomic(s_addr);
+	kunmap_local(s_addr);

-	replace_sub_page(class, zspage, newpage, page);
+	replace_sub_page(class, zspage, newzpdesc, zpdesc);
	/*
	 * Since we complete the data copy and set up new zspage structure,
-	 * it's okay to release the pool's lock.
+	 * it's okay to release migration_lock.
	 */
-	spin_unlock(&pool->lock);
-	dec_zspage_isolation(zspage);
-	migrate_write_unlock(zspage);
-
-	get_page(newpage);
-	if (page_zone(newpage) != page_zone(page)) {
-		dec_zone_page_state(page, NR_ZSPAGES);
-		inc_zone_page_state(newpage, NR_ZSPAGES);
+	write_unlock(&pool->lock);
+	spin_unlock(&class->lock);
+	zspage_write_unlock(zspage);
+
+	zpdesc_get(newzpdesc);
+	if (zpdesc_zone(newzpdesc) != zpdesc_zone(zpdesc)) {
+		zpdesc_dec_zone_page_state(zpdesc);
+		zpdesc_inc_zone_page_state(newzpdesc);
	}

-	reset_page(page);
-	put_page(page);
+	reset_zpdesc(zpdesc);
+	zpdesc_put(zpdesc);

-	return MIGRATEPAGE_SUCCESS;
+	return 0;
}
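
The handle fix-up loop above works because an object location is one packed word: the backing page's PFN in the high bits and the object index within the zspage in the low bits (see OBJ_INDEX_BITS/OBJ_INDEX_MASK earlier in the file). Migrating a page therefore only requires re-encoding every live handle against the new PFN. A sketch of the packing — the demo_ names are assumptions; the real location_to_obj()/obj_to_location() operate on zpdescs:

static unsigned long demo_location_to_obj(unsigned long pfn, unsigned int obj_idx)
{
	return (pfn << OBJ_INDEX_BITS) | (obj_idx & OBJ_INDEX_MASK);
}

static void demo_obj_to_location(unsigned long obj, unsigned long *pfn,
				 unsigned int *obj_idx)
{
	*pfn = obj >> OBJ_INDEX_BITS;
	*obj_idx = obj & OBJ_INDEX_MASK;
}
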

static void zs_page_putback(struct page *page)
{
-	struct zspage *zspage;
-
-	VM_BUG_ON_PAGE(!PageMovable(page), page);
-	VM_BUG_ON_PAGE(!PageIsolated(page), page);
-
-	zspage = get_zspage(page);
-	migrate_write_lock(zspage);
-	dec_zspage_isolation(zspage);
-	migrate_write_unlock(zspage);
}

-static const struct movable_operations zsmalloc_mops = {
+const struct movable_operations zsmalloc_mops = {
	.isolate_page = zs_page_isolate,
	.migrate_page = zs_page_migrate,
	.putback_page = zs_page_putback,
@@ -2093,8 +1761,6 @@ static void async_free_zspage(struct work_struct *work)
{
	int i;
	struct size_class *class;
-	unsigned int class_idx;
-	enum fullness_group fullness;
	struct zspage *zspage, *tmp;
	LIST_HEAD(free_pages);
	struct zs_pool *pool = container_of(work, struct zs_pool,
@@ -2105,24 +1771,21 @@ static void async_free_zspage(struct work_struct *work)
		if (class->index != i)
			continue;

-		spin_lock(&pool->lock);
-		list_splice_init(&class->fullness_list[ZS_EMPTY], &free_pages);
-		spin_unlock(&pool->lock);
+		spin_lock(&class->lock);
+		list_splice_init(&class->fullness_list[ZS_INUSE_RATIO_0],
+				 &free_pages);
+		spin_unlock(&class->lock);
	}

	list_for_each_entry_safe(zspage, tmp, &free_pages, list) {
		list_del(&zspage->list);
		lock_zspage(zspage);

-		get_zspage_mapping(zspage, &class_idx, &fullness);
-		VM_BUG_ON(fullness != ZS_EMPTY);
-		class = pool->size_class[class_idx];
-		spin_lock(&pool->lock);
-#ifdef CONFIG_ZPOOL
-		list_del(&zspage->lru);
-#endif
+		class = zspage_class(pool, zspage);
+		spin_lock(&class->lock);
+		class_stat_sub(class, ZS_INUSE_RATIO_0, 1);
		__free_zspage(pool, class, zspage);
-		spin_unlock(&pool->lock);
+		spin_unlock(&class->lock);
	}
};
@@ -2143,13 +1806,13 @@ static void init_deferred_free(struct zs_pool *pool)

static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage)
{
-	struct page *page = get_first_page(zspage);
+	struct zpdesc *zpdesc = get_first_zpdesc(zspage);

	do {
-		WARN_ON(!trylock_page(page));
-		__SetPageMovable(page, &zsmalloc_mops);
-		unlock_page(page);
-	} while ((page = get_next_page(page)) != NULL);
+		WARN_ON(!zpdesc_trylock(zpdesc));
+		__zpdesc_set_movable(zpdesc);
+		zpdesc_unlock(zpdesc);
+	} while ((zpdesc = get_next_zpdesc(zpdesc)) != NULL);
}
#else
static inline void zs_flush_migration(struct zs_pool *pool) { }
@@ -2163,8 +1826,8 @@ static inline void zs_flush_migration(struct zs_pool *pool) { }
static unsigned long zs_can_compact(struct size_class *class)
{
	unsigned long obj_wasted;
-	unsigned long obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
-	unsigned long obj_used = zs_stat_get(class, OBJ_USED);
+	unsigned long obj_allocated = class_stat_read(class, ZS_OBJS_ALLOCATED);
+	unsigned long obj_used = class_stat_read(class, ZS_OBJS_INUSE);

	if (obj_allocated <= obj_used)
		return 0;
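
zs_can_compact() turns the allocated-vs-used gap into whole pages: objects that are allocated to zspages but not in use, divided into whole zspages, bound what compaction can free. A back-of-envelope sketch (not the verbatim elided code; the demo_ naming is an assumption):

static unsigned long demo_pages_reclaimable(unsigned long obj_allocated,
					    unsigned long obj_used,
					    int objs_per_zspage,
					    int pages_per_zspage)
{
	/* caller has already checked obj_allocated > obj_used */
	unsigned long obj_wasted = obj_allocated - obj_used;

	return (obj_wasted / objs_per_zspage) * pages_per_zspage;
}
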
@@ -2178,8 +1841,7 @@ static unsigned long zs_can_compact(struct size_class *class)
static unsigned long __zs_compact(struct zs_pool *pool,
					struct size_class *class)
{
-	struct zs_compact_control cc;
-	struct zspage *src_zspage;
+	struct zspage *src_zspage = NULL;
	struct zspage *dst_zspage = NULL;
	unsigned long pages_freed = 0;

@@ -2187,59 +1849,55 @@ static unsigned long __zs_compact(struct zs_pool *pool,
	 * protect the race between zpage migration and zs_free
	 * as well as zpage allocation/free
	 */
-	spin_lock(&pool->lock);
-	while ((src_zspage = isolate_zspage(class, true))) {
-		/* protect someone accessing the zspage(i.e., zs_map_object) */
-		migrate_write_lock(src_zspage);
-
-		if (!zs_can_compact(class))
-			break;
-
-		cc.obj_idx = 0;
-		cc.s_page = get_first_page(src_zspage);
-
-		while ((dst_zspage = isolate_zspage(class, false))) {
-			migrate_write_lock_nested(dst_zspage);
-
-			cc.d_page = get_first_page(dst_zspage);
-			/*
-			 * If there is no more space in dst_page, resched
-			 * and see if anyone had allocated another zspage.
-			 */
-			if (!migrate_zspage(pool, class, &cc))
-				break;
+	write_lock(&pool->lock);
+	spin_lock(&class->lock);
+	while (zs_can_compact(class)) {
+		int fg;

-			putback_zspage(class, dst_zspage);
-			migrate_write_unlock(dst_zspage);
-			dst_zspage = NULL;
-			if (spin_is_contended(&pool->lock))
+		if (!dst_zspage) {
+			dst_zspage = isolate_dst_zspage(class);
+			if (!dst_zspage)
				break;
		}

-		/* Stop if we couldn't find slot */
-		if (dst_zspage == NULL)
+		src_zspage = isolate_src_zspage(class);
+		if (!src_zspage)
			break;

-		putback_zspage(class, dst_zspage);
-		migrate_write_unlock(dst_zspage);
+		if (!zspage_write_trylock(src_zspage))
+			break;

-		if (putback_zspage(class, src_zspage) == ZS_EMPTY) {
-			migrate_write_unlock(src_zspage);
+		migrate_zspage(pool, src_zspage, dst_zspage);
+		zspage_write_unlock(src_zspage);
+
+		fg = putback_zspage(class, src_zspage);
+		if (fg == ZS_INUSE_RATIO_0) {
			free_zspage(pool, class, src_zspage);
			pages_freed += class->pages_per_zspage;
-		} else
-			migrate_write_unlock(src_zspage);
-		spin_unlock(&pool->lock);
-		cond_resched();
-		spin_lock(&pool->lock);
+		}
+		src_zspage = NULL;
+
+		if (get_fullness_group(class, dst_zspage) == ZS_INUSE_RATIO_100
+		    || rwlock_is_contended(&pool->lock)) {
+			putback_zspage(class, dst_zspage);
+			dst_zspage = NULL;
+
+			spin_unlock(&class->lock);
+			write_unlock(&pool->lock);
+			cond_resched();
+			write_lock(&pool->lock);
+			spin_lock(&class->lock);
+		}
	}

-	if (src_zspage) {
+	if (src_zspage)
		putback_zspage(class, src_zspage);
-		migrate_write_unlock(src_zspage);
-	}
-	spin_unlock(&pool->lock);
+	if (dst_zspage)
+		putback_zspage(class, dst_zspage);
+
+	spin_unlock(&class->lock);
+	write_unlock(&pool->lock);

	return pages_freed;
}
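
Ignoring the locking, and the fact that the real loop keeps one destination zspage across iterations, a single round of __zs_compact() reduces to the following sketch (the demo_ wrapper is hypothetical; every callee appears in this diff):

static unsigned long demo_compact_once(struct zs_pool *pool,
				       struct size_class *class)
{
	struct zspage *src, *dst;

	dst = isolate_dst_zspage(class);	/* fullest zspage with room */
	if (!dst)
		return 0;

	src = isolate_src_zspage(class);	/* emptiest zspage with objects */
	if (!src) {
		putback_zspage(class, dst);
		return 0;
	}

	migrate_zspage(pool, src, dst);		/* drain src into dst */
	putback_zspage(class, dst);

	if (putback_zspage(class, src) == ZS_INUSE_RATIO_0) {
		free_zspage(pool, class, src);	/* fully drained: release pages */
		return class->pages_per_zspage;
	}
	return 0;
}
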
@@ -2250,6 +1908,15 @@ unsigned long zs_compact(struct zs_pool *pool)
	struct size_class *class;
	unsigned long pages_freed = 0;

+	/*
+	 * Pool compaction is performed under pool->lock so it is basically
+	 * single-threaded. Having more than one thread in __zs_compact()
+	 * will increase pool->lock contention, which will impact other
+	 * zsmalloc operations that need pool->lock.
+	 */
+	if (atomic_xchg(&pool->compaction_in_progress, 1))
+		return 0;
+
	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
		class = pool->size_class[i];
		if (class->index != i)
@@ -2257,6 +1924,7 @@ unsigned long zs_compact(struct zs_pool *pool)
		pages_freed += __zs_compact(pool, class);
	}
	atomic_long_add(pages_freed, &pool->stats.pages_compacted);
+	atomic_set(&pool->compaction_in_progress, 0);

	return pages_freed;
}
@@ -2272,8 +1940,7 @@ static unsigned long zs_shrinker_scan(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	unsigned long pages_freed;
-	struct zs_pool *pool = container_of(shrinker, struct zs_pool,
-			shrinker);
+	struct zs_pool *pool = shrinker->private_data;

	/*
	 * Compact classes and calculate compaction delta.
@@ -2291,8 +1958,7 @@ static unsigned long zs_shrinker_count(struct shrinker *shrinker,
	int i;
	struct size_class *class;
	unsigned long pages_to_free = 0;
-	struct zs_pool *pool = container_of(shrinker, struct zs_pool,
-			shrinker);
+	struct zs_pool *pool = shrinker->private_data;

	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
		class = pool->size_class[i];
@@ -2307,18 +1973,44 @@ static unsigned long zs_shrinker_count(struct shrinker *shrinker,

static void zs_unregister_shrinker(struct zs_pool *pool)
{
-	unregister_shrinker(&pool->shrinker);
+	shrinker_free(pool->shrinker);
}

static int zs_register_shrinker(struct zs_pool *pool)
{
-	pool->shrinker.scan_objects = zs_shrinker_scan;
-	pool->shrinker.count_objects = zs_shrinker_count;
-	pool->shrinker.batch = 0;
-	pool->shrinker.seeks = DEFAULT_SEEKS;
+	pool->shrinker = shrinker_alloc(0, "mm-zspool:%s", pool->name);
+	if (!pool->shrinker)
+		return -ENOMEM;
+
+	pool->shrinker->scan_objects = zs_shrinker_scan;
+	pool->shrinker->count_objects = zs_shrinker_count;
+	pool->shrinker->batch = 0;
+	pool->shrinker->private_data = pool;
+
+	shrinker_register(pool->shrinker);
+
+	return 0;
+}
+
+static int calculate_zspage_chain_size(int class_size)
+{
+	int i, min_waste = INT_MAX;
+	int chain_size = 1;
+
+	if (is_power_of_2(class_size))
+		return chain_size;
+
+	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
+		int waste;

-	return register_shrinker(&pool->shrinker, "mm-zspool:%s",
-				 pool->name);
+		waste = (i * PAGE_SIZE) % class_size;
+		if (waste < min_waste) {
+			min_waste = waste;
+			chain_size = i;
+		}
+	}
+
+	return chain_size;
}

/**
@@ -2342,7 +2034,8 @@ struct zs_pool *zs_create_pool(const char *name)
		return NULL;

	init_deferred_free(pool);
-	spin_lock_init(&pool->lock);
+	rwlock_init(&pool->lock);
+	atomic_set(&pool->compaction_in_progress, 0);

	pool->name = kstrdup(name, GFP_KERNEL);
	if (!pool->name)
@@ -2360,12 +2053,12 @@ struct zs_pool *zs_create_pool(const char *name)
		int pages_per_zspage;
		int objs_per_zspage;
		struct size_class *class;
-		int fullness = 0;
+		int fullness;

		size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
		if (size > ZS_MAX_ALLOC_SIZE)
			size = ZS_MAX_ALLOC_SIZE;
-		pages_per_zspage = get_pages_per_zspage(size);
+		pages_per_zspage = calculate_zspage_chain_size(size);
		objs_per_zspage = pages_per_zspage * PAGE_SIZE / size;

		/*
@@ -2413,10 +2106,14 @@ struct zs_pool *zs_create_pool(const char *name)
		class->index = i;
		class->pages_per_zspage = pages_per_zspage;
		class->objs_per_zspage = objs_per_zspage;
+		spin_lock_init(&class->lock);
		pool->size_class[i] = class;
-		for (fullness = ZS_EMPTY; fullness < NR_ZS_FULLNESS;
-							fullness++)
+
+		fullness = ZS_INUSE_RATIO_0;
+		while (fullness < NR_FULLNESS_GROUPS) {
			INIT_LIST_HEAD(&class->fullness_list[fullness]);
+			fullness++;
+		}

		prev_class = class;
	}
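
To see what calculate_zspage_chain_size() buys, take a hypothetical 2720-byte class on 4 KiB pages: one page wastes 4096 mod 2720 = 1376 bytes, two pages waste 8192 mod 2720 = 32 bytes, three waste 12288 mod 2720 = 1408, and four waste 16384 mod 2720 = 64. The minimum is at two pages, so the class would use two-page zspages holding three objects each with only 32 bytes left over (assuming CONFIG_ZSMALLOC_CHAIN_SIZE is at least 2). Power-of-two sizes divide the page evenly, hence the early return with a chain size of one.
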
@@ -2432,10 +2129,6 @@ struct zs_pool *zs_create_pool(const char *name)
 */
	zs_register_shrinker(pool);

-#ifdef CONFIG_ZPOOL
-	INIT_LIST_HEAD(&pool->lru);
-#endif
-
	return pool;

err:
@@ -2462,11 +2155,12 @@ void zs_destroy_pool(struct zs_pool *pool)
		if (class->index != i)
			continue;

-		for (fg = ZS_EMPTY; fg < NR_ZS_FULLNESS; fg++) {
-			if (!list_empty(&class->fullness_list[fg])) {
-				pr_info("Freeing non-empty class with size %db, fullness group %d\n",
-					class->size, fg);
-			}
+		for (fg = ZS_INUSE_RATIO_0; fg < NR_FULLNESS_GROUPS; fg++) {
+			if (list_empty(&class->fullness_list[fg]))
+				continue;
+
+			pr_err("Class-%d fullness group %d is not empty\n",
+			       class->size, fg);
		}
		kfree(class);
	}
@@ -2477,128 +2171,24 @@ void zs_destroy_pool(struct zs_pool *pool)
}
EXPORT_SYMBOL_GPL(zs_destroy_pool);
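
At the API level a pool's lifecycle is small; a minimal consumer sketch using only functions visible in this diff (zs_malloc()/zs_free() calls are omitted because their signatures are not shown here):

static int demo_pool_lifecycle(void)
{
	struct zs_pool *pool = zs_create_pool("demo");

	if (!pool)
		return -ENOMEM;

	/* ... zs_malloc()/zs_free() objects, then defragment: */
	pr_info("compaction freed %lu pages\n", zs_compact(pool));

	zs_destroy_pool(pool);
	return 0;
}
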

-#ifdef CONFIG_ZPOOL
-static int zs_reclaim_page(struct zs_pool *pool, unsigned int retries)
-{
-	int i, obj_idx, ret = 0;
-	unsigned long handle;
-	struct zspage *zspage;
-	struct page *page;
-	enum fullness_group fullness;
-
-	/* Lock LRU and fullness list */
-	spin_lock(&pool->lock);
-	if (list_empty(&pool->lru)) {
-		spin_unlock(&pool->lock);
-		return -EINVAL;
-	}
-
-	for (i = 0; i < retries; i++) {
-		struct size_class *class;
-
-		zspage = list_last_entry(&pool->lru, struct zspage, lru);
-		list_del(&zspage->lru);
-
-		/* zs_free may free objects, but not the zspage and handles */
-		zspage->under_reclaim = true;
-
-		class = zspage_class(pool, zspage);
-		fullness = get_fullness_group(class, zspage);
-
-		/* Lock out object allocations and object compaction */
-		remove_zspage(class, zspage, fullness);
-
-		spin_unlock(&pool->lock);
-		cond_resched();
-
-		/* Lock backing pages into place */
-		lock_zspage(zspage);
-
-		obj_idx = 0;
-		page = get_first_page(zspage);
-		while (1) {
-			handle = find_alloced_obj(class, page, &obj_idx);
-			if (!handle) {
-				page = get_next_page(page);
-				if (!page)
-					break;
-				obj_idx = 0;
-				continue;
-			}
-
-			/*
-			 * This will write the object and call zs_free.
-			 *
-			 * zs_free will free the object, but the
-			 * under_reclaim flag prevents it from freeing
-			 * the zspage altogether. This is necessary so
-			 * that we can continue working with the
-			 * zspage potentially after the last object
-			 * has been freed.
-			 */
-			ret = pool->zpool_ops->evict(pool->zpool, handle);
-			if (ret)
-				goto next;
-
-			obj_idx++;
-		}
-
-next:
-		/* For freeing the zspage, or putting it back in the pool and LRU list. */
-		spin_lock(&pool->lock);
-		zspage->under_reclaim = false;
-
-		if (!get_zspage_inuse(zspage)) {
-			/*
-			 * Fullness went stale as zs_free() won't touch it
-			 * while the page is removed from the pool. Fix it
-			 * up for the check in __free_zspage().
-			 */
-			zspage->fullness = ZS_EMPTY;
-
-			__free_zspage(pool, class, zspage);
-			spin_unlock(&pool->lock);
-			return 0;
-		}
-
-		putback_zspage(class, zspage);
-		list_add(&zspage->lru, &pool->lru);
-		unlock_zspage(zspage);
-	}
-
-	spin_unlock(&pool->lock);
-	return -EAGAIN;
-}
-#endif /* CONFIG_ZPOOL */
-
static int __init zs_init(void)
{
-	int ret;
-
-	ret = cpuhp_setup_state(CPUHP_MM_ZS_PREPARE, "mm/zsmalloc:prepare",
-				zs_cpu_prepare, zs_cpu_dead);
-	if (ret)
-		goto out;
+	int rc __maybe_unused;

-#ifdef CONFIG_ZPOOL
-	zpool_register_driver(&zs_zpool_driver);
+#ifdef CONFIG_COMPACTION
+	rc = set_movable_ops(&zsmalloc_mops, PGTY_zsmalloc);
+	if (rc)
+		return rc;
#endif
-
	zs_stat_init();
	return 0;
-
-out:
-	return ret;
}

static void __exit zs_exit(void)
{
-#ifdef CONFIG_ZPOOL
-	zpool_unregister_driver(&zs_zpool_driver);
+#ifdef CONFIG_COMPACTION
	set_movable_ops(NULL, PGTY_zsmalloc);
#endif
-	cpuhp_remove_state(CPUHP_MM_ZS_PREPARE);
-
	zs_stat_exit();
}

@@ -2607,3 +2197,4 @@ module_exit(zs_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
+MODULE_DESCRIPTION("zsmalloc memory allocator");
