Diffstat (limited to 'drivers/block/zram/zram_drv.c')
 drivers/block/zram/zram_drv.c | 1386 ++++++++++++++++++++++++++-----------
 1 file changed, 935 insertions(+), 451 deletions(-)
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 6772e0c654fa..54c57103715f 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -33,6 +33,7 @@
#include <linux/debugfs.h>
#include <linux/cpuhotplug.h>
#include <linux/part_stat.h>
+#include <linux/kernel_read_file.h>
#include "zram_drv.h"
@@ -43,6 +44,8 @@ static DEFINE_MUTEX(zram_index_mutex);
static int zram_major;
static const char *default_compressor = CONFIG_ZRAM_DEF_COMP;
+#define ZRAM_MAX_ALGO_NAME_SZ 128
+
/* Module params (documentation at end) */
static unsigned int num_devices = 1;
/*
@@ -54,22 +57,59 @@ static size_t huge_class_size;
static const struct block_device_operations zram_devops;
static void zram_free_page(struct zram *zram, size_t index);
-static int zram_read_page(struct zram *zram, struct page *page, u32 index,
- struct bio *parent);
+static int zram_read_from_zspool(struct zram *zram, struct page *page,
+ u32 index);
+
+#define slot_dep_map(zram, index) (&(zram)->table[(index)].dep_map)
+
+static void zram_slot_lock_init(struct zram *zram, u32 index)
+{
+ static struct lock_class_key __key;
+
+ lockdep_init_map(slot_dep_map(zram, index), "zram->table[index].lock",
+ &__key, 0);
+}
-static int zram_slot_trylock(struct zram *zram, u32 index)
+/*
+ * entry locking rules:
+ *
+ * 1) Lock is exclusive
+ *
+ * 2) lock() function can sleep waiting for the lock
+ *
+ * 3) Lock owner can sleep
+ *
+ * 4) Use TRY lock variant when in atomic context
+ *    - must check the return value and handle locking failures
+ */
+static __must_check bool zram_slot_trylock(struct zram *zram, u32 index)
{
- return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags);
+ unsigned long *lock = &zram->table[index].flags;
+
+ if (!test_and_set_bit_lock(ZRAM_ENTRY_LOCK, lock)) {
+ mutex_acquire(slot_dep_map(zram, index), 0, 1, _RET_IP_);
+ lock_acquired(slot_dep_map(zram, index), _RET_IP_);
+ return true;
+ }
+
+ return false;
}
static void zram_slot_lock(struct zram *zram, u32 index)
{
- bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags);
+ unsigned long *lock = &zram->table[index].flags;
+
+ mutex_acquire(slot_dep_map(zram, index), 0, 0, _RET_IP_);
+ wait_on_bit_lock(lock, ZRAM_ENTRY_LOCK, TASK_UNINTERRUPTIBLE);
+ lock_acquired(slot_dep_map(zram, index), _RET_IP_);
}
static void zram_slot_unlock(struct zram *zram, u32 index)
{
- bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
+ unsigned long *lock = &zram->table[index].flags;
+
+ mutex_release(slot_dep_map(zram, index), _RET_IP_);
+ clear_and_wake_up_bit(ZRAM_ENTRY_LOCK, lock);
}
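
The three helpers above implement an exclusive, sleepable per-slot lock on top of a single flags bit, with lockdep annotations. As a rough userspace analogue of the pattern (a minimal sketch only: the names, the C11 atomics, and the sched_yield() backoff are illustrative; the kernel version sleeps via wait_on_bit_lock() instead of yielding):

#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>

#define ENTRY_LOCK 0x1UL

/* try-variant: safe in contexts that must not sleep */
static bool slot_trylock(atomic_ulong *flags)
{
	unsigned long old = atomic_fetch_or_explicit(flags, ENTRY_LOCK,
						     memory_order_acquire);
	return !(old & ENTRY_LOCK);
}

/* blocking variant: yields here, sleeps in the kernel version */
static void slot_lock(atomic_ulong *flags)
{
	while (!slot_trylock(flags))
		sched_yield();
}

static void slot_unlock(atomic_ulong *flags)
{
	atomic_fetch_and_explicit(flags, ~ENTRY_LOCK, memory_order_release);
}

int main(void)
{
	atomic_ulong flags = 0;

	slot_lock(&flags);
	/* critical section: the owner may sleep in the kernel version */
	slot_unlock(&flags);
	return 0;
}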
static inline bool init_done(struct zram *zram)
@@ -92,7 +132,6 @@ static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
zram->table[index].handle = handle;
}
-/* flag operations require table entry bit_spin_lock() being held */
static bool zram_test_flag(struct zram *zram, u32 index,
enum zram_pageflags flag)
{
@@ -111,17 +150,6 @@ static void zram_clear_flag(struct zram *zram, u32 index,
zram->table[index].flags &= ~BIT(flag);
}
-static inline void zram_set_element(struct zram *zram, u32 index,
- unsigned long element)
-{
- zram->table[index].element = element;
-}
-
-static unsigned long zram_get_element(struct zram *zram, u32 index)
-{
- return zram->table[index].element;
-}
-
static size_t zram_get_obj_size(struct zram *zram, u32 index)
{
return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1);
@@ -142,6 +170,27 @@ static inline bool zram_allocated(struct zram *zram, u32 index)
zram_test_flag(zram, index, ZRAM_WB);
}
+static inline void update_used_max(struct zram *zram, const unsigned long pages)
+{
+ unsigned long cur_max = atomic_long_read(&zram->stats.max_used_pages);
+
+ do {
+ if (cur_max >= pages)
+ return;
+ } while (!atomic_long_try_cmpxchg(&zram->stats.max_used_pages,
+ &cur_max, pages));
+}
+
+static bool zram_can_store_page(struct zram *zram)
+{
+ unsigned long alloced_pages;
+
+ alloced_pages = zs_get_total_pages(zram->mem_pool);
+ update_used_max(zram, alloced_pages);
+
+ return !zram->limit_pages || alloced_pages <= zram->limit_pages;
+}
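
update_used_max() is the classic lock-free "ratchet a maximum" loop: read the current maximum, bail out if it already covers us, otherwise compare-and-swap until we win or someone else records a larger value. A minimal standalone sketch of the same pattern (C11 atomics, illustrative names):

#include <stdatomic.h>

static void update_max(_Atomic long *max, long pages)
{
	long cur = atomic_load(max);

	/* a failed CAS reloads cur, so the loop always makes progress */
	while (cur < pages &&
	       !atomic_compare_exchange_weak(max, &cur, pages))
		;
}

int main(void)
{
	_Atomic long max = 0;

	update_max(&max, 42);
	update_max(&max, 7);	/* no-op: 42 is already recorded */
	return atomic_load(&max) == 42 ? 0 : 1;
}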
+
#if PAGE_SIZE != 4096
static inline bool is_partial_io(struct bio_vec *bvec)
{
@@ -177,22 +226,113 @@ static inline u32 zram_get_priority(struct zram *zram, u32 index)
static void zram_accessed(struct zram *zram, u32 index)
{
zram_clear_flag(zram, index, ZRAM_IDLE);
+ zram_clear_flag(zram, index, ZRAM_PP_SLOT);
#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
zram->table[index].ac_time = ktime_get_boottime();
#endif
}
-static inline void update_used_max(struct zram *zram,
- const unsigned long pages)
+#if defined CONFIG_ZRAM_WRITEBACK || defined CONFIG_ZRAM_MULTI_COMP
+struct zram_pp_slot {
+ unsigned long index;
+ struct list_head entry;
+};
+
+/*
+ * A post-processing bucket is, essentially, a size class; this defines
+ * the range (in bytes) of pp-slot sizes in a particular bucket.
+ */
+#define PP_BUCKET_SIZE_RANGE 64
+#define NUM_PP_BUCKETS ((PAGE_SIZE / PP_BUCKET_SIZE_RANGE) + 1)
+
+struct zram_pp_ctl {
+ struct list_head pp_buckets[NUM_PP_BUCKETS];
+};
+
+static struct zram_pp_ctl *init_pp_ctl(void)
{
- unsigned long cur_max = atomic_long_read(&zram->stats.max_used_pages);
+ struct zram_pp_ctl *ctl;
+ u32 idx;
- do {
- if (cur_max >= pages)
- return;
- } while (!atomic_long_try_cmpxchg(&zram->stats.max_used_pages,
- &cur_max, pages));
+ ctl = kmalloc(sizeof(*ctl), GFP_KERNEL);
+ if (!ctl)
+ return NULL;
+
+ for (idx = 0; idx < NUM_PP_BUCKETS; idx++)
+ INIT_LIST_HEAD(&ctl->pp_buckets[idx]);
+ return ctl;
+}
+
+static void release_pp_slot(struct zram *zram, struct zram_pp_slot *pps)
+{
+ list_del_init(&pps->entry);
+
+ zram_slot_lock(zram, pps->index);
+ zram_clear_flag(zram, pps->index, ZRAM_PP_SLOT);
+ zram_slot_unlock(zram, pps->index);
+
+ kfree(pps);
+}
+
+static void release_pp_ctl(struct zram *zram, struct zram_pp_ctl *ctl)
+{
+ u32 idx;
+
+ if (!ctl)
+ return;
+
+ for (idx = 0; idx < NUM_PP_BUCKETS; idx++) {
+ while (!list_empty(&ctl->pp_buckets[idx])) {
+ struct zram_pp_slot *pps;
+
+ pps = list_first_entry(&ctl->pp_buckets[idx],
+ struct zram_pp_slot,
+ entry);
+ release_pp_slot(zram, pps);
+ }
+ }
+
+ kfree(ctl);
+}
+
+static bool place_pp_slot(struct zram *zram, struct zram_pp_ctl *ctl,
+ u32 index)
+{
+ struct zram_pp_slot *pps;
+ u32 bid;
+
+ pps = kmalloc(sizeof(*pps), GFP_NOIO | __GFP_NOWARN);
+ if (!pps)
+ return false;
+
+ INIT_LIST_HEAD(&pps->entry);
+ pps->index = index;
+
+ bid = zram_get_obj_size(zram, pps->index) / PP_BUCKET_SIZE_RANGE;
+ list_add(&pps->entry, &ctl->pp_buckets[bid]);
+
+ zram_set_flag(zram, pps->index, ZRAM_PP_SLOT);
+ return true;
+}
+
+static struct zram_pp_slot *select_pp_slot(struct zram_pp_ctl *ctl)
+{
+ struct zram_pp_slot *pps = NULL;
+ s32 idx = NUM_PP_BUCKETS - 1;
+
+ /* The higher the bucket id, the more optimal the slot is to post-process */
+ while (idx >= 0) {
+ pps = list_first_entry_or_null(&ctl->pp_buckets[idx],
+ struct zram_pp_slot,
+ entry);
+ if (pps)
+ break;
+
+ idx--;
+ }
+ return pps;
}
+#endif
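
With PAGE_SIZE == 4096 and PP_BUCKET_SIZE_RANGE == 64 there are 65 buckets (0..64); a slot's bucket id is simply its compressed object size divided by 64, and select_pp_slot() walks the buckets from 64 down to 0, so the largest objects, which free the most memory when post-processed, are picked first. A tiny standalone illustration of the bucket arithmetic (assumed 4K page size, userspace):

#include <stdio.h>

#define PAGE_SIZE		4096
#define PP_BUCKET_SIZE_RANGE	64

int main(void)
{
	unsigned int sizes[] = { 64, 800, 2048, PAGE_SIZE };

	for (unsigned int i = 0; i < 4; i++)
		printf("obj size %4u -> bucket %2u\n",
		       sizes[i], sizes[i] / PP_BUCKET_SIZE_RANGE);
	return 0;	/* prints buckets 1, 12, 32 and 64 */
}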
static inline void zram_fill_page(void *ptr, unsigned long len,
unsigned long value)
@@ -295,19 +435,28 @@ static void mark_idle(struct zram *zram, ktime_t cutoff)
for (index = 0; index < nr_pages; index++) {
/*
- * Do not mark ZRAM_UNDER_WB slot as ZRAM_IDLE to close race.
- * See the comment in writeback_store.
+ * Do not mark ZRAM_SAME slots as ZRAM_IDLE, because no
+ * post-processing (recompress, writeback) happens to the
+ * ZRAM_SAME slot.
+ *
+ * And ZRAM_WB slots simply cannot be ZRAM_IDLE.
*/
zram_slot_lock(zram, index);
- if (zram_allocated(zram, index) &&
- !zram_test_flag(zram, index, ZRAM_UNDER_WB)) {
+ if (!zram_allocated(zram, index) ||
+ zram_test_flag(zram, index, ZRAM_WB) ||
+ zram_test_flag(zram, index, ZRAM_SAME)) {
+ zram_slot_unlock(zram, index);
+ continue;
+ }
+
#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
- is_idle = !cutoff || ktime_after(cutoff,
- zram->table[index].ac_time);
+ is_idle = !cutoff ||
+ ktime_after(cutoff, zram->table[index].ac_time);
#endif
- if (is_idle)
- zram_set_flag(zram, index, ZRAM_IDLE);
- }
+ if (is_idle)
+ zram_set_flag(zram, index, ZRAM_IDLE);
+ else
+ zram_clear_flag(zram, index, ZRAM_IDLE);
zram_slot_unlock(zram, index);
}
}
@@ -426,11 +575,10 @@ static void reset_bdev(struct zram *zram)
if (!zram->backing_dev)
return;
- bdev_release(zram->bdev_handle);
/* hope filp_close flushes all of the IO */
filp_close(zram->backing_dev, NULL);
zram->backing_dev = NULL;
- zram->bdev_handle = NULL;
+ zram->bdev = NULL;
zram->disk->fops = &zram_devops;
kvfree(zram->bitmap);
zram->bitmap = NULL;
@@ -473,10 +621,8 @@ static ssize_t backing_dev_store(struct device *dev,
size_t sz;
struct file *backing_dev = NULL;
struct inode *inode;
- struct address_space *mapping;
unsigned int bitmap_sz;
unsigned long nr_pages, *bitmap = NULL;
- struct bdev_handle *bdev_handle = NULL;
int err;
struct zram *zram = dev_to_zram(dev);
@@ -497,15 +643,14 @@ static ssize_t backing_dev_store(struct device *dev,
if (sz > 0 && file_name[sz - 1] == '\n')
file_name[sz - 1] = 0x00;
- backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0);
+ backing_dev = filp_open(file_name, O_RDWR | O_LARGEFILE | O_EXCL, 0);
if (IS_ERR(backing_dev)) {
err = PTR_ERR(backing_dev);
backing_dev = NULL;
goto out;
}
- mapping = backing_dev->f_mapping;
- inode = mapping->host;
+ inode = backing_dev->f_mapping->host;
/* Support only block devices at the moment */
if (!S_ISBLK(inode->i_mode)) {
@@ -513,15 +658,13 @@ static ssize_t backing_dev_store(struct device *dev,
goto out;
}
- bdev_handle = bdev_open_by_dev(inode->i_rdev,
- BLK_OPEN_READ | BLK_OPEN_WRITE, zram, NULL);
- if (IS_ERR(bdev_handle)) {
- err = PTR_ERR(bdev_handle);
- bdev_handle = NULL;
+ nr_pages = i_size_read(inode) >> PAGE_SHIFT;
+ /* Refuse to use a zero-sized device (also prevents self-reference) */
+ if (!nr_pages) {
+ err = -EINVAL;
goto out;
}
- nr_pages = i_size_read(inode) >> PAGE_SHIFT;
bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
bitmap = kvzalloc(bitmap_sz, GFP_KERNEL);
if (!bitmap) {
@@ -531,7 +674,7 @@ static ssize_t backing_dev_store(struct device *dev,
reset_bdev(zram);
- zram->bdev_handle = bdev_handle;
+ zram->bdev = I_BDEV(inode);
zram->backing_dev = backing_dev;
zram->bitmap = bitmap;
zram->nr_pages = nr_pages;
@@ -544,9 +687,6 @@ static ssize_t backing_dev_store(struct device *dev,
out:
kvfree(bitmap);
- if (bdev_handle)
- bdev_release(bdev_handle);
-
if (backing_dev)
filp_close(backing_dev, NULL);
@@ -587,71 +727,28 @@ static void read_from_bdev_async(struct zram *zram, struct page *page,
{
struct bio *bio;
- bio = bio_alloc(zram->bdev_handle->bdev, 1, parent->bi_opf, GFP_NOIO);
+ bio = bio_alloc(zram->bdev, 1, parent->bi_opf, GFP_NOIO);
bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
__bio_add_page(bio, page, PAGE_SIZE, 0);
bio_chain(bio, parent);
submit_bio(bio);
}
-#define PAGE_WB_SIG "page_index="
-
-#define PAGE_WRITEBACK 0
-#define HUGE_WRITEBACK (1<<0)
-#define IDLE_WRITEBACK (1<<1)
-#define INCOMPRESSIBLE_WRITEBACK (1<<2)
-
-static ssize_t writeback_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+static int zram_writeback_slots(struct zram *zram, struct zram_pp_ctl *ctl)
{
- struct zram *zram = dev_to_zram(dev);
- unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
- unsigned long index = 0;
- struct bio bio;
- struct bio_vec bio_vec;
- struct page *page;
- ssize_t ret = len;
- int mode, err;
unsigned long blk_idx = 0;
-
- if (sysfs_streq(buf, "idle"))
- mode = IDLE_WRITEBACK;
- else if (sysfs_streq(buf, "huge"))
- mode = HUGE_WRITEBACK;
- else if (sysfs_streq(buf, "huge_idle"))
- mode = IDLE_WRITEBACK | HUGE_WRITEBACK;
- else if (sysfs_streq(buf, "incompressible"))
- mode = INCOMPRESSIBLE_WRITEBACK;
- else {
- if (strncmp(buf, PAGE_WB_SIG, sizeof(PAGE_WB_SIG) - 1))
- return -EINVAL;
-
- if (kstrtol(buf + sizeof(PAGE_WB_SIG) - 1, 10, &index) ||
- index >= nr_pages)
- return -EINVAL;
-
- nr_pages = 1;
- mode = PAGE_WRITEBACK;
- }
-
- down_read(&zram->init_lock);
- if (!init_done(zram)) {
- ret = -EINVAL;
- goto release_init_lock;
- }
-
- if (!zram->backing_dev) {
- ret = -ENODEV;
- goto release_init_lock;
- }
+ struct page *page = NULL;
+ struct zram_pp_slot *pps;
+ struct bio_vec bio_vec;
+ struct bio bio;
+ int ret = 0, err;
+ u32 index;
page = alloc_page(GFP_KERNEL);
- if (!page) {
- ret = -ENOMEM;
- goto release_init_lock;
- }
+ if (!page)
+ return -ENOMEM;
- for (; nr_pages != 0; index++, nr_pages--) {
+ while ((pps = select_pp_slot(ctl))) {
spin_lock(&zram->wb_limit_lock);
if (zram->wb_limit_enable && !zram->bd_wb_limit) {
spin_unlock(&zram->wb_limit_lock);
@@ -668,42 +765,21 @@ static ssize_t writeback_store(struct device *dev,
}
}
+ index = pps->index;
zram_slot_lock(zram, index);
- if (!zram_allocated(zram, index))
- goto next;
-
- if (zram_test_flag(zram, index, ZRAM_WB) ||
- zram_test_flag(zram, index, ZRAM_SAME) ||
- zram_test_flag(zram, index, ZRAM_UNDER_WB))
- goto next;
-
- if (mode & IDLE_WRITEBACK &&
- !zram_test_flag(zram, index, ZRAM_IDLE))
- goto next;
- if (mode & HUGE_WRITEBACK &&
- !zram_test_flag(zram, index, ZRAM_HUGE))
- goto next;
- if (mode & INCOMPRESSIBLE_WRITEBACK &&
- !zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
- goto next;
-
/*
- * Clearing ZRAM_UNDER_WB is duty of caller.
- * IOW, zram_free_page never clear it.
+	 * scan_slots() sets ZRAM_PP_SLOT and releases the slot lock, so
+	 * slots can change in the meantime. If slots are accessed or
+	 * freed they lose the ZRAM_PP_SLOT flag and hence we don't
+ * post-process them.
*/
- zram_set_flag(zram, index, ZRAM_UNDER_WB);
- /* Need for hugepage writeback racing */
- zram_set_flag(zram, index, ZRAM_IDLE);
+ if (!zram_test_flag(zram, index, ZRAM_PP_SLOT))
+ goto next;
+ if (zram_read_from_zspool(zram, page, index))
+ goto next;
zram_slot_unlock(zram, index);
- if (zram_read_page(zram, page, index, NULL)) {
- zram_slot_lock(zram, index);
- zram_clear_flag(zram, index, ZRAM_UNDER_WB);
- zram_clear_flag(zram, index, ZRAM_IDLE);
- zram_slot_unlock(zram, index);
- continue;
- }
- bio_init(&bio, zram->bdev_handle->bdev, &bio_vec, 1,
+ bio_init(&bio, zram->bdev, &bio_vec, 1,
REQ_OP_WRITE | REQ_SYNC);
bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
__bio_add_page(&bio, page, PAGE_SIZE, 0);
@@ -714,10 +790,7 @@ static ssize_t writeback_store(struct device *dev,
*/
err = submit_bio_wait(&bio);
if (err) {
- zram_slot_lock(zram, index);
- zram_clear_flag(zram, index, ZRAM_UNDER_WB);
- zram_clear_flag(zram, index, ZRAM_IDLE);
- zram_slot_unlock(zram, index);
+ release_pp_slot(zram, pps);
/*
* BIO errors are not fatal, we continue and simply
* attempt to writeback the remaining objects (pages).
@@ -731,27 +804,21 @@ static ssize_t writeback_store(struct device *dev,
}
atomic64_inc(&zram->stats.bd_writes);
+ zram_slot_lock(zram, index);
/*
- * We released zram_slot_lock so need to check if the slot was
- * changed. If there is freeing for the slot, we can catch it
- * easily by zram_allocated.
- * A subtle case is the slot is freed/reallocated/marked as
- * ZRAM_IDLE again. To close the race, idle_store doesn't
- * mark ZRAM_IDLE once it found the slot was ZRAM_UNDER_WB.
- * Thus, we could close the race by checking ZRAM_IDLE bit.
+ * Same as above, we release slot lock during writeback so
+	 * slot can change under us: slot_free(), or slot_free() followed
+	 * by reallocation (zram_write_page()). In both cases the slot
+	 * loses its ZRAM_PP_SLOT flag. No concurrent post-processing can set
+ * ZRAM_PP_SLOT on such slots until current post-processing
+ * finishes.
*/
- zram_slot_lock(zram, index);
- if (!zram_allocated(zram, index) ||
- !zram_test_flag(zram, index, ZRAM_IDLE)) {
- zram_clear_flag(zram, index, ZRAM_UNDER_WB);
- zram_clear_flag(zram, index, ZRAM_IDLE);
+ if (!zram_test_flag(zram, index, ZRAM_PP_SLOT))
goto next;
- }
zram_free_page(zram, index);
- zram_clear_flag(zram, index, ZRAM_UNDER_WB);
zram_set_flag(zram, index, ZRAM_WB);
- zram_set_element(zram, index, blk_idx);
+ zram_set_handle(zram, index, blk_idx);
blk_idx = 0;
atomic64_inc(&zram->stats.pages_stored);
spin_lock(&zram->wb_limit_lock);
@@ -760,12 +827,224 @@ static ssize_t writeback_store(struct device *dev,
spin_unlock(&zram->wb_limit_lock);
next:
zram_slot_unlock(zram, index);
+ release_pp_slot(zram, pps);
+
+ cond_resched();
}
if (blk_idx)
free_block_bdev(zram, blk_idx);
- __free_page(page);
+ if (page)
+ __free_page(page);
+
+ return ret;
+}
+
+#define PAGE_WRITEBACK 0
+#define HUGE_WRITEBACK (1 << 0)
+#define IDLE_WRITEBACK (1 << 1)
+#define INCOMPRESSIBLE_WRITEBACK (1 << 2)
+
+static int parse_page_index(char *val, unsigned long nr_pages,
+ unsigned long *lo, unsigned long *hi)
+{
+ int ret;
+
+ ret = kstrtoul(val, 10, lo);
+ if (ret)
+ return ret;
+ if (*lo >= nr_pages)
+ return -ERANGE;
+ *hi = *lo + 1;
+ return 0;
+}
+
+static int parse_page_indexes(char *val, unsigned long nr_pages,
+ unsigned long *lo, unsigned long *hi)
+{
+ char *delim;
+ int ret;
+
+ delim = strchr(val, '-');
+ if (!delim)
+ return -EINVAL;
+
+ *delim = 0x00;
+ ret = kstrtoul(val, 10, lo);
+ if (ret)
+ return ret;
+ if (*lo >= nr_pages)
+ return -ERANGE;
+
+ ret = kstrtoul(delim + 1, 10, hi);
+ if (ret)
+ return ret;
+ if (*hi >= nr_pages || *lo > *hi)
+ return -ERANGE;
+ *hi += 1;
+ return 0;
+}
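
parse_page_indexes() accepts a "lo-hi" string and converts it to a half-open [lo, hi) interval. A userspace sketch of the same parsing (strtoul instead of kstrtoul, so error checking is weaker; names are illustrative):

#include <errno.h>
#include <stdlib.h>
#include <string.h>

static int parse_range(char *val, unsigned long nr_pages,
		       unsigned long *lo, unsigned long *hi)
{
	char *delim = strchr(val, '-');

	if (!delim)
		return -EINVAL;
	*delim = '\0';

	*lo = strtoul(val, NULL, 10);
	*hi = strtoul(delim + 1, NULL, 10);
	if (*lo >= nr_pages || *hi >= nr_pages || *lo > *hi)
		return -ERANGE;

	*hi += 1;	/* make the range half-open: [lo, hi) */
	return 0;
}

int main(void)
{
	char arg[] = "100-199";
	unsigned long lo, hi;

	if (parse_range(arg, 1UL << 20, &lo, &hi))
		return 1;
	return 0;	/* lo == 100, hi == 200 */
}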
+
+static int parse_mode(char *val, u32 *mode)
+{
+ *mode = 0;
+
+ if (!strcmp(val, "idle"))
+ *mode = IDLE_WRITEBACK;
+ if (!strcmp(val, "huge"))
+ *mode = HUGE_WRITEBACK;
+ if (!strcmp(val, "huge_idle"))
+ *mode = IDLE_WRITEBACK | HUGE_WRITEBACK;
+ if (!strcmp(val, "incompressible"))
+ *mode = INCOMPRESSIBLE_WRITEBACK;
+
+ if (*mode == 0)
+ return -EINVAL;
+ return 0;
+}
+
+static int scan_slots_for_writeback(struct zram *zram, u32 mode,
+ unsigned long lo, unsigned long hi,
+ struct zram_pp_ctl *ctl)
+{
+ u32 index = lo;
+
+ while (index < hi) {
+ bool ok = true;
+
+ zram_slot_lock(zram, index);
+ if (!zram_allocated(zram, index))
+ goto next;
+
+ if (zram_test_flag(zram, index, ZRAM_WB) ||
+ zram_test_flag(zram, index, ZRAM_SAME))
+ goto next;
+
+ if (mode & IDLE_WRITEBACK &&
+ !zram_test_flag(zram, index, ZRAM_IDLE))
+ goto next;
+ if (mode & HUGE_WRITEBACK &&
+ !zram_test_flag(zram, index, ZRAM_HUGE))
+ goto next;
+ if (mode & INCOMPRESSIBLE_WRITEBACK &&
+ !zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
+ goto next;
+
+ ok = place_pp_slot(zram, ctl, index);
+next:
+ zram_slot_unlock(zram, index);
+ if (!ok)
+ break;
+ index++;
+ }
+
+ return 0;
+}
+
+static ssize_t writeback_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct zram *zram = dev_to_zram(dev);
+ u64 nr_pages = zram->disksize >> PAGE_SHIFT;
+ unsigned long lo = 0, hi = nr_pages;
+ struct zram_pp_ctl *ctl = NULL;
+ char *args, *param, *val;
+ ssize_t ret = len;
+ int err, mode = 0;
+
+ down_read(&zram->init_lock);
+ if (!init_done(zram)) {
+ up_read(&zram->init_lock);
+ return -EINVAL;
+ }
+
+ /* Do not permit concurrent post-processing actions. */
+ if (atomic_xchg(&zram->pp_in_progress, 1)) {
+ up_read(&zram->init_lock);
+ return -EAGAIN;
+ }
+
+ if (!zram->backing_dev) {
+ ret = -ENODEV;
+ goto release_init_lock;
+ }
+
+ ctl = init_pp_ctl();
+ if (!ctl) {
+ ret = -ENOMEM;
+ goto release_init_lock;
+ }
+
+ args = skip_spaces(buf);
+ while (*args) {
+ args = next_arg(args, &param, &val);
+
+ /*
+ * Workaround to support the old writeback interface.
+ *
+ * The old writeback interface has a minor inconsistency and
+	 * requires key=value only for the page_index parameter, while the
+ * writeback mode is a valueless parameter.
+ *
+ * This is not the case anymore and now all parameters are
+ * required to have values, however, we need to support the
+ * legacy writeback interface format so we check if we can
+ * recognize a valueless parameter as the (legacy) writeback
+ * mode.
+ */
+ if (!val || !*val) {
+ err = parse_mode(param, &mode);
+ if (err) {
+ ret = err;
+ goto release_init_lock;
+ }
+
+ scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ break;
+ }
+
+ if (!strcmp(param, "type")) {
+ err = parse_mode(val, &mode);
+ if (err) {
+ ret = err;
+ goto release_init_lock;
+ }
+
+ scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ break;
+ }
+
+ if (!strcmp(param, "page_index")) {
+ err = parse_page_index(val, nr_pages, &lo, &hi);
+ if (err) {
+ ret = err;
+ goto release_init_lock;
+ }
+
+ scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ continue;
+ }
+
+ if (!strcmp(param, "page_indexes")) {
+ err = parse_page_indexes(val, nr_pages, &lo, &hi);
+ if (err) {
+ ret = err;
+ goto release_init_lock;
+ }
+
+ scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ continue;
+ }
+ }
+
+ err = zram_writeback_slots(zram, ctl);
+ if (err)
+ ret = err;
+
release_init_lock:
+ release_pp_ctl(zram, ctl);
+ atomic_set(&zram->pp_in_progress, 0);
up_read(&zram->init_lock);
return ret;
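
From user space the writeback attribute is driven by writing parameter strings to the sysfs node. A hedged usage sketch (the device name zram0 is an example; the parameter strings come from the parsing code above):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* legacy form: a bare mode word; "page_index=42" selects one page */
	const char *cmd = "idle";
	int fd = open("/sys/block/zram0/writeback", O_WRONLY);
	int ret = 0;

	if (fd < 0)
		return 1;
	if (write(fd, cmd, strlen(cmd)) != (ssize_t)strlen(cmd))
		ret = 1;
	close(fd);
	return ret;
}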
@@ -785,7 +1064,7 @@ static void zram_sync_read(struct work_struct *work)
struct bio_vec bv;
struct bio bio;
- bio_init(&bio, zw->zram->bdev_handle->bdev, &bv, 1, REQ_OP_READ);
+ bio_init(&bio, zw->zram->bdev, &bv, 1, REQ_OP_READ);
bio.bi_iter.bi_sector = zw->entry * (PAGE_SIZE >> 9);
__bio_add_page(&bio, zw->page, PAGE_SIZE, 0);
zw->error = submit_bio_wait(&bio);
@@ -937,27 +1216,6 @@ static void zram_debugfs_register(struct zram *zram) {};
static void zram_debugfs_unregister(struct zram *zram) {};
#endif
-/*
- * We switched to per-cpu streams and this attr is not needed anymore.
- * However, we will keep it around for some time, because:
- * a) we may revert per-cpu streams in the future
- * b) it's visible to user space and we need to follow our 2 years
- * retirement rule; but we already have a number of 'soon to be
- * altered' attrs, so max_comp_streams need to wait for the next
- * layoff cycle.
- */
-static ssize_t max_comp_streams_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
-}
-
-static ssize_t max_comp_streams_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
-{
- return len;
-}
-
static void comp_algorithm_set(struct zram *zram, u32 prio, const char *alg)
{
/* Do not free statically defined compression algorithms */
@@ -984,7 +1242,7 @@ static int __comp_algorithm_store(struct zram *zram, u32 prio, const char *buf)
size_t sz;
sz = strlen(buf);
- if (sz >= CRYPTO_MAX_ALG_NAME)
+ if (sz >= ZRAM_MAX_ALGO_NAME_SZ)
return -E2BIG;
compressor = kstrdup(buf, GFP_KERNEL);
@@ -1013,6 +1271,116 @@ static int __comp_algorithm_store(struct zram *zram, u32 prio, const char *buf)
return 0;
}
+static void comp_params_reset(struct zram *zram, u32 prio)
+{
+ struct zcomp_params *params = &zram->params[prio];
+
+ vfree(params->dict);
+ params->level = ZCOMP_PARAM_NOT_SET;
+ params->deflate.winbits = ZCOMP_PARAM_NOT_SET;
+ params->dict_sz = 0;
+ params->dict = NULL;
+}
+
+static int comp_params_store(struct zram *zram, u32 prio, s32 level,
+ const char *dict_path,
+ struct deflate_params *deflate_params)
+{
+ ssize_t sz = 0;
+
+ comp_params_reset(zram, prio);
+
+ if (dict_path) {
+ sz = kernel_read_file_from_path(dict_path, 0,
+ &zram->params[prio].dict,
+ INT_MAX,
+ NULL,
+ READING_POLICY);
+ if (sz < 0)
+ return -EINVAL;
+ }
+
+ zram->params[prio].dict_sz = sz;
+ zram->params[prio].level = level;
+ zram->params[prio].deflate.winbits = deflate_params->winbits;
+ return 0;
+}
+
+static ssize_t algorithm_params_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ s32 prio = ZRAM_PRIMARY_COMP, level = ZCOMP_PARAM_NOT_SET;
+ char *args, *param, *val, *algo = NULL, *dict_path = NULL;
+ struct deflate_params deflate_params;
+ struct zram *zram = dev_to_zram(dev);
+ int ret;
+
+ deflate_params.winbits = ZCOMP_PARAM_NOT_SET;
+
+ args = skip_spaces(buf);
+ while (*args) {
+ args = next_arg(args, &param, &val);
+
+ if (!val || !*val)
+ return -EINVAL;
+
+ if (!strcmp(param, "priority")) {
+ ret = kstrtoint(val, 10, &prio);
+ if (ret)
+ return ret;
+ continue;
+ }
+
+ if (!strcmp(param, "level")) {
+ ret = kstrtoint(val, 10, &level);
+ if (ret)
+ return ret;
+ continue;
+ }
+
+ if (!strcmp(param, "algo")) {
+ algo = val;
+ continue;
+ }
+
+ if (!strcmp(param, "dict")) {
+ dict_path = val;
+ continue;
+ }
+
+ if (!strcmp(param, "deflate.winbits")) {
+ ret = kstrtoint(val, 10, &deflate_params.winbits);
+ if (ret)
+ return ret;
+ continue;
+ }
+ }
+
+ /* Lookup priority by algorithm name */
+ if (algo) {
+ s32 p;
+
+ prio = -EINVAL;
+ for (p = ZRAM_PRIMARY_COMP; p < ZRAM_MAX_COMPS; p++) {
+ if (!zram->comp_algs[p])
+ continue;
+
+ if (!strcmp(zram->comp_algs[p], algo)) {
+ prio = p;
+ break;
+ }
+ }
+ }
+
+ if (prio < ZRAM_PRIMARY_COMP || prio >= ZRAM_MAX_COMPS)
+ return -EINVAL;
+
+ ret = comp_params_store(zram, prio, level, dict_path, &deflate_params);
+ return ret ? ret : len;
+}
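
algorithm_params_store() parses space-separated key=value pairs via next_arg(). A userspace sketch of the same key=value scan (strtok_r-based and illustrative only: next_arg() also handles quoting, which this sketch does not):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[] = "priority=0 level=3 deflate.winbits=12";
	char *save, *param;

	for (param = strtok_r(buf, " ", &save); param;
	     param = strtok_r(NULL, " ", &save)) {
		char *val = strchr(param, '=');

		if (val)
			*val++ = '\0';
		printf("param=%s val=%s\n", param, val ? val : "");
	}
	return 0;
}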
+
static ssize_t comp_algorithm_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1195,9 +1563,8 @@ static ssize_t debug_stat_show(struct device *dev,
down_read(&zram->init_lock);
ret = scnprintf(buf, PAGE_SIZE,
- "version: %d\n%8llu %8llu\n",
+ "version: %d\n0 %8llu\n",
version,
- (u64)atomic64_read(&zram->stats.writestall),
(u64)atomic64_read(&zram->stats.miss_free));
up_read(&zram->init_lock);
@@ -1216,17 +1583,21 @@ static void zram_meta_free(struct zram *zram, u64 disksize)
size_t num_pages = disksize >> PAGE_SHIFT;
size_t index;
+ if (!zram->table)
+ return;
+
/* Free all pages that are still in this zram device */
for (index = 0; index < num_pages; index++)
zram_free_page(zram, index);
zs_destroy_pool(zram->mem_pool);
vfree(zram->table);
+ zram->table = NULL;
}
static bool zram_meta_alloc(struct zram *zram, u64 disksize)
{
- size_t num_pages;
+ size_t num_pages, index;
num_pages = disksize >> PAGE_SHIFT;
zram->table = vzalloc(array_size(num_pages, sizeof(*zram->table)));
@@ -1236,19 +1607,19 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)
zram->mem_pool = zs_create_pool(zram->disk->disk_name);
if (!zram->mem_pool) {
vfree(zram->table);
+ zram->table = NULL;
return false;
}
if (!huge_class_size)
huge_class_size = zs_huge_class_size(zram->mem_pool);
+
+ for (index = 0; index < num_pages; index++)
+ zram_slot_lock_init(zram, index);
+
return true;
}
-/*
- * To protect concurrent access to the same index entry,
- * caller should hold this table index entry's bit_spinlock to
- * indicate this index entry is accessing.
- */
static void zram_free_page(struct zram *zram, size_t index)
{
unsigned long handle;
@@ -1256,22 +1627,20 @@ static void zram_free_page(struct zram *zram, size_t index)
#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
zram->table[index].ac_time = 0;
#endif
- if (zram_test_flag(zram, index, ZRAM_IDLE))
- zram_clear_flag(zram, index, ZRAM_IDLE);
+
+ zram_clear_flag(zram, index, ZRAM_IDLE);
+ zram_clear_flag(zram, index, ZRAM_INCOMPRESSIBLE);
+ zram_clear_flag(zram, index, ZRAM_PP_SLOT);
+ zram_set_priority(zram, index, 0);
if (zram_test_flag(zram, index, ZRAM_HUGE)) {
zram_clear_flag(zram, index, ZRAM_HUGE);
atomic64_dec(&zram->stats.huge_pages);
}
- if (zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
- zram_clear_flag(zram, index, ZRAM_INCOMPRESSIBLE);
-
- zram_set_priority(zram, index, 0);
-
if (zram_test_flag(zram, index, ZRAM_WB)) {
zram_clear_flag(zram, index, ZRAM_WB);
- free_block_bdev(zram, zram_get_element(zram, index));
+ free_block_bdev(zram, zram_get_handle(zram, index));
goto out;
}
@@ -1292,64 +1661,80 @@ static void zram_free_page(struct zram *zram, size_t index)
zs_free(zram->mem_pool, handle);
atomic64_sub(zram_get_obj_size(zram, index),
- &zram->stats.compr_data_size);
+ &zram->stats.compr_data_size);
out:
atomic64_dec(&zram->stats.pages_stored);
zram_set_handle(zram, index, 0);
zram_set_obj_size(zram, index, 0);
- WARN_ON_ONCE(zram->table[index].flags &
- ~(1UL << ZRAM_LOCK | 1UL << ZRAM_UNDER_WB));
}
-/*
- * Reads (decompresses if needed) a page from zspool (zsmalloc).
- * Corresponding ZRAM slot should be locked.
- */
-static int zram_read_from_zspool(struct zram *zram, struct page *page,
+static int read_same_filled_page(struct zram *zram, struct page *page,
u32 index)
{
+ void *mem;
+
+ mem = kmap_local_page(page);
+ zram_fill_page(mem, PAGE_SIZE, zram_get_handle(zram, index));
+ kunmap_local(mem);
+ return 0;
+}
+
+static int read_incompressible_page(struct zram *zram, struct page *page,
+ u32 index)
+{
+ unsigned long handle;
+ void *src, *dst;
+
+ handle = zram_get_handle(zram, index);
+ src = zs_obj_read_begin(zram->mem_pool, handle, NULL);
+ dst = kmap_local_page(page);
+ copy_page(dst, src);
+ kunmap_local(dst);
+ zs_obj_read_end(zram->mem_pool, handle, src);
+
+ return 0;
+}
+
+static int read_compressed_page(struct zram *zram, struct page *page, u32 index)
+{
struct zcomp_strm *zstrm;
unsigned long handle;
unsigned int size;
void *src, *dst;
- u32 prio;
- int ret;
+ int ret, prio;
handle = zram_get_handle(zram, index);
- if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
- unsigned long value;
- void *mem;
-
- value = handle ? zram_get_element(zram, index) : 0;
- mem = kmap_local_page(page);
- zram_fill_page(mem, PAGE_SIZE, value);
- kunmap_local(mem);
- return 0;
- }
-
size = zram_get_obj_size(zram, index);
+ prio = zram_get_priority(zram, index);
- if (size != PAGE_SIZE) {
- prio = zram_get_priority(zram, index);
- zstrm = zcomp_stream_get(zram->comps[prio]);
- }
+ zstrm = zcomp_stream_get(zram->comps[prio]);
+ src = zs_obj_read_begin(zram->mem_pool, handle, zstrm->local_copy);
+ dst = kmap_local_page(page);
+ ret = zcomp_decompress(zram->comps[prio], zstrm, src, size, dst);
+ kunmap_local(dst);
+ zs_obj_read_end(zram->mem_pool, handle, src);
+ zcomp_stream_put(zstrm);
- src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
- if (size == PAGE_SIZE) {
- dst = kmap_local_page(page);
- memcpy(dst, src, PAGE_SIZE);
- kunmap_local(dst);
- ret = 0;
- } else {
- dst = kmap_local_page(page);
- ret = zcomp_decompress(zstrm, src, size, dst);
- kunmap_local(dst);
- zcomp_stream_put(zram->comps[prio]);
- }
- zs_unmap_object(zram->mem_pool, handle);
return ret;
}
+/*
+ * Reads (decompresses if needed) a page from zspool (zsmalloc).
+ * Corresponding ZRAM slot should be locked.
+ */
+static int zram_read_from_zspool(struct zram *zram, struct page *page,
+ u32 index)
+{
+ if (zram_test_flag(zram, index, ZRAM_SAME) ||
+ !zram_get_handle(zram, index))
+ return read_same_filled_page(zram, page, index);
+
+ if (!zram_test_flag(zram, index, ZRAM_HUGE))
+ return read_compressed_page(zram, page, index);
+ else
+ return read_incompressible_page(zram, page, index);
+}
+
static int zram_read_page(struct zram *zram, struct page *page, u32 index,
struct bio *parent)
{
@@ -1367,7 +1752,7 @@ static int zram_read_page(struct zram *zram, struct page *page, u32 index,
*/
zram_slot_unlock(zram, index);
- ret = read_from_bdev(zram, page, zram_get_element(zram, index),
+ ret = read_from_bdev(zram, page, zram_get_handle(zram, index),
parent);
}
@@ -1405,128 +1790,124 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
return zram_read_page(zram, bvec->bv_page, index, bio);
}
+static int write_same_filled_page(struct zram *zram, unsigned long fill,
+ u32 index)
+{
+ zram_slot_lock(zram, index);
+ zram_set_flag(zram, index, ZRAM_SAME);
+ zram_set_handle(zram, index, fill);
+ zram_slot_unlock(zram, index);
+
+ atomic64_inc(&zram->stats.same_pages);
+ atomic64_inc(&zram->stats.pages_stored);
+
+ return 0;
+}
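
A same-filled page stores no zsmalloc object at all: the fill word itself is kept in the handle field and regenerated on read by zram_fill_page(). A minimal sketch of the detection step (the kernel's page_same_filled() performs an equivalent word-by-word scan; names here are illustrative):

#include <stdbool.h>
#include <stddef.h>

static bool same_filled(const unsigned long *page, size_t nr_words,
			unsigned long *element)
{
	for (size_t i = 1; i < nr_words; i++)
		if (page[i] != page[0])
			return false;

	*element = page[0];	/* the only thing that needs storing */
	return true;
}

int main(void)
{
	unsigned long page[512] = { 0 };	/* a zero page is same-filled */
	unsigned long elem;

	return same_filled(page, 512, &elem) ? 0 : 1;
}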
+
+static int write_incompressible_page(struct zram *zram, struct page *page,
+ u32 index)
+{
+ unsigned long handle;
+ void *src;
+
+ /*
+	 * This function is called from preemptible context, so we don't
+	 * need to do an optimistic handle allocation with a fallback to a
+	 * pessimistic one, like we do for compressible pages.
+ */
+ handle = zs_malloc(zram->mem_pool, PAGE_SIZE,
+ GFP_NOIO | __GFP_NOWARN |
+ __GFP_HIGHMEM | __GFP_MOVABLE, page_to_nid(page));
+ if (IS_ERR_VALUE(handle))
+ return PTR_ERR((void *)handle);
+
+ if (!zram_can_store_page(zram)) {
+ zs_free(zram->mem_pool, handle);
+ return -ENOMEM;
+ }
+
+ src = kmap_local_page(page);
+ zs_obj_write(zram->mem_pool, handle, src, PAGE_SIZE);
+ kunmap_local(src);
+
+ zram_slot_lock(zram, index);
+ zram_set_flag(zram, index, ZRAM_HUGE);
+ zram_set_handle(zram, index, handle);
+ zram_set_obj_size(zram, index, PAGE_SIZE);
+ zram_slot_unlock(zram, index);
+
+ atomic64_add(PAGE_SIZE, &zram->stats.compr_data_size);
+ atomic64_inc(&zram->stats.huge_pages);
+ atomic64_inc(&zram->stats.huge_pages_since);
+ atomic64_inc(&zram->stats.pages_stored);
+
+ return 0;
+}
+
static int zram_write_page(struct zram *zram, struct page *page, u32 index)
{
int ret = 0;
- unsigned long alloced_pages;
- unsigned long handle = -ENOMEM;
- unsigned int comp_len = 0;
- void *src, *dst, *mem;
+ unsigned long handle;
+ unsigned int comp_len;
+ void *mem;
struct zcomp_strm *zstrm;
- unsigned long element = 0;
- enum zram_pageflags flags = 0;
+ unsigned long element;
+ bool same_filled;
+
+ /* First, free memory allocated to this slot (if any) */
+ zram_slot_lock(zram, index);
+ zram_free_page(zram, index);
+ zram_slot_unlock(zram, index);
mem = kmap_local_page(page);
- if (page_same_filled(mem, &element)) {
- kunmap_local(mem);
- /* Free memory associated with this sector now. */
- flags = ZRAM_SAME;
- atomic64_inc(&zram->stats.same_pages);
- goto out;
- }
+ same_filled = page_same_filled(mem, &element);
kunmap_local(mem);
+ if (same_filled)
+ return write_same_filled_page(zram, element, index);
-compress_again:
zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
- src = kmap_local_page(page);
- ret = zcomp_compress(zstrm, src, &comp_len);
- kunmap_local(src);
+ mem = kmap_local_page(page);
+ ret = zcomp_compress(zram->comps[ZRAM_PRIMARY_COMP], zstrm,
+ mem, &comp_len);
+ kunmap_local(mem);
if (unlikely(ret)) {
- zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
+ zcomp_stream_put(zstrm);
pr_err("Compression failed! err=%d\n", ret);
- zs_free(zram->mem_pool, handle);
return ret;
}
- if (comp_len >= huge_class_size)
- comp_len = PAGE_SIZE;
- /*
- * handle allocation has 2 paths:
- * a) fast path is executed with preemption disabled (for
- * per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
- * since we can't sleep;
- * b) slow path enables preemption and attempts to allocate
- * the page with __GFP_DIRECT_RECLAIM bit set. we have to
- * put per-cpu compression stream and, thus, to re-do
- * the compression once handle is allocated.
- *
- * if we have a 'non-null' handle here then we are coming
- * from the slow path and handle has already been allocated.
- */
- if (IS_ERR_VALUE(handle))
- handle = zs_malloc(zram->mem_pool, comp_len,
- __GFP_KSWAPD_RECLAIM |
- __GFP_NOWARN |
- __GFP_HIGHMEM |
- __GFP_MOVABLE);
- if (IS_ERR_VALUE(handle)) {
- zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
- atomic64_inc(&zram->stats.writestall);
- handle = zs_malloc(zram->mem_pool, comp_len,
- GFP_NOIO | __GFP_HIGHMEM |
- __GFP_MOVABLE);
- if (IS_ERR_VALUE(handle))
- return PTR_ERR((void *)handle);
-
- if (comp_len != PAGE_SIZE)
- goto compress_again;
- /*
- * If the page is not compressible, you need to acquire the
- * lock and execute the code below. The zcomp_stream_get()
- * call is needed to disable the cpu hotplug and grab the
- * zstrm buffer back. It is necessary that the dereferencing
- * of the zstrm variable below occurs correctly.
- */
- zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
+ if (comp_len >= huge_class_size) {
+ zcomp_stream_put(zstrm);
+ return write_incompressible_page(zram, page, index);
}
- alloced_pages = zs_get_total_pages(zram->mem_pool);
- update_used_max(zram, alloced_pages);
+ handle = zs_malloc(zram->mem_pool, comp_len,
+ GFP_NOIO | __GFP_NOWARN |
+ __GFP_HIGHMEM | __GFP_MOVABLE, page_to_nid(page));
+ if (IS_ERR_VALUE(handle)) {
+ zcomp_stream_put(zstrm);
+ return PTR_ERR((void *)handle);
+ }
- if (zram->limit_pages && alloced_pages > zram->limit_pages) {
- zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
+ if (!zram_can_store_page(zram)) {
+ zcomp_stream_put(zstrm);
zs_free(zram->mem_pool, handle);
return -ENOMEM;
}
- dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
-
- src = zstrm->buffer;
- if (comp_len == PAGE_SIZE)
- src = kmap_local_page(page);
- memcpy(dst, src, comp_len);
- if (comp_len == PAGE_SIZE)
- kunmap_local(src);
+ zs_obj_write(zram->mem_pool, handle, zstrm->buffer, comp_len);
+ zcomp_stream_put(zstrm);
- zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
- zs_unmap_object(zram->mem_pool, handle);
- atomic64_add(comp_len, &zram->stats.compr_data_size);
-out:
- /*
- * Free memory associated with this sector
- * before overwriting unused sectors.
- */
zram_slot_lock(zram, index);
- zram_free_page(zram, index);
-
- if (comp_len == PAGE_SIZE) {
- zram_set_flag(zram, index, ZRAM_HUGE);
- atomic64_inc(&zram->stats.huge_pages);
- atomic64_inc(&zram->stats.huge_pages_since);
- }
-
- if (flags) {
- zram_set_flag(zram, index, flags);
- zram_set_element(zram, index, element);
- } else {
- zram_set_handle(zram, index, handle);
- zram_set_obj_size(zram, index, comp_len);
- }
+ zram_set_handle(zram, index, handle);
+ zram_set_obj_size(zram, index, comp_len);
zram_slot_unlock(zram, index);
/* Update stats */
atomic64_inc(&zram->stats.pages_stored);
+ atomic64_add(comp_len, &zram->stats.compr_data_size);
+
return ret;
}
@@ -1560,6 +1941,49 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
}
#ifdef CONFIG_ZRAM_MULTI_COMP
+#define RECOMPRESS_IDLE (1 << 0)
+#define RECOMPRESS_HUGE (1 << 1)
+
+static int scan_slots_for_recompress(struct zram *zram, u32 mode, u32 prio_max,
+ struct zram_pp_ctl *ctl)
+{
+ unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
+ unsigned long index;
+
+ for (index = 0; index < nr_pages; index++) {
+ bool ok = true;
+
+ zram_slot_lock(zram, index);
+ if (!zram_allocated(zram, index))
+ goto next;
+
+ if (mode & RECOMPRESS_IDLE &&
+ !zram_test_flag(zram, index, ZRAM_IDLE))
+ goto next;
+
+ if (mode & RECOMPRESS_HUGE &&
+ !zram_test_flag(zram, index, ZRAM_HUGE))
+ goto next;
+
+ if (zram_test_flag(zram, index, ZRAM_WB) ||
+ zram_test_flag(zram, index, ZRAM_SAME) ||
+ zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
+ goto next;
+
+ /* Already compressed with the same or a higher priority */
+ if (zram_get_priority(zram, index) + 1 >= prio_max)
+ goto next;
+
+ ok = place_pp_slot(zram, ctl, index);
+next:
+ zram_slot_unlock(zram, index);
+ if (!ok)
+ break;
+ }
+
+ return 0;
+}
+
/*
* This function will decompress (unless it's ZRAM_HUGE) the page and then
* attempt to compress it using provided compression algorithm priority
@@ -1567,8 +1991,9 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
*
* Corresponding ZRAM slot should be locked.
*/
-static int zram_recompress(struct zram *zram, u32 index, struct page *page,
- u32 threshold, u32 prio, u32 prio_max)
+static int recompress_slot(struct zram *zram, u32 index, struct page *page,
+ u64 *num_recomp_pages, u32 threshold, u32 prio,
+ u32 prio_max)
{
struct zcomp_strm *zstrm = NULL;
unsigned long handle_old;
@@ -1577,9 +2002,8 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
unsigned int comp_len_new;
unsigned int class_index_old;
unsigned int class_index_new;
- u32 num_recomps = 0;
- void *src, *dst;
- int ret;
+ void *src;
+ int ret = 0;
handle_old = zram_get_handle(zram, index);
if (!handle_old)
@@ -1596,7 +2020,24 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
if (ret)
return ret;
+ /*
+ * We touched this entry so mark it as non-IDLE. This makes sure that
+ * we don't preserve IDLE flag and don't incorrectly pick this entry
+ * for different post-processing type (e.g. writeback).
+ */
+ zram_clear_flag(zram, index, ZRAM_IDLE);
+
class_index_old = zs_lookup_class_index(zram->mem_pool, comp_len_old);
+
+ prio = max(prio, zram_get_priority(zram, index) + 1);
+ /*
+	 * The recompression slots scan should not select slots that are
+	 * already compressed with a higher priority algorithm, but
+	 * check again here just in case.
+ */
+ if (prio >= prio_max)
+ return 0;
+
/*
* Iterate the secondary comp algorithms list (in order of priority)
* and try to recompress the page.
@@ -1605,22 +2046,16 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
if (!zram->comps[prio])
continue;
- /*
- * Skip if the object is already re-compressed with a higher
- * priority algorithm (or same algorithm).
- */
- if (prio <= zram_get_priority(zram, index))
- continue;
-
- num_recomps++;
zstrm = zcomp_stream_get(zram->comps[prio]);
src = kmap_local_page(page);
- ret = zcomp_compress(zstrm, src, &comp_len_new);
+ ret = zcomp_compress(zram->comps[prio], zstrm,
+ src, &comp_len_new);
kunmap_local(src);
if (ret) {
- zcomp_stream_put(zram->comps[prio]);
- return ret;
+ zcomp_stream_put(zstrm);
+ zstrm = NULL;
+ break;
}
class_index_new = zs_lookup_class_index(zram->mem_pool,
@@ -1629,7 +2064,8 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
/* Continue until we make progress */
if (class_index_new >= class_index_old ||
(threshold && comp_len_new >= threshold)) {
- zcomp_stream_put(zram->comps[prio]);
+ zcomp_stream_put(zstrm);
+ zstrm = NULL;
continue;
}
@@ -1638,55 +2074,52 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
}
/*
- * We did not try to recompress, e.g. when we have only one
- * secondary algorithm and the page is already recompressed
- * using that algorithm
+ * Decrement the limit (if set) on pages we can recompress, even
+ * when the current recompression was unsuccessful or did not compress
+ * the page below the threshold, because we still spent resources
+ * on it.
*/
- if (!zstrm)
- return 0;
+ if (*num_recomp_pages)
+ *num_recomp_pages -= 1;
+
+ /* Compression error */
+ if (ret)
+ return ret;
- if (class_index_new >= class_index_old) {
+ if (!zstrm) {
/*
* Secondary algorithms failed to re-compress the page
- * in a way that would save memory, mark the object as
- * incompressible so that we will not try to compress
- * it again.
+ * in a way that would save memory.
*
- * We need to make sure that all secondary algorithms have
- * failed, so we test if the number of recompressions matches
- * the number of active secondary algorithms.
+ * Mark the object incompressible if the max-priority
+ * algorithm couldn't re-compress it.
*/
- if (num_recomps == zram->num_active_comps - 1)
- zram_set_flag(zram, index, ZRAM_INCOMPRESSIBLE);
+ if (prio < zram->num_active_comps)
+ return 0;
+ zram_set_flag(zram, index, ZRAM_INCOMPRESSIBLE);
return 0;
}
- /* Successful recompression but above threshold */
- if (threshold && comp_len_new >= threshold)
- return 0;
-
/*
- * No direct reclaim (slow path) for handle allocation and no
- * re-compression attempt (unlike in zram_write_bvec()) since
- * we already have stored that object in zsmalloc. If we cannot
- * alloc memory for recompressed object then we bail out and
- * simply keep the old (existing) object in zsmalloc.
+	 * We are holding the per-CPU stream mutex and the entry lock, so
+	 * we'd better avoid direct reclaim. An allocation error is not
+	 * fatal since we still have the old object in the mem_pool.
+ *
+ * XXX: technically, the node we really want here is the node that holds
+ * the original compressed data. But that would require us to modify
+ * zsmalloc API to return this information. For now, we will make do with
+ * the node of the page allocated for recompression.
*/
handle_new = zs_malloc(zram->mem_pool, comp_len_new,
- __GFP_KSWAPD_RECLAIM |
- __GFP_NOWARN |
- __GFP_HIGHMEM |
- __GFP_MOVABLE);
+ GFP_NOIO | __GFP_NOWARN |
+ __GFP_HIGHMEM | __GFP_MOVABLE, page_to_nid(page));
if (IS_ERR_VALUE(handle_new)) {
- zcomp_stream_put(zram->comps[prio]);
+ zcomp_stream_put(zstrm);
return PTR_ERR((void *)handle_new);
}
- dst = zs_map_object(zram->mem_pool, handle_new, ZS_MM_WO);
- memcpy(dst, zstrm->buffer, comp_len_new);
- zcomp_stream_put(zram->comps[prio]);
-
- zs_unmap_object(zram->mem_pool, handle_new);
+ zs_obj_write(zram->mem_pool, handle_new, zstrm->buffer, comp_len_new);
+ zcomp_stream_put(zstrm);
zram_free_page(zram, index);
zram_set_handle(zram, index, handle_new);
@@ -1699,22 +2132,23 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
return 0;
}
-#define RECOMPRESS_IDLE (1 << 0)
-#define RECOMPRESS_HUGE (1 << 1)
-
static ssize_t recompress_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
- u32 prio = ZRAM_SECONDARY_COMP, prio_max = ZRAM_MAX_COMPS;
struct zram *zram = dev_to_zram(dev);
- unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
char *args, *param, *val, *algo = NULL;
+ u64 num_recomp_pages = ULLONG_MAX;
+ struct zram_pp_ctl *ctl = NULL;
+ struct zram_pp_slot *pps;
u32 mode = 0, threshold = 0;
- unsigned long index;
- struct page *page;
+ u32 prio, prio_max;
+ struct page *page = NULL;
ssize_t ret;
+ prio = ZRAM_SECONDARY_COMP;
+ prio_max = zram->num_active_comps;
+
args = skip_spaces(buf);
while (*args) {
args = next_arg(args, &param, &val);
@@ -1732,6 +2166,17 @@ static ssize_t recompress_store(struct device *dev,
continue;
}
+ if (!strcmp(param, "max_pages")) {
+ /*
+ * Limit the number of entries (pages) we attempt to
+ * recompress.
+ */
+ ret = kstrtoull(val, 10, &num_recomp_pages);
+ if (ret)
+ return ret;
+ continue;
+ }
+
if (!strcmp(param, "threshold")) {
/*
* We will re-compress only idle objects equal or
@@ -1747,6 +2192,18 @@ static ssize_t recompress_store(struct device *dev,
algo = val;
continue;
}
+
+ if (!strcmp(param, "priority")) {
+ ret = kstrtouint(val, 10, &prio);
+ if (ret)
+ return ret;
+
+ if (prio == ZRAM_PRIMARY_COMP)
+ prio = ZRAM_SECONDARY_COMP;
+
+ prio_max = prio + 1;
+ continue;
+ }
}
if (threshold >= huge_class_size)
@@ -1758,6 +2215,12 @@ static ssize_t recompress_store(struct device *dev,
goto release_init_lock;
}
+ /* Do not permit concurrent post-processing actions. */
+ if (atomic_xchg(&zram->pp_in_progress, 1)) {
+ up_read(&zram->init_lock);
+ return -EAGAIN;
+ }
+
if (algo) {
bool found = false;
@@ -1766,7 +2229,7 @@ static ssize_t recompress_store(struct device *dev,
continue;
if (!strcmp(zram->comp_algs[prio], algo)) {
- prio_max = min(prio + 1, ZRAM_MAX_COMPS);
+ prio_max = prio + 1;
found = true;
break;
}
@@ -1778,39 +2241,44 @@ static ssize_t recompress_store(struct device *dev,
}
}
+ prio_max = min(prio_max, (u32)zram->num_active_comps);
+ if (prio >= prio_max) {
+ ret = -EINVAL;
+ goto release_init_lock;
+ }
+
page = alloc_page(GFP_KERNEL);
if (!page) {
ret = -ENOMEM;
goto release_init_lock;
}
- ret = len;
- for (index = 0; index < nr_pages; index++) {
- int err = 0;
-
- zram_slot_lock(zram, index);
+ ctl = init_pp_ctl();
+ if (!ctl) {
+ ret = -ENOMEM;
+ goto release_init_lock;
+ }
- if (!zram_allocated(zram, index))
- goto next;
+ scan_slots_for_recompress(zram, mode, prio_max, ctl);
- if (mode & RECOMPRESS_IDLE &&
- !zram_test_flag(zram, index, ZRAM_IDLE))
- goto next;
+ ret = len;
+ while ((pps = select_pp_slot(ctl))) {
+ int err = 0;
- if (mode & RECOMPRESS_HUGE &&
- !zram_test_flag(zram, index, ZRAM_HUGE))
- goto next;
+ if (!num_recomp_pages)
+ break;
- if (zram_test_flag(zram, index, ZRAM_WB) ||
- zram_test_flag(zram, index, ZRAM_UNDER_WB) ||
- zram_test_flag(zram, index, ZRAM_SAME) ||
- zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
+ zram_slot_lock(zram, pps->index);
+ if (!zram_test_flag(zram, pps->index, ZRAM_PP_SLOT))
goto next;
- err = zram_recompress(zram, index, page, threshold,
+ err = recompress_slot(zram, pps->index, page,
+ &num_recomp_pages, threshold,
prio, prio_max);
next:
- zram_slot_unlock(zram, index);
+ zram_slot_unlock(zram, pps->index);
+ release_pp_slot(zram, pps);
+
if (err) {
ret = err;
break;
@@ -1819,9 +2287,11 @@ next:
cond_resched();
}
- __free_page(page);
-
release_init_lock:
+ if (page)
+ __free_page(page);
+ release_pp_ctl(zram, ctl);
+ atomic_set(&zram->pp_in_progress, 0);
up_read(&zram->init_lock);
return ret;
}
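
recompress_store() accepts the same key=value grammar. A hedged usage sketch (the "type" mode word is parsed by unchanged code outside this hunk; "max_pages" comes from the parser above; all values are examples only):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* recompress idle slots, attempting at most 1000 of them */
	const char *cmd = "type=idle max_pages=1000";
	int fd = open("/sys/block/zram0/recompress", O_WRONLY);
	int ret = 0;

	if (fd < 0)
		return 1;
	if (write(fd, cmd, strlen(cmd)) != (ssize_t)strlen(cmd))
		ret = 1;
	close(fd);
	return ret;
}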
@@ -1966,11 +2436,20 @@ static void zram_slot_free_notify(struct block_device *bdev,
zram_slot_unlock(zram, index);
}
+static void zram_comp_params_reset(struct zram *zram)
+{
+ u32 prio;
+
+	for (prio = ZRAM_PRIMARY_COMP; prio < ZRAM_MAX_COMPS; prio++)
+		comp_params_reset(zram, prio);
+}
+
static void zram_destroy_comps(struct zram *zram)
{
u32 prio;
- for (prio = 0; prio < ZRAM_MAX_COMPS; prio++) {
+ for (prio = ZRAM_PRIMARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
struct zcomp *comp = zram->comps[prio];
zram->comps[prio] = NULL;
@@ -1979,6 +2458,15 @@ static void zram_destroy_comps(struct zram *zram)
zcomp_destroy(comp);
zram->num_active_comps--;
}
+
+ for (prio = ZRAM_PRIMARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
+ /* Do not free statically defined compression algorithms */
+ if (zram->comp_algs[prio] != default_compressor)
+ kfree(zram->comp_algs[prio]);
+ zram->comp_algs[prio] = NULL;
+ }
+
+ zram_comp_params_reset(zram);
}
static void zram_reset_device(struct zram *zram)
@@ -1987,11 +2475,6 @@ static void zram_reset_device(struct zram *zram)
zram->limit_pages = 0;
- if (!init_done(zram)) {
- up_write(&zram->init_lock);
- return;
- }
-
set_capacity_and_notify(zram->disk, 0);
part_stat_set_all(zram->disk->part0, 0);
@@ -2000,6 +2483,7 @@ static void zram_reset_device(struct zram *zram)
zram->disksize = 0;
zram_destroy_comps(zram);
memset(&zram->stats, 0, sizeof(zram->stats));
+ atomic_set(&zram->pp_in_progress, 0);
reset_bdev(zram);
comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor);
@@ -2032,11 +2516,12 @@ static ssize_t disksize_store(struct device *dev,
goto out_unlock;
}
- for (prio = 0; prio < ZRAM_MAX_COMPS; prio++) {
+ for (prio = ZRAM_PRIMARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
if (!zram->comp_algs[prio])
continue;
- comp = zcomp_create(zram->comp_algs[prio]);
+ comp = zcomp_create(zram->comp_algs[prio],
+ &zram->params[prio]);
if (IS_ERR(comp)) {
pr_err("Cannot initialise %s compressing backend\n",
zram->comp_algs[prio]);
@@ -2127,7 +2612,6 @@ static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_WO(mem_limit);
static DEVICE_ATTR_WO(mem_used_max);
static DEVICE_ATTR_WO(idle);
-static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);
#ifdef CONFIG_ZRAM_WRITEBACK
static DEVICE_ATTR_RW(backing_dev);
@@ -2139,6 +2623,7 @@ static DEVICE_ATTR_RW(writeback_limit_enable);
static DEVICE_ATTR_RW(recomp_algorithm);
static DEVICE_ATTR_WO(recompress);
#endif
+static DEVICE_ATTR_WO(algorithm_params);
static struct attribute *zram_disk_attrs[] = {
&dev_attr_disksize.attr,
@@ -2148,7 +2633,6 @@ static struct attribute *zram_disk_attrs[] = {
&dev_attr_mem_limit.attr,
&dev_attr_mem_used_max.attr,
&dev_attr_idle.attr,
- &dev_attr_max_comp_streams.attr,
&dev_attr_comp_algorithm.attr,
#ifdef CONFIG_ZRAM_WRITEBACK
&dev_attr_backing_dev.attr,
@@ -2166,6 +2650,7 @@ static struct attribute *zram_disk_attrs[] = {
&dev_attr_recomp_algorithm.attr,
&dev_attr_recompress.attr,
#endif
+ &dev_attr_algorithm_params.attr,
NULL,
};
@@ -2177,6 +2662,30 @@ ATTRIBUTE_GROUPS(zram_disk);
*/
static int zram_add(void)
{
+ struct queue_limits lim = {
+ .logical_block_size = ZRAM_LOGICAL_BLOCK_SIZE,
+ /*
+ * To ensure that we always get PAGE_SIZE aligned and
+ * n*PAGE_SIZED sized I/O requests.
+ */
+ .physical_block_size = PAGE_SIZE,
+ .io_min = PAGE_SIZE,
+ .io_opt = PAGE_SIZE,
+ .max_hw_discard_sectors = UINT_MAX,
+ /*
+	 * zram_bio_discard() will clear all logical blocks if the logical
+	 * block size is identical to the physical block size (PAGE_SIZE).
+ * But if it is different, we will skip discarding some parts of
+ * logical blocks in the part of the request range which isn't
+ * aligned to physical block size. So we can't ensure that all
+ * discarded logical blocks are zeroed.
+ */
+#if ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE
+ .max_write_zeroes_sectors = UINT_MAX,
+#endif
+ .features = BLK_FEAT_STABLE_WRITES |
+ BLK_FEAT_SYNCHRONOUS,
+ };
struct zram *zram;
int ret, device_id;
@@ -2195,11 +2704,11 @@ static int zram_add(void)
#endif
/* gendisk structure */
- zram->disk = blk_alloc_disk(NUMA_NO_NODE);
- if (!zram->disk) {
+ zram->disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
+ if (IS_ERR(zram->disk)) {
pr_err("Error allocating disk structure for device %d\n",
device_id);
- ret = -ENOMEM;
+ ret = PTR_ERR(zram->disk);
goto out_free_idr;
}
@@ -2210,42 +2719,16 @@ static int zram_add(void)
zram->disk->fops = &zram_devops;
zram->disk->private_data = zram;
snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
+ atomic_set(&zram->pp_in_progress, 0);
+ zram_comp_params_reset(zram);
+ comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor);
/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
set_capacity(zram->disk, 0);
- /* zram devices sort of resembles non-rotational disks */
- blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue);
- blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, zram->disk->queue);
-
- /*
- * To ensure that we always get PAGE_SIZE aligned
- * and n*PAGE_SIZED sized I/O requests.
- */
- blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
- blk_queue_logical_block_size(zram->disk->queue,
- ZRAM_LOGICAL_BLOCK_SIZE);
- blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
- blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
- blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
-
- /*
- * zram_bio_discard() will clear all logical blocks if logical block
- * size is identical with physical block size(PAGE_SIZE). But if it is
- * different, we will skip discarding some parts of logical blocks in
- * the part of the request range which isn't aligned to physical block
- * size. So we can't ensure that all discarded logical blocks are
- * zeroed.
- */
- if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
- blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);
-
- blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue);
ret = device_add_disk(NULL, zram->disk, zram_disk_groups);
if (ret)
goto out_cleanup_disk;
- comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor);
-
zram_debugfs_register(zram);
pr_info("Added device: %s\n", zram->disk->disk_name);
return device_id;
@@ -2394,9 +2877,10 @@ static void destroy_devices(void)
static int __init zram_init(void)
{
+ struct zram_table_entry zram_te;
int ret;
- BUILD_BUG_ON(__NR_ZRAM_PAGEFLAGS > BITS_PER_LONG);
+ BUILD_BUG_ON(__NR_ZRAM_PAGEFLAGS > sizeof(zram_te.flags) * 8);
ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
zcomp_cpu_up_prepare, zcomp_cpu_dead);