author     Linus Torvalds <torvalds@linux-foundation.org>  2018-12-28 16:55:46 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-12-28 16:55:46 -0800
commit     f346b0becb1bc62e45495f9cdbae3eef35d0b635 (patch)
tree       ae79f3dfb8e031da51d38f0f095f89d7d23f3643 /drivers/block
parent     00d59fde8532b2d42e80909d2e58678755e04da9 (diff)
parent     0f4991e8fd48987ae476a92cdee6bfec4aff31b8 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:

 - large KASAN update to use arm's "software tag-based mode"

 - a few misc things

 - sh updates

 - ocfs2 updates

 - just about all of MM

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (167 commits)
  kernel/fork.c: mark 'stack_vm_area' with __maybe_unused
  memcg, oom: notify on oom killer invocation from the charge path
  mm, swap: fix swapoff with KSM pages
  include/linux/gfp.h: fix typo
  mm/hmm: fix memremap.h, move dev_page_fault_t callback to hmm
  hugetlbfs: Use i_mmap_rwsem to fix page fault/truncate race
  hugetlbfs: use i_mmap_rwsem for more pmd sharing synchronization
  memory_hotplug: add missing newlines to debugging output
  mm: remove __hugepage_set_anon_rmap()
  include/linux/vmstat.h: remove unused page state adjustment macro
  mm/page_alloc.c: allow error injection
  mm: migrate: drop unused argument of migrate_page_move_mapping()
  blkdev: avoid migration stalls for blkdev pages
  mm: migrate: provide buffer_migrate_page_norefs()
  mm: migrate: move migrate_page_lock_buffers()
  mm: migrate: lock buffers before migrate_page_move_mapping()
  mm: migration: factor out code to compute expected number of page references
  mm, page_alloc: enable pcpu_drain with zone capability
  kmemleak: add config to select auto scan
  mm/page_alloc.c: don't call kasan_free_pages() at deferred mem init
  ...
Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/zram/Kconfig    |   5
-rw-r--r--  drivers/block/zram/zram_drv.c | 502
-rw-r--r--  drivers/block/zram/zram_drv.h |  19
3 files changed, 372 insertions, 154 deletions
diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig
index fcd055457364..1ffc64770643 100644
--- a/drivers/block/zram/Kconfig
+++ b/drivers/block/zram/Kconfig
@@ -15,7 +15,7 @@ config ZRAM
See Documentation/blockdev/zram.txt for more information.
config ZRAM_WRITEBACK
- bool "Write back incompressible page to backing device"
+ bool "Write back incompressible or idle page to backing device"
depends on ZRAM
help
With incompressible page, there is no memory saving to keep it
@@ -23,6 +23,9 @@ config ZRAM_WRITEBACK
For this feature, admin should set up backing device via
/sys/block/zramX/backing_dev.
+ With /sys/block/zramX/{idle,writeback}, application could ask
+ idle page's writeback to the backing device to save in memory.
+
See Documentation/blockdev/zram.txt for more information.
config ZRAM_MEMORY_TRACKING
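
As a usage illustration only (the device name zram0 and the wait interval are assumptions, not part of this patch): a user-space helper would first mark every slot idle, let the workload run so that accessed pages lose the idle mark, and then request writeback of whatever stayed idle.

/* Hypothetical sketch, assuming zram0 with backing_dev already set up. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int sysfs_write(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0) {
		perror(path);
		return -1;
	}
	n = write(fd, val, strlen(val));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	/* "all" is the only mode idle_store accepts in this patch */
	if (sysfs_write("/sys/block/zram0/idle", "all"))
		return 1;
	/* wait so that pages the workload touches get ZRAM_IDLE cleared */
	sleep(60);
	/* write back pages still marked idle ("huge" is also accepted) */
	return sysfs_write("/sys/block/zram0/writeback", "idle") ? 1 : 0;
}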
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 4879595200e1..33c5cc879f24 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -52,15 +52,23 @@ static unsigned int num_devices = 1;
static size_t huge_class_size;
static void zram_free_page(struct zram *zram, size_t index);
+static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
+ u32 index, int offset, struct bio *bio);
+
+
+static int zram_slot_trylock(struct zram *zram, u32 index)
+{
+ return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags);
+}
static void zram_slot_lock(struct zram *zram, u32 index)
{
- bit_spin_lock(ZRAM_LOCK, &zram->table[index].value);
+ bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags);
}
static void zram_slot_unlock(struct zram *zram, u32 index)
{
- bit_spin_unlock(ZRAM_LOCK, &zram->table[index].value);
+ bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
}
static inline bool init_done(struct zram *zram)
@@ -68,13 +76,6 @@ static inline bool init_done(struct zram *zram)
return zram->disksize;
}
-static inline bool zram_allocated(struct zram *zram, u32 index)
-{
-
- return (zram->table[index].value >> (ZRAM_FLAG_SHIFT + 1)) ||
- zram->table[index].handle;
-}
-
static inline struct zram *dev_to_zram(struct device *dev)
{
return (struct zram *)dev_to_disk(dev)->private_data;
@@ -94,19 +95,19 @@ static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
static bool zram_test_flag(struct zram *zram, u32 index,
enum zram_pageflags flag)
{
- return zram->table[index].value & BIT(flag);
+ return zram->table[index].flags & BIT(flag);
}
static void zram_set_flag(struct zram *zram, u32 index,
enum zram_pageflags flag)
{
- zram->table[index].value |= BIT(flag);
+ zram->table[index].flags |= BIT(flag);
}
static void zram_clear_flag(struct zram *zram, u32 index,
enum zram_pageflags flag)
{
- zram->table[index].value &= ~BIT(flag);
+ zram->table[index].flags &= ~BIT(flag);
}
static inline void zram_set_element(struct zram *zram, u32 index,
@@ -122,15 +123,22 @@ static unsigned long zram_get_element(struct zram *zram, u32 index)
static size_t zram_get_obj_size(struct zram *zram, u32 index)
{
- return zram->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
+ return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1);
}
static void zram_set_obj_size(struct zram *zram,
u32 index, size_t size)
{
- unsigned long flags = zram->table[index].value >> ZRAM_FLAG_SHIFT;
+ unsigned long flags = zram->table[index].flags >> ZRAM_FLAG_SHIFT;
- zram->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
+ zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size;
+}
+
+static inline bool zram_allocated(struct zram *zram, u32 index)
+{
+ return zram_get_obj_size(zram, index) ||
+ zram_test_flag(zram, index, ZRAM_SAME) ||
+ zram_test_flag(zram, index, ZRAM_WB);
}
#if PAGE_SIZE != 4096
@@ -276,17 +284,90 @@ static ssize_t mem_used_max_store(struct device *dev,
return len;
}
+static ssize_t idle_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct zram *zram = dev_to_zram(dev);
+ unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
+ int index;
+ char mode_buf[8];
+ ssize_t sz;
+
+ sz = strscpy(mode_buf, buf, sizeof(mode_buf));
+ if (sz <= 0)
+ return -EINVAL;
+
+ /* ignore trailing new line */
+ if (mode_buf[sz - 1] == '\n')
+ mode_buf[sz - 1] = 0x00;
+
+ if (strcmp(mode_buf, "all"))
+ return -EINVAL;
+
+ down_read(&zram->init_lock);
+ if (!init_done(zram)) {
+ up_read(&zram->init_lock);
+ return -EINVAL;
+ }
+
+ for (index = 0; index < nr_pages; index++) {
+ /*
+ * Do not mark ZRAM_UNDER_WB slot as ZRAM_IDLE to close race.
+ * See the comment in writeback_store.
+ */
+ zram_slot_lock(zram, index);
+ if (!zram_allocated(zram, index) ||
+ zram_test_flag(zram, index, ZRAM_UNDER_WB))
+ goto next;
+ zram_set_flag(zram, index, ZRAM_IDLE);
+next:
+ zram_slot_unlock(zram, index);
+ }
+
+ up_read(&zram->init_lock);
+
+ return len;
+}
+
#ifdef CONFIG_ZRAM_WRITEBACK
-static bool zram_wb_enabled(struct zram *zram)
+static ssize_t writeback_limit_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct zram *zram = dev_to_zram(dev);
+ u64 val;
+ ssize_t ret = -EINVAL;
+
+ if (kstrtoull(buf, 10, &val))
+ return ret;
+
+ down_read(&zram->init_lock);
+ atomic64_set(&zram->stats.bd_wb_limit, val);
+ if (val == 0)
+ zram->stop_writeback = false;
+ up_read(&zram->init_lock);
+ ret = len;
+
+ return ret;
+}
+
+static ssize_t writeback_limit_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- return zram->backing_dev;
+ u64 val;
+ struct zram *zram = dev_to_zram(dev);
+
+ down_read(&zram->init_lock);
+ val = atomic64_read(&zram->stats.bd_wb_limit);
+ up_read(&zram->init_lock);
+
+ return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
}
static void reset_bdev(struct zram *zram)
{
struct block_device *bdev;
- if (!zram_wb_enabled(zram))
+ if (!zram->backing_dev)
return;
bdev = zram->bdev;
@@ -313,7 +394,7 @@ static ssize_t backing_dev_show(struct device *dev,
ssize_t ret;
down_read(&zram->init_lock);
- if (!zram_wb_enabled(zram)) {
+ if (!zram->backing_dev) {
memcpy(buf, "none\n", 5);
up_read(&zram->init_lock);
return 5;
@@ -382,8 +463,10 @@ static ssize_t backing_dev_store(struct device *dev,
bdev = bdgrab(I_BDEV(inode));
err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
- if (err < 0)
+ if (err < 0) {
+ bdev = NULL;
goto out;
+ }
nr_pages = i_size_read(inode) >> PAGE_SHIFT;
bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
@@ -399,7 +482,6 @@ static ssize_t backing_dev_store(struct device *dev,
goto out;
reset_bdev(zram);
- spin_lock_init(&zram->bitmap_lock);
zram->old_block_size = old_block_size;
zram->bdev = bdev;
@@ -441,32 +523,29 @@ out:
return err;
}
-static unsigned long get_entry_bdev(struct zram *zram)
+static unsigned long alloc_block_bdev(struct zram *zram)
{
- unsigned long entry;
-
- spin_lock(&zram->bitmap_lock);
+ unsigned long blk_idx = 1;
+retry:
/* skip 0 bit to confuse zram.handle = 0 */
- entry = find_next_zero_bit(zram->bitmap, zram->nr_pages, 1);
- if (entry == zram->nr_pages) {
- spin_unlock(&zram->bitmap_lock);
+ blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, blk_idx);
+ if (blk_idx == zram->nr_pages)
return 0;
- }
- set_bit(entry, zram->bitmap);
- spin_unlock(&zram->bitmap_lock);
+ if (test_and_set_bit(blk_idx, zram->bitmap))
+ goto retry;
- return entry;
+ atomic64_inc(&zram->stats.bd_count);
+ return blk_idx;
}
-static void put_entry_bdev(struct zram *zram, unsigned long entry)
+static void free_block_bdev(struct zram *zram, unsigned long blk_idx)
{
int was_set;
- spin_lock(&zram->bitmap_lock);
- was_set = test_and_clear_bit(entry, zram->bitmap);
- spin_unlock(&zram->bitmap_lock);
+ was_set = test_and_clear_bit(blk_idx, zram->bitmap);
WARN_ON_ONCE(!was_set);
+ atomic64_dec(&zram->stats.bd_count);
}
static void zram_page_end_io(struct bio *bio)
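
The alloc_block_bdev()/free_block_bdev() pair above drops the bitmap spinlock in favor of an optimistic scan plus an atomic claim. A minimal user-space sketch of that pattern (hypothetical names; GCC atomic builtins stand in for the kernel's test_and_set_bit()/test_and_clear_bit()):

#include <stdio.h>

#define NR_BLOCKS 64UL

static unsigned long bitmap;	/* one word is enough for the demo */

static unsigned long alloc_block(void)
{
	unsigned long blk = 1;	/* skip bit 0, like alloc_block_bdev() */
retry:
	while (blk < NR_BLOCKS && (bitmap & (1UL << blk)))
		blk++;				/* find_next_zero_bit() */
	if (blk == NR_BLOCKS)
		return 0;			/* backing device is full */
	/* test_and_set_bit(): claim atomically, retry if we lost the race */
	if (__atomic_fetch_or(&bitmap, 1UL << blk, __ATOMIC_SEQ_CST) &
	    (1UL << blk))
		goto retry;
	return blk;
}

static void free_block(unsigned long blk)
{
	unsigned long old = __atomic_fetch_and(&bitmap, ~(1UL << blk),
					       __ATOMIC_SEQ_CST);

	if (!(old & (1UL << blk)))
		fprintf(stderr, "double free of block %lu\n", blk);
}

int main(void)
{
	unsigned long a = alloc_block();
	unsigned long b = alloc_block();

	printf("allocated blocks %lu and %lu\n", a, b);	/* 1 and 2 */
	free_block(b);
	free_block(a);
	return 0;
}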
@@ -509,6 +588,169 @@ static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
return 1;
}
+#define HUGE_WRITEBACK 0x1
+#define IDLE_WRITEBACK 0x2
+
+static ssize_t writeback_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct zram *zram = dev_to_zram(dev);
+ unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
+ unsigned long index;
+ struct bio bio;
+ struct bio_vec bio_vec;
+ struct page *page;
+ ssize_t ret, sz;
+ char mode_buf[8];
+ unsigned long mode = -1UL;
+ unsigned long blk_idx = 0;
+
+ sz = strscpy(mode_buf, buf, sizeof(mode_buf));
+ if (sz <= 0)
+ return -EINVAL;
+
+ /* ignore trailing newline */
+ if (mode_buf[sz - 1] == '\n')
+ mode_buf[sz - 1] = 0x00;
+
+ if (!strcmp(mode_buf, "idle"))
+ mode = IDLE_WRITEBACK;
+ else if (!strcmp(mode_buf, "huge"))
+ mode = HUGE_WRITEBACK;
+
+ if (mode == -1UL)
+ return -EINVAL;
+
+ down_read(&zram->init_lock);
+ if (!init_done(zram)) {
+ ret = -EINVAL;
+ goto release_init_lock;
+ }
+
+ if (!zram->backing_dev) {
+ ret = -ENODEV;
+ goto release_init_lock;
+ }
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page) {
+ ret = -ENOMEM;
+ goto release_init_lock;
+ }
+
+ for (index = 0; index < nr_pages; index++) {
+ struct bio_vec bvec;
+
+ bvec.bv_page = page;
+ bvec.bv_len = PAGE_SIZE;
+ bvec.bv_offset = 0;
+
+ if (zram->stop_writeback) {
+ ret = -EIO;
+ break;
+ }
+
+ if (!blk_idx) {
+ blk_idx = alloc_block_bdev(zram);
+ if (!blk_idx) {
+ ret = -ENOSPC;
+ break;
+ }
+ }
+
+ zram_slot_lock(zram, index);
+ if (!zram_allocated(zram, index))
+ goto next;
+
+ if (zram_test_flag(zram, index, ZRAM_WB) ||
+ zram_test_flag(zram, index, ZRAM_SAME) ||
+ zram_test_flag(zram, index, ZRAM_UNDER_WB))
+ goto next;
+
+ if ((mode & IDLE_WRITEBACK &&
+ !zram_test_flag(zram, index, ZRAM_IDLE)) &&
+ (mode & HUGE_WRITEBACK &&
+ !zram_test_flag(zram, index, ZRAM_HUGE)))
+ goto next;
+ /*
+ * Clearing ZRAM_UNDER_WB is duty of caller.
+ * IOW, zram_free_page never clear it.
+ */
+ zram_set_flag(zram, index, ZRAM_UNDER_WB);
+ /* Need for hugepage writeback racing */
+ zram_set_flag(zram, index, ZRAM_IDLE);
+ zram_slot_unlock(zram, index);
+ if (zram_bvec_read(zram, &bvec, index, 0, NULL)) {
+ zram_slot_lock(zram, index);
+ zram_clear_flag(zram, index, ZRAM_UNDER_WB);
+ zram_clear_flag(zram, index, ZRAM_IDLE);
+ zram_slot_unlock(zram, index);
+ continue;
+ }
+
+ bio_init(&bio, &bio_vec, 1);
+ bio_set_dev(&bio, zram->bdev);
+ bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
+ bio.bi_opf = REQ_OP_WRITE | REQ_SYNC;
+
+ bio_add_page(&bio, bvec.bv_page, bvec.bv_len,
+ bvec.bv_offset);
+ /*
+ * XXX: A single page IO would be inefficient for write
+ * but it would be not bad as starter.
+ */
+ ret = submit_bio_wait(&bio);
+ if (ret) {
+ zram_slot_lock(zram, index);
+ zram_clear_flag(zram, index, ZRAM_UNDER_WB);
+ zram_clear_flag(zram, index, ZRAM_IDLE);
+ zram_slot_unlock(zram, index);
+ continue;
+ }
+
+ atomic64_inc(&zram->stats.bd_writes);
+ /*
+ * We released zram_slot_lock so need to check if the slot was
+ * changed. If there is freeing for the slot, we can catch it
+ * easily by zram_allocated.
+ * A subtle case is the slot is freed/reallocated/marked as
+ * ZRAM_IDLE again. To close the race, idle_store doesn't
+ * mark ZRAM_IDLE once it found the slot was ZRAM_UNDER_WB.
+ * Thus, we could close the race by checking ZRAM_IDLE bit.
+ */
+ zram_slot_lock(zram, index);
+ if (!zram_allocated(zram, index) ||
+ !zram_test_flag(zram, index, ZRAM_IDLE)) {
+ zram_clear_flag(zram, index, ZRAM_UNDER_WB);
+ zram_clear_flag(zram, index, ZRAM_IDLE);
+ goto next;
+ }
+
+ zram_free_page(zram, index);
+ zram_clear_flag(zram, index, ZRAM_UNDER_WB);
+ zram_set_flag(zram, index, ZRAM_WB);
+ zram_set_element(zram, index, blk_idx);
+ blk_idx = 0;
+ atomic64_inc(&zram->stats.pages_stored);
+ if (atomic64_add_unless(&zram->stats.bd_wb_limit,
+ -1 << (PAGE_SHIFT - 12), 0)) {
+ if (atomic64_read(&zram->stats.bd_wb_limit) == 0)
+ zram->stop_writeback = true;
+ }
+next:
+ zram_slot_unlock(zram, index);
+ }
+
+ if (blk_idx)
+ free_block_bdev(zram, blk_idx);
+ ret = len;
+ __free_page(page);
+release_init_lock:
+ up_read(&zram->init_lock);
+
+ return ret;
+}
+
struct zram_work {
struct work_struct work;
struct zram *zram;
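
writeback_store() above charges each written-back page against bd_wb_limit in 4K units and sets stop_writeback once the budget reaches zero; a budget that is already zero is never decremented, which matches writeback_limit_store() treating 0 as "no limit". A small user-space model of that bookkeeping (hypothetical; a compare-exchange loop emulates atomic64_add_unless()):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12			/* assume 4K pages for the demo */

static _Atomic long wb_limit = 3;	/* budget: three 4K units */

/* atomic64_add_unless(v, a, u): add a to v unless v == u; true if added */
static bool add_unless(_Atomic long *v, long a, long u)
{
	long c = atomic_load(v);

	while (c != u)
		if (atomic_compare_exchange_weak(v, &c, c + a))
			return true;
	return false;
}

int main(void)
{
	bool stop = false;

	for (int page = 0; page < 5 && !stop; page++) {
		/* charge one page worth of 4K units against the budget */
		if (add_unless(&wb_limit, -(1L << (PAGE_SHIFT - 12)), 0) &&
		    atomic_load(&wb_limit) == 0)
			stop = true;	/* budget exhausted: stop writeback */
		printf("page %d written, budget now %ld\n",
		       page, atomic_load(&wb_limit));
	}
	return 0;
}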
@@ -561,79 +803,21 @@ static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
unsigned long entry, struct bio *parent, bool sync)
{
+ atomic64_inc(&zram->stats.bd_reads);
if (sync)
return read_from_bdev_sync(zram, bvec, entry, parent);
else
return read_from_bdev_async(zram, bvec, entry, parent);
}
-
-static int write_to_bdev(struct zram *zram, struct bio_vec *bvec,
- u32 index, struct bio *parent,
- unsigned long *pentry)
-{
- struct bio *bio;
- unsigned long entry;
-
- bio = bio_alloc(GFP_ATOMIC, 1);
- if (!bio)
- return -ENOMEM;
-
- entry = get_entry_bdev(zram);
- if (!entry) {
- bio_put(bio);
- return -ENOSPC;
- }
-
- bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
- bio_set_dev(bio, zram->bdev);
- if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len,
- bvec->bv_offset)) {
- bio_put(bio);
- put_entry_bdev(zram, entry);
- return -EIO;
- }
-
- if (!parent) {
- bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
- bio->bi_end_io = zram_page_end_io;
- } else {
- bio->bi_opf = parent->bi_opf;
- bio_chain(bio, parent);
- }
-
- submit_bio(bio);
- *pentry = entry;
-
- return 0;
-}
-
-static void zram_wb_clear(struct zram *zram, u32 index)
-{
- unsigned long entry;
-
- zram_clear_flag(zram, index, ZRAM_WB);
- entry = zram_get_element(zram, index);
- zram_set_element(zram, index, 0);
- put_entry_bdev(zram, entry);
-}
-
#else
-static bool zram_wb_enabled(struct zram *zram) { return false; }
static inline void reset_bdev(struct zram *zram) {};
-static int write_to_bdev(struct zram *zram, struct bio_vec *bvec,
- u32 index, struct bio *parent,
- unsigned long *pentry)
-
-{
- return -EIO;
-}
-
static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
unsigned long entry, struct bio *parent, bool sync)
{
return -EIO;
}
-static void zram_wb_clear(struct zram *zram, u32 index) {}
+
+static void free_block_bdev(struct zram *zram, unsigned long blk_idx) {};
#endif
#ifdef CONFIG_ZRAM_MEMORY_TRACKING
@@ -652,14 +836,10 @@ static void zram_debugfs_destroy(void)
static void zram_accessed(struct zram *zram, u32 index)
{
+ zram_clear_flag(zram, index, ZRAM_IDLE);
zram->table[index].ac_time = ktime_get_boottime();
}
-static void zram_reset_access(struct zram *zram, u32 index)
-{
- zram->table[index].ac_time = 0;
-}
-
static ssize_t read_block_state(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
@@ -689,12 +869,13 @@ static ssize_t read_block_state(struct file *file, char __user *buf,
ts = ktime_to_timespec64(zram->table[index].ac_time);
copied = snprintf(kbuf + written, count,
- "%12zd %12lld.%06lu %c%c%c\n",
+ "%12zd %12lld.%06lu %c%c%c%c\n",
index, (s64)ts.tv_sec,
ts.tv_nsec / NSEC_PER_USEC,
zram_test_flag(zram, index, ZRAM_SAME) ? 's' : '.',
zram_test_flag(zram, index, ZRAM_WB) ? 'w' : '.',
- zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.');
+ zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.',
+ zram_test_flag(zram, index, ZRAM_IDLE) ? 'i' : '.');
if (count < copied) {
zram_slot_unlock(zram, index);
@@ -739,8 +920,10 @@ static void zram_debugfs_unregister(struct zram *zram)
#else
static void zram_debugfs_create(void) {};
static void zram_debugfs_destroy(void) {};
-static void zram_accessed(struct zram *zram, u32 index) {};
-static void zram_reset_access(struct zram *zram, u32 index) {};
+static void zram_accessed(struct zram *zram, u32 index)
+{
+ zram_clear_flag(zram, index, ZRAM_IDLE);
+};
static void zram_debugfs_register(struct zram *zram) {};
static void zram_debugfs_unregister(struct zram *zram) {};
#endif
@@ -877,6 +1060,26 @@ static ssize_t mm_stat_show(struct device *dev,
return ret;
}
+#ifdef CONFIG_ZRAM_WRITEBACK
+#define FOUR_K(x) ((x) * (1 << (PAGE_SHIFT - 12)))
+static ssize_t bd_stat_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zram *zram = dev_to_zram(dev);
+ ssize_t ret;
+
+ down_read(&zram->init_lock);
+ ret = scnprintf(buf, PAGE_SIZE,
+ "%8llu %8llu %8llu\n",
+ FOUR_K((u64)atomic64_read(&zram->stats.bd_count)),
+ FOUR_K((u64)atomic64_read(&zram->stats.bd_reads)),
+ FOUR_K((u64)atomic64_read(&zram->stats.bd_writes)));
+ up_read(&zram->init_lock);
+
+ return ret;
+}
+#endif
+
static ssize_t debug_stat_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -886,9 +1089,10 @@ static ssize_t debug_stat_show(struct device *dev,
down_read(&zram->init_lock);
ret = scnprintf(buf, PAGE_SIZE,
- "version: %d\n%8llu\n",
+ "version: %d\n%8llu %8llu\n",
version,
- (u64)atomic64_read(&zram->stats.writestall));
+ (u64)atomic64_read(&zram->stats.writestall),
+ (u64)atomic64_read(&zram->stats.miss_free));
up_read(&zram->init_lock);
return ret;
@@ -896,6 +1100,9 @@ static ssize_t debug_stat_show(struct device *dev,
static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
+#ifdef CONFIG_ZRAM_WRITEBACK
+static DEVICE_ATTR_RO(bd_stat);
+#endif
static DEVICE_ATTR_RO(debug_stat);
static void zram_meta_free(struct zram *zram, u64 disksize)
@@ -940,17 +1147,21 @@ static void zram_free_page(struct zram *zram, size_t index)
{
unsigned long handle;
- zram_reset_access(zram, index);
+#ifdef CONFIG_ZRAM_MEMORY_TRACKING
+ zram->table[index].ac_time = 0;
+#endif
+ if (zram_test_flag(zram, index, ZRAM_IDLE))
+ zram_clear_flag(zram, index, ZRAM_IDLE);
if (zram_test_flag(zram, index, ZRAM_HUGE)) {
zram_clear_flag(zram, index, ZRAM_HUGE);
atomic64_dec(&zram->stats.huge_pages);
}
- if (zram_wb_enabled(zram) && zram_test_flag(zram, index, ZRAM_WB)) {
- zram_wb_clear(zram, index);
- atomic64_dec(&zram->stats.pages_stored);
- return;
+ if (zram_test_flag(zram, index, ZRAM_WB)) {
+ zram_clear_flag(zram, index, ZRAM_WB);
+ free_block_bdev(zram, zram_get_element(zram, index));
+ goto out;
}
/*
@@ -959,10 +1170,8 @@ static void zram_free_page(struct zram *zram, size_t index)
*/
if (zram_test_flag(zram, index, ZRAM_SAME)) {
zram_clear_flag(zram, index, ZRAM_SAME);
- zram_set_element(zram, index, 0);
atomic64_dec(&zram->stats.same_pages);
- atomic64_dec(&zram->stats.pages_stored);
- return;
+ goto out;
}
handle = zram_get_handle(zram, index);
@@ -973,10 +1182,12 @@ static void zram_free_page(struct zram *zram, size_t index)
atomic64_sub(zram_get_obj_size(zram, index),
&zram->stats.compr_data_size);
+out:
atomic64_dec(&zram->stats.pages_stored);
-
zram_set_handle(zram, index, 0);
zram_set_obj_size(zram, index, 0);
+ WARN_ON_ONCE(zram->table[index].flags &
+ ~(1UL << ZRAM_LOCK | 1UL << ZRAM_UNDER_WB));
}
static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
@@ -987,24 +1198,20 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
unsigned int size;
void *src, *dst;
- if (zram_wb_enabled(zram)) {
- zram_slot_lock(zram, index);
- if (zram_test_flag(zram, index, ZRAM_WB)) {
- struct bio_vec bvec;
-
- zram_slot_unlock(zram, index);
+ zram_slot_lock(zram, index);
+ if (zram_test_flag(zram, index, ZRAM_WB)) {
+ struct bio_vec bvec;
- bvec.bv_page = page;
- bvec.bv_len = PAGE_SIZE;
- bvec.bv_offset = 0;
- return read_from_bdev(zram, &bvec,
- zram_get_element(zram, index),
- bio, partial_io);
- }
zram_slot_unlock(zram, index);
+
+ bvec.bv_page = page;
+ bvec.bv_len = PAGE_SIZE;
+ bvec.bv_offset = 0;
+ return read_from_bdev(zram, &bvec,
+ zram_get_element(zram, index),
+ bio, partial_io);
}
- zram_slot_lock(zram, index);
handle = zram_get_handle(zram, index);
if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
unsigned long value;
@@ -1089,7 +1296,6 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
struct page *page = bvec->bv_page;
unsigned long element = 0;
enum zram_pageflags flags = 0;
- bool allow_wb = true;
mem = kmap_atomic(page);
if (page_same_filled(mem, &element)) {
@@ -1114,21 +1320,8 @@ compress_again:
return ret;
}
- if (unlikely(comp_len >= huge_class_size)) {
+ if (comp_len >= huge_class_size)
comp_len = PAGE_SIZE;
- if (zram_wb_enabled(zram) && allow_wb) {
- zcomp_stream_put(zram->comp);
- ret = write_to_bdev(zram, bvec, index, bio, &element);
- if (!ret) {
- flags = ZRAM_WB;
- ret = 1;
- goto out;
- }
- allow_wb = false;
- goto compress_again;
- }
- }
-
/*
* handle allocation has 2 paths:
* a) fast path is executed with preemption disabled (for
@@ -1400,10 +1593,14 @@ static void zram_slot_free_notify(struct block_device *bdev,
zram = bdev->bd_disk->private_data;
- zram_slot_lock(zram, index);
+ atomic64_inc(&zram->stats.notify_free);
+ if (!zram_slot_trylock(zram, index)) {
+ atomic64_inc(&zram->stats.miss_free);
+ return;
+ }
+
zram_free_page(zram, index);
zram_slot_unlock(zram, index);
- atomic64_inc(&zram->stats.notify_free);
}
static int zram_rw_page(struct block_device *bdev, sector_t sector,
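
zram_slot_free_notify() runs in a context that must not wait on a busy slot, so the new zram_slot_trylock() tries exactly once and records a miss_free on failure instead of spinning. A user-space sketch of the idea (hypothetical names; C11 atomics in place of bit_spin_trylock()):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define SLOT_LOCK_BIT 0

static _Atomic unsigned long slot_flags;
static _Atomic long miss_free;

static bool slot_trylock(void)
{
	/* bit_spin_trylock(): set the bit; fail if it was already set */
	return !(atomic_fetch_or(&slot_flags, 1UL << SLOT_LOCK_BIT) &
		 (1UL << SLOT_LOCK_BIT));
}

static void slot_unlock(void)
{
	atomic_fetch_and(&slot_flags, ~(1UL << SLOT_LOCK_BIT));
}

static void slot_free_notify(void)
{
	if (!slot_trylock()) {
		atomic_fetch_add(&miss_free, 1);  /* shown in debug_stat */
		return;
	}
	/* ... free the slot's compressed object here ... */
	slot_unlock();
}

int main(void)
{
	slot_free_notify();				/* succeeds */
	atomic_fetch_or(&slot_flags, 1UL);		/* simulate held lock */
	slot_free_notify();				/* misses */
	printf("miss_free = %ld\n", atomic_load(&miss_free));	/* 1 */
	return 0;
}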
@@ -1608,10 +1805,13 @@ static DEVICE_ATTR_RO(initstate);
static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_WO(mem_limit);
static DEVICE_ATTR_WO(mem_used_max);
+static DEVICE_ATTR_WO(idle);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);
#ifdef CONFIG_ZRAM_WRITEBACK
static DEVICE_ATTR_RW(backing_dev);
+static DEVICE_ATTR_WO(writeback);
+static DEVICE_ATTR_RW(writeback_limit);
#endif
static struct attribute *zram_disk_attrs[] = {
@@ -1621,13 +1821,19 @@ static struct attribute *zram_disk_attrs[] = {
&dev_attr_compact.attr,
&dev_attr_mem_limit.attr,
&dev_attr_mem_used_max.attr,
+ &dev_attr_idle.attr,
&dev_attr_max_comp_streams.attr,
&dev_attr_comp_algorithm.attr,
#ifdef CONFIG_ZRAM_WRITEBACK
&dev_attr_backing_dev.attr,
+ &dev_attr_writeback.attr,
+ &dev_attr_writeback_limit.attr,
#endif
&dev_attr_io_stat.attr,
&dev_attr_mm_stat.attr,
+#ifdef CONFIG_ZRAM_WRITEBACK
+ &dev_attr_bd_stat.attr,
+#endif
&dev_attr_debug_stat.attr,
NULL,
};
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 72c8584b6dff..4bd3afd15e83 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -30,7 +30,7 @@
/*
- * The lower ZRAM_FLAG_SHIFT bits of table.value is for
+ * The lower ZRAM_FLAG_SHIFT bits of table.flags is for
* object size (excluding header), the higher bits is for
* zram_pageflags.
*
@@ -41,13 +41,15 @@
*/
#define ZRAM_FLAG_SHIFT 24
-/* Flags for zram pages (table[page_no].value) */
+/* Flags for zram pages (table[page_no].flags) */
enum zram_pageflags {
/* zram slot is locked */
ZRAM_LOCK = ZRAM_FLAG_SHIFT,
ZRAM_SAME, /* Page consists the same element */
ZRAM_WB, /* page is stored on backing_device */
+ ZRAM_UNDER_WB, /* page is under writeback */
ZRAM_HUGE, /* Incompressible page */
+ ZRAM_IDLE, /* not accessed page since last idle marking */
__NR_ZRAM_PAGEFLAGS,
};
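
A self-contained model of the flags-word layout described above (hypothetical user-space code mirroring the enum, not the kernel implementation): the object size occupies the low ZRAM_FLAG_SHIFT bits and the page flags the bits above, so zram_allocated() can be answered from the same word.

#include <assert.h>
#include <stdbool.h>

#define ZRAM_FLAG_SHIFT 24

enum pageflags {
	F_LOCK = ZRAM_FLAG_SHIFT,
	F_SAME, F_WB, F_UNDER_WB, F_HUGE, F_IDLE,
};

static unsigned long flags_word;

static void set_obj_size(unsigned long size)
{
	unsigned long f = flags_word >> ZRAM_FLAG_SHIFT;

	flags_word = (f << ZRAM_FLAG_SHIFT) | size;  /* keep flags, swap size */
}

static unsigned long get_obj_size(void)
{
	return flags_word & ((1UL << ZRAM_FLAG_SHIFT) - 1);
}

static void set_flag(enum pageflags f)  { flags_word |= 1UL << f; }
static bool test_flag(enum pageflags f) { return flags_word & (1UL << f); }

/* zram_allocated() after this patch: a slot is in use if it stores a sized
 * object, a same-filled page, or a page written back to the backing device. */
static bool allocated(void)
{
	return get_obj_size() || test_flag(F_SAME) || test_flag(F_WB);
}

int main(void)
{
	assert(!allocated());
	set_obj_size(4096);
	set_flag(F_HUGE);
	assert(get_obj_size() == 4096 && allocated());
	return 0;
}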
@@ -60,7 +62,7 @@ struct zram_table_entry {
unsigned long handle;
unsigned long element;
};
- unsigned long value;
+ unsigned long flags;
#ifdef CONFIG_ZRAM_MEMORY_TRACKING
ktime_t ac_time;
#endif
@@ -79,6 +81,13 @@ struct zram_stats {
atomic64_t pages_stored; /* no. of pages currently stored */
atomic_long_t max_used_pages; /* no. of maximum pages stored */
atomic64_t writestall; /* no. of write slow paths */
+ atomic64_t miss_free; /* no. of missed free */
+#ifdef CONFIG_ZRAM_WRITEBACK
+ atomic64_t bd_count; /* no. of pages in backing device */
+ atomic64_t bd_reads; /* no. of reads from backing device */
+ atomic64_t bd_writes; /* no. of writes from backing device */
+ atomic64_t bd_wb_limit; /* writeback limit of backing device */
+#endif
};
struct zram {
@@ -104,13 +113,13 @@ struct zram {
* zram is claimed so open request will be failed
*/
bool claim; /* Protected by bdev->bd_mutex */
-#ifdef CONFIG_ZRAM_WRITEBACK
struct file *backing_dev;
+ bool stop_writeback;
+#ifdef CONFIG_ZRAM_WRITEBACK
struct block_device *bdev;
unsigned int old_block_size;
unsigned long *bitmap;
unsigned long nr_pages;
- spinlock_t bitmap_lock;
#endif
#ifdef CONFIG_ZRAM_MEMORY_TRACKING
struct dentry *debugfs_dir;