Diffstat (limited to 'fs/f2fs/compress.c')

 -rw-r--r--  fs/f2fs/compress.c | 1057
 1 file changed, 589 insertions(+), 468 deletions(-)
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index d0c3aeba5945..7b68bf22989d 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -23,20 +23,18 @@ static struct kmem_cache *cic_entry_slab;
 static struct kmem_cache *dic_entry_slab;
 
-static void *page_array_alloc(struct inode *inode, int nr)
+static void *page_array_alloc(struct f2fs_sb_info *sbi, int nr)
 {
-	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	unsigned int size = sizeof(struct page *) * nr;
 
 	if (likely(size <= sbi->page_array_slab_size))
 		return f2fs_kmem_cache_alloc(sbi->page_array_slab,
-				GFP_F2FS_ZERO, false, F2FS_I_SB(inode));
+				GFP_F2FS_ZERO, false, sbi);
 	return f2fs_kzalloc(sbi, size, GFP_NOFS);
 }
 
-static void page_array_free(struct inode *inode, void *pages, int nr)
+static void page_array_free(struct f2fs_sb_info *sbi, void *pages, int nr)
 {
-	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	unsigned int size = sizeof(struct page *) * nr;
 
 	if (!pages)
@@ -55,6 +53,7 @@ struct f2fs_compress_ops {
 	int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
 	void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
 	int (*decompress_pages)(struct decompress_io_ctx *dic);
+	bool (*is_level_valid)(int level);
 };
 
 static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
@@ -72,28 +71,28 @@ static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
 	return cc->cluster_idx << cc->log_cluster_size;
 }
 
-bool f2fs_is_compressed_page(struct page *page)
+bool f2fs_is_compressed_page(struct folio *folio)
 {
-	if (!PagePrivate(page))
-		return false;
-	if (!page_private(page))
+	if (!folio->private)
 		return false;
-	if (page_private_nonpointer(page))
+	if (folio_test_f2fs_nonpointer(folio))
 		return false;
 
-	f2fs_bug_on(F2FS_M_SB(page->mapping),
-		*((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
+	f2fs_bug_on(F2FS_F_SB(folio),
+		*((u32 *)folio->private) != F2FS_COMPRESSED_PAGE_MAGIC);
 	return true;
 }
 
 static void f2fs_set_compressed_page(struct page *page,
		struct inode *inode, pgoff_t index, void *data)
 {
-	attach_page_private(page, (void *)data);
+	struct folio *folio = page_folio(page);
+
+	folio_attach_private(folio, (void *)data);
 
 	/* i_crypto_info and iv index */
-	page->index = index;
-	page->mapping = inode->i_mapping;
+	folio->index = index;
+	folio->mapping = inode->i_mapping;
 }
 
 static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
@@ -121,7 +120,7 @@ static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
 }
 
 static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
-		struct writeback_control *wbc, bool redirty, int unlock)
+		struct writeback_control *wbc, bool redirty, bool unlock)
 {
 	unsigned int i;
 
@@ -134,9 +133,11 @@ static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
 	}
 }
 
-struct page *f2fs_compress_control_page(struct page *page)
+struct folio *f2fs_compress_control_folio(struct folio *folio)
 {
-	return ((struct compress_io_ctx *)page_private(page))->rpages[0];
+	struct compress_io_ctx *ctx = folio->private;
+
+	return page_folio(ctx->rpages[0]);
 }
 
 int f2fs_init_compress_ctx(struct compress_ctx *cc)
@@ -144,13 +145,13 @@ int f2fs_init_compress_ctx(struct compress_ctx *cc)
 	if (cc->rpages)
 		return 0;
 
-	cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
+	cc->rpages = page_array_alloc(F2FS_I_SB(cc->inode), cc->cluster_size);
 	return cc->rpages ? 0 : -ENOMEM;
 }
 
 void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
 {
-	page_array_free(cc->inode, cc->rpages, cc->cluster_size);
+	page_array_free(F2FS_I_SB(cc->inode), cc->rpages, cc->cluster_size);
 	cc->rpages = NULL;
 	cc->nr_rpages = 0;
 	cc->nr_cpages = 0;
@@ -159,24 +160,24 @@ void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
 	cc->cluster_idx = NULL_CLUSTER;
 }
 
-void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
+void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct folio *folio)
 {
 	unsigned int cluster_ofs;
 
-	if (!f2fs_cluster_can_merge_page(cc, page->index))
+	if (!f2fs_cluster_can_merge_page(cc, folio->index))
 		f2fs_bug_on(F2FS_I_SB(cc->inode), 1);
 
-	cluster_ofs = offset_in_cluster(cc, page->index);
-	cc->rpages[cluster_ofs] = page;
+	cluster_ofs = offset_in_cluster(cc, folio->index);
+	cc->rpages[cluster_ofs] = folio_page(folio, 0);
 	cc->nr_rpages++;
-	cc->cluster_idx = cluster_idx(cc, page->index);
+	cc->cluster_idx = cluster_idx(cc, folio->index);
 }
 
 #ifdef CONFIG_F2FS_FS_LZO
 static int lzo_init_compress_ctx(struct compress_ctx *cc)
 {
-	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
-				LZO1X_MEM_COMPRESS, GFP_NOFS);
+	cc->private = f2fs_vmalloc(F2FS_I_SB(cc->inode),
+				LZO1X_MEM_COMPRESS);
 	if (!cc->private)
 		return -ENOMEM;
 
@@ -186,7 +187,7 @@ static int lzo_init_compress_ctx(struct compress_ctx *cc)
 
 static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
 {
-	kvfree(cc->private);
+	vfree(cc->private);
 	cc->private = NULL;
 }
 
@@ -197,8 +198,8 @@ static int lzo_compress_pages(struct compress_ctx *cc)
 	ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
 	if (ret != LZO_E_OK) {
-		printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
-				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
+		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
+				"lzo compress failed, ret:%d", ret);
 		return -EIO;
 	}
 	return 0;
@@ -211,17 +212,15 @@ static int lzo_decompress_pages(struct decompress_io_ctx *dic)
 	ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
					dic->rbuf, &dic->rlen);
 	if (ret != LZO_E_OK) {
-		printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
-				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
+		f2fs_err_ratelimited(dic->sbi,
+				"lzo decompress failed, ret:%d", ret);
 		return -EIO;
 	}
 
 	if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
-		printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
-					"expected:%lu\n", KERN_ERR,
-					F2FS_I_SB(dic->inode)->sb->s_id,
-					dic->rlen,
-					PAGE_SIZE << dic->log_cluster_size);
+		f2fs_err_ratelimited(dic->sbi,
+				"lzo invalid rlen:%zu, expected:%lu",
+				dic->rlen, PAGE_SIZE << dic->log_cluster_size);
 		return -EIO;
 	}
 	return 0;
@@ -241,11 +240,11 @@ static int lz4_init_compress_ctx(struct compress_ctx *cc)
 	unsigned int size = LZ4_MEM_COMPRESS;
 
 #ifdef CONFIG_F2FS_FS_LZ4HC
-	if (F2FS_I(cc->inode)->i_compress_flag >> COMPRESS_LEVEL_OFFSET)
+	if (F2FS_I(cc->inode)->i_compress_level)
 		size = LZ4HC_MEM_COMPRESS;
 #endif
 
-	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), size, GFP_NOFS);
+	cc->private = f2fs_vmalloc(F2FS_I_SB(cc->inode), size);
 	if (!cc->private)
 		return -ENOMEM;
 
@@ -260,40 +259,25 @@ static int lz4_init_compress_ctx(struct compress_ctx *cc)
 
 static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
 {
-	kvfree(cc->private);
+	vfree(cc->private);
 	cc->private = NULL;
 }
 
-#ifdef CONFIG_F2FS_FS_LZ4HC
-static int lz4hc_compress_pages(struct compress_ctx *cc)
+static int lz4_compress_pages(struct compress_ctx *cc)
 {
-	unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
-				COMPRESS_LEVEL_OFFSET;
-	int len;
+	int len = -EINVAL;
+	unsigned char level = F2FS_I(cc->inode)->i_compress_level;
 
-	if (level)
-		len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
-					cc->clen, level, cc->private);
-	else
+	if (!level)
 		len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
						cc->clen, cc->private);
-	if (!len)
-		return -EAGAIN;
-
-	cc->clen = len;
-	return 0;
-}
-#endif
-
-static int lz4_compress_pages(struct compress_ctx *cc)
-{
-	int len;
-
 #ifdef CONFIG_F2FS_FS_LZ4HC
-	return lz4hc_compress_pages(cc);
+	else
+		len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
					cc->clen, level, cc->private);
 #endif
-	len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
-					cc->clen, cc->private);
+	if (len < 0)
+		return len;
 	if (!len)
 		return -EAGAIN;
 
@@ -308,59 +292,63 @@ static int lz4_decompress_pages(struct decompress_io_ctx *dic)
 	ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
					dic->clen, dic->rlen);
 	if (ret < 0) {
-		printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
-				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
+		f2fs_err_ratelimited(dic->sbi,
+				"lz4 decompress failed, ret:%d", ret);
 		return -EIO;
 	}
 
 	if (ret != PAGE_SIZE << dic->log_cluster_size) {
-		printk_ratelimited("%sF2FS-fs (%s): lz4 invalid rlen:%zu, "
-					"expected:%lu\n", KERN_ERR,
-					F2FS_I_SB(dic->inode)->sb->s_id,
-					dic->rlen,
-					PAGE_SIZE << dic->log_cluster_size);
+		f2fs_err_ratelimited(dic->sbi,
+				"lz4 invalid ret:%d, expected:%lu",
+				ret, PAGE_SIZE << dic->log_cluster_size);
 		return -EIO;
 	}
 	return 0;
 }
 
+static bool lz4_is_level_valid(int lvl)
+{
+#ifdef CONFIG_F2FS_FS_LZ4HC
+	return !lvl || (lvl >= LZ4HC_MIN_CLEVEL && lvl <= LZ4HC_MAX_CLEVEL);
+#else
+	return lvl == 0;
+#endif
+}
+
 static const struct f2fs_compress_ops f2fs_lz4_ops = {
 	.init_compress_ctx	= lz4_init_compress_ctx,
 	.destroy_compress_ctx	= lz4_destroy_compress_ctx,
 	.compress_pages		= lz4_compress_pages,
 	.decompress_pages	= lz4_decompress_pages,
+	.is_level_valid		= lz4_is_level_valid,
 };
 #endif
 
 #ifdef CONFIG_F2FS_FS_ZSTD
-#define F2FS_ZSTD_DEFAULT_CLEVEL	1
-
 static int zstd_init_compress_ctx(struct compress_ctx *cc)
 {
 	zstd_parameters params;
 	zstd_cstream *stream;
 	void *workspace;
 	unsigned int workspace_size;
-	unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
-				COMPRESS_LEVEL_OFFSET;
+	unsigned char level = F2FS_I(cc->inode)->i_compress_level;
 
+	/* Need to remain this for backward compatibility */
 	if (!level)
 		level = F2FS_ZSTD_DEFAULT_CLEVEL;
 
-	params = zstd_get_params(F2FS_ZSTD_DEFAULT_CLEVEL, cc->rlen);
+	params = zstd_get_params(level, cc->rlen);
 	workspace_size = zstd_cstream_workspace_bound(&params.cParams);
 
-	workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
-					workspace_size, GFP_NOFS);
+	workspace = f2fs_vmalloc(F2FS_I_SB(cc->inode), workspace_size);
 	if (!workspace)
 		return -ENOMEM;
 
 	stream = zstd_init_cstream(&params, 0, workspace, workspace_size);
 	if (!stream) {
-		printk_ratelimited("%sF2FS-fs (%s): %s zstd_init_cstream failed\n",
-				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
-				__func__);
-		kvfree(workspace);
+		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
				"%s zstd_init_cstream failed", __func__);
+		vfree(workspace);
 		return -EIO;
 	}
 
@@ -373,7 +361,7 @@ static int zstd_init_compress_ctx(struct compress_ctx *cc)
 
 static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
 {
-	kvfree(cc->private);
+	vfree(cc->private);
 	cc->private = NULL;
 	cc->private2 = NULL;
 }
@@ -397,16 +385,16 @@ static int zstd_compress_pages(struct compress_ctx *cc)
 	ret = zstd_compress_stream(stream, &outbuf, &inbuf);
 	if (zstd_is_error(ret)) {
-		printk_ratelimited("%sF2FS-fs (%s): %s zstd_compress_stream failed, ret: %d\n",
-				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
+		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
+				"%s zstd_compress_stream failed, ret: %d",
 				__func__, zstd_get_error_code(ret));
 		return -EIO;
 	}
 
 	ret = zstd_end_stream(stream, &outbuf);
 	if (zstd_is_error(ret)) {
-		printk_ratelimited("%sF2FS-fs (%s): %s zstd_end_stream returned %d\n",
-				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
+		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
+				"%s zstd_end_stream returned %d",
 				__func__, zstd_get_error_code(ret));
 		return -EIO;
 	}
@@ -432,17 +420,15 @@ static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
 
 	workspace_size = zstd_dstream_workspace_bound(max_window_size);
 
-	workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
-					workspace_size, GFP_NOFS);
+	workspace = f2fs_vmalloc(dic->sbi, workspace_size);
 	if (!workspace)
 		return -ENOMEM;
 
 	stream = zstd_init_dstream(max_window_size, workspace, workspace_size);
 	if (!stream) {
-		printk_ratelimited("%sF2FS-fs (%s): %s zstd_init_dstream failed\n",
-				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
-				__func__);
-		kvfree(workspace);
+		f2fs_err_ratelimited(dic->sbi,
				"%s zstd_init_dstream failed", __func__);
+		vfree(workspace);
 		return -EIO;
 	}
 
@@ -454,7 +440,7 @@ static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
 
 static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
 {
-	kvfree(dic->private);
+	vfree(dic->private);
 	dic->private = NULL;
 	dic->private2 = NULL;
 }
@@ -476,16 +462,15 @@ static int zstd_decompress_pages(struct decompress_io_ctx *dic)
 
 	ret = zstd_decompress_stream(stream, &outbuf, &inbuf);
 	if (zstd_is_error(ret)) {
-		printk_ratelimited("%sF2FS-fs (%s): %s zstd_decompress_stream failed, ret: %d\n",
-				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
+		f2fs_err_ratelimited(dic->sbi,
+				"%s zstd_decompress_stream failed, ret: %d",
 				__func__, zstd_get_error_code(ret));
 		return -EIO;
 	}
 
 	if (dic->rlen != outbuf.pos) {
-		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid rlen:%zu, "
-					"expected:%lu\n", KERN_ERR,
-					F2FS_I_SB(dic->inode)->sb->s_id,
+		f2fs_err_ratelimited(dic->sbi,
+				"%s ZSTD invalid rlen:%zu, expected:%lu",
				__func__, dic->rlen,
				PAGE_SIZE << dic->log_cluster_size);
 		return -EIO;
@@ -494,6 +479,11 @@ static int zstd_decompress_pages(struct decompress_io_ctx *dic)
 	return 0;
 }
 
+static bool zstd_is_level_valid(int lvl)
+{
+	return lvl >= zstd_min_clevel() && lvl <= zstd_max_clevel();
+}
+
 static const struct f2fs_compress_ops f2fs_zstd_ops = {
 	.init_compress_ctx	= zstd_init_compress_ctx,
 	.destroy_compress_ctx	= zstd_destroy_compress_ctx,
@@ -501,6 +491,7 @@ static const struct f2fs_compress_ops f2fs_zstd_ops = {
 	.init_decompress_ctx	= zstd_init_decompress_ctx,
 	.destroy_decompress_ctx	= zstd_destroy_decompress_ctx,
 	.decompress_pages	= zstd_decompress_pages,
+	.is_level_valid		= zstd_is_level_valid,
 };
 #endif
 
@@ -513,8 +504,8 @@ static int lzorle_compress_pages(struct compress_ctx *cc)
 	ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
 	if (ret != LZO_E_OK) {
-		printk_ratelimited("%sF2FS-fs (%s): lzo-rle compress failed, ret:%d\n",
-				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
+		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
+				"lzo-rle compress failed, ret:%d", ret);
 		return -EIO;
 	}
 	return 0;
@@ -559,19 +550,26 @@ bool f2fs_is_compress_backend_ready(struct inode *inode)
 	return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
 }
 
+bool f2fs_is_compress_level_valid(int alg, int lvl)
+{
+	const struct f2fs_compress_ops *cops = f2fs_cops[alg];
+
+	if (cops->is_level_valid)
+		return cops->is_level_valid(lvl);
+
+	return lvl == 0;
+}
+
 static mempool_t *compress_page_pool;
 static int num_compress_pages = 512;
 module_param(num_compress_pages, uint, 0444);
 MODULE_PARM_DESC(num_compress_pages,
		"Number of intermediate compress pages to preallocate");
 
-int f2fs_init_compress_mempool(void)
+int __init f2fs_init_compress_mempool(void)
 {
 	compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
-	if (!compress_page_pool)
-		return -ENOMEM;
-
-	return 0;
+	return compress_page_pool ? 0 : -ENOMEM;
 }
 
 void f2fs_destroy_compress_mempool(void)
@@ -591,11 +589,14 @@ static struct page *f2fs_compress_alloc_page(void)
 
 static void f2fs_compress_free_page(struct page *page)
 {
+	struct folio *folio;
+
 	if (!page)
 		return;
-	detach_page_private(page);
-	page->mapping = NULL;
-	unlock_page(page);
+	folio = page_folio(page);
+	folio_detach_private(folio);
+	folio->mapping = NULL;
+	folio_unlock(folio);
 	mempool_free(page, compress_page_pool);
 }
 
@@ -617,6 +618,7 @@ static void *f2fs_vmap(struct page **pages, unsigned int count)
 
 static int f2fs_compress_pages(struct compress_ctx *cc)
 {
+	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
 	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
 	const struct f2fs_compress_ops *cops =
				f2fs_cops[fi->i_compress_algorithm];
@@ -637,19 +639,14 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
 	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
 	cc->valid_nr_cpages = cc->nr_cpages;
 
-	cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
+	cc->cpages = page_array_alloc(sbi, cc->nr_cpages);
 	if (!cc->cpages) {
 		ret = -ENOMEM;
 		goto destroy_compress_ctx;
 	}
 
-	for (i = 0; i < cc->nr_cpages; i++) {
+	for (i = 0; i < cc->nr_cpages; i++)
 		cc->cpages[i] = f2fs_compress_alloc_page();
-		if (!cc->cpages[i]) {
-			ret = -ENOMEM;
-			goto out_free_cpages;
-		}
-	}
 
 	cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
 	if (!cc->rbuf) {
@@ -676,9 +673,8 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
 
 	cc->cbuf->clen = cpu_to_le32(cc->clen);
 
-	if (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)
-		chksum = f2fs_crc32(F2FS_I_SB(cc->inode),
-					cc->cbuf->cdata, cc->clen);
+	if (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))
+		chksum = f2fs_crc32(cc->cbuf->cdata, cc->clen);
 	cc->cbuf->chksum = cpu_to_le32(chksum);
 
 	for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
@@ -694,9 +690,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
 	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
 	vm_unmap_ram(cc->rbuf, cc->cluster_size);
 
-	for (i = 0; i < cc->nr_cpages; i++) {
-		if (i < new_nr_cpages)
-			continue;
+	for (i = new_nr_cpages; i < cc->nr_cpages; i++) {
 		f2fs_compress_free_page(cc->cpages[i]);
 		cc->cpages[i] = NULL;
 	}
@@ -719,7 +713,7 @@ out_free_cpages:
 		if (cc->cpages[i])
 			f2fs_compress_free_page(cc->cpages[i]);
 	}
-	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
+	page_array_free(sbi, cc->cpages, cc->nr_cpages);
 	cc->cpages = NULL;
destroy_compress_ctx:
 	if (cops->destroy_compress_ctx)
@@ -730,14 +724,19 @@ out:
 	return ret;
 }
 
-void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
+static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
+		bool pre_alloc);
+static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
+		bool bypass_destroy_callback, bool pre_alloc);
+
+void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
 {
-	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
+	struct f2fs_sb_info *sbi = dic->sbi;
 	struct f2fs_inode_info *fi = F2FS_I(dic->inode);
 	const struct f2fs_compress_ops *cops =
			f2fs_cops[fi->i_compress_algorithm];
+	bool bypass_callback = false;
 	int ret;
-	int i;
 
 	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
				dic->cluster_size, fi->i_compress_algorithm);
@@ -747,41 +746,10 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
 		goto out_end_io;
 	}
 
-	dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
-	if (!dic->tpages) {
-		ret = -ENOMEM;
-		goto out_end_io;
-	}
-
-	for (i = 0; i < dic->cluster_size; i++) {
-		if (dic->rpages[i]) {
-			dic->tpages[i] = dic->rpages[i];
-			continue;
-		}
-
-		dic->tpages[i] = f2fs_compress_alloc_page();
-		if (!dic->tpages[i]) {
-			ret = -ENOMEM;
-			goto out_end_io;
-		}
-	}
-
-	if (cops->init_decompress_ctx) {
-		ret = cops->init_decompress_ctx(dic);
-		if (ret)
-			goto out_end_io;
-	}
-
-	dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
-	if (!dic->rbuf) {
-		ret = -ENOMEM;
-		goto out_destroy_decompress_ctx;
-	}
-
-	dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
-	if (!dic->cbuf) {
-		ret = -ENOMEM;
-		goto out_vunmap_rbuf;
+	ret = f2fs_prepare_decomp_mem(dic, false);
+	if (ret) {
+		bypass_callback = true;
+		goto out_release;
 	}
 
 	dic->clen = le32_to_cpu(dic->cbuf->clen);
@@ -789,63 +757,64 @@
 
 	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
 		ret = -EFSCORRUPTED;
-		goto out_vunmap_cbuf;
+
+		/* Avoid f2fs_commit_super in irq context */
+		f2fs_handle_error(sbi, ERROR_FAIL_DECOMPRESSION);
+		goto out_release;
 	}
 
 	ret = cops->decompress_pages(dic);
 
-	if (!ret && (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)) {
+	if (!ret && (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))) {
 		u32 provided = le32_to_cpu(dic->cbuf->chksum);
-		u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);
+		u32 calculated = f2fs_crc32(dic->cbuf->cdata, dic->clen);
 
 		if (provided != calculated) {
 			if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) {
 				set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT);
-				printk_ratelimited(
-					"%sF2FS-fs (%s): checksum invalid, nid = %lu, %x vs %x",
-					KERN_INFO, sbi->sb->s_id, dic->inode->i_ino,
+				f2fs_info_ratelimited(sbi,
+					"checksum invalid, nid = %lu, %x vs %x",
+					dic->inode->i_ino,
					provided, calculated);
 			}
 			set_sbi_flag(sbi, SBI_NEED_FSCK);
 		}
 	}
 
-out_vunmap_cbuf:
-	vm_unmap_ram(dic->cbuf, dic->nr_cpages);
-out_vunmap_rbuf:
-	vm_unmap_ram(dic->rbuf, dic->cluster_size);
-out_destroy_decompress_ctx:
-	if (cops->destroy_decompress_ctx)
-		cops->destroy_decompress_ctx(dic);
+out_release:
+	f2fs_release_decomp_mem(dic, bypass_callback, false);
+
out_end_io:
 	trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
							dic->clen, ret);
-	f2fs_decompress_end_io(dic, ret);
+	f2fs_decompress_end_io(dic, ret, in_task);
 }
 
+static void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
+		struct folio *folio, nid_t ino, block_t blkaddr);
+
 /*
  * This is called when a page of a compressed cluster has been read from disk
  * (or failed to be read from disk).  It checks whether this page was the last
  * page being waited on in the cluster, and if so, it decompresses the cluster
  * (or in the case of a failure, cleans up without actually decompressing).
  */
-void f2fs_end_read_compressed_page(struct page *page, bool failed,
-		block_t blkaddr)
+void f2fs_end_read_compressed_page(struct folio *folio, bool failed,
+		block_t blkaddr, bool in_task)
 {
-	struct decompress_io_ctx *dic =
-			(struct decompress_io_ctx *)page_private(page);
-	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
+	struct decompress_io_ctx *dic = folio->private;
+	struct f2fs_sb_info *sbi = dic->sbi;
 
 	dec_page_count(sbi, F2FS_RD_DATA);
 
 	if (failed)
 		WRITE_ONCE(dic->failed, true);
-	else if (blkaddr)
-		f2fs_cache_compressed_page(sbi, page,
+	else if (blkaddr && in_task)
+		f2fs_cache_compressed_page(sbi, folio,
					dic->inode->i_ino, blkaddr);
 
 	if (atomic_dec_and_test(&dic->remaining_pages))
-		f2fs_decompress_cluster(dic);
+		f2fs_decompress_cluster(dic, in_task);
 }
 
 static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
@@ -872,19 +841,28 @@ bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
 	return is_page_in_cluster(cc, index);
 }
 
-bool f2fs_all_cluster_page_loaded(struct compress_ctx *cc, struct pagevec *pvec,
-				int index, int nr_pages)
+bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
+				int index, int nr_pages, bool uptodate)
 {
-	unsigned long pgidx;
-	int i;
+	unsigned long pgidx = page_folio(pages[index])->index;
+	int i = uptodate ? 0 : 1;
+
+	/*
+	 * when uptodate set to true, try to check all pages in cluster is
+	 * uptodate or not.
	 */
+	if (uptodate && (pgidx % cc->cluster_size))
+		return false;
 
 	if (nr_pages - index < cc->cluster_size)
 		return false;
 
-	pgidx = pvec->pages[index]->index;
+	for (; i < cc->cluster_size; i++) {
+		struct folio *folio = page_folio(pages[index + i]);
 
-	for (i = 1; i < cc->cluster_size; i++) {
-		if (pvec->pages[index + i]->index != pgidx + i)
+		if (folio->index != pgidx + i)
+			return false;
+		if (uptodate && !folio_test_uptodate(folio))
 			return false;
 	}
 
@@ -903,7 +881,7 @@ static bool cluster_has_invalid_data(struct compress_ctx *cc)
 		f2fs_bug_on(F2FS_I_SB(cc->inode), !page);
 
 		/* beyond EOF */
-		if (page->index >= nr_pages)
+		if (page_folio(page)->index >= nr_pages)
 			return true;
 	}
 	return false;
@@ -911,14 +889,15 @@ static bool cluster_has_invalid_data(struct compress_ctx *cc)
 
 bool f2fs_sanity_check_cluster(struct dnode_of_data *dn)
 {
+#ifdef CONFIG_F2FS_CHECK_FS
 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
 	unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
-	bool compressed = dn->data_blkaddr == COMPRESS_ADDR;
 	int cluster_end = 0;
+	unsigned int count;
 	int i;
 	char *reason = "";
 
-	if (!compressed)
+	if (dn->data_blkaddr != COMPRESS_ADDR)
 		return false;
 
 	/* [..., COMPR_ADDR, ...] */
@@ -927,8 +906,8 @@ bool f2fs_sanity_check_cluster(struct dnode_of_data *dn)
 		goto out;
 	}
 
-	for (i = 1; i < cluster_size; i++) {
-		block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
+	for (i = 1, count = 1; i < cluster_size; i++, count++) {
+		block_t blkaddr = data_blkaddr(dn->inode, dn->node_folio,
							dn->ofs_in_node + i);
 
 		/* [COMPR_ADDR, ..., COMPR_ADDR] */
@@ -936,32 +915,53 @@ bool f2fs_sanity_check_cluster(struct dnode_of_data *dn)
 			reason = "[C|*|C|*]";
 			goto out;
 		}
-		if (compressed) {
-			if (!__is_valid_data_blkaddr(blkaddr)) {
-				if (!cluster_end)
-					cluster_end = i;
-				continue;
-			}
-			/* [COMPR_ADDR, NULL_ADDR or NEW_ADDR, valid_blkaddr] */
-			if (cluster_end) {
-				reason = "[C|N|N|V]";
-				goto out;
-			}
+		if (!__is_valid_data_blkaddr(blkaddr)) {
+			if (!cluster_end)
+				cluster_end = i;
+			continue;
+		}
+		/* [COMPR_ADDR, NULL_ADDR or NEW_ADDR, valid_blkaddr] */
+		if (cluster_end) {
+			reason = "[C|N|N|V]";
+			goto out;
 		}
 	}
+
+	f2fs_bug_on(F2FS_I_SB(dn->inode), count != cluster_size &&
+		!is_inode_flag_set(dn->inode, FI_COMPRESS_RELEASED));
+
 	return false;
out:
 	f2fs_warn(sbi, "access invalid cluster, ino:%lu, nid:%u, ofs_in_node:%u, reason:%s",
			dn->inode->i_ino, dn->nid, dn->ofs_in_node, reason);
 	set_sbi_flag(sbi, SBI_NEED_FSCK);
 	return true;
+#else
+	return false;
+#endif
 }
 
-static int __f2fs_cluster_blocks(struct inode *inode,
-				unsigned int cluster_idx, bool compr)
+static int __f2fs_get_cluster_blocks(struct inode *inode,
+				struct dnode_of_data *dn)
 {
-	struct dnode_of_data dn;
 	unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
+	int count, i;
+
+	for (i = 0, count = 0; i < cluster_size; i++) {
+		block_t blkaddr = data_blkaddr(dn->inode, dn->node_folio,
							dn->ofs_in_node + i);
+
+		if (__is_valid_data_blkaddr(blkaddr))
+			count++;
+	}
+
+	return count;
+}
+
+static int __f2fs_cluster_blocks(struct inode *inode, unsigned int cluster_idx,
+				enum cluster_check_type type)
+{
+	struct dnode_of_data dn;
 	unsigned int start_idx = cluster_idx <<
				F2FS_I(inode)->i_log_cluster_size;
 	int ret;
@@ -980,26 +980,12 @@ static int __f2fs_cluster_blocks(struct inode *inode,
 	}
 
 	if (dn.data_blkaddr == COMPRESS_ADDR) {
-		int i;
-
-		ret = 1;
-		for (i = 1; i < cluster_size; i++) {
-			block_t blkaddr;
-
-			blkaddr = data_blkaddr(dn.inode,
-					dn.node_page, dn.ofs_in_node + i);
-			if (compr) {
-				if (__is_valid_data_blkaddr(blkaddr))
-					ret++;
-			} else {
-				if (blkaddr != NULL_ADDR)
-					ret++;
-			}
-		}
-
-		f2fs_bug_on(F2FS_I_SB(inode),
-			!compr && ret != cluster_size &&
-			!is_inode_flag_set(inode, FI_COMPRESS_RELEASED));
+		if (type == CLUSTER_COMPR_BLKS)
+			ret = 1 + __f2fs_get_cluster_blocks(inode, &dn);
+		else if (type == CLUSTER_IS_COMPR)
+			ret = 1;
+	} else if (type == CLUSTER_RAW_BLKS) {
+		ret = __f2fs_get_cluster_blocks(inode, &dn);
 	}
fail:
 	f2fs_put_dnode(&dn);
@@ -1009,15 +995,33 @@ fail:
 /* return # of compressed blocks in compressed cluster */
 static int f2fs_compressed_blocks(struct compress_ctx *cc)
 {
-	return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx, true);
+	return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx,
+		CLUSTER_COMPR_BLKS);
+}
+
+/* return # of raw blocks in non-compressed cluster */
+static int f2fs_decompressed_blocks(struct inode *inode,
+				unsigned int cluster_idx)
+{
+	return __f2fs_cluster_blocks(inode, cluster_idx,
+		CLUSTER_RAW_BLKS);
 }
 
-/* return # of valid blocks in compressed cluster */
+/* return whether cluster is compressed one or not */
 int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
 {
 	return __f2fs_cluster_blocks(inode,
		index >> F2FS_I(inode)->i_log_cluster_size,
-		false);
+		CLUSTER_IS_COMPR);
+}
+
+/* return whether cluster contains non raw blocks or not */
+bool f2fs_is_sparse_cluster(struct inode *inode, pgoff_t index)
+{
+	unsigned int cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size;
+
+	return f2fs_decompressed_blocks(inode, cluster_idx) !=
+		F2FS_I(inode)->i_cluster_size;
 }
 
 static bool cluster_may_compress(struct compress_ctx *cc)
@@ -1043,13 +1047,40 @@ static void set_cluster_writeback(struct compress_ctx *cc)
 	}
 }
 
+static void cancel_cluster_writeback(struct compress_ctx *cc,
+			struct compress_io_ctx *cic, int submitted)
+{
+	int i;
+
+	/* Wait for submitted IOs. */
+	if (submitted > 1) {
+		f2fs_submit_merged_write(F2FS_I_SB(cc->inode), DATA);
+		while (atomic_read(&cic->pending_pages) !=
					(cc->valid_nr_cpages - submitted + 1))
+			f2fs_io_schedule_timeout(DEFAULT_SCHEDULE_TIMEOUT);
+	}
+
+	/* Cancel writeback and stay locked. */
+	for (i = 0; i < cc->cluster_size; i++) {
+		if (i < submitted) {
+			inode_inc_dirty_pages(cc->inode);
+			lock_page(cc->rpages[i]);
+		}
+		clear_page_private_gcing(cc->rpages[i]);
+		if (folio_test_writeback(page_folio(cc->rpages[i])))
+			end_page_writeback(cc->rpages[i]);
+	}
+}
+
 static void set_cluster_dirty(struct compress_ctx *cc)
 {
 	int i;
 
 	for (i = 0; i < cc->cluster_size; i++)
-		if (cc->rpages[i])
+		if (cc->rpages[i]) {
 			set_page_dirty(cc->rpages[i]);
+			set_page_private_gcing(cc->rpages[i]);
+		}
 }
 
 static int prepare_compress_overwrite(struct compress_ctx *cc,
@@ -1057,9 +1088,9 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
 	struct address_space *mapping = cc->inode->i_mapping;
-	struct page *page;
+	struct folio *folio;
 	sector_t last_block_in_bio;
-	unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
+	fgf_t fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
 	pgoff_t start_idx = start_idx_of_cluster(cc);
 	int i, ret;
 
@@ -1072,32 +1103,32 @@ retry:
 	if (ret)
 		return ret;
 
-	/* keep page reference to avoid page reclaim */
+	/* keep folio reference to avoid page reclaim */
 	for (i = 0; i < cc->cluster_size; i++) {
-		page = f2fs_pagecache_get_page(mapping, start_idx + i,
-				fgp_flag, GFP_NOFS);
-		if (!page) {
-			ret = -ENOMEM;
+		folio = f2fs_filemap_get_folio(mapping, start_idx + i,
				fgp_flag, GFP_NOFS);
+		if (IS_ERR(folio)) {
+			ret = PTR_ERR(folio);
 			goto unlock_pages;
 		}
 
-		if (PageUptodate(page))
-			f2fs_put_page(page, 1);
+		if (folio_test_uptodate(folio))
+			f2fs_folio_put(folio, true);
 		else
-			f2fs_compress_ctx_add_page(cc, page);
+			f2fs_compress_ctx_add_page(cc, folio);
 	}
 
 	if (!f2fs_cluster_is_empty(cc)) {
 		struct bio *bio = NULL;
 
 		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
-					&last_block_in_bio, false, true);
+					&last_block_in_bio, NULL, true);
 		f2fs_put_rpages(cc);
 		f2fs_destroy_compress_ctx(cc, true);
 		if (ret)
 			goto out;
 		if (bio)
-			f2fs_submit_bio(sbi, bio, DATA);
+			f2fs_submit_read_bio(sbi, bio, DATA);
 
 		ret = f2fs_init_compress_ctx(cc);
 		if (ret)
@@ -1107,16 +1138,17 @@ retry:
 	for (i = 0; i < cc->cluster_size; i++) {
 		f2fs_bug_on(sbi, cc->rpages[i]);
 
-		page = find_lock_page(mapping, start_idx + i);
-		if (!page) {
-			/* page can be truncated */
+		folio = filemap_lock_folio(mapping, start_idx + i);
+		if (IS_ERR(folio)) {
+			/* folio could be truncated */
 			goto release_and_retry;
 		}
 
-		f2fs_wait_on_page_writeback(page, DATA, true, true);
-		f2fs_compress_ctx_add_page(cc, page);
+		f2fs_folio_wait_writeback(folio, DATA, true, true);
+		f2fs_compress_ctx_add_page(cc, folio);
 
-		if (!PageUptodate(page)) {
+		if (!folio_test_uptodate(folio)) {
+			f2fs_handle_page_eio(sbi, folio, DATA);
release_and_retry:
 			f2fs_put_rpages(cc);
 			f2fs_unlock_rpages(cc, i + 1);
@@ -1164,12 +1196,13 @@ bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
 		.cluster_size = F2FS_I(inode)->i_cluster_size,
 		.rpages = fsdata,
 	};
-	bool first_index = (index == cc.rpages[0]->index);
+	struct folio *folio = page_folio(cc.rpages[0]);
+	bool first_index = (index == folio->index);
 
 	if (copied)
 		set_cluster_dirty(&cc);
 
-	f2fs_put_rpages_wbc(&cc, NULL, false, 1);
+	f2fs_put_rpages_wbc(&cc, NULL, false, true);
 	f2fs_destroy_compress_ctx(&cc, false);
 
 	return first_index;
@@ -1179,9 +1212,11 @@ int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
 {
 	void *fsdata = NULL;
 	struct page *pagep;
+	struct page **rpages;
 	int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
 	pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
							log_cluster_size;
+	int i;
 	int err;
 
 	err = f2fs_is_compressed_cluster(inode, start_idx);
@@ -1202,26 +1237,30 @@ int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
 	if (err <= 0)
 		return err;
 
-	if (err > 0) {
-		struct page **rpages = fsdata;
-		int cluster_size = F2FS_I(inode)->i_cluster_size;
-		int i;
+	rpages = fsdata;
 
-		for (i = cluster_size - 1; i >= 0; i--) {
-			loff_t start = rpages[i]->index << PAGE_SHIFT;
+	for (i = (1 << log_cluster_size) - 1; i >= 0; i--) {
+		struct folio *folio = page_folio(rpages[i]);
+		loff_t start = (loff_t)folio->index << PAGE_SHIFT;
+		loff_t offset = from > start ? from - start : 0;
 
-			if (from <= start) {
-				zero_user_segment(rpages[i], 0, PAGE_SIZE);
-			} else {
-				zero_user_segment(rpages[i], from - start,
-						PAGE_SIZE);
-				break;
-			}
-		}
+		folio_zero_segment(folio, offset, folio_size(folio));
 
-		f2fs_compress_write_end(inode, fsdata, start_idx, true);
+		if (from >= start)
+			break;
 	}
-	return 0;
+
+	f2fs_compress_write_end(inode, fsdata, start_idx, true);
+
+	err = filemap_write_and_wait_range(inode->i_mapping,
+			round_down(from, 1 << log_cluster_size << PAGE_SHIFT),
+			LLONG_MAX);
+	if (err)
+		return err;
+
+	truncate_pagecache(inode, from);
+
+	return f2fs_do_truncate_blocks(inode, round_up(from, PAGE_SIZE), lock);
 }
 
 static int f2fs_write_compressed_pages(struct compress_ctx *cc,
@@ -1242,11 +1281,12 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
 		.page = NULL,
 		.encrypted_page = NULL,
 		.compressed_page = NULL,
-		.submitted = false,
 		.io_type = io_type,
 		.io_wbc = wbc,
-		.encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode),
+		.encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode) ?
+									1 : 0,
 	};
+	struct folio *folio;
 	struct dnode_of_data dn;
 	struct node_info ni;
 	struct compress_io_ctx *cic;
@@ -1254,20 +1294,21 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
 	unsigned int last_index = cc->cluster_size - 1;
 	loff_t psize;
 	int i, err;
+	bool quota_inode = IS_NOQUOTA(inode);
 
-	/* we should bypass data pages to proceed the kworkder jobs */
+	/* we should bypass data pages to proceed the kworker jobs */
 	if (unlikely(f2fs_cp_error(sbi))) {
-		mapping_set_error(cc->rpages[0]->mapping, -EIO);
+		mapping_set_error(inode->i_mapping, -EIO);
 		goto out_free;
 	}
 
-	if (IS_NOQUOTA(inode)) {
+	if (quota_inode) {
 		/*
		 * We need to wait for node_write to avoid block allocation during
		 * checkpoint. This can only happen to quota writes which can cause
		 * the below discard race condition.
		 */
-		down_read(&sbi->node_write);
+		f2fs_down_read(&sbi->node_write);
 	} else if (!f2fs_trylock_op(sbi)) {
 		goto out_free;
 	}
@@ -1279,12 +1320,13 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
 		goto out_unlock_op;
 
 	for (i = 0; i < cc->cluster_size; i++) {
-		if (data_blkaddr(dn.inode, dn.node_page,
+		if (data_blkaddr(dn.inode, dn.node_folio,
					dn.ofs_in_node + i) == NULL_ADDR)
 			goto out_put_dnode;
 	}
 
-	psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
+	folio = page_folio(cc->rpages[last_index]);
+	psize = folio_next_pos(folio);
 
 	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
 	if (err)
@@ -1299,7 +1341,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
 	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
 	cic->inode = inode;
 	atomic_set(&cic->pending_pages, cc->valid_nr_cpages);
-	cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
+	cic->rpages = page_array_alloc(sbi, cc->cluster_size);
 	if (!cic->rpages)
 		goto out_put_cic;
 
@@ -1307,10 +1349,10 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
 
 	for (i = 0; i < cc->valid_nr_cpages; i++) {
 		f2fs_set_compressed_page(cc->cpages[i], inode,
-					cc->rpages[i + 1]->index, cic);
+				page_folio(cc->rpages[i + 1])->index, cic);
 		fio.compressed_page = cc->cpages[i];
 
-		fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
+		fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_folio,
						dn.ofs_in_node + i + 1);
 
 		/* wait for GCed page writeback via META_MAPPING */
@@ -1342,7 +1384,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
 			if (blkaddr == COMPRESS_ADDR)
 				fio.compr_blocks++;
 			if (__is_valid_data_blkaddr(blkaddr))
-				f2fs_invalidate_blocks(sbi, blkaddr);
+				f2fs_invalidate_blocks(sbi, blkaddr, 1);
 			f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
 			goto unlock_continue;
 		}
@@ -1352,7 +1394,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
 
 		if (i > cc->valid_nr_cpages) {
 			if (__is_valid_data_blkaddr(blkaddr)) {
-				f2fs_invalidate_blocks(sbi, blkaddr);
+				f2fs_invalidate_blocks(sbi, blkaddr, 1);
 				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
 			}
 			goto unlock_continue;
@@ -1366,11 +1408,20 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
 			fio.compressed_page = cc->cpages[i - 1];
 			cc->cpages[i - 1] = NULL;
+			fio.submitted = 0;
 			f2fs_outplace_write_data(&dn, &fio);
+			if (unlikely(!fio.submitted)) {
+				cancel_cluster_writeback(cc, cic, i);
+
+				/* To call fscrypt_finalize_bounce_page */
+				i = cc->valid_nr_cpages;
+				*submitted = 0;
+				goto out_destroy_crypt;
+			}
 			(*submitted)++;
unlock_continue:
 			inode_dec_dirty_pages(cc->inode);
-			unlock_page(fio.page);
+			folio_unlock(fio.folio);
 	}
 
 	if (fio.compr_blocks)
@@ -1379,12 +1430,10 @@ unlock_continue:
 	add_compr_block_stat(inode, cc->valid_nr_cpages);
 
 	set_inode_flag(cc->inode, FI_APPEND_WRITE);
-	if (cc->cluster_idx == 0)
-		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
 
 	f2fs_put_dnode(&dn);
-	if (IS_NOQUOTA(inode))
-		up_read(&sbi->node_write);
+	if (quota_inode)
+		f2fs_up_read(&sbi->node_write);
 	else
 		f2fs_unlock_op(sbi);
 
@@ -1394,23 +1443,26 @@ unlock_continue:
 	spin_unlock(&fi->i_size_lock);
 
 	f2fs_put_rpages(cc);
-	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
+	page_array_free(sbi, cc->cpages, cc->nr_cpages);
 	cc->cpages = NULL;
 	f2fs_destroy_compress_ctx(cc, false);
 	return 0;
 
out_destroy_crypt:
-	page_array_free(cc->inode, cic->rpages, cc->cluster_size);
+	page_array_free(sbi, cic->rpages, cc->cluster_size);
 
-	for (--i; i >= 0; i--)
+	for (--i; i >= 0; i--) {
+		if (!cc->cpages[i])
+			continue;
 		fscrypt_finalize_bounce_page(&cc->cpages[i]);
+	}
out_put_cic:
 	kmem_cache_free(cic_entry_slab, cic);
out_put_dnode:
 	f2fs_put_dnode(&dn);
out_unlock_op:
-	if (IS_NOQUOTA(inode))
-		up_read(&sbi->node_write);
+	if (quota_inode)
+		f2fs_up_read(&sbi->node_write);
 	else
 		f2fs_unlock_op(sbi);
out_free:
@@ -1418,24 +1470,26 @@ out_free:
 		f2fs_compress_free_page(cc->cpages[i]);
 		cc->cpages[i] = NULL;
 	}
-	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
+	page_array_free(sbi, cc->cpages, cc->nr_cpages);
 	cc->cpages = NULL;
 	return -EAGAIN;
 }
 
-void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
+void f2fs_compress_write_end_io(struct bio *bio, struct folio *folio)
 {
+	struct page *page = &folio->page;
 	struct f2fs_sb_info *sbi = bio->bi_private;
-	struct compress_io_ctx *cic =
-			(struct compress_io_ctx *)page_private(page);
+	struct compress_io_ctx *cic = folio->private;
+	enum count_type type = WB_DATA_TYPE(folio,
+				f2fs_is_compressed_page(folio));
 	int i;
 
-	if (unlikely(bio->bi_status))
+	if (unlikely(bio->bi_status != BLK_STS_OK))
 		mapping_set_error(cic->inode->i_mapping, -EIO);
 
 	f2fs_compress_free_page(page);
 
-	dec_page_count(sbi, F2FS_WB_DATA);
+	dec_page_count(sbi, type);
 
 	if (atomic_dec_return(&cic->pending_pages))
 		return;
@@ -1446,17 +1500,19 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
 		end_page_writeback(cic->rpages[i]);
 	}
 
-	page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
+	page_array_free(sbi, cic->rpages, cic->nr_rpages);
 	kmem_cache_free(cic_entry_slab, cic);
 }
 
 static int f2fs_write_raw_pages(struct compress_ctx *cc,
-					int *submitted,
+					int *submitted_p,
					struct writeback_control *wbc,
					enum iostat_type io_type)
 {
 	struct address_space *mapping = cc->inode->i_mapping;
-	int _submitted, compr_blocks, ret, i;
+	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
+	int submitted, compr_blocks, i;
+	int ret = 0;
 
 	compr_blocks = f2fs_compressed_blocks(cc);
 
@@ -1471,54 +1527,68 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc,
 	if (compr_blocks < 0)
 		return compr_blocks;
 
+	/* overwrite compressed cluster w/ normal cluster */
+	if (compr_blocks > 0)
+		f2fs_lock_op(sbi);
+
 	for (i = 0; i < cc->cluster_size; i++) {
+		struct folio *folio;
+
 		if (!cc->rpages[i])
 			continue;
+		folio = page_folio(cc->rpages[i]);
retry_write:
-		lock_page(cc->rpages[i]);
+		folio_lock(folio);
 
-		if (cc->rpages[i]->mapping != mapping) {
+		if (folio->mapping != mapping) {
continue_unlock:
-			unlock_page(cc->rpages[i]);
+			folio_unlock(folio);
 			continue;
 		}
 
-		if (!PageDirty(cc->rpages[i]))
+		if (!folio_test_dirty(folio))
 			goto continue_unlock;
 
-		if (!clear_page_dirty_for_io(cc->rpages[i]))
+		if (folio_test_writeback(folio)) {
+			if (wbc->sync_mode == WB_SYNC_NONE)
+				goto continue_unlock;
+			f2fs_folio_wait_writeback(folio, DATA, true, true);
+		}
+
+		if (!folio_clear_dirty_for_io(folio))
 			goto continue_unlock;
 
-		ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
+		submitted = 0;
+		ret = f2fs_write_single_data_page(folio, &submitted,
						NULL, NULL, wbc, io_type,
						compr_blocks, false);
 		if (ret) {
-			if (ret == AOP_WRITEPAGE_ACTIVATE) {
-				unlock_page(cc->rpages[i]);
+			if (ret == 1) {
 				ret = 0;
 			} else if (ret == -EAGAIN) {
+				ret = 0;
 				/*
				 * for quota file, just redirty left pages to
				 * avoid deadlock caused by cluster update race
				 * from foreground operation.
				 */
 				if (IS_NOQUOTA(cc->inode))
-					return 0;
-				ret = 0;
-				cond_resched();
-				congestion_wait(BLK_RW_ASYNC,
-						DEFAULT_IO_TIMEOUT);
+					goto out;
+				f2fs_schedule_timeout(DEFAULT_SCHEDULE_TIMEOUT);
 				goto retry_write;
 			}
-			return ret;
+			goto out;
 		}
 
-		*submitted += _submitted;
+		*submitted_p += submitted;
 	}
 
-	f2fs_balance_fs(F2FS_M_SB(mapping), true);
+out:
+	if (compr_blocks > 0)
+		f2fs_unlock_op(sbi);
 
-	return 0;
+	f2fs_balance_fs(sbi, true);
+	return ret;
 }
 
 int f2fs_write_multi_pages(struct compress_ctx *cc,
@@ -1535,7 +1605,7 @@ int f2fs_write_multi_pages(struct compress_ctx *cc,
 			add_compr_block_stat(cc->inode, cc->cluster_size);
 			goto write;
 		} else if (err) {
-			f2fs_put_rpages_wbc(cc, wbc, true, 1);
+			f2fs_put_rpages_wbc(cc, wbc, true, true);
 			goto destroy_out;
 		}
 
@@ -1549,26 +1619,87 @@ write:
 	f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);
 
 	err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
-	f2fs_put_rpages_wbc(cc, wbc, false, 0);
+	f2fs_put_rpages_wbc(cc, wbc, false, false);
destroy_out:
 	f2fs_destroy_compress_ctx(cc, false);
 	return err;
 }
 
-static void f2fs_free_dic(struct decompress_io_ctx *dic);
+static inline bool allow_memalloc_for_decomp(struct f2fs_sb_info *sbi,
+		bool pre_alloc)
+{
+	return pre_alloc ^ f2fs_low_mem_mode(sbi);
+}
+
+static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
+		bool pre_alloc)
+{
+	const struct f2fs_compress_ops *cops = f2fs_cops[dic->compress_algorithm];
+	int i;
+
+	if (!allow_memalloc_for_decomp(dic->sbi, pre_alloc))
+		return 0;
+
+	dic->tpages = page_array_alloc(dic->sbi, dic->cluster_size);
+	if (!dic->tpages)
+		return -ENOMEM;
+
+	for (i = 0; i < dic->cluster_size; i++) {
+		if (dic->rpages[i]) {
+			dic->tpages[i] = dic->rpages[i];
+			continue;
+		}
+
+		dic->tpages[i] = f2fs_compress_alloc_page();
+	}
+
+	dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
+	if (!dic->rbuf)
+		return -ENOMEM;
+
+	dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
+	if (!dic->cbuf)
+		return -ENOMEM;
+
+	if (cops->init_decompress_ctx)
+		return cops->init_decompress_ctx(dic);
+
+	return 0;
+}
+
+static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
+		bool bypass_destroy_callback, bool pre_alloc)
+{
+	const struct f2fs_compress_ops *cops = f2fs_cops[dic->compress_algorithm];
+
+	if (!allow_memalloc_for_decomp(dic->sbi, pre_alloc))
+		return;
+
+	if (!bypass_destroy_callback && cops->destroy_decompress_ctx)
+		cops->destroy_decompress_ctx(dic);
+
+	if (dic->cbuf)
+		vm_unmap_ram(dic->cbuf, dic->nr_cpages);
+
+	if (dic->rbuf)
+		vm_unmap_ram(dic->rbuf, dic->cluster_size);
+}
+
+static void f2fs_free_dic(struct decompress_io_ctx *dic,
+		bool bypass_destroy_callback);
 
 struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
 {
 	struct decompress_io_ctx *dic;
 	pgoff_t start_idx = start_idx_of_cluster(cc);
-	int i;
+	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
+	int i, ret;
 
-	dic = f2fs_kmem_cache_alloc(dic_entry_slab, GFP_F2FS_ZERO,
-					false, F2FS_I_SB(cc->inode));
+	dic = f2fs_kmem_cache_alloc(dic_entry_slab, GFP_F2FS_ZERO, false, sbi);
 	if (!dic)
 		return ERR_PTR(-ENOMEM);
 
-	dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
+	dic->rpages = page_array_alloc(sbi, cc->cluster_size);
 	if (!dic->rpages) {
 		kmem_cache_free(dic_entry_slab, dic);
 		return ERR_PTR(-ENOMEM);
@@ -1576,6 +1707,8 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
 
 	dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
 	dic->inode = cc->inode;
+	dic->sbi = sbi;
+	dic->compress_algorithm = F2FS_I(cc->inode)->i_compress_algorithm;
 	atomic_set(&dic->remaining_pages, cc->nr_cpages);
 	dic->cluster_idx = cc->cluster_idx;
 	dic->cluster_size = cc->cluster_size;
@@ -1589,32 +1722,40 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
 		dic->rpages[i] = cc->rpages[i];
 	dic->nr_rpages = cc->cluster_size;
 
-	dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
-	if (!dic->cpages)
+	dic->cpages = page_array_alloc(sbi, dic->nr_cpages);
+	if (!dic->cpages) {
+		ret = -ENOMEM;
 		goto out_free;
+	}
 
 	for (i = 0; i < dic->nr_cpages; i++) {
 		struct page *page;
 
 		page = f2fs_compress_alloc_page();
-		if (!page)
-			goto out_free;
-
 		f2fs_set_compressed_page(page, cc->inode,
					start_idx + i + 1, dic);
 		dic->cpages[i] = page;
 	}
 
+	ret = f2fs_prepare_decomp_mem(dic, true);
+	if (ret)
+		goto out_free;
+
 	return dic;
 
out_free:
-	f2fs_free_dic(dic);
-	return ERR_PTR(-ENOMEM);
+	f2fs_free_dic(dic, true);
+	return ERR_PTR(ret);
 }
 
-static void f2fs_free_dic(struct decompress_io_ctx *dic)
+static void f2fs_free_dic(struct decompress_io_ctx *dic,
+		bool bypass_destroy_callback)
 {
 	int i;
+	/* use sbi in dic to avoid UFA of dic->inode*/
+	struct f2fs_sb_info *sbi = dic->sbi;
+
+	f2fs_release_decomp_mem(dic, bypass_destroy_callback, true);
 
 	if (dic->tpages) {
 		for (i = 0; i < dic->cluster_size; i++) {
@@ -1624,7 +1765,7 @@ static void f2fs_free_dic(struct decompress_io_ctx *dic)
 				continue;
 			f2fs_compress_free_page(dic->tpages[i]);
 		}
-		page_array_free(dic->inode, dic->tpages, dic->cluster_size);
+		page_array_free(sbi, dic->tpages, dic->cluster_size);
 	}
 
 	if (dic->cpages) {
@@ -1633,45 +1774,31 @@ static void f2fs_free_dic(struct decompress_io_ctx *dic)
 				continue;
 			f2fs_compress_free_page(dic->cpages[i]);
 		}
-		page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
+		page_array_free(sbi, dic->cpages, dic->nr_cpages);
 	}
 
-	page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
+	page_array_free(sbi, dic->rpages, dic->nr_rpages);
 	kmem_cache_free(dic_entry_slab, dic);
 }
 
-static void f2fs_put_dic(struct decompress_io_ctx *dic)
+static void f2fs_late_free_dic(struct work_struct *work)
 {
-	if (refcount_dec_and_test(&dic->refcnt))
-		f2fs_free_dic(dic);
+	struct decompress_io_ctx *dic =
		container_of(work, struct decompress_io_ctx, free_work);
+
+	f2fs_free_dic(dic, false);
 }
 
-/*
- * Update and unlock the cluster's pagecache pages, and release the reference to
- * the decompress_io_ctx that was being held for I/O completion.
- */
-static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
+static void f2fs_put_dic(struct decompress_io_ctx *dic, bool in_task)
 {
-	int i;
-
-	for (i = 0; i < dic->cluster_size; i++) {
-		struct page *rpage = dic->rpages[i];
-
-		if (!rpage)
-			continue;
-
-		/* PG_error was set if verity failed. */
-		if (failed || PageError(rpage)) {
-			ClearPageUptodate(rpage);
-			/* will re-read again later */
-			ClearPageError(rpage);
+	if (refcount_dec_and_test(&dic->refcnt)) {
+		if (in_task) {
+			f2fs_free_dic(dic, false);
 		} else {
-			SetPageUptodate(rpage);
+			INIT_WORK(&dic->free_work, f2fs_late_free_dic);
+			queue_work(dic->sbi->post_read_wq, &dic->free_work);
 		}
-		unlock_page(rpage);
 	}
-
-	f2fs_put_dic(dic);
 }
 
 static void f2fs_verify_cluster(struct work_struct *work)
@@ -1680,23 +1807,32 @@ static void f2fs_verify_cluster(struct work_struct *work)
			container_of(work, struct decompress_io_ctx, verity_work);
 	int i;
 
-	/* Verify the cluster's decompressed pages with fs-verity. */
+	/* Verify, update, and unlock the decompressed pages. */
 	for (i = 0; i < dic->cluster_size; i++) {
 		struct page *rpage = dic->rpages[i];
 
-		if (rpage && !fsverity_verify_page(rpage))
-			SetPageError(rpage);
+		if (!rpage)
+			continue;
+
+		if (fsverity_verify_page(rpage))
+			SetPageUptodate(rpage);
+		else
+			ClearPageUptodate(rpage);
+		unlock_page(rpage);
 	}
 
-	__f2fs_decompress_end_io(dic, false);
+	f2fs_put_dic(dic, true);
 }
 
 /*
  * This is called when a compressed cluster has been decompressed
  * (or failed to be read and/or decompressed).
 */
-void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
+void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
+		bool in_task)
 {
+	int i;
+
 	if (!failed && dic->need_verity) {
 		/*
		 * Note that to avoid deadlocks, the verity work can't be done
@@ -1706,38 +1842,58 @@ void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
		 */
 		INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
 		fsverity_enqueue_verify_work(&dic->verity_work);
-	} else {
-		__f2fs_decompress_end_io(dic, failed);
+		return;
+	}
+
+	/* Update and unlock the cluster's pagecache pages. */
+	for (i = 0; i < dic->cluster_size; i++) {
+		struct page *rpage = dic->rpages[i];
+
+		if (!rpage)
+			continue;
+
+		if (failed)
+			ClearPageUptodate(rpage);
+		else
+			SetPageUptodate(rpage);
+		unlock_page(rpage);
 	}
+
+	/*
+	 * Release the reference to the decompress_io_ctx that was being held
+	 * for I/O completion.
+	 */
+	f2fs_put_dic(dic, in_task);
 }
 
 /*
- * Put a reference to a compressed page's decompress_io_ctx.
+ * Put a reference to a compressed folio's decompress_io_ctx.
 *
- * This is called when the page is no longer needed and can be freed.
+ * This is called when the folio is no longer needed and can be freed.
 */
-void f2fs_put_page_dic(struct page *page)
+void f2fs_put_folio_dic(struct folio *folio, bool in_task)
 {
-	struct decompress_io_ctx *dic =
-			(struct decompress_io_ctx *)page_private(page);
+	struct decompress_io_ctx *dic = folio->private;
 
-	f2fs_put_dic(dic);
+	f2fs_put_dic(dic, in_task);
 }
 
 /*
 * check whether cluster blocks are contiguous, and add extent cache entry
 * only if cluster blocks are logically and physically contiguous.
 */
-unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn)
+unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn,
+						unsigned int ofs_in_node)
 {
-	bool compressed = f2fs_data_blkaddr(dn) == COMPRESS_ADDR;
+	bool compressed = data_blkaddr(dn->inode, dn->node_folio,
					ofs_in_node) == COMPRESS_ADDR;
 	int i = compressed ? 1 : 0;
-	block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
-						dn->ofs_in_node + i);
+	block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_folio,
						ofs_in_node + i);
 
 	for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
-		block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
-						dn->ofs_in_node + i);
+		block_t blkaddr = data_blkaddr(dn->inode, dn->node_folio,
						ofs_in_node + i);
 
 		if (!__is_valid_data_blkaddr(blkaddr))
			break;
@@ -1749,8 +1905,9 @@ unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn)
 }
 
 const struct address_space_operations f2fs_compress_aops = {
-	.releasepage = f2fs_release_page,
-	.invalidatepage = f2fs_invalidate_page,
+	.release_folio = f2fs_release_folio,
+	.invalidate_folio = f2fs_invalidate_folio,
+	.migrate_folio = filemap_migrate_folio,
 };
 
 struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
@@ -1758,17 +1915,18 @@ struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
 	return sbi->compress_inode->i_mapping;
 }
 
-void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr)
+void f2fs_invalidate_compress_pages_range(struct f2fs_sb_info *sbi,
+				block_t blkaddr, unsigned int len)
 {
 	if (!sbi->compress_inode)
		return;
-	invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr);
+	invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr + len - 1);
 }
 
-void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
-		nid_t ino, block_t blkaddr)
+static void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
+		struct folio *folio, nid_t ino, block_t blkaddr)
 {
-	struct page *cpage;
+	struct folio *cfolio;
 	int ret;
 
 	if (!test_opt(sbi, COMPRESS_CACHE))
@@ -1780,53 +1938,49 @@ void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
 	if (!f2fs_available_free_memory(sbi, COMPRESS_PAGE))
		return;
 
-	cpage = find_get_page(COMPRESS_MAPPING(sbi), blkaddr);
-	if (cpage) {
-		f2fs_put_page(cpage, 0);
+	cfolio = filemap_get_folio(COMPRESS_MAPPING(sbi), blkaddr);
+	if (!IS_ERR(cfolio)) {
+		f2fs_folio_put(cfolio, false);
		return;
 	}
 
-	cpage = alloc_page(__GFP_NOWARN | __GFP_IO);
-	if (!cpage)
+	cfolio = filemap_alloc_folio(__GFP_NOWARN | __GFP_IO, 0, NULL);
+	if (!cfolio)
		return;
 
-	ret = add_to_page_cache_lru(cpage, COMPRESS_MAPPING(sbi),
+	ret = filemap_add_folio(COMPRESS_MAPPING(sbi), cfolio,
						blkaddr, GFP_NOFS);
 	if (ret) {
-		f2fs_put_page(cpage, 0);
+		f2fs_folio_put(cfolio, false);
		return;
 	}
 
-	set_page_private_data(cpage, ino);
-
-	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
-		goto out;
+	folio_set_f2fs_data(cfolio, ino);
 
-	memcpy(page_address(cpage), page_address(page), PAGE_SIZE);
-	SetPageUptodate(cpage);
-out:
-	f2fs_put_page(cpage, 1);
+	memcpy(folio_address(cfolio), folio_address(folio), PAGE_SIZE);
+	folio_mark_uptodate(cfolio);
+	f2fs_folio_put(cfolio, true);
 }
 
-bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
+bool f2fs_load_compressed_folio(struct f2fs_sb_info *sbi, struct folio *folio,
								block_t blkaddr)
 {
-	struct page *cpage;
+	struct folio *cfolio;
 	bool hitted = false;
 
 	if (!test_opt(sbi, COMPRESS_CACHE))
		return false;
 
-	cpage = f2fs_pagecache_get_page(COMPRESS_MAPPING(sbi),
+	cfolio = f2fs_filemap_get_folio(COMPRESS_MAPPING(sbi),
				blkaddr, FGP_LOCK | FGP_NOWAIT, GFP_NOFS);
-	if (cpage) {
-		if (PageUptodate(cpage)) {
+	if (!IS_ERR(cfolio)) {
+		if (folio_test_uptodate(cfolio)) {
			atomic_inc(&sbi->compress_page_hit);
-			memcpy(page_address(page),
-				page_address(cpage), PAGE_SIZE);
+			memcpy(folio_address(folio),
+				folio_address(cfolio), folio_size(folio));
			hitted = true;
		}
-		f2fs_put_page(cpage, 1);
+		f2fs_folio_put(cfolio, true);
 	}
 
 	return hitted;
@@ -1834,46 +1988,41 @@ bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
 
 void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
 {
-	struct address_space *mapping = sbi->compress_inode->i_mapping;
-	struct pagevec pvec;
+	struct address_space *mapping = COMPRESS_MAPPING(sbi);
+	struct folio_batch fbatch;
 	pgoff_t index = 0;
 	pgoff_t end = MAX_BLKADDR(sbi);
 
 	if (!mapping->nrpages)
		return;
 
-	pagevec_init(&pvec);
+	folio_batch_init(&fbatch);
 
 	do {
-		unsigned int nr_pages;
-		int i;
+		unsigned int nr, i;
 
-		nr_pages = pagevec_lookup_range(&pvec, mapping,
-						&index, end - 1);
-		if (!nr_pages)
+		nr = filemap_get_folios(mapping, &index, end - 1, &fbatch);
+		if (!nr)
			break;
 
-		for (i = 0; i < nr_pages; i++) {
-			struct page *page = pvec.pages[i];
+		for (i = 0; i < nr; i++) {
+			struct folio *folio = fbatch.folios[i];
 
-			if (page->index > end)
-				break;
-
-			lock_page(page);
-			if (page->mapping != mapping) {
-				unlock_page(page);
+			folio_lock(folio);
+			if (folio->mapping != mapping) {
+				folio_unlock(folio);
				continue;
			}
 
-			if (ino != get_page_private_data(page)) {
-				unlock_page(page);
+			if (ino != folio_get_f2fs_data(folio)) {
+				folio_unlock(folio);
				continue;
			}
 
-			generic_error_remove_page(mapping, page);
-			unlock_page(page);
+			generic_error_remove_folio(mapping, folio);
+			folio_unlock(folio);
		}
-		pagevec_release(&pvec);
+		folio_batch_release(&fbatch);
		cond_resched();
	} while (index < end);
 }
@@ -1909,7 +2058,10 @@ void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
 int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
 {
	dev_t dev = sbi->sb->s_bdev->bd_dev;
-	char slab_name[32];
+	char slab_name[35];
+
+	if (!f2fs_sb_has_compression(sbi))
+		return 0;
 
	sprintf(slab_name, "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));
 
@@ -1918,9 +2070,7 @@ int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
 
	sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
					sbi->page_array_slab_size);
-	if (!sbi->page_array_slab)
-		return -ENOMEM;
-	return 0;
+	return sbi->page_array_slab ? 0 : -ENOMEM;
 }
 
 void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
@@ -1928,53 +2078,24 @@ void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
	kmem_cache_destroy(sbi->page_array_slab);
 }
 
-static int __init f2fs_init_cic_cache(void)
+int __init f2fs_init_compress_cache(void)
 {
	cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry",
					sizeof(struct compress_io_ctx));
	if (!cic_entry_slab)
		return -ENOMEM;
-	return 0;
-}
-
-static void f2fs_destroy_cic_cache(void)
-{
-	kmem_cache_destroy(cic_entry_slab);
-}
-
-static int __init f2fs_init_dic_cache(void)
-{
	dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry",
					sizeof(struct decompress_io_ctx));
	if (!dic_entry_slab)
-		return -ENOMEM;
-	return 0;
-}
-
-static void f2fs_destroy_dic_cache(void)
-{
-	kmem_cache_destroy(dic_entry_slab);
-}
-
-int __init f2fs_init_compress_cache(void)
-{
-	int err;
-
-	err = f2fs_init_cic_cache();
-	if (err)
-		goto out;
-	err = f2fs_init_dic_cache();
-	if (err)
		goto free_cic;
	return 0;
free_cic:
-	f2fs_destroy_cic_cache();
-out:
+	kmem_cache_destroy(cic_entry_slab);
	return -ENOMEM;
 }
 
 void f2fs_destroy_compress_cache(void)
 {
-	f2fs_destroy_dic_cache();
-	f2fs_destroy_cic_cache();
+	kmem_cache_destroy(dic_entry_slab);
+	kmem_cache_destroy(cic_entry_slab);
 }
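The most self-contained piece of new machinery above is the per-algorithm is_level_valid callback, dispatched through f2fs_is_compress_level_valid(): an algorithm that supplies the hook decides its own acceptable range, and one that does not only accepts level 0. Below is a minimal userspace C sketch of that dispatch pattern. The numeric bounds (3..12 for the LZ4HC-style range, 1..22 for the zstd-style range) are stand-ins for the kernel's LZ4HC_MIN_CLEVEL/LZ4HC_MAX_CLEVEL and zstd_min_clevel()/zstd_max_clevel(), and the table indices are illustrative, not the kernel's algorithm enum.

/* Sketch of the is_level_valid dispatch added by this patch.
 * Level bounds and array indices are assumptions for illustration. */
#include <stdbool.h>
#include <stdio.h>

struct compress_ops {
	const char *name;
	/* NULL means the algorithm takes no level (only 0 is valid). */
	bool (*is_level_valid)(int lvl);
};

static bool lz4_is_level_valid(int lvl)
{
	/* 0 selects plain LZ4; 3..12 stands in for the LZ4HC range. */
	return !lvl || (lvl >= 3 && lvl <= 12);
}

static bool zstd_is_level_valid(int lvl)
{
	/* 1..22 stands in for zstd_min_clevel()..zstd_max_clevel(). */
	return lvl >= 1 && lvl <= 22;
}

static const struct compress_ops cops[] = {
	{ "lzo",     NULL },			/* no level support */
	{ "lz4",     lz4_is_level_valid },
	{ "zstd",    zstd_is_level_valid },
	{ "lzo-rle", NULL },			/* no level support */
};

/* Mirrors f2fs_is_compress_level_valid(): without a callback,
 * only level 0 is accepted. */
static bool is_compress_level_valid(int alg, int lvl)
{
	const struct compress_ops *ops = &cops[alg];

	if (ops->is_level_valid)
		return ops->is_level_valid(lvl);

	return lvl == 0;
}

int main(void)
{
	/* e.g. a mount option like compress_algorithm=zstd:6 would be
	 * validated through a call like this before being accepted */
	struct { int alg, lvl; } cases[] = { {2, 6}, {1, 2}, {0, 1} };

	for (unsigned int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
		printf("%s:%d -> %s\n", cops[cases[i].alg].name, cases[i].lvl,
		       is_compress_level_valid(cases[i].alg, cases[i].lvl) ?
		       "ok" : "invalid");
	return 0;
}

The natural caller in the kernel is presumably option parsing, which can now reject an out-of-range level per algorithm instead of hard-coding each range at the call site.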
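A subtler addition is allow_memalloc_for_decomp(), which gates both f2fs_prepare_decomp_mem() and f2fs_release_decomp_mem() with pre_alloc ^ f2fs_low_mem_mode(sbi). Each call site passes pre_alloc according to where it sits (true from f2fs_alloc_dic()/f2fs_free_dic(), false from f2fs_decompress_cluster()), so the decompression buffers are set up early in normal mode but deferred to decompression time in low-memory mode, and are never prepared or released twice. A truth-table sketch, with low_mem standing in for f2fs_low_mem_mode(sbi):

#include <stdbool.h>
#include <stdio.h>

/* pre_alloc ^ low_mem: the work happens either at dic allocation time
 * (normal mode) or at decompression time (low-memory mode), never both. */
static bool allow_memalloc_for_decomp(bool low_mem, bool pre_alloc)
{
	return pre_alloc ^ low_mem;
}

int main(void)
{
	for (int low_mem = 0; low_mem <= 1; low_mem++)
		for (int pre_alloc = 0; pre_alloc <= 1; pre_alloc++)
			printf("low_mem=%d pre_alloc=%d -> %s\n",
			       low_mem, pre_alloc,
			       allow_memalloc_for_decomp(low_mem, pre_alloc) ?
			       "allocate/release here" : "skip");
	return 0;
}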
