From 9e579fc123a0c95f4fce695f86e5aeb1cf3464ee Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Tue, 8 Oct 2019 20:56:12 +0800 Subject: erofs: clean up collection handling routines - change return value to int since collection is already returned within the collector. - better function naming. Link: https://lore.kernel.org/r/20191008125616.183715-1-gaoxiang25@huawei.com Reviewed-by: Chao Yu Signed-off-by: Gao Xiang --- fs/erofs/zdata.c | 47 +++++++++++++++++++++++++---------------------- 1 file changed, 25 insertions(+), 22 deletions(-) (limited to 'fs') diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index fad80c97d247..ef32757d1aac 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -337,9 +337,9 @@ retry: return COLLECT_PRIMARY; /* :( better luck next time */ } -static struct z_erofs_collection *cllookup(struct z_erofs_collector *clt, - struct inode *inode, - struct erofs_map_blocks *map) +static int z_erofs_lookup_collection(struct z_erofs_collector *clt, + struct inode *inode, + struct erofs_map_blocks *map) { struct erofs_workgroup *grp; struct z_erofs_pcluster *pcl; @@ -349,20 +349,20 @@ static struct z_erofs_collection *cllookup(struct z_erofs_collector *clt, grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT, &tag); if (!grp) - return NULL; + return -ENOENT; pcl = container_of(grp, struct z_erofs_pcluster, obj); if (clt->owned_head == &pcl->next || pcl == clt->tailpcl) { DBG_BUGON(1); erofs_workgroup_put(grp); - return ERR_PTR(-EFSCORRUPTED); + return -EFSCORRUPTED; } cl = z_erofs_primarycollection(pcl); if (cl->pageofs != (map->m_la & ~PAGE_MASK)) { DBG_BUGON(1); erofs_workgroup_put(grp); - return ERR_PTR(-EFSCORRUPTED); + return -EFSCORRUPTED; } length = READ_ONCE(pcl->length); @@ -370,7 +370,7 @@ static struct z_erofs_collection *cllookup(struct z_erofs_collector *clt, if ((map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) > length) { DBG_BUGON(1); erofs_workgroup_put(grp); - return ERR_PTR(-EFSCORRUPTED); + return -EFSCORRUPTED; } } else { unsigned int llen = map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT; @@ -394,12 +394,12 @@ static struct z_erofs_collection *cllookup(struct z_erofs_collector *clt, clt->tailpcl = NULL; clt->pcl = pcl; clt->cl = cl; - return cl; + return 0; } -static struct z_erofs_collection *clregister(struct z_erofs_collector *clt, - struct inode *inode, - struct erofs_map_blocks *map) +static int z_erofs_register_collection(struct z_erofs_collector *clt, + struct inode *inode, + struct erofs_map_blocks *map) { struct z_erofs_pcluster *pcl; struct z_erofs_collection *cl; @@ -408,7 +408,7 @@ static struct z_erofs_collection *clregister(struct z_erofs_collector *clt, /* no available workgroup, let's allocate one */ pcl = kmem_cache_alloc(pcluster_cachep, GFP_NOFS); if (!pcl) - return ERR_PTR(-ENOMEM); + return -ENOMEM; z_erofs_pcluster_init_always(pcl); pcl->obj.index = map->m_pa >> PAGE_SHIFT; @@ -442,7 +442,7 @@ static struct z_erofs_collection *clregister(struct z_erofs_collector *clt, if (err) { mutex_unlock(&cl->lock); kmem_cache_free(pcluster_cachep, pcl); - return ERR_PTR(-EAGAIN); + return -EAGAIN; } /* used to check tail merging loop due to corrupted images */ if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL) @@ -450,14 +450,14 @@ static struct z_erofs_collection *clregister(struct z_erofs_collector *clt, clt->owned_head = &pcl->next; clt->pcl = pcl; clt->cl = cl; - return cl; + return 0; } static int z_erofs_collector_begin(struct z_erofs_collector *clt, struct inode *inode, struct erofs_map_blocks *map) { - struct z_erofs_collection *cl; + int 
ret; DBG_BUGON(clt->cl); @@ -471,19 +471,22 @@ static int z_erofs_collector_begin(struct z_erofs_collector *clt, } repeat: - cl = cllookup(clt, inode, map); - if (!cl) { - cl = clregister(clt, inode, map); + ret = z_erofs_lookup_collection(clt, inode, map); + if (ret == -ENOENT) { + ret = z_erofs_register_collection(clt, inode, map); - if (cl == ERR_PTR(-EAGAIN)) + /* someone registered at the same time, give another try */ + if (ret == -EAGAIN) { + cond_resched(); goto repeat; + } } - if (IS_ERR(cl)) - return PTR_ERR(cl); + if (ret) + return ret; z_erofs_pagevec_ctor_init(&clt->vector, Z_EROFS_NR_INLINE_PAGEVECS, - cl->pagevec, cl->vcnt); + clt->cl->pagevec, clt->cl->vcnt); clt->compressedpages = clt->pcl->compressed_pages; if (clt->mode <= COLLECT_PRIMARY) /* cannot do in-place I/O */ -- cgit From bda17a4577da729d17b8f87bf3279b9db201d8ca Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Tue, 8 Oct 2019 20:56:13 +0800 Subject: erofs: remove dead code since managed cache is now built-in After commit 4279f3f9889f ("staging: erofs: turn cache strategies into mount options"), cache strategies are changed into mount options rather than old build configs. Let's kill useless code for obsoleted build options. Link: https://lore.kernel.org/r/20191008125616.183715-2-gaoxiang25@huawei.com Reviewed-by: Chao Yu Signed-off-by: Gao Xiang --- fs/erofs/utils.c | 13 ++++++------- fs/erofs/zdata.c | 25 ++++--------------------- 2 files changed, 10 insertions(+), 28 deletions(-) (limited to 'fs') diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c index d92b3e753a6f..f66043ee16b9 100644 --- a/fs/erofs/utils.c +++ b/fs/erofs/utils.c @@ -149,8 +149,7 @@ static void erofs_workgroup_unfreeze_final(struct erofs_workgroup *grp) } static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi, - struct erofs_workgroup *grp, - bool cleanup) + struct erofs_workgroup *grp) { /* * If managed cache is on, refcount of workgroups @@ -188,8 +187,7 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi, } static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi, - unsigned long nr_shrink, - bool cleanup) + unsigned long nr_shrink) { pgoff_t first_index = 0; void *batch[PAGEVEC_SIZE]; @@ -208,7 +206,7 @@ repeat: first_index = grp->index + 1; /* try to shrink each valid workgroup */ - if (!erofs_try_to_release_workgroup(sbi, grp, cleanup)) + if (!erofs_try_to_release_workgroup(sbi, grp)) continue; ++freed; @@ -245,7 +243,8 @@ void erofs_shrinker_unregister(struct super_block *sb) struct erofs_sb_info *const sbi = EROFS_SB(sb); mutex_lock(&sbi->umount_mutex); - erofs_shrink_workstation(sbi, ~0UL, true); + /* clean up all remaining workgroups in memory */ + erofs_shrink_workstation(sbi, ~0UL); spin_lock(&erofs_sb_list_lock); list_del(&sbi->list); @@ -294,7 +293,7 @@ static unsigned long erofs_shrink_scan(struct shrinker *shrink, spin_unlock(&erofs_sb_list_lock); sbi->shrinker_run_no = run_no; - freed += erofs_shrink_workstation(sbi, nr, false); + freed += erofs_shrink_workstation(sbi, nr); spin_lock(&erofs_sb_list_lock); /* Get the next list element before we move this one */ diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index ef32757d1aac..93f8bc1a64f6 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -574,7 +574,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, struct list_head *pagepool) { struct inode *const inode = fe->inode; - struct erofs_sb_info *const sbi __maybe_unused = EROFS_I_SB(inode); + struct erofs_sb_info *const sbi = EROFS_I_SB(inode); struct 
erofs_map_blocks *const map = &fe->map; struct z_erofs_collector *const clt = &fe->clt; const loff_t offset = page_offset(page); @@ -997,8 +997,6 @@ static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl, struct address_space *mc, gfp_t gfp) { - /* determined at compile time to avoid too many #ifdefs */ - const bool nocache = __builtin_constant_p(mc) ? !mc : false; const pgoff_t index = pcl->obj.index; bool tocache = false; @@ -1019,7 +1017,7 @@ repeat: * the cached page has not been allocated and * an placeholder is out there, prepare it now. */ - if (!nocache && page == PAGE_UNALLOCATED) { + if (page == PAGE_UNALLOCATED) { tocache = true; goto out_allocpage; } @@ -1031,21 +1029,6 @@ repeat: mapping = READ_ONCE(page->mapping); - /* - * if managed cache is disabled, it's no way to - * get such a cached-like page. - */ - if (nocache) { - /* if managed cache is disabled, it is impossible `justfound' */ - DBG_BUGON(justfound); - - /* and it should be locked, not uptodate, and not truncated */ - DBG_BUGON(!PageLocked(page)); - DBG_BUGON(PageUptodate(page)); - DBG_BUGON(!mapping); - goto out; - } - /* * unmanaged (file) pages are all locked solidly, * therefore it is impossible for `mapping' to be NULL. @@ -1102,7 +1085,7 @@ out_allocpage: cpu_relax(); goto repeat; } - if (nocache || !tocache) + if (!tocache) goto out; if (add_to_page_cache_lru(page, mc, index + nr, gfp)) { page->mapping = Z_EROFS_MAPPING_STAGING; @@ -1208,7 +1191,7 @@ static bool z_erofs_vle_submit_all(struct super_block *sb, struct z_erofs_unzip_io *fgq, bool force_fg) { - struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb); + struct erofs_sb_info *const sbi = EROFS_SB(sb); z_erofs_next_pcluster_t qtail[NR_JOBQUEUES]; struct z_erofs_unzip_io *q[NR_JOBQUEUES]; struct bio *bio; -- cgit From 5ddcee1f3a1ccaccb31bc17080f75a0bb13b4906 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Thu, 21 Nov 2019 21:59:54 +0800 Subject: erofs: get rid of __stagingpage_alloc helper After iterative development, the open-coded call sites are now much cleaner than this tiny helper, so get rid of it.
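In other words, each former __stagingpage_alloc() call site now open-codes the helper's two steps itself. A minimal sketch of the resulting pattern (mirroring the zdata.c hunks below; surrounding context elided):

	/* grab a free page from the pool, or allocate one (cannot fail) */
	page = erofs_allocpage(pagepool, GFP_NOFS | __GFP_NOFAIL);
	/* mark it as a temporary staging page */
	page->mapping = Z_EROFS_MAPPING_STAGING;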
Link: https://lore.kernel.org/r/20191124025217.12345-1-hsiangkao@aol.com Reviewed-by: Chao Yu Signed-off-by: Gao Xiang --- fs/erofs/decompressor.c | 2 +- fs/erofs/internal.h | 2 +- fs/erofs/utils.c | 4 ++-- fs/erofs/zdata.c | 37 +++++++++++++++++-------------------- 4 files changed, 21 insertions(+), 24 deletions(-) (limited to 'fs') diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index 19f89f9fb10c..2890a67a1ded 100644 --- a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -73,7 +73,7 @@ static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq, victim = availables[--top]; get_page(victim); } else { - victim = erofs_allocpage(pagepool, GFP_KERNEL, false); + victim = erofs_allocpage(pagepool, GFP_KERNEL); if (!victim) return -ENOMEM; victim->mapping = Z_EROFS_MAPPING_STAGING; diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index 544a453f3076..0c1175a08e54 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -382,7 +382,7 @@ int erofs_namei(struct inode *dir, struct qstr *name, extern const struct file_operations erofs_dir_fops; /* utils.c / zdata.c */ -struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail); +struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp); #if (EROFS_PCPUBUF_NR_PAGES > 0) void *erofs_get_pcpubuf(unsigned int pagenr); diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c index f66043ee16b9..1e8e1450d5b0 100644 --- a/fs/erofs/utils.c +++ b/fs/erofs/utils.c @@ -7,7 +7,7 @@ #include "internal.h" #include -struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail) +struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp) { struct page *page; @@ -16,7 +16,7 @@ struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail) DBG_BUGON(page_ref_count(page) != 1); list_del(&page->lru); } else { - page = alloc_pages(gfp | (nofail ? 
__GFP_NOFAIL : 0), 0); + page = alloc_page(gfp); } return page; } diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 93f8bc1a64f6..1c582a3a40a3 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -546,15 +546,6 @@ static bool z_erofs_collector_end(struct z_erofs_collector *clt) return true; } -static inline struct page *__stagingpage_alloc(struct list_head *pagepool, - gfp_t gfp) -{ - struct page *page = erofs_allocpage(pagepool, gfp, true); - - page->mapping = Z_EROFS_MAPPING_STAGING; - return page; -} - static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe, unsigned int cachestrategy, erofs_off_t la) @@ -661,8 +652,9 @@ retry: /* should allocate an additional staging page for pagevec */ if (err == -EAGAIN) { struct page *const newpage = - __stagingpage_alloc(pagepool, GFP_NOFS); + erofs_allocpage(pagepool, GFP_NOFS | __GFP_NOFAIL); + newpage->mapping = Z_EROFS_MAPPING_STAGING; err = z_erofs_attach_page(clt, newpage, Z_EROFS_PAGE_TYPE_EXCLUSIVE); if (!err) @@ -1079,19 +1071,24 @@ repeat: unlock_page(page); put_page(page); out_allocpage: - page = __stagingpage_alloc(pagepool, gfp); - if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) { - list_add(&page->lru, pagepool); - cpu_relax(); - goto repeat; - } - if (!tocache) - goto out; - if (add_to_page_cache_lru(page, mc, index + nr, gfp)) { + page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL); + if (!tocache || add_to_page_cache_lru(page, mc, index + nr, gfp)) { + /* non-LRU / non-movable temporary page is needed */ page->mapping = Z_EROFS_MAPPING_STAGING; - goto out; + tocache = false; } + if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) { + if (tocache) { + /* since it added to managed cache successfully */ + unlock_page(page); + put_page(page); + } else { + list_add(&page->lru, pagepool); + } + cond_resched(); + goto repeat; + } set_page_private(page, (unsigned long)pcl); SetPagePrivate(page); out: /* the only exit (for tracing and debugging) */ -- cgit From a4b1fab121ec3235e6c5f3543ae1937426bd4eb4 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Tue, 8 Oct 2019 20:56:15 +0800 Subject: erofs: clean up decompress queue stuffs Previously, both z_erofs_unzip_io and z_erofs_unzip_io_sb recorded decompress queues for the backend to use. The only difference is that z_erofs_unzip_io is used for on-stack sync decompression and therefore has no super_block field (the caller can pass it in its own context), but that saves only a single pointer at the cost of extra complexity. Rename z_erofs_unzip_io to z_erofs_decompressqueue with a fixed super_block member, kill the other structure entirely, and fall back to sync decompression on memory allocation failure.
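The consolidated descriptor then carries the queue state and its superblock in a single object; a sketch of the resulting layout (matching the zdata.h hunk below — the comments on the union members are editorial):

	struct z_erofs_decompressqueue {
		struct super_block *sb;
		atomic_t pending_bios;
		z_erofs_next_pcluster_t head;

		union {
			wait_queue_head_t wait;   /* foreground (sync) mode */
			struct work_struct work;  /* background (async) mode */
		} u;
	};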
Link: https://lore.kernel.org/r/20191008125616.183715-4-gaoxiang25@huawei.com Reviewed-by: Chao Yu Signed-off-by: Gao Xiang --- fs/erofs/zdata.c | 133 ++++++++++++++++++++++++------------------------------- fs/erofs/zdata.h | 8 +--- 2 files changed, 60 insertions(+), 81 deletions(-) (limited to 'fs') diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 1c582a3a40a3..f6056ba80424 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -693,13 +693,11 @@ err_out: goto out; } -static void z_erofs_vle_unzip_kickoff(void *ptr, int bios) +static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io, + bool sync, int bios) { - tagptr1_t t = tagptr_init(tagptr1_t, ptr); - struct z_erofs_unzip_io *io = tagptr_unfold_ptr(t); - bool background = tagptr_unfold_tags(t); - - if (!background) { + /* wake up the caller thread for sync decompression */ + if (sync) { unsigned long flags; spin_lock_irqsave(&io->u.wait.lock, flags); @@ -713,37 +711,30 @@ static void z_erofs_vle_unzip_kickoff(void *ptr, int bios) queue_work(z_erofs_workqueue, &io->u.work); } -static inline void z_erofs_vle_read_endio(struct bio *bio) +static void z_erofs_vle_read_endio(struct bio *bio) { - struct erofs_sb_info *sbi = NULL; + tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private); + struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t); blk_status_t err = bio->bi_status; struct bio_vec *bvec; struct bvec_iter_all iter_all; bio_for_each_segment_all(bvec, bio, iter_all) { struct page *page = bvec->bv_page; - bool cachemngd = false; DBG_BUGON(PageUptodate(page)); DBG_BUGON(!page->mapping); - if (!sbi && !z_erofs_page_is_staging(page)) - sbi = EROFS_SB(page->mapping->host->i_sb); - - /* sbi should already be gotten if the page is managed */ - if (sbi) - cachemngd = erofs_page_is_managed(sbi, page); - if (err) SetPageError(page); - else if (cachemngd) - SetPageUptodate(page); - if (cachemngd) + if (erofs_page_is_managed(EROFS_SB(q->sb), page)) { + if (!err) + SetPageUptodate(page); unlock_page(page); + } } - - z_erofs_vle_unzip_kickoff(bio->bi_private, -1); + z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1); bio_put(bio); } @@ -948,8 +939,7 @@ out: return err; } -static void z_erofs_vle_unzip_all(struct super_block *sb, - struct z_erofs_unzip_io *io, +static void z_erofs_vle_unzip_all(const struct z_erofs_decompressqueue *io, struct list_head *pagepool) { z_erofs_next_pcluster_t owned = io->head; @@ -966,21 +956,21 @@ static void z_erofs_vle_unzip_all(struct super_block *sb, pcl = container_of(owned, struct z_erofs_pcluster, next); owned = READ_ONCE(pcl->next); - z_erofs_decompress_pcluster(sb, pcl, pagepool); + z_erofs_decompress_pcluster(io->sb, pcl, pagepool); } } static void z_erofs_vle_unzip_wq(struct work_struct *work) { - struct z_erofs_unzip_io_sb *iosb = - container_of(work, struct z_erofs_unzip_io_sb, io.u.work); + struct z_erofs_decompressqueue *bgq = + container_of(work, struct z_erofs_decompressqueue, u.work); LIST_HEAD(pagepool); - DBG_BUGON(iosb->io.head == Z_EROFS_PCLUSTER_TAIL_CLOSED); - z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &pagepool); + DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL_CLOSED); + z_erofs_vle_unzip_all(bgq, &pagepool); put_pages_list(&pagepool); - kvfree(iosb); + kvfree(bgq); } static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl, @@ -1095,31 +1085,28 @@ out: /* the only exit (for tracing and debugging) */ return page; } -static struct z_erofs_unzip_io *jobqueue_init(struct super_block *sb, - struct z_erofs_unzip_io *io, - bool foreground) +static 
struct z_erofs_decompressqueue * +jobqueue_init(struct super_block *sb, + struct z_erofs_decompressqueue *fgq, bool *fg) { - struct z_erofs_unzip_io_sb *iosb; + struct z_erofs_decompressqueue *q; - if (foreground) { - /* waitqueue available for foreground io */ - DBG_BUGON(!io); - - init_waitqueue_head(&io->u.wait); - atomic_set(&io->pending_bios, 0); - goto out; + if (fg && !*fg) { + q = kvzalloc(sizeof(*q), GFP_KERNEL | __GFP_NOWARN); + if (!q) { + *fg = true; + goto fg_out; + } + INIT_WORK(&q->u.work, z_erofs_vle_unzip_wq); + } else { +fg_out: + q = fgq; + init_waitqueue_head(&fgq->u.wait); + atomic_set(&fgq->pending_bios, 0); } - - iosb = kvzalloc(sizeof(*iosb), GFP_KERNEL | __GFP_NOFAIL); - DBG_BUGON(!iosb); - - /* initialize fields in the allocated descriptor */ - io = &iosb->io; - iosb->sb = sb; - INIT_WORK(&io->u.work, z_erofs_vle_unzip_wq); -out: - io->head = Z_EROFS_PCLUSTER_TAIL_CLOSED; - return io; + q->sb = sb; + q->head = Z_EROFS_PCLUSTER_TAIL_CLOSED; + return q; } /* define decompression jobqueue types */ @@ -1130,22 +1117,17 @@ enum { }; static void *jobqueueset_init(struct super_block *sb, - z_erofs_next_pcluster_t qtail[], - struct z_erofs_unzip_io *q[], - struct z_erofs_unzip_io *fgq, - bool forcefg) + struct z_erofs_decompressqueue *q[], + struct z_erofs_decompressqueue *fgq, bool *fg) { /* * if managed cache is enabled, bypass jobqueue is needed, * no need to read from device for all pclusters in this queue. */ - q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, true); - qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head; + q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL); + q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, fg); - q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, forcefg); - qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head; - - return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], !forcefg)); + return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], *fg)); } static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl, @@ -1167,9 +1149,8 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl, qtail[JQ_BYPASS] = &pcl->next; } -static bool postsubmit_is_all_bypassed(struct z_erofs_unzip_io *q[], - unsigned int nr_bios, - bool force_fg) +static bool postsubmit_is_all_bypassed(struct z_erofs_decompressqueue *q[], + unsigned int nr_bios, bool force_fg) { /* * although background is preferred, no one is pending for submission. 
@@ -1178,19 +1159,19 @@ static bool postsubmit_is_all_bypassed(struct z_erofs_unzip_io *q[], if (force_fg || nr_bios) return false; - kvfree(container_of(q[JQ_SUBMIT], struct z_erofs_unzip_io_sb, io)); + kvfree(q[JQ_SUBMIT]); return true; } static bool z_erofs_vle_submit_all(struct super_block *sb, z_erofs_next_pcluster_t owned_head, struct list_head *pagepool, - struct z_erofs_unzip_io *fgq, - bool force_fg) + struct z_erofs_decompressqueue *fgq, + bool *force_fg) { struct erofs_sb_info *const sbi = EROFS_SB(sb); z_erofs_next_pcluster_t qtail[NR_JOBQUEUES]; - struct z_erofs_unzip_io *q[NR_JOBQUEUES]; + struct z_erofs_decompressqueue *q[NR_JOBQUEUES]; struct bio *bio; void *bi_private; /* since bio will be NULL, no need to initialize last_index */ @@ -1204,7 +1185,9 @@ static bool z_erofs_vle_submit_all(struct super_block *sb, force_submit = false; bio = NULL; nr_bios = 0; - bi_private = jobqueueset_init(sb, qtail, q, fgq, force_fg); + bi_private = jobqueueset_init(sb, q, fgq, force_fg); + qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head; + qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head; /* by default, all need io submission */ q[JQ_SUBMIT]->head = owned_head; @@ -1280,10 +1263,10 @@ skippage: if (bio) submit_bio(bio); - if (postsubmit_is_all_bypassed(q, nr_bios, force_fg)) + if (postsubmit_is_all_bypassed(q, nr_bios, *force_fg)) return true; - z_erofs_vle_unzip_kickoff(bi_private, nr_bios); + z_erofs_decompress_kickoff(q[JQ_SUBMIT], *force_fg, nr_bios); return true; } @@ -1292,14 +1275,14 @@ static void z_erofs_submit_and_unzip(struct super_block *sb, struct list_head *pagepool, bool force_fg) { - struct z_erofs_unzip_io io[NR_JOBQUEUES]; + struct z_erofs_decompressqueue io[NR_JOBQUEUES]; if (!z_erofs_vle_submit_all(sb, clt->owned_head, - pagepool, io, force_fg)) + pagepool, io, &force_fg)) return; /* decompress no I/O pclusters immediately */ - z_erofs_vle_unzip_all(sb, &io[JQ_BYPASS], pagepool); + z_erofs_vle_unzip_all(&io[JQ_BYPASS], pagepool); if (!force_fg) return; @@ -1309,7 +1292,7 @@ static void z_erofs_submit_and_unzip(struct super_block *sb, !atomic_read(&io[JQ_SUBMIT].pending_bios)); /* let's synchronous decompression */ - z_erofs_vle_unzip_all(sb, &io[JQ_SUBMIT], pagepool); + z_erofs_vle_unzip_all(&io[JQ_SUBMIT], pagepool); } static int z_erofs_vle_normalaccess_readpage(struct file *file, diff --git a/fs/erofs/zdata.h b/fs/erofs/zdata.h index faf950189bd7..7824f5563a55 100644 --- a/fs/erofs/zdata.h +++ b/fs/erofs/zdata.h @@ -84,7 +84,8 @@ struct z_erofs_pcluster { #define Z_EROFS_WORKGROUP_SIZE sizeof(struct z_erofs_pcluster) -struct z_erofs_unzip_io { +struct z_erofs_decompressqueue { + struct super_block *sb; atomic_t pending_bios; z_erofs_next_pcluster_t head; @@ -94,11 +95,6 @@ struct z_erofs_unzip_io { } u; }; -struct z_erofs_unzip_io_sb { - struct z_erofs_unzip_io io; - struct super_block *sb; -}; - #define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping) static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi, struct page *page) -- cgit From a93f8c36877b2ae9ba0ca07eda1933944dc5bcb8 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Tue, 8 Oct 2019 20:56:15 +0800 Subject: erofs: set iowait for sync decompression Tasks waiting on I/O for sync decompression are better marked as being in the I/O wait state.
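io_wait_event() is the I/O-accounting variant of wait_event(): it sleeps via io_schedule(), so the blocked task is flagged as in_iowait and the stall shows up as iowait time in tools such as top and iostat. A minimal sketch of the substitution (using a local queue pointer q for brevity):

	/* before: plain sleep, not accounted as I/O wait */
	wait_event(q->u.wait, !atomic_read(&q->pending_bios));

	/* after: same wake-up condition, but accounted as I/O wait */
	io_wait_event(q->u.wait, !atomic_read(&q->pending_bios));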
Link: https://lore.kernel.org/r/20191008125616.183715-5-gaoxiang25@huawei.com Reviewed-by: Chao Yu Signed-off-by: Gao Xiang --- fs/erofs/zdata.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index f6056ba80424..35bf6879d3a6 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -1288,8 +1288,8 @@ static void z_erofs_submit_and_unzip(struct super_block *sb, return; /* wait until all bios are completed */ - wait_event(io[JQ_SUBMIT].u.wait, - !atomic_read(&io[JQ_SUBMIT].pending_bios)); + io_wait_event(io[JQ_SUBMIT].u.wait, + !atomic_read(&io[JQ_SUBMIT].pending_bios)); /* let's synchronous decompression */ z_erofs_vle_unzip_all(&io[JQ_SUBMIT], pagepool); -- cgit From b858a4844cfba3d90a8d8ffd3a04d9557f76b60f Mon Sep 17 00:00:00 2001 From: Pratik Shinde Date: Mon, 4 Nov 2019 10:49:37 +0800 Subject: erofs: support superblock checksum Introduce the superblock checksum feature in order to verify the superblock at mount time. Note that the first 1024 bytes are ignored to allow for x86 boot sectors and other oddities. Link: https://lore.kernel.org/r/20191104024937.113939-1-gaoxiang25@huawei.com Signed-off-by: Pratik Shinde Reviewed-by: Chao Yu Cc: Dan Carpenter Signed-off-by: Gao Xiang --- fs/erofs/Kconfig | 1 + fs/erofs/erofs_fs.h | 3 ++- fs/erofs/internal.h | 1 + fs/erofs/super.c | 36 ++++++++++++++++++++++++++++++++++-- 4 files changed, 38 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/erofs/Kconfig b/fs/erofs/Kconfig index 9d634d3a1845..74b0aaa7114c 100644 --- a/fs/erofs/Kconfig +++ b/fs/erofs/Kconfig @@ -3,6 +3,7 @@ config EROFS_FS tristate "EROFS filesystem support" depends on BLOCK + select LIBCRC32C help EROFS (Enhanced Read-Only File System) is a lightweight read-only file system with modern designs (eg. page-sized diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h index b1ee5654750d..385fa49c7749 100644 --- a/fs/erofs/erofs_fs.h +++ b/fs/erofs/erofs_fs.h @@ -11,6 +11,8 @@ #define EROFS_SUPER_OFFSET 1024 +#define EROFS_FEATURE_COMPAT_SB_CHKSUM 0x00000001 + /* * Any bits that aren't in EROFS_ALL_FEATURE_INCOMPAT should * be incompatible with this kernel version. @@ -37,7 +39,6 @@ struct erofs_super_block { __u8 uuid[16]; /* 128-bit uuid for volume */ __u8 volume_name[16]; /* volume name */ __le32 feature_incompat; - __u8 reserved2[44]; }; diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index 0c1175a08e54..6864fa4893e9 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -85,6 +85,7 @@ struct erofs_sb_info { u8 uuid[16]; /* 128-bit uuid for volume */ u8 volume_name[16]; /* volume name */ + u32 feature_compat; u32 feature_incompat; unsigned int mount_opt; diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 0e369494f2f2..849c0bdf49d9 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -9,6 +9,7 @@ #include #include #include +#include #include "xattr.h" #define CREATE_TRACE_POINTS @@ -46,6 +47,30 @@ void _erofs_info(struct super_block *sb, const char *function, va_end(args); } +static int erofs_superblock_csum_verify(struct super_block *sb, void *sbdata) +{ + struct erofs_super_block *dsb; + u32 expected_crc, crc; + + dsb = kmemdup(sbdata + EROFS_SUPER_OFFSET, + EROFS_BLKSIZ - EROFS_SUPER_OFFSET, GFP_KERNEL); + if (!dsb) + return -ENOMEM; + + expected_crc = le32_to_cpu(dsb->checksum); + dsb->checksum = 0; + /* to allow for x86 boot sectors and other oddities. */ + crc = crc32c(~0, dsb, EROFS_BLKSIZ - EROFS_SUPER_OFFSET); + kfree(dsb); + + if (crc != expected_crc) { + erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected", + crc, expected_crc); + return -EBADMSG; + } + return 0; +} + static void erofs_inode_init_once(void *ptr) { struct erofs_inode *vi = ptr; @@ -112,7 +137,7 @@ static int erofs_read_superblock(struct super_block *sb) sbi = EROFS_SB(sb); - data = kmap_atomic(page); + data = kmap(page); dsb = (struct erofs_super_block *)(data + EROFS_SUPER_OFFSET); ret = -EINVAL; @@ -121,6 +146,13 @@ static int erofs_read_superblock(struct super_block *sb) goto out; } + sbi->feature_compat = le32_to_cpu(dsb->feature_compat); + if (sbi->feature_compat & EROFS_FEATURE_COMPAT_SB_CHKSUM) { + ret = erofs_superblock_csum_verify(sb, data); + if (ret) + goto out; + } + blkszbits = dsb->blkszbits; /* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */ if (blkszbits != LOG_BLOCK_SIZE) { @@ -155,7 +187,7 @@ static int erofs_read_superblock(struct super_block *sb) } ret = 0; out: - kunmap_atomic(data); + kunmap(page); put_page(page); return ret; } -- cgit From 0c638f70d7310f961a3482108c9d7ce15fcba8b3 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Fri, 8 Nov 2019 11:37:33 +0800 Subject: erofs: drop all vle annotations for runtime names VLE was an old informal name for fixed-sized output compression, which came from the published ATC '19 paper [1]. Drop those old annotations since erofs can handle all encoded clusters on a block-aligned basis, which is broader than fixed-sized output compression once the larger clustersize feature is fully implemented. Unaligned encoding won't be considered in EROFS since it's not friendly to in-place I/O and perhaps in-place decompression.

a) Fixed-sized output compression with 16KB pcluster:
 ___________________________________
|xxxxxxxx|xxxxxxxx|xxxxxxxx|xxxxxxxx|
|___ 0___|___ 1___|___ 2___|___ 3___|
          physical blocks

b) Block-aligned fixed-sized input compression with 16KB pcluster:
 ___________________________________
|xxxxxxxx|xxxxxxxx|xxxxxxxx|xxx00000|
|___ 0___|___ 1___|___ 2___|___ 3___|
          physical blocks

c) Block-unaligned fixed-sized input compression with 16KB compression unit:
 ____________________________________________
|..xxxxxx|xxxxxxxx|xxxxxxxx|xxxxxxxx|x.......|
|___ 0___|___ 1___|___ 2___|___ 3___|___ 4___|
          physical blocks

Pick better names for those as well.
[1] https://www.usenix.org/conference/atc19/presentation/gao Link: https://lore.kernel.org/r/20191108033733.63919-1-gaoxiang25@huawei.com Reviewed-by: Chao Yu Signed-off-by: Gao Xiang --- fs/erofs/internal.h | 4 +--- fs/erofs/zdata.c | 62 +++++++++++++++++++++++++---------------------------- fs/erofs/zmap.c | 28 ++++++++++++------------ 3 files changed, 44 insertions(+), 50 deletions(-) (limited to 'fs') diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index 6864fa4893e9..1ed5beff7d11 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -279,9 +279,7 @@ static inline unsigned int erofs_inode_datalayout(unsigned int value) extern const struct super_operations erofs_sops; extern const struct address_space_operations erofs_raw_access_aops; -#ifdef CONFIG_EROFS_FS_ZIP -extern const struct address_space_operations z_erofs_vle_normalaccess_aops; -#endif +extern const struct address_space_operations z_erofs_aops; /* * Logical to physical block mapping, used by erofs_map_blocks() diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 35bf6879d3a6..ca99425a4536 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -711,7 +711,7 @@ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io, queue_work(z_erofs_workqueue, &io->u.work); } -static void z_erofs_vle_read_endio(struct bio *bio) +static void z_erofs_decompressqueue_endio(struct bio *bio) { tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private); struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t); @@ -939,8 +939,8 @@ out: return err; } -static void z_erofs_vle_unzip_all(const struct z_erofs_decompressqueue *io, - struct list_head *pagepool) +static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io, + struct list_head *pagepool) { z_erofs_next_pcluster_t owned = io->head; @@ -960,14 +960,14 @@ static void z_erofs_vle_unzip_all(const struct z_erofs_decompressqueue *io, } } -static void z_erofs_vle_unzip_wq(struct work_struct *work) +static void z_erofs_decompressqueue_work(struct work_struct *work) { struct z_erofs_decompressqueue *bgq = container_of(work, struct z_erofs_decompressqueue, u.work); LIST_HEAD(pagepool); DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL_CLOSED); - z_erofs_vle_unzip_all(bgq, &pagepool); + z_erofs_decompress_queue(bgq, &pagepool); put_pages_list(&pagepool); kvfree(bgq); @@ -1097,7 +1097,7 @@ jobqueue_init(struct super_block *sb, *fg = true; goto fg_out; } - INIT_WORK(&q->u.work, z_erofs_vle_unzip_wq); + INIT_WORK(&q->u.work, z_erofs_decompressqueue_work); } else { fg_out: q = fgq; @@ -1163,11 +1163,11 @@ static bool postsubmit_is_all_bypassed(struct z_erofs_decompressqueue *q[], return true; } -static bool z_erofs_vle_submit_all(struct super_block *sb, - z_erofs_next_pcluster_t owned_head, - struct list_head *pagepool, - struct z_erofs_decompressqueue *fgq, - bool *force_fg) +static bool z_erofs_submit_queue(struct super_block *sb, + z_erofs_next_pcluster_t owned_head, + struct list_head *pagepool, + struct z_erofs_decompressqueue *fgq, + bool *force_fg) { struct erofs_sb_info *const sbi = EROFS_SB(sb); z_erofs_next_pcluster_t qtail[NR_JOBQUEUES]; @@ -1234,7 +1234,7 @@ submit_bio_retry: if (!bio) { bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES); - bio->bi_end_io = z_erofs_vle_read_endio; + bio->bi_end_io = z_erofs_decompressqueue_endio; bio_set_dev(bio, sb->s_bdev); bio->bi_iter.bi_sector = (sector_t)(first_index + i) << LOG_SECTORS_PER_BLOCK; @@ -1270,19 +1270,18 @@ skippage: return true; } -static void z_erofs_submit_and_unzip(struct super_block *sb, - struct 
z_erofs_collector *clt, - struct list_head *pagepool, - bool force_fg) +static void z_erofs_runqueue(struct super_block *sb, + struct z_erofs_collector *clt, + struct list_head *pagepool, bool force_fg) { struct z_erofs_decompressqueue io[NR_JOBQUEUES]; - if (!z_erofs_vle_submit_all(sb, clt->owned_head, - pagepool, io, &force_fg)) + if (!z_erofs_submit_queue(sb, clt->owned_head, + pagepool, io, &force_fg)) return; - /* decompress no I/O pclusters immediately */ - z_erofs_vle_unzip_all(&io[JQ_BYPASS], pagepool); + /* handle bypass queue (no i/o pclusters) immediately */ + z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool); if (!force_fg) return; @@ -1291,12 +1290,11 @@ static void z_erofs_submit_and_unzip(struct super_block *sb, io_wait_event(io[JQ_SUBMIT].u.wait, !atomic_read(&io[JQ_SUBMIT].pending_bios)); - /* let's synchronous decompression */ - z_erofs_vle_unzip_all(&io[JQ_SUBMIT], pagepool); + /* handle synchronous decompress queue in the caller context */ + z_erofs_decompress_queue(&io[JQ_SUBMIT], pagepool); } -static int z_erofs_vle_normalaccess_readpage(struct file *file, - struct page *page) +static int z_erofs_readpage(struct file *file, struct page *page) { struct inode *const inode = page->mapping->host; struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode); @@ -1311,7 +1309,7 @@ static int z_erofs_vle_normalaccess_readpage(struct file *file, (void)z_erofs_collector_end(&f.clt); /* if some compressed cluster ready, need submit them anyway */ - z_erofs_submit_and_unzip(inode->i_sb, &f.clt, &pagepool, true); + z_erofs_runqueue(inode->i_sb, &f.clt, &pagepool, true); if (err) erofs_err(inode->i_sb, "failed to read, err [%d]", err); @@ -1330,10 +1328,8 @@ static bool should_decompress_synchronously(struct erofs_sb_info *sbi, return nr <= sbi->max_sync_decompress_pages; } -static int z_erofs_vle_normalaccess_readpages(struct file *filp, - struct address_space *mapping, - struct list_head *pages, - unsigned int nr_pages) +static int z_erofs_readpages(struct file *filp, struct address_space *mapping, + struct list_head *pages, unsigned int nr_pages) { struct inode *const inode = mapping->host; struct erofs_sb_info *const sbi = EROFS_I_SB(inode); @@ -1388,7 +1384,7 @@ static int z_erofs_vle_normalaccess_readpages(struct file *filp, (void)z_erofs_collector_end(&f.clt); - z_erofs_submit_and_unzip(inode->i_sb, &f.clt, &pagepool, sync); + z_erofs_runqueue(inode->i_sb, &f.clt, &pagepool, sync); if (f.map.mpage) put_page(f.map.mpage); @@ -1398,8 +1394,8 @@ static int z_erofs_vle_normalaccess_readpages(struct file *filp, return 0; } -const struct address_space_operations z_erofs_vle_normalaccess_aops = { - .readpage = z_erofs_vle_normalaccess_readpage, - .readpages = z_erofs_vle_normalaccess_readpages, +const struct address_space_operations z_erofs_aops = { + .readpage = z_erofs_readpage, + .readpages = z_erofs_readpages, }; diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c index 6a26c293ae2d..736db3a4cdef 100644 --- a/fs/erofs/zmap.c +++ b/fs/erofs/zmap.c @@ -22,11 +22,11 @@ int z_erofs_fill_inode(struct inode *inode) set_bit(EROFS_I_Z_INITED_BIT, &vi->flags); } - inode->i_mapping->a_ops = &z_erofs_vle_normalaccess_aops; + inode->i_mapping->a_ops = &z_erofs_aops; return 0; } -static int fill_inode_lazy(struct inode *inode) +static int z_erofs_fill_inode_lazy(struct inode *inode) { struct erofs_inode *const vi = EROFS_I(inode); struct super_block *const sb = inode->i_sb; @@ -138,8 +138,8 @@ static int z_erofs_reload_indexes(struct z_erofs_maprecorder *m, return 0; } -static int 
vle_legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m, - unsigned long lcn) +static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m, + unsigned long lcn) { struct inode *const inode = m->inode; struct erofs_inode *const vi = EROFS_I(inode); @@ -311,13 +311,13 @@ out: return unpack_compacted_index(m, amortizedshift, erofs_blkoff(pos)); } -static int vle_load_cluster_from_disk(struct z_erofs_maprecorder *m, - unsigned int lcn) +static int z_erofs_load_cluster_from_disk(struct z_erofs_maprecorder *m, + unsigned int lcn) { const unsigned int datamode = EROFS_I(m->inode)->datalayout; if (datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY) - return vle_legacy_load_cluster_from_disk(m, lcn); + return legacy_load_cluster_from_disk(m, lcn); if (datamode == EROFS_INODE_FLAT_COMPRESSION) return compacted_load_cluster_from_disk(m, lcn); @@ -325,8 +325,8 @@ static int vle_load_cluster_from_disk(struct z_erofs_maprecorder *m, return -EINVAL; } -static int vle_extent_lookback(struct z_erofs_maprecorder *m, - unsigned int lookback_distance) +static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m, + unsigned int lookback_distance) { struct erofs_inode *const vi = EROFS_I(m->inode); struct erofs_map_blocks *const map = m->map; @@ -343,7 +343,7 @@ static int vle_extent_lookback(struct z_erofs_maprecorder *m, /* load extent head logical cluster if needed */ lcn -= lookback_distance; - err = vle_load_cluster_from_disk(m, lcn); + err = z_erofs_load_cluster_from_disk(m, lcn); if (err) return err; @@ -356,7 +356,7 @@ static int vle_extent_lookback(struct z_erofs_maprecorder *m, DBG_BUGON(1); return -EFSCORRUPTED; } - return vle_extent_lookback(m, m->delta[0]); + return z_erofs_extent_lookback(m, m->delta[0]); case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN: map->m_flags &= ~EROFS_MAP_ZIPPED; /* fallthrough */ @@ -396,7 +396,7 @@ int z_erofs_map_blocks_iter(struct inode *inode, goto out; } - err = fill_inode_lazy(inode); + err = z_erofs_fill_inode_lazy(inode); if (err) goto out; @@ -405,7 +405,7 @@ int z_erofs_map_blocks_iter(struct inode *inode, m.lcn = ofs >> lclusterbits; endoff = ofs & ((1 << lclusterbits) - 1); - err = vle_load_cluster_from_disk(&m, m.lcn); + err = z_erofs_load_cluster_from_disk(&m, m.lcn); if (err) goto unmap_out; @@ -436,7 +436,7 @@ int z_erofs_map_blocks_iter(struct inode *inode, /* fallthrough */ case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD: /* get the correspoinding first chunk */ - err = vle_extent_lookback(&m, m.delta[0]); + err = z_erofs_extent_lookback(&m, m.delta[0]); if (err) goto unmap_out; break; -- cgit From 3dcb5fa23e16ef50b09e7a56b47d8e4c04ca09c0 Mon Sep 17 00:00:00 2001 From: Chengguang Xu Date: Tue, 19 Nov 2019 19:50:49 +0800 Subject: erofs: remove unnecessary output in erofs_show_options() We already handle the cache_strategy option carefully, so an incorrect setting cannot get past option parsing. Meanwhile, printing 'cache_strategy=(unknown)' can cause a failure on remount.
Link: https://lore.kernel.org/r/20191119115049.3401-1-cgxu519@mykernel.net Signed-off-by: Chengguang Xu Reviewed-by: Gao Xiang Reviewed-by: Chao Yu Signed-off-by: Gao Xiang --- fs/erofs/super.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'fs') diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 849c0bdf49d9..057e6d7b5b7f 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -598,9 +598,6 @@ static int erofs_show_options(struct seq_file *seq, struct dentry *root) seq_puts(seq, ",cache_strategy=readahead"); } else if (sbi->cache_strategy == EROFS_ZIP_CACHE_READAROUND) { seq_puts(seq, ",cache_strategy=readaround"); - } else { - seq_puts(seq, ",cache_strategy=(unknown)"); - DBG_BUGON(1); } #endif return 0; -- cgit