Diffstat (limited to 'fs/squashfs')
-rw-r--r--  fs/squashfs/Kconfig          |  27
-rw-r--r--  fs/squashfs/block.c          |  57
-rw-r--r--  fs/squashfs/cache.c          |  12
-rw-r--r--  fs/squashfs/file.c           | 234
-rw-r--r--  fs/squashfs/file_cache.c     |   6
-rw-r--r--  fs/squashfs/file_direct.c    |  11
-rw-r--r--  fs/squashfs/inode.c          |  41
-rw-r--r--  fs/squashfs/squashfs.h       |  14
-rw-r--r--  fs/squashfs/squashfs_fs.h    |   1
-rw-r--r--  fs/squashfs/squashfs_fs_i.h  |   2
-rw-r--r--  fs/squashfs/super.c          |  30
11 files changed, 323 insertions, 112 deletions
diff --git a/fs/squashfs/Kconfig b/fs/squashfs/Kconfig
index 60fc98bdf421..a9602aae21ef 100644
--- a/fs/squashfs/Kconfig
+++ b/fs/squashfs/Kconfig
@@ -5,8 +5,8 @@ config SQUASHFS
     help
       Saying Y here includes support for SquashFS 4.0 (a Compressed
       Read-Only File System).  Squashfs is a highly compressed read-only
-      filesystem for Linux.  It uses zlib, lzo or xz compression to
-      compress both files, inodes and directories.  Inodes in the system
+      filesystem for Linux.  It uses zlib, lz4, lzo, xz or zstd compression
+      to compress both files, inodes and directories.  Inodes in the system
       are very small and all blocks are packed to minimise data overhead.
       Block sizes greater than 4K are supported up to a maximum of 1 Mbytes
       (default block size 128K).  SquashFS 4.0 supports 64 bit filesystems
@@ -16,7 +16,7 @@ config SQUASHFS
       Squashfs is intended for general read-only filesystem use, for
       archival use (i.e. in cases where a .tar.gz file may be used), and in
       embedded systems where low overhead is needed.  Further information
-      and tools are available from http://squashfs.sourceforge.net.
+      and tools are available from github.com/plougher/squashfs-tools.
 
       If you want to compile this as a module ( = code which can be
       inserted in and removed from the running kernel whenever you want),
@@ -149,6 +149,27 @@ config SQUASHFS_XATTR
 
       If unsure, say N.
 
+config SQUASHFS_COMP_CACHE_FULL
+    bool "Enable full caching of compressed blocks"
+    depends on SQUASHFS
+    default n
+    help
+      This option enables caching of all compressed blocks. Without caching,
+      repeated reads of the same files trigger excessive disk I/O, significantly
+      reducing performance in workloads like fio-based benchmarks.
+
+      For example, fio tests (iodepth=1, numjobs=1, ioengine=psync) show:
+      With caching:    IOPS=2223, BW=278MiB/s (291MB/s)
+      Without caching: IOPS=815, BW=102MiB/s (107MB/s)
+
+      Enabling this option restores performance to pre-regression levels by
+      caching all compressed blocks in the page cache, reducing disk I/O for
+      repeated reads. However, this increases memory usage, which may be a
+      concern in memory-constrained environments.
+
+      Enable this option if your workload involves frequent repeated reads and
+      memory usage is not a limiting factor. If unsure, say N.
+
 config SQUASHFS_ZLIB
     bool "Include support for ZLIB compressed file systems"
     depends on SQUASHFS
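The performance claim in the new help text is about repeated reads of the same data. As an illustration only (the file path and timing loop below are hypothetical, not part of the patch or of fio), a minimal user-space program that reads the same file twice shows the effect: the second pass is the one that benefits when compressed blocks stay in the page cache.

/* repeated_read.c - time two sequential passes over the same file.
 * Build: cc -O2 -o repeated_read repeated_read.c
 * The path below is a placeholder for any file on a squashfs mount.
 */
#include <fcntl.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static double read_pass(int fd)
{
    static char buf[1 << 17];   /* 128K, the default squashfs block size */
    struct timespec t0, t1;
    ssize_t n;

    lseek(fd, 0, SEEK_SET);
    clock_gettime(CLOCK_MONOTONIC, &t0);
    while ((n = read(fd, buf, sizeof(buf))) > 0)
        ;
    clock_gettime(CLOCK_MONOTONIC, &t1);
    return (t1.tv_sec - t0.tv_sec) + (t1.tv_nsec - t0.tv_nsec) / 1e9;
}

int main(void)
{
    int fd = open("/mnt/squashfs/testfile", O_RDONLY);  /* placeholder path */

    if (fd < 0) {
        perror("open");
        return 1;
    }
    printf("pass 1: %.3fs\n", read_pass(fd));
    printf("pass 2: %.3fs\n", read_pass(fd)); /* benefits from cached compressed blocks */
    close(fd);
    return 0;
}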
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index 2dc730800f44..a05e3793f93a 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -80,19 +80,22 @@ static int squashfs_bio_read_cached(struct bio *fullbio,
         struct address_space *cache_mapping, u64 index, int length,
         u64 read_start, u64 read_end, int page_count)
 {
-    struct page *head_to_cache = NULL, *tail_to_cache = NULL;
+    struct folio *head_to_cache = NULL, *tail_to_cache = NULL;
     struct block_device *bdev = fullbio->bi_bdev;
     int start_idx = 0, end_idx = 0;
-    struct bvec_iter_all iter_all;
+    struct folio_iter fi;
     struct bio *bio = NULL;
-    struct bio_vec *bv;
     int idx = 0;
     int err = 0;
+#ifdef CONFIG_SQUASHFS_COMP_CACHE_FULL
+    struct folio **cache_folios = kmalloc_array(page_count,
+            sizeof(*cache_folios), GFP_KERNEL | __GFP_ZERO);
+#endif
 
-    bio_for_each_segment_all(bv, fullbio, iter_all) {
-        struct page *page = bv->bv_page;
+    bio_for_each_folio_all(fi, fullbio) {
+        struct folio *folio = fi.folio;
 
-        if (page->mapping == cache_mapping) {
+        if (folio->mapping == cache_mapping) {
             idx++;
             continue;
         }
@@ -107,9 +110,14 @@ static int squashfs_bio_read_cached(struct bio *fullbio,
          * adjacent blocks.
          */
         if (idx == 0 && index != read_start)
-            head_to_cache = page;
+            head_to_cache = folio;
         else if (idx == page_count - 1 && index + length != read_end)
-            tail_to_cache = page;
+            tail_to_cache = folio;
+#ifdef CONFIG_SQUASHFS_COMP_CACHE_FULL
+        /* Cache all pages in the BIO for repeated reads */
+        else if (cache_folios)
+            cache_folios[idx] = folio;
+#endif
 
         if (!bio || idx != end_idx) {
             struct bio *new = bio_alloc_clone(bdev, fullbio,
@@ -141,28 +149,47 @@ static int squashfs_bio_read_cached(struct bio *fullbio,
         return err;
 
     if (head_to_cache) {
-        int ret = add_to_page_cache_lru(head_to_cache, cache_mapping,
+        int ret = filemap_add_folio(cache_mapping, head_to_cache,
                         read_start >> PAGE_SHIFT,
                         GFP_NOIO);
 
         if (!ret) {
-            SetPageUptodate(head_to_cache);
-            unlock_page(head_to_cache);
+            folio_mark_uptodate(head_to_cache);
+            folio_unlock(head_to_cache);
         }
     }
 
     if (tail_to_cache) {
-        int ret = add_to_page_cache_lru(tail_to_cache, cache_mapping,
+        int ret = filemap_add_folio(cache_mapping, tail_to_cache,
                         (read_end >> PAGE_SHIFT) - 1,
                         GFP_NOIO);
 
         if (!ret) {
-            SetPageUptodate(tail_to_cache);
-            unlock_page(tail_to_cache);
+            folio_mark_uptodate(tail_to_cache);
+            folio_unlock(tail_to_cache);
         }
     }
 
+#ifdef CONFIG_SQUASHFS_COMP_CACHE_FULL
+    if (!cache_folios)
+        goto out;
+
+    for (idx = 0; idx < page_count; idx++) {
+        if (!cache_folios[idx])
+            continue;
+        int ret = filemap_add_folio(cache_mapping, cache_folios[idx],
+                        (read_start >> PAGE_SHIFT) + idx,
+                        GFP_NOIO);
+
+        if (!ret) {
+            folio_mark_uptodate(cache_folios[idx]);
+            folio_unlock(cache_folios[idx]);
+        }
+    }
+    kfree(cache_folios);
+out:
+#endif
     return 0;
 }
 
@@ -204,7 +231,7 @@ static int squashfs_bio_read(struct super_block *sb, u64 index, int length,
     bio = bio_kmalloc(page_count, GFP_NOIO);
     if (!bio)
         return -ENOMEM;
-    bio_init(bio, sb->s_bdev, bio->bi_inline_vecs, page_count, REQ_OP_READ);
+    bio_init_inline(bio, sb->s_bdev, page_count, REQ_OP_READ);
     bio->bi_iter.bi_sector = block * (msblk->devblksize >> SECTOR_SHIFT);
 
     for (i = 0; i < page_count; ++i) {
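Reduced to its core, the new CONFIG_SQUASHFS_COMP_CACHE_FULL path remembers every folio the bio read from the device and, once the I/O has completed, inserts each one into the cache mapping. The sketch below restates that loop in isolation using the same kernel APIs as the hunk above; the function name and the trimmed error handling are illustrative assumptions, not code from the patch.

#include <linux/pagemap.h>

/* Sketch: insert the folios just read into cache_mapping so a later read
 * of the same compressed block hits the page cache instead of the device.
 * Mirrors the calls used in the patch; error handling trimmed.
 */
static void cache_compressed_folios(struct address_space *cache_mapping,
                    struct folio **folios, int count,
                    pgoff_t first_index)
{
    int i;

    for (i = 0; i < count; i++) {
        if (!folios[i])
            continue;
        /* on success the folio is owned by the page cache and still locked */
        if (!filemap_add_folio(cache_mapping, folios[i],
                       first_index + i, GFP_NOIO)) {
            folio_mark_uptodate(folios[i]);
            folio_unlock(folios[i]);
        }
    }
}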
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c
index 5062326d0efb..181260e72680 100644
--- a/fs/squashfs/cache.c
+++ b/fs/squashfs/cache.c
@@ -198,7 +198,7 @@ void squashfs_cache_delete(struct squashfs_cache *cache)
 {
     int i, j;
 
-    if (cache == NULL)
+    if (IS_ERR(cache) || cache == NULL)
         return;
 
     for (i = 0; i < cache->entries; i++) {
@@ -224,11 +224,15 @@ struct squashfs_cache *squashfs_cache_init(char *name, int entries,
     int block_size)
 {
     int i, j;
-    struct squashfs_cache *cache = kzalloc(sizeof(*cache), GFP_KERNEL);
+    struct squashfs_cache *cache;
 
+    if (entries == 0)
+        return NULL;
+
+    cache = kzalloc(sizeof(*cache), GFP_KERNEL);
     if (cache == NULL) {
         ERROR("Failed to allocate %s cache\n", name);
-        return NULL;
+        return ERR_PTR(-ENOMEM);
     }
 
     cache->entry = kcalloc(entries, sizeof(*(cache->entry)), GFP_KERNEL);
@@ -281,7 +285,7 @@ struct squashfs_cache *squashfs_cache_init(char *name, int entries,
 
 cleanup:
     squashfs_cache_delete(cache);
-    return NULL;
+    return ERR_PTR(-ENOMEM);
 }
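squashfs_cache_init() now has a three-way contract: NULL for a deliberately empty cache (entries == 0), ERR_PTR(-ENOMEM) on allocation failure, and a valid pointer otherwise, with squashfs_cache_delete() accepting all three. A hedged caller-side sketch of that contract, assuming the squashfs-internal headers are available; the helper name is made up, and the real adoption is in the super.c hunk further down.

#include <linux/err.h>
/* assumes the squashfs-internal "squashfs_fs_sb.h" and "squashfs.h" headers */

static int setup_data_cache(struct squashfs_sb_info *msblk, int entries,
                int block_size)
{
    msblk->read_page = squashfs_cache_init("data", entries, block_size);

    if (IS_ERR(msblk->read_page))       /* real allocation failure */
        return PTR_ERR(msblk->read_page);

    /* NULL simply means "no cache configured" (entries == 0) and is not
     * an error; squashfs_cache_delete() now tolerates both cases. */
    return 0;
}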
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index 21aaa96856c1..1582e0637a7e 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -307,7 +307,8 @@ static int fill_meta_index(struct inode *inode, int index,
 all_done:
     *index_block = cur_index_block;
     *index_offset = cur_offset;
-    *data_block = cur_data_block;
+    if (data_block)
+        *data_block = cur_data_block;
 
     /*
      * Scale cache index (cache slot entry) to index
@@ -324,17 +325,15 @@ failed:
  * Get the on-disk location and compressed size of the datablock
  * specified by index.  Fill_meta_index() does most of the work.
  */
-static int read_blocklist(struct inode *inode, int index, u64 *block)
+static int read_blocklist_ptrs(struct inode *inode, int index, u64 *start,
+        int *offset, u64 *block)
 {
-    u64 start;
     long long blks;
-    int offset;
     __le32 size;
-    int res = fill_meta_index(inode, index, &start, &offset, block);
+    int res = fill_meta_index(inode, index, start, offset, block);
 
-    TRACE("read_blocklist: res %d, index %d, start 0x%llx, offset"
-        " 0x%x, block 0x%llx\n", res, index, start, offset,
-        *block);
+    TRACE("read_blocklist: res %d, index %d, start 0x%llx, offset 0x%x, block 0x%llx\n",
+        res, index, *start, *offset, block ? *block : 0);
 
     if (res < 0)
         return res;
@@ -346,45 +345,58 @@ static int read_blocklist(struct inode *inode, int index, u64 *block)
      * extra block indexes needed.
      */
     if (res < index) {
-        blks = read_indexes(inode->i_sb, index - res, &start, &offset);
+        blks = read_indexes(inode->i_sb, index - res, start, offset);
         if (blks < 0)
             return (int) blks;
-        *block += blks;
+        if (block)
+            *block += blks;
     }
 
     /*
      * Read length of block specified by index.
      */
-    res = squashfs_read_metadata(inode->i_sb, &size, &start, &offset,
+    res = squashfs_read_metadata(inode->i_sb, &size, start, offset,
             sizeof(size));
     if (res < 0)
         return res;
     return squashfs_block_size(size);
 }
 
-void squashfs_fill_page(struct page *page, struct squashfs_cache_entry *buffer, int offset, int avail)
+static inline int read_blocklist(struct inode *inode, int index, u64 *block)
 {
-    int copied;
+    u64 start;
+    int offset;
+
+    return read_blocklist_ptrs(inode, index, &start, &offset, block);
+}
+
+static bool squashfs_fill_page(struct folio *folio,
+        struct squashfs_cache_entry *buffer, size_t offset,
+        size_t avail)
+{
+    size_t copied;
     void *pageaddr;
 
-    pageaddr = kmap_atomic(page);
+    pageaddr = kmap_local_folio(folio, 0);
     copied = squashfs_copy_data(pageaddr, buffer, offset, avail);
     memset(pageaddr + copied, 0, PAGE_SIZE - copied);
-    kunmap_atomic(pageaddr);
+    kunmap_local(pageaddr);
+
+    flush_dcache_folio(folio);
 
-    flush_dcache_page(page);
-    if (copied == avail)
-        SetPageUptodate(page);
+    return copied == avail;
 }
 
 /* Copy data into page cache */
-void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
-    int bytes, int offset)
+void squashfs_copy_cache(struct folio *folio,
+        struct squashfs_cache_entry *buffer, size_t bytes,
+        size_t offset)
 {
-    struct inode *inode = page->mapping->host;
+    struct address_space *mapping = folio->mapping;
+    struct inode *inode = mapping->host;
     struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
     int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
-    int start_index = page->index & ~mask, end_index = start_index | mask;
+    int start_index = folio->index & ~mask, end_index = start_index | mask;
@@ -394,32 +406,35 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
      */
     for (i = start_index; i <= end_index && bytes > 0; i++,
             bytes -= PAGE_SIZE, offset += PAGE_SIZE) {
-        struct page *push_page;
-        int avail = buffer ? min_t(int, bytes, PAGE_SIZE) : 0;
+        struct folio *push_folio;
+        size_t avail = buffer ? min(bytes, PAGE_SIZE) : 0;
+        bool updated = false;
 
-        TRACE("bytes %d, i %d, available_bytes %d\n", bytes, i, avail);
+        TRACE("bytes %zu, i %d, available_bytes %zu\n", bytes, i, avail);
 
-        push_page = (i == page->index) ? page :
-            grab_cache_page_nowait(page->mapping, i);
+        push_folio = (i == folio->index) ? folio :
+            __filemap_get_folio(mapping, i,
+                    FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
+                    mapping_gfp_mask(mapping));
 
-        if (!push_page)
+        if (IS_ERR(push_folio))
             continue;
 
-        if (PageUptodate(push_page))
-            goto skip_page;
+        if (folio_test_uptodate(push_folio))
+            goto skip_folio;
 
-        squashfs_fill_page(push_page, buffer, offset, avail);
-skip_page:
-        unlock_page(push_page);
-        if (i != page->index)
-            put_page(push_page);
+        updated = squashfs_fill_page(push_folio, buffer, offset, avail);
+skip_folio:
+        folio_end_read(push_folio, updated);
+        if (i != folio->index)
+            folio_put(push_folio);
     }
 }
 
 /* Read datablock stored packed inside a fragment (tail-end packed block) */
-static int squashfs_readpage_fragment(struct page *page, int expected)
+static int squashfs_readpage_fragment(struct folio *folio, int expected)
 {
-    struct inode *inode = page->mapping->host;
+    struct inode *inode = folio->mapping->host;
     struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
         squashfs_i(inode)->fragment_block,
         squashfs_i(inode)->fragment_size);
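The copy loop above swaps grab_cache_page_nowait() for __filemap_get_folio() with an explicit flag set, so a missing folio is created without blocking and contention simply skips the index. A minimal sketch of just that lookup, under the assumption that the surrounding locking rules stay as in squashfs_copy_cache(); the wrapper name is illustrative.

#include <linux/pagemap.h>

/* Sketch: non-blocking "create it if missing, else skip" folio lookup,
 * as used by squashfs_copy_cache() after the conversion.  On contention
 * or allocation failure an ERR_PTR is returned and the caller skips
 * that index rather than sleeping.
 */
static struct folio *get_push_folio(struct address_space *mapping, pgoff_t index)
{
    return __filemap_get_folio(mapping, index,
            FGP_LOCK | FGP_CREAT | FGP_NOFS | FGP_NOWAIT,
            mapping_gfp_mask(mapping));
}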
@@ -430,36 +445,34 @@ static int squashfs_readpage_fragment(struct page *page, int expected)
             squashfs_i(inode)->fragment_block,
             squashfs_i(inode)->fragment_size);
     else
-        squashfs_copy_cache(page, buffer, expected,
+        squashfs_copy_cache(folio, buffer, expected,
             squashfs_i(inode)->fragment_offset);
 
     squashfs_cache_put(buffer);
     return res;
 }
 
-static int squashfs_readpage_sparse(struct page *page, int expected)
+static int squashfs_readpage_sparse(struct folio *folio, int expected)
 {
-    squashfs_copy_cache(page, NULL, expected, 0);
+    squashfs_copy_cache(folio, NULL, expected, 0);
     return 0;
 }
 
 static int squashfs_read_folio(struct file *file, struct folio *folio)
 {
-    struct page *page = &folio->page;
-    struct inode *inode = page->mapping->host;
+    struct inode *inode = folio->mapping->host;
     struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
-    int index = page->index >> (msblk->block_log - PAGE_SHIFT);
+    int index = folio->index >> (msblk->block_log - PAGE_SHIFT);
     int file_end = i_size_read(inode) >> msblk->block_log;
     int expected = index == file_end ?
             (i_size_read(inode) & (msblk->block_size - 1)) :
             msblk->block_size;
     int res = 0;
-    void *pageaddr;
 
     TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
-            page->index, squashfs_i(inode)->start);
+            folio->index, squashfs_i(inode)->start);
 
-    if (page->index >= ((i_size_read(inode) + PAGE_SIZE - 1) >>
+    if (folio->index >= ((i_size_read(inode) + PAGE_SIZE - 1) >>
                     PAGE_SHIFT))
         goto out;
@@ -472,31 +485,25 @@ static int squashfs_read_folio(struct file *file, struct folio *folio)
             goto out;
 
         if (res == 0)
-            res = squashfs_readpage_sparse(page, expected);
+            res = squashfs_readpage_sparse(folio, expected);
         else
-            res = squashfs_readpage_block(page, block, res, expected);
+            res = squashfs_readpage_block(folio, block, res, expected);
     } else
-        res = squashfs_readpage_fragment(page, expected);
+        res = squashfs_readpage_fragment(folio, expected);
 
     if (!res)
         return 0;
 
 out:
-    pageaddr = kmap_atomic(page);
-    memset(pageaddr, 0, PAGE_SIZE);
-    kunmap_atomic(pageaddr);
-    flush_dcache_page(page);
-    if (res == 0)
-        SetPageUptodate(page);
-    unlock_page(page);
+    folio_zero_segment(folio, 0, folio_size(folio));
+    folio_end_read(folio, res == 0);
 
     return res;
 }
 
-static int squashfs_readahead_fragment(struct page **page,
+static int squashfs_readahead_fragment(struct inode *inode, struct page **page,
     unsigned int pages, unsigned int expected, loff_t start)
 {
-    struct inode *inode = page[0]->mapping->host;
     struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
         squashfs_i(inode)->fragment_block,
         squashfs_i(inode)->fragment_size);
@@ -605,8 +612,8 @@ static void squashfs_readahead(struct readahead_control *ractl)
 
         if (start >> msblk->block_log == file_end &&
                 squashfs_i(inode)->fragment_block != SQUASHFS_INVALID_BLK) {
-            res = squashfs_readahead_fragment(pages, nr_pages,
-                expected, start);
+            res = squashfs_readahead_fragment(inode, pages,
+                nr_pages, expected, start);
             if (res)
                 goto skip_pages;
             continue;
@@ -659,7 +666,114 @@ skip_pages:
     kfree(pages);
 }
 
+static loff_t seek_hole_data(struct file *file, loff_t offset, int whence)
+{
+    struct inode *inode = file->f_mapping->host;
+    struct super_block *sb = inode->i_sb;
+    struct squashfs_sb_info *msblk = sb->s_fs_info;
+    u64 start, index = offset >> msblk->block_log;
+    u64 file_end = (i_size_read(inode) + msblk->block_size - 1) >> msblk->block_log;
+    int s_offset, length;
+    __le32 *blist = NULL;
+
+    /* reject offset if negative or beyond file end */
+    if ((unsigned long long)offset >= i_size_read(inode))
+        return -ENXIO;
+
+    /* is offset within tailend and is tailend packed into a fragment? */
+    if (index + 1 == file_end &&
+            squashfs_i(inode)->fragment_block != SQUASHFS_INVALID_BLK) {
+        if (whence == SEEK_DATA)
+            return offset;
+
+        /* there is an implicit hole at the end of any file */
+        return i_size_read(inode);
+    }
+
+    length = read_blocklist_ptrs(inode, index, &start, &s_offset, NULL);
+    if (length < 0)
+        return length;
+
+    /* nothing more to do if offset matches desired whence value */
+    if ((length == 0 && whence == SEEK_HOLE) ||
+            (length && whence == SEEK_DATA))
+        return offset;
+
+    /* skip scanning forwards if we're at file end */
+    if (++ index == file_end)
+        goto not_found;
+
+    blist = kmalloc(SQUASHFS_SCAN_INDEXES << 2, GFP_KERNEL);
+    if (blist == NULL) {
+        ERROR("%s: Failed to allocate block_list\n", __func__);
+        return -ENOMEM;
+    }
+
+    while (index < file_end) {
+        int i, indexes = min(file_end - index, SQUASHFS_SCAN_INDEXES);
+
+        offset = squashfs_read_metadata(sb, blist, &start, &s_offset, indexes << 2);
+        if (offset < 0)
+            goto finished;
+
+        for (i = 0; i < indexes; i++) {
+            length = squashfs_block_size(blist[i]);
+            if (length < 0) {
+                offset = length;
+                goto finished;
+            }
+
+            /* does this block match desired whence value? */
+            if ((length == 0 && whence == SEEK_HOLE) ||
+                    (length && whence == SEEK_DATA)) {
+                offset = (index + i) << msblk->block_log;
+                goto finished;
+            }
+        }
+
+        index += indexes;
+    }
+
+not_found:
+    /* whence value determines what happens */
+    if (whence == SEEK_DATA)
+        offset = -ENXIO;
+    else
+        /* there is an implicit hole at the end of any file */
+        offset = i_size_read(inode);
+
+finished:
+    kfree(blist);
+    return offset;
+}
+
+static loff_t squashfs_llseek(struct file *file, loff_t offset, int whence)
+{
+    struct inode *inode = file->f_mapping->host;
+
+    switch (whence) {
+    default:
+        return generic_file_llseek(file, offset, whence);
+    case SEEK_DATA:
+    case SEEK_HOLE:
+        offset = seek_hole_data(file, offset, whence);
+        break;
+    }
+
+    if (offset < 0)
+        return offset;
+
+    return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
+}
+
 const struct address_space_operations squashfs_aops = {
     .read_folio = squashfs_read_folio,
     .readahead = squashfs_readahead
 };
+
+const struct file_operations squashfs_file_operations = {
+    .llseek = squashfs_llseek,
+    .read_iter = generic_file_read_iter,
+    .mmap_prepare = generic_file_readonly_mmap_prepare,
+    .splice_read = filemap_splice_read
+};
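With squashfs_file_operations wired up, SEEK_DATA and SEEK_HOLE become visible to user space through plain lseek(2). A small, self-contained program that walks a file's data extents (the default path is a placeholder) would exercise the new seek_hole_data() path:

#define _GNU_SOURCE     /* for SEEK_DATA / SEEK_HOLE */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
    const char *path = argc > 1 ? argv[1] : "/mnt/squashfs/sparse-file";
    int fd = open(path, O_RDONLY);
    off_t off = 0, end;

    if (fd < 0) {
        perror("open");
        return 1;
    }
    end = lseek(fd, 0, SEEK_END);

    while (off < end) {
        off_t data = lseek(fd, off, SEEK_DATA);
        off_t hole;

        if (data < 0)       /* no more data: implicit hole up to EOF */
            break;
        hole = lseek(fd, data, SEEK_HOLE);
        printf("data: %lld..%lld\n", (long long)data, (long long)hole);
        off = hole;
    }
    close(fd);
    return 0;
}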
diff --git a/fs/squashfs/file_cache.c b/fs/squashfs/file_cache.c
index 54c17b7c85fd..40e59a43d098 100644
--- a/fs/squashfs/file_cache.c
+++ b/fs/squashfs/file_cache.c
@@ -18,9 +18,9 @@
 #include "squashfs.h"
 
 /* Read separately compressed datablock and memcopy into page cache */
-int squashfs_readpage_block(struct page *page, u64 block, int bsize, int expected)
+int squashfs_readpage_block(struct folio *folio, u64 block, int bsize, int expected)
 {
-    struct inode *i = page->mapping->host;
+    struct inode *i = folio->mapping->host;
     struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
         block, bsize);
     int res = buffer->error;
@@ -29,7 +29,7 @@ int squashfs_readpage_block(struct page *page, u64 block, int bsize, int expecte
         ERROR("Unable to read page, block %llx, size %x\n", block,
             bsize);
     else
-        squashfs_copy_cache(page, buffer, expected, 0);
+        squashfs_copy_cache(folio, buffer, expected, 0);
 
     squashfs_cache_put(buffer);
     return res;
diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c
index d19d4db74af8..2c3e809d6891 100644
--- a/fs/squashfs/file_direct.c
+++ b/fs/squashfs/file_direct.c
@@ -19,12 +19,11 @@
 #include "page_actor.h"
 
 /* Read separately compressed datablock directly into page cache */
-int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
-    int expected)
-
+int squashfs_readpage_block(struct folio *folio, u64 block, int bsize,
+        int expected)
 {
-    struct folio *folio = page_folio(target_page);
-    struct inode *inode = target_page->mapping->host;
+    struct page *target_page = &folio->page;
+    struct inode *inode = folio->mapping->host;
     struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
     loff_t file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT;
     int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
@@ -48,7 +47,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
     /* Try to grab all the pages covered by the Squashfs block */
     for (i = 0, index = start_index; index <= end_index; index++) {
         page[i] = (index == folio->index) ? target_page :
-            grab_cache_page_nowait(target_page->mapping, index);
+            grab_cache_page_nowait(folio->mapping, index);
 
         if (page[i] == NULL)
             continue;
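Both backends now share the folio-based squashfs_readpage_block() prototype, and file_direct.c keeps its struct page array by taking the folio's head page. The round trip between the two views, which the hunk relies on, can be shown in a few lines (standalone sketch, not from the patch):

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Sketch: during a gradual folio conversion the two views of the same
 * memory can be bridged in both directions. */
static void folio_page_bridge(struct folio *folio)
{
    struct page *page = &folio->page;       /* folio -> its head page */
    struct folio *back = page_folio(page);  /* page  -> owning folio  */

    WARN_ON(back != folio);
}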
diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c
index d5918eba27e3..82b687414e65 100644
--- a/fs/squashfs/inode.c
+++ b/fs/squashfs/inode.c
@@ -68,6 +68,10 @@ static int squashfs_new_inode(struct super_block *sb, struct inode *inode,
     inode->i_mode = le16_to_cpu(sqsh_ino->mode);
     inode->i_size = 0;
 
+    /* The file type must not be set yet; the caller sets it later. */
+    if (inode->i_mode & S_IFMT)
+        err = -EIO;
+
     return err;
 }
 
@@ -82,7 +86,7 @@ struct inode *squashfs_iget(struct super_block *sb, long long ino,
     if (!inode)
         return ERR_PTR(-ENOMEM);
 
-    if (!(inode->i_state & I_NEW))
+    if (!(inode_state_read_once(inode) & I_NEW))
         return inode;
 
     err = squashfs_read_inode(inode, ino);
@@ -140,8 +144,17 @@ int squashfs_read_inode(struct inode *inode, long long ino)
         if (err < 0)
             goto failed_read;
 
+        inode->i_size = le32_to_cpu(sqsh_ino->file_size);
         frag = le32_to_cpu(sqsh_ino->fragment);
         if (frag != SQUASHFS_INVALID_FRAG) {
+            /*
+             * the file cannot have a fragment (tailend) and have a
+             * file size a multiple of the block size
+             */
+            if ((inode->i_size & (msblk->block_size - 1)) == 0) {
+                err = -EINVAL;
+                goto failed_read;
+            }
             frag_offset = le32_to_cpu(sqsh_ino->offset);
             frag_size = squashfs_frag_lookup(sb, frag, &frag_blk);
             if (frag_size < 0) {
@@ -155,8 +168,7 @@ int squashfs_read_inode(struct inode *inode, long long ino)
         }
 
         set_nlink(inode, 1);
-        inode->i_size = le32_to_cpu(sqsh_ino->file_size);
-        inode->i_fop = &generic_ro_fops;
+        inode->i_fop = &squashfs_file_operations;
         inode->i_mode |= S_IFREG;
         inode->i_blocks = ((inode->i_size - 1) >> 9) + 1;
         squashfs_i(inode)->fragment_block = frag_blk;
@@ -165,6 +177,7 @@ int squashfs_read_inode(struct inode *inode, long long ino)
         squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block);
         squashfs_i(inode)->block_list_start = block;
         squashfs_i(inode)->offset = offset;
+        squashfs_i(inode)->parent = 0;
         inode->i_data.a_ops = &squashfs_aops;
 
         TRACE("File inode %x:%x, start_block %llx, block_list_start "
@@ -183,8 +196,21 @@ int squashfs_read_inode(struct inode *inode, long long ino)
         if (err < 0)
             goto failed_read;
 
+        inode->i_size = le64_to_cpu(sqsh_ino->file_size);
+        if (inode->i_size < 0) {
+            err = -EINVAL;
+            goto failed_read;
+        }
         frag = le32_to_cpu(sqsh_ino->fragment);
         if (frag != SQUASHFS_INVALID_FRAG) {
+            /*
+             * the file cannot have a fragment (tailend) and have a
+             * file size a multiple of the block size
+             */
+            if ((inode->i_size & (msblk->block_size - 1)) == 0) {
+                err = -EINVAL;
+                goto failed_read;
+            }
             frag_offset = le32_to_cpu(sqsh_ino->offset);
             frag_size = squashfs_frag_lookup(sb, frag, &frag_blk);
             if (frag_size < 0) {
@@ -199,9 +225,8 @@ int squashfs_read_inode(struct inode *inode, long long ino)
 
         xattr_id = le32_to_cpu(sqsh_ino->xattr);
         set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
-        inode->i_size = le64_to_cpu(sqsh_ino->file_size);
         inode->i_op = &squashfs_inode_ops;
-        inode->i_fop = &generic_ro_fops;
+        inode->i_fop = &squashfs_file_operations;
         inode->i_mode |= S_IFREG;
         inode->i_blocks = (inode->i_size -
                 le64_to_cpu(sqsh_ino->sparse) + 511) >> 9;
@@ -212,6 +237,7 @@ int squashfs_read_inode(struct inode *inode, long long ino)
         squashfs_i(inode)->start = le64_to_cpu(sqsh_ino->start_block);
         squashfs_i(inode)->block_list_start = block;
         squashfs_i(inode)->offset = offset;
+        squashfs_i(inode)->parent = 0;
         inode->i_data.a_ops = &squashfs_aops;
 
         TRACE("File inode %x:%x, start_block %llx, block_list_start "
@@ -292,6 +318,7 @@ int squashfs_read_inode(struct inode *inode, long long ino)
         inode->i_mode |= S_IFLNK;
         squashfs_i(inode)->start = block;
         squashfs_i(inode)->offset = offset;
+        squashfs_i(inode)->parent = 0;
 
         if (type == SQUASHFS_LSYMLINK_TYPE) {
             __le32 xattr;
@@ -329,6 +356,7 @@ int squashfs_read_inode(struct inode *inode, long long ino)
         set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
         rdev = le32_to_cpu(sqsh_ino->rdev);
         init_special_inode(inode, inode->i_mode, new_decode_dev(rdev));
+        squashfs_i(inode)->parent = 0;
 
         TRACE("Device inode %x:%x, rdev %x\n",
                 SQUASHFS_INODE_BLK(ino), offset, rdev);
@@ -353,6 +381,7 @@ int squashfs_read_inode(struct inode *inode, long long ino)
         set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
         rdev = le32_to_cpu(sqsh_ino->rdev);
         init_special_inode(inode, inode->i_mode, new_decode_dev(rdev));
+        squashfs_i(inode)->parent = 0;
 
         TRACE("Device inode %x:%x, rdev %x\n",
                 SQUASHFS_INODE_BLK(ino), offset, rdev);
@@ -373,6 +402,7 @@ int squashfs_read_inode(struct inode *inode, long long ino)
         inode->i_mode |= S_IFSOCK;
         set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
         init_special_inode(inode, inode->i_mode, 0);
+        squashfs_i(inode)->parent = 0;
         break;
     }
     case SQUASHFS_LFIFO_TYPE:
@@ -392,6 +422,7 @@ int squashfs_read_inode(struct inode *inode, long long ino)
         inode->i_op = &squashfs_inode_ops;
         set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
         init_special_inode(inode, inode->i_mode, 0);
+        squashfs_i(inode)->parent = 0;
         break;
     }
     default:
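The repeated fragment check encodes an on-disk invariant: a regular file only carries a tail-end fragment when its size is not an exact multiple of the block size, so both being true at once means the image is corrupt. Restated as a standalone predicate (a sketch; this helper does not exist in the patch):

#include <linux/types.h>

/* Sketch: true if the inode's fragment/size combination is inconsistent.
 * block_size is a power of two, so "& (block_size - 1)" is size % block_size.
 */
static bool frag_size_inconsistent(loff_t i_size, unsigned int block_size,
                   bool has_fragment)
{
    return has_fragment && (i_size & (block_size - 1)) == 0;
}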
diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h
index 5a756e6790b5..4851bd964502 100644
--- a/fs/squashfs/squashfs.h
+++ b/fs/squashfs/squashfs.h
@@ -14,6 +14,12 @@
 
 #define WARNING(s, args...) pr_warn("SQUASHFS: "s, ## args)
 
+#ifdef CONFIG_SQUASHFS_FILE_CACHE
+#define SQUASHFS_READ_PAGES msblk->max_thread_num
+#else
+#define SQUASHFS_READ_PAGES 0
+#endif
+
 /* block.c */
 extern int squashfs_read_data(struct super_block *, u64, int, u64 *,
                 struct squashfs_page_actor *);
@@ -67,12 +73,11 @@ extern __le64 *squashfs_read_fragment_index_table(struct super_block *,
                 u64, u64, unsigned int);
 
 /* file.c */
-void squashfs_fill_page(struct page *, struct squashfs_cache_entry *, int, int);
-void squashfs_copy_cache(struct page *, struct squashfs_cache_entry *, int,
-    int);
+void squashfs_copy_cache(struct folio *, struct squashfs_cache_entry *,
+        size_t bytes, size_t offset);
 
 /* file_xxx.c */
-extern int squashfs_readpage_block(struct page *, u64, int, int);
+int squashfs_readpage_block(struct folio *, u64 block, int bsize, int expected);
 
 /* id.c */
 extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *);
@@ -102,6 +107,7 @@ extern const struct address_space_operations squashfs_aops;
 
 /* inode.c */
 extern const struct inode_operations squashfs_inode_ops;
+extern const struct file_operations squashfs_file_operations;
 
 /* namei.c */
 extern const struct inode_operations squashfs_dir_inode_ops;
diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
index 95f8e8901768..a955d9369749 100644
--- a/fs/squashfs/squashfs_fs.h
+++ b/fs/squashfs/squashfs_fs.h
@@ -208,6 +208,7 @@ static inline int squashfs_block_size(__le32 raw)
 #define SQUASHFS_META_INDEXES   (SQUASHFS_METADATA_SIZE / sizeof(unsigned int))
 #define SQUASHFS_META_ENTRIES   127
 #define SQUASHFS_META_SLOTS 8
+#define SQUASHFS_SCAN_INDEXES   1024
 
 struct meta_entry {
     u64         data_block;
diff --git a/fs/squashfs/squashfs_fs_i.h b/fs/squashfs/squashfs_fs_i.h
index 2c82d6f2a456..8e497ac07b9a 100644
--- a/fs/squashfs/squashfs_fs_i.h
+++ b/fs/squashfs/squashfs_fs_i.h
@@ -16,6 +16,7 @@ struct squashfs_inode_info {
     u64     xattr;
     unsigned int    xattr_size;
     int     xattr_count;
+    int     parent;
     union {
         struct {
             u64 fragment_block;
@@ -27,7 +28,6 @@ struct squashfs_inode_info {
             u64 dir_idx_start;
             int dir_idx_offset;
             int dir_idx_cnt;
-            int parent;
         };
     };
     struct inode    vfs_inode;
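Moving parent out of the anonymous union is what makes the unconditional squashfs_i(inode)->parent = 0 assignments in the inode.c hunks safe: union members alias, so while parent shared storage with the directory-only fields, writing it for a regular file would have overwritten the file-specific fields occupying the same bytes. A reduced user-space sketch of that aliasing hazard (demo types only, not the real structure):

#include <stdio.h>

/* Sketch: two structs overlaid in a union share bytes, so a field that must
 * be valid for every inode type cannot live inside only one union arm. */
struct demo_inode {
    int xattr_count;            /* common fields live outside the union */
    union {
        struct {                /* regular-file view */
            long long fragment_block;
            int fragment_size;
            int fragment_offset;
        };
        struct {                /* directory view */
            long long dir_idx_start;
            int dir_idx_offset;
            int parent_in_union;
        };
    };
};

int main(void)
{
    struct demo_inode i = { 0 };

    i.fragment_size = 100;
    i.fragment_offset = 42;
    i.parent_in_union = 0;      /* aliases fragment_offset: data lost */
    printf("fragment_offset is now %d\n", i.fragment_offset);
    return 0;
}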
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 22e812808e5c..4465cf05603a 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -187,10 +187,15 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
     unsigned short flags;
     unsigned int fragments;
     u64 lookup_table_start, xattr_id_table_start, next_table;
-    int err;
+    int err, devblksize = sb_min_blocksize(sb, SQUASHFS_DEVBLK_SIZE);
 
     TRACE("Entered squashfs_fill_superblock\n");
 
+    if (!devblksize) {
+        errorf(fc, "squashfs: unable to set blocksize\n");
+        return -EINVAL;
+    }
+
     sb->s_fs_info = kzalloc(sizeof(*msblk), GFP_KERNEL);
     if (sb->s_fs_info == NULL) {
         ERROR("Failed to allocate squashfs_sb_info\n");
@@ -201,7 +206,7 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
 
     msblk->panic_on_errors = (opts->errors == Opt_errors_panic);
 
-    msblk->devblksize = sb_min_blocksize(sb, SQUASHFS_DEVBLK_SIZE);
+    msblk->devblksize = devblksize;
     msblk->devblksize_log2 = ffz(~msblk->devblksize);
 
     mutex_init(&msblk->meta_index_mutex);
@@ -314,26 +319,29 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
     sb->s_flags |= SB_RDONLY;
     sb->s_op = &squashfs_super_ops;
 
-    err = -ENOMEM;
-
     msblk->block_cache = squashfs_cache_init("metadata",
             SQUASHFS_CACHED_BLKS, SQUASHFS_METADATA_SIZE);
-    if (msblk->block_cache == NULL)
+    if (IS_ERR(msblk->block_cache)) {
+        err = PTR_ERR(msblk->block_cache);
         goto failed_mount;
+    }
 
     /* Allocate read_page block */
     msblk->read_page = squashfs_cache_init("data",
-            msblk->max_thread_num, msblk->block_size);
-    if (msblk->read_page == NULL) {
+            SQUASHFS_READ_PAGES, msblk->block_size);
+    if (IS_ERR(msblk->read_page)) {
         errorf(fc, "Failed to allocate read_page block");
+        err = PTR_ERR(msblk->read_page);
         goto failed_mount;
     }
 
     if (msblk->devblksize == PAGE_SIZE) {
         struct inode *cache = new_inode(sb);
 
-        if (cache == NULL)
+        if (cache == NULL) {
+            err = -ENOMEM;
             goto failed_mount;
+        }
 
         set_nlink(cache, 1);
         cache->i_size = OFFSET_MAX;
@@ -405,9 +413,9 @@ handle_fragments:
         goto check_directory_table;
 
     msblk->fragment_cache = squashfs_cache_init("fragment",
-        SQUASHFS_CACHED_FRAGMENTS, msblk->block_size);
-    if (msblk->fragment_cache == NULL) {
-        err = -ENOMEM;
+        min(SQUASHFS_CACHED_FRAGMENTS, fragments), msblk->block_size);
+    if (IS_ERR(msblk->fragment_cache)) {
+        err = PTR_ERR(msblk->fragment_cache);
         goto failed_mount;
     }
