Diffstat (limited to 'fs/squashfs')
-rw-r--r--  fs/squashfs/Kconfig                        27
-rw-r--r--  fs/squashfs/block.c                        28
-rw-r--r--  fs/squashfs/cache.c                        12
-rw-r--r--  fs/squashfs/decompressor_multi_percpu.c     6
-rw-r--r--  fs/squashfs/file.c                        182
-rw-r--r--  fs/squashfs/file_cache.c                    6
-rw-r--r--  fs/squashfs/file_direct.c                  36
-rw-r--r--  fs/squashfs/inode.c                        12
-rw-r--r--  fs/squashfs/namei.c                        14
-rw-r--r--  fs/squashfs/page_actor.c                   11
-rw-r--r--  fs/squashfs/page_actor.h                    6
-rw-r--r--  fs/squashfs/squashfs.h                     13
-rw-r--r--  fs/squashfs/super.c                        26
-rw-r--r--  fs/squashfs/symlink.c                      35
14 files changed, 256 insertions, 158 deletions
diff --git a/fs/squashfs/Kconfig b/fs/squashfs/Kconfig
index 60fc98bdf421..a9602aae21ef 100644
--- a/fs/squashfs/Kconfig
+++ b/fs/squashfs/Kconfig
@@ -5,8 +5,8 @@ config SQUASHFS
help
Saying Y here includes support for SquashFS 4.0 (a Compressed
Read-Only File System). Squashfs is a highly compressed read-only
- filesystem for Linux. It uses zlib, lzo or xz compression to
- compress both files, inodes and directories. Inodes in the system
+ filesystem for Linux. It uses zlib, lz4, lzo, xz or zstd compression
+ to compress both files, inodes and directories. Inodes in the system
are very small and all blocks are packed to minimise data overhead.
Block sizes greater than 4K are supported up to a maximum of 1 Mbytes
(default block size 128K). SquashFS 4.0 supports 64 bit filesystems
@@ -16,7 +16,7 @@ config SQUASHFS
Squashfs is intended for general read-only filesystem use, for
archival use (i.e. in cases where a .tar.gz file may be used), and in
embedded systems where low overhead is needed. Further information
- and tools are available from http://squashfs.sourceforge.net.
+ and tools are available from github.com/plougher/squashfs-tools.
If you want to compile this as a module ( = code which can be
inserted in and removed from the running kernel whenever you want),
@@ -149,6 +149,27 @@ config SQUASHFS_XATTR
If unsure, say N.
+config SQUASHFS_COMP_CACHE_FULL
+ bool "Enable full caching of compressed blocks"
+ depends on SQUASHFS
+ default n
+ help
+ This option enables caching of all compressed blocks. Without caching,
+ repeated reads of the same files trigger excessive disk I/O, significantly
+ reducing performance in workloads like fio-based benchmarks.
+
+ For example, fio tests (iodepth=1, numjobs=1, ioengine=psync) show:
+ With caching: IOPS=2223, BW=278MiB/s (291MB/s)
+ Without caching: IOPS=815, BW=102MiB/s (107MB/s)
+
+ Enabling this option restores performance to pre-regression levels by
+ caching all compressed blocks in the page cache, reducing disk I/O for
+ repeated reads. However, this increases memory usage, which may be a
+ concern in memory-constrained environments.
+
+ Enable this option if your workload involves frequent repeated reads and
+ memory usage is not a limiting factor. If unsure, say N.
+
config SQUASHFS_ZLIB
bool "Include support for ZLIB compressed file systems"
depends on SQUASHFS
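The fio parameters cited in the SQUASHFS_COMP_CACHE_FULL help text above (ioengine=psync, iodepth=1, numjobs=1) describe a simple buffered-read job. A minimal job file along those lines is sketched below; the mount point, file name, block size, and file size are illustrative assumptions, not values taken from this patch:

    ; sqsh-read.fio -- buffered sequential reads from a file on a SquashFS mount
    ; (filename, bs and size are examples, not from the patch)
    [sqsh-seqread]
    ioengine=psync
    iodepth=1
    numjobs=1
    rw=read
    bs=128k
    size=1G
    filename=/mnt/squashfs/testfile

Running the job a second time is where the option matters: with SQUASHFS_COMP_CACHE_FULL enabled, the compressed blocks fetched on the first pass stay in the page cache, so repeated reads avoid most of the device I/O described in the help text.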
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index 2dc730800f44..3061043e915c 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -88,6 +88,10 @@ static int squashfs_bio_read_cached(struct bio *fullbio,
struct bio_vec *bv;
int idx = 0;
int err = 0;
+#ifdef CONFIG_SQUASHFS_COMP_CACHE_FULL
+ struct page **cache_pages = kmalloc_array(page_count,
+ sizeof(void *), GFP_KERNEL | __GFP_ZERO);
+#endif
bio_for_each_segment_all(bv, fullbio, iter_all) {
struct page *page = bv->bv_page;
@@ -110,6 +114,11 @@ static int squashfs_bio_read_cached(struct bio *fullbio,
head_to_cache = page;
else if (idx == page_count - 1 && index + length != read_end)
tail_to_cache = page;
+#ifdef CONFIG_SQUASHFS_COMP_CACHE_FULL
+ /* Cache all pages in the BIO for repeated reads */
+ else if (cache_pages)
+ cache_pages[idx] = page;
+#endif
if (!bio || idx != end_idx) {
struct bio *new = bio_alloc_clone(bdev, fullbio,
@@ -163,6 +172,25 @@ static int squashfs_bio_read_cached(struct bio *fullbio,
}
}
+#ifdef CONFIG_SQUASHFS_COMP_CACHE_FULL
+ if (!cache_pages)
+ goto out;
+
+ for (idx = 0; idx < page_count; idx++) {
+ if (!cache_pages[idx])
+ continue;
+ int ret = add_to_page_cache_lru(cache_pages[idx], cache_mapping,
+ (read_start >> PAGE_SHIFT) + idx,
+ GFP_NOIO);
+
+ if (!ret) {
+ SetPageUptodate(cache_pages[idx]);
+ unlock_page(cache_pages[idx]);
+ }
+ }
+ kfree(cache_pages);
+out:
+#endif
return 0;
}
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c
index 5062326d0efb..181260e72680 100644
--- a/fs/squashfs/cache.c
+++ b/fs/squashfs/cache.c
@@ -198,7 +198,7 @@ void squashfs_cache_delete(struct squashfs_cache *cache)
{
int i, j;
- if (cache == NULL)
+ if (IS_ERR(cache) || cache == NULL)
return;
for (i = 0; i < cache->entries; i++) {
@@ -224,11 +224,15 @@ struct squashfs_cache *squashfs_cache_init(char *name, int entries,
int block_size)
{
int i, j;
- struct squashfs_cache *cache = kzalloc(sizeof(*cache), GFP_KERNEL);
+ struct squashfs_cache *cache;
+ if (entries == 0)
+ return NULL;
+
+ cache = kzalloc(sizeof(*cache), GFP_KERNEL);
if (cache == NULL) {
ERROR("Failed to allocate %s cache\n", name);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
cache->entry = kcalloc(entries, sizeof(*(cache->entry)), GFP_KERNEL);
@@ -281,7 +285,7 @@ struct squashfs_cache *squashfs_cache_init(char *name, int entries,
cleanup:
squashfs_cache_delete(cache);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
diff --git a/fs/squashfs/decompressor_multi_percpu.c b/fs/squashfs/decompressor_multi_percpu.c
index 8a218e7c2390..e4d7e507b268 100644
--- a/fs/squashfs/decompressor_multi_percpu.c
+++ b/fs/squashfs/decompressor_multi_percpu.c
@@ -46,7 +46,7 @@ static void *squashfs_decompressor_create(struct squashfs_sb_info *msblk,
}
kfree(comp_opts);
- return (__force void *) percpu;
+ return (void *)(__force unsigned long) percpu;
out:
for_each_possible_cpu(cpu) {
@@ -61,7 +61,7 @@ out:
static void squashfs_decompressor_destroy(struct squashfs_sb_info *msblk)
{
struct squashfs_stream __percpu *percpu =
- (struct squashfs_stream __percpu *) msblk->stream;
+ (void __percpu *)(unsigned long) msblk->stream;
struct squashfs_stream *stream;
int cpu;
@@ -79,7 +79,7 @@ static int squashfs_decompress(struct squashfs_sb_info *msblk, struct bio *bio,
{
struct squashfs_stream *stream;
struct squashfs_stream __percpu *percpu =
- (struct squashfs_stream __percpu *) msblk->stream;
+ (void __percpu *)(unsigned long) msblk->stream;
int res;
local_lock(&percpu->lock);
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index e8df6430444b..5ca2baa16dc2 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -362,31 +362,33 @@ static int read_blocklist(struct inode *inode, int index, u64 *block)
return squashfs_block_size(size);
}
-void squashfs_fill_page(struct page *page, struct squashfs_cache_entry *buffer, int offset, int avail)
+static bool squashfs_fill_page(struct folio *folio,
+ struct squashfs_cache_entry *buffer, size_t offset,
+ size_t avail)
{
- int copied;
+ size_t copied;
void *pageaddr;
- pageaddr = kmap_atomic(page);
+ pageaddr = kmap_local_folio(folio, 0);
copied = squashfs_copy_data(pageaddr, buffer, offset, avail);
memset(pageaddr + copied, 0, PAGE_SIZE - copied);
- kunmap_atomic(pageaddr);
+ kunmap_local(pageaddr);
- flush_dcache_page(page);
- if (copied == avail)
- SetPageUptodate(page);
- else
- SetPageError(page);
+ flush_dcache_folio(folio);
+
+ return copied == avail;
}
/* Copy data into page cache */
-void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
- int bytes, int offset)
+void squashfs_copy_cache(struct folio *folio,
+ struct squashfs_cache_entry *buffer, size_t bytes,
+ size_t offset)
{
- struct inode *inode = page->mapping->host;
+ struct address_space *mapping = folio->mapping;
+ struct inode *inode = mapping->host;
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
- int start_index = page->index & ~mask, end_index = start_index | mask;
+ int start_index = folio->index & ~mask, end_index = start_index | mask;
/*
* Loop copying datablock into pages. As the datablock likely covers
@@ -396,32 +398,35 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
*/
for (i = start_index; i <= end_index && bytes > 0; i++,
bytes -= PAGE_SIZE, offset += PAGE_SIZE) {
- struct page *push_page;
- int avail = buffer ? min_t(int, bytes, PAGE_SIZE) : 0;
+ struct folio *push_folio;
+ size_t avail = buffer ? min(bytes, PAGE_SIZE) : 0;
+ bool updated = false;
- TRACE("bytes %d, i %d, available_bytes %d\n", bytes, i, avail);
+ TRACE("bytes %zu, i %d, available_bytes %zu\n", bytes, i, avail);
- push_page = (i == page->index) ? page :
- grab_cache_page_nowait(page->mapping, i);
+ push_folio = (i == folio->index) ? folio :
+ __filemap_get_folio(mapping, i,
+ FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
+ mapping_gfp_mask(mapping));
- if (!push_page)
+ if (IS_ERR(push_folio))
continue;
- if (PageUptodate(push_page))
- goto skip_page;
+ if (folio_test_uptodate(push_folio))
+ goto skip_folio;
- squashfs_fill_page(push_page, buffer, offset, avail);
-skip_page:
- unlock_page(push_page);
- if (i != page->index)
- put_page(push_page);
+ updated = squashfs_fill_page(push_folio, buffer, offset, avail);
+skip_folio:
+ folio_end_read(push_folio, updated);
+ if (i != folio->index)
+ folio_put(push_folio);
}
}
/* Read datablock stored packed inside a fragment (tail-end packed block) */
-static int squashfs_readpage_fragment(struct page *page, int expected)
+static int squashfs_readpage_fragment(struct folio *folio, int expected)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
squashfs_i(inode)->fragment_block,
squashfs_i(inode)->fragment_size);
@@ -432,36 +437,34 @@ static int squashfs_readpage_fragment(struct page *page, int expected)
squashfs_i(inode)->fragment_block,
squashfs_i(inode)->fragment_size);
else
- squashfs_copy_cache(page, buffer, expected,
+ squashfs_copy_cache(folio, buffer, expected,
squashfs_i(inode)->fragment_offset);
squashfs_cache_put(buffer);
return res;
}
-static int squashfs_readpage_sparse(struct page *page, int expected)
+static int squashfs_readpage_sparse(struct folio *folio, int expected)
{
- squashfs_copy_cache(page, NULL, expected, 0);
+ squashfs_copy_cache(folio, NULL, expected, 0);
return 0;
}
static int squashfs_read_folio(struct file *file, struct folio *folio)
{
- struct page *page = &folio->page;
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
- int index = page->index >> (msblk->block_log - PAGE_SHIFT);
+ int index = folio->index >> (msblk->block_log - PAGE_SHIFT);
int file_end = i_size_read(inode) >> msblk->block_log;
int expected = index == file_end ?
(i_size_read(inode) & (msblk->block_size - 1)) :
msblk->block_size;
int res = 0;
- void *pageaddr;
TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
- page->index, squashfs_i(inode)->start);
+ folio->index, squashfs_i(inode)->start);
- if (page->index >= ((i_size_read(inode) + PAGE_SIZE - 1) >>
+ if (folio->index >= ((i_size_read(inode) + PAGE_SIZE - 1) >>
PAGE_SHIFT))
goto out;
@@ -471,66 +474,93 @@ static int squashfs_read_folio(struct file *file, struct folio *folio)
res = read_blocklist(inode, index, &block);
if (res < 0)
- goto error_out;
+ goto out;
if (res == 0)
- res = squashfs_readpage_sparse(page, expected);
+ res = squashfs_readpage_sparse(folio, expected);
else
- res = squashfs_readpage_block(page, block, res, expected);
+ res = squashfs_readpage_block(folio, block, res, expected);
} else
- res = squashfs_readpage_fragment(page, expected);
+ res = squashfs_readpage_fragment(folio, expected);
if (!res)
return 0;
-error_out:
- SetPageError(page);
out:
- pageaddr = kmap_atomic(page);
- memset(pageaddr, 0, PAGE_SIZE);
- kunmap_atomic(pageaddr);
- flush_dcache_page(page);
- if (res == 0)
- SetPageUptodate(page);
- unlock_page(page);
+ folio_zero_segment(folio, 0, folio_size(folio));
+ folio_end_read(folio, res == 0);
return res;
}
static int squashfs_readahead_fragment(struct page **page,
- unsigned int pages, unsigned int expected)
+ unsigned int pages, unsigned int expected, loff_t start)
{
struct inode *inode = page[0]->mapping->host;
struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
squashfs_i(inode)->fragment_block,
squashfs_i(inode)->fragment_size);
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
- unsigned int n, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
- int error = buffer->error;
+ int i, bytes, copied;
+ struct squashfs_page_actor *actor;
+ unsigned int offset;
+ void *addr;
+ struct page *last_page;
+
+ if (buffer->error)
+ goto out;
- if (error)
+ actor = squashfs_page_actor_init_special(msblk, page, pages,
+ expected, start);
+ if (!actor)
goto out;
- expected += squashfs_i(inode)->fragment_offset;
+ squashfs_actor_nobuff(actor);
+ addr = squashfs_first_page(actor);
- for (n = 0; n < pages; n++) {
- unsigned int base = (page[n]->index & mask) << PAGE_SHIFT;
- unsigned int offset = base + squashfs_i(inode)->fragment_offset;
+ for (copied = offset = 0; offset < expected; offset += PAGE_SIZE) {
+ int avail = min_t(int, expected - offset, PAGE_SIZE);
- if (expected > offset) {
- unsigned int avail = min_t(unsigned int, expected -
- offset, PAGE_SIZE);
+ if (!IS_ERR(addr)) {
+ bytes = squashfs_copy_data(addr, buffer, offset +
+ squashfs_i(inode)->fragment_offset, avail);
- squashfs_fill_page(page[n], buffer, offset, avail);
+ if (bytes != avail)
+ goto failed;
}
- unlock_page(page[n]);
- put_page(page[n]);
+ copied += avail;
+ addr = squashfs_next_page(actor);
+ }
+
+ last_page = squashfs_page_actor_free(actor);
+
+ if (copied == expected && !IS_ERR(last_page)) {
+ /* Last page (if present) may have trailing bytes not filled */
+ bytes = copied % PAGE_SIZE;
+ if (bytes && last_page)
+ memzero_page(last_page, bytes, PAGE_SIZE - bytes);
+
+ for (i = 0; i < pages; i++) {
+ flush_dcache_page(page[i]);
+ SetPageUptodate(page[i]);
+ }
}
+ for (i = 0; i < pages; i++) {
+ unlock_page(page[i]);
+ put_page(page[i]);
+ }
+
+ squashfs_cache_put(buffer);
+ return 0;
+
+failed:
+ squashfs_page_actor_free(actor);
+
out:
squashfs_cache_put(buffer);
- return error;
+ return 1;
}
static void squashfs_readahead(struct readahead_control *ractl)
@@ -555,7 +585,6 @@ static void squashfs_readahead(struct readahead_control *ractl)
return;
for (;;) {
- pgoff_t index;
int res, bsize;
u64 block = 0;
unsigned int expected;
@@ -574,26 +603,21 @@ static void squashfs_readahead(struct readahead_control *ractl)
if (readahead_pos(ractl) >= i_size_read(inode))
goto skip_pages;
- index = pages[0]->index >> shift;
-
- if ((pages[nr_pages - 1]->index >> shift) != index)
- goto skip_pages;
-
- if (index == file_end && squashfs_i(inode)->fragment_block !=
- SQUASHFS_INVALID_BLK) {
+ if (start >> msblk->block_log == file_end &&
+ squashfs_i(inode)->fragment_block != SQUASHFS_INVALID_BLK) {
res = squashfs_readahead_fragment(pages, nr_pages,
- expected);
+ expected, start);
if (res)
goto skip_pages;
continue;
}
- bsize = read_blocklist(inode, index, &block);
+ bsize = read_blocklist(inode, start >> msblk->block_log, &block);
if (bsize == 0)
goto skip_pages;
actor = squashfs_page_actor_init_special(msblk, pages, nr_pages,
- expected);
+ expected, start);
if (!actor)
goto skip_pages;
@@ -601,12 +625,12 @@ static void squashfs_readahead(struct readahead_control *ractl)
last_page = squashfs_page_actor_free(actor);
- if (res == expected) {
+ if (res == expected && !IS_ERR(last_page)) {
int bytes;
/* Last page (if present) may have trailing bytes not filled */
bytes = res % PAGE_SIZE;
- if (index == file_end && bytes && last_page)
+ if (start >> msblk->block_log == file_end && bytes && last_page)
memzero_page(last_page, bytes,
PAGE_SIZE - bytes);
@@ -620,6 +644,8 @@ static void squashfs_readahead(struct readahead_control *ractl)
unlock_page(pages[i]);
put_page(pages[i]);
}
+
+ start += readahead_batch_length(ractl);
}
kfree(pages);
diff --git a/fs/squashfs/file_cache.c b/fs/squashfs/file_cache.c
index 54c17b7c85fd..40e59a43d098 100644
--- a/fs/squashfs/file_cache.c
+++ b/fs/squashfs/file_cache.c
@@ -18,9 +18,9 @@
#include "squashfs.h"
/* Read separately compressed datablock and memcopy into page cache */
-int squashfs_readpage_block(struct page *page, u64 block, int bsize, int expected)
+int squashfs_readpage_block(struct folio *folio, u64 block, int bsize, int expected)
{
- struct inode *i = page->mapping->host;
+ struct inode *i = folio->mapping->host;
struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
block, bsize);
int res = buffer->error;
@@ -29,7 +29,7 @@ int squashfs_readpage_block(struct page *page, u64 block, int bsize, int expecte
ERROR("Unable to read page, block %llx, size %x\n", block,
bsize);
else
- squashfs_copy_cache(page, buffer, expected, 0);
+ squashfs_copy_cache(folio, buffer, expected, 0);
squashfs_cache_put(buffer);
return res;
diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c
index 763a3f7a75f6..2c3e809d6891 100644
--- a/fs/squashfs/file_direct.c
+++ b/fs/squashfs/file_direct.c
@@ -19,19 +19,19 @@
#include "page_actor.h"
/* Read separately compressed datablock directly into page cache */
-int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
- int expected)
-
+int squashfs_readpage_block(struct folio *folio, u64 block, int bsize,
+ int expected)
{
- struct inode *inode = target_page->mapping->host;
+ struct page *target_page = &folio->page;
+ struct inode *inode = folio->mapping->host;
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
-
loff_t file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT;
int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
- loff_t start_index = target_page->index & ~mask;
+ loff_t start_index = folio->index & ~mask;
loff_t end_index = start_index | mask;
- int i, n, pages, bytes, res = -ENOMEM;
- struct page **page;
+ loff_t index;
+ int i, pages, bytes, res = -ENOMEM;
+ struct page **page, *last_page;
struct squashfs_page_actor *actor;
void *pageaddr;
@@ -45,9 +45,9 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
return res;
/* Try to grab all the pages covered by the Squashfs block */
- for (i = 0, n = start_index; n <= end_index; n++) {
- page[i] = (n == target_page->index) ? target_page :
- grab_cache_page_nowait(target_page->mapping, n);
+ for (i = 0, index = start_index; index <= end_index; index++) {
+ page[i] = (index == folio->index) ? target_page :
+ grab_cache_page_nowait(folio->mapping, index);
if (page[i] == NULL)
continue;
@@ -67,27 +67,28 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
* Create a "page actor" which will kmap and kunmap the
* page cache pages appropriately within the decompressor
*/
- actor = squashfs_page_actor_init_special(msblk, page, pages, expected);
+ actor = squashfs_page_actor_init_special(msblk, page, pages, expected,
+ start_index << PAGE_SHIFT);
if (actor == NULL)
goto out;
/* Decompress directly into the page cache buffers */
res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
- squashfs_page_actor_free(actor);
+ last_page = squashfs_page_actor_free(actor);
if (res < 0)
goto mark_errored;
- if (res != expected) {
+ if (res != expected || IS_ERR(last_page)) {
res = -EIO;
goto mark_errored;
}
/* Last page (if present) may have trailing bytes not filled */
bytes = res % PAGE_SIZE;
- if (page[pages - 1]->index == end_index && bytes) {
- pageaddr = kmap_local_page(page[pages - 1]);
+ if (end_index == file_end && last_page && bytes) {
+ pageaddr = kmap_local_page(last_page);
memset(pageaddr + bytes, 0, PAGE_SIZE - bytes);
kunmap_local(pageaddr);
}
@@ -106,14 +107,13 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
return 0;
mark_errored:
- /* Decompression failed, mark pages as errored. Target_page is
+ /* Decompression failed. Target_page is
* dealt with by the caller
*/
for (i = 0; i < pages; i++) {
if (page[i] == NULL || page[i] == target_page)
continue;
flush_dcache_page(page[i]);
- SetPageError(page[i]);
unlock_page(page[i]);
put_page(page[i]);
}
diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c
index aa3411354e66..d5918eba27e3 100644
--- a/fs/squashfs/inode.c
+++ b/fs/squashfs/inode.c
@@ -48,6 +48,10 @@ static int squashfs_new_inode(struct super_block *sb, struct inode *inode,
gid_t i_gid;
int err;
+ inode->i_ino = le32_to_cpu(sqsh_ino->inode_number);
+ if (inode->i_ino == 0)
+ return -EINVAL;
+
err = squashfs_get_id(sb, le16_to_cpu(sqsh_ino->uid), &i_uid);
if (err)
return err;
@@ -58,7 +62,6 @@ static int squashfs_new_inode(struct super_block *sb, struct inode *inode,
i_uid_write(inode, i_uid);
i_gid_write(inode, i_gid);
- inode->i_ino = le32_to_cpu(sqsh_ino->inode_number);
inode_set_mtime(inode, le32_to_cpu(sqsh_ino->mtime), 0);
inode_set_atime(inode, inode_get_mtime_sec(inode), 0);
inode_set_ctime(inode, inode_get_mtime_sec(inode), 0);
@@ -276,8 +279,13 @@ int squashfs_read_inode(struct inode *inode, long long ino)
if (err < 0)
goto failed_read;
- set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
inode->i_size = le32_to_cpu(sqsh_ino->symlink_size);
+ if (inode->i_size > PAGE_SIZE) {
+ ERROR("Corrupted symlink\n");
+ return -EINVAL;
+ }
+
+ set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
inode->i_op = &squashfs_symlink_inode_ops;
inode_nohighmem(inode);
inode->i_data.a_ops = &squashfs_symlink_aops;
diff --git a/fs/squashfs/namei.c b/fs/squashfs/namei.c
index 11e4539b9eae..65aae7e2a859 100644
--- a/fs/squashfs/namei.c
+++ b/fs/squashfs/namei.c
@@ -62,27 +62,21 @@
*/
static int get_dir_index_using_name(struct super_block *sb,
u64 *next_block, int *next_offset, u64 index_start,
- int index_offset, int i_count, const char *name,
- int len)
+ int index_offset, int i_count, const char *name)
{
struct squashfs_sb_info *msblk = sb->s_fs_info;
int i, length = 0, err;
unsigned int size;
struct squashfs_dir_index *index;
- char *str;
TRACE("Entered get_dir_index_using_name, i_count %d\n", i_count);
- index = kmalloc(sizeof(*index) + SQUASHFS_NAME_LEN * 2 + 2, GFP_KERNEL);
+ index = kmalloc(sizeof(*index) + SQUASHFS_NAME_LEN + 1, GFP_KERNEL);
if (index == NULL) {
ERROR("Failed to allocate squashfs_dir_index\n");
goto out;
}
- str = &index->name[SQUASHFS_NAME_LEN + 1];
- strncpy(str, name, len);
- str[len] = '\0';
-
for (i = 0; i < i_count; i++) {
err = squashfs_read_metadata(sb, index, &index_start,
&index_offset, sizeof(*index));
@@ -101,7 +95,7 @@ static int get_dir_index_using_name(struct super_block *sb,
index->name[size] = '\0';
- if (strcmp(index->name, str) > 0)
+ if (strcmp(index->name, name) > 0)
break;
length = le32_to_cpu(index->index);
@@ -153,7 +147,7 @@ static struct dentry *squashfs_lookup(struct inode *dir, struct dentry *dentry,
length = get_dir_index_using_name(dir->i_sb, &block, &offset,
squashfs_i(dir)->dir_idx_start,
squashfs_i(dir)->dir_idx_offset,
- squashfs_i(dir)->dir_idx_cnt, name, len);
+ squashfs_i(dir)->dir_idx_cnt, name);
while (length < i_size_read(dir)) {
/*
diff --git a/fs/squashfs/page_actor.c b/fs/squashfs/page_actor.c
index 81af6c4ca115..2b3e807d4dea 100644
--- a/fs/squashfs/page_actor.c
+++ b/fs/squashfs/page_actor.c
@@ -60,6 +60,11 @@ struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
}
/* Implementation of page_actor for decompressing directly into page cache. */
+static loff_t page_next_index(struct squashfs_page_actor *actor)
+{
+ return page_folio(actor->page[actor->next_page])->index;
+}
+
static void *handle_next_page(struct squashfs_page_actor *actor)
{
int max_pages = (actor->length + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -68,7 +73,7 @@ static void *handle_next_page(struct squashfs_page_actor *actor)
return NULL;
if ((actor->next_page == actor->pages) ||
- (actor->next_index != actor->page[actor->next_page]->index)) {
+ (actor->next_index != page_next_index(actor))) {
actor->next_index++;
actor->returned_pages++;
actor->last_page = NULL;
@@ -103,7 +108,7 @@ static void direct_finish_page(struct squashfs_page_actor *actor)
}
struct squashfs_page_actor *squashfs_page_actor_init_special(struct squashfs_sb_info *msblk,
- struct page **page, int pages, int length)
+ struct page **page, int pages, int length, loff_t start_index)
{
struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
@@ -125,7 +130,7 @@ struct squashfs_page_actor *squashfs_page_actor_init_special(struct squashfs_sb_
actor->pages = pages;
actor->next_page = 0;
actor->returned_pages = 0;
- actor->next_index = page[0]->index & ~((1 << (msblk->block_log - PAGE_SHIFT)) - 1);
+ actor->next_index = start_index >> PAGE_SHIFT;
actor->pageaddr = NULL;
actor->last_page = NULL;
actor->alloc_buffer = msblk->decompressor->alloc_buffer;
diff --git a/fs/squashfs/page_actor.h b/fs/squashfs/page_actor.h
index 97d4983559b1..ffe25eb77c32 100644
--- a/fs/squashfs/page_actor.h
+++ b/fs/squashfs/page_actor.h
@@ -29,13 +29,15 @@ extern struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
int pages, int length);
extern struct squashfs_page_actor *squashfs_page_actor_init_special(
struct squashfs_sb_info *msblk,
- struct page **page, int pages, int length);
+ struct page **page, int pages, int length,
+ loff_t start_index);
static inline struct page *squashfs_page_actor_free(struct squashfs_page_actor *actor)
{
- struct page *last_page = actor->last_page;
+ struct page *last_page = actor->next_page == actor->pages ? actor->last_page : ERR_PTR(-EIO);
kfree(actor->tmp_buffer);
kfree(actor);
+
return last_page;
}
static inline void *squashfs_first_page(struct squashfs_page_actor *actor)
diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h
index 5a756e6790b5..218868b20f16 100644
--- a/fs/squashfs/squashfs.h
+++ b/fs/squashfs/squashfs.h
@@ -14,6 +14,12 @@
#define WARNING(s, args...) pr_warn("SQUASHFS: "s, ## args)
+#ifdef CONFIG_SQUASHFS_FILE_CACHE
+#define SQUASHFS_READ_PAGES msblk->max_thread_num
+#else
+#define SQUASHFS_READ_PAGES 0
+#endif
+
/* block.c */
extern int squashfs_read_data(struct super_block *, u64, int, u64 *,
struct squashfs_page_actor *);
@@ -67,12 +73,11 @@ extern __le64 *squashfs_read_fragment_index_table(struct super_block *,
u64, u64, unsigned int);
/* file.c */
-void squashfs_fill_page(struct page *, struct squashfs_cache_entry *, int, int);
-void squashfs_copy_cache(struct page *, struct squashfs_cache_entry *, int,
- int);
+void squashfs_copy_cache(struct folio *, struct squashfs_cache_entry *,
+ size_t bytes, size_t offset);
/* file_xxx.c */
-extern int squashfs_readpage_block(struct page *, u64, int, int);
+int squashfs_readpage_block(struct folio *, u64 block, int bsize, int expected);
/* id.c */
extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *);
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 22e812808e5c..992ea0e37257 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -202,6 +202,11 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
msblk->panic_on_errors = (opts->errors == Opt_errors_panic);
msblk->devblksize = sb_min_blocksize(sb, SQUASHFS_DEVBLK_SIZE);
+ if (!msblk->devblksize) {
+ errorf(fc, "squashfs: unable to set blocksize\n");
+ return -EINVAL;
+ }
+
msblk->devblksize_log2 = ffz(~msblk->devblksize);
mutex_init(&msblk->meta_index_mutex);
@@ -314,26 +319,29 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_flags |= SB_RDONLY;
sb->s_op = &squashfs_super_ops;
- err = -ENOMEM;
-
msblk->block_cache = squashfs_cache_init("metadata",
SQUASHFS_CACHED_BLKS, SQUASHFS_METADATA_SIZE);
- if (msblk->block_cache == NULL)
+ if (IS_ERR(msblk->block_cache)) {
+ err = PTR_ERR(msblk->block_cache);
goto failed_mount;
+ }
/* Allocate read_page block */
msblk->read_page = squashfs_cache_init("data",
- msblk->max_thread_num, msblk->block_size);
- if (msblk->read_page == NULL) {
+ SQUASHFS_READ_PAGES, msblk->block_size);
+ if (IS_ERR(msblk->read_page)) {
errorf(fc, "Failed to allocate read_page block");
+ err = PTR_ERR(msblk->read_page);
goto failed_mount;
}
if (msblk->devblksize == PAGE_SIZE) {
struct inode *cache = new_inode(sb);
- if (cache == NULL)
+ if (cache == NULL) {
+ err = -ENOMEM;
goto failed_mount;
+ }
set_nlink(cache, 1);
cache->i_size = OFFSET_MAX;
@@ -405,9 +413,9 @@ handle_fragments:
goto check_directory_table;
msblk->fragment_cache = squashfs_cache_init("fragment",
- SQUASHFS_CACHED_FRAGMENTS, msblk->block_size);
- if (msblk->fragment_cache == NULL) {
- err = -ENOMEM;
+ min(SQUASHFS_CACHED_FRAGMENTS, fragments), msblk->block_size);
+ if (IS_ERR(msblk->fragment_cache)) {
+ err = PTR_ERR(msblk->fragment_cache);
goto failed_mount;
}
diff --git a/fs/squashfs/symlink.c b/fs/squashfs/symlink.c
index 2bf977a52c2c..6ef735bd841a 100644
--- a/fs/squashfs/symlink.c
+++ b/fs/squashfs/symlink.c
@@ -32,20 +32,19 @@
static int squashfs_symlink_read_folio(struct file *file, struct folio *folio)
{
- struct page *page = &folio->page;
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct super_block *sb = inode->i_sb;
struct squashfs_sb_info *msblk = sb->s_fs_info;
- int index = page->index << PAGE_SHIFT;
+ int index = folio_pos(folio);
u64 block = squashfs_i(inode)->start;
int offset = squashfs_i(inode)->offset;
int length = min_t(int, i_size_read(inode) - index, PAGE_SIZE);
- int bytes, copied;
+ int bytes, copied, error;
void *pageaddr;
struct squashfs_cache_entry *entry;
TRACE("Entered squashfs_symlink_readpage, page index %ld, start block "
- "%llx, offset %x\n", page->index, block, offset);
+ "%llx, offset %x\n", folio->index, block, offset);
/*
* Skip index bytes into symlink metadata.
@@ -57,14 +56,15 @@ static int squashfs_symlink_read_folio(struct file *file, struct folio *folio)
ERROR("Unable to read symlink [%llx:%x]\n",
squashfs_i(inode)->start,
squashfs_i(inode)->offset);
- goto error_out;
+ error = bytes;
+ goto out;
}
}
/*
* Read length bytes from symlink metadata. Squashfs_read_metadata
* is not used here because it can sleep and we want to use
- * kmap_atomic to map the page. Instead call the underlying
+ * kmap_local to map the folio. Instead call the underlying
* squashfs_cache_get routine. As length bytes may overlap metadata
* blocks, we may need to call squashfs_cache_get multiple times.
*/
@@ -75,29 +75,26 @@ static int squashfs_symlink_read_folio(struct file *file, struct folio *folio)
squashfs_i(inode)->start,
squashfs_i(inode)->offset);
squashfs_cache_put(entry);
- goto error_out;
+ error = entry->error;
+ goto out;
}
- pageaddr = kmap_atomic(page);
+ pageaddr = kmap_local_folio(folio, 0);
copied = squashfs_copy_data(pageaddr + bytes, entry, offset,
length - bytes);
if (copied == length - bytes)
memset(pageaddr + length, 0, PAGE_SIZE - length);
else
block = entry->next_index;
- kunmap_atomic(pageaddr);
+ kunmap_local(pageaddr);
squashfs_cache_put(entry);
}
- flush_dcache_page(page);
- SetPageUptodate(page);
- unlock_page(page);
- return 0;
-
-error_out:
- SetPageError(page);
- unlock_page(page);
- return 0;
+ flush_dcache_folio(folio);
+ error = 0;
+out:
+ folio_end_read(folio, error == 0);
+ return error;
}