author    Linus Torvalds <torvalds@linux-foundation.org>  2017-03-23 11:39:33 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-03-23 11:39:33 -0700
commit    131fbf4f9c096adadde27ad4bc70ea1632d720a4 (patch)
tree      a76baf52612b90c4da8c4ac3c430d483fbbb1d79 /fs
parent    f341d9f08ae01d90d8d0c135ae2edf4423e724c9 (diff)
parent    e1699d2d7bf6e6cce3e1baff19f9dd4595a58664 (diff)
Merge branch 'for-linus-4.11' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs
Pull btrfs fixes from Chris Mason:
 "Zygo tracked down a very old bug with inline compressed extents. I
  didn't tag this one for stable because I want to do individual tested
  backports. It's a little tricky and I'd rather do some extra testing
  on it along the way"

* 'for-linus-4.11' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs:
  btrfs: add missing memset while reading compressed inline extents
  Btrfs: fix regression in lock_delalloc_pages
  btrfs: remove btrfs_err_str function from uapi/linux/btrfs.h
Diffstat (limited to 'fs')
-rw-r--r--  fs/btrfs/extent_io.c   3
-rw-r--r--  fs/btrfs/inode.c      14
2 files changed, 16 insertions(+), 1 deletion(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 28e81922a21c..8df797432740 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1714,7 +1714,8 @@ static int __process_pages_contig(struct address_space *mapping,
 			 * can we find nothing at @index.
 			 */
 			ASSERT(page_ops & PAGE_LOCK);
-			return ret;
+			err = -EAGAIN;
+			goto out;
 		}
 
 		for (i = 0; i < ret; i++) {
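For context (not part of the commit), a standalone C sketch of the error-path shape this hunk switches to: instead of returning the page count directly when nothing is found at @index, the function records -EAGAIN and jumps to a shared cleanup label. Everything below except the err = -EAGAIN / goto out shape is a stand-in for illustration, not btrfs code.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustration only: record an errno-style code and jump to one cleanup
 * label so partially acquired resources are always released. The scratch
 * buffer stands in for per-call state; it is not a btrfs structure.
 */
static int process_contig_sketch(size_t nr_found)
{
	int err = 0;
	char *scratch = malloc(4096);

	if (!scratch) {
		err = -ENOMEM;
		goto out;
	}

	if (nr_found == 0) {
		/* Nothing found at the index: report -EAGAIN, as in the hunk above. */
		err = -EAGAIN;
		goto out;
	}

	/* ... operate on the nr_found pages ... */

out:
	free(scratch);	/* free(NULL) is safe, so the cleanup path is shared */
	return err;
}

int main(void)
{
	printf("%d\n", process_contig_sketch(0));	/* prints the negated EAGAIN value */
	return 0;
}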
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index c40060cc481f..231503935652 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -6709,6 +6709,20 @@ static noinline int uncompress_inline(struct btrfs_path *path,
 	max_size = min_t(unsigned long, PAGE_SIZE, max_size);
 	ret = btrfs_decompress(compress_type, tmp, page,
 			       extent_offset, inline_size, max_size);
+
+	/*
+	 * decompression code contains a memset to fill in any space between the end
+	 * of the uncompressed data and the end of max_size in case the decompressed
+	 * data ends up shorter than ram_bytes. That doesn't cover the hole between
+	 * the end of an inline extent and the beginning of the next block, so we
+	 * cover that region here.
+	 */
+
+	if (max_size + pg_offset < PAGE_SIZE) {
+		char *map = kmap(page);
+		memset(map + pg_offset + max_size, 0, PAGE_SIZE - max_size - pg_offset);
+		kunmap(page);
+	}
 	kfree(tmp);
 	return ret;
 }
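A standalone C sketch (an assumption-laden illustration, not kernel code) of the arithmetic in the added hunk: decompressed bytes occupy [pg_offset, pg_offset + max_size) within a page-sized buffer, and the remainder up to PAGE_SIZE is cleared so the gap between the end of the inline extent and the next block reads back as zeros. The 4096-byte page size and the plain byte array stand in for the kmap'ed page.

#include <assert.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE_SKETCH 4096	/* assumed 4 KiB page */

/* Mirrors the added hunk: if decompressed data does not reach the end of the
 * page, zero the tail so stale contents are never exposed past the extent.
 */
static void zero_inline_tail(unsigned char *page, size_t pg_offset, size_t max_size)
{
	if (max_size + pg_offset < PAGE_SIZE_SKETCH)
		memset(page + pg_offset + max_size, 0,
		       PAGE_SIZE_SKETCH - max_size - pg_offset);
}

int main(void)
{
	unsigned char page[PAGE_SIZE_SKETCH];

	memset(page, 0xAA, sizeof(page));	/* simulate stale page contents */
	memset(page, 0x11, 100);		/* pretend 100 bytes were decompressed at offset 0 */

	zero_inline_tail(page, 0, 100);

	assert(page[99] == 0x11);		/* decompressed data left intact */
	assert(page[100] == 0x00);		/* tail cleared ... */
	assert(page[PAGE_SIZE_SKETCH - 1] == 0x00);	/* ... up to the end of the page */
	puts("tail zeroed as expected");
	return 0;
}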