author	Hugh Dickins <hughd@google.com>	2021-04-23 14:29:00 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-04-23 14:42:39 -0700
commit	ed98b0159fa92a22a2838bd92522b8c8d964556b (patch)
tree	c081344662209f6b4fa4525eb63452dc852ad17b /mm
parent	2d11e738151d6cd321dd944cefe9c941ea00086c (diff)
mm/filemap: fix mapping_seek_hole_data on THP & 32-bit
No problem on 64-bit, or without huge pages, but xfstests generic/285 and other SEEK_HOLE/SEEK_DATA tests have regressed on huge tmpfs, and on 32-bit architectures, with the new mapping_seek_hole_data(). Several different bugs turned out to need fixing.

u64 cast to stop losing bits when converting unsigned long to loff_t (and let's use shifts throughout, rather than mixed with * and /).

Use round_up() when advancing pos, to stop assuming that pos was already THP-aligned when advancing it by THP-size. (This use of round_up() assumes that any THP has THP-aligned index: true at present and true going forward, but could be recoded to avoid the assumption.)

Use xas_set() when iterating away from a THP, so that xa_index stays in synch with start, instead of drifting away to return bogus offset.

Check start against end to avoid wrapping 32-bit xa_index to 0 (and to handle these additional cases, seek_data or not, it's easier to break the loop than goto: so rearrange exit from the function).

[hughd@google.com: remove unneeded u64 casts, per Matthew]
  Link: https://lkml.kernel.org/r/alpine.LSU.2.11.2104221347240.1170@eggly.anvils
Link: https://lkml.kernel.org/r/alpine.LSU.2.11.2104211737410.3299@eggly.anvils
Fixes: 41139aa4c3a3 ("mm/filemap: add mapping_seek_hole_data")
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Dave Chinner <dchinner@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: William Kucharski <william.kucharski@oracle.com>
Cc: Yang Shi <yang.shi@linux.alibaba.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
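As a rough illustration of the truncation the u64 cast avoids (not part of the patch; the index value and 4K page size are assumed for the example), on a 32-bit kernel the product of two 32-bit unsigned longs wraps before it is widened to loff_t, whereas shifting an already-widened value does not:

	/* hypothetical sketch, assuming 32-bit unsigned long and 4K pages */
	unsigned long index = 0x100001;			/* page index just past 4GB      */
	loff_t bad  = index * PAGE_SIZE;		/* product wraps to 0x1000       */
	loff_t good = (u64)index << PAGE_SHIFT;		/* 0x100001000, as intended      */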
Diffstat (limited to 'mm')
-rw-r--r--	mm/filemap.c	21
1 file changed, 11 insertions, 10 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index bcf64e92ffb0..6ce832dc59e7 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2678,7 +2678,7 @@ loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
 		loff_t end, int whence)
 {
 	XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT);
-	pgoff_t max = (end - 1) / PAGE_SIZE;
+	pgoff_t max = (end - 1) >> PAGE_SHIFT;
 	bool seek_data = (whence == SEEK_DATA);
 	struct page *page;
 
@@ -2687,7 +2687,8 @@ loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
 
 	rcu_read_lock();
 	while ((page = find_get_entry(&xas, max, XA_PRESENT))) {
-		loff_t pos = xas.xa_index * PAGE_SIZE;
+		loff_t pos = (u64)xas.xa_index << PAGE_SHIFT;
+		unsigned int seek_size;
 
 		if (start < pos) {
 			if (!seek_data)
@@ -2695,25 +2696,25 @@ loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
 			start = pos;
 		}
 
-		pos += seek_page_size(&xas, page);
+		seek_size = seek_page_size(&xas, page);
+		pos = round_up(pos + 1, seek_size);
 		start = page_seek_hole_data(&xas, mapping, page, start, pos,
 				seek_data);
 		if (start < pos)
 			goto unlock;
+		if (start >= end)
+			break;
+		if (seek_size > PAGE_SIZE)
+			xas_set(&xas, pos >> PAGE_SHIFT);
 		if (!xa_is_value(page))
 			put_page(page);
 	}
-	rcu_read_unlock();
-
 	if (seek_data)
-		return -ENXIO;
-	goto out;
-
+		start = -ENXIO;
 unlock:
 	rcu_read_unlock();
-	if (!xa_is_value(page))
+	if (page && !xa_is_value(page))
 		put_page(page);
-out:
 	if (start > end)
 		return end;
 	return start;
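For reference, a hedged worked example of the round_up() advance in the hunk above (values assumed for illustration: a 2MB THP with THP-aligned index, with the iteration landing mid-THP rather than at its head):

	/* hypothetical values, not from the patch: 2MB THP covering 2MB..4MB */
	loff_t pos = 0x300000;			/* iteration landed mid-THP           */
	unsigned int seek_size = 0x200000;	/* seek_page_size() for the THP       */
	pos = round_up(pos + 1, seek_size);	/* 0x400000: the end of this THP      */
	/* the old "pos += seek_size" would give 0x500000, drifting past the THP */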