author		Baolin Wang <baolin.wang@linux.alibaba.com>	2024-10-26 21:51:52 +0800
committer	Andrew Morton <akpm@linux-foundation.org>	2024-11-06 20:11:18 -0800
commit		729881ffd390797077cec0e573d33b4d724d70b3 (patch)
tree		ed311cd0da942c93a0234437e873666e15e53ff6 /mm/shmem.c
parent		477327e10639a1ec5698847030b494dc75de33e4 (diff)
mm: shmem: fallback to page size splice if large folio has poisoned pages
tmpfs already supports PMD-sized large folios, but splice() cannot read any pages if the large folio has a poisoned page, which is not good, as Matthew pointed out in a previous email[1]:

"so if we have hwpoison set on one page in a folio, we now can't read bytes from any page in the folio? That seems like we've made a bad situation worse."

Thus add a fallback to PAGE_SIZE splice(), which still allows reading normal pages if the large folio has hwpoisoned pages.

[1] https://lore.kernel.org/all/Zw_d0EVAJkpNJEbA@casper.infradead.org/

[baolin.wang@linux.alibaba.com: code layout cleanup, per dhowells]
Link: https://lkml.kernel.org/r/32dd938c-3531-49f7-93e4-b7ff21fec569@linux.alibaba.com
Link: https://lkml.kernel.org/r/e3737fbd5366c4de4337bf5f2044817e77a5235b.1729915173.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Yang Shi <shy828301@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
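To make the user-visible behavior concrete, the sketch below (illustrative only, not part of the patch) splices a tmpfs file into a pipe one PAGE_SIZE chunk at a time; the file path and the poisoned-page scenario are assumptions for demonstration. With this fix, EIO is confined to the chunk covering the poisoned page, while the remaining pages of the same large folio stay readable:

/* Illustrative userspace sketch, not part of the patch: splice a
 * hypothetical tmpfs file into a pipe one PAGE_SIZE chunk at a time.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical pre-populated tmpfs file backed by a large folio. */
	int fd = open("/dev/shm/testfile", O_RDONLY);
	int pipefd[2];
	loff_t off = 0;
	char buf[4096];
	ssize_t n;

	if (fd < 0 || pipe(pipefd) < 0)
		return 1;

	for (;;) {
		n = splice(fd, &off, pipefd[1], NULL, 4096, 0);
		if (n == 0)		/* EOF */
			break;
		if (n < 0) {
			/* With this fix, EIO is returned only for the
			 * poisoned page; skip to the next page boundary
			 * and keep reading the healthy pages of the
			 * same large folio. */
			perror("splice");
			off = (off + 4096) & ~4095LL;
			continue;
		}
		if (read(pipefd[0], buf, n) < 0)	/* drain the pipe */
			break;
	}
	close(fd);
	return 0;
}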
Diffstat (limited to 'mm/shmem.c')
-rw-r--r--	mm/shmem.c	38
1 file changed, 30 insertions(+), 8 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index 06da05f984da..5afc5b1f7ae1 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3288,11 +3288,16 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
 	len = min_t(size_t, len, npages * PAGE_SIZE);
 
 	do {
+		bool fallback_page_splice = false;
+		struct page *page = NULL;
+		pgoff_t index;
+		size_t size;
+
 		if (*ppos >= i_size_read(inode))
 			break;
 
-		error = shmem_get_folio(inode, *ppos / PAGE_SIZE, 0, &folio,
-					SGP_READ);
+		index = *ppos >> PAGE_SHIFT;
+		error = shmem_get_folio(inode, index, 0, &folio, SGP_READ);
 		if (error) {
 			if (error == -EINVAL)
 				error = 0;
@@ -3301,12 +3306,15 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
 		if (folio) {
 			folio_unlock(folio);
 
-			if (folio_test_hwpoison(folio) ||
-			    (folio_test_large(folio) &&
-			     folio_test_has_hwpoisoned(folio))) {
+			page = folio_file_page(folio, index);
+			if (PageHWPoison(page)) {
 				error = -EIO;
 				break;
 			}
+
+			if (folio_test_large(folio) &&
+			    folio_test_has_hwpoisoned(folio))
+				fallback_page_splice = true;
 		}
 
 		/*
@@ -3320,7 +3328,17 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
 		isize = i_size_read(inode);
 		if (unlikely(*ppos >= isize))
 			break;
-		part = min_t(loff_t, isize - *ppos, len);
+		/*
+		 * Fallback to PAGE_SIZE splice if the large folio has hwpoisoned
+		 * pages.
+		 */
+		size = len;
+		if (unlikely(fallback_page_splice)) {
+			size_t offset = *ppos & ~PAGE_MASK;
+
+			size = umin(size, PAGE_SIZE - offset);
+		}
+		part = min_t(loff_t, isize - *ppos, size);
 
 		if (folio) {
 			/*
@@ -3328,8 +3346,12 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
 			 * virtual addresses, take care about potential aliasing
 			 * before reading the page on the kernel side.
 			 */
-			if (mapping_writably_mapped(mapping))
-				flush_dcache_folio(folio);
+			if (mapping_writably_mapped(mapping)) {
+				if (likely(!fallback_page_splice))
+					flush_dcache_folio(folio);
+				else
+					flush_dcache_page(page);
+			}
 			folio_mark_accessed(folio);
 			/*
 			 * Ok, we have the page, and it's up-to-date, so we can
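As a standalone illustration of the clamp introduced in the third hunk: *ppos & ~PAGE_MASK is the byte offset within the current page, and PAGE_SIZE - offset is what remains before the page boundary, so the fallback path never splices across into another (possibly poisoned) page. A minimal sketch assuming 4 KiB pages; clamp_to_page is an illustrative name, not a kernel helper:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Mirrors the fallback path's arithmetic: limit one splice chunk so
 * it never crosses a page boundary. */
static size_t clamp_to_page(unsigned long long ppos, size_t len)
{
	size_t offset = ppos & ~PAGE_MASK;	/* byte offset within the page */
	size_t room = PAGE_SIZE - offset;	/* bytes left before the boundary */

	return len < room ? len : room;		/* same result as umin(len, room) */
}

int main(void)
{
	/* ppos = 5000 sits 904 bytes into the second page, so at most
	 * 4096 - 904 = 3192 bytes can be spliced before the boundary. */
	printf("%zu\n", clamp_to_page(5000, 65536));	/* prints 3192 */
	return 0;
}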