Diffstat (limited to 'fs/hugetlbfs/inode.c')
-rw-r--r--	fs/hugetlbfs/inode.c	49
1 file changed, 23 insertions(+), 26 deletions(-)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index be4be99304bc..9c94ed8c3ab0 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -192,37 +192,25 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
* Someone wants to read @bytes from a HWPOISON hugetlb @folio from @offset.
* Returns the maximum number of bytes one can read without touching the 1st raw
* HWPOISON page.
- *
- * The implementation borrows the iteration logic from copy_page_to_iter*.
*/
static size_t adjust_range_hwpoison(struct folio *folio, size_t offset,
size_t bytes)
{
- struct page *page;
- size_t n = 0;
- size_t res = 0;
-
- /* First page to start the loop. */
- page = folio_page(folio, offset / PAGE_SIZE);
- offset %= PAGE_SIZE;
- while (1) {
- if (is_raw_hwpoison_page_in_hugepage(page))
- break;
+ struct page *page = folio_page(folio, offset / PAGE_SIZE);
+ size_t safe_bytes;
- /* Safe to read n bytes without touching HWPOISON subpage. */
- n = min(bytes, (size_t)PAGE_SIZE - offset);
- res += n;
- bytes -= n;
- if (!bytes || !n)
+ if (is_raw_hwpoison_page_in_hugepage(page))
+ return 0;
+ /* Safe to read the remaining bytes in this page. */
+ safe_bytes = PAGE_SIZE - (offset % PAGE_SIZE);
+ page++;
+
+ /* Check each remaining page as long as we are not done yet. */
+ for (; safe_bytes < bytes; safe_bytes += PAGE_SIZE, page++)
+ if (is_raw_hwpoison_page_in_hugepage(page))
break;
- offset += n;
- if (offset == PAGE_SIZE) {
- page = nth_page(page, 1);
- offset = 0;
- }
- }
- return res;
+ return min(safe_bytes, bytes);
}
/*
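
For readers following the first hunk: the rewritten adjust_range_hwpoison() keeps the same arithmetic as the old copy_page_to_iter-style loop, just expressed as one forward pass over subpages. A minimal userspace model of that arithmetic (PAGE_SIZE, the poison array and the helpers below are illustrative stand-ins, not kernel interfaces) behaves as follows:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define NR_SUBPAGES	8	/* pretend 32KB "huge" folio */

/* Illustrative stand-in for is_raw_hwpoison_page_in_hugepage(). */
static bool subpage_poisoned(const bool *poison, size_t idx)
{
	return poison[idx];
}

/*
 * Mirrors the arithmetic of the rewritten adjust_range_hwpoison():
 * bytes readable from @offset before hitting the first poisoned
 * subpage.  As in the kernel, @offset + @bytes is assumed to stay
 * within the folio.
 */
static size_t readable_bytes(const bool *poison, size_t offset, size_t bytes)
{
	size_t idx = offset / PAGE_SIZE;
	size_t safe_bytes;

	if (subpage_poisoned(poison, idx))
		return 0;
	/* The rest of the first touched subpage is safe to read. */
	safe_bytes = PAGE_SIZE - (offset % PAGE_SIZE);

	/* Walk the following subpages until poison or enough bytes. */
	for (idx++; safe_bytes < bytes; safe_bytes += PAGE_SIZE, idx++)
		if (subpage_poisoned(poison, idx))
			break;

	return safe_bytes < bytes ? safe_bytes : bytes;
}

int main(void)
{
	bool poison[NR_SUBPAGES] = { false };

	poison[2] = true;	/* third subpage is HWPOISON */

	/* Starts mid subpage 0, stops at subpage 2: 2 * 4096 - 100 bytes. */
	printf("%zu\n", readable_bytes(poison, 100, 5 * PAGE_SIZE));
	/* Starts inside the poisoned subpage: nothing is readable. */
	printf("%zu\n", readable_bytes(poison, 2 * PAGE_SIZE + 1, 10));
	return 0;
}

With the third subpage poisoned, a read starting 100 bytes into the folio is capped at 2 * PAGE_SIZE - 100 bytes, and a read starting inside the poisoned subpage gets 0, matching the old loop's results.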
@@ -490,6 +478,14 @@ hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
if (!hugetlb_vma_trylock_write(vma))
continue;
+ /*
+ * Skip VMAs without shareable locks. Per the design in commit
+ * 40549ba8f8e0, these will be handled by remove_inode_hugepages()
+ * called after this function with proper locking.
+ */
+ if (!__vma_shareable_lock(vma))
+ goto skip;
+
v_start = vma_offset_start(vma, start);
v_end = vma_offset_end(vma, end);
@@ -500,6 +496,7 @@ hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
* vmas. Therefore, lock is not held when calling
* unmap_hugepage_range for private vmas.
*/
+skip:
hugetlb_vma_unlock_write(vma);
}
}
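
On the hugetlb_vmdelete_list() hunks: every VMA whose write lock was taken by the trylock must still be unlocked even when it is skipped, which is why the new branch jumps to a label placed just before hugetlb_vma_unlock_write() instead of using continue. A small standalone sketch of that lock/skip/unlock shape (pthread mutexes and the item struct are illustrative stand-ins, not the hugetlb VMA lock API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy element; "shareable" plays the role of __vma_shareable_lock(). */
struct item {
	pthread_mutex_t lock;
	bool shareable;
	int value;
};

/*
 * Same control flow as the patched loop: once the trylock has
 * succeeded the unlock must always run, so the non-shareable case
 * jumps over the work but not over the unlock.
 */
static void process(struct item *items, int n)
{
	for (int i = 0; i < n; i++) {
		struct item *it = &items[i];

		if (pthread_mutex_trylock(&it->lock))
			continue;	/* lock not taken, nothing to undo */

		if (!it->shareable)
			goto skip;	/* lock taken, still needs the unlock */

		it->value++;		/* stands in for the real unmap work */
skip:
		pthread_mutex_unlock(&it->lock);
	}
}

int main(void)
{
	struct item items[2] = {
		{ PTHREAD_MUTEX_INITIALIZER, true,  0 },
		{ PTHREAD_MUTEX_INITIALIZER, false, 0 },
	};

	process(items, 2);
	printf("%d %d\n", items[0].value, items[1].value);	/* prints 1 0 */
	return 0;
}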
@@ -1054,7 +1051,7 @@ static int hugetlbfs_migrate_folio(struct address_space *mapping,
int rc;
rc = migrate_huge_page_move_mapping(mapping, dst, src);
- if (rc != MIGRATEPAGE_SUCCESS)
+ if (rc)
return rc;
if (hugetlb_folio_subpool(src)) {
@@ -1065,7 +1062,7 @@ static int hugetlbfs_migrate_folio(struct address_space *mapping,
folio_migrate_flags(dst, src);
- return MIGRATEPAGE_SUCCESS;
+ return 0;
}
#else
#define hugetlbfs_migrate_folio NULL
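
On the last two hunks: the change relies on the success code of migrate_huge_page_move_mapping() being 0 (MIGRATEPAGE_SUCCESS), so truth-testing rc and returning plain 0 is equivalent to the old spelling. A trivial standalone illustration of that convention (move_mapping() and migrate() below are toy stand-ins, not the kernel API):

#include <errno.h>
#include <stdio.h>

/* Toy stand-in: 0 on success, negative errno on failure. */
static int move_mapping(int should_fail)
{
	return should_fail ? -EAGAIN : 0;
}

/* With a zero success code, "rc != SUCCESS" and "rc" test the same thing. */
static int migrate(int should_fail)
{
	int rc = move_mapping(should_fail);

	if (rc)			/* was: if (rc != MIGRATEPAGE_SUCCESS) */
		return rc;

	/* ... transfer subpool reference, copy folio flags ... */
	return 0;		/* was: return MIGRATEPAGE_SUCCESS */
}

int main(void)
{
	printf("%d %d\n", migrate(0), migrate(1));	/* prints 0 and -EAGAIN */
	return 0;
}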