From ab502103ae3ce4c0fc393e598455efede3e523c9 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Mon, 10 Jan 2022 23:15:29 +0000
Subject: mm/usercopy: Detect large folio overruns

Move the compound page overrun detection out of
CONFIG_HARDENED_USERCOPY_PAGESPAN and convert it to use folios
so it's enabled for more people.

Signed-off-by: Matthew Wilcox (Oracle)
Acked-by: Kees Cook
Reviewed-by: David Hildenbrand
Signed-off-by: Kees Cook
Link: https://lore.kernel.org/r/20220110231530.665970-4-willy@infradead.org
---
 mm/usercopy.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/mm/usercopy.c b/mm/usercopy.c
index e1e856dca124..9458c2b24b02 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -164,7 +164,6 @@ static inline void check_page_span(const void *ptr, unsigned long n,
 {
 #ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
 	const void *end = ptr + n - 1;
-	struct page *endpage;
 	bool is_reserved, is_cma;
 
 	/*
@@ -195,11 +194,6 @@ static inline void check_page_span(const void *ptr, unsigned long n,
 		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
 		return;
 
-	/* Allow if fully inside the same compound (__GFP_COMP) page. */
-	endpage = virt_to_head_page(end);
-	if (likely(endpage == page))
-		return;
-
 	/*
 	 * Reject if range is entirely either Reserved (i.e. special or
 	 * device memory), or CMA. Otherwise, reject since the object spans
@@ -259,6 +253,10 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
 	if (folio_test_slab(folio)) {
 		/* Check slab allocator for flags and size. */
 		__check_heap_object(ptr, n, folio_slab(folio), to_user);
+	} else if (folio_test_large(folio)) {
+		unsigned long offset = ptr - folio_address(folio);
+		if (offset + n > folio_size(folio))
+			usercopy_abort("page alloc", NULL, to_user, offset, n);
 	} else {
 		/* Verify object does not incorrectly span multiple pages. */
 		check_page_span(ptr, n, folio_page(folio, 0), to_user);
--
cgit
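
The new folio branch rejects a copy when the range starting at ptr and
running for n bytes extends past the end of the large folio that backs
it. Below is a minimal userspace sketch of that bounds arithmetic only;
the names folio_base, folio_len, and would_overrun() are hypothetical
stand-ins for folio_address(), folio_size(), and the usercopy_abort()
path, not kernel API.

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	/* Return true if copying n bytes starting at ptr would run past
	 * the end of an allocation that begins at folio_base and is
	 * folio_len bytes long (the check the patch performs per folio). */
	static bool would_overrun(const char *folio_base, size_t folio_len,
				  const char *ptr, size_t n)
	{
		size_t offset = (size_t)(ptr - folio_base);

		return offset + n > folio_len;
	}

	int main(void)
	{
		char folio[4 * 4096];	/* stand-in for a 4-page folio */

		/* Copy stays inside the folio: prints 0 (allowed). */
		printf("%d\n", would_overrun(folio, sizeof(folio),
					     folio + 4096, 8192));
		/* Copy runs one byte past the end: prints 1 (rejected). */
		printf("%d\n", would_overrun(folio, sizeof(folio),
					     folio + 8192, 8193));
		return 0;
	}

Because the check only needs the folio's start address and size, it no
longer depends on CONFIG_HARDENED_USERCOPY_PAGESPAN and applies to any
large folio handed back by the page allocator.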