author    Kees Cook <keescook@chromium.org>    2016-09-07 09:54:34 -0700
committer Kees Cook <keescook@chromium.org>    2016-09-07 11:33:26 -0700
commit    8e1f74ea02cf4562404c48c6882214821552c13f (patch)
tree      931135d5d5cf00255d596520bd59c4067d669c22 /mm/usercopy.c
parent    a85d6b8242dc78ef3f4542a0f979aebcbe77fc4e (diff)
usercopy: remove page-spanning test for now
A custom allocator without __GFP_COMP that copies to userspace has been
found in vmw_execbuf_process[1], so this disables the page-span checker
by placing it behind a CONFIG for future work where such things can be
tracked down later.

[1] https://bugzilla.redhat.com/show_bug.cgi?id=1373326

Reported-by: Vinson Lee <vlee@freedesktop.org>
Fixes: f5509cc18daa ("mm: Hardened usercopy")
Signed-off-by: Kees Cook <keescook@chromium.org>
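For context, the failure mode is easy to reproduce. Below is a minimal
sketch, not part of this patch (copy_sample_to_user and udst are
hypothetical names), of the allocation pattern that trips the page-span
check: four contiguous pages allocated without __GFP_COMP are four
unrelated order-0 pages as far as virt_to_head_page() is concerned, so a
usercopy crossing the first page boundary is rejected even though the
memory is perfectly valid. This is the shape of the vmw_execbuf_process
report in [1].

	#include <linux/gfp.h>
	#include <linux/uaccess.h>

	/* Hypothetical driver-style helper, for illustration only. */
	static int copy_sample_to_user(void __user *udst)
	{
		/* Order-2 allocation: four pages, but *not* __GFP_COMP,
		 * so virt_to_head_page() sees four separate pages. */
		unsigned long buf = __get_free_pages(GFP_KERNEL, 2);
		int ret = 0;

		if (!buf)
			return -ENOMEM;

		/* 64 bytes straddling the first page boundary: with the
		 * page-span check enabled, this copy is rejected with
		 * "<spans multiple pages>". */
		if (copy_to_user(udst, (void *)(buf + PAGE_SIZE - 16), 64))
			ret = -EFAULT;

		free_pages(buf, 2);
		return ret;
	}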
Diffstat (limited to 'mm/usercopy.c')
-rw-r--r--    mm/usercopy.c    61
1 file changed, 35 insertions(+), 26 deletions(-)
diff --git a/mm/usercopy.c b/mm/usercopy.c
index a3cc3052f830..089328f2b920 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -134,31 +134,16 @@ static inline const char *check_bogus_address(const void *ptr, unsigned long n)
 	return NULL;
 }
 
-static inline const char *check_heap_object(const void *ptr, unsigned long n,
-					    bool to_user)
+/* Checks for allocs that are marked in some way as spanning multiple pages. */
+static inline const char *check_page_span(const void *ptr, unsigned long n,
+					  struct page *page, bool to_user)
 {
-	struct page *page, *endpage;
+#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
 	const void *end = ptr + n - 1;
+	struct page *endpage;
 	bool is_reserved, is_cma;
 
 	/*
-	 * Some architectures (arm64) return true for virt_addr_valid() on
-	 * vmalloced addresses. Work around this by checking for vmalloc
-	 * first.
-	 */
-	if (is_vmalloc_addr(ptr))
-		return NULL;
-
-	if (!virt_addr_valid(ptr))
-		return NULL;
-
-	page = virt_to_head_page(ptr);
-
-	/* Check slab allocator for flags and size. */
-	if (PageSlab(page))
-		return __check_heap_object(ptr, n, page);
-
-	/*
 	 * Sometimes the kernel data regions are not marked Reserved (see
 	 * check below). And sometimes [_sdata,_edata) does not cover
 	 * rodata and/or bss, so check each range explicitly.
@@ -186,7 +171,7 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
 	    ((unsigned long)end & (unsigned long)PAGE_MASK)))
 		return NULL;
 
-	/* Allow if start and end are inside the same compound page. */
+	/* Allow if fully inside the same compound (__GFP_COMP) page. */
 	endpage = virt_to_head_page(end);
 	if (likely(endpage == page))
 		return NULL;
@@ -199,20 +184,44 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
 	is_reserved = PageReserved(page);
 	is_cma = is_migrate_cma_page(page);
 	if (!is_reserved && !is_cma)
-		goto reject;
+		return "<spans multiple pages>";
 
 	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
 		page = virt_to_head_page(ptr);
 		if (is_reserved && !PageReserved(page))
-			goto reject;
+			return "<spans Reserved and non-Reserved pages>";
 		if (is_cma && !is_migrate_cma_page(page))
-			goto reject;
+			return "<spans CMA and non-CMA pages>";
 	}
+#endif
 
 	return NULL;
+}
+
+static inline const char *check_heap_object(const void *ptr, unsigned long n,
+					    bool to_user)
+{
+	struct page *page;
+
+	/*
+	 * Some architectures (arm64) return true for virt_addr_valid() on
+	 * vmalloced addresses. Work around this by checking for vmalloc
+	 * first.
+	 */
+	if (is_vmalloc_addr(ptr))
+		return NULL;
+
+	if (!virt_addr_valid(ptr))
+		return NULL;
+
+	page = virt_to_head_page(ptr);
+
+	/* Check slab allocator for flags and size. */
+	if (PageSlab(page))
+		return __check_heap_object(ptr, n, page);
 
-reject:
-	return "<spans multiple pages>";
+	/* Verify object does not incorrectly span multiple pages. */
+	return check_page_span(ptr, n, page, to_user);
 }
 
 /*
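As the updated comment in the diff notes, the same copy is allowed when
the pages form one compound page, because virt_to_head_page() then
returns the identical head page for both ends of the copy. A sketch of
the corresponding one-line fix for the hypothetical example after the
commit message:

	/* With __GFP_COMP the four pages become one compound page, so
	 * check_page_span() sees matching head pages and allows the copy. */
	unsigned long buf = __get_free_pages(GFP_KERNEL | __GFP_COMP, 2);

And with CONFIG_HARDENED_USERCOPY_PAGESPAN unset, which the commit
message indicates is the new default, the #ifdef makes check_page_span()
compile down to an unconditional "return NULL;", leaving the slab check
and the other sanity checks in __check_object_size() in force.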