author		Andrey Konovalov <andreyknvl@google.com>	2018-12-28 00:30:57 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-12-28 12:11:44 -0800
commit		2813b9c0296259fb11e75c839bab2d958ba4f96c (patch)
tree		4f174c60d6f189977d74053b118f8f820ab689f1 /mm
parent		41eea9cd239c5b3fff726894f85c97f60e5799a3 (diff)
kasan, mm, arm64: tag non slab memory allocated via pagealloc
Tag-based KASAN doesn't check memory accesses through pointers tagged with 0xff. When page_address is used to get a pointer to memory that corresponds to some page, the tag of the resulting pointer gets set to 0xff, even though the allocated memory might have been tagged differently.

For slab pages it's impossible to recover the correct tag to return from page_address, since the page might contain multiple slab objects tagged with different values, and we can't know in advance which one of them is going to get accessed. For non slab pages, however, we can recover the tag in page_address, since the whole page was marked with the same tag.

This patch adds tagging to non slab memory allocated with pagealloc. To set the tag of the pointer returned from page_address, the tag gets stored to page->flags when the memory gets allocated.

Link: http://lkml.kernel.org/r/d758ddcef46a5abc9970182b9137e2fbee202a2c.1544099024.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
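The mechanism, in a nutshell: a tag chosen at allocation time is stored in page->flags, and the page-to-pointer translation (page_address on arm64 with top-byte-ignore) re-applies that tag to the top byte of the returned address. The standalone C sketch below illustrates only the idea; the struct, bit positions and helper names are made up for illustration and are not the kernel's page_kasan_tag_* implementation, which is defined outside the mm/ diff shown here.

/*
 * Standalone sketch, assumes a 64-bit unsigned long. fake_page,
 * page_tag_set, tagged_address and the shift values are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define PTR_TAG_SHIFT	56	/* tag lives in the top byte of a pointer */
#define PAGE_TAG_SHIFT	8	/* assumed position of the tag in flags */
#define TAG_MASK	0xffUL
#define TAG_IGNORE	0xffUL	/* 0xff: accesses are not checked */

struct fake_page {
	unsigned long flags;
};

/* Store the allocation-time tag in the per-page flags word. */
static void page_tag_set(struct fake_page *page, uint8_t tag)
{
	page->flags &= ~(TAG_MASK << PAGE_TAG_SHIFT);
	page->flags |= (unsigned long)tag << PAGE_TAG_SHIFT;
}

static uint8_t page_tag_get(const struct fake_page *page)
{
	return (page->flags >> PAGE_TAG_SHIFT) & TAG_MASK;
}

/* Re-apply the stored tag when translating the page to a pointer. */
static void *tagged_address(const struct fake_page *page, void *untagged)
{
	unsigned long addr = (unsigned long)untagged & ~(TAG_MASK << PTR_TAG_SHIFT);

	return (void *)(addr | ((unsigned long)page_tag_get(page) << PTR_TAG_SHIFT));
}

int main(void)
{
	struct fake_page page = { .flags = TAG_IGNORE << PAGE_TAG_SHIFT };
	void *mem = (void *)0x0000400000001000UL;	/* pretend page address */

	page_tag_set(&page, 0x2a);	/* random tag picked at allocation time */
	printf("tagged pointer: %p\n", tagged_address(&page, mem));
	return 0;
}

This is also why the reset_tag() calls in kasan_poison_kfree and kasan_kfree_large can be dropped in the diff below: for non slab pages, page_address now returns a pointer that already carries the matching tag.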
Diffstat (limited to 'mm')
-rw-r--r--	mm/cma.c	11
-rw-r--r--	mm/kasan/common.c	15
-rw-r--r--	mm/page_alloc.c	1
-rw-r--r--	mm/slab.c	2
4 files changed, 26 insertions, 3 deletions
diff --git a/mm/cma.c b/mm/cma.c
index 4cb76121a3ab..c7b39dd3b4f6 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -407,6 +407,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
unsigned long pfn = -1;
unsigned long start = 0;
unsigned long bitmap_maxno, bitmap_no, bitmap_count;
+ size_t i;
struct page *page = NULL;
int ret = -ENOMEM;
@@ -466,6 +467,16 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
trace_cma_alloc(pfn, page, count, align);
+ /*
+ * CMA can allocate multiple page blocks, which results in different
+ * blocks being marked with different tags. Reset the tags to ignore
+ * those page blocks.
+ */
+ if (page) {
+ for (i = 0; i < count; i++)
+ page_kasan_tag_reset(page + i);
+ }
+
if (ret && !no_warn) {
pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
__func__, count, ret);
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 27f0cae336c9..195ca385cf7a 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -220,8 +220,15 @@ void kasan_unpoison_stack_above_sp_to(const void *watermark)
void kasan_alloc_pages(struct page *page, unsigned int order)
{
+ u8 tag;
+ unsigned long i;
+
if (unlikely(PageHighMem(page)))
return;
+
+ tag = random_tag();
+ for (i = 0; i < (1 << order); i++)
+ page_kasan_tag_set(page + i, tag);
kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}
@@ -319,6 +326,10 @@ struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
void kasan_poison_slab(struct page *page)
{
+ unsigned long i;
+
+ for (i = 0; i < (1 << compound_order(page)); i++)
+ page_kasan_tag_reset(page + i);
kasan_poison_shadow(page_address(page),
PAGE_SIZE << compound_order(page),
KASAN_KMALLOC_REDZONE);
@@ -517,7 +528,7 @@ void kasan_poison_kfree(void *ptr, unsigned long ip)
page = virt_to_head_page(ptr);
if (unlikely(!PageSlab(page))) {
- if (reset_tag(ptr) != page_address(page)) {
+ if (ptr != page_address(page)) {
kasan_report_invalid_free(ptr, ip);
return;
}
@@ -530,7 +541,7 @@ void kasan_poison_kfree(void *ptr, unsigned long ip)
void kasan_kfree_large(void *ptr, unsigned long ip)
{
- if (reset_tag(ptr) != page_address(virt_to_head_page(ptr)))
+ if (ptr != page_address(virt_to_head_page(ptr)))
kasan_report_invalid_free(ptr, ip);
/* The object will be poisoned by page_alloc. */
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e95b5b7c9c3d..d245de2124e3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1183,6 +1183,7 @@ static void __meminit __init_single_page(struct page *page, unsigned long pfn,
init_page_count(page);
page_mapcount_reset(page);
page_cpupid_reset_last(page);
+ page_kasan_tag_reset(page);
INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
diff --git a/mm/slab.c b/mm/slab.c
index a80beb543678..01991060714c 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2357,7 +2357,7 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
void *freelist;
void *addr = page_address(page);
- page->s_mem = addr + colour_off;
+ page->s_mem = kasan_reset_tag(addr) + colour_off;
page->active = 0;
if (OBJFREELIST_SLAB(cachep))