From 3f9ec80f7b22ec46272a32408c5cd91ae2f8c4c0 Mon Sep 17 00:00:00 2001
From: Andrey Ryabinin
Date: Mon, 10 Jul 2017 15:50:31 -0700
Subject: arm64/kasan: don't allocate extra shadow memory

We used to read several bytes of the shadow memory in advance.
Therefore, additional shadow memory was mapped to prevent a crash if
a speculative load happened near the end of the mapped shadow memory.
Now we don't have such speculative loads, so we no longer need to map
additional shadow memory.

Link: http://lkml.kernel.org/r/20170601162338.23540-3-aryabinin@virtuozzo.com
Signed-off-by: Andrey Ryabinin
Acked-by: Mark Rutland
Cc: Catalin Marinas
Cc: Will Deacon
Cc: "H. Peter Anvin"
Cc: Alexander Potapenko
Cc: Dmitry Vyukov
Cc: Ingo Molnar
Cc: Thomas Gleixner
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/arm64/mm/kasan_init.c | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 687a358a3733..81f03959a4ab 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -191,14 +191,8 @@ void __init kasan_init(void)
 		if (start >= end)
 			break;
 
-		/*
-		 * end + 1 here is intentional. We check several shadow bytes in
-		 * advance to slightly speed up fastpath. In some rare cases
-		 * we could cross boundary of mapped shadow, so we just map
-		 * some more here.
-		 */
 		vmemmap_populate((unsigned long)kasan_mem_to_shadow(start),
-				(unsigned long)kasan_mem_to_shadow(end) + 1,
+				(unsigned long)kasan_mem_to_shadow(end),
 				pfn_to_nid(virt_to_pfn(start)));
 	}
 
--
cgit
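
For reference, the translation that makes the extra byte unnecessary is
kasan_mem_to_shadow(). A minimal sketch of its definition, matching
include/linux/kasan.h from this era of the kernel (shown only for
illustration; it is not part of the diff above):

	/*
	 * With KASAN_SHADOW_SCALE_SHIFT == 3, each shadow byte tracks
	 * 8 bytes of memory, so the shadow region for [start, end) is
	 * exactly [shadow(start), shadow(end)). Once nothing reads
	 * shadow bytes past 'end' speculatively, vmemmap_populate()
	 * does not need to map anything beyond shadow(end).
	 */
	static inline void *kasan_mem_to_shadow(const void *addr)
	{
		return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
			+ KASAN_SHADOW_OFFSET;
	}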