From 7b73d978a5d0d2a3637bdd57191cb6ffbad3feca Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Fri, 29 Dec 2017 08:53:54 +0100
Subject: mm: pass the vmem_altmap to vmemmap_populate

We can just pass this on instead of having to do a radix tree lookup
without proper locking a few levels into the callchain.

Signed-off-by: Christoph Hellwig
Signed-off-by: Dan Williams
---
 arch/arm64/mm/mmu.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 267d2b79d52d..ec8952ff13be 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -654,12 +654,14 @@ int kern_addr_valid(unsigned long addr)
 }
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 #if !ARM64_SWAPPER_USES_SECTION_MAPS
-int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
+		struct vmem_altmap *altmap)
 {
 	return vmemmap_populate_basepages(start, end, node);
 }
 #else	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
-int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
+		struct vmem_altmap *altmap)
 {
 	unsigned long addr = start;
 	unsigned long next;
--
cgit

From 24b6d4164348370c6b6a58b4248babd85ff9e982 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Fri, 29 Dec 2017 08:53:56 +0100
Subject: mm: pass the vmem_altmap to vmemmap_free

We can just pass this on instead of having to do a radix tree lookup
without proper locking a few levels into the callchain.

Signed-off-by: Christoph Hellwig
Signed-off-by: Dan Williams
---
 arch/arm64/mm/mmu.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index ec8952ff13be..0b1f13e0b4b3 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -696,7 +696,8 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 	return 0;
 }
 #endif	/* CONFIG_ARM64_64K_PAGES */
-void vmemmap_free(unsigned long start, unsigned long end)
+void vmemmap_free(unsigned long start, unsigned long end,
+		struct vmem_altmap *altmap)
 {
 }
 #endif	/* CONFIG_SPARSEMEM_VMEMMAP */
--
cgit
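The two patches above only change the arm64 function signatures; the altmap is
not used on arm64 itself. For context, here is a small standalone C sketch (not
from these patches; all names here are made up for illustration) of the
refactoring pattern they implement: hand the vmem_altmap down the call chain
explicitly instead of re-deriving it from a global lookup at a lower level.

    #include <stdio.h>

    /* Toy stand-in for struct vmem_altmap; field names are illustrative only. */
    struct vmem_altmap {
        unsigned long base_pfn;
        unsigned long free;
    };

    /*
     * Old shape: the low level re-derives the altmap from a global structure
     * (the kernel did this with an unlocked radix tree lookup).
     */
    static struct vmem_altmap *global_altmap;

    static int populate_old(unsigned long start, unsigned long end, int node)
    {
        struct vmem_altmap *altmap = global_altmap; /* hidden, racy dependency */

        (void)start; (void)end; (void)node;
        printf("old: altmap=%p\n", (void *)altmap);
        return 0;
    }

    /*
     * New shape: the caller that already owns the altmap passes it down;
     * NULL simply means "no altmap in use".
     */
    static int populate_new(unsigned long start, unsigned long end, int node,
                            struct vmem_altmap *altmap)
    {
        (void)start; (void)end; (void)node;
        printf("new: altmap=%p\n", (void *)altmap);
        return 0;
    }

    int main(void)
    {
        struct vmem_altmap pmem = { .base_pfn = 0x100000, .free = 512 };

        global_altmap = &pmem;
        populate_old(0, 4096, 0);
        populate_new(0, 4096, 0, &pmem);
        return 0;
    }
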
From f1e3a12b6543a73cfd47aa7da16d3d73d297795e Mon Sep 17 00:00:00 2001
From: Mathieu Desnoyers
Date: Mon, 29 Jan 2018 15:20:19 -0500
Subject: membarrier/arm64: Provide core serializing command

Signed-off-by: Mathieu Desnoyers
Acked-by: Thomas Gleixner
Acked-by: Peter Zijlstra (Intel)
Cc: Andrea Parri
Cc: Andrew Hunter
Cc: Andy Lutomirski
Cc: Avi Kivity
Cc: Benjamin Herrenschmidt
Cc: Boqun Feng
Cc: Dave Watson
Cc: David Sehr
Cc: Greg Hackmann
Cc: H. Peter Anvin
Cc: Linus Torvalds
Cc: Maged Michael
Cc: Michael Ellerman
Cc: Paul E. McKenney
Cc: Paul Mackerras
Cc: Russell King
Cc: Will Deacon
Cc: linux-api@vger.kernel.org
Cc: linux-arch@vger.kernel.org
Link: http://lkml.kernel.org/r/20180129202020.8515-11-mathieu.desnoyers@efficios.com
Signed-off-by: Ingo Molnar
---
 arch/arm64/Kconfig        | 1 +
 arch/arm64/kernel/entry.S | 4 ++++
 2 files changed, 5 insertions(+)

(limited to 'arch/arm64')

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index c9a7e9e1414f..5b0c06d8dbbe 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -16,6 +16,7 @@ config ARM64
 	select ARCH_HAS_GCOV_PROFILE_ALL
 	select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
 	select ARCH_HAS_KCOV
+	select ARCH_HAS_MEMBARRIER_SYNC_CORE
 	select ARCH_HAS_SET_MEMORY
 	select ARCH_HAS_SG_CHAIN
 	select ARCH_HAS_STRICT_KERNEL_RWX
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 6d14b8f29b5f..5edde1c2e93e 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -302,6 +302,10 @@ alternative_else_nop_endif
 	ldp	x28, x29, [sp, #16 * 14]
 	ldr	lr, [sp, #S_LR]
 	add	sp, sp, #S_FRAME_SIZE		// restore sp
+	/*
+	 * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on eret context synchronization
+	 * when returning from IPI handler, and when returning to user-space.
+	 */
 	eret					// return to kernel
 	.endm

--
cgit
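For context only (not part of the patch above): with ARCH_HAS_MEMBARRIER_SYNC_CORE
selected, user space can request a core-serializing barrier across all threads of
the calling process via membarrier(2). A minimal sketch follows, assuming kernel
headers recent enough to define the SYNC_CORE commands added by this series.

    #include <linux/membarrier.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <stdio.h>

    /* membarrier(2) may have no libc wrapper, so invoke the syscall directly. */
    static int sys_membarrier(int cmd, int flags)
    {
        return syscall(__NR_membarrier, cmd, flags);
    }

    int main(void)
    {
        /* A process must register before it may use the SYNC_CORE command. */
        if (sys_membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE, 0)) {
            perror("register");
            return 1;
        }

        /*
         * For example after rewriting code that other threads may execute:
         * every thread of this process passes through a core-serializing
         * instruction (eret on arm64, per the comment in the patch) before
         * returning to user space.
         */
        if (sys_membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE, 0)) {
            perror("sync-core");
            return 1;
        }

        printf("core-serializing membarrier completed\n");
        return 0;
    }
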
From 917538e212a2c080af95ccb4376c5387fac08176 Mon Sep 17 00:00:00 2001
From: Andrey Konovalov
Date: Tue, 6 Feb 2018 15:36:44 -0800
Subject: kasan: clean up KASAN_SHADOW_SCALE_SHIFT usage

Right now the fact that KASAN uses a single shadow byte for 8 bytes of
memory is scattered all over the code.

This change defines KASAN_SHADOW_SCALE_SHIFT early in asm include files
and makes use of this constant where necessary.

[akpm@linux-foundation.org: coding-style fixes]
Link: http://lkml.kernel.org/r/34937ca3b90736eaad91b568edf5684091f662e3.1515775666.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov
Acked-by: Andrey Ryabinin
Cc: Dmitry Vyukov
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/arm64/include/asm/kasan.h  | 17 ++++++++++-------
 arch/arm64/include/asm/memory.h |  3 ++-
 arch/arm64/mm/kasan_init.c      |  3 ++-
 3 files changed, 14 insertions(+), 9 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/include/asm/kasan.h b/arch/arm64/include/asm/kasan.h
index e266f80e45b7..8758bb008436 100644
--- a/arch/arm64/include/asm/kasan.h
+++ b/arch/arm64/include/asm/kasan.h
@@ -12,7 +12,8 @@

 /*
  * KASAN_SHADOW_START: beginning of the kernel virtual addresses.
- * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/8 of kernel virtual addresses.
+ * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/N of kernel virtual addresses,
+ * where N = (1 << KASAN_SHADOW_SCALE_SHIFT).
  */
 #define KASAN_SHADOW_START      (VA_START)
 #define KASAN_SHADOW_END        (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
@@ -20,14 +21,16 @@
 /*
  * This value is used to map an address to the corresponding shadow
  * address by the following formula:
- *     shadow_addr = (address >> 3) + KASAN_SHADOW_OFFSET;
+ *     shadow_addr = (address >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
  *
- * (1 << 61) shadow addresses - [KASAN_SHADOW_OFFSET,KASAN_SHADOW_END]
- * cover all 64-bits of virtual addresses. So KASAN_SHADOW_OFFSET
- * should satisfy the following equation:
- *      KASAN_SHADOW_OFFSET = KASAN_SHADOW_END - (1ULL << 61)
+ * (1 << (64 - KASAN_SHADOW_SCALE_SHIFT)) shadow addresses that lie in range
+ * [KASAN_SHADOW_OFFSET, KASAN_SHADOW_END) cover all 64-bits of virtual
+ * addresses. So KASAN_SHADOW_OFFSET should satisfy the following equation:
+ *      KASAN_SHADOW_OFFSET = KASAN_SHADOW_END -
+ *                              (1ULL << (64 - KASAN_SHADOW_SCALE_SHIFT))
  */
-#define KASAN_SHADOW_OFFSET     (KASAN_SHADOW_END - (1ULL << (64 - 3)))
+#define KASAN_SHADOW_OFFSET     (KASAN_SHADOW_END - (1ULL << \
+					(64 - KASAN_SHADOW_SCALE_SHIFT)))

 void kasan_init(void);
 void kasan_copy_shadow(pgd_t *pgdir);
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index d4bae7d6e0d8..50fa96a49792 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -85,7 +85,8 @@
  * stack size when KASAN is in use.
  */
 #ifdef CONFIG_KASAN
-#define KASAN_SHADOW_SIZE	(UL(1) << (VA_BITS - 3))
+#define KASAN_SHADOW_SCALE_SHIFT 3
+#define KASAN_SHADOW_SIZE	(UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
 #define KASAN_THREAD_SHIFT	1
 #else
 #define KASAN_SHADOW_SIZE	(0)
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index acba49fb5aac..6e02e6fb4c7b 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -135,7 +135,8 @@ static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
 /* The early shadow maps everything to a single page of zeroes */
 asmlinkage void __init kasan_early_init(void)
 {
-	BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_END - (1UL << 61));
+	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
+		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
 	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
 	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
 	kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE,
--
cgit

From 3aa56885e51683a19c8aa71739fd279b3f501cd7 Mon Sep 17 00:00:00 2001
From: Yury Norov
Date: Tue, 6 Feb 2018 15:38:06 -0800
Subject: bitmap: replace bitmap_{from,to}_u32array with bitmap_{from,to}_arr32

over the kernel. Additionally to it:
* __check_eq_bitmap() now takes single nbits argument.
* __check_eq_u32_array is not used in new test but may be used in
  future. So I don't remove it here, but annotate as __used.

Tested on arm64 and 32-bit BE mips.

[arnd@arndb.de: perf: arm_dsu_pmu: convert to bitmap_from_arr32]
  Link: http://lkml.kernel.org/r/20180201172508.5739-2-ynorov@caviumnetworks.com
[ynorov@caviumnetworks.com: fix net/core/ethtool.c]
  Link: http://lkml.kernel.org/r/20180205071747.4ekxtsbgxkj5b2fz@yury-thinkpad
Link: http://lkml.kernel.org/r/20171228150019.27953-2-ynorov@caviumnetworks.com
Signed-off-by: Yury Norov
Signed-off-by: Arnd Bergmann
Cc: Ben Hutchings
Cc: David Decotigny
Cc: David S. Miller
Cc: Geert Uytterhoeven
Cc: Matthew Wilcox
Cc: Rasmus Villemoes
Cc: Heiner Kallweit
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/arm64/kernel/perf_event.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 3affca3dd96a..75b220ba73a3 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -925,9 +925,8 @@ static void __armv8pmu_probe_pmu(void *info)
 	pmceid[0] = read_sysreg(pmceid0_el0);
 	pmceid[1] = read_sysreg(pmceid1_el0);

-	bitmap_from_u32array(cpu_pmu->pmceid_bitmap,
-			     ARMV8_PMUV3_MAX_COMMON_EVENTS, pmceid,
-			     ARRAY_SIZE(pmceid));
+	bitmap_from_arr32(cpu_pmu->pmceid_bitmap,
+			     pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
 }

 static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
--
cgit
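For readers unfamiliar with the new helper used in the arm64 hunk above, here is
a standalone toy re-implementation of what bitmap_from_arr32() does — illustration
only, not the kernel code — showing why the old (dst, nbits, src, nwords) argument
order could be dropped: nbits alone is enough to copy 32-bit words into an
unsigned-long bitmap identically on 32- and 64-bit hosts.

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    /*
     * Toy version of the semantics of bitmap_from_arr32(): copy nbits worth
     * of 32-bit words into an unsigned-long-based bitmap so the result is
     * the same on 32-bit and 64-bit hosts.  (The real kernel helper also
     * clears any bits past nbits in the last word.)
     */
    static void toy_bitmap_from_arr32(unsigned long *bitmap, const uint32_t *buf,
                                      unsigned int nbits)
    {
        unsigned int i;

        memset(bitmap, 0,
               (nbits + BITS_PER_LONG - 1) / BITS_PER_LONG * sizeof(unsigned long));
        for (i = 0; i < (nbits + 31) / 32; i++)
            bitmap[(32u * i) / BITS_PER_LONG] |=
                (unsigned long)buf[i] << ((32u * i) % BITS_PER_LONG);
    }

    int main(void)
    {
        /* e.g. the two PMCEID register values read by the arm64 PMU driver */
        uint32_t pmceid[2] = { 0x80000001u, 0x00000003u };
        unsigned long events[64 / BITS_PER_LONG];

        toy_bitmap_from_arr32(events, pmceid, 64);
        printf("bit 0: %lu, bit 33: %lu\n",
               events[0] & 1,
               (events[33 / BITS_PER_LONG] >> (33 % BITS_PER_LONG)) & 1);
        return 0;
    }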