author    Linus Torvalds <torvalds@linux-foundation.org>  2021-11-10 11:29:30 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2021-11-10 11:29:30 -0800
commit    89fa0be0a09c8edb5e028e7368ae87c8f6cbc462 (patch)
tree      2fddc175e59f5341d3daa2c1175080418250176d /arch/arm64
parent    3f55f177edb88293bad08ebe217d3ce20cda427b (diff)
parent    c7c386fbc20262c1d911c615c65db6a58667d92c (diff)
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Will Deacon:

 - Fix double-evaluation of 'pte' macro argument when using 52-bit PAs

 - Fix signedness of some MTE prctl PR_* constants

 - Fix kmemleak memory usage by skipping early pgtable allocations

 - Fix printing of CPU feature register strings

 - Remove redundant -nostdlib linker flag for vDSO binaries

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: pgtable: make __pte_to_phys/__phys_to_pte_val inline functions
  arm64: Track no early_pgtable_alloc() for kmemleak
  arm64: mte: change PR_MTE_TCF_NONE back into an unsigned long
  arm64: vdso: remove -nostdlib compiler flag
  arm64: arm64_ftr_reg->name may not be a human-readable string
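The first item corresponds to the pgtable.h hunk below: the old __pte_to_phys()/__phys_to_pte_val() macros name their argument more than once, so an argument with side effects is evaluated more than once, while the static inline replacements evaluate it exactly once. A minimal standalone sketch of that hazard (the masks and the read_pte() helper are illustrative, not the kernel's definitions):

/*
 * Minimal sketch of the double-evaluation hazard; ADDR_LOW/ADDR_HIGH and
 * read_pte() are illustrative stand-ins, not the kernel's definitions.
 */
#include <stdio.h>
#include <stdint.h>

#define ADDR_LOW   0x0000ffffffff0000ULL	/* PA bits [47:16], illustrative */
#define ADDR_HIGH  0x000000000000f000ULL	/* PA bits [51:48] held in [15:12] */

/* Macro form: 'pte' is named twice, so its expansion is evaluated twice. */
#define PTE_TO_PHYS_MACRO(pte) \
	(((pte) & ADDR_LOW) | (((pte) & ADDR_HIGH) << 36))

/* Inline form: the argument expression is evaluated exactly once. */
static inline uint64_t pte_to_phys_inline(uint64_t pte)
{
	return (pte & ADDR_LOW) | ((pte & ADDR_HIGH) << 36);
}

static int reads;

/* Hypothetical stand-in for an expensive or volatile page-table read. */
static uint64_t read_pte(void)
{
	reads++;
	return 0x000000000000f000ULL;
}

int main(void)
{
	reads = 0;
	(void)PTE_TO_PHYS_MACRO(read_pte());
	printf("macro:  argument evaluated %d time(s)\n", reads);	/* 2 */

	reads = 0;
	(void)pte_to_phys_inline(read_pte());
	printf("inline: argument evaluated %d time(s)\n", reads);	/* 1 */
	return 0;
}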
Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/include/asm/pgtable.h   | 12
-rw-r--r--  arch/arm64/kernel/cpufeature.c     | 10
-rw-r--r--  arch/arm64/kernel/vdso/Makefile    |  2
-rw-r--r--  arch/arm64/kernel/vdso32/Makefile  |  2
-rw-r--r--  arch/arm64/mm/kasan_init.c         |  5
-rw-r--r--  arch/arm64/mm/mmu.c                |  3
6 files changed, 23 insertions(+), 11 deletions(-)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 84fbb52b4224..c4ba047a82d2 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -67,9 +67,15 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
* page table entry, taking care of 52-bit addresses.
*/
#ifdef CONFIG_ARM64_PA_BITS_52
-#define __pte_to_phys(pte) \
- ((pte_val(pte) & PTE_ADDR_LOW) | ((pte_val(pte) & PTE_ADDR_HIGH) << 36))
-#define __phys_to_pte_val(phys) (((phys) | ((phys) >> 36)) & PTE_ADDR_MASK)
+static inline phys_addr_t __pte_to_phys(pte_t pte)
+{
+ return (pte_val(pte) & PTE_ADDR_LOW) |
+ ((pte_val(pte) & PTE_ADDR_HIGH) << 36);
+}
+static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
+{
+ return (phys | (phys >> 36)) & PTE_ADDR_MASK;
+}
#else
#define __pte_to_phys(pte) (pte_val(pte) & PTE_ADDR_MASK)
#define __phys_to_pte_val(phys) (phys)
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index ecbdff795f5e..7de96196ae14 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -573,15 +573,19 @@ static const struct arm64_ftr_bits ftr_raz[] = {
ARM64_FTR_END,
};

-#define ARM64_FTR_REG_OVERRIDE(id, table, ovr) { \
+#define __ARM64_FTR_REG_OVERRIDE(id_str, id, table, ovr) { \
.sys_id = id, \
.reg = &(struct arm64_ftr_reg){ \
- .name = #id, \
+ .name = id_str, \
.override = (ovr), \
.ftr_bits = &((table)[0]), \
}}

-#define ARM64_FTR_REG(id, table) ARM64_FTR_REG_OVERRIDE(id, table, &no_override)
+#define ARM64_FTR_REG_OVERRIDE(id, table, ovr) \
+ __ARM64_FTR_REG_OVERRIDE(#id, id, table, ovr)
+
+#define ARM64_FTR_REG(id, table) \
+ __ARM64_FTR_REG_OVERRIDE(#id, id, table, &no_override)

struct arm64_ftr_override __ro_after_init id_aa64mmfr1_override;
struct arm64_ftr_override __ro_after_init id_aa64pfr1_override;
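This hunk is the "arm64_ftr_reg->name may not be a human-readable string" fix: when ARM64_FTR_REG() forwarded its id to ARM64_FTR_REG_OVERRIDE(), the argument was macro-expanded before the inner #id stringified it, so the stored name could be the expanded register encoding rather than the symbolic name; the new __ARM64_FTR_REG_OVERRIDE() receives the string captured at the outermost level. A standalone sketch with hypothetical macro names, not the kernel's:

/*
 * Sketch of the stringification issue; SYS_FOO_EL1 and the REG_NAME_* macros
 * are hypothetical stand-ins for a sys_reg() encoding and the ftr_reg macros.
 */
#include <stdio.h>

#define SYS_FOO_EL1 ((3 << 19) | (1 << 16))	/* stand-in for a register encoding */

/* Old pattern: the outer macro expands 'id' before the inner '#id' sees it. */
#define REG_NAME_INNER(id)	#id
#define REG_NAME_OLD(id)	REG_NAME_INNER(id)

/* New pattern: stringify at the outermost level and pass the string down. */
#define __REG_NAME(id_str, id)	id_str
#define REG_NAME_NEW(id)	__REG_NAME(#id, id)

int main(void)
{
	puts(REG_NAME_OLD(SYS_FOO_EL1));	/* prints the expanded encoding */
	puts(REG_NAME_NEW(SYS_FOO_EL1));	/* prints "SYS_FOO_EL1" */
	return 0;
}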
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index 945e6bb326e3..700767dfd221 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -23,7 +23,7 @@ btildflags-$(CONFIG_ARM64_BTI_KERNEL) += -z force-bti
# potential future proofing if we end up with internal calls to the exported
# routines, as x86 does (see 6f121e548f83 ("x86, vdso: Reimplement vdso.so
# preparation in build-time C")).
-ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \
+ldflags-y := -shared -soname=linux-vdso.so.1 --hash-style=sysv \
-Bsymbolic --build-id=sha1 -n $(btildflags-y) -T

ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index c8fec493a450..6c01b63ff56d 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -102,7 +102,7 @@ VDSO_AFLAGS += -D__ASSEMBLY__

# From arm vDSO Makefile
VDSO_LDFLAGS += -Bsymbolic --no-undefined -soname=linux-vdso.so.1
VDSO_LDFLAGS += -z max-page-size=4096 -z common-page-size=4096
-VDSO_LDFLAGS += -nostdlib -shared --hash-style=sysv --build-id=sha1
+VDSO_LDFLAGS += -shared --hash-style=sysv --build-id=sha1

# Borrow vdsomunge.c from the arm vDSO
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 5b996ca4d996..ec276f75fa05 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -36,7 +36,7 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
__pa(MAX_DMA_ADDRESS),
- MEMBLOCK_ALLOC_KASAN, node);
+ MEMBLOCK_ALLOC_NOLEAKTRACE, node);
if (!p)
panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
__func__, PAGE_SIZE, PAGE_SIZE, node,
@@ -49,7 +49,8 @@ static phys_addr_t __init kasan_alloc_raw_page(int node)
{
void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
__pa(MAX_DMA_ADDRESS),
- MEMBLOCK_ALLOC_KASAN, node);
+ MEMBLOCK_ALLOC_NOLEAKTRACE,
+ node);
if (!p)
panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
__func__, PAGE_SIZE, PAGE_SIZE, node,
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index d77bf06d6a6d..acfae9b41cc8 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -96,7 +96,8 @@ static phys_addr_t __init early_pgtable_alloc(int shift)
phys_addr_t phys;
void *ptr;

- phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
+ phys = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0,
+ MEMBLOCK_ALLOC_NOLEAKTRACE);
if (!phys)
panic("Failed to allocate page table page\n");