author     Linus Torvalds <torvalds@linux-foundation.org>  2022-10-06 10:32:34 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-10-06 10:32:34 -0700
commit     7782aae498b92f124267b366293100d121fe0f56 (patch)
tree       70ca107257d0ddcfa4d7cfe559012f5b650f7ace /arch/arm/mm
parent     833477fce7a14d43ae4c07f8ddc32fa5119471a2 (diff)
parent     e66372ecb80dc5179c7abb880229c7452e813d15 (diff)
Merge tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm
Pull ARM updates from Russell King:

 - Print an un-hashed userspace PC on undefined instruction exception

 - Disable FDPIC ABI

 - Remove redundant vfp_flush/release_thread functions

 - Use raw_cpu_* rather than this_cpu_* in handle_bad_stack()

 - Avoid needlessly long backtraces when show_regs() is called

 - Fix an issue with stack traces through call_with_stack()

 - Avoid stack traces saving a duplicate exception PC value

 - Pass a void pointer to virt_to_page() in DMA mapping code

 - Fix kasan maps for modules when CONFIG_KASAN_VMALLOC=n

 - Show FDT region and page table level names in kernel page tables dump

* tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm:
  ARM: 9246/1: dump: show page table level name
  ARM: 9245/1: dump: show FDT region
  ARM: 9242/1: kasan: Only map modules if CONFIG_KASAN_VMALLOC=n
  ARM: 9240/1: dma-mapping: Pass (void *) to virt_to_page()
  ARM: 9234/1: stacktrace: Avoid duplicate saving of exception PC value
  ARM: 9233/1: stacktrace: Skip frame pointer boundary check for call_with_stack()
  ARM: 9224/1: Dump the stack traces based on the parameter 'regs' of show_regs()
  ARM: 9232/1: Replace this_cpu_* with raw_cpu_* in handle_bad_stack()
  ARM: 9228/1: vfp: kill vfp_flush/release_thread()
  ARM: 9226/1: disable FDPIC ABI
  ARM: 9221/1: traps: print un-hashed user pc on undefined instruction
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/dma-mapping.c  |  2
-rw-r--r--  arch/arm/mm/dump.c         |  8
-rw-r--r--  arch/arm/mm/kasan_init.c   |  9
3 files changed, 14 insertions(+), 5 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 089c9c644cce..ef691a5720d2 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -307,7 +307,7 @@ void __init dma_contiguous_remap(void)
static int __dma_update_pte(pte_t *pte, unsigned long addr, void *data)
{
- struct page *page = virt_to_page(addr);
+ struct page *page = virt_to_page((void *)addr);
pgprot_t prot = *(pgprot_t *)data;
set_pte_ext(pte, mk_pte(page, prot), 0);
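For orientation, a minimal sketch of the call path that feeds this callback, assuming the mainline __dma_remap()/apply_to_page_range() wiring (a sketch, not part of the patch): pte_fn_t callbacks receive the virtual address as an unsigned long, while virt_to_page() resolves it as a pointer, hence the explicit (void *) cast in the hunk above.

static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned long end = start + size;

	/* apply_to_page_range() invokes __dma_update_pte() for each PTE in
	 * the range, passing the virtual address as an unsigned long */
	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
	flush_tlb_kernel_range(start, end);
}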
diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
index 712da6a81b23..059eb4cdc9c2 100644
--- a/arch/arm/mm/dump.c
+++ b/arch/arm/mm/dump.c
@@ -26,7 +26,7 @@ static struct addr_marker address_markers[] = {
{ MODULES_VADDR, "Modules" },
{ PAGE_OFFSET, "Kernel Mapping" },
{ 0, "vmalloc() Area" },
- { VMALLOC_END, "vmalloc() End" },
+ { FDT_FIXED_BASE, "FDT Area" },
{ FIXADDR_START, "Fixmap Area" },
{ VECTORS_BASE, "Vectors" },
{ VECTORS_BASE + PAGE_SIZE * 2, "Vectors End" },
@@ -200,6 +200,7 @@ static const struct prot_bits section_bits[] = {
};
struct pg_level {
+ const char *name;
const struct prot_bits *bits;
size_t num;
u64 mask;
@@ -213,9 +214,11 @@ static struct pg_level pg_level[] = {
}, { /* p4d */
}, { /* pud */
}, { /* pmd */
+ .name = (CONFIG_PGTABLE_LEVELS > 2) ? "PMD" : "PGD",
.bits = section_bits,
.num = ARRAY_SIZE(section_bits),
}, { /* pte */
+ .name = "PTE",
.bits = pte_bits,
.num = ARRAY_SIZE(pte_bits),
},
@@ -282,7 +285,8 @@ static void note_page(struct pg_state *st, unsigned long addr,
delta >>= 10;
unit++;
}
- pt_dump_seq_printf(st->seq, "%9lu%c", delta, *unit);
+ pt_dump_seq_printf(st->seq, "%9lu%c %s", delta, *unit,
+ pg_level[st->level].name);
if (st->current_domain)
pt_dump_seq_printf(st->seq, " %s",
st->current_domain);
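Taken together with the pg_level names above, each line of the page-table dump (typically exposed as the kernel_page_tables debugfs file) now carries the level name after the size column, roughly like the following illustrative line (domain and attribute columns shown as placeholders, since their exact strings depend on the entry):

0xc0000000-0xc0800000           8M PMD     <domain> <attribute flags>

On classic 2-level ARM page tables (CONFIG_PGTABLE_LEVELS == 2, i.e. non-LPAE) the pmd is folded into the pgd, which is why the section level reports "PGD" rather than "PMD" there.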
diff --git a/arch/arm/mm/kasan_init.c b/arch/arm/mm/kasan_init.c
index 29caee9c79ce..46d9f4a622cb 100644
--- a/arch/arm/mm/kasan_init.c
+++ b/arch/arm/mm/kasan_init.c
@@ -268,12 +268,17 @@ void __init kasan_init(void)
/*
* 1. The module global variables are in MODULES_VADDR ~ MODULES_END,
- * so we need to map this area.
+ * so we need to map this area if CONFIG_KASAN_VMALLOC=n. With
+ * VMALLOC support KASAN will manage this region dynamically,
+ * refer to kasan_populate_vmalloc() and ARM's implementation of
+ * module_alloc().
* 2. PKMAP_BASE ~ PKMAP_BASE+PMD_SIZE's shadow and MODULES_VADDR
* ~ MODULES_END's shadow is in the same PMD_SIZE, so we can't
* use kasan_populate_zero_shadow.
*/
- create_mapping((void *)MODULES_VADDR, (void *)(PKMAP_BASE + PMD_SIZE));
+ if (!IS_ENABLED(CONFIG_KASAN_VMALLOC) && IS_ENABLED(CONFIG_MODULES))
+ create_mapping((void *)MODULES_VADDR, (void *)(MODULES_END));
+ create_mapping((void *)PKMAP_BASE, (void *)(PKMAP_BASE + PMD_SIZE));
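	/*
	 * Conceptual sketch, not part of this hunk: with CONFIG_KASAN_VMALLOC=y
	 * the modules shadow is created lazily when a module is loaded,
	 * roughly along this assumed, simplified mainline call chain:
	 *
	 *   module_alloc(size)
	 *     __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, ...)
	 *       ... vmalloc core ...
	 *         kasan_populate_vmalloc(addr, size)   // maps real shadow pages
	 *
	 * so the boot-time create_mapping() of the modules shadow above is only
	 * required when CONFIG_KASAN_VMALLOC=n.
	 */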
/*
* KAsan may reuse the contents of kasan_early_shadow_pte directly, so