From 50596b7559bf226bb35ad55855ee979453ec06a1 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Sat, 18 Sep 2021 10:44:37 +0200
Subject: ARM: smp: Store current pointer in TPIDRURO register if available

Now that the user space TLS register is assigned on every return to user
space, we can use it to keep the 'current' pointer while running in the
kernel. This removes the need to access it via thread_info, which is
located at the base of the stack, but will be moved out of there in a
subsequent patch.

Use the __builtin_thread_pointer() helper when available - this will
help GCC understand that reloading the value within the same function
is not necessary, even when using the per-task stack protector (which
also generates accesses via the TLS register).

For example, the generated code below loads TPIDRURO only once, and
uses it to access both the stack canary and the preempt_count fields.

    e92d 41f0       stmdb   sp!, {r4, r5, r6, r7, r8, lr}
    ee1d 4f70       mrc     15, 0, r4, cr13, cr0, {3}
    4606            mov     r6, r0
    b094            sub     sp, #80         ; 0x50
    f8d4 34e8       ldr.w   r3, [r4, #1256] ; 0x4e8  <- stack canary
    9313            str     r3, [sp, #76]   ; 0x4c
    f8d4 8004       ldr.w   r8, [r4, #4]             <- preempt count

Co-developed-by: Keith Packard
Signed-off-by: Keith Packard
Signed-off-by: Ard Biesheuvel
Reviewed-by: Linus Walleij
Tested-by: Amit Daniel Kachhap
---
 arch/arm/mm/proc-macros.S | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index e2c743aa2eb2..d48ba99d739c 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -30,8 +30,7 @@
  * act_mm - get current->active_mm
  */
 	.macro	act_mm, rd
-	get_thread_info \rd
-	ldr	\rd, [\rd, #TI_TASK]
+	get_current \rd
 	.if	(TSK_ACTIVE_MM > IMM12_MASK)
 	add	\rd, \rd, #TSK_ACTIVE_MM & ~IMM12_MASK
 	.endif
-- cgit
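
To illustrate the mechanism described above, here is a minimal sketch of a
'current' accessor built on the TLS register. This is illustrative only: the
name get_current_sketch() is made up, and the kernel's real accessor lives in
arch/arm/include/asm/current.h.

    /* Hedged sketch: assumes TPIDRURO holds 'current', as set up above. */
    struct task_struct;

    static inline struct task_struct *get_current_sketch(void)
    {
    #if defined(__has_builtin) && __has_builtin(__builtin_thread_pointer)
            /*
             * Letting the compiler emit the TLS read allows it to treat
             * the value as invariant and load it only once per function.
             */
            return (struct task_struct *)__builtin_thread_pointer();
    #else
            struct task_struct *cur;

            /* Read the user read-only TLS register (TPIDRURO) directly. */
            asm("mrc p15, 0, %0, c13, c0, 3" : "=r" (cur));
            return cur;
    #endif
    }
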
From b8bc0e50a32a3c3b51605e41de41b4d7952e6aa4 Mon Sep 17 00:00:00 2001
From: "Russell King (Oracle)"
Date: Tue, 21 Sep 2021 12:57:22 +0100
Subject: ARM: add __arm_iomem_set_ro() to write-protect ioremapped area

__arm_iomem_set_ro() marks an ioremapped area read-only. This is
intended for use with __arm_ioremap_exec() to allow the kernel to write
some code into e.g. SRAM and then write-protect it so the kernel
doesn't complain about W+X mappings.

Tested-by: Fabio Estevam
Signed-off-by: Russell King (Oracle)
---
 arch/arm/mm/ioremap.c | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 80fb5a4a5c05..6e830b9418c9 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -36,6 +36,7 @@
 #include
 #include
 #include
+#include <asm/set_memory.h>
 #include
 #include
@@ -401,6 +402,11 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
 				   __builtin_return_address(0));
 }
 
+void __arm_iomem_set_ro(void __iomem *ptr, size_t size)
+{
+	set_memory_ro((unsigned long)ptr, PAGE_ALIGN(size) / PAGE_SIZE);
+}
+
 void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
 {
 	return (__force void *)arch_ioremap_caller(phys_addr, size,
-- cgit
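
A hedged usage sketch of the new helper: a driver copies a code fragment into
SRAM and then write-protects it. The SRAM address/size and the sram_load()
wrapper are hypothetical, for illustration only.

    #include <linux/io.h>
    #include <linux/sizes.h>

    #define SRAM_PHYS       0x00900000      /* hypothetical SRAM base */
    #define SRAM_SIZE       SZ_16K          /* hypothetical SRAM size */

    static void __iomem *sram_load(const void *code, size_t len)
    {
            void __iomem *base;

            /* Map the SRAM with an executable mapping... */
            base = __arm_ioremap_exec(SRAM_PHYS, SRAM_SIZE, false);
            if (!base)
                    return NULL;

            /* ...copy the code into it... */
            memcpy_toio(base, code, len);

            /* ...then drop write permission to avoid a W+X mapping. */
            __arm_iomem_set_ro(base, SRAM_SIZE);
            return base;
    }
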
From caed89dab0ca0e73d7e016c04e1f5957650f4ec3 Mon Sep 17 00:00:00 2001
From: Wang Kefeng
Date: Wed, 22 Sep 2021 14:56:27 +0100
Subject: ARM: 9128/1: mm: Refactor the __do_page_fault()

Clean up the multiple goto statements and drop the local variable
vm_fault_t fault, which makes __do_page_fault() much more readable.

No functional change.

Signed-off-by: Kefeng Wang
Signed-off-by: Russell King (Oracle)
---
 arch/arm/mm/fault.c | 34 +++++++++++++---------------------
 1 file changed, 13 insertions(+), 21 deletions(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index efa402025031..662ac3ca3c8a 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -205,35 +205,27 @@ __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
 		unsigned int flags, struct task_struct *tsk,
 		struct pt_regs *regs)
 {
-	struct vm_area_struct *vma;
-	vm_fault_t fault;
-
-	vma = find_vma(mm, addr);
-	fault = VM_FAULT_BADMAP;
+	struct vm_area_struct *vma = find_vma(mm, addr);
 	if (unlikely(!vma))
-		goto out;
-	if (unlikely(vma->vm_start > addr))
-		goto check_stack;
+		return VM_FAULT_BADMAP;
+
+	if (unlikely(vma->vm_start > addr)) {
+		if (!(vma->vm_flags & VM_GROWSDOWN))
+			return VM_FAULT_BADMAP;
+		if (addr < FIRST_USER_ADDRESS)
+			return VM_FAULT_BADMAP;
+		if (expand_stack(vma, addr))
+			return VM_FAULT_BADMAP;
+	}
 
 	/*
 	 * Ok, we have a good vm_area for this
 	 * memory access, so we can handle it.
 	 */
-good_area:
-	if (access_error(fsr, vma)) {
-		fault = VM_FAULT_BADACCESS;
-		goto out;
-	}
+	if (access_error(fsr, vma))
+		return VM_FAULT_BADACCESS;
 
 	return handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
-
-check_stack:
-	/* Don't allow expansion below FIRST_USER_ADDRESS */
-	if (vma->vm_flags & VM_GROWSDOWN &&
-	    addr >= FIRST_USER_ADDRESS && !expand_stack(vma, addr))
-		goto good_area;
-out:
-	return fault;
 }
 
 static int __kprobes
-- cgit
From 488cab12c37169687015fbdfa8be7b0da42f3146 Mon Sep 17 00:00:00 2001
From: Wang Kefeng
Date: Wed, 22 Sep 2021 14:56:28 +0100
Subject: ARM: 9129/1: mm: Kill task_struct argument for __do_page_fault()

__do_page_fault() no longer uses its task_struct argument, so kill it
and use current->mm directly in do_page_fault().

No functional change.

Signed-off-by: Kefeng Wang
Signed-off-by: Russell King (Oracle)
---
 arch/arm/mm/fault.c | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 662ac3ca3c8a..249db395bdf0 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -202,8 +202,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
 
 static vm_fault_t __kprobes
 __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
-		unsigned int flags, struct task_struct *tsk,
-		struct pt_regs *regs)
+		unsigned int flags, struct pt_regs *regs)
 {
 	struct vm_area_struct *vma = find_vma(mm, addr);
 	if (unlikely(!vma))
@@ -231,8 +230,7 @@ __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
 static int __kprobes
 do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
-	struct task_struct *tsk;
-	struct mm_struct *mm;
+	struct mm_struct *mm = current->mm;
 	int sig, code;
 	vm_fault_t fault;
 	unsigned int flags = FAULT_FLAG_DEFAULT;
@@ -240,8 +238,6 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	if (kprobe_page_fault(regs, fsr))
 		return 0;
 
-	tsk = current;
-	mm = tsk->mm;
 
 	/* Enable interrupts if they were enabled in the parent context. */
 	if (interrupts_enabled(regs))
@@ -285,7 +281,7 @@ retry:
 #endif
 	}
 
-	fault = __do_page_fault(mm, addr, fsr, flags, tsk, regs);
+	fault = __do_page_fault(mm, addr, fsr, flags, regs);
 
 	/* If we need to retry but a fatal signal is pending, handle the
 	 * signal first. We do not need to release the mmap_lock because
-- cgit
From f177b06ed7d56c1d8256769d1a6e1feec90153dc Mon Sep 17 00:00:00 2001
From: Wang Kefeng
Date: Wed, 22 Sep 2021 14:56:29 +0100
Subject: ARM: 9127/1: mm: Cleanup access_error()

The write fault is currently checked twice, in both do_page_fault() and
access_error(). Clean this up by killing access_error(): do the fault
check and set the vma flags directly in do_page_fault(), then pass the
vma flags down to __do_page_fault().

No functional change.

Signed-off-by: Kefeng Wang
Signed-off-by: Russell King (Oracle)
---
 arch/arm/mm/fault.c | 38 ++++++++++++++------------------------
 1 file changed, 14 insertions(+), 24 deletions(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 249db395bdf0..9a6d74f6ea1d 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -183,26 +183,9 @@ void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 #define VM_FAULT_BADMAP		0x010000
 #define VM_FAULT_BADACCESS	0x020000
 
-/*
- * Check that the permissions on the VMA allow for the fault which occurred.
- * If we encountered a write fault, we must have write permission, otherwise
- * we allow any permission.
- */
-static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
-{
-	unsigned int mask = VM_ACCESS_FLAGS;
-
-	if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
-		mask = VM_WRITE;
-	if (fsr & FSR_LNX_PF)
-		mask = VM_EXEC;
-
-	return vma->vm_flags & mask ? false : true;
-}
-
 static vm_fault_t __kprobes
-__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
-		unsigned int flags, struct pt_regs *regs)
+__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int flags,
+		unsigned long vma_flags, struct pt_regs *regs)
 {
 	struct vm_area_struct *vma = find_vma(mm, addr);
 	if (unlikely(!vma))
@@ -218,10 +201,10 @@ __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
 	}
 
 	/*
-	 * Ok, we have a good vm_area for this
-	 * memory access, so we can handle it.
+	 * ok, we have a good vm_area for this memory access, check the
+	 * permissions on the VMA allow for the fault which occurred.
 	 */
-	if (access_error(fsr, vma))
+	if (!(vma->vm_flags & vma_flags))
 		return VM_FAULT_BADACCESS;
 
 	return handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
@@ -234,6 +217,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	int sig, code;
 	vm_fault_t fault;
 	unsigned int flags = FAULT_FLAG_DEFAULT;
+	unsigned long vm_flags = VM_ACCESS_FLAGS;
 
 	if (kprobe_page_fault(regs, fsr))
 		return 0;
@@ -252,8 +236,14 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
-	if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
+
+	if ((fsr & FSR_WRITE) && !(fsr & FSR_CM)) {
 		flags |= FAULT_FLAG_WRITE;
+		vm_flags = VM_WRITE;
+	}
+
+	if (fsr & FSR_LNX_PF)
+		vm_flags = VM_EXEC;
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
 
 	/*
@@ -281,7 +271,7 @@ retry:
 #endif
 	}
 
-	fault = __do_page_fault(mm, addr, fsr, flags, regs);
+	fault = __do_page_fault(mm, addr, flags, vm_flags, regs);
 
 	/* If we need to retry but a fatal signal is pending, handle the
 	 * signal first. We do not need to release the mmap_lock because
-- cgit
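
To see what the refactor adds up to, this hedged, user-space sketch (with
simplified stand-in values for the kernel's FSR_* and VM_* constants) mimics
how do_page_fault() now derives the permission mask that __do_page_fault()
tests with !(vma->vm_flags & vma_flags):

    #include <stdio.h>

    /* Simplified stand-ins for the kernel's FSR_* and VM_* definitions. */
    #define FSR_WRITE       (1U << 11)
    #define FSR_CM          (1U << 13)
    #define FSR_LNX_PF      (1U << 31)
    #define VM_READ         0x1UL
    #define VM_WRITE        0x2UL
    #define VM_EXEC         0x4UL
    #define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)

    static unsigned long fault_vm_flags(unsigned int fsr)
    {
            unsigned long vm_flags = VM_ACCESS_FLAGS; /* any access is fine */

            if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
                    vm_flags = VM_WRITE;  /* write fault: need VM_WRITE */
            if (fsr & FSR_LNX_PF)
                    vm_flags = VM_EXEC;   /* prefetch abort: need VM_EXEC */
            return vm_flags;
    }

    int main(void)
    {
            printf("read : %#lx\n", fault_vm_flags(0));
            printf("write: %#lx\n", fault_vm_flags(FSR_WRITE));
            printf("exec : %#lx\n", fault_vm_flags(FSR_LNX_PF));
            return 0;
    }
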
From 93d2043844012d85bf4d36388303d7a0ade87a19 Mon Sep 17 00:00:00 2001
From: Wang Kefeng
Date: Wed, 22 Sep 2021 14:56:30 +0100
Subject: ARM: 9126/1: mm: Kill page table base print in show_pte()

show_pte() currently dumps the virtual (hashed) address of the page
table base, which is useless; kill it.

Signed-off-by: Kefeng Wang
Signed-off-by: Russell King (Oracle)
---
 arch/arm/mm/fault.c | 1 -
 1 file changed, 1 deletion(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 9a6d74f6ea1d..76aced067b12 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -37,7 +37,6 @@ void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr)
 	if (!mm)
 		mm = &init_mm;
 
-	printk("%spgd = %p\n", lvl, mm->pgd);
 	pgd = pgd_offset(mm, addr);
 	printk("%s[%08lx] *pgd=%08llx", lvl, addr, (long long)pgd_val(*pgd));
 
-- cgit
From 2e707106fac7f81f780d7c76770a726c46757a84 Mon Sep 17 00:00:00 2001
From: Wang Kefeng
Date: Wed, 22 Sep 2021 14:56:31 +0100
Subject: ARM: 9130/1: mm: Provide die_kernel_fault() helper

Provide a die_kernel_fault() helper to do the kernel fault reporting;
its msg argument allows a different message to be reported in different
scenarios. The later patch "ARM: mm: Fix PXN process with LPAE feature"
will use it.

Signed-off-by: Kefeng Wang
Signed-off-by: Russell King (Oracle)
---
 arch/arm/mm/fault.c | 30 +++++++++++++++++++++---------
 1 file changed, 21 insertions(+), 9 deletions(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 76aced067b12..82bcfe57de20 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -99,6 +99,21 @@ void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr)
 { }
 #endif					/* CONFIG_MMU */
 
+static void die_kernel_fault(const char *msg, struct mm_struct *mm,
+			     unsigned long addr, unsigned int fsr,
+			     struct pt_regs *regs)
+{
+	bust_spinlocks(1);
+	pr_alert("8<--- cut here ---\n");
+	pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
+		 msg, addr);
+
+	show_pte(KERN_ALERT, mm, addr);
+	die("Oops", regs, fsr);
+	bust_spinlocks(0);
+	do_exit(SIGKILL);
+}
+
 /*
  * Oops. The kernel tried to access some page that wasn't present.
  */
@@ -106,6 +121,7 @@ static void
 __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
 		  struct pt_regs *regs)
 {
+	const char *msg;
 	/*
 	 * Are we prepared to handle this kernel fault?
 	 */
@@ -115,16 +131,12 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
 	/*
 	 * No handler, we'll have to terminate things with extreme prejudice.
 	 */
-	bust_spinlocks(1);
-	pr_alert("8<--- cut here ---\n");
-	pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
-		 (addr < PAGE_SIZE) ? "NULL pointer dereference" :
-		 "paging request", addr);
+	if (addr < PAGE_SIZE)
+		msg = "NULL pointer dereference";
+	else
+		msg = "paging request";
 
-	show_pte(KERN_ALERT, mm, addr);
-	die("Oops", regs, fsr);
-	bust_spinlocks(0);
-	do_exit(SIGKILL);
+	die_kernel_fault(msg, mm, addr, fsr, regs);
 }
 
-- cgit
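
With the helper in place, both kernel-fault paths emit the same banner. For a
NULL pointer dereference at address 0, the first two report lines, as
determined by the pr_alert() format strings above, would read:

    8<--- cut here ---
    Unable to handle kernel NULL pointer dereference at virtual address 00000000
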
From abc25bbcb55c08183d30ee79a308aeef16b62dcb Mon Sep 17 00:00:00 2001
From: Wang Kefeng
Date: Wed, 22 Sep 2021 14:56:32 +0100
Subject: ARM: 9131/1: mm: Fix PXN process with LPAE feature

When user code is executed in privileged mode, it leads to an infinite
loop in the page fault handler if ARM_LPAE is enabled. The issue can be
reproduced with
"echo EXEC_USERSPACE > /sys/kernel/debug/provoke-crash/DIRECT".

As the permission fault encodings in the ARM spec show:

IFSR format when using the Short-descriptor translation table format:

  Permission fault:
  01101  First level
  01111  Second level

IFSR format when using the Long-descriptor translation table format:

  Permission fault:
  0011LL  LL bits indicate level.

Add an is_permission_fault() function to check for permission faults,
and die if a permission fault occurred on an instruction fault in
do_page_fault().

Fixes: 1d4d37159d01 ("ARM: 8235/1: Support for the PXN CPU feature on ARMv7")
Signed-off-by: Kefeng Wang
Signed-off-by: Russell King (Oracle)
---
 arch/arm/mm/fault.c | 20 +++++++++++++++++++-
 arch/arm/mm/fault.h |  4 ++++
 2 files changed, 23 insertions(+), 1 deletion(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 82bcfe57de20..bc8779d54a64 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -194,6 +194,19 @@ void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 #define VM_FAULT_BADMAP		0x010000
 #define VM_FAULT_BADACCESS	0x020000
 
+static inline bool is_permission_fault(unsigned int fsr)
+{
+	int fs = fsr_fs(fsr);
+#ifdef CONFIG_ARM_LPAE
+	if ((fs & FS_PERM_NOLL_MASK) == FS_PERM_NOLL)
+		return true;
+#else
+	if (fs == FS_L1_PERM || fs == FS_L2_PERM)
+		return true;
+#endif
+	return false;
+}
+
 static vm_fault_t __kprobes
 __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int flags,
 		unsigned long vma_flags, struct pt_regs *regs)
@@ -253,9 +266,14 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 		vm_flags = VM_WRITE;
 	}
 
-	if (fsr & FSR_LNX_PF)
+	if (fsr & FSR_LNX_PF) {
 		vm_flags = VM_EXEC;
 
+		if (is_permission_fault(fsr) && !user_mode(regs))
+			die_kernel_fault("execution of memory",
+					 mm, addr, fsr, regs);
+	}
+
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
 
 	/*
diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
index 9ecc2097a87a..83b5ab32d7a4 100644
--- a/arch/arm/mm/fault.h
+++ b/arch/arm/mm/fault.h
@@ -14,6 +14,8 @@
 
 #ifdef CONFIG_ARM_LPAE
 #define FSR_FS_AEA		17
+#define FS_PERM_NOLL		0xC
+#define FS_PERM_NOLL_MASK	0x3C
 
 static inline int fsr_fs(unsigned int fsr)
 {
@@ -21,6 +23,8 @@ static inline int fsr_fs(unsigned int fsr)
 }
 #else
 #define FSR_FS_AEA		22
+#define FS_L1_PERM		0xD
+#define FS_L2_PERM		0xF
 
 static inline int fsr_fs(unsigned int fsr)
 {
-- cgit
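
The LPAE branch of is_permission_fault() is a pure mask test: any status code
of the form 0011LL (0x0C through 0x0F, LL being the walk level) is a
permission fault. A small self-contained C check of that decoding, as a
stand-alone sketch rather than kernel code:

    #include <assert.h>

    #define FS_PERM_NOLL            0xC
    #define FS_PERM_NOLL_MASK       0x3C

    /* Mirrors the LPAE branch of is_permission_fault() above. */
    static int is_permission_fault_lpae(int fs)
    {
            return (fs & FS_PERM_NOLL_MASK) == FS_PERM_NOLL;
    }

    int main(void)
    {
            /* Permission faults at any lookup level match... */
            assert(is_permission_fault_lpae(0x0C));
            assert(is_permission_fault_lpae(0x0D));
            assert(is_permission_fault_lpae(0x0E));
            assert(is_permission_fault_lpae(0x0F));
            /* ...while e.g. a translation fault (0001LL) does not. */
            assert(!is_permission_fault_lpae(0x05));
            return 0;
    }
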
From 345dac33f58894a56d17b92a41be10e16585ceff Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Mon, 18 Oct 2021 15:30:06 +0100
Subject: ARM: 9136/1: ARMv7-M uses BE-8, not BE-32

When configuring the kernel for big-endian, we set either BE-8 or BE-32
based on the CPU architecture level. Until linux-4.4, we did not have
any ARMv7-M platform allowing big-endian builds, but now i.MX/Vybrid is
in that category, and we get a build error because of this:

arch/arm/kernel/module-plts.c: In function 'get_module_plt':
arch/arm/kernel/module-plts.c:60:46: error: implicit declaration of function '__opcode_to_mem_thumb32' [-Werror=implicit-function-declaration]

This comes down to picking the wrong default, ARMv7-M uses BE8 like
ARMv7-A does. Changing the default gets the kernel to compile and
presumably works.

Link: https://lore.kernel.org/all/1455804123-2526139-2-git-send-email-arnd@arndb.de/
Tested-by: Vladimir Murzin
Signed-off-by: Arnd Bergmann
Signed-off-by: Russell King (Oracle)
---
 arch/arm/mm/Kconfig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 8355c3895894..82aa990c4180 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -750,7 +750,7 @@ config CPU_BIG_ENDIAN
 config CPU_ENDIAN_BE8
 	bool
 	depends on CPU_BIG_ENDIAN
-	default CPU_V6 || CPU_V6K || CPU_V7
+	default CPU_V6 || CPU_V6K || CPU_V7 || CPU_V7M
 	help
 	  Support for the BE-8 (big-endian) mode on ARMv6 and ARMv7 processors.
-- cgit
From 8b5bd5adf9e6d073e538e2e9de810f2f25379c3b Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Mon, 18 Oct 2021 15:30:07 +0100
Subject: ARM: 9137/1: disallow CONFIG_THUMB with ARMv4

We can currently build a multi-cpu enabled kernel that allows both
ARMv4 and ARMv5 CPUs, and also supports THUMB mode in user space.

However, returning to user space in this configuration with the usr_ret
macro requires the use of the 'bx' instruction, which is refused by the
assembler:

arch/arm/kernel/entry-armv.S: Assembler messages:
arch/arm/kernel/entry-armv.S:937: Error: selected processor does not support `bx lr' in ARM mode
arch/arm/kernel/entry-armv.S:960: Error: selected processor does not support `bx lr' in ARM mode
arch/arm/kernel/entry-armv.S:1003: Error: selected processor does not support `bx lr' in ARM mode
:2:2: note: instruction requires: armv4t
 bx lr

While it would be possible to handle this correctly in principle, doing
so seems to not be worth it, if we can simply avoid the problem by
enforcing that a kernel supporting both ARMv4 and a later CPU
architecture cannot run THUMB binaries.

This turned up while build-testing with clang; for some reason, gcc
never triggered the problem.

Reviewed-by: Nick Desaulniers
Reviewed-by: Ard Biesheuvel
Signed-off-by: Arnd Bergmann
Signed-off-by: Russell King (Oracle)
---
 arch/arm/mm/Kconfig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 82aa990c4180..58afba346729 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -675,7 +675,7 @@ config ARM_PV_FIXUP
 
 config ARM_THUMB
 	bool "Support Thumb user binaries" if !CPU_THUMBONLY && EXPERT
-	depends on CPU_THUMB_CAPABLE
+	depends on CPU_THUMB_CAPABLE && !CPU_32v4
 	default y
 	help
 	  Say Y if you want to include kernel support for running user space
-- cgit
From c2e6df3eaaf120cde5e7c3a70590dd82e427458a Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Mon, 18 Oct 2021 15:30:38 +0100
Subject: ARM: 9142/1: kasan: work around LPAE build warning

pgd_page_vaddr() returns an 'unsigned long' address, causing a warning
with the memcpy() call in kasan_init():

arch/arm/mm/kasan_init.c: In function 'kasan_init':
include/asm-generic/pgtable-nop4d.h:44:50: error: passing argument 2 of '__memcpy' makes pointer from integer without a cast [-Werror=int-conversion]
   44 | #define pgd_page_vaddr(pgd)   ((unsigned long)(p4d_pgtable((p4d_t){ pgd })))
      |                               ~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      |                                |
      |                                long unsigned int
arch/arm/include/asm/string.h:58:45: note: in definition of macro 'memcpy'
   58 | #define memcpy(dst, src, len) __memcpy(dst, src, len)
      |                                        ^~~
arch/arm/mm/kasan_init.c:229:16: note: in expansion of macro 'pgd_page_vaddr'
  229 |                pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_START)),
      |                ^~~~~~~~~~~~~~
arch/arm/include/asm/string.h:21:47: note: expected 'const void *' but argument is of type 'long unsigned int'
   21 | extern void *__memcpy(void *dest, const void *src, __kernel_size_t n);
      |             ~~~~~~~~~~~~^~~

Avoid this by adding an explicit typecast.

Link: https://lore.kernel.org/all/CACRpkdb3DMvof3-xdtss0Pc6KM36pJA-iy=WhvtNVnsDpeJ24Q@mail.gmail.com/
Fixes: 5615f69bc209 ("ARM: 9016/2: Initialize the mapping of KASan shadow memory")
Reviewed-by: Linus Walleij
Signed-off-by: Arnd Bergmann
Signed-off-by: Russell King (Oracle)
---
 arch/arm/mm/kasan_init.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/kasan_init.c b/arch/arm/mm/kasan_init.c
index 9c348042a724..4b1619584b23 100644
--- a/arch/arm/mm/kasan_init.c
+++ b/arch/arm/mm/kasan_init.c
@@ -226,7 +226,7 @@ void __init kasan_init(void)
 	BUILD_BUG_ON(pgd_index(KASAN_SHADOW_START) !=
 		     pgd_index(KASAN_SHADOW_END));
 	memcpy(tmp_pmd_table,
-	       pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_START)),
+	       (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_START)),
 	       sizeof(tmp_pmd_table));
 	set_pgd(&tmp_pgd_table[pgd_index(KASAN_SHADOW_START)],
 		__pgd(__pa(tmp_pmd_table) | PMD_TYPE_TABLE | L_PGD_SWAPPER));
-- cgit
From fa191b711c32ba107cf8d3474cd860407b7e997a Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Fri, 29 Oct 2021 17:45:32 +0100
Subject: ARM: 9150/1: Fix PID_IN_CONTEXTIDR regression when THREAD_INFO_IN_TASK=y

The code that implements the rarely used PID_IN_CONTEXTIDR feature
dereferences the 'task' field of struct thread_info directly, and this
is no longer possible when THREAD_INFO_IN_TASK=y, as the 'task' field
is omitted from the struct definition in that case. Instead, we should
just cast the thread_info pointer to a task_struct pointer, given that
the former is now the first member of the latter.

So use a helper that abstracts this, and provide implementations for
both cases.

Reported-by: Arnd Bergmann
Fixes: 18ed1c01a7dd ("ARM: smp: Enable THREAD_INFO_IN_TASK")
Signed-off-by: Ard Biesheuvel
Signed-off-by: Russell King (Oracle)
---
 arch/arm/mm/context.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index b7525b433f3e..48091870db89 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -109,7 +109,7 @@ static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
 	if (cmd != THREAD_NOTIFY_SWITCH)
 		return NOTIFY_DONE;
 
-	pid = task_pid_nr(thread->task) << ASID_BITS;
+	pid = task_pid_nr(thread_task(thread)) << ASID_BITS;
 	asm volatile(
 	"	mrc	p15, 0, %0, c13, c0, 1\n"
 	"	and	%0, %0, %2\n"
-- cgit
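
A sketch of what the thread_task() helper used above can look like for the
two configurations, mirroring the commit's description; the real definition
lives in the arm thread_info header, so treat this as illustrative only:

    #ifdef CONFIG_THREAD_INFO_IN_TASK
    static inline struct task_struct *thread_task(struct thread_info *ti)
    {
            /* thread_info is the first member of task_struct, so the
             * two pointers are interchangeable. */
            return (struct task_struct *)ti;
    }
    #else
    static inline struct task_struct *thread_task(struct thread_info *ti)
    {
            return ti->task;
    }
    #endif
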