From 6b076991dca9817e75c37e2f0db6d52611ea42fa Mon Sep 17 00:00:00 2001
From: Russell King
Date: Thu, 17 Jul 2014 12:17:45 +0100
Subject: ARM: DMA: ensure that old section mappings are flushed from the TLB

When setting up the CMA region, we must ensure that the old section
mappings are flushed from the TLB before replacing them with page
tables, otherwise we can suffer from mismatched aliases if the CPU
speculatively prefetches from these mappings at an inopportune time.

A mismatched alias can occur when the TLB contains a section mapping,
but a subsequent prefetch causes it to load a page table mapping,
resulting in the possibility of the TLB containing two matching
mappings for the same virtual address region.

Acked-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/mm/dma-mapping.c | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 4c88935654ca..1f88db06b133 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -461,12 +461,21 @@ void __init dma_contiguous_remap(void)
                 map.type = MT_MEMORY_DMA_READY;
 
                 /*
-                 * Clear previous low-memory mapping
+                 * Clear previous low-memory mapping to ensure that the
+                 * TLB does not see any conflicting entries, then flush
+                 * the TLB of the old entries before creating new mappings.
+                 *
+                 * This ensures that any speculatively loaded TLB entries
+                 * (even though they may be rare) can not cause any problems,
+                 * and ensures that this code is architecturally compliant.
                  */
                 for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
                      addr += PMD_SIZE)
                         pmd_clear(pmd_off_k(addr));
 
+                flush_tlb_kernel_range(__phys_to_virt(start),
+                                       __phys_to_virt(end));
+
                 iotable_init(&map, 1);
         }
 }
--
cgit
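
[Note: the failure mode described above is the generic break-before-make
requirement for TLB maintenance. The userspace C sketch below is purely
illustrative -- the miniature "TLB" array and every name in it are
invented for this note, none of it is kernel code. It shows how a cached
translation survives a mapping change unless it is explicitly
invalidated between the clear and the remap.]

    #include <stdio.h>
    #include <string.h>

    #define ENTRIES 4

    static long tlb[ENTRIES];    /* cached translations (the "TLB") */
    static long table[ENTRIES];  /* authoritative "page table" */

    static long lookup(int va)
    {
        if (tlb[va])
            return tlb[va];          /* hit: possibly stale */
        return tlb[va] = table[va];  /* miss: walk table, cache result */
    }

    static void flush_range(int start, int end)
    {
        memset(&tlb[start], 0, (end - start) * sizeof(tlb[0]));
    }

    int main(void)
    {
        table[0] = 0x1000;   /* old section mapping */
        lookup(0);           /* a speculative access caches it */

        table[0] = 0x2000;   /* replace with the page-table mapping */
        printf("no flush: %#lx\n", lookup(0));  /* stale 0x1000 */

        flush_range(0, 1);
        printf("flushed:  %#lx\n", lookup(0));  /* fresh 0x2000 */
        return 0;
    }
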
From 8cf2389bc4456057a4d9663d22429fb677f1b13e Mon Sep 17 00:00:00 2001
From: Sebastian Hesselbarth
Date: Mon, 14 Jul 2014 16:23:29 +0100
Subject: ARM: 8100/1: Fix preemption disable in iwmmxt_task_enable()

commit 431a84b1a4f7d1a0085d5b91330c5053cc8e8b12 ("ARM: 8034/1: Disable
preemption in iwmmxt_task_enable()") introduced macros
{inc,dec}_preempt_count to iwmmxt_task_enable to make it run with
preemption disabled.

Unfortunately, other functions in iwmmxt.S also use the
concan_{save,dump,load} sections located in iwmmxt_task_enable() to
deal with the iWMMXt coprocessor. This causes an unbalanced
preempt_count due to excessive dec_preempt_count calls, and destroys
return addresses in callers of the concan_ labels due to a register
collision:

Linux version 3.16.0-rc3-00062-gd92a333-dirty (jef@armhf) (gcc version 4.8.3 (Debian 4.8.3-4) ) #5 PREEMPT Thu Jul 3 19:46:39 CEST 2014
CPU: ARMv7 Processor [560f5815] revision 5 (ARMv7), cr=10c5387d
CPU: PIPT / VIPT nonaliasing data cache, PIPT instruction cache
Machine model: SolidRun CuBox
...
PJ4 iWMMXt v2 coprocessor enabled.
...
Unable to handle kernel paging request at virtual address fffffffe
pgd = bb25c000
[fffffffe] *pgd=3bfde821, *pte=00000000, *ppte=00000000
Internal error: Oops: 80000007 [#1] PREEMPT ARM
Modules linked in:
CPU: 0 PID: 62 Comm: startpar Not tainted 3.16.0-rc3-00062-gd92a333-dirty #5
task: bb230b80 ti: bb256000 task.ti: bb256000
PC is at 0xfffffffe
LR is at iwmmxt_task_copy+0x44/0x4c
pc : [<fffffffe>]    lr : [<800130ac>]    psr: 40000033
sp : bb257de8  ip : 00000013  fp : bb257ea4
r10: bb256000  r9 : fffffdfe  r8 : 76e898e6
r7 : bb257ec8  r6 : bb256000  r5 : 7ea12760  r4 : 000000a0
r3 : ffffffff  r2 : 00000003  r1 : bb257df8  r0 : 00000000
Flags: nZcv  IRQs on  FIQs on  Mode SVC_32  ISA Thumb  Segment user
Control: 10c5387d  Table: 3b25c019  DAC: 00000015
Process startpar (pid: 62, stack limit = 0xbb256248)

This patch fixes the issue by moving concan_{save,dump,load} into
separate code sections and making iwmmxt_task_enable() call them in
the same way the other functions use the concan_ symbols. The test
for valid ownership is moved to concan_save and is safe for the other
user of it, iwmmxt_task_disable().

The register collision is also resolved by moving the concan_ symbols,
as {inc,dec}_preempt_count are now local to iwmmxt_task_enable().

Fixes: 431a84b1a4f7 ("ARM: 8034/1: Disable preemption in iwmmxt_task_enable()")
Signed-off-by: Sebastian Hesselbarth
Acked-by: Catalin Marinas
Reported-by: Jean-Francois Moine
Signed-off-by: Russell King
---
 arch/arm/kernel/iwmmxt.S | 23 ++++++++++++-----------
 1 file changed, 12 insertions(+), 11 deletions(-)

diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S
index a5599cfc43cb..2b32978ae905 100644
--- a/arch/arm/kernel/iwmmxt.S
+++ b/arch/arm/kernel/iwmmxt.S
@@ -94,13 +94,19 @@ ENTRY(iwmmxt_task_enable)
         mrc     p15, 0, r2, c2, c0, 0
         mov     r2, r2                          @ cpwait
+        bl      concan_save
 
-        teq     r1, #0                          @ test for last ownership
-        mov     lr, r9                          @ normal exit from exception
-        beq     concan_load                     @ no owner, skip save
+#ifdef CONFIG_PREEMPT_COUNT
+        get_thread_info r10
+#endif
+4:      dec_preempt_count r10, r3
+        mov     pc, r9                          @ normal exit from exception
 
 concan_save:
+        teq     r1, #0                          @ test for last ownership
+        beq     concan_load                     @ no owner, skip save
+
         tmrc    r2, wCon
 
         @ CUP? wCx
@@ -138,7 +144,7 @@ concan_dump:
         wstrd   wR15, [r1, #MMX_WR15]
 
 2:      teq     r0, #0                          @ anything to load?
-        beq     3f
+        moveq   pc, lr                          @ if not, return
 
 concan_load:
@@ -171,14 +177,9 @@ concan_load:
         @ clear CUP/MUP (only if r1 != 0)
         teq     r1, #0
         mov     r2, #0
-        beq     3f
-        tmcr    wCon, r2
+        moveq   pc, lr
 
-3:
-#ifdef CONFIG_PREEMPT_COUNT
-        get_thread_info r10
-#endif
-4:      dec_preempt_count r10, r3
+        tmcr    wCon, r2
         mov     pc, lr
 
 /*
--
cgit
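
[Note: the preempt_count imbalance is easier to see in C than in
assembler. The sketch below is a toy userspace model, not kernel code --
the function names echo the commit text but the bodies are invented.
Only the entry point that incremented the count may decrement it; once
the decrement sits in a shared tail, every other caller drives the
count negative.]

    #include <stdio.h>

    static int preempt_count;

    static void inc_preempt_count(void) { preempt_count++; }
    static void dec_preempt_count(void) { preempt_count--; }

    /* Shared tail that (wrongly) ends with the decrement. */
    static void concan_tail(void)
    {
        /* ...save/load coprocessor state... */
        dec_preempt_count();
    }

    static void iwmmxt_task_enable(void)
    {
        inc_preempt_count();   /* this path did the increment */
        concan_tail();
    }

    static void iwmmxt_task_copy(void)
    {
        concan_tail();         /* this caller never incremented */
    }

    int main(void)
    {
        iwmmxt_task_enable();  /* balanced: +1 then -1 */
        iwmmxt_task_copy();    /* unbalanced: -1 with no +1 */
        printf("preempt_count = %d\n", preempt_count);  /* prints -1 */
        return 0;
    }
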
Integrator family" select ARM_AMBA - select ARM_PATCH_PHYS_VIRT + select ARM_PATCH_PHYS_VIRT if MMU select AUTO_ZRELADDR select COMMON_CLK select COMMON_CLK_VERSATILE @@ -658,7 +658,7 @@ config ARCH_MSM config ARCH_SHMOBILE_LEGACY bool "Renesas ARM SoCs (non-multiplatform)" select ARCH_SHMOBILE - select ARM_PATCH_PHYS_VIRT + select ARM_PATCH_PHYS_VIRT if MMU select CLKDEV_LOOKUP select GENERIC_CLOCKEVENTS select HAVE_ARM_SCU if SMP -- cgit From 823a19cd3b91b0729d7417f1848413846be61712 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 29 Jul 2014 09:24:47 +0100 Subject: ARM: fix alignment of keystone page table fixup If init_mm.brk is not section aligned, the LPAE fixup code will miss updating the final PMD. Fix this by aligning map_end. Fixes: a77e0c7b2774 ("ARM: mm: Recreate kernel mappings in early_paging_init()") Cc: Signed-off-by: Russell King --- arch/arm/mm/mmu.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index ab14b79b03f0..6e3ba8d112a2 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -1406,8 +1406,8 @@ void __init early_paging_init(const struct machine_desc *mdesc, return; /* remap kernel code and data */ - map_start = init_mm.start_code; - map_end = init_mm.brk; + map_start = init_mm.start_code & PMD_MASK; + map_end = ALIGN(init_mm.brk, PMD_SIZE); /* get a handle on things... */ pgd0 = pgd_offset_k(0); @@ -1442,7 +1442,7 @@ void __init early_paging_init(const struct machine_desc *mdesc, } /* remap pmds for kernel mapping */ - phys = __pa(map_start) & PMD_MASK; + phys = __pa(map_start); do { *pmdk++ = __pmd(phys | pmdprot); phys += PMD_SIZE; -- cgit From 811a2407a3cf7bbd027fbe92d73416f17485a3d8 Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Fri, 25 Jul 2014 09:17:12 +0100 Subject: ARM: 8115/1: LPAE: reduce damage caused by idmap to virtual memory layout On LPAE, each level 1 (pgd) page table entry maps 1GiB, and the level 2 (pmd) entries map 2MiB. When the identity mapping is created on LPAE, the pgd pointers are copied from the swapper_pg_dir. If we find that we need to modify the contents of a pmd, we allocate a new empty pmd table and insert it into the appropriate 1GB slot, before then filling it with the identity mapping. However, if the 1GB slot covers the kernel lowmem mappings, we obliterate those mappings. When replacing a PMD, first copy the old PMD contents to the new PMD, so that we preserve the existing mappings, particularly the mappings of the kernel itself. [rewrote commit message and added code comment -- rmk] Fixes: ae2de101739c ("ARM: LPAE: Add identity mapping support for the 3-level page table format") Signed-off-by: Konstantin Khlebnikov Signed-off-by: Russell King --- arch/arm/mm/idmap.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c index 8e0e52eb76b5..d7a0ee898d24 100644 --- a/arch/arm/mm/idmap.c +++ b/arch/arm/mm/idmap.c @@ -25,6 +25,13 @@ static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end, pr_warning("Failed to allocate identity pmd.\n"); return; } + /* + * Copy the original PMD to ensure that the PMD entries for + * the kernel image are preserved. 
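
[Note: the keystone rounding is plain bit arithmetic and can be checked
in userspace. In the sketch below, PMD_SHIFT, PMD_SIZE, PMD_MASK and
ALIGN are local definitions assumed to mirror the kernel macros for a
2MiB LPAE section, and the two addresses are made-up examples.]

    #include <stdio.h>

    #define PMD_SHIFT   21
    #define PMD_SIZE    (1UL << PMD_SHIFT)            /* 2MiB section */
    #define PMD_MASK    (~(PMD_SIZE - 1))
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        unsigned long start_code = 0xc0008000UL;  /* not section aligned */
        unsigned long brk        = 0xc0a1c000UL;  /* not section aligned */

        /* Round the start down and the end up so that the range covers
         * whole sections, including the final, partially used PMD that
         * an unaligned brk would otherwise leave out. */
        unsigned long map_start = start_code & PMD_MASK;
        unsigned long map_end   = ALIGN(brk, PMD_SIZE);

        printf("map_start = %#lx\n", map_start);  /* 0xc0000000 */
        printf("map_end   = %#lx\n", map_end);    /* 0xc0c00000 */
        return 0;
    }
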
From 811a2407a3cf7bbd027fbe92d73416f17485a3d8 Mon Sep 17 00:00:00 2001
From: Konstantin Khlebnikov
Date: Fri, 25 Jul 2014 09:17:12 +0100
Subject: ARM: 8115/1: LPAE: reduce damage caused by idmap to virtual memory layout

On LPAE, each level 1 (pgd) page table entry maps 1GiB, and the level 2
(pmd) entries map 2MiB.

When the identity mapping is created on LPAE, the pgd pointers are
copied from the swapper_pg_dir. If we find that we need to modify the
contents of a pmd, we allocate a new empty pmd table and insert it into
the appropriate 1GiB slot, before then filling it with the identity
mapping.

However, if the 1GiB slot covers the kernel lowmem mappings, we
obliterate those mappings.

When replacing a PMD, first copy the old PMD contents to the new PMD,
so that we preserve the existing mappings, particularly the mappings of
the kernel itself.

[rewrote commit message and added code comment -- rmk]

Fixes: ae2de101739c ("ARM: LPAE: Add identity mapping support for the 3-level page table format")
Signed-off-by: Konstantin Khlebnikov
Signed-off-by: Russell King
---
 arch/arm/mm/idmap.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 8e0e52eb76b5..d7a0ee898d24 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -25,6 +25,13 @@ static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
                         pr_warning("Failed to allocate identity pmd.\n");
                         return;
                 }
+                /*
+                 * Copy the original PMD to ensure that the PMD entries for
+                 * the kernel image are preserved.
+                 */
+                if (!pud_none(*pud))
+                        memcpy(pmd, pmd_offset(pud, 0),
+                               PTRS_PER_PMD * sizeof(pmd_t));
                 pud_populate(&init_mm, pud, pmd);
                 pmd += pmd_index(addr);
         } else
--
cgit

From c5cc87fa8ddf971682fc5dd112e2dfacb11f30cd Mon Sep 17 00:00:00 2001
From: Russell King
Date: Tue, 29 Jul 2014 12:18:34 +0100
Subject: ARM: idmap: add identity mapping usage note

Add a note about the usage of the identity mapping; we do not support
accesses outside of the identity map region and kernel image while a
CPU is using the identity map. This is because the identity mapping
may overwrite vmalloc space, IO mappings, the vectors pages, etc.

Acked-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/mm/idmap.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index d7a0ee898d24..c447ec70e868 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -9,6 +9,11 @@
 #include
 #include
 
+/*
+ * Note: accesses outside of the kernel image and the identity map area
+ * are not supported on any CPU using the idmap tables as its current
+ * page tables.
+ */
 pgd_t *idmap_pgd;
 phys_addr_t (*arch_virt_to_idmap) (unsigned long x);
--
cgit

From 6bf755db4d5e7ccea61fb17727a183b9bd8945b1 Mon Sep 17 00:00:00 2001
From: Omar Sandoval
Date: Fri, 1 Aug 2014 18:14:06 +0100
Subject: ARM: 8124/1: don't enter kgdb when userspace executes a kgdb break instruction

The kgdb breakpoint hooks (kgdb_brk_fn and kgdb_compiled_brk_fn) should
only be entered when a kgdb break instruction is executed from the
kernel. Otherwise, if kgdb is enabled, a userspace program can cause the
kernel to drop into the debugger by executing either KGDB_BREAKINST or
KGDB_COMPILED_BREAK.

Acked-by: Will Deacon
Signed-off-by: Omar Sandoval
Signed-off-by: Russell King
---
 arch/arm/kernel/kgdb.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index 778c2f7024ff..a74b53c1b7df 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -160,12 +160,16 @@ static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int instr)
 
 static struct undef_hook kgdb_brkpt_hook = {
         .instr_mask = 0xffffffff,
         .instr_val = KGDB_BREAKINST,
+        .cpsr_mask = MODE_MASK,
+        .cpsr_val = SVC_MODE,
         .fn = kgdb_brk_fn
 };
 
 static struct undef_hook kgdb_compiled_brkpt_hook = {
         .instr_mask = 0xffffffff,
         .instr_val = KGDB_COMPILED_BREAK,
+        .cpsr_mask = MODE_MASK,
+        .cpsr_val = SVC_MODE,
         .fn = kgdb_compiled_brk_fn
 };
--
cgit
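
[Note: the new .cpsr_mask/.cpsr_val fields take effect because the
undefined-instruction hook matching compares the masked CPSR against
the hook's value. The standalone C sketch below re-implements just that
comparison -- the struct and helper are invented for this note, and the
mode encodings are the architectural ARM values (USR = 0x10,
SVC = 0x13).]

    #include <stdio.h>

    #define MODE_MASK 0x1f
    #define USR_MODE  0x10
    #define SVC_MODE  0x13

    struct hook {
        unsigned int cpsr_mask;
        unsigned int cpsr_val;
    };

    /* A hook fires only if the masked mode bits agree. */
    static int hook_matches(const struct hook *h, unsigned int cpsr)
    {
        return (cpsr & h->cpsr_mask) == h->cpsr_val;
    }

    int main(void)
    {
        struct hook kgdb = { .cpsr_mask = MODE_MASK, .cpsr_val = SVC_MODE };

        /* A break executed in SVC (kernel) mode still enters kgdb... */
        printf("svc: %d\n", hook_matches(&kgdb, SVC_MODE));  /* 1 */
        /* ...while a user-mode break no longer matches the hook. */
        printf("usr: %d\n", hook_matches(&kgdb, USR_MODE));  /* 0 */
        return 0;
    }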