From 25d8b92e0af75d72ce8b99e63e5a449cc0888efa Mon Sep 17 00:00:00 2001
From: Karl Beldan
Date: Tue, 27 Jun 2017 19:22:16 +0000
Subject: MIPS: head: Reorder instructions missing a delay slot

In this sequence the 'move' is assumed in the delay slot of the 'beq',
but head.S is in reorder mode and the former gets pushed one 'nop'
farther by the assembler. The resulting behavior made booting with a
UHI-supplied dtb erratic.

Fixes: 15f37e158892 ("MIPS: store the appended dtb address in a variable")
Signed-off-by: Karl Beldan
Reviewed-by: James Hogan
Cc: Jonas Gorski
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Cc: stable@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/16614/
Signed-off-by: Ralf Baechle
---
 arch/mips/kernel/head.S | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index cf052204eb0a..d1bb506adc10 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -106,8 +106,8 @@ NESTED(kernel_entry, 16, sp)	# kernel entry point
 	beq	t0, t1, dtb_found
 #endif
 	li	t1, -2
-	beq	a0, t1, dtb_found
 	move	t2, a1
+	beq	a0, t1, dtb_found
 
 	li	t2, 0
 dtb_found:
-- cgit

From ddbfff7429a75d954bf5bdff9f2222bceb4c236a Mon Sep 17 00:00:00 2001
From: Aleksandar Markovic
Date: Mon, 19 Jun 2017 17:50:12 +0200
Subject: MIPS: math-emu: Handle zero accumulator case in MADDF and MSUBF separately

If the accumulator value is zero, just return the value of the
previously calculated product. This brings the logic in the MADDF/MSUBF
implementation closer to the logic in the ADD/SUB case.

Signed-off-by: Miodrag Dinic
Signed-off-by: Goran Ferenc
Signed-off-by: Aleksandar Markovic
Cc: James.Hogan@imgtec.com
Cc: Paul.Burton@imgtec.com
Cc: Raghu.Gandham@imgtec.com
Cc: Leonid.Yegoshin@imgtec.com
Cc: Douglas.Leung@imgtec.com
Cc: Petar.Jovanovic@imgtec.com
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/16512/
Signed-off-by: Ralf Baechle
---
 arch/mips/math-emu/dp_maddf.c | 5 ++++-
 arch/mips/math-emu/sp_maddf.c | 5 ++++-
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/arch/mips/math-emu/dp_maddf.c b/arch/mips/math-emu/dp_maddf.c
index 4a2d03c72959..caa62f20a888 100644
--- a/arch/mips/math-emu/dp_maddf.c
+++ b/arch/mips/math-emu/dp_maddf.c
@@ -54,7 +54,7 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
 		return ieee754dp_nanxcpt(z);
 	case IEEE754_CLASS_DNORM:
 		DPDNORMZ;
-	/* QNAN is handled separately below */
+	/* QNAN and ZERO cases are handled separately below */
 	}
 
 	switch (CLPAIR(xc, yc)) {
@@ -210,6 +210,9 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
 	}
 	assert(rm & (DP_HIDDEN_BIT << 3));
 
+	if (zc == IEEE754_CLASS_ZERO)
+		return ieee754dp_format(rs, re, rm);
+
 	/* And now the addition */
 	assert(zm & DP_HIDDEN_BIT);
 
diff --git a/arch/mips/math-emu/sp_maddf.c b/arch/mips/math-emu/sp_maddf.c
index a8cd8b4f235e..c91d5e5d9b5f 100644
--- a/arch/mips/math-emu/sp_maddf.c
+++ b/arch/mips/math-emu/sp_maddf.c
@@ -54,7 +54,7 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
 		return ieee754sp_nanxcpt(z);
 	case IEEE754_CLASS_DNORM:
 		SPDNORMZ;
-	/* QNAN is handled separately below */
+	/* QNAN and ZERO cases are handled separately below */
 	}
 
 	switch (CLPAIR(xc, yc)) {
@@ -203,6 +203,9 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
 	}
 	assert(rm & (SP_HIDDEN_BIT << 3));
 
+	if (zc == IEEE754_CLASS_ZERO)
+		return ieee754sp_format(rs, re, rm);
+
 	/* And now the addition */
 	assert(zm & SP_HIDDEN_BIT);
 
-- cgit
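The zero-accumulator short-circuit above relies on a standard FMA
identity: with z == +/-0 and a non-zero finite product, the fused result
is exactly the correctly rounded product x * y, so the emulator can
return the formatted product and skip the addition path. Below is a
minimal host-side sketch of that identity using the C99 fma() from
math.h — an illustration, not the kernel's soft-float code:

#include <assert.h>
#include <math.h>
#include <stdio.h>

int main(void)
{
	double x = 1.5, y = 3.0, z = 0.0;

	/*
	 * With a zero accumulator and a non-zero finite product,
	 * fma(x, y, z) equals the correctly rounded product x * y,
	 * for either sign of zero.
	 */
	assert(fma(x, y, z) == x * y);
	assert(fma(x, y, -z) == x * y);

	printf("fma(%g, %g, %g) = %g\n", x, y, z, fma(x, y, z));
	return 0;
}

Note that the short-circuit only fires for a zero accumulator; a zero
product (x or y being zero) is still dealt with earlier, in the
CLPAIR(xc, yc) class switch visible in the diff context.
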
From 161c51ccb7a6faf45ffe09aa5cf1ad85ccdad503 Mon Sep 17 00:00:00 2001
From: Paul Burton
Date: Thu, 2 Mar 2017 14:02:40 -0800
Subject: MIPS: pm-cps: Drop manual cache-line alignment of ready_count

We allocate memory for a ready_count variable per-CPU, which is accessed
via a cached non-coherent TLB mapping to perform synchronisation between
threads within the core using LL/SC instructions. In order to ensure
that the variable is contained within its own data cache line we
allocate 2 lines worth of memory & align the resulting pointer to a line
boundary. This is however unnecessary, since kmalloc is guaranteed to
return memory which is at least cache-line aligned (see
ARCH_DMA_MINALIGN). Stop the redundant manual alignment.

Besides cleaning up the code & avoiding needless work, this has the side
effect of avoiding an arithmetic error found by Bryan on 64 bit systems
due to the 32 bit size of the former dlinesz. This led the ready_count
variable to have its upper 32b cleared erroneously for MIPS64 kernels,
causing problems when ready_count was later used on MIPS64 via cpuidle.

Signed-off-by: Paul Burton
Fixes: 3179d37ee1ed ("MIPS: pm-cps: add PM state entry code for CPS systems")
Reported-by: Bryan O'Donoghue
Reviewed-by: Bryan O'Donoghue
Tested-by: Bryan O'Donoghue
Cc: linux-mips@linux-mips.org
Cc: stable  # v3.16+
Patchwork: https://patchwork.linux-mips.org/patch/15383/
Signed-off-by: Ralf Baechle
---
 arch/mips/kernel/pm-cps.c | 9 +--------
 1 file changed, 1 insertion(+), 8 deletions(-)

diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index 5f928c34c148..d99416094ba9 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -56,7 +56,6 @@ DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);
  * state. Actually per-core rather than per-CPU.
  */
 static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
-static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc);
 
 /* Indicates online CPUs coupled with the current CPU */
 static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
@@ -642,7 +641,6 @@ static int cps_pm_online_cpu(unsigned int cpu)
 {
 	enum cps_pm_state state;
 	unsigned core = cpu_data[cpu].core;
-	unsigned dlinesz = cpu_data[cpu].dcache.linesz;
 	void *entry_fn, *core_rc;
 
 	for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
@@ -662,16 +660,11 @@ static int cps_pm_online_cpu(unsigned int cpu)
 	}
 
 	if (!per_cpu(ready_count, core)) {
-		core_rc = kmalloc(dlinesz * 2, GFP_KERNEL);
+		core_rc = kmalloc(sizeof(u32), GFP_KERNEL);
 		if (!core_rc) {
 			pr_err("Failed allocate core %u ready_count\n", core);
 			return -ENOMEM;
 		}
-		per_cpu(ready_count_alloc, core) = core_rc;
-
-		/* Ensure ready_count is aligned to a cacheline boundary */
-		core_rc += dlinesz - 1;
-		core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1));
 		per_cpu(ready_count, core) = core_rc;
 	}
 
-- cgit
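The arithmetic error described above is worth spelling out: with a
32-bit dlinesz, ~(dlinesz - 1) is computed as a 32-bit mask, and
zero-extending that mask to 64 bits before applying it to a pointer
clears the pointer's upper 32 bits. A standalone C sketch of the old
rounding code's failure mode — the address below is made up purely for
illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int dlinesz = 32;		/* 32-bit line size, as in cpu_data */
	uint64_t ptr = 0xa8000000ff127c08ULL;	/* hypothetical 64-bit kernel pointer */

	/*
	 * Old code's idiom: ~(dlinesz - 1) is a 32-bit value, so it
	 * zero-extends to 0x00000000ffffffe0 and wipes the upper half
	 * of the 64-bit address.
	 */
	uint64_t bad  = (ptr + dlinesz - 1) & ~(dlinesz - 1);

	/* A correct mask must be widened before the complement. */
	uint64_t good = (ptr + dlinesz - 1) & ~(uint64_t)(dlinesz - 1);

	printf("bad  = %#018llx\n", (unsigned long long)bad);	/* 0x00000000ff127c20 */
	printf("good = %#018llx\n", (unsigned long long)good);	/* 0xa8000000ff127c20 */
	return 0;
}
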
From d8550860d910c6b7b70f830f59003b33daaa52c9 Mon Sep 17 00:00:00 2001
From: Paul Burton
Date: Fri, 3 Mar 2017 15:26:05 -0800
Subject: MIPS: Fix IRQ tracing & lockdep when rescheduling

When the scheduler sets TIF_NEED_RESCHED & we call into the scheduler
from arch/mips/kernel/entry.S we disable interrupts. This is true
regardless of whether we reach work_resched from syscall_exit_work,
resume_userspace or by looping after calling schedule().

Although we disable interrupts in these paths we don't call
trace_hardirqs_off() before calling into C code which may acquire
locks, and we therefore leave lockdep with an inconsistent view of
whether interrupts are disabled or not when CONFIG_PROVE_LOCKING &
CONFIG_DEBUG_LOCKDEP are both enabled.

Without tracing this interrupt state lockdep will print warnings such
as the following once a task returns from a syscall via
syscall_exit_partial with TIF_NEED_RESCHED set:

[   49.927678] ------------[ cut here ]------------
[   49.934445] WARNING: CPU: 0 PID: 1 at kernel/locking/lockdep.c:3687 check_flags.part.41+0x1dc/0x1e8
[   49.946031] DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)
[   49.946355] CPU: 0 PID: 1 Comm: init Not tainted 4.10.0-00439-gc9fd5d362289-dirty #197
[   49.963505] Stack : 0000000000000000 ffffffff81bb5d6a 0000000000000006 ffffffff801ce9c4
[   49.974431]         0000000000000000 0000000000000000 0000000000000000 000000000000004a
[   49.985300]         ffffffff80b7e487 ffffffff80a24498 a8000000ff160000 ffffffff80ede8b8
[   49.996194]         0000000000000001 0000000000000000 0000000000000000 0000000077c8030c
[   50.007063]         000000007fd8a510 ffffffff801cd45c 0000000000000000 a8000000ff127c88
[   50.017945]         0000000000000000 ffffffff801cf928 0000000000000001 ffffffff80a24498
[   50.028827]         0000000000000000 0000000000000001 0000000000000000 0000000000000000
[   50.039688]         0000000000000000 a8000000ff127bd0 0000000000000000 ffffffff805509bc
[   50.050575]         00000000140084e0 0000000000000000 0000000000000000 0000000000040a00
[   50.061448]         0000000000000000 ffffffff8010e1b0 0000000000000000 ffffffff805509bc
[   50.072327]         ...
[   50.076087] Call Trace:
[   50.079869] [] show_stack+0x80/0xa8
[   50.086577] [] dump_stack+0x10c/0x190
[   50.093498] [] __warn+0xf0/0x108
[   50.099889] [] warn_slowpath_fmt+0x3c/0x48
[   50.107241] [] check_flags.part.41+0x1dc/0x1e8
[   50.114961] [] lock_is_held_type+0x8c/0xb0
[   50.122291] [] __schedule+0x8c0/0x10f8
[   50.129221] [] schedule+0x30/0x98
[   50.135659] [] work_resched+0x8/0x34
[   50.142397] ---[ end trace 0cb4f6ef5b99fe21 ]---
[   50.148405] possible reason: unannotated irqs-off.
[   50.154600] irq event stamp: 400463
[   50.159566] hardirqs last enabled at (400463): [] _raw_spin_unlock_irqrestore+0x40/0xa8
[   50.171981] hardirqs last disabled at (400462): [] _raw_spin_lock_irqsave+0x30/0xb0
[   50.183897] softirqs last enabled at (400450): [] __do_softirq+0x4ac/0x6a8
[   50.195015] softirqs last disabled at (400425): [] irq_exit+0x110/0x128

Fix this by using the TRACE_IRQS_OFF macro to call trace_hardirqs_off()
when CONFIG_TRACE_IRQFLAGS is enabled. This is done before invoking
schedule() following the work_resched label because:

1) Interrupts are disabled regardless of the path we take to reach
   work_resched & schedule().

2) Performing the tracing here avoids the need to do it in paths which
   disable interrupts but don't call out to C code before hitting a
   path which uses the RESTORE_SOME macro that will call
   trace_hardirqs_on() or trace_hardirqs_off() as appropriate.

We call trace_hardirqs_on() using the TRACE_IRQS_ON macro before
calling syscall_trace_leave() for similar reasons, ensuring that
lockdep has a consistent view of state after we re-enable interrupts.

Signed-off-by: Paul Burton
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Cc: linux-mips@linux-mips.org
Cc: stable
Patchwork: https://patchwork.linux-mips.org/patch/15385/
Signed-off-by: Ralf Baechle
---
 arch/mips/kernel/entry.S | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index 8d83fc2a96b7..38a302919e6b 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -11,6 +11,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -119,6 +120,7 @@ work_pending:
 	andi	t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
 	beqz	t0, work_notifysig
 work_resched:
+	TRACE_IRQS_OFF
 	jal	schedule
 
 	local_irq_disable		# make sure need_resched and
@@ -155,6 +157,7 @@ syscall_exit_work:
 	beqz	t0, work_pending	# trace bit set?
 	local_irq_enable		# could let syscall_trace_leave()
 					# call schedule() instead
+	TRACE_IRQS_ON
 	move	a0, sp
 	jal	syscall_trace_leave
 	b	resume_userspace
-- cgit
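The pairing rule the patch enforces can be modelled in C. The sketch
below is a hypothetical rendering of the work_resched flow, not the
real code (which is assembly); work_resched_model() is an invented
name, while raw_local_irq_disable(), trace_hardirqs_off() and
schedule() are the kernel primitives the entry paths and the
TRACE_IRQS_OFF macro boil down to. It compiles only in kernel context:

#include <linux/irqflags.h>
#include <linux/sched.h>

/*
 * Hypothetical C model of the annotated work_resched path. Lockdep
 * tracks hard-IRQ state via trace_hardirqs_*(); any path that disables
 * interrupts in assembly must report that fact before C code which may
 * acquire locks is allowed to run.
 */
static void work_resched_model(void)
{
	raw_local_irq_disable();	/* what the entry paths already do */
	trace_hardirqs_off();		/* what TRACE_IRQS_OFF now adds */

	/* schedule() may take locks; lockdep now agrees IRQs are off. */
	schedule();
}
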
From cad482c1b1844136fc6d9e33f10d3386344fb045 Mon Sep 17 00:00:00 2001
From: Paul Burton
Date: Tue, 13 Jun 2017 10:01:08 -0700
Subject: MIPS: Perform post-DMA cache flushes on systems with MAARs

Recent CPUs from Imagination Technologies such as the I6400 or P6600
are able to speculatively fetch data from memory into caches. This
means that if used in a system with non-coherent DMA they require that
caches be invalidated after a device performs DMA, and before the CPU
reads the DMA'd data, in order to ensure that stale values weren't
speculatively prefetched.

Such CPUs also introduced Memory Accessibility Attribute Registers
(MAARs) in order to control the regions in which they are allowed to
speculate. Thus we can use the presence of MAARs as a good indication
that the CPU requires the above cache maintenance. Use the presence of
MAARs to determine the result of cpu_needs_post_dma_flush() in the
default case, in order to handle these recent CPUs correctly.

Note that the return type of cpu_needs_post_dma_flush() is changed to
bool, such that it's clearer what's happening when cpu_has_maar is cast
to bool for the return value. If this patch were backported to a
pre-v4.7 kernel then MIPS_CPU_MAAR was 1ull<<34, so when cast to an int
we would incorrectly return 0. It so happens that MIPS_CPU_MAAR is
currently 1ull<<30, so truncation to an int gives a non-zero value
anyway, but even so the implicit conversion from long long int to bool
makes it clearer to understand what will happen than the implicit
conversion from long long int to int would. The bool return type also
fits this usage better semantically, so seems like an all-round win.

Thanks to Ed for spotting the issue for pre-v4.7 kernels & suggesting
the return type change.

Signed-off-by: Paul Burton
Reviewed-by: Bryan O'Donoghue
Tested-by: Bryan O'Donoghue
Cc: Ed Blake
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/16363/
Signed-off-by: Ralf Baechle
---
 arch/mips/mm/dma-default.c | 23 ++++++++++++++++++-----
 1 file changed, 18 insertions(+), 5 deletions(-)

diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index fe8df14b6169..e08598c70b3e 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -68,12 +68,25 @@ static inline struct page *dma_addr_to_page(struct device *dev,
  * systems and only the R10000 and R12000 are used in such systems, the
  * SGI IP28 Indigo² rsp. SGI IP32 aka O2.
  */
-static inline int cpu_needs_post_dma_flush(struct device *dev)
+static inline bool cpu_needs_post_dma_flush(struct device *dev)
 {
-	return !plat_device_is_coherent(dev) &&
-	       (boot_cpu_type() == CPU_R10000 ||
-		boot_cpu_type() == CPU_R12000 ||
-		boot_cpu_type() == CPU_BMIPS5000);
+	if (plat_device_is_coherent(dev))
+		return false;
+
+	switch (boot_cpu_type()) {
+	case CPU_R10000:
+	case CPU_R12000:
+	case CPU_BMIPS5000:
+		return true;
+
+	default:
+		/*
+		 * Presence of MAARs suggests that the CPU supports
+		 * speculatively prefetching data, and therefore requires
+		 * the post-DMA flush/invalidate.
+		 */
+		return cpu_has_maar;
+	}
 }
 
 static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
-- cgit
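The backporting note above is a plain integer-conversion hazard that
can be reproduced in isolation. Here is a small sketch using the two
flag values quoted in the commit message; strictly speaking, converting
an out-of-range value to int is implementation-defined, but it
truncates to the low 32 bits on the usual ABIs:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned long long maar_pre_v4_7 = 1ULL << 34;	/* old MIPS_CPU_MAAR */
	unsigned long long maar_current = 1ULL << 30;	/* value at patch time */

	/* Truncated to 32 bits: bit 34 is lost, bit 30 survives. */
	printf("as int:  pre-v4.7=%d current=%d\n",
	       (int)maar_pre_v4_7, (int)maar_current);

	/* Conversion to bool tests against zero, so both are true. */
	printf("as bool: pre-v4.7=%d current=%d\n",
	       (int)(bool)maar_pre_v4_7, (int)(bool)maar_current);
	return 0;
}
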
From 854236363370995a609a10b03e35fd3dc5e9e4a1 Mon Sep 17 00:00:00 2001
From: James Hogan
Date: Thu, 29 Jun 2017 15:05:04 +0100
Subject: MIPS: Avoid accidental raw backtrace

Since commit 81a76d7119f6 ("MIPS: Avoid using unwind_stack() with
usermode") show_backtrace() invokes the raw backtracer when cp0_status
& ST0_KSU indicates user mode, to fix issues on EVA kernels where user
and kernel address spaces overlap.

However this is used by show_stack(), which creates its own pt_regs on
the stack and leaves cp0_status uninitialised in most of the code
paths. This results in non-deterministic use of the raw backtracer
depending on the previous stack content.

show_stack() deals exclusively with kernel-mode stacks anyway, so
explicitly initialise regs.cp0_status to KSU_KERNEL (i.e. 0) to ensure
we get a useful backtrace.

Fixes: 81a76d7119f6 ("MIPS: Avoid using unwind_stack() with usermode")
Signed-off-by: James Hogan
Cc: linux-mips@linux-mips.org
Cc:  # 3.15+
Patchwork: https://patchwork.linux-mips.org/patch/16656/
Signed-off-by: Ralf Baechle
---
 arch/mips/kernel/traps.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 9681b5877140..38dfa27730ff 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -201,6 +201,8 @@ void show_stack(struct task_struct *task, unsigned long *sp)
 {
 	struct pt_regs regs;
 	mm_segment_t old_fs = get_fs();
+
+	regs.cp0_status = KSU_KERNEL;
 	if (sp) {
 		regs.regs[29] = (unsigned long)sp;
 		regs.regs[31] = 0;
-- cgit
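The non-determinism fixed above stems from branching on an
uninitialised field of an on-stack structure. Below is a stripped-down
host-side model of the failure; fake_pt_regs and backtracer() are
illustrative stand-ins, while the ST0_KSU and KSU_KERNEL values mirror
the MIPS register definitions:

#include <stdio.h>
#include <string.h>

#define ST0_KSU    0x00000018UL	/* KSU field mask in cp0_status */
#define KSU_KERNEL 0x00000000UL

struct fake_pt_regs {
	unsigned long cp0_status;
};

/* Mirrors the check in show_backtrace(): user mode -> raw dump. */
static const char *backtracer(const struct fake_pt_regs *regs)
{
	return (regs->cp0_status & ST0_KSU) ? "raw" : "unwind_stack";
}

int main(void)
{
	struct fake_pt_regs regs;

	/* Simulate stale stack contents, as with the old uninitialised field. */
	memset(&regs, 0xff, sizeof(regs));
	printf("stale: %s backtrace\n", backtracer(&regs));

	/* The fix: force a kernel-mode status before the check. */
	regs.cp0_status = KSU_KERNEL;
	printf("fixed: %s backtrace\n", backtracer(&regs));
	return 0;
}
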