author	Vineet Gupta <vgupta@synopsys.com>	2016-02-23 11:55:16 +0530
committer	Vineet Gupta <vgupta@synopsys.com>	2016-02-24 11:07:28 +0530
commit	bb143f814ea488769ca2e79e0b376139cb5f134b (patch)
tree	32857f21fd6b8a9a4ae8457142c34260628c535e	/arch/arc/kernel/mcip.c
parent	3e5177c1919bdc7651b5056f35409d0b4d728841 (diff)
ARCv2: SMP: Emulate IPI to self using software triggered interrupt
The ARConnect/MCIP Inter-Core-Interrupt module can't send an interrupt to the local core. So use the core intc capability to trigger a software interrupt to self, using an unused IRQ #21.

This showed up as a csd deadlock with LTP trace_sched on a dual core system. This test acts as a scheduler fuzzer, triggering all sorts of scheduling activity. Trouble starts with an IPI to self, which doesn't get delivered (effectively lost due to the H/w capability), but the msg intended to be sent remains enqueued in per-cpu @ipi_data.

All subsequent IPIs to this core from other cores get elided due to the IPI coalescing optimization in ipi_send_msg_one(), where a pending msg implies an IPI was already sent and the other core is assumed to be yet to ack it. After the elided IPI, the other core simply goes into csd_lock_wait() but never comes out, as this core never sees the interrupt.

Fixes STAR 9001008624

Cc: Peter Zijlstra <peterz@infradead.org>
Cc: <stable@vger.kernel.org> [4.2]
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
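To make the failure mode concrete, here is a rough standalone model of the coalescing described above. It is illustrative only: the names mirror ipi_send_msg_one() and @ipi_data from the kernel, but the logic is heavily simplified and the hardware kick is reduced to a print.

/* Simplified model, not the actual kernel code.
 * A sender only kicks the hardware IPI when no msg bit was already
 * pending for the target; once a self-IPI is silently dropped by the
 * hardware while its msg bit stays set, every later sender elides the
 * kick and the target core never takes an interrupt.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned long ipi_data[2];	/* per-cpu pending msg bits */
static bool hw_can_self_ipi;		/* false for ARConnect/MCIP */

static void hw_ipi_send(int from, int to)
{
	if (from == to && !hw_can_self_ipi)
		return;			/* IPI to self is silently lost */
	printf("cpu%d takes the interrupt\n", to);
	ipi_data[to] = 0;		/* stand-in for the receiver draining its msgs */
}

static void ipi_send_msg_one(int from, int to, int msg)
{
	unsigned long old = ipi_data[to];

	ipi_data[to] |= 1UL << msg;
	if (!old)			/* coalesce: kick only if nothing was pending */
		hw_ipi_send(from, to);
}

int main(void)
{
	ipi_send_msg_one(0, 0, 1);	/* self-IPI: lost, but msg bit stays set */
	ipi_send_msg_one(1, 0, 2);	/* elided kick: cpu0 is never interrupted, cpu1 waits in csd_lock_wait() */
	return 0;
}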
Diffstat (limited to 'arch/arc/kernel/mcip.c')
-rw-r--r--	arch/arc/kernel/mcip.c	15
1 file changed, 15 insertions, 0 deletions
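The patch leans on two small helpers from asm/irqflags-arcv2.h, arc_softirq_trigger() and arc_softirq_clear(). A minimal sketch of what such helpers could look like is below, assuming the ARCv2 core exposes an auxiliary "hint" register that raises a software interrupt on the local core when written with an IRQ number and drops it when written with 0. The register name and number are assumptions, not taken from this patch.

/* Illustrative sketch only. Assumes AUX_IRQ_HINT is the aux register
 * used to raise/clear a software interrupt on the local core.
 */
#define AUX_IRQ_HINT	0x20a			/* assumed aux register number */

static inline void arc_softirq_trigger(int irq)
{
	write_aux_reg(AUX_IRQ_HINT, irq);	/* raise IRQ @irq on this core */
}

static inline void arc_softirq_clear(int irq)
{
	write_aux_reg(AUX_IRQ_HINT, 0);		/* ack/drop the pending software interrupt */
}

In the hunks below, mcip_ipi_send() routes a self-IPI through arc_softirq_trigger(SOFTIRQ_IRQ), and mcip_ipi_clear() acks it via arc_softirq_clear().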
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index 002c5fcf8947..9e1bd03b87a6 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -11,9 +11,12 @@
#include <linux/smp.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
+#include <asm/irqflags-arcv2.h>
#include <asm/mcip.h>
#include <asm/setup.h>
+#define SOFTIRQ_IRQ 21
+
static char smp_cpuinfo_buf[128];
static int idu_detected;
@@ -22,6 +25,7 @@ static DEFINE_RAW_SPINLOCK(mcip_lock);
static void mcip_setup_per_cpu(int cpu)
{
smp_ipi_irq_setup(cpu, IPI_IRQ);
+ smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);
}
static void mcip_ipi_send(int cpu)
@@ -29,6 +33,12 @@ static void mcip_ipi_send(int cpu)
unsigned long flags;
int ipi_was_pending;
+ /* ARConnect can only send IPI to others */
+ if (unlikely(cpu == raw_smp_processor_id())) {
+ arc_softirq_trigger(SOFTIRQ_IRQ);
+ return;
+ }
+
/*
* NOTE: We must spin here if the other cpu hasn't yet
* serviced a previous message. This can burn lots
@@ -63,6 +73,11 @@ static void mcip_ipi_clear(int irq)
unsigned long flags;
unsigned int __maybe_unused copy;
+ if (unlikely(irq == SOFTIRQ_IRQ)) {
+ arc_softirq_clear(irq);
+ return;
+ }
+
raw_spin_lock_irqsave(&mcip_lock, flags);
/* Who sent the IPI */