author		Vineet Gupta <vgupta@synopsys.com>	2014-11-07 10:45:28 +0530
committer	Vineet Gupta <vgupta@synopsys.com>	2015-06-22 14:06:57 +0530
commit		aa6083ed50957f699596999affbb6eb9d7a8b72e (patch)
tree		67a2cb49a3514680cb1b4d59e4a9be5694f86146 /arch/arc/kernel/mcip.c
parent		82fea5a1bbbe8c3b56d5f3efbf8880c7b25b1758 (diff)
ARCv2: SMP: ARConnect debug/robustness
- Handle possible interrupt coalescing from MCIP
- Check that the previous IPI was acked before sending a new one

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Diffstat (limited to 'arch/arc/kernel/mcip.c')
-rw-r--r--	arch/arc/kernel/mcip.c	| 48
1 file changed, 44 insertions(+), 4 deletions(-)
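
The send-side change enforces a simple protocol: never raise a new IPI to a CPU until it has acked the previous one. As a rough illustration of why that ordering prevents lost IPIs, here is a minimal user-space sketch using C11 atomics and pthreads. All names in it (fake_ipi_send, ipi_pending, receiver) are hypothetical stand-ins for the MCIP commands and the IRQ handler, not the kernel API; a second sketch of the coalesced-ack loop follows the diff.

/*
 * Minimal user-space model of the send-side protocol: the sender never
 * raises a new "IPI" while the previous one is still pending, so no IPI
 * is lost even though the "hardware" has only a single pending bit per
 * target. Names are made up for illustration, not the MCIP/kernel API.
 *
 * Build: gcc -std=c11 -pthread sketch_send.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_IPI	5

static atomic_int ipi_pending;		/* models the per-target pending bit */

/* Sender: mirrors the spin-then-raise structure of mcip_ipi_send() */
static void fake_ipi_send(void)
{
	while (atomic_load(&ipi_pending))
		;			/* spin until the previous IPI is acked */
	atomic_store(&ipi_pending, 1);	/* stands in for CMD_INTRPT_GENERATE_IRQ */
}

/* Receiver: models the IRQ handler acking via CMD_INTRPT_GENERATE_ACK */
static void *receiver(void *arg)
{
	int handled = 0;

	while (handled < NR_IPI)
		if (atomic_exchange(&ipi_pending, 0))	/* ack if pending */
			handled++;
	return NULL;
}

int main(void)
{
	pthread_t rx;

	pthread_create(&rx, NULL, receiver, NULL);
	for (int i = 0; i < NR_IPI; i++)
		fake_ipi_send();	/* never overwrites an un-acked IPI */
	pthread_join(rx, NULL);
	printf("all %d IPIs delivered, none lost\n", NR_IPI);
	return 0;
}

Unlike this model, the kernel code spins while holding mcip_lock, so it must drop the lock on every iteration of the loop; otherwise the other CPU could be stuck in the same loop, unable to take the lock and ack, which is the deadlock the comment in the patch warns about.
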
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index e6ad6e64440a..35921c3ab394 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -33,27 +33,67 @@ void mcip_init_smp(unsigned int cpu)
static void mcip_ipi_send(int cpu)
{
unsigned long flags;
+ int ipi_was_pending;
+
+ /*
+ * NOTE: We must spin here if the other cpu hasn't yet
+ * serviced a previous message. This can burn lots
+ * of time, but we MUST follow this protocol or
+ * IPI messages can be lost!!!
+ * Also, we must release the lock in this loop because
+ * the other side may get to this same loop and not
+ * be able to ack -- thus causing deadlock.
+ */
+
+ do {
+ raw_spin_lock_irqsave(&mcip_lock, flags);
+ __mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
+ ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
+ if (ipi_was_pending == 0)
+ break; /* break out but keep lock */
+ raw_spin_unlock_irqrestore(&mcip_lock, flags);
+ } while (1);
- raw_spin_lock_irqsave(&mcip_lock, flags);
__mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);
raw_spin_unlock_irqrestore(&mcip_lock, flags);
+
+#ifdef CONFIG_ARC_IPI_DBG
+ if (ipi_was_pending)
+ pr_info("IPI ACK delayed from cpu %d\n", cpu);
+#endif
}
static void mcip_ipi_clear(int irq)
{
- unsigned int cpu;
+ unsigned int cpu, c;
unsigned long flags;
+ unsigned int __maybe_unused copy;
raw_spin_lock_irqsave(&mcip_lock, flags);
/* Who sent the IPI */
__mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0);
- cpu = read_aux_reg(ARC_REG_MCIP_READBACK); /* 1,2,4,8... */
+ copy = cpu = read_aux_reg(ARC_REG_MCIP_READBACK); /* 1,2,4,8... */
- __mcip_cmd(CMD_INTRPT_GENERATE_ACK, __ffs(cpu)); /* 0,1,2,3... */
+ /*
+ * In rare cases, multiple concurrent IPIs sent to the same target can
+ * be coalesced by MCIP into 1 asserted IRQ, so @cpu can be
+ * "vectored" (multiple bits set) as opposed to the typical single bit
+ */
+ do {
+ c = __ffs(cpu); /* 0,1,2,3 */
+ __mcip_cmd(CMD_INTRPT_GENERATE_ACK, c);
+ cpu &= ~(1U << c);
+ } while (cpu);
raw_spin_unlock_irqrestore(&mcip_lock, flags);
+
+#ifdef CONFIG_ARC_IPI_DBG
+ if (c != __ffs(copy))
+ pr_info("IPIs from %x coalesced to %x\n",
+ copy, raw_smp_processor_id());
+#endif
}
volatile int wake_flag;
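
On the receive side, the CHECK_SOURCE readback is a bitmask of senders; when several IPIs were coalesced into one asserted IRQ, more than one bit may be set, so each bit has to be acked individually. Below is a minimal standalone sketch of that bit-by-bit ack loop, with ffs_bit() and ack_one() as hypothetical stand-ins for __ffs() and the CMD_INTRPT_GENERATE_ACK command.

/*
 * Standalone illustration of the ack loop added to mcip_ipi_clear():
 * walk the sender bitmask and ack each set bit once.
 * ffs_bit() and ack_one() are made-up stand-ins, not the kernel API.
 */
#include <stdio.h>

/* index of the lowest set bit; caller guarantees x != 0 (like __ffs) */
static unsigned int ffs_bit(unsigned int x)
{
	return (unsigned int)__builtin_ctz(x);
}

static void ack_one(unsigned int sender)
{
	/* stands in for __mcip_cmd(CMD_INTRPT_GENERATE_ACK, sender) */
	printf("ack IPI from cpu %u\n", sender);
}

int main(void)
{
	/* e.g. cpus 0 and 2 both sent an IPI that got coalesced: 0b0101 */
	unsigned int cpu = 0x5;

	do {
		unsigned int c = ffs_bit(cpu);	/* 0,1,2,3... */

		ack_one(c);
		cpu &= ~(1U << c);		/* clear the bit just acked */
	} while (cpu);

	return 0;
}

Clearing each bit with cpu &= ~(1U << c) guarantees the loop terminates after exactly one ack per sender, no matter how many IPIs were coalesced into the single IRQ.
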