Diffstat (limited to 'arch/arm64/include/asm/hardirq.h')
-rw-r--r--  arch/arm64/include/asm/hardirq.h  105
1 file changed, 71 insertions, 34 deletions
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
index 990c051e7829..77d6b8c63d4e 100644
--- a/arch/arm64/include/asm/hardirq.h
+++ b/arch/arm64/include/asm/hardirq.h
@@ -1,45 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __ASM_HARDIRQ_H
#define __ASM_HARDIRQ_H
#include <linux/cache.h>
+#include <linux/percpu.h>
#include <linux/threads.h>
+#include <asm/barrier.h>
#include <asm/irq.h>
+#include <asm/kvm_arm.h>
+#include <asm/sysreg.h>
-#define NR_IPI 4
+#define ack_bad_irq ack_bad_irq
+#include <asm-generic/hardirq.h>
-typedef struct {
- unsigned int __softirq_pending;
-#ifdef CONFIG_SMP
- unsigned int ipi_irqs[NR_IPI];
-#endif
-} ____cacheline_aligned irq_cpustat_t;
+#define __ARCH_IRQ_EXIT_IRQS_DISABLED 1
-#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
+struct nmi_ctx {
+ u64 hcr;
+ unsigned int cnt;
+};
-#define __inc_irq_stat(cpu, member) __IRQ_STAT(cpu, member)++
-#define __get_irq_stat(cpu, member) __IRQ_STAT(cpu, member)
+DECLARE_PER_CPU(struct nmi_ctx, nmi_contexts);
-#ifdef CONFIG_SMP
-u64 smp_irq_stat_cpu(unsigned int cpu);
-#define arch_irq_stat_cpu smp_irq_stat_cpu
-#endif
+#define arch_nmi_enter() \
+do { \
+ struct nmi_ctx *___ctx; \
+ u64 ___hcr; \
+ \
+ if (!is_kernel_in_hyp_mode()) \
+ break; \
+ \
+ ___ctx = this_cpu_ptr(&nmi_contexts); \
+ if (___ctx->cnt) { \
+ ___ctx->cnt++; \
+ break; \
+ } \
+ \
+ ___hcr = read_sysreg(hcr_el2); \
+ if (!(___hcr & HCR_TGE)) { \
+ write_sysreg_hcr(___hcr | HCR_TGE); \
+ isb(); \
+ } \
+ /* \
+ * Make sure the sysreg write is performed before ___ctx->cnt \
+ * is set to 1. NMIs that see cnt == 1 will rely on us. \
+ */ \
+ barrier(); \
+ ___ctx->cnt = 1; \
+ /* \
+ * Make sure ___ctx->cnt is set before we save ___hcr. We \
+ * don't want ___ctx->hcr to be overwritten. \
+ */ \
+ barrier(); \
+ ___ctx->hcr = ___hcr; \
+} while (0)
-#define __ARCH_IRQ_EXIT_IRQS_DISABLED 1
+#define arch_nmi_exit() \
+do { \
+ struct nmi_ctx *___ctx; \
+ u64 ___hcr; \
+ \
+ if (!is_kernel_in_hyp_mode()) \
+ break; \
+ \
+ ___ctx = this_cpu_ptr(&nmi_contexts); \
+ ___hcr = ___ctx->hcr; \
+ /* \
+ * Make sure we read ___ctx->hcr before we release \
+ * ___ctx->cnt as it makes ___ctx->hcr updatable again. \
+ */ \
+ barrier(); \
+ ___ctx->cnt--; \
+ /* \
+ * Make sure ___ctx->cnt release is visible before we \
+ * restore the sysreg. Otherwise a new NMI occurring \
+ * right after write_sysreg() can be fooled and think \
+ * we secured things for it. \
+ */ \
+ barrier(); \
+ if (!___ctx->cnt && !(___hcr & HCR_TGE)) \
+ write_sysreg_hcr(___hcr); \
+} while (0)
static inline void ack_bad_irq(unsigned int irq)
{
@@ -47,11 +91,4 @@ static inline void ack_bad_irq(unsigned int irq)
irq_err_count++;
}
-extern void handle_IRQ(unsigned int, struct pt_regs *);
-
-/*
- * No arch-specific IRQ flags.
- */
-#define set_irq_flags(irq, flags)
-
#endif /* __ASM_HARDIRQ_H */
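
Note: the arch_nmi_enter()/arch_nmi_exit() pair added above saves HCR_EL2, forces HCR_TGE on for the outermost NMI when the kernel runs in hyp mode (VHE), and restores the original value only when the outermost NMI exits; nested NMIs just bump nmi_ctx::cnt. The userspace sketch below illustrates that nesting and restore logic only. fake_hcr, FAKE_TGE, struct nmi_ctx_sketch and the *_sketch functions are invented for the example, and the compiler barriers and the is_kernel_in_hyp_mode() check of the real macros are omitted.

/*
 * Illustrative userspace sketch of the nesting logic used by
 * arch_nmi_enter()/arch_nmi_exit() above.  All names here are
 * invented for the example; the real code manipulates HCR_EL2
 * and the per-CPU nmi_contexts array, and uses barrier() to
 * order the per-CPU updates against the system register write.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_TGE	(1ULL << 27)	/* stand-in for HCR_TGE */

static uint64_t fake_hcr;		/* stand-in for HCR_EL2 */

struct nmi_ctx_sketch {
	uint64_t hcr;			/* value to restore on outermost exit */
	unsigned int cnt;		/* NMI nesting depth */
};

static struct nmi_ctx_sketch ctx;	/* the real code keeps one per CPU */

static void nmi_enter_sketch(void)
{
	uint64_t hcr;

	if (ctx.cnt) {			/* nested NMI: only bump the count */
		ctx.cnt++;
		return;
	}

	hcr = fake_hcr;
	if (!(hcr & FAKE_TGE))		/* force "TGE" on for the NMI window */
		fake_hcr = hcr | FAKE_TGE;

	/* the real macro has barriers here to order cnt/hcr updates */
	ctx.cnt = 1;
	ctx.hcr = hcr;			/* remember what to restore */
}

static void nmi_exit_sketch(void)
{
	uint64_t hcr = ctx.hcr;		/* real code: read before releasing cnt */

	ctx.cnt--;
	if (!ctx.cnt && !(hcr & FAKE_TGE))
		fake_hcr = hcr;		/* outermost exit: restore original value */
}

int main(void)
{
	nmi_enter_sketch();		/* outermost NMI sets FAKE_TGE */
	assert(fake_hcr & FAKE_TGE);
	nmi_enter_sketch();		/* nested NMI only increments cnt */
	nmi_exit_sketch();
	assert(fake_hcr & FAKE_TGE);	/* still inside the outer NMI */
	nmi_exit_sketch();
	assert(!(fake_hcr & FAKE_TGE));	/* restored on outermost exit */
	printf("nesting behaves as expected\n");
	return 0;
}

Compile with e.g. "cc -o nmi_sketch nmi_sketch.c" and run; the asserts document that the TGE stand-in stays set across a nested NMI and is only cleared again on the outermost exit when it was clear to begin with.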