Diffstat (limited to 'drivers/irqchip')
-rw-r--r--  drivers/irqchip/Kconfig | 102
-rw-r--r--  drivers/irqchip/Makefile | 15
-rw-r--r--  drivers/irqchip/irq-alpine-msi.c | 2
-rw-r--r--  drivers/irqchip/irq-apple-aic.c | 62
-rw-r--r--  drivers/irqchip/irq-armada-370-xp.c | 866
-rw-r--r--  drivers/irqchip/irq-aspeed-intc.c | 139
-rw-r--r--  drivers/irqchip/irq-atmel-aic-common.c | 4
-rw-r--r--  drivers/irqchip/irq-atmel-aic.c | 3
-rw-r--r--  drivers/irqchip/irq-atmel-aic5.c | 12
-rw-r--r--  drivers/irqchip/irq-bcm2835.c | 4
-rw-r--r--  drivers/irqchip/irq-bcm2836.c | 3
-rw-r--r--  drivers/irqchip/irq-bcm6345-l1.c | 8
-rw-r--r--  drivers/irqchip/irq-bcm7038-l1.c | 2
-rw-r--r--  drivers/irqchip/irq-brcmstb-l2.c | 45
-rw-r--r--  drivers/irqchip/irq-clps711x.c | 2
-rw-r--r--  drivers/irqchip/irq-davinci-cp-intc.c | 3
-rw-r--r--  drivers/irqchip/irq-ftintc010.c | 2
-rw-r--r--  drivers/irqchip/irq-gic-common.c | 22
-rw-r--r--  drivers/irqchip/irq-gic-common.h | 10
-rw-r--r--  drivers/irqchip/irq-gic-v2m.c | 87
-rw-r--r--  drivers/irqchip/irq-gic-v3-its-msi-parent.c | 210
-rw-r--r--  drivers/irqchip/irq-gic-v3-its-pci-msi.c | 202
-rw-r--r--  drivers/irqchip/irq-gic-v3-its-platform-msi.c | 163
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c | 373
-rw-r--r--  drivers/irqchip/irq-gic-v3-mbi.c | 130
-rw-r--r--  drivers/irqchip/irq-gic-v3.c | 439
-rw-r--r--  drivers/irqchip/irq-gic-v4.c | 2
-rw-r--r--  drivers/irqchip/irq-gic.c | 41
-rw-r--r--  drivers/irqchip/irq-hip04.c | 6
-rw-r--r--  drivers/irqchip/irq-imgpdc.c | 7
-rw-r--r--  drivers/irqchip/irq-imx-intmux.c | 18
-rw-r--r--  drivers/irqchip/irq-imx-irqsteer.c | 38
-rw-r--r--  drivers/irqchip/irq-imx-mu-msi.c | 54
-rw-r--r--  drivers/irqchip/irq-ixp4xx.c | 3
-rw-r--r--  drivers/irqchip/irq-jcore-aic.c | 2
-rw-r--r--  drivers/irqchip/irq-keystone.c | 14
-rw-r--r--  drivers/irqchip/irq-lan966x-oic.c | 278
-rw-r--r--  drivers/irqchip/irq-loongarch-avec.c | 433
-rw-r--r--  drivers/irqchip/irq-loongarch-cpu.c | 13
-rw-r--r--  drivers/irqchip/irq-loongson-eiointc.c | 156
-rw-r--r--  drivers/irqchip/irq-loongson-htvec.c | 2
-rw-r--r--  drivers/irqchip/irq-loongson-liointc.c | 6
-rw-r--r--  drivers/irqchip/irq-loongson-pch-lpc.c | 2
-rw-r--r--  drivers/irqchip/irq-loongson-pch-msi.c | 85
-rw-r--r--  drivers/irqchip/irq-loongson-pch-pic.c | 78
-rw-r--r--  drivers/irqchip/irq-loongson.h | 27
-rw-r--r--  drivers/irqchip/irq-ls-scfg-msi.c | 12
-rw-r--r--  drivers/irqchip/irq-madera.c | 8
-rw-r--r--  drivers/irqchip/irq-mbigen.c | 138
-rw-r--r--  drivers/irqchip/irq-meson-gpio.c | 20
-rw-r--r--  drivers/irqchip/irq-mips-gic.c | 269
-rw-r--r--  drivers/irqchip/irq-mscc-ocelot.c | 10
-rw-r--r--  drivers/irqchip/irq-msi-lib.c | 143
-rw-r--r--  drivers/irqchip/irq-msi-lib.h | 27
-rw-r--r--  drivers/irqchip/irq-mvebu-gicp.c | 44
-rw-r--r--  drivers/irqchip/irq-mvebu-icu.c | 276
-rw-r--r--  drivers/irqchip/irq-mvebu-odmi.c | 37
-rw-r--r--  drivers/irqchip/irq-mvebu-pic.c | 15
-rw-r--r--  drivers/irqchip/irq-mvebu-sei.c | 52
-rw-r--r--  drivers/irqchip/irq-mxs.c | 2
-rw-r--r--  drivers/irqchip/irq-omap-intc.c | 3
-rw-r--r--  drivers/irqchip/irq-partition-percpu.c | 2
-rw-r--r--  drivers/irqchip/irq-pic32-evic.c | 10
-rw-r--r--  drivers/irqchip/irq-pruss-intc.c | 14
-rw-r--r--  drivers/irqchip/irq-renesas-intc-irqpin.c | 9
-rw-r--r--  drivers/irqchip/irq-renesas-irqc.c | 7
-rw-r--r--  drivers/irqchip/irq-renesas-rza1.c | 5
-rw-r--r--  drivers/irqchip/irq-renesas-rzg2l.c | 242
-rw-r--r--  drivers/irqchip/irq-renesas-rzv2h.c | 513
-rw-r--r--  drivers/irqchip/irq-riscv-aplic-direct.c | 329
-rw-r--r--  drivers/irqchip/irq-riscv-aplic-main.c | 234
-rw-r--r--  drivers/irqchip/irq-riscv-aplic-main.h | 53
-rw-r--r--  drivers/irqchip/irq-riscv-aplic-msi.c | 285
-rw-r--r--  drivers/irqchip/irq-riscv-imsic-early.c | 263
-rw-r--r--  drivers/irqchip/irq-riscv-imsic-platform.c | 395
-rw-r--r--  drivers/irqchip/irq-riscv-imsic-state.c | 891
-rw-r--r--  drivers/irqchip/irq-riscv-imsic-state.h | 108
-rw-r--r--  drivers/irqchip/irq-riscv-intc.c | 223
-rw-r--r--  drivers/irqchip/irq-sa11x0.c | 3
-rw-r--r--  drivers/irqchip/irq-sifive-plic.c | 396
-rw-r--r--  drivers/irqchip/irq-starfive-jh8100-intc.c | 207
-rw-r--r--  drivers/irqchip/irq-stm32-exti.c | 624
-rw-r--r--  drivers/irqchip/irq-stm32mp-exti.c | 728
-rw-r--r--  drivers/irqchip/irq-sun6i-r.c | 2
-rw-r--r--  drivers/irqchip/irq-sunxi-nmi.c | 4
-rw-r--r--  drivers/irqchip/irq-tb10x.c | 1
-rw-r--r--  drivers/irqchip/irq-thead-c900-aclint-sswi.c | 176
-rw-r--r--  drivers/irqchip/irq-ti-sci-inta.c | 1
-rw-r--r--  drivers/irqchip/irq-ti-sci-intr.c | 1
-rw-r--r--  drivers/irqchip/irq-ts4800.c | 15
-rw-r--r--  drivers/irqchip/irq-versatile-fpga.c | 4
-rw-r--r--  drivers/irqchip/irq-vic.c | 3
-rw-r--r--  drivers/irqchip/irq-xilinx-intc.c | 2
-rw-r--r--  drivers/irqchip/irqchip.c | 4
-rw-r--r--  drivers/irqchip/qcom-pdc.c | 67
95 files changed, 8318 insertions, 2734 deletions
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index f7149d0f3d45..c11b9965c4ad 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -26,6 +26,7 @@ config ARM_GIC_V2M
bool
depends on PCI
select ARM_GIC
+ select IRQ_MSI_LIB
select PCI_MSI
config GIC_NON_BANKED
@@ -41,15 +42,9 @@ config ARM_GIC_V3
config ARM_GIC_V3_ITS
bool
select GENERIC_MSI_IRQ
+ select IRQ_MSI_LIB
default ARM_GIC_V3
-config ARM_GIC_V3_ITS_PCI
- bool
- depends on ARM_GIC_V3_ITS
- depends on PCI
- depends on PCI_MSI
- default ARM_GIC_V3_ITS
-
config ARM_GIC_V3_ITS_FSL_MC
bool
depends on ARM_GIC_V3_ITS
@@ -74,6 +69,9 @@ config ARM_VIC_NR
The maximum number of VICs available in the system, for
power management.
+config IRQ_MSI_LIB
+ bool
+
config ARMADA_370_XP_IRQ
bool
select GENERIC_IRQ_CHIP
@@ -169,6 +167,19 @@ config IXP4XX_IRQ
select IRQ_DOMAIN
select SPARSE_IRQ
+config LAN966X_OIC
+ tristate "Microchip LAN966x OIC Support"
+ depends on MCHP_LAN966X_PCI || COMPILE_TEST
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+ help
+ Enable support for the LAN966x Outbound Interrupt Controller.
+ This controller is present on the Microchip LAN966x PCI device and
+ maps the internal interrupt sources to a PCIe interrupt.
+
+ To compile this driver as a module, choose M here: the module
+ will be called irq-lan966x-oic.
+
config MADERA_IRQ
tristate
@@ -248,6 +259,13 @@ config RENESAS_RZG2L_IRQC
Enable support for the Renesas RZ/G2L (and alike SoC) Interrupt Controller
for external devices.
+config RENESAS_RZV2H_ICU
+ bool "Renesas RZ/V2H(P) ICU support" if COMPILE_TEST
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ Enable support for the Renesas RZ/V2H(P) Interrupt Control Unit (ICU).
+
config SL28CPLD_INTC
bool "Kontron sl28cpld IRQ controller"
depends on MFD_SL28CPLD=y || COMPILE_TEST
@@ -328,6 +346,7 @@ config KEYSTONE_IRQ
config MIPS_GIC
bool
+ select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
select GENERIC_IRQ_IPI if SMP
select IRQ_DOMAIN_HIERARCHY
select MIPS_CM
@@ -366,6 +385,7 @@ config MSCC_OCELOT_IRQ
select GENERIC_IRQ_CHIP
config MVEBU_GICP
+ select IRQ_MSI_LIB
bool
config MVEBU_ICU
@@ -373,6 +393,7 @@ config MVEBU_ICU
config MVEBU_ODMI
bool
+ select IRQ_MSI_LIB
select GENERIC_MSI_IRQ
config MVEBU_PIC
@@ -392,6 +413,15 @@ config LS_SCFG_MSI
config PARTITION_PERCPU
bool
+config STM32MP_EXTI
+ tristate "STM32MP extended interrupts and event controller"
+ depends on (ARCH_STM32 && !ARM_SINGLE_ARMV7M) || COMPILE_TEST
+ default ARCH_STM32 && !ARM_SINGLE_ARMV7M
+ select IRQ_DOMAIN_HIERARCHY
+ select GENERIC_IRQ_CHIP
+ help
+ Support for the STM32MP EXTI (extended interrupts and event) controller.
+
config STM32_EXTI
bool
select IRQ_DOMAIN
@@ -487,6 +517,7 @@ config IMX_MU_MSI
select IRQ_DOMAIN
select IRQ_DOMAIN_HIERARCHY
select GENERIC_MSI_IRQ
+ select IRQ_MSI_LIB
help
Provide a driver for the i.MX Messaging Unit block used as a
CPU-to-CPU MSI controller. This requires a specially crafted DT
@@ -504,8 +535,9 @@ config LS1X_IRQ
Support for the Loongson-1 platform Interrupt Controller.
config TI_SCI_INTR_IRQCHIP
- bool
+ tristate "TI SCI INTR Interrupt Controller"
depends on TI_SCI_PROTOCOL
+ depends on ARCH_K3 || COMPILE_TEST
select IRQ_DOMAIN_HIERARCHY
help
This enables the irqchip driver support for K3 Interrupt router
@@ -514,8 +546,9 @@ config TI_SCI_INTR_IRQCHIP
TI System Controller, say Y here. Otherwise, say N.
config TI_SCI_INTA_IRQCHIP
- bool
+ tristate "TI SCI INTA Interrupt Controller"
depends on TI_SCI_PROTOCOL
+ depends on ARCH_K3 || (COMPILE_TEST && ARM64)
select IRQ_DOMAIN_HIERARCHY
select TI_SCI_INTA_MSI_DOMAIN
help
@@ -540,12 +573,60 @@ config RISCV_INTC
depends on RISCV
select IRQ_DOMAIN_HIERARCHY
+config RISCV_APLIC
+ bool
+ depends on RISCV
+ select IRQ_DOMAIN_HIERARCHY
+
+config RISCV_APLIC_MSI
+ bool
+ depends on RISCV_APLIC
+ select GENERIC_MSI_IRQ
+ default RISCV_APLIC
+
+config RISCV_IMSIC
+ bool
+ depends on RISCV
+ select IRQ_DOMAIN_HIERARCHY
+ select GENERIC_IRQ_MATRIX_ALLOCATOR
+ select GENERIC_MSI_IRQ
+
+config RISCV_IMSIC_PCI
+ bool
+ depends on RISCV_IMSIC
+ depends on PCI
+ depends on PCI_MSI
+ default RISCV_IMSIC
+
config SIFIVE_PLIC
bool
depends on RISCV
select IRQ_DOMAIN_HIERARCHY
select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
+config STARFIVE_JH8100_INTC
+ bool "StarFive JH8100 External Interrupt Controller"
+ depends on ARCH_STARFIVE || COMPILE_TEST
+ default ARCH_STARFIVE
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ This enables support for the INTC chip found in the StarFive
+ JH8100 SoC.
+
+ If you don't know what to do here, say Y.
+
+config THEAD_C900_ACLINT_SSWI
+ bool "THEAD C9XX ACLINT S-mode IPI Interrupt Controller"
+ depends on RISCV
+ depends on SMP
+ select IRQ_DOMAIN_HIERARCHY
+ select GENERIC_IRQ_IPI_MUX
+ help
+ This enables support for the T-HEAD-specific ACLINT SSWI
+ device.
+
+ If you don't know what to do here, say Y.
+
config EXYNOS_IRQ_COMBINER
bool "Samsung Exynos IRQ combiner support" if COMPILE_TEST
depends on (ARCH_EXYNOS && ARM) || COMPILE_TEST
@@ -557,7 +638,7 @@ config IRQ_LOONGARCH_CPU
bool
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
- select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+ select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
select LOONGSON_HTVEC
select LOONGSON_LIOINTC
select LOONGSON_EIOINTC
@@ -620,6 +701,7 @@ config LOONGSON_PCH_MSI
depends on PCI
default MACH_LOONGSON64
select IRQ_DOMAIN_HIERARCHY
+ select IRQ_MSI_LIB
select PCI_MSI
help
Support for the Loongson PCH MSI Controller.
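
Several options in this Kconfig hunk (TI_SCI_INTR_IRQCHIP, TI_SCI_INTA_IRQCHIP and the new LAN966X_OIC) are tristate, so the corresponding drivers can now be built as loadable modules. As rough orientation only (none of this is code from the patch; every example_* name and the "vendor,example-intc" compatible string are hypothetical), a modular irqchip of this kind needs the usual platform-driver module boilerplate:

    /* Sketch only: generic bool->tristate module skeleton, not this patch's code. */
    #include <linux/module.h>
    #include <linux/of.h>
    #include <linux/platform_device.h>

    static int example_intc_probe(struct platform_device *pdev)
    {
            /* map registers, create the irq_domain, request the parent IRQ */
            return 0;
    }

    static const struct of_device_id example_intc_of_match[] = {
            { .compatible = "vendor,example-intc" },        /* hypothetical */
            { }
    };
    MODULE_DEVICE_TABLE(of, example_intc_of_match);

    static struct platform_driver example_intc_driver = {
            .probe = example_intc_probe,
            .driver = {
                    .name           = "example-intc",
                    .of_match_table = example_intc_of_match,
            },
    };
    module_platform_driver(example_intc_driver);

    MODULE_DESCRIPTION("Example interrupt controller module skeleton");
    MODULE_LICENSE("GPL");

With CONFIG_LAN966X_OIC=m, for instance, kbuild produces irq-lan966x-oic.ko, as the new help text notes.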
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index ffd945fe71aa..25e9ad29b8c4 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -29,10 +29,10 @@ obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o
obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o
obj-$(CONFIG_ARM_GIC_PM) += irq-gic-pm.o
obj-$(CONFIG_ARCH_REALVIEW) += irq-gic-realview.o
+obj-$(CONFIG_IRQ_MSI_LIB) += irq-msi-lib.o
obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o
obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-v3-mbi.o irq-gic-common.o
-obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-platform-msi.o irq-gic-v4.o
-obj-$(CONFIG_ARM_GIC_V3_ITS_PCI) += irq-gic-v3-its-pci-msi.o
+obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v4.o irq-gic-v3-its-msi-parent.o
obj-$(CONFIG_ARM_GIC_V3_ITS_FSL_MC) += irq-gic-v3-its-fsl-mc-msi.o
obj-$(CONFIG_PARTITION_PERCPU) += irq-partition-percpu.o
obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o
@@ -51,6 +51,7 @@ obj-$(CONFIG_RENESAS_INTC_IRQPIN) += irq-renesas-intc-irqpin.o
obj-$(CONFIG_RENESAS_IRQC) += irq-renesas-irqc.o
obj-$(CONFIG_RENESAS_RZA1_IRQC) += irq-renesas-rza1.o
obj-$(CONFIG_RENESAS_RZG2L_IRQC) += irq-renesas-rzg2l.o
+obj-$(CONFIG_RENESAS_RZV2H_ICU) += irq-renesas-rzv2h.o
obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o
obj-$(CONFIG_ARCH_NSPIRE) += irq-zevio.o
obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o
@@ -84,6 +85,8 @@ obj-$(CONFIG_MVEBU_SEI) += irq-mvebu-sei.o
obj-$(CONFIG_LS_EXTIRQ) += irq-ls-extirq.o
obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o
obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o irq-aspeed-scu-ic.o
+obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-intc.o
+obj-$(CONFIG_STM32MP_EXTI) += irq-stm32mp-exti.o
obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o
obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o
obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o
@@ -95,16 +98,22 @@ obj-$(CONFIG_QCOM_MPM) += irq-qcom-mpm.o
obj-$(CONFIG_CSKY_MPINTC) += irq-csky-mpintc.o
obj-$(CONFIG_CSKY_APB_INTC) += irq-csky-apb-intc.o
obj-$(CONFIG_RISCV_INTC) += irq-riscv-intc.o
+obj-$(CONFIG_RISCV_APLIC) += irq-riscv-aplic-main.o irq-riscv-aplic-direct.o
+obj-$(CONFIG_RISCV_APLIC_MSI) += irq-riscv-aplic-msi.o
+obj-$(CONFIG_RISCV_IMSIC) += irq-riscv-imsic-state.o irq-riscv-imsic-early.o irq-riscv-imsic-platform.o
obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o
+obj-$(CONFIG_STARFIVE_JH8100_INTC) += irq-starfive-jh8100-intc.o
+obj-$(CONFIG_THEAD_C900_ACLINT_SSWI) += irq-thead-c900-aclint-sswi.o
obj-$(CONFIG_IMX_IRQSTEER) += irq-imx-irqsteer.o
obj-$(CONFIG_IMX_INTMUX) += irq-imx-intmux.o
obj-$(CONFIG_IMX_MU_MSI) += irq-imx-mu-msi.o
obj-$(CONFIG_MADERA_IRQ) += irq-madera.o
+obj-$(CONFIG_LAN966X_OIC) += irq-lan966x-oic.o
obj-$(CONFIG_LS1X_IRQ) += irq-ls1x.o
obj-$(CONFIG_TI_SCI_INTR_IRQCHIP) += irq-ti-sci-intr.o
obj-$(CONFIG_TI_SCI_INTA_IRQCHIP) += irq-ti-sci-inta.o
obj-$(CONFIG_TI_PRUSS_INTC) += irq-pruss-intc.o
-obj-$(CONFIG_IRQ_LOONGARCH_CPU) += irq-loongarch-cpu.o
+obj-$(CONFIG_IRQ_LOONGARCH_CPU) += irq-loongarch-cpu.o irq-loongarch-avec.o
obj-$(CONFIG_LOONGSON_LIOINTC) += irq-loongson-liointc.o
obj-$(CONFIG_LOONGSON_EIOINTC) += irq-loongson-eiointc.o
obj-$(CONFIG_LOONGSON_HTPIC) += irq-loongson-htpic.o
diff --git a/drivers/irqchip/irq-alpine-msi.c b/drivers/irqchip/irq-alpine-msi.c
index 9c8b1349ee17..a1430ab60a8a 100644
--- a/drivers/irqchip/irq-alpine-msi.c
+++ b/drivers/irqchip/irq-alpine-msi.c
@@ -165,7 +165,7 @@ static int alpine_msix_middle_domain_alloc(struct irq_domain *domain,
return 0;
err_sgi:
- irq_domain_free_irqs_parent(domain, virq, i - 1);
+ irq_domain_free_irqs_parent(domain, virq, i);
alpine_msix_free_sgi(priv, sgi, nr_irqs);
return err;
}
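
The one-liner above fixes an off-by-one in the error path of alpine_msix_middle_domain_alloc(): when the allocation loop fails at iteration i, parent IRQs 0..i-1 have already been allocated, so exactly i of them must be freed; passing i - 1 leaked the last one. A standalone sketch of that unwind rule, in plain C with acquire()/release() as hypothetical stand-ins for the parent-domain calls:

    /* Failure at index i means entries 0..i-1 succeeded, so release i entries. */
    #include <stdio.h>

    static int acquire(int idx)
    {
            return idx < 3 ? 0 : -1;        /* simulate a failure at index 3 */
    }

    static void release(int idx)
    {
            printf("release %d\n", idx);
    }

    static int alloc_range(int nr)
    {
            int i;

            for (i = 0; i < nr; i++) {
                    if (acquire(i) < 0)
                            goto err;
            }
            return 0;
    err:
            while (i--)     /* releases i entries: indices i-1 down to 0 */
                    release(i);
            return -1;
    }

    int main(void)
    {
            return alloc_range(8) ? 1 : 0;
    }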
diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
index 5c534d9fd2b0..2b1684c60e3c 100644
--- a/drivers/irqchip/irq-apple-aic.c
+++ b/drivers/irqchip/irq-apple-aic.c
@@ -234,7 +234,10 @@ enum fiq_hwirq {
AIC_NR_FIQ
};
+/* True if UNCORE/UNCORE2 and Sn_... IPI registers are present and used (A11+) */
static DEFINE_STATIC_KEY_TRUE(use_fast_ipi);
+/* True if SYS_IMP_APL_IPI_RR_LOCAL_EL1 exists for local fast IPIs (M1+) */
+static DEFINE_STATIC_KEY_TRUE(use_local_fast_ipi);
struct aic_info {
int version;
@@ -252,6 +255,7 @@ struct aic_info {
/* Features */
bool fast_ipi;
+ bool local_fast_ipi;
};
static const struct aic_info aic1_info __initconst = {
@@ -270,17 +274,32 @@ static const struct aic_info aic1_fipi_info __initconst = {
.fast_ipi = true,
};
+static const struct aic_info aic1_local_fipi_info __initconst = {
+ .version = 1,
+
+ .event = AIC_EVENT,
+ .target_cpu = AIC_TARGET_CPU,
+
+ .fast_ipi = true,
+ .local_fast_ipi = true,
+};
+
static const struct aic_info aic2_info __initconst = {
.version = 2,
.irq_cfg = AIC2_IRQ_CFG,
.fast_ipi = true,
+ .local_fast_ipi = true,
};
static const struct of_device_id aic_info_match[] = {
{
.compatible = "apple,t8103-aic",
+ .data = &aic1_local_fipi_info,
+ },
+ {
+ .compatible = "apple,t8015-aic",
.data = &aic1_fipi_info,
},
{
@@ -532,14 +551,9 @@ static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
* we check for everything here, even things we don't support yet.
*/
- if (read_sysreg_s(SYS_IMP_APL_IPI_SR_EL1) & IPI_SR_PENDING) {
- if (static_branch_likely(&use_fast_ipi)) {
- aic_handle_ipi(regs);
- } else {
- pr_err_ratelimited("Fast IPI fired. Acking.\n");
- write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
- }
- }
+ if (static_branch_likely(&use_fast_ipi) &&
+ (read_sysreg_s(SYS_IMP_APL_IPI_SR_EL1) & IPI_SR_PENDING))
+ aic_handle_ipi(regs);
if (TIMER_FIRING(read_sysreg(cntp_ctl_el0)))
generic_handle_domain_irq(aic_irqc->hw_domain,
@@ -563,7 +577,8 @@ static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
AIC_FIQ_HWIRQ(AIC_TMR_EL02_VIRT));
}
- if (read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & PMCR0_IACT) {
+ if ((read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & (PMCR0_IMODE | PMCR0_IACT)) ==
+ (FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_FIQ) | PMCR0_IACT)) {
int irq;
if (cpumask_test_cpu(smp_processor_id(),
&aic_irqc->fiq_aff[AIC_CPU_PMU_P]->aff))
@@ -574,8 +589,9 @@ static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
AIC_FIQ_HWIRQ(irq));
}
- if (FIELD_GET(UPMCR0_IMODE, read_sysreg_s(SYS_IMP_APL_UPMCR0_EL1)) == UPMCR0_IMODE_FIQ &&
- (read_sysreg_s(SYS_IMP_APL_UPMSR_EL1) & UPMSR_IACT)) {
+ if (static_branch_likely(&use_fast_ipi) &&
+ (FIELD_GET(UPMCR0_IMODE, read_sysreg_s(SYS_IMP_APL_UPMCR0_EL1)) == UPMCR0_IMODE_FIQ) &&
+ (read_sysreg_s(SYS_IMP_APL_UPMSR_EL1) & UPMSR_IACT)) {
/* Same story with uncore PMCs */
pr_err_ratelimited("Uncore PMC FIQ fired. Masking.\n");
sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE,
@@ -750,12 +766,12 @@ static void aic_ipi_send_fast(int cpu)
u64 cluster = MPIDR_CLUSTER(mpidr);
u64 idx = MPIDR_CPU(mpidr);
- if (MPIDR_CLUSTER(my_mpidr) == cluster)
- write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx),
- SYS_IMP_APL_IPI_RR_LOCAL_EL1);
- else
+ if (static_branch_likely(&use_local_fast_ipi) && MPIDR_CLUSTER(my_mpidr) == cluster) {
+ write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx), SYS_IMP_APL_IPI_RR_LOCAL_EL1);
+ } else {
write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx) | FIELD_PREP(IPI_RR_CLUSTER, cluster),
SYS_IMP_APL_IPI_RR_GLOBAL_EL1);
+ }
isb();
}
@@ -811,7 +827,8 @@ static int aic_init_cpu(unsigned int cpu)
/* Mask all hard-wired per-CPU IRQ/FIQ sources */
/* Pending Fast IPI FIQs */
- write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
+ if (static_branch_likely(&use_fast_ipi))
+ write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
/* Timer FIQs */
sysreg_clear_set(cntp_ctl_el0, 0, ARCH_TIMER_CTRL_IT_MASK);
@@ -832,8 +849,10 @@ static int aic_init_cpu(unsigned int cpu)
FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_OFF));
/* Uncore PMC FIQ */
- sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE,
- FIELD_PREP(UPMCR0_IMODE, UPMCR0_IMODE_OFF));
+ if (static_branch_likely(&use_fast_ipi)) {
+ sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE,
+ FIELD_PREP(UPMCR0_IMODE, UPMCR0_IMODE_OFF));
+ }
/* Commit all of the above */
isb();
@@ -987,11 +1006,12 @@ static int __init aic_of_ic_init(struct device_node *node, struct device_node *p
off += sizeof(u32) * (irqc->max_irq >> 5); /* MASK_CLR */
off += sizeof(u32) * (irqc->max_irq >> 5); /* HW_STATE */
- if (irqc->info.fast_ipi)
- static_branch_enable(&use_fast_ipi);
- else
+ if (!irqc->info.fast_ipi)
static_branch_disable(&use_fast_ipi);
+ if (!irqc->info.local_fast_ipi)
+ static_branch_disable(&use_local_fast_ipi);
+
irqc->info.die_stride = off - start_off;
irqc->hw_domain = irq_domain_create_tree(of_node_to_fwnode(node),
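
The AIC rework above hinges on default-true static keys: use_fast_ipi and use_local_fast_ipi start out enabled and are disabled once during init when the SoC lacks the feature, leaving only a patched branch on the FIQ hot path. A minimal sketch of that jump-label pattern, using only the generic <linux/jump_label.h> API (the has_fast_path key and example_* functions are invented for illustration):

    #include <linux/jump_label.h>

    /* Default-true key: compiled as the likely path until disabled at init. */
    static DEFINE_STATIC_KEY_TRUE(has_fast_path);

    static void __init example_detect(bool feature_present)
    {
            /* One-time, slow-path operation: patches every branch site. */
            if (!feature_present)
                    static_branch_disable(&has_fast_path);
    }

    static void example_hot_path(void)
    {
            if (static_branch_likely(&has_fast_path)) {
                    /* feature-specific fast handling */
            } else {
                    /* generic fallback */
            }
    }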
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index a55528469278..d7c5ef248474 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Marvell Armada 370 and Armada XP SoC IRQ handling
*
@@ -7,12 +8,11 @@
* Gregory CLEMENT <gregory.clement@free-electrons.com>
* Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
* Ben Dooks <ben.dooks@codethink.co.uk>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -29,6 +29,7 @@
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/msi.h>
+#include <linux/types.h>
#include <asm/mach/arch.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
@@ -64,19 +65,17 @@
* device
*
* The "global interrupt mask/unmask" is modified using the
- * ARMADA_370_XP_INT_SET_ENABLE_OFFS and
- * ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS registers, which are relative
- * to "main_int_base".
+ * MPIC_INT_SET_ENABLE and MPIC_INT_CLEAR_ENABLE
+ * registers, which are relative to "mpic->base".
*
- * The "per-CPU mask/unmask" is modified using the
- * ARMADA_370_XP_INT_SET_MASK_OFFS and
- * ARMADA_370_XP_INT_CLEAR_MASK_OFFS registers, which are relative to
- * "per_cpu_int_base". This base address points to a special address,
+ * The "per-CPU mask/unmask" is modified using the MPIC_INT_SET_MASK
+ * and MPIC_INT_CLEAR_MASK registers, which are relative to
+ * "mpic->per_cpu". This base address points to a special address,
* which automatically accesses the registers of the current CPU.
*
* The per-CPU mask/unmask can also be adjusted using the global
- * per-interrupt ARMADA_370_XP_INT_SOURCE_CTL register, which we use
- * to configure interrupt affinity.
+ * per-interrupt MPIC_INT_SOURCE_CTL register, which we use to
+ * configure interrupt affinity.
*
* Due to this model, all interrupts need to be mask/unmasked at two
* different levels: at the global level and at the per-CPU level.
@@ -90,9 +89,8 @@
* the current CPU, running the ->map() code. This allows to have
* the interrupt unmasked at this level in non-SMP
* configurations. In SMP configurations, the ->set_affinity()
- * callback is called, which using the
- * ARMADA_370_XP_INT_SOURCE_CTL() readjusts the per-CPU mask/unmask
- * for the interrupt.
+ * callback is called, which using the MPIC_INT_SOURCE_CTL()
+ * readjusts the per-CPU mask/unmask for the interrupt.
*
* The ->mask() and ->unmask() operations only mask/unmask the
* interrupt at the "global" level.
@@ -114,54 +112,97 @@
* at the per-CPU level.
*/
-/* Registers relative to main_int_base */
-#define ARMADA_370_XP_INT_CONTROL (0x00)
-#define ARMADA_370_XP_SW_TRIG_INT_OFFS (0x04)
-#define ARMADA_370_XP_INT_SET_ENABLE_OFFS (0x30)
-#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34)
-#define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4)
-#define ARMADA_370_XP_INT_SOURCE_CPU_MASK 0xF
-#define ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid) ((BIT(0) | BIT(8)) << cpuid)
-
-/* Registers relative to per_cpu_int_base */
-#define ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS (0x08)
-#define ARMADA_370_XP_IN_DRBEL_MSK_OFFS (0x0c)
-#define ARMADA_375_PPI_CAUSE (0x10)
-#define ARMADA_370_XP_CPU_INTACK_OFFS (0x44)
-#define ARMADA_370_XP_INT_SET_MASK_OFFS (0x48)
-#define ARMADA_370_XP_INT_CLEAR_MASK_OFFS (0x4C)
-#define ARMADA_370_XP_INT_FABRIC_MASK_OFFS (0x54)
-#define ARMADA_370_XP_INT_CAUSE_PERF(cpu) (1 << cpu)
-
-#define ARMADA_370_XP_MAX_PER_CPU_IRQS (28)
-
-#define IPI_DOORBELL_START (0)
-#define IPI_DOORBELL_END (8)
-#define IPI_DOORBELL_MASK 0xFF
-#define PCI_MSI_DOORBELL_START (16)
-#define PCI_MSI_DOORBELL_NR (16)
-#define PCI_MSI_DOORBELL_END (32)
-#define PCI_MSI_DOORBELL_MASK 0xFFFF0000
-
-static void __iomem *per_cpu_int_base;
-static void __iomem *main_int_base;
-static struct irq_domain *armada_370_xp_mpic_domain;
-static u32 doorbell_mask_reg;
-static int parent_irq;
+/* Registers relative to mpic->base */
+#define MPIC_INT_CONTROL 0x00
+#define MPIC_INT_CONTROL_NUMINT_MASK GENMASK(12, 2)
+#define MPIC_SW_TRIG_INT 0x04
+#define MPIC_INT_SET_ENABLE 0x30
+#define MPIC_INT_CLEAR_ENABLE 0x34
+#define MPIC_INT_SOURCE_CTL(hwirq) (0x100 + (hwirq) * 4)
+#define MPIC_INT_SOURCE_CPU_MASK GENMASK(3, 0)
+#define MPIC_INT_IRQ_FIQ_MASK(cpuid) ((BIT(0) | BIT(8)) << (cpuid))
+
+/* Registers relative to mpic->per_cpu */
+#define MPIC_IN_DRBEL_CAUSE 0x08
+#define MPIC_IN_DRBEL_MASK 0x0c
+#define MPIC_PPI_CAUSE 0x10
+#define MPIC_CPU_INTACK 0x44
+#define MPIC_CPU_INTACK_IID_MASK GENMASK(9, 0)
+#define MPIC_INT_SET_MASK 0x48
+#define MPIC_INT_CLEAR_MASK 0x4C
+#define MPIC_INT_FABRIC_MASK 0x54
+#define MPIC_INT_CAUSE_PERF(cpu) BIT(cpu)
+
+#define MPIC_PER_CPU_IRQS_NR 29
+
+/* IPI and MSI interrupt definitions for IPI platforms */
+#define IPI_DOORBELL_NR 8
+#define IPI_DOORBELL_MASK GENMASK(7, 0)
+#define PCI_MSI_DOORBELL_START 16
+#define PCI_MSI_DOORBELL_NR 16
+#define PCI_MSI_DOORBELL_MASK GENMASK(31, 16)
+
+/* MSI interrupt definitions for non-IPI platforms */
+#define PCI_MSI_FULL_DOORBELL_START 0
+#define PCI_MSI_FULL_DOORBELL_NR 32
+#define PCI_MSI_FULL_DOORBELL_MASK GENMASK(31, 0)
+#define PCI_MSI_FULL_DOORBELL_SRC0_MASK GENMASK(15, 0)
+#define PCI_MSI_FULL_DOORBELL_SRC1_MASK GENMASK(31, 16)
+
+/**
+ * struct mpic - MPIC private data structure
+ * @base: MPIC registers base address
+ * @per_cpu: per-CPU registers base address
+ * @parent_irq: parent IRQ if MPIC is not top-level interrupt controller
+ * @domain: MPIC main interrupt domain
+ * @ipi_domain: IPI domain
+ * @msi_domain: MSI domain
+ * @msi_inner_domain: MSI inner domain
+ * @msi_used: bitmap of used MSI numbers
+ * @msi_lock: mutex serializing access to @msi_used
+ * @msi_doorbell_addr: physical address of MSI doorbell register
+ * @msi_doorbell_mask: mask of available doorbell bits for MSIs (either PCI_MSI_DOORBELL_MASK or
+ * PCI_MSI_FULL_DOORBELL_MASK)
+ * @msi_doorbell_start: first set bit in @msi_doorbell_mask
+ * @msi_doorbell_size: number of set bits in @msi_doorbell_mask
+ * @doorbell_mask: doorbell mask of MSIs and IPIs, stored on suspend, restored on resume
+ */
+struct mpic {
+ void __iomem *base;
+ void __iomem *per_cpu;
+ int parent_irq;
+ struct irq_domain *domain;
+#ifdef CONFIG_SMP
+ struct irq_domain *ipi_domain;
+#endif
#ifdef CONFIG_PCI_MSI
-static struct irq_domain *armada_370_xp_msi_domain;
-static struct irq_domain *armada_370_xp_msi_inner_domain;
-static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR);
-static DEFINE_MUTEX(msi_used_lock);
-static phys_addr_t msi_doorbell_addr;
+ struct irq_domain *msi_domain;
+ struct irq_domain *msi_inner_domain;
+ DECLARE_BITMAP(msi_used, PCI_MSI_FULL_DOORBELL_NR);
+ struct mutex msi_lock;
+ phys_addr_t msi_doorbell_addr;
+ u32 msi_doorbell_mask;
+ unsigned int msi_doorbell_start, msi_doorbell_size;
#endif
+ u32 doorbell_mask;
+};
+
+static struct mpic *mpic_data __ro_after_init;
-static inline bool is_percpu_irq(irq_hw_number_t irq)
+static inline bool mpic_is_ipi_available(struct mpic *mpic)
{
- if (irq <= ARMADA_370_XP_MAX_PER_CPU_IRQS)
- return true;
+ /*
+ * We distinguish IPI availability in the IC by the IC not having a
+ * parent irq defined. If a parent irq is defined, there is a parent
+ * interrupt controller (e.g. GIC) that takes care of inter-processor
+ * interrupts.
+ */
+ return mpic->parent_irq <= 0;
+}
- return false;
+static inline bool mpic_is_percpu_irq(irq_hw_number_t hwirq)
+{
+ return hwirq < MPIC_PER_CPU_IRQS_NR;
}
/*
@@ -169,55 +210,53 @@ static inline bool is_percpu_irq(irq_hw_number_t irq)
* For shared global interrupts, mask/unmask global enable bit
* For CPU interrupts, mask/unmask the calling CPU's bit
*/
-static void armada_370_xp_irq_mask(struct irq_data *d)
+static void mpic_irq_mask(struct irq_data *d)
{
+ struct mpic *mpic = irq_data_get_irq_chip_data(d);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- if (!is_percpu_irq(hwirq))
- writel(hwirq, main_int_base +
- ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
+ if (!mpic_is_percpu_irq(hwirq))
+ writel(hwirq, mpic->base + MPIC_INT_CLEAR_ENABLE);
else
- writel(hwirq, per_cpu_int_base +
- ARMADA_370_XP_INT_SET_MASK_OFFS);
+ writel(hwirq, mpic->per_cpu + MPIC_INT_SET_MASK);
}
-static void armada_370_xp_irq_unmask(struct irq_data *d)
+static void mpic_irq_unmask(struct irq_data *d)
{
+ struct mpic *mpic = irq_data_get_irq_chip_data(d);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- if (!is_percpu_irq(hwirq))
- writel(hwirq, main_int_base +
- ARMADA_370_XP_INT_SET_ENABLE_OFFS);
+ if (!mpic_is_percpu_irq(hwirq))
+ writel(hwirq, mpic->base + MPIC_INT_SET_ENABLE);
else
- writel(hwirq, per_cpu_int_base +
- ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+ writel(hwirq, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
}
#ifdef CONFIG_PCI_MSI
-static struct irq_chip armada_370_xp_msi_irq_chip = {
- .name = "MPIC MSI",
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
+static struct irq_chip mpic_msi_irq_chip = {
+ .name = "MPIC MSI",
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
};
-static struct msi_domain_info armada_370_xp_msi_domain_info = {
+static struct msi_domain_info mpic_msi_domain_info = {
.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
- .chip = &armada_370_xp_msi_irq_chip,
+ .chip = &mpic_msi_irq_chip,
};
-static void armada_370_xp_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+static void mpic_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
- unsigned int cpu = cpumask_first(irq_data_get_effective_affinity_mask(data));
+ unsigned int cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
+ struct mpic *mpic = irq_data_get_irq_chip_data(d);
- msg->address_lo = lower_32_bits(msi_doorbell_addr);
- msg->address_hi = upper_32_bits(msi_doorbell_addr);
- msg->data = BIT(cpu + 8) | (data->hwirq + PCI_MSI_DOORBELL_START);
+ msg->address_lo = lower_32_bits(mpic->msi_doorbell_addr);
+ msg->address_hi = upper_32_bits(mpic->msi_doorbell_addr);
+ msg->data = BIT(cpu + 8) | (d->hwirq + mpic->msi_doorbell_start);
}
-static int armada_370_xp_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
+static int mpic_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
{
unsigned int cpu;
@@ -229,33 +268,34 @@ static int armada_370_xp_msi_set_affinity(struct irq_data *irq_data,
if (cpu >= nr_cpu_ids)
return -EINVAL;
- irq_data_update_effective_affinity(irq_data, cpumask_of(cpu));
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
return IRQ_SET_MASK_OK;
}
-static struct irq_chip armada_370_xp_msi_bottom_irq_chip = {
+static struct irq_chip mpic_msi_bottom_irq_chip = {
.name = "MPIC MSI",
- .irq_compose_msi_msg = armada_370_xp_compose_msi_msg,
- .irq_set_affinity = armada_370_xp_msi_set_affinity,
+ .irq_compose_msi_msg = mpic_compose_msi_msg,
+ .irq_set_affinity = mpic_msi_set_affinity,
};
-static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs, void *args)
+static int mpic_msi_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs,
+ void *args)
{
- int hwirq, i;
+ struct mpic *mpic = domain->host_data;
+ int hwirq;
- mutex_lock(&msi_used_lock);
- hwirq = bitmap_find_free_region(msi_used, PCI_MSI_DOORBELL_NR,
+ mutex_lock(&mpic->msi_lock);
+ hwirq = bitmap_find_free_region(mpic->msi_used, mpic->msi_doorbell_size,
order_base_2(nr_irqs));
- mutex_unlock(&msi_used_lock);
+ mutex_unlock(&mpic->msi_lock);
if (hwirq < 0)
return -ENOSPC;
- for (i = 0; i < nr_irqs; i++) {
+ for (unsigned int i = 0; i < nr_irqs; i++) {
irq_domain_set_info(domain, virq + i, hwirq + i,
- &armada_370_xp_msi_bottom_irq_chip,
+ &mpic_msi_bottom_irq_chip,
domain->host_data, handle_simple_irq,
NULL, NULL);
}
@@ -263,71 +303,84 @@ static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq,
return 0;
}
-static void armada_370_xp_msi_free(struct irq_domain *domain,
- unsigned int virq, unsigned int nr_irqs)
+static void mpic_msi_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs)
{
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct mpic *mpic = domain->host_data;
- mutex_lock(&msi_used_lock);
- bitmap_release_region(msi_used, d->hwirq, order_base_2(nr_irqs));
- mutex_unlock(&msi_used_lock);
+ mutex_lock(&mpic->msi_lock);
+ bitmap_release_region(mpic->msi_used, d->hwirq, order_base_2(nr_irqs));
+ mutex_unlock(&mpic->msi_lock);
}
-static const struct irq_domain_ops armada_370_xp_msi_domain_ops = {
- .alloc = armada_370_xp_msi_alloc,
- .free = armada_370_xp_msi_free,
+static const struct irq_domain_ops mpic_msi_domain_ops = {
+ .alloc = mpic_msi_alloc,
+ .free = mpic_msi_free,
};
-static void armada_370_xp_msi_reenable_percpu(void)
+static void mpic_msi_reenable_percpu(struct mpic *mpic)
{
u32 reg;
/* Enable MSI doorbell mask and combined cpu local interrupt */
- reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS)
- | PCI_MSI_DOORBELL_MASK;
- writel(reg, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+ reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK);
+ reg |= mpic->msi_doorbell_mask;
+ writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK);
+
/* Unmask local doorbell interrupt */
- writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+ writel(1, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
}
-static int armada_370_xp_msi_init(struct device_node *node,
- phys_addr_t main_int_phys_base)
+static int __init mpic_msi_init(struct mpic *mpic, struct device_node *node,
+ phys_addr_t main_int_phys_base)
{
- msi_doorbell_addr = main_int_phys_base +
- ARMADA_370_XP_SW_TRIG_INT_OFFS;
+ mpic->msi_doorbell_addr = main_int_phys_base + MPIC_SW_TRIG_INT;
+
+ mutex_init(&mpic->msi_lock);
- armada_370_xp_msi_inner_domain =
- irq_domain_add_linear(NULL, PCI_MSI_DOORBELL_NR,
- &armada_370_xp_msi_domain_ops, NULL);
- if (!armada_370_xp_msi_inner_domain)
+ if (mpic_is_ipi_available(mpic)) {
+ mpic->msi_doorbell_start = PCI_MSI_DOORBELL_START;
+ mpic->msi_doorbell_size = PCI_MSI_DOORBELL_NR;
+ mpic->msi_doorbell_mask = PCI_MSI_DOORBELL_MASK;
+ } else {
+ mpic->msi_doorbell_start = PCI_MSI_FULL_DOORBELL_START;
+ mpic->msi_doorbell_size = PCI_MSI_FULL_DOORBELL_NR;
+ mpic->msi_doorbell_mask = PCI_MSI_FULL_DOORBELL_MASK;
+ }
+
+ mpic->msi_inner_domain = irq_domain_add_linear(NULL, mpic->msi_doorbell_size,
+ &mpic_msi_domain_ops, mpic);
+ if (!mpic->msi_inner_domain)
return -ENOMEM;
- armada_370_xp_msi_domain =
- pci_msi_create_irq_domain(of_node_to_fwnode(node),
- &armada_370_xp_msi_domain_info,
- armada_370_xp_msi_inner_domain);
- if (!armada_370_xp_msi_domain) {
- irq_domain_remove(armada_370_xp_msi_inner_domain);
+ mpic->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node), &mpic_msi_domain_info,
+ mpic->msi_inner_domain);
+ if (!mpic->msi_domain) {
+ irq_domain_remove(mpic->msi_inner_domain);
return -ENOMEM;
}
- armada_370_xp_msi_reenable_percpu();
+ mpic_msi_reenable_percpu(mpic);
+
+ /* Unmask low 16 MSI irqs on non-IPI platforms */
+ if (!mpic_is_ipi_available(mpic))
+ writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
return 0;
}
#else
-static void armada_370_xp_msi_reenable_percpu(void) {}
+static __maybe_unused void mpic_msi_reenable_percpu(struct mpic *mpic) {}
-static inline int armada_370_xp_msi_init(struct device_node *node,
- phys_addr_t main_int_phys_base)
+static inline int mpic_msi_init(struct mpic *mpic, struct device_node *node,
+ phys_addr_t main_int_phys_base)
{
return 0;
}
#endif
-static void armada_xp_mpic_perf_init(void)
+static void mpic_perf_init(struct mpic *mpic)
{
- unsigned long cpuid;
+ u32 cpuid;
/*
* This Performance Counter Overflow interrupt is specific for
@@ -339,38 +392,39 @@ static void armada_xp_mpic_perf_init(void)
cpuid = cpu_logical_map(smp_processor_id());
/* Enable Performance Counter Overflow interrupts */
- writel(ARMADA_370_XP_INT_CAUSE_PERF(cpuid),
- per_cpu_int_base + ARMADA_370_XP_INT_FABRIC_MASK_OFFS);
+ writel(MPIC_INT_CAUSE_PERF(cpuid), mpic->per_cpu + MPIC_INT_FABRIC_MASK);
}
#ifdef CONFIG_SMP
-static struct irq_domain *ipi_domain;
-
-static void armada_370_xp_ipi_mask(struct irq_data *d)
+static void mpic_ipi_mask(struct irq_data *d)
{
+ struct mpic *mpic = irq_data_get_irq_chip_data(d);
u32 reg;
- reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+
+ reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK);
reg &= ~BIT(d->hwirq);
- writel(reg, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+ writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK);
}
-static void armada_370_xp_ipi_unmask(struct irq_data *d)
+static void mpic_ipi_unmask(struct irq_data *d)
{
+ struct mpic *mpic = irq_data_get_irq_chip_data(d);
u32 reg;
- reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+
+ reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK);
reg |= BIT(d->hwirq);
- writel(reg, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+ writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK);
}
-static void armada_370_xp_ipi_send_mask(struct irq_data *d,
- const struct cpumask *mask)
+static void mpic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
{
- unsigned long map = 0;
- int cpu;
+ struct mpic *mpic = irq_data_get_irq_chip_data(d);
+ unsigned int cpu;
+ u32 map = 0;
/* Convert our logical CPU mask into a physical one. */
for_each_cpu(cpu, mask)
- map |= 1 << cpu_logical_map(cpu);
+ map |= BIT(cpu_logical_map(cpu));
/*
* Ensure that stores to Normal memory are visible to the
@@ -379,433 +433,465 @@ static void armada_370_xp_ipi_send_mask(struct irq_data *d,
dsb();
/* submit softirq */
- writel((map << 8) | d->hwirq, main_int_base +
- ARMADA_370_XP_SW_TRIG_INT_OFFS);
+ writel((map << 8) | d->hwirq, mpic->base + MPIC_SW_TRIG_INT);
}
-static void armada_370_xp_ipi_ack(struct irq_data *d)
+static void mpic_ipi_ack(struct irq_data *d)
{
- writel(~BIT(d->hwirq), per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
+ struct mpic *mpic = irq_data_get_irq_chip_data(d);
+
+ writel(~BIT(d->hwirq), mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);
}
-static struct irq_chip ipi_irqchip = {
+static struct irq_chip mpic_ipi_irqchip = {
.name = "IPI",
- .irq_ack = armada_370_xp_ipi_ack,
- .irq_mask = armada_370_xp_ipi_mask,
- .irq_unmask = armada_370_xp_ipi_unmask,
- .ipi_send_mask = armada_370_xp_ipi_send_mask,
+ .irq_ack = mpic_ipi_ack,
+ .irq_mask = mpic_ipi_mask,
+ .irq_unmask = mpic_ipi_unmask,
+ .ipi_send_mask = mpic_ipi_send_mask,
};
-static int armada_370_xp_ipi_alloc(struct irq_domain *d,
- unsigned int virq,
- unsigned int nr_irqs, void *args)
+static int mpic_ipi_alloc(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs, void *args)
{
- int i;
-
- for (i = 0; i < nr_irqs; i++) {
+ for (unsigned int i = 0; i < nr_irqs; i++) {
irq_set_percpu_devid(virq + i);
- irq_domain_set_info(d, virq + i, i, &ipi_irqchip,
- d->host_data,
- handle_percpu_devid_irq,
- NULL, NULL);
+ irq_domain_set_info(d, virq + i, i, &mpic_ipi_irqchip, d->host_data,
+ handle_percpu_devid_irq, NULL, NULL);
}
return 0;
}
-static void armada_370_xp_ipi_free(struct irq_domain *d,
- unsigned int virq,
- unsigned int nr_irqs)
+static void mpic_ipi_free(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs)
{
/* Not freeing IPIs */
}
-static const struct irq_domain_ops ipi_domain_ops = {
- .alloc = armada_370_xp_ipi_alloc,
- .free = armada_370_xp_ipi_free,
+static const struct irq_domain_ops mpic_ipi_domain_ops = {
+ .alloc = mpic_ipi_alloc,
+ .free = mpic_ipi_free,
};
-static void ipi_resume(void)
+static void mpic_ipi_resume(struct mpic *mpic)
{
- int i;
-
- for (i = 0; i < IPI_DOORBELL_END; i++) {
- int irq;
+ for (irq_hw_number_t i = 0; i < IPI_DOORBELL_NR; i++) {
+ unsigned int virq = irq_find_mapping(mpic->ipi_domain, i);
+ struct irq_data *d;
- irq = irq_find_mapping(ipi_domain, i);
- if (irq <= 0)
+ if (!virq || !irq_percpu_is_enabled(virq))
continue;
- if (irq_percpu_is_enabled(irq)) {
- struct irq_data *d;
- d = irq_domain_get_irq_data(ipi_domain, irq);
- armada_370_xp_ipi_unmask(d);
- }
+
+ d = irq_domain_get_irq_data(mpic->ipi_domain, virq);
+ mpic_ipi_unmask(d);
}
}
-static __init void armada_xp_ipi_init(struct device_node *node)
+static int __init mpic_ipi_init(struct mpic *mpic, struct device_node *node)
{
int base_ipi;
- ipi_domain = irq_domain_create_linear(of_node_to_fwnode(node),
- IPI_DOORBELL_END,
- &ipi_domain_ops, NULL);
- if (WARN_ON(!ipi_domain))
- return;
+ mpic->ipi_domain = irq_domain_create_linear(of_node_to_fwnode(node), IPI_DOORBELL_NR,
+ &mpic_ipi_domain_ops, mpic);
+ if (WARN_ON(!mpic->ipi_domain))
+ return -ENOMEM;
- irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);
- base_ipi = irq_domain_alloc_irqs(ipi_domain, IPI_DOORBELL_END, NUMA_NO_NODE, NULL);
+ irq_domain_update_bus_token(mpic->ipi_domain, DOMAIN_BUS_IPI);
+ base_ipi = irq_domain_alloc_irqs(mpic->ipi_domain, IPI_DOORBELL_NR, NUMA_NO_NODE, NULL);
if (WARN_ON(!base_ipi))
- return;
+ return -ENOMEM;
- set_smp_ipi_range(base_ipi, IPI_DOORBELL_END);
-}
+ set_smp_ipi_range(base_ipi, IPI_DOORBELL_NR);
-static DEFINE_RAW_SPINLOCK(irq_controller_lock);
+ return 0;
+}
-static int armada_xp_set_affinity(struct irq_data *d,
- const struct cpumask *mask_val, bool force)
+static int mpic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force)
{
+ struct mpic *mpic = irq_data_get_irq_chip_data(d);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- unsigned long reg, mask;
- int cpu;
+ unsigned int cpu;
/* Select a single core from the affinity mask which is online */
cpu = cpumask_any_and(mask_val, cpu_online_mask);
- mask = 1UL << cpu_logical_map(cpu);
- raw_spin_lock(&irq_controller_lock);
- reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
- reg = (reg & (~ARMADA_370_XP_INT_SOURCE_CPU_MASK)) | mask;
- writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
- raw_spin_unlock(&irq_controller_lock);
+ atomic_io_modify(mpic->base + MPIC_INT_SOURCE_CTL(hwirq),
+ MPIC_INT_SOURCE_CPU_MASK, BIT(cpu_logical_map(cpu)));
irq_data_update_effective_affinity(d, cpumask_of(cpu));
return IRQ_SET_MASK_OK;
}
-static void armada_xp_mpic_smp_cpu_init(void)
+static void mpic_smp_cpu_init(struct mpic *mpic)
{
- u32 control;
- int nr_irqs, i;
-
- control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
- nr_irqs = (control >> 2) & 0x3ff;
+ for (irq_hw_number_t i = 0; i < mpic->domain->hwirq_max; i++)
+ writel(i, mpic->per_cpu + MPIC_INT_SET_MASK);
- for (i = 0; i < nr_irqs; i++)
- writel(i, per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);
+ if (!mpic_is_ipi_available(mpic))
+ return;
/* Disable all IPIs */
- writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+ writel(0, mpic->per_cpu + MPIC_IN_DRBEL_MASK);
/* Clear pending IPIs */
- writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
+ writel(0, mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);
/* Unmask IPI interrupt */
- writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+ writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
}
-static void armada_xp_mpic_reenable_percpu(void)
+static void mpic_reenable_percpu(struct mpic *mpic)
{
- unsigned int irq;
-
/* Re-enable per-CPU interrupts that were enabled before suspend */
- for (irq = 0; irq < ARMADA_370_XP_MAX_PER_CPU_IRQS; irq++) {
- struct irq_data *data;
- int virq;
-
- virq = irq_linear_revmap(armada_370_xp_mpic_domain, irq);
- if (virq == 0)
- continue;
+ for (irq_hw_number_t i = 0; i < MPIC_PER_CPU_IRQS_NR; i++) {
+ unsigned int virq = irq_linear_revmap(mpic->domain, i);
+ struct irq_data *d;
- data = irq_get_irq_data(virq);
-
- if (!irq_percpu_is_enabled(virq))
+ if (!virq || !irq_percpu_is_enabled(virq))
continue;
- armada_370_xp_irq_unmask(data);
+ d = irq_get_irq_data(virq);
+ mpic_irq_unmask(d);
}
- ipi_resume();
+ if (mpic_is_ipi_available(mpic))
+ mpic_ipi_resume(mpic);
- armada_370_xp_msi_reenable_percpu();
+ mpic_msi_reenable_percpu(mpic);
}
-static int armada_xp_mpic_starting_cpu(unsigned int cpu)
+static int mpic_starting_cpu(unsigned int cpu)
{
- armada_xp_mpic_perf_init();
- armada_xp_mpic_smp_cpu_init();
- armada_xp_mpic_reenable_percpu();
+ struct mpic *mpic = irq_get_default_host()->host_data;
+
+ mpic_perf_init(mpic);
+ mpic_smp_cpu_init(mpic);
+ mpic_reenable_percpu(mpic);
+
return 0;
}
static int mpic_cascaded_starting_cpu(unsigned int cpu)
{
- armada_xp_mpic_perf_init();
- armada_xp_mpic_reenable_percpu();
- enable_percpu_irq(parent_irq, IRQ_TYPE_NONE);
+ struct mpic *mpic = mpic_data;
+
+ mpic_perf_init(mpic);
+ mpic_reenable_percpu(mpic);
+ enable_percpu_irq(mpic->parent_irq, IRQ_TYPE_NONE);
+
return 0;
}
#else
-static void armada_xp_mpic_smp_cpu_init(void) {}
-static void ipi_resume(void) {}
+static void mpic_smp_cpu_init(struct mpic *mpic) {}
+static void mpic_ipi_resume(struct mpic *mpic) {}
#endif
-static struct irq_chip armada_370_xp_irq_chip = {
+static struct irq_chip mpic_irq_chip = {
.name = "MPIC",
- .irq_mask = armada_370_xp_irq_mask,
- .irq_mask_ack = armada_370_xp_irq_mask,
- .irq_unmask = armada_370_xp_irq_unmask,
+ .irq_mask = mpic_irq_mask,
+ .irq_mask_ack = mpic_irq_mask,
+ .irq_unmask = mpic_irq_unmask,
#ifdef CONFIG_SMP
- .irq_set_affinity = armada_xp_set_affinity,
+ .irq_set_affinity = mpic_set_affinity,
#endif
.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
};
-static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
- unsigned int virq, irq_hw_number_t hw)
+static int mpic_irq_map(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq)
{
- armada_370_xp_irq_mask(irq_get_irq_data(virq));
- if (!is_percpu_irq(hw))
- writel(hw, per_cpu_int_base +
- ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+ struct mpic *mpic = domain->host_data;
+
+ /* IRQs 0 and 1 cannot be mapped, they are handled internally */
+ if (hwirq <= 1)
+ return -EINVAL;
+
+ irq_set_chip_data(virq, mpic);
+
+ mpic_irq_mask(irq_get_irq_data(virq));
+ if (!mpic_is_percpu_irq(hwirq))
+ writel(hwirq, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
else
- writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
+ writel(hwirq, mpic->base + MPIC_INT_SET_ENABLE);
irq_set_status_flags(virq, IRQ_LEVEL);
- if (is_percpu_irq(hw)) {
+ if (mpic_is_percpu_irq(hwirq)) {
irq_set_percpu_devid(virq);
- irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
- handle_percpu_devid_irq);
+ irq_set_chip_and_handler(virq, &mpic_irq_chip, handle_percpu_devid_irq);
} else {
- irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
- handle_level_irq);
+ irq_set_chip_and_handler(virq, &mpic_irq_chip, handle_level_irq);
irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
}
irq_set_probe(virq);
-
return 0;
}
-static const struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
- .map = armada_370_xp_mpic_irq_map,
- .xlate = irq_domain_xlate_onecell,
+static const struct irq_domain_ops mpic_irq_ops = {
+ .map = mpic_irq_map,
+ .xlate = irq_domain_xlate_onecell,
};
#ifdef CONFIG_PCI_MSI
-static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
+static void mpic_handle_msi_irq(struct mpic *mpic)
{
- u32 msimask, msinr;
-
- msimask = readl_relaxed(per_cpu_int_base +
- ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
- & PCI_MSI_DOORBELL_MASK;
+ unsigned long cause;
+ unsigned int i;
- writel(~msimask, per_cpu_int_base +
- ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
+ cause = readl_relaxed(mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);
+ cause &= mpic->msi_doorbell_mask;
+ writel(~cause, mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);
- for (msinr = PCI_MSI_DOORBELL_START;
- msinr < PCI_MSI_DOORBELL_END; msinr++) {
- unsigned int irq;
+ for_each_set_bit(i, &cause, BITS_PER_LONG)
+ generic_handle_domain_irq(mpic->msi_inner_domain, i - mpic->msi_doorbell_start);
+}
+#else
+static void mpic_handle_msi_irq(struct mpic *mpic) {}
+#endif
- if (!(msimask & BIT(msinr)))
- continue;
+#ifdef CONFIG_SMP
+static void mpic_handle_ipi_irq(struct mpic *mpic)
+{
+ unsigned long cause;
+ irq_hw_number_t i;
- irq = msinr - PCI_MSI_DOORBELL_START;
+ cause = readl_relaxed(mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);
+ cause &= IPI_DOORBELL_MASK;
- generic_handle_domain_irq(armada_370_xp_msi_inner_domain, irq);
- }
+ for_each_set_bit(i, &cause, IPI_DOORBELL_NR)
+ generic_handle_domain_irq(mpic->ipi_domain, i);
}
#else
-static void armada_370_xp_handle_msi_irq(struct pt_regs *r, bool b) {}
+static inline void mpic_handle_ipi_irq(struct mpic *mpic) {}
#endif
-static void armada_370_xp_mpic_handle_cascade_irq(struct irq_desc *desc)
+static void mpic_handle_cascade_irq(struct irq_desc *desc)
{
+ struct mpic *mpic = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
- unsigned long irqmap, irqn, irqsrc, cpuid;
+ unsigned long cause;
+ u32 irqsrc, cpuid;
+ irq_hw_number_t i;
chained_irq_enter(chip, desc);
- irqmap = readl_relaxed(per_cpu_int_base + ARMADA_375_PPI_CAUSE);
+ cause = readl_relaxed(mpic->per_cpu + MPIC_PPI_CAUSE);
cpuid = cpu_logical_map(smp_processor_id());
- for_each_set_bit(irqn, &irqmap, BITS_PER_LONG) {
- irqsrc = readl_relaxed(main_int_base +
- ARMADA_370_XP_INT_SOURCE_CTL(irqn));
+ for_each_set_bit(i, &cause, MPIC_PER_CPU_IRQS_NR) {
+ irqsrc = readl_relaxed(mpic->base + MPIC_INT_SOURCE_CTL(i));
/* Check if the interrupt is not masked on current CPU.
* Test IRQ (0-1) and FIQ (8-9) mask bits.
*/
- if (!(irqsrc & ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid)))
+ if (!(irqsrc & MPIC_INT_IRQ_FIQ_MASK(cpuid)))
continue;
- if (irqn == 1) {
- armada_370_xp_handle_msi_irq(NULL, true);
+ if (i == 0 || i == 1) {
+ mpic_handle_msi_irq(mpic);
continue;
}
- generic_handle_domain_irq(armada_370_xp_mpic_domain, irqn);
+ generic_handle_domain_irq(mpic->domain, i);
}
chained_irq_exit(chip, desc);
}
-static void __exception_irq_entry
-armada_370_xp_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry mpic_handle_irq(struct pt_regs *regs)
{
- u32 irqstat, irqnr;
+ struct mpic *mpic = irq_get_default_host()->host_data;
+ irq_hw_number_t i;
+ u32 irqstat;
do {
- irqstat = readl_relaxed(per_cpu_int_base +
- ARMADA_370_XP_CPU_INTACK_OFFS);
- irqnr = irqstat & 0x3FF;
+ irqstat = readl_relaxed(mpic->per_cpu + MPIC_CPU_INTACK);
+ i = FIELD_GET(MPIC_CPU_INTACK_IID_MASK, irqstat);
- if (irqnr > 1022)
+ if (i > 1022)
break;
- if (irqnr > 1) {
- generic_handle_domain_irq(armada_370_xp_mpic_domain,
- irqnr);
- continue;
- }
+ if (i > 1)
+ generic_handle_domain_irq(mpic->domain, i);
/* MSI handling */
- if (irqnr == 1)
- armada_370_xp_handle_msi_irq(regs, false);
+ if (i == 1)
+ mpic_handle_msi_irq(mpic);
-#ifdef CONFIG_SMP
/* IPI Handling */
- if (irqnr == 0) {
- unsigned long ipimask;
- int ipi;
-
- ipimask = readl_relaxed(per_cpu_int_base +
- ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
- & IPI_DOORBELL_MASK;
-
- for_each_set_bit(ipi, &ipimask, IPI_DOORBELL_END)
- generic_handle_domain_irq(ipi_domain, ipi);
- }
-#endif
-
+ if (i == 0)
+ mpic_handle_ipi_irq(mpic);
} while (1);
}
-static int armada_370_xp_mpic_suspend(void)
+static int mpic_suspend(void)
{
- doorbell_mask_reg = readl(per_cpu_int_base +
- ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+ struct mpic *mpic = mpic_data;
+
+ mpic->doorbell_mask = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK);
+
return 0;
}
-static void armada_370_xp_mpic_resume(void)
+static void mpic_resume(void)
{
- int nirqs;
- irq_hw_number_t irq;
+ struct mpic *mpic = mpic_data;
+ bool src0, src1;
/* Re-enable interrupts */
- nirqs = (readl(main_int_base + ARMADA_370_XP_INT_CONTROL) >> 2) & 0x3ff;
- for (irq = 0; irq < nirqs; irq++) {
- struct irq_data *data;
- int virq;
+ for (irq_hw_number_t i = 0; i < mpic->domain->hwirq_max; i++) {
+ unsigned int virq = irq_linear_revmap(mpic->domain, i);
+ struct irq_data *d;
- virq = irq_linear_revmap(armada_370_xp_mpic_domain, irq);
- if (virq == 0)
+ if (!virq)
continue;
- data = irq_get_irq_data(virq);
+ d = irq_get_irq_data(virq);
- if (!is_percpu_irq(irq)) {
+ if (!mpic_is_percpu_irq(i)) {
/* Non per-CPU interrupts */
- writel(irq, per_cpu_int_base +
- ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
- if (!irqd_irq_disabled(data))
- armada_370_xp_irq_unmask(data);
+ writel(i, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
+ if (!irqd_irq_disabled(d))
+ mpic_irq_unmask(d);
} else {
/* Per-CPU interrupts */
- writel(irq, main_int_base +
- ARMADA_370_XP_INT_SET_ENABLE_OFFS);
+ writel(i, mpic->base + MPIC_INT_SET_ENABLE);
/*
- * Re-enable on the current CPU,
- * armada_xp_mpic_reenable_percpu() will take
- * care of secondary CPUs when they come up.
+ * Re-enable on the current CPU, mpic_reenable_percpu()
+ * will take care of secondary CPUs when they come up.
*/
if (irq_percpu_is_enabled(virq))
- armada_370_xp_irq_unmask(data);
+ mpic_irq_unmask(d);
}
}
/* Reconfigure doorbells for IPIs and MSIs */
- writel(doorbell_mask_reg,
- per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
- if (doorbell_mask_reg & IPI_DOORBELL_MASK)
- writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
- if (doorbell_mask_reg & PCI_MSI_DOORBELL_MASK)
- writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+ writel(mpic->doorbell_mask, mpic->per_cpu + MPIC_IN_DRBEL_MASK);
+
+ if (mpic_is_ipi_available(mpic)) {
+ src0 = mpic->doorbell_mask & IPI_DOORBELL_MASK;
+ src1 = mpic->doorbell_mask & PCI_MSI_DOORBELL_MASK;
+ } else {
+ src0 = mpic->doorbell_mask & PCI_MSI_FULL_DOORBELL_SRC0_MASK;
+ src1 = mpic->doorbell_mask & PCI_MSI_FULL_DOORBELL_SRC1_MASK;
+ }
+
+ if (src0)
+ writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
+ if (src1)
+ writel(1, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
- ipi_resume();
+ if (mpic_is_ipi_available(mpic))
+ mpic_ipi_resume(mpic);
}
-static struct syscore_ops armada_370_xp_mpic_syscore_ops = {
- .suspend = armada_370_xp_mpic_suspend,
- .resume = armada_370_xp_mpic_resume,
+static struct syscore_ops mpic_syscore_ops = {
+ .suspend = mpic_suspend,
+ .resume = mpic_resume,
};
-static int __init armada_370_xp_mpic_of_init(struct device_node *node,
- struct device_node *parent)
+static int __init mpic_map_region(struct device_node *np, int index,
+ void __iomem **base, phys_addr_t *phys_base)
+{
+ struct resource res;
+ int err;
+
+ err = of_address_to_resource(np, index, &res);
+ if (WARN_ON(err))
+ goto fail;
+
+ if (WARN_ON(!request_mem_region(res.start, resource_size(&res), np->full_name))) {
+ err = -EBUSY;
+ goto fail;
+ }
+
+ *base = ioremap(res.start, resource_size(&res));
+ if (WARN_ON(!*base)) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ if (phys_base)
+ *phys_base = res.start;
+
+ return 0;
+
+fail:
+ pr_err("%pOF: Unable to map resource %d: %pE\n", np, index, ERR_PTR(err));
+ return err;
+}
+
+static int __init mpic_of_init(struct device_node *node, struct device_node *parent)
{
- struct resource main_int_res, per_cpu_int_res;
- int nr_irqs, i;
- u32 control;
+ phys_addr_t phys_base;
+ unsigned int nr_irqs;
+ struct mpic *mpic;
+ int err;
+
+ mpic = kzalloc(sizeof(*mpic), GFP_KERNEL);
+ if (WARN_ON(!mpic))
+ return -ENOMEM;
+
+ mpic_data = mpic;
+
+ err = mpic_map_region(node, 0, &mpic->base, &phys_base);
+ if (err)
+ return err;
- BUG_ON(of_address_to_resource(node, 0, &main_int_res));
- BUG_ON(of_address_to_resource(node, 1, &per_cpu_int_res));
+ err = mpic_map_region(node, 1, &mpic->per_cpu, NULL);
+ if (err)
+ return err;
- BUG_ON(!request_mem_region(main_int_res.start,
- resource_size(&main_int_res),
- node->full_name));
- BUG_ON(!request_mem_region(per_cpu_int_res.start,
- resource_size(&per_cpu_int_res),
- node->full_name));
+ nr_irqs = FIELD_GET(MPIC_INT_CONTROL_NUMINT_MASK, readl(mpic->base + MPIC_INT_CONTROL));
- main_int_base = ioremap(main_int_res.start,
- resource_size(&main_int_res));
- BUG_ON(!main_int_base);
+ for (irq_hw_number_t i = 0; i < nr_irqs; i++)
+ writel(i, mpic->base + MPIC_INT_CLEAR_ENABLE);
- per_cpu_int_base = ioremap(per_cpu_int_res.start,
- resource_size(&per_cpu_int_res));
- BUG_ON(!per_cpu_int_base);
+ /*
+ * Initialize mpic->parent_irq before calling any other functions, since
+ * it is used to distinguish between IPI and non-IPI platforms.
+ */
+ mpic->parent_irq = irq_of_parse_and_map(node, 0);
- control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
- nr_irqs = (control >> 2) & 0x3ff;
+ /*
+ * On non-IPI platforms the driver currently supports only the per-CPU
+ * interrupts (the first 29 interrupts). See mpic_handle_cascade_irq().
+ */
+ if (!mpic_is_ipi_available(mpic))
+ nr_irqs = MPIC_PER_CPU_IRQS_NR;
- for (i = 0; i < nr_irqs; i++)
- writel(i, main_int_base + ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
+ mpic->domain = irq_domain_add_linear(node, nr_irqs, &mpic_irq_ops, mpic);
+ if (!mpic->domain) {
+ pr_err("%pOF: Unable to add IRQ domain\n", node);
+ return -ENOMEM;
+ }
- armada_370_xp_mpic_domain =
- irq_domain_add_linear(node, nr_irqs,
- &armada_370_xp_mpic_irq_ops, NULL);
- BUG_ON(!armada_370_xp_mpic_domain);
- irq_domain_update_bus_token(armada_370_xp_mpic_domain, DOMAIN_BUS_WIRED);
+ irq_domain_update_bus_token(mpic->domain, DOMAIN_BUS_WIRED);
/* Setup for the boot CPU */
- armada_xp_mpic_perf_init();
- armada_xp_mpic_smp_cpu_init();
+ mpic_perf_init(mpic);
+ mpic_smp_cpu_init(mpic);
- armada_370_xp_msi_init(node, main_int_res.start);
+ err = mpic_msi_init(mpic, node, phys_base);
+ if (err) {
+ pr_err("%pOF: Unable to initialize MSI domain\n", node);
+ return err;
+ }
- parent_irq = irq_of_parse_and_map(node, 0);
- if (parent_irq <= 0) {
- irq_set_default_host(armada_370_xp_mpic_domain);
- set_handle_irq(armada_370_xp_handle_irq);
+ if (mpic_is_ipi_available(mpic)) {
+ irq_set_default_host(mpic->domain);
+ set_handle_irq(mpic_handle_irq);
#ifdef CONFIG_SMP
- armada_xp_ipi_init(node);
+ err = mpic_ipi_init(mpic, node);
+ if (err) {
+ pr_err("%pOF: Unable to initialize IPI domain\n", node);
+ return err;
+ }
+
cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_XP_STARTING,
"irqchip/armada/ipi:starting",
- armada_xp_mpic_starting_cpu, NULL);
+ mpic_starting_cpu, NULL);
#endif
} else {
#ifdef CONFIG_SMP
@@ -813,13 +899,13 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
"irqchip/armada/cascade:starting",
mpic_cascaded_starting_cpu, NULL);
#endif
- irq_set_chained_handler(parent_irq,
- armada_370_xp_mpic_handle_cascade_irq);
+ irq_set_chained_handler_and_data(mpic->parent_irq,
+ mpic_handle_cascade_irq, mpic);
}
- register_syscore_ops(&armada_370_xp_mpic_syscore_ops);
+ register_syscore_ops(&mpic_syscore_ops);
return 0;
}
-IRQCHIP_DECLARE(armada_370_xp_mpic, "marvell,mpic", armada_370_xp_mpic_of_init);
+IRQCHIP_DECLARE(marvell_mpic, "marvell,mpic", mpic_of_init);
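/*
 * Editor's note: a minimal sketch of the FIELD_GET() idiom this rework
 * adopts in place of the old open-coded "(control >> 2) & 0x3ff". The
 * mask below is illustrative only; the real MPIC_INT_CONTROL_NUMINT_MASK
 * is defined earlier in this patch.
 */
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_NUMINT_MASK	GENMASK(11, 2)	/* ten bits at offset 2, i.e. (x >> 2) & 0x3ff */

static unsigned int example_numint(u32 control)
{
	/* FIELD_GET() derives both the shift and the mask from the mask macro */
	return FIELD_GET(EXAMPLE_NUMINT_MASK, control);
}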
diff --git a/drivers/irqchip/irq-aspeed-intc.c b/drivers/irqchip/irq-aspeed-intc.c
new file mode 100644
index 000000000000..bd3b759b4b2c
--- /dev/null
+++ b/drivers/irqchip/irq-aspeed-intc.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Aspeed Interrupt Controller.
+ *
+ * Copyright (C) 2023 ASPEED Technology Inc.
+ */
+
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+
+#define INTC_INT_ENABLE_REG 0x00
+#define INTC_INT_STATUS_REG 0x04
+#define INTC_IRQS_PER_WORD 32
+
+struct aspeed_intc_ic {
+ void __iomem *base;
+ raw_spinlock_t gic_lock;
+ raw_spinlock_t intc_lock;
+ struct irq_domain *irq_domain;
+};
+
+static void aspeed_intc_ic_irq_handler(struct irq_desc *desc)
+{
+ struct aspeed_intc_ic *intc_ic = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ chained_irq_enter(chip, desc);
+
+ scoped_guard(raw_spinlock, &intc_ic->gic_lock) {
+ unsigned long bit, status;
+
+ status = readl(intc_ic->base + INTC_INT_STATUS_REG);
+ for_each_set_bit(bit, &status, INTC_IRQS_PER_WORD) {
+ generic_handle_domain_irq(intc_ic->irq_domain, bit);
+ writel(BIT(bit), intc_ic->base + INTC_INT_STATUS_REG);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void aspeed_intc_irq_mask(struct irq_data *data)
+{
+ struct aspeed_intc_ic *intc_ic = irq_data_get_irq_chip_data(data);
+ unsigned int mask;
+
+ guard(raw_spinlock)(&intc_ic->intc_lock);
+ /* Do the read-modify-write under the lock so mask/unmask cannot race */
+ mask = readl(intc_ic->base + INTC_INT_ENABLE_REG) & ~BIT(data->hwirq);
+ writel(mask, intc_ic->base + INTC_INT_ENABLE_REG);
+}
+
+static void aspeed_intc_irq_unmask(struct irq_data *data)
+{
+ struct aspeed_intc_ic *intc_ic = irq_data_get_irq_chip_data(data);
+ unsigned int unmask;
+
+ guard(raw_spinlock)(&intc_ic->intc_lock);
+ /* Do the read-modify-write under the lock so mask/unmask cannot race */
+ unmask = readl(intc_ic->base + INTC_INT_ENABLE_REG) | BIT(data->hwirq);
+ writel(unmask, intc_ic->base + INTC_INT_ENABLE_REG);
+}
+
+static struct irq_chip aspeed_intc_chip = {
+ .name = "ASPEED INTC",
+ .irq_mask = aspeed_intc_irq_mask,
+ .irq_unmask = aspeed_intc_irq_unmask,
+};
+
+static int aspeed_intc_ic_map_irq_domain(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &aspeed_intc_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops aspeed_intc_ic_irq_domain_ops = {
+ .map = aspeed_intc_ic_map_irq_domain,
+};
+
+static int __init aspeed_intc_ic_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct aspeed_intc_ic *intc_ic;
+ int irq, i, ret = 0;
+
+ intc_ic = kzalloc(sizeof(*intc_ic), GFP_KERNEL);
+ if (!intc_ic)
+ return -ENOMEM;
+
+ intc_ic->base = of_iomap(node, 0);
+ if (!intc_ic->base) {
+ pr_err("Failed to iomap intc_ic base\n");
+ ret = -ENOMEM;
+ goto err_free_ic;
+ }
+ writel(0xffffffff, intc_ic->base + INTC_INT_STATUS_REG);
+ writel(0x0, intc_ic->base + INTC_INT_ENABLE_REG);
+
+ intc_ic->irq_domain = irq_domain_add_linear(node, INTC_IRQS_PER_WORD,
+ &aspeed_intc_ic_irq_domain_ops, intc_ic);
+ if (!intc_ic->irq_domain) {
+ ret = -ENOMEM;
+ goto err_iounmap;
+ }
+
+ raw_spin_lock_init(&intc_ic->gic_lock);
+ raw_spin_lock_init(&intc_ic->intc_lock);
+
+ /* Check that all IRQ numbers are valid. If any is not, unmap the base and free the data. */
+ for (i = 0; i < of_irq_count(node); i++) {
+ irq = irq_of_parse_and_map(node, i);
+ if (!irq) {
+ pr_err("Failed to get irq number\n");
+ ret = -EINVAL;
+ goto err_iounmap;
+ }
+ }
+
+ for (i = 0; i < of_irq_count(node); i++) {
+ irq = irq_of_parse_and_map(node, i);
+ irq_set_chained_handler_and_data(irq, aspeed_intc_ic_irq_handler, intc_ic);
+ }
+
+ return 0;
+
+err_iounmap:
+ iounmap(intc_ic->base);
+err_free_ic:
+ kfree(intc_ic);
+ return ret;
+}
+
+IRQCHIP_DECLARE(ast2700_intc_ic, "aspeed,ast2700-intc-ic", aspeed_intc_ic_of_init);
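/*
 * Editor's note: the new driver above relies on the scope-based lock
 * guards from <linux/cleanup.h> and <linux/spinlock.h>. A minimal sketch,
 * with a hypothetical lock and counter, of how guard() and scoped_guard()
 * tie the unlock to scope exit so no early return can leak the lock:
 */
#include <linux/cleanup.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);
static unsigned int example_count;

static void example_inc(void)
{
	guard(raw_spinlock)(&example_lock);	/* dropped automatically on return */
	example_count++;
}

static unsigned int example_read(void)
{
	unsigned int val;

	scoped_guard(raw_spinlock, &example_lock)	/* held for this block only */
		val = example_count;
	return val;
}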
diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
index 072bd227b6c6..4525366d16d6 100644
--- a/drivers/irqchip/irq-atmel-aic-common.c
+++ b/drivers/irqchip/irq-atmel-aic-common.c
@@ -111,8 +111,6 @@ static void __init aic_common_ext_irq_of_init(struct irq_domain *domain)
struct device_node *node = irq_domain_get_of_node(domain);
struct irq_chip_generic *gc;
struct aic_chip_data *aic;
- struct property *prop;
- const __be32 *p;
u32 hwirq;
gc = irq_get_domain_generic_chip(domain, 0);
@@ -120,7 +118,7 @@ static void __init aic_common_ext_irq_of_init(struct irq_domain *domain)
aic = gc->private;
aic->ext_irqs |= 1;
- of_property_for_each_u32(node, "atmel,external-irqs", prop, p, hwirq) {
+ of_property_for_each_u32(node, "atmel,external-irqs", hwirq) {
gc = irq_get_domain_generic_chip(domain, hwirq);
if (!gc) {
pr_warn("AIC: external irq %d >= %d skip it\n",
diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c
index 4631f6847953..3839ad79ad31 100644
--- a/drivers/irqchip/irq-atmel-aic.c
+++ b/drivers/irqchip/irq-atmel-aic.c
@@ -57,8 +57,7 @@
static struct irq_domain *aic_domain;
-static asmlinkage void __exception_irq_entry
-aic_handle(struct pt_regs *regs)
+static void __exception_irq_entry aic_handle(struct pt_regs *regs)
{
struct irq_domain_chip_generic *dgc = aic_domain->gc;
struct irq_chip_generic *gc = dgc->gc[0];
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
index 145535bd7560..e98c2875af9e 100644
--- a/drivers/irqchip/irq-atmel-aic5.c
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -67,8 +67,7 @@
static struct irq_domain *aic5_domain;
-static asmlinkage void __exception_irq_entry
-aic5_handle(struct pt_regs *regs)
+static void __exception_irq_entry aic5_handle(struct pt_regs *regs)
{
struct irq_chip_generic *bgc = irq_get_domain_generic_chip(aic5_domain, 0);
u32 irqnr;
@@ -320,6 +319,7 @@ static const struct of_device_id aic5_irq_fixups[] __initconst = {
{ .compatible = "atmel,sama5d3", .data = sama5d3_aic_irq_fixup },
{ .compatible = "atmel,sama5d4", .data = sama5d3_aic_irq_fixup },
{ .compatible = "microchip,sam9x60", .data = sam9x60_aic_irq_fixup },
+ { .compatible = "microchip,sam9x7", .data = sam9x60_aic_irq_fixup },
{ /* sentinel */ },
};
@@ -406,3 +406,11 @@ static int __init sam9x60_aic5_of_init(struct device_node *node,
return aic5_of_init(node, parent, NR_SAM9X60_IRQS);
}
IRQCHIP_DECLARE(sam9x60_aic5, "microchip,sam9x60-aic", sam9x60_aic5_of_init);
+
+#define NR_SAM9X7_IRQS 70
+
+static int __init sam9x7_aic5_of_init(struct device_node *node, struct device_node *parent)
+{
+ return aic5_of_init(node, parent, NR_SAM9X7_IRQS);
+}
+IRQCHIP_DECLARE(sam9x7_aic5, "microchip,sam9x7-aic", sam9x7_aic5_of_init);
diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c
index e94e2882286c..6c20604c2242 100644
--- a/drivers/irqchip/irq-bcm2835.c
+++ b/drivers/irqchip/irq-bcm2835.c
@@ -102,7 +102,9 @@ static void armctrl_unmask_irq(struct irq_data *d)
static struct irq_chip armctrl_chip = {
.name = "ARMCTRL-level",
.irq_mask = armctrl_mask_irq,
- .irq_unmask = armctrl_unmask_irq
+ .irq_unmask = armctrl_unmask_irq,
+ .flags = IRQCHIP_MASK_ON_SUSPEND |
+ IRQCHIP_SKIP_SET_WAKE,
};
static int armctrl_xlate(struct irq_domain *d, struct device_node *ctrlr,
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index e5f1059b989f..e366257684b5 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -58,6 +58,7 @@ static struct irq_chip bcm2836_arm_irqchip_timer = {
.name = "bcm2836-timer",
.irq_mask = bcm2836_arm_irqchip_mask_timer_irq,
.irq_unmask = bcm2836_arm_irqchip_unmask_timer_irq,
+ .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE,
};
static void bcm2836_arm_irqchip_mask_pmu_irq(struct irq_data *d)
@@ -74,6 +75,7 @@ static struct irq_chip bcm2836_arm_irqchip_pmu = {
.name = "bcm2836-pmu",
.irq_mask = bcm2836_arm_irqchip_mask_pmu_irq,
.irq_unmask = bcm2836_arm_irqchip_unmask_pmu_irq,
+ .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE,
};
static void bcm2836_arm_irqchip_mask_gpu_irq(struct irq_data *d)
@@ -88,6 +90,7 @@ static struct irq_chip bcm2836_arm_irqchip_gpu = {
.name = "bcm2836-gpu",
.irq_mask = bcm2836_arm_irqchip_mask_gpu_irq,
.irq_unmask = bcm2836_arm_irqchip_unmask_gpu_irq,
+ .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE,
};
static void bcm2836_arm_irqchip_dummy_op(struct irq_data *d)
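/*
 * Editor's note: IRQCHIP_MASK_ON_SUSPEND makes the core mask every
 * interrupt of this chip that is not enabled as a wakeup source when
 * suspend_device_irqs() runs, and IRQCHIP_SKIP_SET_WAKE lets
 * irq_set_irq_wake() succeed without a chip-level .irq_set_wake()
 * callback. This is the usual pairing for irqchips with no dedicated
 * wake hardware, applied here to all of the BCM2835/BCM2836 chips.
 */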
diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c
index 9745a119d0e6..90daa274ef23 100644
--- a/drivers/irqchip/irq-bcm6345-l1.c
+++ b/drivers/irqchip/irq-bcm6345-l1.c
@@ -192,14 +192,10 @@ static int bcm6345_l1_set_affinity(struct irq_data *d,
u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
unsigned int old_cpu = cpu_for_irq(intc, d);
unsigned int new_cpu;
- struct cpumask valid;
unsigned long flags;
bool enabled;
- if (!cpumask_and(&valid, &intc->cpumask, dest))
- return -EINVAL;
-
- new_cpu = cpumask_any_and(&valid, cpu_online_mask);
+ new_cpu = cpumask_first_and_and(&intc->cpumask, dest, cpu_online_mask);
if (new_cpu >= nr_cpu_ids)
return -EINVAL;
@@ -242,7 +238,7 @@ static int __init bcm6345_l1_init_one(struct device_node *dn,
else if (intc->n_words != n_words)
return -EINVAL;
- cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32),
+ cpu = intc->cpus[idx] = kzalloc(struct_size(cpu, enable_cache, n_words),
GFP_KERNEL);
if (!cpu)
return -ENOMEM;
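/*
 * Editor's note: struct_size() (from <linux/overflow.h>) computes the
 * size of a structure with a trailing flexible array and saturates to
 * SIZE_MAX on overflow, replacing the open-coded
 * "sizeof(*cpu) + n_words * sizeof(u32)" in both BCM L1 drivers of this
 * patch. A minimal sketch with a hypothetical structure:
 */
#include <linux/overflow.h>
#include <linux/slab.h>

struct example_cpu {
	void __iomem	*map_base;
	u32		enable_cache[];	/* flexible array member */
};

static struct example_cpu *example_alloc(unsigned int n_words)
{
	struct example_cpu *cpu;

	/* struct_size() only inspects the type; cpu need not be initialized */
	cpu = kzalloc(struct_size(cpu, enable_cache, n_words), GFP_KERNEL);
	return cpu;
}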
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index 24ca1d656adc..36e71af054e9 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -249,7 +249,7 @@ static int __init bcm7038_l1_init_one(struct device_node *dn,
return -EINVAL;
}
- cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32),
+ cpu = intc->cpus[idx] = kzalloc(struct_size(cpu, mask_cache, n_words),
GFP_KERNEL);
if (!cpu)
return -ENOMEM;
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index 2b0b3175cea0..db4c9721fcf2 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -61,32 +61,6 @@ struct brcmstb_l2_intc_data {
u32 saved_mask; /* for suspend/resume */
};
-/**
- * brcmstb_l2_mask_and_ack - Mask and ack pending interrupt
- * @d: irq_data
- *
- * Chip has separate enable/disable registers instead of a single mask
- * register and pending interrupt is acknowledged by setting a bit.
- *
- * Note: This function is generic and could easily be added to the
- * generic irqchip implementation if there ever becomes a will to do so.
- * Perhaps with a name like irq_gc_mask_disable_and_ack_set().
- *
- * e.g.: https://patchwork.kernel.org/patch/9831047/
- */
-static void brcmstb_l2_mask_and_ack(struct irq_data *d)
-{
- struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
- struct irq_chip_type *ct = irq_data_get_chip_type(d);
- u32 mask = d->mask;
-
- irq_gc_lock(gc);
- irq_reg_writel(gc, mask, ct->regs.disable);
- *ct->mask_cache &= ~mask;
- irq_reg_writel(gc, mask, ct->regs.ack);
- irq_gc_unlock(gc);
-}
-
static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc)
{
struct brcmstb_l2_intc_data *b = irq_desc_get_handler_data(desc);
@@ -118,7 +92,7 @@ out:
chained_irq_exit(chip, desc);
}
-static void brcmstb_l2_intc_suspend(struct irq_data *d)
+static void __brcmstb_l2_intc_suspend(struct irq_data *d, bool save)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
@@ -127,7 +101,8 @@ static void brcmstb_l2_intc_suspend(struct irq_data *d)
irq_gc_lock_irqsave(gc, flags);
/* Save the current mask */
- b->saved_mask = irq_reg_readl(gc, ct->regs.mask);
+ if (save)
+ b->saved_mask = irq_reg_readl(gc, ct->regs.mask);
if (b->can_wake) {
/* Program the wakeup mask */
@@ -137,6 +112,16 @@ static void brcmstb_l2_intc_suspend(struct irq_data *d)
irq_gc_unlock_irqrestore(gc, flags);
}
+static void brcmstb_l2_intc_shutdown(struct irq_data *d)
+{
+ __brcmstb_l2_intc_suspend(d, false);
+}
+
+static void brcmstb_l2_intc_suspend(struct irq_data *d)
+{
+ __brcmstb_l2_intc_suspend(d, true);
+}
+
static void brcmstb_l2_intc_resume(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
@@ -237,7 +222,7 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
if (init_params->cpu_clear >= 0) {
ct->regs.ack = init_params->cpu_clear;
ct->chip.irq_ack = irq_gc_ack_set_bit;
- ct->chip.irq_mask_ack = brcmstb_l2_mask_and_ack;
+ ct->chip.irq_mask_ack = irq_gc_mask_disable_and_ack_set;
} else {
/* No Ack - but still slightly more efficient to define this */
ct->chip.irq_mask_ack = irq_gc_mask_disable_reg;
@@ -252,7 +237,7 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
ct->chip.irq_suspend = brcmstb_l2_intc_suspend;
ct->chip.irq_resume = brcmstb_l2_intc_resume;
- ct->chip.irq_pm_shutdown = brcmstb_l2_intc_suspend;
+ ct->chip.irq_pm_shutdown = brcmstb_l2_intc_shutdown;
if (data->can_wake) {
/* This IRQ chip can wake the system, set all child interrupts
diff --git a/drivers/irqchip/irq-clps711x.c b/drivers/irqchip/irq-clps711x.c
index e731e0784f7e..806ebb1de201 100644
--- a/drivers/irqchip/irq-clps711x.c
+++ b/drivers/irqchip/irq-clps711x.c
@@ -69,7 +69,7 @@ static struct {
struct irq_domain_ops ops;
} *clps711x_intc;
-static asmlinkage void __exception_irq_entry clps711x_irqh(struct pt_regs *regs)
+static void __exception_irq_entry clps711x_irqh(struct pt_regs *regs)
{
u32 irqstat;
diff --git a/drivers/irqchip/irq-davinci-cp-intc.c b/drivers/irqchip/irq-davinci-cp-intc.c
index 7482c8ed34b2..f4f8e9fadbbf 100644
--- a/drivers/irqchip/irq-davinci-cp-intc.c
+++ b/drivers/irqchip/irq-davinci-cp-intc.c
@@ -116,8 +116,7 @@ static struct irq_chip davinci_cp_intc_irq_chip = {
.flags = IRQCHIP_SKIP_SET_WAKE,
};
-static asmlinkage void __exception_irq_entry
-davinci_cp_intc_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry davinci_cp_intc_handle_irq(struct pt_regs *regs)
{
int gpir, irqnr, none;
diff --git a/drivers/irqchip/irq-ftintc010.c b/drivers/irqchip/irq-ftintc010.c
index 359efc1d1be7..b91c358ea6db 100644
--- a/drivers/irqchip/irq-ftintc010.c
+++ b/drivers/irqchip/irq-ftintc010.c
@@ -125,7 +125,7 @@ static struct irq_chip ft010_irq_chip = {
/* Local static for the IRQ entry call */
static struct ft010_irq_data firq;
-static asmlinkage void __exception_irq_entry ft010_irqchip_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry ft010_irqchip_handle_irq(struct pt_regs *regs)
{
struct ft010_irq_data *f = &firq;
int irq;
diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
index afd6a1841715..c776f9142610 100644
--- a/drivers/irqchip/irq-gic-common.c
+++ b/drivers/irqchip/irq-gic-common.c
@@ -7,6 +7,7 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip/arm-gic.h>
+#include <linux/kernel.h>
#include "irq-gic-common.h"
@@ -45,7 +46,7 @@ void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
}
int gic_configure_irq(unsigned int irq, unsigned int type,
- void __iomem *base, void (*sync_access)(void))
+ void __iomem *base)
{
u32 confmask = 0x2 << ((irq % 16) * 2);
u32 confoff = (irq / 16) * 4;
@@ -84,14 +85,10 @@ int gic_configure_irq(unsigned int irq, unsigned int type,
raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
- if (sync_access)
- sync_access();
-
return ret;
}
-void gic_dist_config(void __iomem *base, int gic_irqs,
- void (*sync_access)(void))
+void gic_dist_config(void __iomem *base, int gic_irqs, u8 priority)
{
unsigned int i;
@@ -106,7 +103,8 @@ void gic_dist_config(void __iomem *base, int gic_irqs,
* Set priority on all global interrupts.
*/
for (i = 32; i < gic_irqs; i += 4)
- writel_relaxed(GICD_INT_DEF_PRI_X4, base + GIC_DIST_PRI + i);
+ writel_relaxed(REPEAT_BYTE_U32(priority),
+ base + GIC_DIST_PRI + i);
/*
* Deactivate and disable all SPIs. Leave the PPI and SGIs
@@ -118,12 +116,9 @@ void gic_dist_config(void __iomem *base, int gic_irqs,
writel_relaxed(GICD_INT_EN_CLR_X32,
base + GIC_DIST_ENABLE_CLEAR + i / 8);
}
-
- if (sync_access)
- sync_access();
}
-void gic_cpu_config(void __iomem *base, int nr, void (*sync_access)(void))
+void gic_cpu_config(void __iomem *base, int nr, u8 priority)
{
int i;
@@ -142,9 +137,6 @@ void gic_cpu_config(void __iomem *base, int nr, void (*sync_access)(void))
* Set priority on PPI and SGI interrupts
*/
for (i = 0; i < nr; i += 4)
- writel_relaxed(GICD_INT_DEF_PRI_X4,
+ writel_relaxed(REPEAT_BYTE_U32(priority),
base + GIC_DIST_PRI + i * 4 / 4);
-
- if (sync_access)
- sync_access();
}
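/*
 * Editor's note: REPEAT_BYTE_U32() (reachable through the
 * <linux/kernel.h> include added above) replicates one byte across all
 * four bytes of a u32, so a single relaxed store programs the priority
 * of four interrupts at once; REPEAT_BYTE_U32(0xa0) yields 0xa0a0a0a0,
 * the value the old GICD_INT_DEF_PRI_X4 constant hardcoded. A minimal
 * sketch; the 0x400 offset is the conventional GIC_DIST_PRI value,
 * stated here only for illustration:
 */
#include <linux/io.h>
#include <linux/kernel.h>

static void example_set_prio_quad(void __iomem *dist_base, unsigned int irq, u8 prio)
{
	/* one 32-bit store covers the priority bytes of irq..irq+3 */
	writel_relaxed(REPEAT_BYTE_U32(prio), dist_base + 0x400 + (irq & ~0x3u));
}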
diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h
index f407cce9ecaa..020ecdf16901 100644
--- a/drivers/irqchip/irq-gic-common.h
+++ b/drivers/irqchip/irq-gic-common.h
@@ -8,6 +8,7 @@
#include <linux/of.h>
#include <linux/irqdomain.h>
+#include <linux/msi.h>
#include <linux/irqchip/arm-gic-common.h>
struct gic_quirk {
@@ -20,15 +21,16 @@ struct gic_quirk {
};
int gic_configure_irq(unsigned int irq, unsigned int type,
- void __iomem *base, void (*sync_access)(void));
-void gic_dist_config(void __iomem *base, int gic_irqs,
- void (*sync_access)(void));
-void gic_cpu_config(void __iomem *base, int nr, void (*sync_access)(void));
+ void __iomem *base);
+void gic_dist_config(void __iomem *base, int gic_irqs, u8 priority);
+void gic_cpu_config(void __iomem *base, int nr, u8 priority);
void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
void *data);
void gic_enable_of_quirks(const struct device_node *np,
const struct gic_quirk *quirks, void *data);
+extern const struct msi_parent_ops gic_v3_its_msi_parent_ops;
+
#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
#define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1)
#define RDIST_FLAGS_FORCE_NON_SHAREABLE (1 << 2)
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index f2ff4387870d..be35c5349986 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -26,6 +26,8 @@
#include <linux/irqchip/arm-gic.h>
#include <linux/irqchip/arm-gic-common.h>
+#include "irq-msi-lib.h"
+
/*
* MSI_TYPER:
* [31:26] Reserved
@@ -72,31 +74,6 @@ struct v2m_data {
u32 flags; /* v2m flags for specific implementation */
};
-static void gicv2m_mask_msi_irq(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void gicv2m_unmask_msi_irq(struct irq_data *d)
-{
- pci_msi_unmask_irq(d);
- irq_chip_unmask_parent(d);
-}
-
-static struct irq_chip gicv2m_msi_irq_chip = {
- .name = "MSI",
- .irq_mask = gicv2m_mask_msi_irq,
- .irq_unmask = gicv2m_unmask_msi_irq,
- .irq_eoi = irq_chip_eoi_parent,
-};
-
-static struct msi_domain_info gicv2m_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
- .chip = &gicv2m_msi_irq_chip,
-};
-
static phys_addr_t gicv2m_get_msi_addr(struct v2m_data *v2m, int hwirq)
{
if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY)
@@ -230,6 +207,7 @@ static void gicv2m_irq_domain_free(struct irq_domain *domain,
}
static const struct irq_domain_ops gicv2m_domain_ops = {
+ .select = msi_lib_irq_domain_select,
.alloc = gicv2m_irq_domain_alloc,
.free = gicv2m_irq_domain_free,
};
@@ -250,19 +228,6 @@ static bool is_msi_spi_valid(u32 base, u32 num)
return true;
}
-static struct irq_chip gicv2m_pmsi_irq_chip = {
- .name = "pMSI",
-};
-
-static struct msi_domain_ops gicv2m_pmsi_ops = {
-};
-
-static struct msi_domain_info gicv2m_pmsi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
- .ops = &gicv2m_pmsi_ops,
- .chip = &gicv2m_pmsi_irq_chip,
-};
-
static void __init gicv2m_teardown(void)
{
struct v2m_data *v2m, *tmp;
@@ -278,9 +243,27 @@ static void __init gicv2m_teardown(void)
}
}
+
+#define GICV2M_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT)
+
+#define GICV2M_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX | \
+ MSI_FLAG_MULTI_PCI_MSI)
+
+static struct msi_parent_ops gicv2m_msi_parent_ops = {
+ .supported_flags = GICV2M_MSI_FLAGS_SUPPORTED,
+ .required_flags = GICV2M_MSI_FLAGS_REQUIRED,
+ .bus_select_token = DOMAIN_BUS_NEXUS,
+ .bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
+ .prefix = "GICv2m-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
+
static __init int gicv2m_allocate_domains(struct irq_domain *parent)
{
- struct irq_domain *inner_domain, *pci_domain, *plat_domain;
+ struct irq_domain *inner_domain;
struct v2m_data *v2m;
v2m = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
@@ -295,22 +278,8 @@ static __init int gicv2m_allocate_domains(struct irq_domain *parent)
}
irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
- pci_domain = pci_msi_create_irq_domain(v2m->fwnode,
- &gicv2m_msi_domain_info,
- inner_domain);
- plat_domain = platform_msi_create_irq_domain(v2m->fwnode,
- &gicv2m_pmsi_domain_info,
- inner_domain);
- if (!pci_domain || !plat_domain) {
- pr_err("Failed to create MSI domains\n");
- if (plat_domain)
- irq_domain_remove(plat_domain);
- if (pci_domain)
- irq_domain_remove(pci_domain);
- irq_domain_remove(inner_domain);
- return -ENOMEM;
- }
-
+ inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
+ inner_domain->msi_parent_ops = &gicv2m_msi_parent_ops;
return 0;
}
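/*
 * Editor's note: the change above is the pattern this whole series
 * applies: delete the static PCI/platform MSI child domains and mark the
 * nexus domain as an MSI parent, so the MSI core builds per-device
 * domains on demand. A minimal sketch of the parent-side setup, assuming
 * an already-created nexus domain and driver-provided msi_parent_ops:
 */
#include <linux/irqdomain.h>
#include <linux/msi.h>

static void example_mark_msi_parent(struct irq_domain *nexus,
				    const struct msi_parent_ops *ops)
{
	irq_domain_update_bus_token(nexus, DOMAIN_BUS_NEXUS);
	nexus->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
	nexus->msi_parent_ops = ops;
}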
@@ -438,12 +407,12 @@ static int __init gicv2m_of_init(struct fwnode_handle *parent_handle,
ret = gicv2m_init_one(&child->fwnode, spi_start, nr_spis,
&res, 0);
- if (ret) {
- of_node_put(child);
+ if (ret)
break;
- }
}
+ if (ret && child)
+ of_node_put(child);
if (!ret)
ret = gicv2m_allocate_domains(parent);
if (ret)
@@ -511,7 +480,7 @@ acpi_parse_madt_msi(union acpi_subtable_headers *header,
pr_info("applying Amazon Graviton quirk\n");
res.end = res.start + SZ_8K - 1;
flags |= GICV2M_GRAVITON_ADDRESS_ONLY;
- gicv2m_msi_domain_info.flags &= ~MSI_FLAG_MULTI_PCI_MSI;
+ gicv2m_msi_parent_ops.supported_flags &= ~MSI_FLAG_MULTI_PCI_MSI;
}
if (m->flags & ACPI_MADT_OVERRIDE_SPI_VALUES) {
diff --git a/drivers/irqchip/irq-gic-v3-its-msi-parent.c b/drivers/irqchip/irq-gic-v3-its-msi-parent.c
new file mode 100644
index 000000000000..e150365fbe89
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v3-its-msi-parent.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2013-2015 ARM Limited, All Rights Reserved.
+// Author: Marc Zyngier <marc.zyngier@arm.com>
+// Copyright (C) 2022 Linutronix GmbH
+// Copyright (C) 2022 Intel
+
+#include <linux/acpi_iort.h>
+#include <linux/pci.h>
+
+#include "irq-gic-common.h"
+#include "irq-msi-lib.h"
+
+#define ITS_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT)
+
+#define ITS_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX | \
+ MSI_FLAG_MULTI_PCI_MSI)
+
+#ifdef CONFIG_PCI_MSI
+static int its_pci_msi_vec_count(struct pci_dev *pdev, void *data)
+{
+ int msi, msix, *count = data;
+
+ msi = max(pci_msi_vec_count(pdev), 0);
+ msix = max(pci_msix_vec_count(pdev), 0);
+ *count += max(msi, msix);
+
+ return 0;
+}
+
+static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data)
+{
+ struct pci_dev **alias_dev = data;
+
+ *alias_dev = pdev;
+
+ return 0;
+}
+
+static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
+ int nvec, msi_alloc_info_t *info)
+{
+ struct pci_dev *pdev, *alias_dev;
+ struct msi_domain_info *msi_info;
+ int alias_count = 0, minnvec = 1;
+
+ if (!dev_is_pci(dev))
+ return -EINVAL;
+
+ pdev = to_pci_dev(dev);
+ /*
+ * If pdev is downstream of any aliasing bridges, take an upper
+ * bound of how many other vectors could map to the same DevID.
+ * Also tell the ITS that the signalling will come from a proxy
+ * device, and that special allocation rules apply.
+ */
+ pci_for_each_dma_alias(pdev, its_get_pci_alias, &alias_dev);
+ if (alias_dev != pdev) {
+ if (alias_dev->subordinate)
+ pci_walk_bus(alias_dev->subordinate,
+ its_pci_msi_vec_count, &alias_count);
+ info->flags |= MSI_ALLOC_FLAGS_PROXY_DEVICE;
+ }
+
+ /* ITS specific DeviceID, as the core ITS ignores dev. */
+ info->scratchpad[0].ul = pci_msi_domain_get_msi_rid(domain->parent, pdev);
+
+ /*
+ * @domain->msi_domain_info->hwsize contains the size of the
+ * MSI[-X] domain, but vector allocation happens one by one. This
+ * needs some thought when MSI comes into play as the size of MSI
+ * might be unknown at domain creation time and therefore set to
+ * MSI_MAX_INDEX.
+ */
+ msi_info = msi_get_domain_info(domain);
+ if (msi_info->hwsize > nvec)
+ nvec = msi_info->hwsize;
+
+ /*
+ * Always allocate a power of 2, and special case device 0 for
+ * broken systems where the DevID is not wired (and all devices
+ * appear as DevID 0). For that reason, we generously allocate a
+ * minimum of 32 MSIs for DevID 0. If you want more because all
+ * your devices are aliasing to DevID 0, consider fixing your HW.
+ */
+ nvec = max(nvec, alias_count);
+ if (!info->scratchpad[0].ul)
+ minnvec = 32;
+ nvec = max_t(int, minnvec, roundup_pow_of_two(nvec));
+
+ msi_info = msi_get_domain_info(domain->parent);
+ return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info);
+}
+#else /* CONFIG_PCI_MSI */
+#define its_pci_msi_prepare NULL
+#endif /* !CONFIG_PCI_MSI */
+
+static int of_pmsi_get_dev_id(struct irq_domain *domain, struct device *dev,
+ u32 *dev_id)
+{
+ int ret, index = 0;
+
+ /* Suck the DeviceID out of the msi-parent property */
+ do {
+ struct of_phandle_args args;
+
+ ret = of_parse_phandle_with_args(dev->of_node,
+ "msi-parent", "#msi-cells",
+ index, &args);
+ if (args.np == irq_domain_get_of_node(domain)) {
+ if (WARN_ON(args.args_count != 1))
+ return -EINVAL;
+ *dev_id = args.args[0];
+ break;
+ }
+ index++;
+ } while (!ret);
+
+ return ret;
+}
+
+int __weak iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
+{
+ return -1;
+}
+
+static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev,
+ int nvec, msi_alloc_info_t *info)
+{
+ struct msi_domain_info *msi_info;
+ u32 dev_id;
+ int ret;
+
+ if (dev->of_node)
+ ret = of_pmsi_get_dev_id(domain->parent, dev, &dev_id);
+ else
+ ret = iort_pmsi_get_dev_id(dev, &dev_id);
+ if (ret)
+ return ret;
+
+ /* ITS specific DeviceID, as the core ITS ignores dev. */
+ info->scratchpad[0].ul = dev_id;
+
+ /*
+ * @domain->msi_domain_info->hwsize contains the size of the device
+ * domain, but vector allocation happens one by one.
+ */
+ msi_info = msi_get_domain_info(domain);
+ if (msi_info->hwsize > nvec)
+ nvec = msi_info->hwsize;
+
+ /* Allocate at least 32 MSIs, and always as a power of 2 */
+ nvec = max_t(int, 32, roundup_pow_of_two(nvec));
+
+ msi_info = msi_get_domain_info(domain->parent);
+ return msi_info->ops->msi_prepare(domain->parent,
+ dev, nvec, info);
+}
+
+static bool its_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *real_parent, struct msi_domain_info *info)
+{
+ if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
+ return false;
+
+ switch (info->bus_token) {
+ case DOMAIN_BUS_PCI_DEVICE_MSI:
+ case DOMAIN_BUS_PCI_DEVICE_MSIX:
+ /*
+ * FIXME: This probably should be done after a (not yet
+ * existing) post domain creation callback once to make
+ * support for dynamic post-enable MSI-X allocations
+ * work without having to reevaluate the domain size
+ * over and over. It is known already at allocation
+ * time via info->hwsize.
+ *
+ * That should work perfectly fine for MSI/MSI-X but needs
+ * some thoughts for purely software managed MSI domains
+ * where the index space is only limited artificially via
+ * %MSI_MAX_INDEX.
+ */
+ info->ops->msi_prepare = its_pci_msi_prepare;
+ break;
+ case DOMAIN_BUS_DEVICE_MSI:
+ case DOMAIN_BUS_WIRED_TO_MSI:
+ /*
+ * FIXME: See the above PCI prepare comment. The domain
+ * size is also known at domain creation time.
+ */
+ info->ops->msi_prepare = its_pmsi_prepare;
+ break;
+ default:
+ /* Confused. How did the lib return true? */
+ WARN_ON_ONCE(1);
+ return false;
+ }
+
+ return true;
+}
+
+const struct msi_parent_ops gic_v3_its_msi_parent_ops = {
+ .supported_flags = ITS_MSI_FLAGS_SUPPORTED,
+ .required_flags = ITS_MSI_FLAGS_REQUIRED,
+ .bus_select_token = DOMAIN_BUS_NEXUS,
+ .bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
+ .prefix = "ITS-",
+ .init_dev_msi_info = its_init_dev_msi_info,
+};
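/*
 * Editor's note: a worked example of the vector-count sizing in
 * its_pci_msi_prepare() above. The count is rounded up to a power of two
 * (the ITT must be sized that way), and DevID 0 is padded to at least
 * 32 vectors:
 *
 *	nvec = 5,  DevID != 0:  max(1, roundup_pow_of_two(5))   = 8
 *	nvec = 5,  DevID == 0:  max(32, roundup_pow_of_two(5))  = 32
 *	nvec = 40, DevID == 0:  max(32, roundup_pow_of_two(40)) = 64
 */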
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
deleted file mode 100644
index 93f77a8196da..000000000000
--- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c
+++ /dev/null
@@ -1,202 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2013-2015 ARM Limited, All Rights Reserved.
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- */
-
-#include <linux/acpi_iort.h>
-#include <linux/pci.h>
-#include <linux/msi.h>
-#include <linux/of.h>
-#include <linux/of_irq.h>
-#include <linux/of_pci.h>
-
-static void its_mask_msi_irq(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void its_unmask_msi_irq(struct irq_data *d)
-{
- pci_msi_unmask_irq(d);
- irq_chip_unmask_parent(d);
-}
-
-static struct irq_chip its_msi_irq_chip = {
- .name = "ITS-MSI",
- .irq_unmask = its_unmask_msi_irq,
- .irq_mask = its_mask_msi_irq,
- .irq_eoi = irq_chip_eoi_parent,
-};
-
-static int its_pci_msi_vec_count(struct pci_dev *pdev, void *data)
-{
- int msi, msix, *count = data;
-
- msi = max(pci_msi_vec_count(pdev), 0);
- msix = max(pci_msix_vec_count(pdev), 0);
- *count += max(msi, msix);
-
- return 0;
-}
-
-static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data)
-{
- struct pci_dev **alias_dev = data;
-
- *alias_dev = pdev;
-
- return 0;
-}
-
-static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
- int nvec, msi_alloc_info_t *info)
-{
- struct pci_dev *pdev, *alias_dev;
- struct msi_domain_info *msi_info;
- int alias_count = 0, minnvec = 1;
-
- if (!dev_is_pci(dev))
- return -EINVAL;
-
- msi_info = msi_get_domain_info(domain->parent);
-
- pdev = to_pci_dev(dev);
- /*
- * If pdev is downstream of any aliasing bridges, take an upper
- * bound of how many other vectors could map to the same DevID.
- * Also tell the ITS that the signalling will come from a proxy
- * device, and that special allocation rules apply.
- */
- pci_for_each_dma_alias(pdev, its_get_pci_alias, &alias_dev);
- if (alias_dev != pdev) {
- if (alias_dev->subordinate)
- pci_walk_bus(alias_dev->subordinate,
- its_pci_msi_vec_count, &alias_count);
- info->flags |= MSI_ALLOC_FLAGS_PROXY_DEVICE;
- }
-
- /* ITS specific DeviceID, as the core ITS ignores dev. */
- info->scratchpad[0].ul = pci_msi_domain_get_msi_rid(domain, pdev);
-
- /*
- * Always allocate a power of 2, and special case device 0 for
- * broken systems where the DevID is not wired (and all devices
- * appear as DevID 0). For that reason, we generously allocate a
- * minimum of 32 MSIs for DevID 0. If you want more because all
- * your devices are aliasing to DevID 0, consider fixing your HW.
- */
- nvec = max(nvec, alias_count);
- if (!info->scratchpad[0].ul)
- minnvec = 32;
- nvec = max_t(int, minnvec, roundup_pow_of_two(nvec));
- return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info);
-}
-
-static struct msi_domain_ops its_pci_msi_ops = {
- .msi_prepare = its_pci_msi_prepare,
-};
-
-static struct msi_domain_info its_pci_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
- .ops = &its_pci_msi_ops,
- .chip = &its_msi_irq_chip,
-};
-
-static struct of_device_id its_device_id[] = {
- { .compatible = "arm,gic-v3-its", },
- {},
-};
-
-static int __init its_pci_msi_init_one(struct fwnode_handle *handle,
- const char *name)
-{
- struct irq_domain *parent;
-
- parent = irq_find_matching_fwnode(handle, DOMAIN_BUS_NEXUS);
- if (!parent || !msi_get_domain_info(parent)) {
- pr_err("%s: Unable to locate ITS domain\n", name);
- return -ENXIO;
- }
-
- if (!pci_msi_create_irq_domain(handle, &its_pci_msi_domain_info,
- parent)) {
- pr_err("%s: Unable to create PCI domain\n", name);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static int __init its_pci_of_msi_init(void)
-{
- struct device_node *np;
-
- for (np = of_find_matching_node(NULL, its_device_id); np;
- np = of_find_matching_node(np, its_device_id)) {
- if (!of_device_is_available(np))
- continue;
- if (!of_property_read_bool(np, "msi-controller"))
- continue;
-
- if (its_pci_msi_init_one(of_node_to_fwnode(np), np->full_name))
- continue;
-
- pr_info("PCI/MSI: %pOF domain created\n", np);
- }
-
- return 0;
-}
-
-#ifdef CONFIG_ACPI
-
-static int __init
-its_pci_msi_parse_madt(union acpi_subtable_headers *header,
- const unsigned long end)
-{
- struct acpi_madt_generic_translator *its_entry;
- struct fwnode_handle *dom_handle;
- const char *node_name;
- int err = -ENXIO;
-
- its_entry = (struct acpi_madt_generic_translator *)header;
- node_name = kasprintf(GFP_KERNEL, "ITS@0x%lx",
- (long)its_entry->base_address);
- dom_handle = iort_find_domain_token(its_entry->translation_id);
- if (!dom_handle) {
- pr_err("%s: Unable to locate ITS domain handle\n", node_name);
- goto out;
- }
-
- err = its_pci_msi_init_one(dom_handle, node_name);
- if (!err)
- pr_info("PCI/MSI: %s domain created\n", node_name);
-
-out:
- kfree(node_name);
- return err;
-}
-
-static int __init its_pci_acpi_msi_init(void)
-{
- acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
- its_pci_msi_parse_madt, 0);
- return 0;
-}
-#else
-static int __init its_pci_acpi_msi_init(void)
-{
- return 0;
-}
-#endif
-
-static int __init its_pci_msi_init(void)
-{
- its_pci_of_msi_init();
- its_pci_acpi_msi_init();
-
- return 0;
-}
-early_initcall(its_pci_msi_init);
diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
deleted file mode 100644
index daa6d5053bc3..000000000000
--- a/drivers/irqchip/irq-gic-v3-its-platform-msi.c
+++ /dev/null
@@ -1,163 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2013-2015 ARM Limited, All Rights Reserved.
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- */
-
-#include <linux/acpi_iort.h>
-#include <linux/device.h>
-#include <linux/msi.h>
-#include <linux/of.h>
-#include <linux/of_irq.h>
-
-static struct irq_chip its_pmsi_irq_chip = {
- .name = "ITS-pMSI",
-};
-
-static int of_pmsi_get_dev_id(struct irq_domain *domain, struct device *dev,
- u32 *dev_id)
-{
- int ret, index = 0;
-
- /* Suck the DeviceID out of the msi-parent property */
- do {
- struct of_phandle_args args;
-
- ret = of_parse_phandle_with_args(dev->of_node,
- "msi-parent", "#msi-cells",
- index, &args);
- if (args.np == irq_domain_get_of_node(domain)) {
- if (WARN_ON(args.args_count != 1))
- return -EINVAL;
- *dev_id = args.args[0];
- break;
- }
- index++;
- } while (!ret);
-
- return ret;
-}
-
-int __weak iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
-{
- return -1;
-}
-
-static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev,
- int nvec, msi_alloc_info_t *info)
-{
- struct msi_domain_info *msi_info;
- u32 dev_id;
- int ret;
-
- msi_info = msi_get_domain_info(domain->parent);
-
- if (dev->of_node)
- ret = of_pmsi_get_dev_id(domain, dev, &dev_id);
- else
- ret = iort_pmsi_get_dev_id(dev, &dev_id);
- if (ret)
- return ret;
-
- /* ITS specific DeviceID, as the core ITS ignores dev. */
- info->scratchpad[0].ul = dev_id;
-
- /* Allocate at least 32 MSIs, and always as a power of 2 */
- nvec = max_t(int, 32, roundup_pow_of_two(nvec));
- return msi_info->ops->msi_prepare(domain->parent,
- dev, nvec, info);
-}
-
-static struct msi_domain_ops its_pmsi_ops = {
- .msi_prepare = its_pmsi_prepare,
-};
-
-static struct msi_domain_info its_pmsi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
- .ops = &its_pmsi_ops,
- .chip = &its_pmsi_irq_chip,
-};
-
-static const struct of_device_id its_device_id[] = {
- { .compatible = "arm,gic-v3-its", },
- {},
-};
-
-static int __init its_pmsi_init_one(struct fwnode_handle *fwnode,
- const char *name)
-{
- struct irq_domain *parent;
-
- parent = irq_find_matching_fwnode(fwnode, DOMAIN_BUS_NEXUS);
- if (!parent || !msi_get_domain_info(parent)) {
- pr_err("%s: unable to locate ITS domain\n", name);
- return -ENXIO;
- }
-
- if (!platform_msi_create_irq_domain(fwnode, &its_pmsi_domain_info,
- parent)) {
- pr_err("%s: unable to create platform domain\n", name);
- return -ENXIO;
- }
-
- pr_info("Platform MSI: %s domain created\n", name);
- return 0;
-}
-
-#ifdef CONFIG_ACPI
-static int __init
-its_pmsi_parse_madt(union acpi_subtable_headers *header,
- const unsigned long end)
-{
- struct acpi_madt_generic_translator *its_entry;
- struct fwnode_handle *domain_handle;
- const char *node_name;
- int err = -ENXIO;
-
- its_entry = (struct acpi_madt_generic_translator *)header;
- node_name = kasprintf(GFP_KERNEL, "ITS@0x%lx",
- (long)its_entry->base_address);
- domain_handle = iort_find_domain_token(its_entry->translation_id);
- if (!domain_handle) {
- pr_err("%s: Unable to locate ITS domain handle\n", node_name);
- goto out;
- }
-
- err = its_pmsi_init_one(domain_handle, node_name);
-
-out:
- kfree(node_name);
- return err;
-}
-
-static void __init its_pmsi_acpi_init(void)
-{
- acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
- its_pmsi_parse_madt, 0);
-}
-#else
-static inline void its_pmsi_acpi_init(void) { }
-#endif
-
-static void __init its_pmsi_of_init(void)
-{
- struct device_node *np;
-
- for (np = of_find_matching_node(NULL, its_device_id); np;
- np = of_find_matching_node(np, its_device_id)) {
- if (!of_device_is_available(np))
- continue;
- if (!of_property_read_bool(np, "msi-controller"))
- continue;
-
- its_pmsi_init_one(of_node_to_fwnode(np), np->full_name);
- }
-}
-
-static int __init its_pmsi_init(void)
-{
- its_pmsi_of_init();
- its_pmsi_acpi_init();
- return 0;
-}
-early_initcall(its_pmsi_init);
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index b822752c4261..8c3ec5734f1e 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -12,12 +12,14 @@
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/efi.h>
+#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/log2.h>
+#include <linux/mem_encrypt.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/msi.h>
@@ -27,6 +29,7 @@
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
+#include <linux/set_memory.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
@@ -38,11 +41,13 @@
#include <asm/exception.h>
#include "irq-gic-common.h"
+#include "irq-msi-lib.h"
#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1)
#define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2)
#define ITS_FLAGS_FORCE_NON_SHAREABLE (1ULL << 3)
+#define ITS_FLAGS_WORKAROUND_HISILICON_162100801 (1ULL << 4)
#define RD_LOCAL_LPI_ENABLED BIT(0)
#define RD_LOCAL_PENDTABLE_PREALLOCATED BIT(1)
@@ -59,7 +64,8 @@ static u32 lpi_id_bits;
#define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K)
#define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
-#define LPI_PROP_DEFAULT_PRIO GICD_INT_DEF_PRI
+static u8 __ro_after_init lpi_prop_prio;
+static struct its_node *find_4_1_its(void);
/*
* Collection structure - just an ID, and a redistributor address to
@@ -163,6 +169,7 @@ struct its_device {
struct its_node *its;
struct event_lpi_map event_map;
void *itt;
+ u32 itt_sz;
u32 nr_ites;
u32 device_id;
bool shared;
@@ -198,6 +205,87 @@ static DEFINE_IDA(its_vpeid_ida);
#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
+static struct page *its_alloc_pages_node(int node, gfp_t gfp,
+ unsigned int order)
+{
+ struct page *page;
+ int ret = 0;
+
+ page = alloc_pages_node(node, gfp, order);
+
+ if (!page)
+ return NULL;
+
+ ret = set_memory_decrypted((unsigned long)page_address(page),
+ 1 << order);
+ /*
+ * If set_memory_decrypted() fails then we don't know what state the
+ * page is in, so we can't free it. Instead we leak it.
+ * set_memory_decrypted() will already have WARNed.
+ */
+ if (ret)
+ return NULL;
+
+ return page;
+}
+
+static struct page *its_alloc_pages(gfp_t gfp, unsigned int order)
+{
+ return its_alloc_pages_node(NUMA_NO_NODE, gfp, order);
+}
+
+static void its_free_pages(void *addr, unsigned int order)
+{
+ /*
+ * If the memory cannot be encrypted again then we must leak the pages.
+ * set_memory_encrypted() will already have WARNed.
+ */
+ if (set_memory_encrypted((unsigned long)addr, 1 << order))
+ return;
+ free_pages((unsigned long)addr, order);
+}
+
+static struct gen_pool *itt_pool;
+
+static void *itt_alloc_pool(int node, int size)
+{
+ unsigned long addr;
+ struct page *page;
+
+ if (size >= PAGE_SIZE) {
+ page = its_alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, get_order(size));
+
+ return page ? page_address(page) : NULL;
+ }
+
+ do {
+ addr = gen_pool_alloc(itt_pool, size);
+ if (addr)
+ break;
+
+ page = its_alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
+ if (!page)
+ break;
+
+ gen_pool_add(itt_pool, (unsigned long)page_address(page), PAGE_SIZE, node);
+ } while (!addr);
+
+ return (void *)addr;
+}
+
+static void itt_free_pool(void *addr, int size)
+{
+ if (!addr)
+ return;
+
+ if (size >= PAGE_SIZE) {
+ its_free_pages(addr, get_order(size));
+ return;
+ }
+
+ gen_pool_free(itt_pool, (unsigned long)addr, size);
+}
+
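/*
 * Editor's note: a minimal, self-contained sketch of the genalloc calls
 * the ITT pool above is built from. The pool granularity and sizes are
 * illustrative assumptions, not the driver's actual parameters.
 */
#include <linux/errno.h>
#include <linux/genalloc.h>
#include <linux/log2.h>

static struct gen_pool *example_pool;

static int example_pool_init(void)
{
	/* ilog2(256): smallest allocation granule is 256 bytes; -1: no NUMA node */
	example_pool = gen_pool_create(ilog2(256), -1);
	return example_pool ? 0 : -ENOMEM;
}

static void *example_pool_get(size_t size)
{
	/* returns 0 (NULL) until gen_pool_add() has donated backing pages */
	return (void *)gen_pool_alloc(example_pool, size);
}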
/*
* Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we
* always have vSGIs mapped.
@@ -620,7 +708,6 @@ static struct its_collection *its_build_mapd_cmd(struct its_node *its,
u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);
itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
- itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);
its_encode_cmd(cmd, GITS_CMD_MAPD);
its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
@@ -786,6 +873,7 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
+ struct its_vpe *vpe = valid_vpe(its, desc->its_vmapp_cmd.vpe);
unsigned long vpt_addr, vconf_addr;
u64 target;
bool alloc;
@@ -795,9 +883,14 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
if (!desc->its_vmapp_cmd.valid) {
+ alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
if (is_v4_1(its)) {
- alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
its_encode_alloc(cmd, alloc);
+ /*
+ * Unmapping a VPE is self-synchronizing on GICv4.1,
+ * no need to issue a VSYNC.
+ */
+ vpe = NULL;
}
goto out;
@@ -810,13 +903,13 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
its_encode_vpt_addr(cmd, vpt_addr);
its_encode_vpt_size(cmd, LPI_NRBITS - 1);
+ alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
+
if (!is_v4_1(its))
goto out;
vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));
- alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
-
its_encode_alloc(cmd, alloc);
/*
@@ -832,7 +925,7 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
out:
its_fixup_cmd(cmd);
- return valid_vpe(its, desc->its_vmapp_cmd.vpe);
+ return vpe;
}
static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
@@ -1311,7 +1404,6 @@ static void its_send_vmovp(struct its_vpe *vpe)
{
struct its_cmd_desc desc = {};
struct its_node *its;
- unsigned long flags;
int col_id = vpe->col_idx;
desc.its_vmovp_cmd.vpe = vpe;
@@ -1331,8 +1423,7 @@ static void its_send_vmovp(struct its_vpe *vpe)
*
* Wall <-- Head.
*/
- raw_spin_lock_irqsave(&vmovp_lock, flags);
-
+ guard(raw_spinlock)(&vmovp_lock);
desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);
@@ -1347,8 +1438,6 @@ static void its_send_vmovp(struct its_vpe *vpe)
desc.its_vmovp_cmd.col = &its->collections[col_id];
its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
}
-
- raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}
static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
@@ -1785,12 +1874,10 @@ static bool gic_requires_eager_mapping(void)
static void its_map_vm(struct its_node *its, struct its_vm *vm)
{
- unsigned long flags;
-
if (gic_requires_eager_mapping())
return;
- raw_spin_lock_irqsave(&vmovp_lock, flags);
+ guard(raw_spinlock_irqsave)(&vm->vmapp_lock);
/*
* If the VM wasn't mapped yet, iterate over the vpes and get
@@ -1803,65 +1890,53 @@ static void its_map_vm(struct its_node *its, struct its_vm *vm)
for (i = 0; i < vm->nr_vpes; i++) {
struct its_vpe *vpe = vm->vpes[i];
- struct irq_data *d = irq_get_irq_data(vpe->irq);
- /* Map the VPE to the first possible CPU */
- vpe->col_idx = cpumask_first(cpu_online_mask);
- its_send_vmapp(its, vpe, true);
+ scoped_guard(raw_spinlock, &vpe->vpe_lock)
+ its_send_vmapp(its, vpe, true);
+
its_send_vinvall(its, vpe);
- irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
}
}
-
- raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}
static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
{
- unsigned long flags;
-
/* Not using the ITS list? Everything is always mapped. */
if (gic_requires_eager_mapping())
return;
- raw_spin_lock_irqsave(&vmovp_lock, flags);
+ guard(raw_spinlock_irqsave)(&vm->vmapp_lock);
if (!--vm->vlpi_count[its->list_nr]) {
int i;
- for (i = 0; i < vm->nr_vpes; i++)
+ for (i = 0; i < vm->nr_vpes; i++) {
+ guard(raw_spinlock)(&vm->vpes[i]->vpe_lock);
its_send_vmapp(its, vm->vpes[i], false);
+ }
}
-
- raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}
static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
- int ret = 0;
if (!info->map)
return -EINVAL;
- raw_spin_lock(&its_dev->event_map.vlpi_lock);
-
if (!its_dev->event_map.vm) {
struct its_vlpi_map *maps;
maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
GFP_ATOMIC);
- if (!maps) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!maps)
+ return -ENOMEM;
its_dev->event_map.vm = info->map->vm;
its_dev->event_map.vlpi_maps = maps;
} else if (its_dev->event_map.vm != info->map->vm) {
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
/* Get our private copy of the mapping information */
@@ -1893,46 +1968,32 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
its_dev->event_map.nr_vlpis++;
}
-out:
- raw_spin_unlock(&its_dev->event_map.vlpi_lock);
- return ret;
+ return 0;
}
static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
struct its_vlpi_map *map;
- int ret = 0;
-
- raw_spin_lock(&its_dev->event_map.vlpi_lock);
map = get_vlpi_map(d);
- if (!its_dev->event_map.vm || !map) {
- ret = -EINVAL;
- goto out;
- }
+ if (!its_dev->event_map.vm || !map)
+ return -EINVAL;
/* Copy our mapping information to the incoming request */
*info->map = *map;
-out:
- raw_spin_unlock(&its_dev->event_map.vlpi_lock);
- return ret;
+ return 0;
}
static int its_vlpi_unmap(struct irq_data *d)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
- int ret = 0;
-
- raw_spin_lock(&its_dev->event_map.vlpi_lock);
- if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
- ret = -EINVAL;
- goto out;
- }
+ if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
+ return -EINVAL;
/* Drop the virtual mapping */
its_send_discard(its_dev, event);
@@ -1940,7 +2001,7 @@ static int its_vlpi_unmap(struct irq_data *d)
/* and restore the physical one */
irqd_clr_forwarded_to_vcpu(d);
its_send_mapti(its_dev, d->hwirq, event);
- lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
+ lpi_update_config(d, 0xff, (lpi_prop_prio |
LPI_PROP_ENABLED |
LPI_PROP_GROUP1));
@@ -1956,9 +2017,7 @@ static int its_vlpi_unmap(struct irq_data *d)
kfree(its_dev->event_map.vlpi_maps);
}
-out:
- raw_spin_unlock(&its_dev->event_map.vlpi_lock);
- return ret;
+ return 0;
}
static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
@@ -1986,6 +2045,8 @@ static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
if (!is_v4(its_dev->its))
return -EINVAL;
+ guard(raw_spinlock)(&its_dev->event_map.vlpi_lock);
+
/* Unmap request? */
if (!info)
return its_vlpi_unmap(d);
@@ -2195,8 +2256,8 @@ static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
static void gic_reset_prop_table(void *va)
{
- /* Priority 0xa0, Group-1, disabled */
- memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ);
+ /* Regular IRQ priority, Group-1, disabled */
+ memset(va, lpi_prop_prio | LPI_PROP_GROUP1, LPI_PROPBASE_SZ);
/* Make sure the GIC will observe the written configuration */
gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ);
@@ -2206,7 +2267,8 @@ static struct page *its_allocate_prop_table(gfp_t gfp_flags)
{
struct page *prop_page;
- prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
+ prop_page = its_alloc_pages(gfp_flags,
+ get_order(LPI_PROPBASE_SZ));
if (!prop_page)
return NULL;
@@ -2217,8 +2279,7 @@ static struct page *its_allocate_prop_table(gfp_t gfp_flags)
static void its_free_prop_table(struct page *prop_page)
{
- free_pages((unsigned long)page_address(prop_page),
- get_order(LPI_PROPBASE_SZ));
+ its_free_pages(page_address(prop_page), get_order(LPI_PROPBASE_SZ));
}
static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
@@ -2340,7 +2401,7 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser,
order = get_order(GITS_BASER_PAGES_MAX * psz);
}
- page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
+ page = its_alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
if (!page)
return -ENOMEM;
@@ -2353,7 +2414,7 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser,
/* 52bit PA is supported only when PageSize=64K */
if (psz != SZ_64K) {
pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
- free_pages((unsigned long)base, order);
+ its_free_pages(base, order);
return -ENXIO;
}
@@ -2409,7 +2470,7 @@ retry_baser:
pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
&its->phys_base, its_base_type_string[type],
val, tmp);
- free_pages((unsigned long)base, order);
+ its_free_pages(base, order);
return -ENXIO;
}
@@ -2548,8 +2609,7 @@ static void its_free_tables(struct its_node *its)
for (i = 0; i < GITS_BASER_NR_REGS; i++) {
if (its->tables[i].base) {
- free_pages((unsigned long)its->tables[i].base,
- its->tables[i].order);
+ its_free_pages(its->tables[i].base, its->tables[i].order);
its->tables[i].base = NULL;
}
}
@@ -2815,7 +2875,7 @@ static bool allocate_vpe_l2_table(int cpu, u32 id)
/* Allocate memory for 2nd level table */
if (!table[idx]) {
- page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
+ page = its_alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
if (!page)
return false;
@@ -2934,7 +2994,7 @@ static int allocate_vpe_l1_table(void)
pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n",
np, npg, psz, epp, esz);
- page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE));
+ page = its_alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE));
if (!page)
return -ENOMEM;
@@ -2980,8 +3040,7 @@ static struct page *its_allocate_pending_table(gfp_t gfp_flags)
{
struct page *pend_page;
- pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
- get_order(LPI_PENDBASE_SZ));
+ pend_page = its_alloc_pages(gfp_flags | __GFP_ZERO, get_order(LPI_PENDBASE_SZ));
if (!pend_page)
return NULL;
@@ -2993,7 +3052,7 @@ static struct page *its_allocate_pending_table(gfp_t gfp_flags)
static void its_free_pending_table(struct page *pt)
{
- free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
+ its_free_pages(page_address(pt), get_order(LPI_PENDBASE_SZ));
}
/*
@@ -3328,8 +3387,8 @@ static bool its_alloc_table_entry(struct its_node *its,
/* Allocate memory for 2nd level table */
if (!table[idx]) {
- page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
- get_order(baser->psz));
+ page = its_alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
+ get_order(baser->psz));
if (!page)
return false;
@@ -3424,15 +3483,18 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
if (WARN_ON(!is_power_of_2(nvecs)))
nvecs = roundup_pow_of_two(nvecs);
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
/*
* Even if the device wants a single LPI, the ITT must be
* sized as a power of two (and you need at least one bit...).
*/
nr_ites = max(2, nvecs);
sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
- sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
- itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
+ sz = max(sz, ITS_ITT_ALIGN);
+
+ itt = itt_alloc_pool(its->numa_node, sz);
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+
if (alloc_lpis) {
lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
if (lpi_map)
@@ -3444,9 +3506,9 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
lpi_base = 0;
}
- if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
+ if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
kfree(dev);
- kfree(itt);
+ itt_free_pool(itt, sz);
bitmap_free(lpi_map);
kfree(col_map);
return NULL;
@@ -3456,6 +3518,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
dev->its = its;
dev->itt = itt;
+ dev->itt_sz = sz;
dev->nr_ites = nr_ites;
dev->event_map.lpi_map = lpi_map;
dev->event_map.col_map = col_map;
@@ -3483,7 +3546,7 @@ static void its_free_device(struct its_device *its_dev)
list_del(&its_dev->entry);
raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
kfree(its_dev->event_map.col_map);
- kfree(its_dev->itt);
+ itt_free_pool(its_dev->itt, its_dev->itt_sz);
kfree(its_dev);
}
@@ -3702,6 +3765,7 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
}
static const struct irq_domain_ops its_domain_ops = {
+ .select = msi_lib_irq_domain_select,
.alloc = its_irq_domain_alloc,
.free = its_irq_domain_free,
.activate = its_irq_domain_activate,
@@ -3821,14 +3885,46 @@ static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
}
+static void its_vpe_4_1_invall_locked(int cpu, struct its_vpe *vpe)
+{
+ void __iomem *rdbase;
+ u64 val;
+
+ val = GICR_INVALLR_V;
+ val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
+
+ guard(raw_spinlock)(&gic_data_rdist_cpu(cpu)->rd_lock);
+ rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
+ gic_write_lpir(val, rdbase + GICR_INVALLR);
+ wait_for_syncr(rdbase);
+}
+
static int its_vpe_set_affinity(struct irq_data *d,
const struct cpumask *mask_val,
bool force)
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
- struct cpumask common, *table_mask;
+ unsigned int from, cpu = nr_cpu_ids;
+ struct cpumask *table_mask;
+ struct its_node *its;
unsigned long flags;
- int from, cpu;
+
+ /*
+ * Check if we're racing against a VPE being destroyed, for
+ * which we don't want to allow a VMOVP.
+ */
+ if (!atomic_read(&vpe->vmapp_count)) {
+ if (gic_requires_eager_mapping())
+ return -EINVAL;
+
+ /*
+ * If we lazily map the VPEs, this isn't an error and
+ * we can exit cleanly.
+ */
+ cpu = cpumask_first(mask_val);
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+ return IRQ_SET_MASK_OK_DONE;
+ }
/*
* Changing affinity is mega expensive, so let's be as lazy as
@@ -3842,7 +3938,14 @@ static int its_vpe_set_affinity(struct irq_data *d,
* protect us, and that we must ensure nobody samples vpe->col_idx
* during the update, hence the lock below which must also be
* taken on any vLPI handling path that evaluates vpe->col_idx.
+ *
+ * Finally, we must protect ourselves against concurrent updates of
+ * the mapping state on this VM should the ITS list be in use (see
+ * the shortcut in its_send_vmovp() otherwise).
*/
+ if (its_list_map)
+ raw_spin_lock(&vpe->its_vm->vmapp_lock);
+
from = vpe_to_cpuid_lock(vpe, &flags);
table_mask = gic_data_rdist_cpu(from)->vpe_table_mask;
@@ -3850,10 +3953,15 @@ static int its_vpe_set_affinity(struct irq_data *d,
* If we are offered another CPU in the same GICv4.1 ITS
* affinity, pick this one. Otherwise, any CPU will do.
*/
- if (table_mask && cpumask_and(&common, mask_val, table_mask))
- cpu = cpumask_test_cpu(from, &common) ? from : cpumask_first(&common);
- else
+ if (table_mask)
+ cpu = cpumask_any_and(mask_val, table_mask);
+ if (cpu < nr_cpu_ids) {
+ if (cpumask_test_cpu(from, mask_val) &&
+ cpumask_test_cpu(from, table_mask))
+ cpu = from;
+ } else {
cpu = cpumask_first(mask_val);
+ }
if (from == cpu)
goto out;
@@ -3861,12 +3969,20 @@ static int its_vpe_set_affinity(struct irq_data *d,
vpe->col_idx = cpu;
its_send_vmovp(vpe);
+
+ its = find_4_1_its();
+ if (its && its->flags & ITS_FLAGS_WORKAROUND_HISILICON_162100801)
+ its_vpe_4_1_invall_locked(cpu, vpe);
+
its_vpe_db_proxy_move(vpe, from, cpu);
out:
irq_data_update_effective_affinity(d, cpumask_of(cpu));
vpe_to_cpuid_unlock(vpe, flags);
+ if (its_list_map)
+ raw_spin_unlock(&vpe->its_vm->vmapp_lock);
+
return IRQ_SET_MASK_OK_DONE;
}
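
Note on the locking style used above: its_vpe_4_1_invall_locked() takes the rd_lock via guard(raw_spinlock)() from the <linux/cleanup.h> machinery, which releases the lock automatically when the enclosing scope ends, so no explicit unlock is needed on any return path. A minimal sketch of the pattern, with hypothetical example_* names:

    #include <linux/spinlock.h>	/* pulls in the <linux/cleanup.h> guards */

    static DEFINE_RAW_SPINLOCK(example_lock);
    static int example_counter;

    static int example_increment(void)
    {
            guard(raw_spinlock)(&example_lock);	/* dropped at the closing brace */
            return ++example_counter;		/* still under the lock here */
    }

its_vpe_invall() further down uses the irqsave flavour of the same guard.
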
@@ -3935,6 +4051,8 @@ static void its_vpe_invall(struct its_vpe *vpe)
{
struct its_node *its;
+ guard(raw_spinlock_irqsave)(&vpe->its_vm->vmapp_lock);
+
list_for_each_entry(its, &its_nodes, entry) {
if (!is_v4(its))
continue;
@@ -4163,22 +4281,12 @@ static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
static void its_vpe_4_1_invall(struct its_vpe *vpe)
{
- void __iomem *rdbase;
unsigned long flags;
- u64 val;
int cpu;
- val = GICR_INVALLR_V;
- val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
-
/* Target the redistributor this vPE is currently known on */
cpu = vpe_to_cpuid_lock(vpe, &flags);
- raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
- rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
- gic_write_lpir(val, rdbase + GICR_INVALLR);
-
- wait_for_syncr(rdbase);
- raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
+ its_vpe_4_1_invall_locked(cpu, vpe);
vpe_to_cpuid_unlock(vpe, flags);
}
@@ -4436,12 +4544,12 @@ static const struct irq_domain_ops its_sgi_domain_ops = {
static int its_vpe_id_alloc(void)
{
- return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
+ return ida_alloc_max(&its_vpeid_ida, ITS_MAX_VPEID - 1, GFP_KERNEL);
}
static void its_vpe_id_free(u16 id)
{
- ida_simple_remove(&its_vpeid_ida, id);
+ ida_free(&its_vpeid_ida, id);
}
static int its_vpe_init(struct its_vpe *vpe)
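
The ida_simple_*() API is deprecated in favour of ida_alloc_*()/ida_free(). Note the off-by-one in the bounds: ida_simple_get() took an exclusive upper limit, while ida_alloc_max() takes an inclusive maximum, hence the ITS_MAX_VPEID - 1 above. A small sketch with a hypothetical ida:

    #include <linux/gfp.h>
    #include <linux/idr.h>

    static DEFINE_IDA(example_ida);

    static int example_get_id(void)
    {
            /* Inclusive max: IDs 0..15; the old call was (ida, 0, 16, gfp) */
            return ida_alloc_max(&example_ida, 15, GFP_KERNEL);
    }

    static void example_put_id(int id)
    {
            ida_free(&example_ida, id);
    }
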
@@ -4470,9 +4578,8 @@ static int its_vpe_init(struct its_vpe *vpe)
raw_spin_lock_init(&vpe->vpe_lock);
vpe->vpe_id = vpe_id;
vpe->vpt_page = vpt_page;
- if (gic_rdists->has_rvpeid)
- atomic_set(&vpe->vmapp_count, 0);
- else
+ atomic_set(&vpe->vmapp_count, 0);
+ if (!gic_rdists->has_rvpeid)
vpe->vpe_proxy_event = -1;
return 0;
@@ -4521,8 +4628,6 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
struct page *vprop_page;
int base, nr_ids, i, err = 0;
- BUG_ON(!vm);
-
bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
if (!bitmap)
return -ENOMEM;
@@ -4542,6 +4647,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
vm->db_lpi_base = base;
vm->nr_db_lpis = nr_ids;
vm->vprop_page = vprop_page;
+ raw_spin_lock_init(&vm->vmapp_lock);
if (gic_rdists->has_rvpeid)
irqchip = &its_vpe_4_1_irq_chip;
@@ -4561,13 +4667,8 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
irqd_set_resend_when_in_progress(irq_get_irq_data(virq + i));
}
- if (err) {
- if (i > 0)
- its_vpe_irq_domain_free(domain, virq, i);
-
- its_lpi_free(bitmap, base, nr_ids);
- its_free_prop_table(vprop_page);
- }
+ if (err)
+ its_vpe_irq_domain_free(domain, virq, i);
return err;
}
@@ -4578,6 +4679,10 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
struct its_node *its;
+ /* Map the VPE to the first possible CPU */
+ vpe->col_idx = cpumask_first(cpu_online_mask);
+ irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
+
/*
* If we use the list map, we issue VMAPP on demand... Unless
* we're on a GICv4.1 and we eagerly map the VPE on all ITSs
@@ -4586,9 +4691,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
if (!gic_requires_eager_mapping())
return 0;
- /* Map the VPE to the first possible CPU */
- vpe->col_idx = cpumask_first(cpu_online_mask);
-
list_for_each_entry(its, &its_nodes, entry) {
if (!is_v4(its))
continue;
@@ -4597,8 +4699,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
its_send_vinvall(its, vpe);
}
- irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
-
return 0;
}
@@ -4779,6 +4879,14 @@ static bool its_set_non_coherent(void *data)
return true;
}
+static bool __maybe_unused its_enable_quirk_hip09_162100801(void *data)
+{
+ struct its_node *its = data;
+
+ its->flags |= ITS_FLAGS_WORKAROUND_HISILICON_162100801;
+ return true;
+}
+
static const struct gic_quirk its_quirks[] = {
#ifdef CONFIG_CAVIUM_ERRATUM_22375
{
@@ -4825,6 +4933,14 @@ static const struct gic_quirk its_quirks[] = {
.init = its_enable_quirk_hip07_161600802,
},
#endif
+#ifdef CONFIG_HISILICON_ERRATUM_162100801
+ {
+ .desc = "ITS: Hip09 erratum 162100801",
+ .iidr = 0x00051736,
+ .mask = 0xffffffff,
+ .init = its_enable_quirk_hip09_162100801,
+ },
+#endif
#ifdef CONFIG_ROCKCHIP_ERRATUM_3588001
{
.desc = "ITS: Rockchip erratum RK3588001",
@@ -5009,6 +5125,9 @@ static int its_init_domain(struct its_node *its)
irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
+ inner_domain->msi_parent_ops = &gic_v3_its_msi_parent_ops;
+ inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
+
return 0;
}
@@ -5127,8 +5246,9 @@ static int __init its_probe_one(struct its_node *its)
}
}
- page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
- get_order(ITS_CMD_QUEUE_SZ));
+ page = its_alloc_pages_node(its->numa_node,
+ GFP_KERNEL | __GFP_ZERO,
+ get_order(ITS_CMD_QUEUE_SZ));
if (!page) {
err = -ENOMEM;
goto out_unmap_sgir;
@@ -5192,7 +5312,7 @@ static int __init its_probe_one(struct its_node *its)
out_free_tables:
its_free_tables(its);
out_free_cmd:
- free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
+ its_free_pages(its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
out_unmap_sgir:
if (its->sgir_base)
iounmap(its->sgir_base);
@@ -5596,6 +5716,10 @@ static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
goto node_err;
}
+ if (acpi_get_madt_revision() >= 7 &&
+ (its_entry->flags & ACPI_MADT_ITS_NON_COHERENT))
+ its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;
+
err = its_probe_one(its);
if (!err)
return 0;
@@ -5666,7 +5790,7 @@ int __init its_lpi_memreserve_init(void)
}
int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
- struct irq_domain *parent_domain)
+ struct irq_domain *parent_domain, u8 irq_prio)
{
struct device_node *of_node;
struct its_node *its;
@@ -5674,8 +5798,13 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
bool has_v4_1 = false;
int err;
+ itt_pool = gen_pool_create(get_order(ITS_ITT_ALIGN), -1);
+ if (!itt_pool)
+ return -ENOMEM;
+
gic_rdists = rdists;
+ lpi_prop_prio = irq_prio;
its_parent = parent_domain;
of_node = to_of_node(handle);
if (of_node)
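
The ITT allocations now come from a genalloc pool (the itt_pool created above) instead of plain kmalloc(), so freed ITTs can be recycled at their natural alignment. A simplified sketch of how a pool of that shape can be grown and consumed, using hypothetical example_* names; the driver's real allocator additionally handles NUMA placement and allocations larger than a page:

    #include <linux/genalloc.h>
    #include <linux/gfp.h>

    static struct gen_pool *example_itt_pool;	/* created with gen_pool_create() */

    static void *example_itt_alloc(size_t sz)
    {
            unsigned long addr = gen_pool_alloc(example_itt_pool, sz);

            if (!addr) {
                    /* Pool exhausted: grow it by one zeroed page and retry */
                    unsigned long page = __get_free_page(GFP_KERNEL | __GFP_ZERO);

                    if (!page)
                            return NULL;
                    if (gen_pool_add(example_itt_pool, page, PAGE_SIZE, -1)) {
                            free_page(page);
                            return NULL;
                    }
                    addr = gen_pool_alloc(example_itt_pool, sz);
            }
            return (void *)addr;
    }

    static void example_itt_free(void *itt, size_t sz)
    {
            /* Returns the chunk to the pool for later reuse */
            gen_pool_free(example_itt_pool, (unsigned long)itt, sz);
    }
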
diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c
index dbb8b1efda44..3fe870f8ee17 100644
--- a/drivers/irqchip/irq-gic-v3-mbi.c
+++ b/drivers/irqchip/irq-gic-v3-mbi.c
@@ -18,6 +18,8 @@
#include <linux/irqchip/arm-gic-v3.h>
+#include "irq-msi-lib.h"
+
struct mbi_range {
u32 spi_start;
u32 nr_spis;
@@ -138,6 +140,7 @@ static void mbi_irq_domain_free(struct irq_domain *domain,
}
static const struct irq_domain_ops mbi_domain_ops = {
+ .select = msi_lib_irq_domain_select,
.alloc = mbi_irq_domain_alloc,
.free = mbi_irq_domain_free,
};
@@ -151,54 +154,6 @@ static void mbi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
}
-#ifdef CONFIG_PCI_MSI
-/* PCI-specific irqchip */
-static void mbi_mask_msi_irq(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void mbi_unmask_msi_irq(struct irq_data *d)
-{
- pci_msi_unmask_irq(d);
- irq_chip_unmask_parent(d);
-}
-
-static struct irq_chip mbi_msi_irq_chip = {
- .name = "MSI",
- .irq_mask = mbi_mask_msi_irq,
- .irq_unmask = mbi_unmask_msi_irq,
- .irq_eoi = irq_chip_eoi_parent,
- .irq_compose_msi_msg = mbi_compose_msi_msg,
-};
-
-static struct msi_domain_info mbi_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
- .chip = &mbi_msi_irq_chip,
-};
-
-static int mbi_allocate_pci_domain(struct irq_domain *nexus_domain,
- struct irq_domain **pci_domain)
-{
- *pci_domain = pci_msi_create_irq_domain(nexus_domain->parent->fwnode,
- &mbi_msi_domain_info,
- nexus_domain);
- if (!*pci_domain)
- return -ENOMEM;
-
- return 0;
-}
-#else
-static int mbi_allocate_pci_domain(struct irq_domain *nexus_domain,
- struct irq_domain **pci_domain)
-{
- *pci_domain = NULL;
- return 0;
-}
-#endif
-
static void mbi_compose_mbi_msg(struct irq_data *data, struct msi_msg *msg)
{
mbi_compose_msi_msg(data, msg);
@@ -210,28 +165,51 @@ static void mbi_compose_mbi_msg(struct irq_data *data, struct msi_msg *msg)
iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), &msg[1]);
}
-/* Platform-MSI specific irqchip */
-static struct irq_chip mbi_pmsi_irq_chip = {
- .name = "pMSI",
- .irq_set_type = irq_chip_set_type_parent,
- .irq_compose_msi_msg = mbi_compose_mbi_msg,
- .flags = IRQCHIP_SUPPORTS_LEVEL_MSI,
-};
-
-static struct msi_domain_ops mbi_pmsi_ops = {
-};
+static bool mbi_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *real_parent, struct msi_domain_info *info)
+{
+ if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
+ return false;
+
+ switch (info->bus_token) {
+ case DOMAIN_BUS_PCI_DEVICE_MSI:
+ case DOMAIN_BUS_PCI_DEVICE_MSIX:
+ info->chip->irq_compose_msi_msg = mbi_compose_msi_msg;
+ return true;
+
+ case DOMAIN_BUS_DEVICE_MSI:
+ info->chip->irq_compose_msi_msg = mbi_compose_mbi_msg;
+ info->chip->irq_set_type = irq_chip_set_type_parent;
+ info->chip->flags |= IRQCHIP_SUPPORTS_LEVEL_MSI;
+ info->flags |= MSI_FLAG_LEVEL_CAPABLE;
+ return true;
+
+ default:
+ WARN_ON_ONCE(1);
+ return false;
+ }
+}
-static struct msi_domain_info mbi_pmsi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_LEVEL_CAPABLE),
- .ops = &mbi_pmsi_ops,
- .chip = &mbi_pmsi_irq_chip,
+#define MBI_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT)
+
+#define MBI_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX | \
+ MSI_FLAG_MULTI_PCI_MSI)
+
+static const struct msi_parent_ops gic_v3_mbi_msi_parent_ops = {
+ .supported_flags = MBI_MSI_FLAGS_SUPPORTED,
+ .required_flags = MBI_MSI_FLAGS_REQUIRED,
+ .bus_select_token = DOMAIN_BUS_NEXUS,
+ .bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
+ .prefix = "MBI-",
+ .init_dev_msi_info = mbi_init_dev_msi_info,
};
-static int mbi_allocate_domains(struct irq_domain *parent)
+static int mbi_allocate_domain(struct irq_domain *parent)
{
- struct irq_domain *nexus_domain, *pci_domain, *plat_domain;
- int err;
+ struct irq_domain *nexus_domain;
nexus_domain = irq_domain_create_hierarchy(parent, 0, 0, parent->fwnode,
&mbi_domain_ops, NULL);
@@ -239,22 +217,8 @@ static int mbi_allocate_domains(struct irq_domain *parent)
return -ENOMEM;
irq_domain_update_bus_token(nexus_domain, DOMAIN_BUS_NEXUS);
-
- err = mbi_allocate_pci_domain(nexus_domain, &pci_domain);
-
- plat_domain = platform_msi_create_irq_domain(parent->fwnode,
- &mbi_pmsi_domain_info,
- nexus_domain);
-
- if (err || !plat_domain) {
- if (plat_domain)
- irq_domain_remove(plat_domain);
- if (pci_domain)
- irq_domain_remove(pci_domain);
- irq_domain_remove(nexus_domain);
- return -ENOMEM;
- }
-
+ nexus_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
+ nexus_domain->msi_parent_ops = &gic_v3_mbi_msi_parent_ops;
return 0;
}
@@ -317,7 +281,7 @@ int __init mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent)
pr_info("Using MBI frame %pa\n", &mbi_phys_base);
- ret = mbi_allocate_domains(parent);
+ ret = mbi_allocate_domain(parent);
if (ret)
goto err_free_mbi;
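
Both the ITS and MBI conversions follow the same msi-lib shape: instead of pre-creating PCI and platform MSI child domains, the nexus domain is flagged IRQ_DOMAIN_FLAG_MSI_PARENT and the MSI core builds per-device domains on demand, consulting msi_parent_ops. A condensed sketch of the pattern, with hypothetical example_* names and the flag set reduced to the generic minimum:

    #include <linux/irqdomain.h>
    #include <linux/msi.h>

    #include "irq-msi-lib.h"	/* msi_lib_* helpers, as used in the diffs above */

    static const struct irq_domain_ops example_domain_ops = {
            .select	= msi_lib_irq_domain_select,
            /* .alloc/.free as before */
    };

    static const struct msi_parent_ops example_msi_parent_ops = {
            .supported_flags	= MSI_GENERIC_FLAGS_MASK,
            .required_flags		= MSI_FLAG_USE_DEF_DOM_OPS |
                                      MSI_FLAG_USE_DEF_CHIP_OPS,
            .bus_select_token	= DOMAIN_BUS_NEXUS,
            .bus_select_mask	= MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
            .prefix			= "EXAMPLE-",
            .init_dev_msi_info	= msi_lib_init_dev_msi_info,
    };

    static int example_allocate_domain(struct irq_domain *parent)
    {
            struct irq_domain *d;

            d = irq_domain_create_hierarchy(parent, 0, 0, parent->fwnode,
                                            &example_domain_ops, NULL);
            if (!d)
                    return -ENOMEM;

            /* One MSI parent replaces the per-bus child domains */
            irq_domain_update_bus_token(d, DOMAIN_BUS_NEXUS);
            d->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
            d->msi_parent_ops = &example_msi_parent_ops;
            return 0;
    }
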
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 98b0329b7154..270d7a4d85a6 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -12,6 +12,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
+#include <linux/kernel.h>
#include <linux/kstrtox.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -19,10 +20,12 @@
#include <linux/percpu.h>
#include <linux/refcount.h>
#include <linux/slab.h>
+#include <linux/iopoll.h>
#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-common.h>
#include <linux/irqchip/arm-gic-v3.h>
+#include <linux/irqchip/arm-gic-v3-prio.h>
#include <linux/irqchip/irq-partition-percpu.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
@@ -35,14 +38,18 @@
#include "irq-gic-common.h"
-#define GICD_INT_NMI_PRI (GICD_INT_DEF_PRI & ~0x80)
+static u8 dist_prio_irq __ro_after_init = GICV3_PRIO_IRQ;
+static u8 dist_prio_nmi __ro_after_init = GICV3_PRIO_NMI;
#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0)
#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1)
#define FLAGS_WORKAROUND_ASR_ERRATUM_8601001 (1ULL << 2)
+#define FLAGS_WORKAROUND_INSECURE (1ULL << 3)
#define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1)
+static struct cpumask broken_rdists __read_mostly __maybe_unused;
+
struct redist_region {
void __iomem *redist_base;
phys_addr_t phys_base;
@@ -77,6 +84,8 @@ static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
#define GIC_LINE_NR min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
#define GIC_ESPI_NR GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)
+static bool nmi_support_forbidden;
+
/*
* There are 16 SGIs, though we only actually use 8 in Linux. The other 8 SGIs
* are potentially stolen by the secure side. Some code, especially code dealing
@@ -107,29 +116,117 @@ static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
*/
static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);
-DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities);
-EXPORT_SYMBOL(gic_nonsecure_priorities);
+static u32 gic_get_pribits(void)
+{
+ u32 pribits;
-/*
- * When the Non-secure world has access to group 0 interrupts (as a
- * consequence of SCR_EL3.FIQ == 0), reading the ICC_RPR_EL1 register will
- * return the Distributor's view of the interrupt priority.
- *
- * When GIC security is enabled (GICD_CTLR.DS == 0), the interrupt priority
- * written by software is moved to the Non-secure range by the Distributor.
- *
- * If both are true (which is when gic_nonsecure_priorities gets enabled),
- * we need to shift down the priority programmed by software to match it
- * against the value returned by ICC_RPR_EL1.
- */
-#define GICD_INT_RPR_PRI(priority) \
- ({ \
- u32 __priority = (priority); \
- if (static_branch_unlikely(&gic_nonsecure_priorities)) \
- __priority = 0x80 | (__priority >> 1); \
- \
- __priority; \
- })
+ pribits = gic_read_ctlr();
+ pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
+ pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
+ pribits++;
+
+ return pribits;
+}
+
+static bool gic_has_group0(void)
+{
+ u32 val;
+ u32 old_pmr;
+
+ old_pmr = gic_read_pmr();
+
+ /*
+ * Let's find out if Group0 is under control of EL3 or not by
+ * setting the highest possible, non-zero priority in PMR.
+ *
+ * If SCR_EL3.FIQ is set, the priority gets shifted down in
+ * order for the CPU interface to set bit 7, and keep the
+ * actual priority in the non-secure range. In the process, it
+	 * loses the least significant bit and the actual priority
+	 * becomes 0x80. Reading it back returns 0, indicating that
+	 * we don't have access to Group0.
+ */
+ gic_write_pmr(BIT(8 - gic_get_pribits()));
+ val = gic_read_pmr();
+
+ gic_write_pmr(old_pmr);
+
+ return val != 0;
+}
+
+static inline bool gic_dist_security_disabled(void)
+{
+ return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
+}
+
+static bool cpus_have_security_disabled __ro_after_init;
+static bool cpus_have_group0 __ro_after_init;
+
+static void __init gic_prio_init(void)
+{
+ bool ds;
+
+ cpus_have_group0 = gic_has_group0();
+
+ ds = gic_dist_security_disabled();
+ if ((gic_data.flags & FLAGS_WORKAROUND_INSECURE) && !ds) {
+ if (cpus_have_group0) {
+ u32 val;
+
+ val = readl_relaxed(gic_data.dist_base + GICD_CTLR);
+ val |= GICD_CTLR_DS;
+ writel_relaxed(val, gic_data.dist_base + GICD_CTLR);
+
+ ds = gic_dist_security_disabled();
+ if (ds)
+ pr_warn("Broken GIC integration, security disabled\n");
+ } else {
+ pr_warn("Broken GIC integration, pNMI forbidden\n");
+ nmi_support_forbidden = true;
+ }
+ }
+
+ cpus_have_security_disabled = ds;
+
+ /*
+ * How priority values are used by the GIC depends on two things:
+ * the security state of the GIC (controlled by the GICD_CTRL.DS bit)
+ * and if Group 0 interrupts can be delivered to Linux in the non-secure
+ * world as FIQs (controlled by the SCR_EL3.FIQ bit). These affect the
+ * way priorities are presented in ICC_PMR_EL1 and in the distributor:
+ *
+ * GICD_CTRL.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Distributor
+ * -------------------------------------------------------
+ * 1 | - | unchanged | unchanged
+ * -------------------------------------------------------
+ * 0 | 1 | non-secure | non-secure
+ * -------------------------------------------------------
+ * 0 | 0 | unchanged | non-secure
+ *
+ * In the non-secure view reads and writes are modified:
+ *
+ * - A value written is right-shifted by one and the MSB is set,
+ * forcing the priority into the non-secure range.
+ *
+ * - A value read is left-shifted by one.
+ *
+ * In the first two cases, where ICC_PMR_EL1 and the interrupt priority
+ * are both either modified or unchanged, we can use the same set of
+ * priorities.
+ *
+ * In the last case, where only the interrupt priorities are modified to
+ * be in the non-secure range, we program the non-secure values into
+ * the distributor to match the PMR values we want.
+ */
+ if (cpus_have_group0 && !cpus_have_security_disabled) {
+ dist_prio_irq = __gicv3_prio_to_ns(dist_prio_irq);
+ dist_prio_nmi = __gicv3_prio_to_ns(dist_prio_nmi);
+ }
+
+ pr_info("GICD_CTRL.DS=%d, SCR_EL3.FIQ=%d\n",
+ cpus_have_security_disabled,
+ !cpus_have_group0);
+}
/* rdist_nmi_refs[n] == number of cpus having the rdist interrupt n set as NMI */
static refcount_t *rdist_nmi_refs;
@@ -180,11 +277,6 @@ static enum gic_intid_range get_intid_range(struct irq_data *d)
return __get_intid_range(d->hwirq);
}
-static inline unsigned int gic_irq(struct irq_data *d)
-{
- return d->hwirq;
-}
-
static inline bool gic_irq_in_rdist(struct irq_data *d)
{
switch (get_intid_range(d)) {
@@ -251,17 +343,13 @@ static inline void __iomem *gic_dist_base(struct irq_data *d)
static void gic_do_wait_for_rwp(void __iomem *base, u32 bit)
{
- u32 count = 1000000; /* 1s! */
+ u32 val;
+ int ret;
- while (readl_relaxed(base + GICD_CTLR) & bit) {
- count--;
- if (!count) {
- pr_err_ratelimited("RWP timeout, gone fishing\n");
- return;
- }
- cpu_relax();
- udelay(1);
- }
+ ret = readl_relaxed_poll_timeout_atomic(base + GICD_CTLR, val, !(val & bit),
+ 1, USEC_PER_SEC);
+ if (ret == -ETIMEDOUT)
+ pr_err_ratelimited("RWP timeout, gone fishing\n");
}
/* Wait for completion of a distributor change */
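
The open-coded count/udelay loops are replaced with readl_relaxed_poll_timeout_atomic() from <linux/iopoll.h>, which encapsulates the delay and timeout bookkeeping and returns 0 on success or -ETIMEDOUT. A minimal sketch with a hypothetical register and bit:

    #include <linux/iopoll.h>

    /* Poll until bit 0 of a status register clears: check every 1us,
     * give up after one second, never sleep (atomic variant). */
    static int example_wait_idle(void __iomem *reg)
    {
            u32 val;

            return readl_relaxed_poll_timeout_atomic(reg, val, !(val & BIT(0)),
                                                     1, USEC_PER_SEC);
    }
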
@@ -279,8 +367,8 @@ static void gic_redist_wait_for_rwp(void)
static void gic_enable_redist(bool enable)
{
void __iomem *rbase;
- u32 count = 1000000; /* 1s! */
u32 val;
+ int ret;
if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996)
return;
@@ -301,16 +389,13 @@ static void gic_enable_redist(bool enable)
return; /* No PM support in this redistributor */
}
- while (--count) {
- val = readl_relaxed(rbase + GICR_WAKER);
- if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
- break;
- cpu_relax();
- udelay(1);
- }
- if (!count)
+ ret = readl_relaxed_poll_timeout_atomic(rbase + GICR_WAKER, val,
+ enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep),
+ 1, USEC_PER_SEC);
+ if (ret == -ETIMEDOUT) {
pr_err_ratelimited("redistributor failed to %s...\n",
enable ? "wakeup" : "sleep");
+ }
}
/*
@@ -463,6 +548,13 @@ static int gic_irq_set_irqchip_state(struct irq_data *d,
}
gic_poke_irq(d, reg);
+
+ /*
+ * Force read-back to guarantee that the active state has taken
+ * effect, and won't race with a guest-driven deactivation.
+ */
+ if (reg == GICD_ISACTIVER)
+ gic_peek_irq(d, reg);
return 0;
}
@@ -548,7 +640,7 @@ static int gic_irq_nmi_setup(struct irq_data *d)
* A secondary irq_chip should be in charge of LPI request,
* it should not be possible to get there
*/
- if (WARN_ON(gic_irq(d) >= 8192))
+ if (WARN_ON(irqd_to_hwirq(d) >= 8192))
return -EINVAL;
/* desc lock should already be held */
@@ -567,7 +659,7 @@ static int gic_irq_nmi_setup(struct irq_data *d)
desc->handle_irq = handle_fasteoi_nmi;
}
- gic_irq_set_prio(d, GICD_INT_NMI_PRI);
+ gic_irq_set_prio(d, dist_prio_nmi);
return 0;
}
@@ -588,7 +680,7 @@ static void gic_irq_nmi_teardown(struct irq_data *d)
* A secondary irq_chip should be in charge of LPI request,
* it should not be possible to get there
*/
- if (WARN_ON(gic_irq(d) >= 8192))
+ if (WARN_ON(irqd_to_hwirq(d) >= 8192))
return;
/* desc lock should already be held */
@@ -602,7 +694,7 @@ static void gic_irq_nmi_teardown(struct irq_data *d)
desc->handle_irq = handle_fasteoi_irq;
}
- gic_irq_set_prio(d, GICD_INT_DEF_PRI);
+ gic_irq_set_prio(d, dist_prio_irq);
}
static bool gic_arm64_erratum_2941627_needed(struct irq_data *d)
@@ -626,7 +718,7 @@ static bool gic_arm64_erratum_2941627_needed(struct irq_data *d)
static void gic_eoi_irq(struct irq_data *d)
{
- write_gicreg(gic_irq(d), ICC_EOIR1_EL1);
+ write_gicreg(irqd_to_hwirq(d), ICC_EOIR1_EL1);
isb();
if (gic_arm64_erratum_2941627_needed(d)) {
@@ -646,19 +738,19 @@ static void gic_eoimode1_eoi_irq(struct irq_data *d)
* No need to deactivate an LPI, or an interrupt that
* is getting forwarded to a vcpu.
*/
- if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
+ if (irqd_to_hwirq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
return;
if (!gic_arm64_erratum_2941627_needed(d))
- gic_write_dir(gic_irq(d));
+ gic_write_dir(irqd_to_hwirq(d));
else
gic_poke_irq(d, GICD_ICACTIVER);
}
static int gic_set_type(struct irq_data *d, unsigned int type)
{
+ irq_hw_number_t irq = irqd_to_hwirq(d);
enum gic_intid_range range;
- unsigned int irq = gic_irq(d);
void __iomem *base;
u32 offset, index;
int ret;
@@ -681,10 +773,10 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
offset = convert_offset_index(d, GICD_ICFGR, &index);
- ret = gic_configure_irq(index, type, base + offset, NULL);
+ ret = gic_configure_irq(index, type, base + offset);
if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) {
/* Misconfigured PPIs are usually not fatal */
- pr_warn("GIC: PPI INTID%d is secure or misconfigured\n", irq);
+ pr_warn("GIC: PPI INTID%ld is secure or misconfigured\n", irq);
ret = 0;
}
@@ -749,7 +841,7 @@ static void gic_deactivate_unhandled(u32 irqnr)
* register state is not stale, as these may have been indirectly written
* *after* exception entry.
*
- * (2) Deactivate the interrupt when EOI mode 1 is in use.
+ * (2) Execute an interrupt priority drop when EOI mode 1 is in use.
*/
static inline void gic_complete_ack(u32 irqnr)
{
@@ -764,7 +856,7 @@ static bool gic_rpr_is_nmi_prio(void)
if (!gic_supports_nmi())
return false;
- return unlikely(gic_read_rpr() == GICD_INT_RPR_PRI(GICD_INT_NMI_PRI));
+ return unlikely(gic_read_rpr() == GICV3_PRIO_NMI);
}
static bool gic_irqnr_is_special(u32 irqnr)
@@ -869,7 +961,7 @@ static void __gic_handle_irq_from_irqsoff(struct pt_regs *regs)
__gic_handle_nmi(irqnr, regs);
}
-static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
if (unlikely(gic_supports_nmi() && !interrupts_enabled(regs)))
__gic_handle_irq_from_irqsoff(regs);
@@ -877,44 +969,6 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
__gic_handle_irq_from_irqson(regs);
}
-static u32 gic_get_pribits(void)
-{
- u32 pribits;
-
- pribits = gic_read_ctlr();
- pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
- pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
- pribits++;
-
- return pribits;
-}
-
-static bool gic_has_group0(void)
-{
- u32 val;
- u32 old_pmr;
-
- old_pmr = gic_read_pmr();
-
- /*
- * Let's find out if Group0 is under control of EL3 or not by
- * setting the highest possible, non-zero priority in PMR.
- *
- * If SCR_EL3.FIQ is set, the priority gets shifted down in
- * order for the CPU interface to set bit 7, and keep the
- * actual priority in the non-secure range. In the process, it
- * looses the least significant bit and the actual priority
- * becomes 0x80. Reading it back returns 0, indicating that
- * we're don't have access to Group0.
- */
- gic_write_pmr(BIT(8 - gic_get_pribits()));
- val = gic_read_pmr();
-
- gic_write_pmr(old_pmr);
-
- return val != 0;
-}
-
static void __init gic_dist_init(void)
{
unsigned int i;
@@ -948,10 +1002,11 @@ static void __init gic_dist_init(void)
writel_relaxed(0, base + GICD_ICFGRnE + i / 4);
for (i = 0; i < GIC_ESPI_NR; i += 4)
- writel_relaxed(GICD_INT_DEF_PRI_X4, base + GICD_IPRIORITYRnE + i);
+ writel_relaxed(REPEAT_BYTE_U32(dist_prio_irq),
+ base + GICD_IPRIORITYRnE + i);
/* Now do the common stuff */
- gic_dist_config(base, GIC_LINE_NR, NULL);
+ gic_dist_config(base, GIC_LINE_NR, dist_prio_irq);
val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
@@ -1130,20 +1185,8 @@ static void gic_update_rdist_properties(void)
gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : "");
}
-/* Check whether it's single security state view */
-static inline bool gic_dist_security_disabled(void)
-{
- return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
-}
-
-static void gic_cpu_sys_reg_init(void)
+static void gic_cpu_sys_reg_enable(void)
{
- int i, cpu = smp_processor_id();
- u64 mpidr = gic_cpu_to_affinity(cpu);
- u64 need_rss = MPIDR_RS(mpidr);
- bool group0;
- u32 pribits;
-
/*
* Need to check that the SRE bit has actually been set. If
* not, it means that SRE is disabled at EL2. We're going to
@@ -1154,6 +1197,16 @@ static void gic_cpu_sys_reg_init(void)
if (!gic_enable_sre())
pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
+}
+
+static void gic_cpu_sys_reg_init(void)
+{
+ int i, cpu = smp_processor_id();
+ u64 mpidr = gic_cpu_to_affinity(cpu);
+ u64 need_rss = MPIDR_RS(mpidr);
+ bool group0;
+ u32 pribits;
+
pribits = gic_get_pribits();
group0 = gic_has_group0();
@@ -1163,18 +1216,14 @@ static void gic_cpu_sys_reg_init(void)
write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
} else if (gic_supports_nmi()) {
/*
- * Mismatch configuration with boot CPU, the system is likely
- * to die as interrupt masking will not work properly on all
- * CPUs
+ * Check that all CPUs use the same priority space.
*
- * The boot CPU calls this function before enabling NMI support,
- * and as a result we'll never see this warning in the boot path
- * for that CPU.
+ * If there's a mismatch with the boot CPU, the system is
+ * likely to die as interrupt masking will not work properly on
+ * all CPUs.
*/
- if (static_branch_unlikely(&gic_nonsecure_priorities))
- WARN_ON(!group0 || gic_dist_security_disabled());
- else
- WARN_ON(group0 && !gic_dist_security_disabled());
+ WARN_ON(group0 != cpus_have_group0);
+ WARN_ON(gic_dist_security_disabled() != cpus_have_security_disabled);
}
/*
@@ -1293,7 +1342,8 @@ static void gic_cpu_init(void)
for (i = 0; i < gic_data.ppi_nr + SGI_NR; i += 32)
writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8);
- gic_cpu_config(rbase, gic_data.ppi_nr + SGI_NR, gic_redist_wait_for_rwp);
+ gic_cpu_config(rbase, gic_data.ppi_nr + SGI_NR, dist_prio_irq);
+ gic_redist_wait_for_rwp();
/* initialise system registers */
gic_cpu_sys_reg_init();
@@ -1304,8 +1354,21 @@ static void gic_cpu_init(void)
#define MPIDR_TO_SGI_RS(mpidr) (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
#define MPIDR_TO_SGI_CLUSTER_ID(mpidr) ((mpidr) & ~0xFUL)
+/*
+ * gic_starting_cpu() is called after the last point where cpuhp is allowed
+ * to fail. So pre check for problems earlier.
+ */
+static int gic_check_rdist(unsigned int cpu)
+{
+ if (cpumask_test_cpu(cpu, &broken_rdists))
+ return -EINVAL;
+
+ return 0;
+}
+
static int gic_starting_cpu(unsigned int cpu)
{
+ gic_cpu_sys_reg_enable();
gic_cpu_init();
if (gic_dist_supports_lpis())
@@ -1395,6 +1458,10 @@ static void __init gic_smp_init(void)
};
int base_sgi;
+ cpuhp_setup_state_nocalls(CPUHP_BP_PREPARE_DYN,
+ "irqchip/arm/gicv3:checkrdist",
+ gic_check_rdist, NULL);
+
cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
"irqchip/arm/gicv3:starting",
gic_starting_cpu, NULL);
@@ -1464,9 +1531,10 @@ static int gic_retrigger(struct irq_data *data)
static int gic_cpu_pm_notifier(struct notifier_block *self,
unsigned long cmd, void *v)
{
- if (cmd == CPU_PM_EXIT) {
+ if (cmd == CPU_PM_EXIT || cmd == CPU_PM_ENTER_FAILED) {
if (gic_dist_security_disabled())
gic_enable_redist(true);
+ gic_cpu_sys_reg_enable();
gic_cpu_sys_reg_init();
} else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
gic_write_grpen1(0);
@@ -1702,9 +1770,13 @@ static int gic_irq_domain_select(struct irq_domain *d,
irq_hw_number_t hwirq;
/* Not for us */
- if (fwspec->fwnode != d->fwnode)
+ if (fwspec->fwnode != d->fwnode)
return 0;
+ /* Handle pure domain searches */
+ if (!fwspec->param_count)
+ return d->bus_token == bus_token;
+
/* If this is not DT, then we have a single domain */
if (!is_of_node(fwspec->fwnode))
return 1;
@@ -1859,6 +1931,18 @@ static bool gic_enable_quirk_arm64_2941627(void *data)
return true;
}
+static bool gic_enable_quirk_rk3399(void *data)
+{
+ struct gic_chip_data *d = data;
+
+ if (of_machine_is_compatible("rockchip,rk3399")) {
+ d->flags |= FLAGS_WORKAROUND_INSECURE;
+ return true;
+ }
+
+ return false;
+}
+
static bool rd_set_non_coherent(void *data)
{
struct gic_chip_data *d = data;
@@ -1934,6 +2018,12 @@ static const struct gic_quirk gic_quirks[] = {
.init = rd_set_non_coherent,
},
{
+ .desc = "GICv3: Insecure RK3399 integration",
+ .iidr = 0x0000043b,
+ .mask = 0xff000fff,
+ .init = gic_enable_quirk_rk3399,
+ },
+ {
}
};
@@ -1941,7 +2031,7 @@ static void gic_enable_nmi_support(void)
{
int i;
- if (!gic_prio_masking_enabled())
+ if (!gic_prio_masking_enabled() || nmi_support_forbidden)
return;
rdist_nmi_refs = kcalloc(gic_data.ppi_nr + SGI_NR,
@@ -1955,36 +2045,6 @@ static void gic_enable_nmi_support(void)
pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n",
gic_has_relaxed_pmr_sync() ? "relaxed" : "forced");
- /*
- * How priority values are used by the GIC depends on two things:
- * the security state of the GIC (controlled by the GICD_CTRL.DS bit)
- * and if Group 0 interrupts can be delivered to Linux in the non-secure
- * world as FIQs (controlled by the SCR_EL3.FIQ bit). These affect the
- * ICC_PMR_EL1 register and the priority that software assigns to
- * interrupts:
- *
- * GICD_CTRL.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Group 1 priority
- * -----------------------------------------------------------
- * 1 | - | unchanged | unchanged
- * -----------------------------------------------------------
- * 0 | 1 | non-secure | non-secure
- * -----------------------------------------------------------
- * 0 | 0 | unchanged | non-secure
- *
- * where non-secure means that the value is right-shifted by one and the
- * MSB bit set, to make it fit in the non-secure priority range.
- *
- * In the first two cases, where ICC_PMR_EL1 and the interrupt priority
- * are both either modified or unchanged, we can use the same set of
- * priorities.
- *
- * In the last case, where only the interrupt priorities are modified to
- * be in the non-secure range, we use a different PMR value to mask IRQs
- * and the rest of the values that we use remain unchanged.
- */
- if (gic_has_group0() && !gic_dist_security_disabled())
- static_branch_enable(&gic_nonsecure_priorities);
-
static_branch_enable(&supports_pseudo_nmis);
if (static_branch_likely(&supports_deactivate_key))
@@ -2065,6 +2125,8 @@ static int __init gic_init_bases(phys_addr_t dist_phys_base,
gic_update_rdist_properties();
+ gic_cpu_sys_reg_enable();
+ gic_prio_init();
gic_dist_init();
gic_cpu_init();
gic_enable_nmi_support();
@@ -2072,7 +2134,7 @@ static int __init gic_init_bases(phys_addr_t dist_phys_base,
gic_cpu_pm_init();
if (gic_dist_supports_lpis()) {
- its_init(handle, &gic_data.rdists, gic_data.domain);
+ its_init(handle, &gic_data.rdists, gic_data.domain, dist_prio_irq);
its_cpu_init();
its_lpi_memreserve_init();
} else {
@@ -2197,11 +2259,10 @@ out_put_node:
of_node_put(parts_node);
}
-static void __init gic_of_setup_kvm_info(struct device_node *node)
+static void __init gic_of_setup_kvm_info(struct device_node *node, u32 nr_redist_regions)
{
int ret;
struct resource r;
- u32 gicv_idx;
gic_v3_kvm_info.type = GIC_V3;
@@ -2209,12 +2270,8 @@ static void __init gic_of_setup_kvm_info(struct device_node *node)
if (!gic_v3_kvm_info.maint_irq)
return;
- if (of_property_read_u32(node, "#redistributor-regions",
- &gicv_idx))
- gicv_idx = 1;
-
- gicv_idx += 3; /* Also skip GICD, GICC, GICH */
- ret = of_address_to_resource(node, gicv_idx, &r);
+ /* Also skip GICD, GICC, GICH */
+ ret = of_address_to_resource(node, nr_redist_regions + 3, &r);
if (!ret)
gic_v3_kvm_info.vcpu = r;
@@ -2304,7 +2361,7 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
gic_populate_ppi_partitions(node);
if (static_branch_likely(&supports_deactivate_key))
- gic_of_setup_kvm_info(node);
+ gic_of_setup_kvm_info(node, nr_redist_regions);
return 0;
out_unmap_rdist:
@@ -2356,6 +2413,11 @@ gic_acpi_parse_madt_redist(union acpi_subtable_headers *header,
pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
return -ENOMEM;
}
+
+ if (acpi_get_madt_revision() >= 7 &&
+ (redist->flags & ACPI_MADT_GICR_NON_COHERENT))
+ gic_data.rdists.flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE;
+
gic_request_region(redist->base_address, redist->length, "GICR");
gic_acpi_register_redist(redist->base_address, redist_base);
@@ -2372,14 +2434,34 @@ gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header,
u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
void __iomem *redist_base;
- if (!acpi_gicc_is_usable(gicc))
+	/* Neither enabled nor online capable means it doesn't exist, skip it */
+ if (!(gicc->flags & (ACPI_MADT_ENABLED | ACPI_MADT_GICC_ONLINE_CAPABLE)))
return 0;
+ /*
+ * Capable but disabled CPUs can be brought online later. What about
+ * the redistributor? ACPI doesn't want to say!
+ * Virtual hotplug systems can use the MADT's "always-on" GICR entries.
+ * Otherwise, prevent such CPUs from being brought online.
+ */
+ if (!(gicc->flags & ACPI_MADT_ENABLED)) {
+ int cpu = get_cpu_for_acpi_id(gicc->uid);
+
+ pr_warn("CPU %u's redistributor is inaccessible: this CPU can't be brought online\n", cpu);
+ if (cpu >= 0)
+ cpumask_set_cpu(cpu, &broken_rdists);
+ return 0;
+ }
+
redist_base = ioremap(gicc->gicr_base_address, size);
if (!redist_base)
return -ENOMEM;
gic_request_region(gicc->gicr_base_address, size, "GICR");
+ if (acpi_get_madt_revision() >= 7 &&
+ (gicc->flags & ACPI_MADT_GICC_NON_COHERENT))
+ gic_data.rdists.flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE;
+
gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
return 0;
}
@@ -2420,21 +2502,15 @@ static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header,
/*
* If GICC is enabled and has valid gicr base address, then it means
- * GICR base is presented via GICC
+ * GICR base is presented via GICC. The redistributor is only known to
+ * be accessible if the GICC is marked as enabled. If this bit is not
+ * set, we'd need to add the redistributor at runtime, which isn't
+ * supported.
*/
- if (acpi_gicc_is_usable(gicc) && gicc->gicr_base_address) {
+ if (gicc->flags & ACPI_MADT_ENABLED && gicc->gicr_base_address)
acpi_data.enabled_rdists++;
- return 0;
- }
- /*
- * It's perfectly valid firmware can pass disabled GICC entry, driver
- * should not treat as errors, skip the entry instead of probe fail.
- */
- if (!acpi_gicc_is_usable(gicc))
- return 0;
-
- return -ENODEV;
+ return 0;
}
static int __init gic_acpi_count_gicr_regions(void)
@@ -2490,7 +2566,8 @@ static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *hea
int maint_irq_mode;
static int first_madt = true;
- if (!acpi_gicc_is_usable(gicc))
+ if (!(gicc->flags &
+ (ACPI_MADT_ENABLED | ACPI_MADT_GICC_ONLINE_CAPABLE)))
return 0;
maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
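
The table in gic_prio_init() above describes the single transform the driver now applies when only the distributor side is remapped to the non-secure view: shift the priority right by one and force the MSB. Assuming the __gicv3_prio_to_ns() helper from <linux/irqchip/arm-gic-v3-prio.h> implements exactly what that comment documents, the arithmetic looks like:

    /* Hypothetical restatement of the transform described in gic_prio_init() */
    #define example_prio_to_ns(p)	(0x80 | ((p) >> 1))

    /* e.g. a secure-view IRQ priority of 0xa0 would be programmed into
     * the distributor as example_prio_to_ns(0xa0) == 0xd0. */
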
diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c
index ca32ac19d284..58c28895f8c4 100644
--- a/drivers/irqchip/irq-gic-v4.c
+++ b/drivers/irqchip/irq-gic-v4.c
@@ -97,7 +97,7 @@ bool gic_cpuif_has_vsgi(void)
fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64PFR0_EL1_GIC_SHIFT);
- return fld >= 0x3;
+ return fld >= ID_AA64PFR0_EL1_GIC_V4P1;
}
#else
bool gic_cpuif_has_vsgi(void)
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 412196a7dad5..6503573557fd 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -64,7 +64,7 @@ static void gic_check_cpu_features(void)
union gic_base {
void __iomem *common_base;
- void __percpu * __iomem *percpu_base;
+ void __iomem * __percpu *percpu_base;
};
struct gic_chip_data {
@@ -162,11 +162,6 @@ static inline void __iomem *gic_cpu_base(struct irq_data *d)
return gic_data_cpu_base(gic_data);
}
-static inline unsigned int gic_irq(struct irq_data *d)
-{
- return d->hwirq;
-}
-
static inline bool cascading_gic_irq(struct irq_data *d)
{
void *data = irq_data_get_irq_handler_data(d);
@@ -183,14 +178,16 @@ static inline bool cascading_gic_irq(struct irq_data *d)
*/
static void gic_poke_irq(struct irq_data *d, u32 offset)
{
- u32 mask = 1 << (gic_irq(d) % 32);
- writel_relaxed(mask, gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4);
+ u32 mask = 1 << (irqd_to_hwirq(d) % 32);
+
+ writel_relaxed(mask, gic_dist_base(d) + offset + (irqd_to_hwirq(d) / 32) * 4);
}
static int gic_peek_irq(struct irq_data *d, u32 offset)
{
- u32 mask = 1 << (gic_irq(d) % 32);
- return !!(readl_relaxed(gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4) & mask);
+ u32 mask = 1 << (irqd_to_hwirq(d) % 32);
+
+ return !!(readl_relaxed(gic_dist_base(d) + offset + (irqd_to_hwirq(d) / 32) * 4) & mask);
}
static void gic_mask_irq(struct irq_data *d)
@@ -220,7 +217,7 @@ static void gic_unmask_irq(struct irq_data *d)
static void gic_eoi_irq(struct irq_data *d)
{
- u32 hwirq = gic_irq(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
if (hwirq < 16)
hwirq = this_cpu_read(sgi_intid);
@@ -230,7 +227,7 @@ static void gic_eoi_irq(struct irq_data *d)
static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
- u32 hwirq = gic_irq(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
/* Do not deactivate an IRQ forwarded to a vcpu. */
if (irqd_is_forwarded_to_vcpu(d))
@@ -293,8 +290,8 @@ static int gic_irq_get_irqchip_state(struct irq_data *d,
static int gic_set_type(struct irq_data *d, unsigned int type)
{
+ irq_hw_number_t gicirq = irqd_to_hwirq(d);
void __iomem *base = gic_dist_base(d);
- unsigned int gicirq = gic_irq(d);
int ret;
/* Interrupt configuration for SGIs can't be changed */
@@ -306,10 +303,10 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
type != IRQ_TYPE_EDGE_RISING)
return -EINVAL;
- ret = gic_configure_irq(gicirq, type, base + GIC_DIST_CONFIG, NULL);
+ ret = gic_configure_irq(gicirq, type, base + GIC_DIST_CONFIG);
if (ret && gicirq < 32) {
/* Misconfigured PPIs are usually not fatal */
- pr_warn("GIC: PPI%d is secure or misconfigured\n", gicirq - 16);
+ pr_warn("GIC: PPI%ld is secure or misconfigured\n", gicirq - 16);
ret = 0;
}
@@ -319,7 +316,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
/* Only interrupts on the primary GIC can be forwarded to a vcpu. */
- if (cascading_gic_irq(d) || gic_irq(d) < 16)
+ if (cascading_gic_irq(d) || irqd_to_hwirq(d) < 16)
return -EINVAL;
if (vcpu)
@@ -403,7 +400,7 @@ static void gic_irq_print_chip(struct irq_data *d, struct seq_file *p)
struct gic_chip_data *gic = irq_data_get_irq_chip_data(d);
if (gic->domain->pm_dev)
- seq_printf(p, gic->domain->pm_dev->of_node->name);
+ seq_puts(p, gic->domain->pm_dev->of_node->name);
else
seq_printf(p, "GIC-%d", (int)(gic - &gic_data[0]));
}
@@ -482,7 +479,7 @@ static void gic_dist_init(struct gic_chip_data *gic)
for (i = 32; i < gic_irqs; i += 4)
writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
- gic_dist_config(base, gic_irqs, NULL);
+ gic_dist_config(base, gic_irqs, GICD_INT_DEF_PRI);
writel_relaxed(GICD_ENABLE, base + GIC_DIST_CTRL);
}
@@ -519,7 +516,7 @@ static int gic_cpu_init(struct gic_chip_data *gic)
gic_cpu_map[i] &= ~cpu_mask;
}
- gic_cpu_config(dist_base, 32, NULL);
+ gic_cpu_config(dist_base, 32, GICD_INT_DEF_PRI);
writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK);
gic_cpu_if_up(gic);
@@ -611,7 +608,7 @@ void gic_dist_restore(struct gic_chip_data *gic)
dist_base + GIC_DIST_CONFIG + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
- writel_relaxed(GICD_INT_DEF_PRI_X4,
+ writel_relaxed(REPEAT_BYTE_U32(GICD_INT_DEF_PRI),
dist_base + GIC_DIST_PRI + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
@@ -700,7 +697,7 @@ void gic_cpu_restore(struct gic_chip_data *gic)
writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);
for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
- writel_relaxed(GICD_INT_DEF_PRI_X4,
+ writel_relaxed(REPEAT_BYTE_U32(GICD_INT_DEF_PRI),
dist_base + GIC_DIST_PRI + i * 4);
writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK);
@@ -796,7 +793,7 @@ static void rmw_writeb(u8 bval, void __iomem *addr)
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
bool force)
{
- void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + gic_irq(d);
+ void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + irqd_to_hwirq(d);
struct gic_chip_data *gic = irq_data_get_irq_chip_data(d);
unsigned int cpu;
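
GICD_INT_DEF_PRI_X4 is dropped in favour of REPEAT_BYTE_U32(), which replicates one byte across all four lanes of a u32; that matches the GIC_DIST_PRI layout of four 8-bit priorities per register. A small sketch, assuming the macro lives in <linux/wordpart.h>:

    #include <linux/wordpart.h>	/* REPEAT_BYTE_U32(), assumed location */

    /* Build the four-priorities-per-word value the distributor expects */
    static inline u32 example_prio_x4(u8 prio)
    {
            return REPEAT_BYTE_U32(prio);	/* 0xa0 -> 0xa0a0a0a0 */
    }
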
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index 46161f6ff289..31c3f70a5d5e 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -130,7 +130,7 @@ static int hip04_irq_set_type(struct irq_data *d, unsigned int type)
raw_spin_lock(&irq_controller_lock);
- ret = gic_configure_irq(irq, type, base + GIC_DIST_CONFIG, NULL);
+ ret = gic_configure_irq(irq, type, base + GIC_DIST_CONFIG);
if (ret && irq < 32) {
/* Misconfigured PPIs are usually not fatal */
pr_warn("GIC: PPI%d is secure or misconfigured\n", irq - 16);
@@ -260,7 +260,7 @@ static void __init hip04_irq_dist_init(struct hip04_irq_data *intc)
for (i = 32; i < nr_irqs; i += 2)
writel_relaxed(cpumask, base + GIC_DIST_TARGET + ((i * 2) & ~3));
- gic_dist_config(base, nr_irqs, NULL);
+ gic_dist_config(base, nr_irqs, GICD_INT_DEF_PRI);
writel_relaxed(1, base + GIC_DIST_CTRL);
}
@@ -287,7 +287,7 @@ static void hip04_irq_cpu_init(struct hip04_irq_data *intc)
if (i != cpu)
hip04_cpu_map[i] &= ~cpu_mask;
- gic_cpu_config(dist_base, 32, NULL);
+ gic_cpu_config(dist_base, 32, GICD_INT_DEF_PRI);
writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
writel_relaxed(1, base + GIC_CPU_CTRL);
diff --git a/drivers/irqchip/irq-imgpdc.c b/drivers/irqchip/irq-imgpdc.c
index 5831be454673..85f80bac0961 100644
--- a/drivers/irqchip/irq-imgpdc.c
+++ b/drivers/irqchip/irq-imgpdc.c
@@ -461,12 +461,11 @@ err_generic:
return ret;
}
-static int pdc_intc_remove(struct platform_device *pdev)
+static void pdc_intc_remove(struct platform_device *pdev)
{
struct pdc_intc_priv *priv = platform_get_drvdata(pdev);
irq_domain_remove(priv->domain);
- return 0;
}
static const struct of_device_id pdc_intc_match[] = {
@@ -479,8 +478,8 @@ static struct platform_driver pdc_intc_driver = {
.name = "pdc-intc",
.of_match_table = pdc_intc_match,
},
- .probe = pdc_intc_probe,
- .remove = pdc_intc_remove,
+ .probe = pdc_intc_probe,
+ .remove = pdc_intc_remove,
};
static int __init pdc_intc_init(void)
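
Several drivers in this series move to the void-returning platform .remove() callback: the int return value was never acted upon by the driver core, so reporting an error there was misleading. The shape of a converted callback, with hypothetical example_* names:

    #include <linux/irqdomain.h>
    #include <linux/platform_device.h>

    struct example_priv {
            struct irq_domain *domain;
    };

    static void example_remove(struct platform_device *pdev)
    {
            struct example_priv *priv = platform_get_drvdata(pdev);

            /* No return value: there is nobody left to handle an error */
            irq_domain_remove(priv->domain);
    }
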
diff --git a/drivers/irqchip/irq-imx-intmux.c b/drivers/irqchip/irq-imx-intmux.c
index aa041e4dfee0..787543d07565 100644
--- a/drivers/irqchip/irq-imx-intmux.c
+++ b/drivers/irqchip/irq-imx-intmux.c
@@ -166,6 +166,10 @@ static int imx_intmux_irq_select(struct irq_domain *d, struct irq_fwspec *fwspec
if (fwspec->fwnode != d->fwnode)
return false;
+ /* Handle pure domain searches */
+ if (!fwspec->param_count)
+ return d->bus_token == bus_token;
+
return irqchip_data->chanidx == fwspec->param[1];
}
@@ -282,7 +286,7 @@ out:
return ret;
}
-static int imx_intmux_remove(struct platform_device *pdev)
+static void imx_intmux_remove(struct platform_device *pdev)
{
struct intmux_data *data = platform_get_drvdata(pdev);
int i;
@@ -298,8 +302,6 @@ static int imx_intmux_remove(struct platform_device *pdev)
}
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -354,11 +356,11 @@ static const struct of_device_id imx_intmux_id[] = {
static struct platform_driver imx_intmux_driver = {
.driver = {
- .name = "imx-intmux",
- .of_match_table = imx_intmux_id,
- .pm = &imx_intmux_pm_ops,
+ .name = "imx-intmux",
+ .of_match_table = imx_intmux_id,
+ .pm = &imx_intmux_pm_ops,
},
- .probe = imx_intmux_probe,
- .remove = imx_intmux_remove,
+ .probe = imx_intmux_probe,
+ .remove = imx_intmux_remove,
};
builtin_platform_driver(imx_intmux_driver);
diff --git a/drivers/irqchip/irq-imx-irqsteer.c b/drivers/irqchip/irq-imx-irqsteer.c
index bd9543314539..b0e9788c0045 100644
--- a/drivers/irqchip/irq-imx-irqsteer.c
+++ b/drivers/irqchip/irq-imx-irqsteer.c
@@ -36,6 +36,7 @@ struct irqsteer_data {
int channel;
struct irq_domain *domain;
u32 *saved_reg;
+ struct device *dev;
};
static int imx_irqsteer_get_reg_index(struct irqsteer_data *data,
@@ -72,10 +73,26 @@ static void imx_irqsteer_irq_mask(struct irq_data *d)
raw_spin_unlock_irqrestore(&data->lock, flags);
}
+static void imx_irqsteer_irq_bus_lock(struct irq_data *d)
+{
+ struct irqsteer_data *data = d->chip_data;
+
+ pm_runtime_get_sync(data->dev);
+}
+
+static void imx_irqsteer_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct irqsteer_data *data = d->chip_data;
+
+ pm_runtime_put_autosuspend(data->dev);
+}
+
static const struct irq_chip imx_irqsteer_irq_chip = {
- .name = "irqsteer",
- .irq_mask = imx_irqsteer_irq_mask,
- .irq_unmask = imx_irqsteer_irq_unmask,
+ .name = "irqsteer",
+ .irq_mask = imx_irqsteer_irq_mask,
+ .irq_unmask = imx_irqsteer_irq_unmask,
+ .irq_bus_lock = imx_irqsteer_irq_bus_lock,
+ .irq_bus_sync_unlock = imx_irqsteer_irq_bus_sync_unlock,
};
static int imx_irqsteer_irq_map(struct irq_domain *h, unsigned int irq,
@@ -150,6 +167,7 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
if (!data)
return -ENOMEM;
+ data->dev = &pdev->dev;
data->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->regs)) {
dev_err(&pdev->dev, "failed to initialize reg\n");
@@ -231,7 +249,7 @@ out:
return ret;
}
-static int imx_irqsteer_remove(struct platform_device *pdev)
+static void imx_irqsteer_remove(struct platform_device *pdev)
{
struct irqsteer_data *irqsteer_data = platform_get_drvdata(pdev);
int i;
@@ -243,8 +261,6 @@ static int imx_irqsteer_remove(struct platform_device *pdev)
irq_domain_remove(irqsteer_data->domain);
clk_disable_unprepare(irqsteer_data->ipg_clk);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -307,11 +323,11 @@ static const struct of_device_id imx_irqsteer_dt_ids[] = {
static struct platform_driver imx_irqsteer_driver = {
.driver = {
- .name = "imx-irqsteer",
- .of_match_table = imx_irqsteer_dt_ids,
- .pm = &imx_irqsteer_pm_ops,
+ .name = "imx-irqsteer",
+ .of_match_table = imx_irqsteer_dt_ids,
+ .pm = &imx_irqsteer_pm_ops,
},
- .probe = imx_irqsteer_probe,
- .remove = imx_irqsteer_remove,
+ .probe = imx_irqsteer_probe,
+ .remove = imx_irqsteer_remove,
};
builtin_platform_driver(imx_irqsteer_driver);
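
The irqsteer change hooks irq_bus_lock()/irq_bus_sync_unlock(), the slow-bus pair that the IRQ core invokes outside the raw irq_desc lock; that makes them a legal place for pm_runtime_get_sync(), which may sleep. A hedged sketch of the pairing, with hypothetical example_* names:

    #include <linux/irq.h>
    #include <linux/pm_runtime.h>

    struct example_data {
            struct device *dev;
    };

    static void example_irq_bus_lock(struct irq_data *d)
    {
            struct example_data *data = irq_data_get_irq_chip_data(d);

            pm_runtime_get_sync(data->dev);	/* may sleep: allowed here */
    }

    static void example_irq_bus_sync_unlock(struct irq_data *d)
    {
            struct example_data *data = irq_data_get_irq_chip_data(d);

            /* Drop the reference once the register updates are done */
            pm_runtime_put_autosuspend(data->dev);
    }
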
diff --git a/drivers/irqchip/irq-imx-mu-msi.c b/drivers/irqchip/irq-imx-mu-msi.c
index 90d41c1407ac..4342a21de1eb 100644
--- a/drivers/irqchip/irq-imx-mu-msi.c
+++ b/drivers/irqchip/irq-imx-mu-msi.c
@@ -24,6 +24,8 @@
#include <linux/pm_domain.h>
#include <linux/spinlock.h>
+#include "irq-msi-lib.h"
+
#define IMX_MU_CHANS 4
enum imx_mu_xcr {
@@ -114,20 +116,6 @@ static void imx_mu_msi_parent_ack_irq(struct irq_data *data)
imx_mu_read(msi_data, msi_data->cfg->xRR + data->hwirq * 4);
}
-static struct irq_chip imx_mu_msi_irq_chip = {
- .name = "MU-MSI",
- .irq_ack = irq_chip_ack_parent,
-};
-
-static struct msi_domain_ops imx_mu_msi_irq_ops = {
-};
-
-static struct msi_domain_info imx_mu_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
- .ops = &imx_mu_msi_irq_ops,
- .chip = &imx_mu_msi_irq_chip,
-};
-
static void imx_mu_msi_parent_compose_msg(struct irq_data *data,
struct msi_msg *msg)
{
@@ -195,6 +183,7 @@ static void imx_mu_msi_domain_irq_free(struct irq_domain *domain,
}
static const struct irq_domain_ops imx_mu_msi_domain_ops = {
+ .select = msi_lib_irq_domain_select,
.alloc = imx_mu_msi_domain_irq_alloc,
.free = imx_mu_msi_domain_irq_free,
};
@@ -216,35 +205,38 @@ static void imx_mu_msi_irq_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
+#define IMX_MU_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_PARENT_PM_DEV)
+
+#define IMX_MU_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK)
+
+static const struct msi_parent_ops imx_mu_msi_parent_ops = {
+ .supported_flags = IMX_MU_MSI_FLAGS_SUPPORTED,
+ .required_flags = IMX_MU_MSI_FLAGS_REQUIRED,
+ .bus_select_token = DOMAIN_BUS_NEXUS,
+ .bus_select_mask = MATCH_PLATFORM_MSI,
+ .prefix = "MU-MSI-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
+
static int imx_mu_msi_domains_init(struct imx_mu_msi *msi_data, struct device *dev)
{
struct fwnode_handle *fwnodes = dev_fwnode(dev);
struct irq_domain *parent;
/* Initialize MSI domain parent */
- parent = irq_domain_create_linear(fwnodes,
- IMX_MU_CHANS,
- &imx_mu_msi_domain_ops,
- msi_data);
+ parent = irq_domain_create_linear(fwnodes, IMX_MU_CHANS,
+ &imx_mu_msi_domain_ops, msi_data);
if (!parent) {
dev_err(dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
-
- msi_data->msi_domain = platform_msi_create_irq_domain(fwnodes,
- &imx_mu_msi_domain_info,
- parent);
-
- if (!msi_data->msi_domain) {
- dev_err(dev, "failed to create MSI domain\n");
- irq_domain_remove(parent);
- return -ENOMEM;
- }
-
- irq_domain_set_pm_device(msi_data->msi_domain, dev);
-
+ parent->dev = parent->pm_dev = dev;
+ parent->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
+ parent->msi_parent_ops = &imx_mu_msi_parent_ops;
return 0;
}
diff --git a/drivers/irqchip/irq-ixp4xx.c b/drivers/irqchip/irq-ixp4xx.c
index 5fba907b9052..f23b02f62a5c 100644
--- a/drivers/irqchip/irq-ixp4xx.c
+++ b/drivers/irqchip/irq-ixp4xx.c
@@ -105,8 +105,7 @@ static void ixp4xx_irq_unmask(struct irq_data *d)
}
}
-static asmlinkage void __exception_irq_entry
-ixp4xx_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry ixp4xx_handle_irq(struct pt_regs *regs)
{
struct ixp4xx_irq *ixi = &ixirq;
unsigned long status;
diff --git a/drivers/irqchip/irq-jcore-aic.c b/drivers/irqchip/irq-jcore-aic.c
index b9dcc8e78c75..1f613eb7b7f0 100644
--- a/drivers/irqchip/irq-jcore-aic.c
+++ b/drivers/irqchip/irq-jcore-aic.c
@@ -38,7 +38,7 @@ static struct irq_chip jcore_aic;
static void handle_jcore_irq(struct irq_desc *desc)
{
if (irqd_is_per_cpu(irq_desc_get_irq_data(desc)))
- handle_percpu_irq(desc);
+ handle_percpu_devid_irq(desc);
else
handle_simple_irq(desc);
}
diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c
index a36396db4b08..37e1a03fcbb4 100644
--- a/drivers/irqchip/irq-keystone.c
+++ b/drivers/irqchip/irq-keystone.c
@@ -141,18 +141,11 @@ static int keystone_irq_probe(struct platform_device *pdev)
if (!kirq)
return -ENOMEM;
- kirq->devctrl_regs =
- syscon_regmap_lookup_by_phandle(np, "ti,syscon-dev");
+ kirq->devctrl_regs = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-dev",
+ 1, &kirq->devctrl_offset);
if (IS_ERR(kirq->devctrl_regs))
return PTR_ERR(kirq->devctrl_regs);
- ret = of_property_read_u32_index(np, "ti,syscon-dev", 1,
- &kirq->devctrl_offset);
- if (ret) {
- dev_err(dev, "couldn't read the devctrl_offset offset!\n");
- return ret;
- }
-
kirq->irq = platform_get_irq(pdev, 0);
if (kirq->irq < 0)
return kirq->irq;
@@ -190,7 +183,7 @@ static int keystone_irq_probe(struct platform_device *pdev)
return 0;
}
-static int keystone_irq_remove(struct platform_device *pdev)
+static void keystone_irq_remove(struct platform_device *pdev)
{
struct keystone_irq_device *kirq = platform_get_drvdata(pdev);
int hwirq;
@@ -201,7 +194,6 @@ static int keystone_irq_remove(struct platform_device *pdev)
irq_dispose_mapping(irq_find_mapping(kirq->irqd, hwirq));
irq_domain_remove(kirq->irqd);
- return 0;
}
static const struct of_device_id keystone_irq_dt_ids[] = {
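
The keystone probe collapses a regmap lookup plus a manual of_property_read_u32_index() into one syscon_regmap_lookup_by_phandle_args() call, which returns the regmap and decodes the phandle's argument cells in one go. A sketch, assuming the same one-cell "ti,syscon-dev" binding:

    #include <linux/err.h>
    #include <linux/mfd/syscon.h>
    #include <linux/of.h>
    #include <linux/regmap.h>

    static int example_get_devctrl(struct device_node *np,
                                   struct regmap **map, unsigned int *offset)
    {
            /* One argument cell follows the phandle: the register offset */
            *map = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-dev",
                                                        1, offset);
            return PTR_ERR_OR_ZERO(*map);
    }
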
diff --git a/drivers/irqchip/irq-lan966x-oic.c b/drivers/irqchip/irq-lan966x-oic.c
new file mode 100644
index 000000000000..41ac880e3b87
--- /dev/null
+++ b/drivers/irqchip/irq-lan966x-oic.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the Microchip LAN966x outbound interrupt controller
+ *
+ * Copyright (c) 2024 Microchip Technology Inc. and its subsidiaries.
+ *
+ * Authors:
+ * Horatiu Vultur <horatiu.vultur@microchip.com>
+ * Clément Léger <clement.leger@bootlin.com>
+ * Herve Codina <herve.codina@bootlin.com>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip.h>
+#include <linux/irq.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+struct lan966x_oic_chip_regs {
+ int reg_off_ena_set;
+ int reg_off_ena_clr;
+ int reg_off_sticky;
+ int reg_off_ident;
+ int reg_off_map;
+};
+
+struct lan966x_oic_data {
+ void __iomem *regs;
+ int irq;
+};
+
+#define LAN966X_OIC_NR_IRQ 86
+
+/* Interrupt sticky status */
+#define LAN966X_OIC_INTR_STICKY 0x30
+#define LAN966X_OIC_INTR_STICKY1 0x34
+#define LAN966X_OIC_INTR_STICKY2 0x38
+
+/* Interrupt enable */
+#define LAN966X_OIC_INTR_ENA 0x48
+#define LAN966X_OIC_INTR_ENA1 0x4c
+#define LAN966X_OIC_INTR_ENA2 0x50
+
+/* Atomic clear of interrupt enable */
+#define LAN966X_OIC_INTR_ENA_CLR 0x54
+#define LAN966X_OIC_INTR_ENA_CLR1 0x58
+#define LAN966X_OIC_INTR_ENA_CLR2 0x5c
+
+/* Atomic set of interrupt */
+#define LAN966X_OIC_INTR_ENA_SET 0x60
+#define LAN966X_OIC_INTR_ENA_SET1 0x64
+#define LAN966X_OIC_INTR_ENA_SET2 0x68
+
+/* Mapping of source to destination interrupts (_n = 0..8) */
+#define LAN966X_OIC_DST_INTR_MAP(_n) (0x78 + (_n) * 4)
+#define LAN966X_OIC_DST_INTR_MAP1(_n) (0x9c + (_n) * 4)
+#define LAN966X_OIC_DST_INTR_MAP2(_n) (0xc0 + (_n) * 4)
+
+/* Currently active interrupt sources per destination (_n = 0..8) */
+#define LAN966X_OIC_DST_INTR_IDENT(_n) (0xe4 + (_n) * 4)
+#define LAN966X_OIC_DST_INTR_IDENT1(_n) (0x108 + (_n) * 4)
+#define LAN966X_OIC_DST_INTR_IDENT2(_n) (0x12c + (_n) * 4)
+
+static unsigned int lan966x_oic_irq_startup(struct irq_data *data)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+ struct irq_chip_type *ct = irq_data_get_chip_type(data);
+ struct lan966x_oic_chip_regs *chip_regs = gc->private;
+ u32 map;
+
+ irq_gc_lock(gc);
+
+ /* Map the source interrupt to the destination */
+ map = irq_reg_readl(gc, chip_regs->reg_off_map);
+ map |= data->mask;
+ irq_reg_writel(gc, map, chip_regs->reg_off_map);
+
+ irq_gc_unlock(gc);
+
+ ct->chip.irq_ack(data);
+ ct->chip.irq_unmask(data);
+
+ return 0;
+}
+
+static void lan966x_oic_irq_shutdown(struct irq_data *data)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+ struct irq_chip_type *ct = irq_data_get_chip_type(data);
+ struct lan966x_oic_chip_regs *chip_regs = gc->private;
+ u32 map;
+
+ ct->chip.irq_mask(data);
+
+ irq_gc_lock(gc);
+
+ /* Unmap the interrupt */
+ map = irq_reg_readl(gc, chip_regs->reg_off_map);
+ map &= ~data->mask;
+ irq_reg_writel(gc, map, chip_regs->reg_off_map);
+
+ irq_gc_unlock(gc);
+}
+
+static int lan966x_oic_irq_set_type(struct irq_data *data,
+ unsigned int flow_type)
+{
+ if (flow_type != IRQ_TYPE_LEVEL_HIGH) {
+ pr_err("lan966x oic doesn't support flow type %d\n", flow_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void lan966x_oic_irq_handler_domain(struct irq_domain *d, u32 first_irq)
+{
+ struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, first_irq);
+ struct lan966x_oic_chip_regs *chip_regs = gc->private;
+ unsigned long ident;
+ unsigned int hwirq;
+
+ ident = irq_reg_readl(gc, chip_regs->reg_off_ident);
+ if (!ident)
+ return;
+
+ for_each_set_bit(hwirq, &ident, 32)
+ generic_handle_domain_irq(d, hwirq + first_irq);
+}
+
+static void lan966x_oic_irq_handler(struct irq_desc *desc)
+{
+ struct irq_domain *d = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ chained_irq_enter(chip, desc);
+ lan966x_oic_irq_handler_domain(d, 0);
+ lan966x_oic_irq_handler_domain(d, 32);
+ lan966x_oic_irq_handler_domain(d, 64);
+ chained_irq_exit(chip, desc);
+}
+
+static struct lan966x_oic_chip_regs lan966x_oic_chip_regs[3] = {
+ {
+ .reg_off_ena_set = LAN966X_OIC_INTR_ENA_SET,
+ .reg_off_ena_clr = LAN966X_OIC_INTR_ENA_CLR,
+ .reg_off_sticky = LAN966X_OIC_INTR_STICKY,
+ .reg_off_ident = LAN966X_OIC_DST_INTR_IDENT(0),
+ .reg_off_map = LAN966X_OIC_DST_INTR_MAP(0),
+ }, {
+ .reg_off_ena_set = LAN966X_OIC_INTR_ENA_SET1,
+ .reg_off_ena_clr = LAN966X_OIC_INTR_ENA_CLR1,
+ .reg_off_sticky = LAN966X_OIC_INTR_STICKY1,
+ .reg_off_ident = LAN966X_OIC_DST_INTR_IDENT1(0),
+ .reg_off_map = LAN966X_OIC_DST_INTR_MAP1(0),
+ }, {
+ .reg_off_ena_set = LAN966X_OIC_INTR_ENA_SET2,
+ .reg_off_ena_clr = LAN966X_OIC_INTR_ENA_CLR2,
+ .reg_off_sticky = LAN966X_OIC_INTR_STICKY2,
+ .reg_off_ident = LAN966X_OIC_DST_INTR_IDENT2(0),
+ .reg_off_map = LAN966X_OIC_DST_INTR_MAP2(0),
+ }
+};
+
+static int lan966x_oic_chip_init(struct irq_chip_generic *gc)
+{
+ struct lan966x_oic_data *lan966x_oic = gc->domain->host_data;
+ struct lan966x_oic_chip_regs *chip_regs;
+
+ chip_regs = &lan966x_oic_chip_regs[gc->irq_base / 32];
+
+ gc->reg_base = lan966x_oic->regs;
+ gc->chip_types[0].regs.enable = chip_regs->reg_off_ena_set;
+ gc->chip_types[0].regs.disable = chip_regs->reg_off_ena_clr;
+ gc->chip_types[0].regs.ack = chip_regs->reg_off_sticky;
+ gc->chip_types[0].chip.irq_startup = lan966x_oic_irq_startup;
+ gc->chip_types[0].chip.irq_shutdown = lan966x_oic_irq_shutdown;
+ gc->chip_types[0].chip.irq_set_type = lan966x_oic_irq_set_type;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
+ gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
+ gc->private = chip_regs;
+
+ /* Disable all interrupts handled by this chip */
+ irq_reg_writel(gc, ~0U, chip_regs->reg_off_ena_clr);
+
+ return 0;
+}
+
+static void lan966x_oic_chip_exit(struct irq_chip_generic *gc)
+{
+ /* Disable and ack all interrupts handled by this chip */
+ irq_reg_writel(gc, ~0U, gc->chip_types[0].regs.disable);
+ irq_reg_writel(gc, ~0U, gc->chip_types[0].regs.ack);
+}
+
+static int lan966x_oic_domain_init(struct irq_domain *d)
+{
+ struct lan966x_oic_data *lan966x_oic = d->host_data;
+
+ irq_set_chained_handler_and_data(lan966x_oic->irq, lan966x_oic_irq_handler, d);
+
+ return 0;
+}
+
+static void lan966x_oic_domain_exit(struct irq_domain *d)
+{
+ struct lan966x_oic_data *lan966x_oic = d->host_data;
+
+ irq_set_chained_handler_and_data(lan966x_oic->irq, NULL, NULL);
+}
+
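+/*
+ * Everything is wired up through devm_irq_domain_instantiate(): the generic
+ * chips are built from dgc_info, and IRQ_DOMAIN_FLAG_DESTROY_GC together
+ * with the init/exit hooks handles teardown, so no remove() callback is
+ * needed.
+ */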
+static int lan966x_oic_probe(struct platform_device *pdev)
+{
+ struct irq_domain_chip_generic_info dgc_info = {
+ .name = "lan966x-oic",
+ .handler = handle_level_irq,
+ .irqs_per_chip = 32,
+ .num_ct = 1,
+ .init = lan966x_oic_chip_init,
+ .exit = lan966x_oic_chip_exit,
+ };
+ struct irq_domain_info d_info = {
+ .fwnode = of_node_to_fwnode(pdev->dev.of_node),
+ .domain_flags = IRQ_DOMAIN_FLAG_DESTROY_GC,
+ .size = LAN966X_OIC_NR_IRQ,
+ .hwirq_max = LAN966X_OIC_NR_IRQ,
+ .ops = &irq_generic_chip_ops,
+ .dgc_info = &dgc_info,
+ .init = lan966x_oic_domain_init,
+ .exit = lan966x_oic_domain_exit,
+ };
+ struct lan966x_oic_data *lan966x_oic;
+ struct device *dev = &pdev->dev;
+ struct irq_domain *domain;
+
+ lan966x_oic = devm_kmalloc(dev, sizeof(*lan966x_oic), GFP_KERNEL);
+ if (!lan966x_oic)
+ return -ENOMEM;
+
+ lan966x_oic->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(lan966x_oic->regs))
+ return dev_err_probe(dev, PTR_ERR(lan966x_oic->regs),
+ "failed to map resource\n");
+
+ lan966x_oic->irq = platform_get_irq(pdev, 0);
+ if (lan966x_oic->irq < 0)
+ return dev_err_probe(dev, lan966x_oic->irq, "failed to get the IRQ\n");
+
+ d_info.host_data = lan966x_oic;
+ domain = devm_irq_domain_instantiate(dev, &d_info);
+ if (IS_ERR(domain))
+ return dev_err_probe(dev, PTR_ERR(domain),
+ "failed to instantiate the IRQ domain\n");
+ return 0;
+}
+
+static const struct of_device_id lan966x_oic_of_match[] = {
+ { .compatible = "microchip,lan966x-oic" },
+ {} /* sentinel */
+};
+MODULE_DEVICE_TABLE(of, lan966x_oic_of_match);
+
+static struct platform_driver lan966x_oic_driver = {
+ .probe = lan966x_oic_probe,
+ .driver = {
+ .name = "lan966x-oic",
+ .of_match_table = lan966x_oic_of_match,
+ },
+};
+module_platform_driver(lan966x_oic_driver);
+
+MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
+MODULE_DESCRIPTION("Microchip LAN966x OIC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/irqchip/irq-loongarch-avec.c b/drivers/irqchip/irq-loongarch-avec.c
new file mode 100644
index 000000000000..80e55955a29f
--- /dev/null
+++ b/drivers/irqchip/irq-loongarch-avec.c
@@ -0,0 +1,433 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2024 Loongson Technologies, Inc.
+ */
+
+#include <linux/cpuhotplug.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/msi.h>
+#include <linux/radix-tree.h>
+#include <linux/spinlock.h>
+
+#include <asm/loongarch.h>
+#include <asm/setup.h>
+
+#include "irq-msi-lib.h"
+#include "irq-loongson.h"
+
+#define VECTORS_PER_REG 64
+#define IRR_VECTOR_MASK 0xffUL
+#define IRR_INVALID_MASK 0x80000000UL
+#define AVEC_MSG_OFFSET 0x100000
+
+#ifdef CONFIG_SMP
+struct pending_list {
+ struct list_head head;
+};
+
+static struct cpumask intersect_mask;
+static DEFINE_PER_CPU(struct pending_list, pending_list);
+#endif
+
+static DEFINE_PER_CPU(struct irq_desc * [NR_VECTORS], irq_map);
+
+struct avecintc_chip {
+ raw_spinlock_t lock;
+ struct fwnode_handle *fwnode;
+ struct irq_domain *domain;
+ struct irq_matrix *vector_matrix;
+ phys_addr_t msi_base_addr;
+};
+
+static struct avecintc_chip loongarch_avec;
+
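+/*
+ * cpu/vec describe the current routing of an interrupt and prev_cpu/prev_vec
+ * the routing before the last affinity change; "moving" is set while the old
+ * vector is still waiting to be reclaimed on the previous CPU.
+ */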
+struct avecintc_data {
+ struct list_head entry;
+ unsigned int cpu;
+ unsigned int vec;
+ unsigned int prev_cpu;
+ unsigned int prev_vec;
+ unsigned int moving;
+};
+
+static inline void avecintc_enable(void)
+{
+ u64 value;
+
+ value = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC);
+ value |= IOCSR_MISC_FUNC_AVEC_EN;
+ iocsr_write64(value, LOONGARCH_IOCSR_MISC_FUNC);
+}
+
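+/*
+ * No per-vector ack/mask/enable registers are touched here: vectors are
+ * managed entirely through the irq matrix and the MSI message, so these
+ * chip callbacks are intentionally empty.
+ */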
+static inline void avecintc_ack_irq(struct irq_data *d)
+{
+}
+
+static inline void avecintc_mask_irq(struct irq_data *d)
+{
+}
+
+static inline void avecintc_unmask_irq(struct irq_data *d)
+{
+}
+
+#ifdef CONFIG_SMP
+static inline void pending_list_init(int cpu)
+{
+ struct pending_list *plist = per_cpu_ptr(&pending_list, cpu);
+
+ INIT_LIST_HEAD(&plist->head);
+}
+
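+/*
+ * When an interrupt is moved to a new CPU/vector, queue it on the pending
+ * list of its previous CPU and kick that CPU with ACTION_CLEAR_VECTOR;
+ * complete_irq_moving() then releases the old vector once it is no longer
+ * in service there.
+ */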
+static void avecintc_sync(struct avecintc_data *adata)
+{
+ struct pending_list *plist;
+
+ if (cpu_online(adata->prev_cpu)) {
+ plist = per_cpu_ptr(&pending_list, adata->prev_cpu);
+ list_add_tail(&adata->entry, &plist->head);
+ adata->moving = 1;
+ mp_ops.send_ipi_single(adata->prev_cpu, ACTION_CLEAR_VECTOR);
+ }
+}
+
+static int avecintc_set_affinity(struct irq_data *data, const struct cpumask *dest, bool force)
+{
+ int cpu, ret, vector;
+ struct avecintc_data *adata;
+
+ scoped_guard(raw_spinlock, &loongarch_avec.lock) {
+ adata = irq_data_get_irq_chip_data(data);
+
+ if (adata->moving)
+ return -EBUSY;
+
+ if (cpu_online(adata->cpu) && cpumask_test_cpu(adata->cpu, dest))
+ return 0;
+
+ cpumask_and(&intersect_mask, dest, cpu_online_mask);
+
+ ret = irq_matrix_alloc(loongarch_avec.vector_matrix, &intersect_mask, false, &cpu);
+ if (ret < 0)
+ return ret;
+
+ vector = ret;
+ adata->cpu = cpu;
+ adata->vec = vector;
+ per_cpu_ptr(irq_map, adata->cpu)[adata->vec] = irq_data_to_desc(data);
+ avecintc_sync(adata);
+ }
+
+ irq_data_update_effective_affinity(data, cpumask_of(cpu));
+
+ return IRQ_SET_MASK_OK;
+}
+
+static int avecintc_cpu_online(unsigned int cpu)
+{
+ if (!loongarch_avec.vector_matrix)
+ return 0;
+
+ guard(raw_spinlock)(&loongarch_avec.lock);
+
+ avecintc_enable();
+
+ irq_matrix_online(loongarch_avec.vector_matrix);
+
+ pending_list_init(cpu);
+
+ return 0;
+}
+
+static int avecintc_cpu_offline(unsigned int cpu)
+{
+ struct pending_list *plist = per_cpu_ptr(&pending_list, cpu);
+
+ if (!loongarch_avec.vector_matrix)
+ return 0;
+
+ guard(raw_spinlock)(&loongarch_avec.lock);
+
+ if (!list_empty(&plist->head))
+ pr_warn("CPU#%d vector is busy\n", cpu);
+
+ irq_matrix_offline(loongarch_avec.vector_matrix);
+
+ return 0;
+}
+
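+/*
+ * Runs on the previous target CPU in response to ACTION_CLEAR_VECTOR. An
+ * entry is only reclaimed once its old vector has drained from the
+ * in-service registers; while the corresponding ISR bit is still set the
+ * CPU re-IPIs itself and retries later.
+ */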
+void complete_irq_moving(void)
+{
+ struct pending_list *plist = this_cpu_ptr(&pending_list);
+ struct avecintc_data *adata, *tdata;
+ int cpu, vector, bias;
+ uint64_t isr;
+
+ guard(raw_spinlock)(&loongarch_avec.lock);
+
+ list_for_each_entry_safe(adata, tdata, &plist->head, entry) {
+ cpu = adata->prev_cpu;
+ vector = adata->prev_vec;
+ bias = vector / VECTORS_PER_REG;
+ switch (bias) {
+ case 0:
+ isr = csr_read64(LOONGARCH_CSR_ISR0);
+ break;
+ case 1:
+ isr = csr_read64(LOONGARCH_CSR_ISR1);
+ break;
+ case 2:
+ isr = csr_read64(LOONGARCH_CSR_ISR2);
+ break;
+ case 3:
+ isr = csr_read64(LOONGARCH_CSR_ISR3);
+ break;
+ }
+
+ if (isr & (1UL << (vector % VECTORS_PER_REG))) {
+ mp_ops.send_ipi_single(cpu, ACTION_CLEAR_VECTOR);
+ continue;
+ }
+ list_del(&adata->entry);
+ irq_matrix_free(loongarch_avec.vector_matrix, cpu, vector, false);
+ this_cpu_write(irq_map[vector], NULL);
+ adata->moving = 0;
+ adata->prev_cpu = adata->cpu;
+ adata->prev_vec = adata->vec;
+ }
+}
+#endif
+
+static void avecintc_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ struct avecintc_data *adata = irq_data_get_irq_chip_data(d);
+
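+	/*
+	 * Encode the target in the MSI address: the vector number sits in
+	 * bits [11:4] on top of the discovered MSI base address, and the
+	 * logical CPU id is placed from bit 12 upward.
+	 */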
+ msg->address_hi = 0x0;
+ msg->address_lo = (loongarch_avec.msi_base_addr | (adata->vec & 0xff) << 4)
+ | ((cpu_logical_map(adata->cpu & 0xffff)) << 12);
+ msg->data = 0x0;
+}
+
+static struct irq_chip avec_irq_controller = {
+ .name = "AVECINTC",
+ .irq_ack = avecintc_ack_irq,
+ .irq_mask = avecintc_mask_irq,
+ .irq_unmask = avecintc_unmask_irq,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = avecintc_set_affinity,
+#endif
+ .irq_compose_msi_msg = avecintc_compose_msi_msg,
+};
+
+static void avecintc_irq_dispatch(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct irq_desc *d;
+
+ chained_irq_enter(chip, desc);
+
+ while (true) {
+ unsigned long vector = csr_read64(LOONGARCH_CSR_IRR);
+ if (vector & IRR_INVALID_MASK)
+ break;
+
+ vector &= IRR_VECTOR_MASK;
+
+ d = this_cpu_read(irq_map[vector]);
+ if (d) {
+ generic_handle_irq_desc(d);
+ } else {
+ spurious_interrupt();
+ pr_warn("Unexpected IRQ occurs on CPU#%d [vector %ld]\n", smp_processor_id(), vector);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int avecintc_alloc_vector(struct irq_data *irqd, struct avecintc_data *adata)
+{
+ int cpu, ret;
+
+ guard(raw_spinlock_irqsave)(&loongarch_avec.lock);
+
+ ret = irq_matrix_alloc(loongarch_avec.vector_matrix, cpu_online_mask, false, &cpu);
+ if (ret < 0)
+ return ret;
+
+ adata->prev_cpu = adata->cpu = cpu;
+ adata->prev_vec = adata->vec = ret;
+ per_cpu_ptr(irq_map, adata->cpu)[adata->vec] = irq_data_to_desc(irqd);
+
+ return 0;
+}
+
+static int avecintc_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ for (unsigned int i = 0; i < nr_irqs; i++) {
+ struct irq_data *irqd = irq_domain_get_irq_data(domain, virq + i);
+ struct avecintc_data *adata = kzalloc(sizeof(*adata), GFP_KERNEL);
+ int ret;
+
+ if (!adata)
+ return -ENOMEM;
+
+ ret = avecintc_alloc_vector(irqd, adata);
+ if (ret < 0) {
+ kfree(adata);
+ return ret;
+ }
+
+ irq_domain_set_info(domain, virq + i, virq + i, &avec_irq_controller,
+ adata, handle_edge_irq, NULL, NULL);
+ irqd_set_single_target(irqd);
+ irqd_set_affinity_on_activate(irqd);
+ }
+
+ return 0;
+}
+
+static void avecintc_free_vector(struct irq_data *irqd, struct avecintc_data *adata)
+{
+ guard(raw_spinlock_irqsave)(&loongarch_avec.lock);
+
+ per_cpu(irq_map, adata->cpu)[adata->vec] = NULL;
+ irq_matrix_free(loongarch_avec.vector_matrix, adata->cpu, adata->vec, false);
+
+#ifdef CONFIG_SMP
+ if (!adata->moving)
+ return;
+
+ per_cpu(irq_map, adata->prev_cpu)[adata->prev_vec] = NULL;
+ irq_matrix_free(loongarch_avec.vector_matrix, adata->prev_cpu, adata->prev_vec, false);
+ list_del_init(&adata->entry);
+#endif
+}
+
+static void avecintc_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ for (unsigned int i = 0; i < nr_irqs; i++) {
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
+
+ if (d) {
+ struct avecintc_data *adata = irq_data_get_irq_chip_data(d);
+
+ avecintc_free_vector(d, adata);
+ irq_domain_reset_irq_data(d);
+ kfree(adata);
+ }
+ }
+}
+
+static const struct irq_domain_ops avecintc_domain_ops = {
+ .alloc = avecintc_domain_alloc,
+ .free = avecintc_domain_free,
+ .select = msi_lib_irq_domain_select,
+};
+
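+/*
+ * All NR_VECTORS vectors are handed out by a matrix allocator; the first
+ * NR_LEGACY_VECTORS entries are reserved as system vectors so that
+ * dynamically allocated interrupts never land on the fixed legacy ones.
+ */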
+static int __init irq_matrix_init(void)
+{
+ loongarch_avec.vector_matrix = irq_alloc_matrix(NR_VECTORS, 0, NR_VECTORS);
+ if (!loongarch_avec.vector_matrix)
+ return -ENOMEM;
+
+ for (int i = 0; i < NR_LEGACY_VECTORS; i++)
+ irq_matrix_assign_system(loongarch_avec.vector_matrix, i, false);
+
+ irq_matrix_online(loongarch_avec.vector_matrix);
+
+ return 0;
+}
+
+static int __init avecintc_init(struct irq_domain *parent)
+{
+ int ret, parent_irq;
+
+ raw_spin_lock_init(&loongarch_avec.lock);
+
+ loongarch_avec.fwnode = irq_domain_alloc_named_fwnode("AVECINTC");
+ if (!loongarch_avec.fwnode) {
+ pr_err("Unable to allocate domain handle\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ loongarch_avec.domain = irq_domain_create_tree(loongarch_avec.fwnode,
+ &avecintc_domain_ops, NULL);
+ if (!loongarch_avec.domain) {
+ pr_err("Unable to create IRQ domain\n");
+ ret = -ENOMEM;
+ goto out_free_handle;
+ }
+
+ parent_irq = irq_create_mapping(parent, INT_AVEC);
+ if (!parent_irq) {
+ pr_err("Failed to mapping hwirq\n");
+ ret = -EINVAL;
+ goto out_remove_domain;
+ }
+
+ ret = irq_matrix_init();
+ if (ret < 0) {
+ pr_err("Failed to init irq matrix\n");
+ goto out_remove_domain;
+ }
+ irq_set_chained_handler_and_data(parent_irq, avecintc_irq_dispatch, NULL);
+
+#ifdef CONFIG_SMP
+ pending_list_init(0);
+ cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_AVECINTC_STARTING,
+ "irqchip/loongarch/avecintc:starting",
+ avecintc_cpu_online, avecintc_cpu_offline);
+#endif
+ avecintc_enable();
+
+ return ret;
+
+out_remove_domain:
+ irq_domain_remove(loongarch_avec.domain);
+out_free_handle:
+ irq_domain_free_fwnode(loongarch_avec.fwnode);
+out:
+ return ret;
+}
+
+static int __init pch_msi_parse_madt(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header;
+
+ loongarch_avec.msi_base_addr = pchmsi_entry->msg_address - AVEC_MSG_OFFSET;
+
+ return pch_msi_acpi_init_avec(loongarch_avec.domain);
+}
+
+static inline int __init acpi_cascade_irqdomain_init(void)
+{
+ return acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1);
+}
+
+int __init avecintc_acpi_init(struct irq_domain *parent)
+{
+ int ret = avecintc_init(parent);
+ if (ret < 0) {
+ pr_err("Failed to init IRQ domain\n");
+ return ret;
+ }
+
+ ret = acpi_cascade_irqdomain_init();
+ if (ret < 0) {
+ pr_err("Failed to init cascade IRQ domain\n");
+ return ret;
+ }
+
+ return ret;
+}
diff --git a/drivers/irqchip/irq-loongarch-cpu.c b/drivers/irqchip/irq-loongarch-cpu.c
index 9d8f2c406043..e62dab4c97fc 100644
--- a/drivers/irqchip/irq-loongarch-cpu.c
+++ b/drivers/irqchip/irq-loongarch-cpu.c
@@ -13,16 +13,20 @@
#include <asm/loongarch.h>
#include <asm/setup.h>
+#include "irq-loongson.h"
+
static struct irq_domain *irq_domain;
struct fwnode_handle *cpuintc_handle;
static u32 lpic_gsi_to_irq(u32 gsi)
{
+ int irq = 0;
+
/* Only pch irqdomain transferring is required for LoongArch. */
if (gsi >= GSI_MIN_PCH_IRQ && gsi <= GSI_MAX_PCH_IRQ)
- return acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH);
+ irq = acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH);
- return 0;
+ return (irq > 0) ? irq : 0;
}
static struct fwnode_handle *lpic_get_gsi_domain_id(u32 gsi)
@@ -138,7 +142,10 @@ static int __init acpi_cascade_irqdomain_init(void)
if (r < 0)
return r;
- return 0;
+ if (cpu_has_avecint)
+ r = avecintc_acpi_init(irq_domain);
+
+ return r;
}
static int __init cpuintc_acpi_init(union acpi_subtable_headers *header,
diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
index b3736bdd4b9f..bb79e19dfb59 100644
--- a/drivers/irqchip/irq-loongson-eiointc.c
+++ b/drivers/irqchip/irq-loongson-eiointc.c
@@ -14,7 +14,11 @@
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/kernel.h>
+#include <linux/kvm_para.h>
#include <linux/syscore_ops.h>
+#include <asm/numa.h>
+
+#include "irq-loongson.h"
#define EIOINTC_REG_NODEMAP 0x14a0
#define EIOINTC_REG_IPMAP 0x14c0
@@ -23,15 +27,37 @@
#define EIOINTC_REG_ISR 0x1800
#define EIOINTC_REG_ROUTE 0x1c00
+#define EXTIOI_VIRT_FEATURES 0x40000000
+#define EXTIOI_HAS_VIRT_EXTENSION BIT(0)
+#define EXTIOI_HAS_ENABLE_OPTION BIT(1)
+#define EXTIOI_HAS_INT_ENCODE BIT(2)
+#define EXTIOI_HAS_CPU_ENCODE BIT(3)
+#define EXTIOI_VIRT_CONFIG 0x40000004
+#define EXTIOI_ENABLE BIT(1)
+#define EXTIOI_ENABLE_INT_ENCODE BIT(2)
+#define EXTIOI_ENABLE_CPU_ENCODE BIT(3)
+
#define VEC_REG_COUNT 4
#define VEC_COUNT_PER_REG 64
#define VEC_COUNT (VEC_REG_COUNT * VEC_COUNT_PER_REG)
#define VEC_REG_IDX(irq_id) ((irq_id) / VEC_COUNT_PER_REG)
#define VEC_REG_BIT(irq_id) ((irq_id) % VEC_COUNT_PER_REG)
#define EIOINTC_ALL_ENABLE 0xffffffff
+#define EIOINTC_ALL_ENABLE_VEC_MASK(vector) (EIOINTC_ALL_ENABLE & ~BIT(vector & 0x1f))
+#define EIOINTC_REG_ENABLE_VEC(vector) (EIOINTC_REG_ENABLE + ((vector >> 5) << 2))
+#define EIOINTC_USE_CPU_ENCODE BIT(0)
#define MAX_EIO_NODES (NR_CPUS / CORES_PER_EIO_NODE)
+/*
+ * Routing registers are 32-bit, with an 8-bit route setting for each
+ * interrupt vector, so one route register holds the routing information
+ * for four vectors.
+ */
+#define EIOINTC_REG_ROUTE_VEC(vector) (EIOINTC_REG_ROUTE + (vector & ~0x03))
+#define EIOINTC_REG_ROUTE_VEC_SHIFT(vector) ((vector & 0x03) << 3)
+#define EIOINTC_REG_ROUTE_VEC_MASK(vector) (0xff << EIOINTC_REG_ROUTE_VEC_SHIFT(vector))
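+/* Example: vector 5 is routed via EIOINTC_REG_ROUTE + 4, bit shift 8 (mask 0xff00). */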
+
static int nr_pics;
struct eiointc_priv {
@@ -41,6 +67,7 @@ struct eiointc_priv {
cpumask_t cpuspan_map;
struct fwnode_handle *domain_handle;
struct irq_domain *eiointc_domain;
+ int flags;
};
static struct eiointc_priv *eiointc_priv[MAX_IO_PICS];
@@ -56,9 +83,13 @@ static void eiointc_enable(void)
static int cpu_to_eio_node(int cpu)
{
- return cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
+ if (!kvm_para_has_feature(KVM_FEATURE_VIRT_EXTIOI))
+ return cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
+ else
+ return cpu_logical_map(cpu) / CORES_PER_VEIO_NODE;
}
+#ifdef CONFIG_SMP
static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode, nodemask_t *node_map)
{
int i, node, cpu_node, route_node;
@@ -85,6 +116,17 @@ static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode,
}
}
+static void veiointc_set_irq_route(unsigned int vector, unsigned int cpu)
+{
+ unsigned long reg = EIOINTC_REG_ROUTE_VEC(vector);
+ unsigned int data;
+
+ data = iocsr_read32(reg);
+ data &= ~EIOINTC_REG_ROUTE_VEC_MASK(vector);
+ data |= cpu_logical_map(cpu) << EIOINTC_REG_ROUTE_VEC_SHIFT(vector);
+ iocsr_write32(data, reg);
+}
+
static DEFINE_RAW_SPINLOCK(affinity_lock);
static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, bool force)
@@ -92,33 +134,35 @@ static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *af
unsigned int cpu;
unsigned long flags;
uint32_t vector, regaddr;
- struct cpumask intersect_affinity;
struct eiointc_priv *priv = d->domain->host_data;
raw_spin_lock_irqsave(&affinity_lock, flags);
- cpumask_and(&intersect_affinity, affinity, cpu_online_mask);
- cpumask_and(&intersect_affinity, &intersect_affinity, &priv->cpuspan_map);
-
- if (cpumask_empty(&intersect_affinity)) {
+ cpu = cpumask_first_and_and(&priv->cpuspan_map, affinity, cpu_online_mask);
+ if (cpu >= nr_cpu_ids) {
raw_spin_unlock_irqrestore(&affinity_lock, flags);
return -EINVAL;
}
- cpu = cpumask_first(&intersect_affinity);
vector = d->hwirq;
- regaddr = EIOINTC_REG_ENABLE + ((vector >> 5) << 2);
-
- /* Mask target vector */
- csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)),
- 0x0, priv->node * CORES_PER_EIO_NODE);
-
- /* Set route for target vector */
- eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map);
-
- /* Unmask target vector */
- csr_any_send(regaddr, EIOINTC_ALL_ENABLE,
- 0x0, priv->node * CORES_PER_EIO_NODE);
+ regaddr = EIOINTC_REG_ENABLE_VEC(vector);
+
+ if (priv->flags & EIOINTC_USE_CPU_ENCODE) {
+ iocsr_write32(EIOINTC_ALL_ENABLE_VEC_MASK(vector), regaddr);
+ veiointc_set_irq_route(vector, cpu);
+ iocsr_write32(EIOINTC_ALL_ENABLE, regaddr);
+ } else {
+ /* Mask target vector */
+ csr_any_send(regaddr, EIOINTC_ALL_ENABLE_VEC_MASK(vector),
+ 0x0, priv->node * CORES_PER_EIO_NODE);
+
+ /* Set route for target vector */
+ eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map);
+
+ /* Unmask target vector */
+ csr_any_send(regaddr, EIOINTC_ALL_ENABLE,
+ 0x0, priv->node * CORES_PER_EIO_NODE);
+ }
irq_data_update_effective_affinity(d, cpumask_of(cpu));
@@ -126,6 +170,7 @@ static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *af
return IRQ_SET_MASK_OK;
}
+#endif
static int eiointc_index(int node)
{
@@ -141,17 +186,23 @@ static int eiointc_index(int node)
static int eiointc_router_init(unsigned int cpu)
{
- int i, bit;
- uint32_t data;
- uint32_t node = cpu_to_eio_node(cpu);
- int index = eiointc_index(node);
+ int i, bit, cores, index, node;
+ unsigned int data;
+
+ node = cpu_to_eio_node(cpu);
+ index = eiointc_index(node);
if (index < 0) {
pr_err("Error: invalid nodemap!\n");
- return -1;
+ return -EINVAL;
}
- if ((cpu_logical_map(cpu) % CORES_PER_EIO_NODE) == 0) {
+ if (!(eiointc_priv[index]->flags & EIOINTC_USE_CPU_ENCODE))
+ cores = CORES_PER_EIO_NODE;
+ else
+ cores = CORES_PER_VEIO_NODE;
+
+ if ((cpu_logical_map(cpu) % cores) == 0) {
eiointc_enable();
for (i = 0; i < eiointc_priv[0]->vec_count / 32; i++) {
@@ -167,7 +218,9 @@ static int eiointc_router_init(unsigned int cpu)
for (i = 0; i < eiointc_priv[0]->vec_count / 4; i++) {
/* Route to Node-0 Core-0 */
- if (index == 0)
+ if (eiointc_priv[index]->flags & EIOINTC_USE_CPU_ENCODE)
+ bit = cpu_logical_map(0);
+ else if (index == 0)
bit = BIT(cpu_logical_map(0));
else
bit = (eiointc_priv[index]->node << 4) | 1;
@@ -198,6 +251,12 @@ static void eiointc_irq_dispatch(struct irq_desc *desc)
for (i = 0; i < eiointc_priv[0]->vec_count / VEC_COUNT_PER_REG; i++) {
pending = iocsr_read64(EIOINTC_REG_ISR + (i << 3));
+
+ /* Skip handling if pending bitmap is zero */
+ if (!pending)
+ continue;
+
+ /* Clear the IRQs */
iocsr_write64(pending, EIOINTC_REG_ISR + (i << 3));
while (pending) {
int bit = __ffs(pending);
@@ -232,7 +291,9 @@ static struct irq_chip eiointc_irq_chip = {
.irq_ack = eiointc_ack_irq,
.irq_mask = eiointc_mask_irq,
.irq_unmask = eiointc_unmask_irq,
+#ifdef CONFIG_SMP
.irq_set_affinity = eiointc_set_irq_affinity,
+#endif
};
static int eiointc_domain_alloc(struct irq_domain *domain, unsigned int virq,
@@ -304,23 +365,7 @@ static int eiointc_suspend(void)
static void eiointc_resume(void)
{
- int i, j;
- struct irq_desc *desc;
- struct irq_data *irq_data;
-
eiointc_router_init(0);
-
- for (i = 0; i < nr_pics; i++) {
- for (j = 0; j < eiointc_priv[0]->vec_count; j++) {
- desc = irq_resolve_mapping(eiointc_priv[i]->eiointc_domain, j);
- if (desc && desc->handle_irq && desc->handle_irq != handle_bad_irq) {
- raw_spin_lock(&desc->lock);
- irq_data = irq_domain_get_irq_data(eiointc_priv[i]->eiointc_domain, irq_desc_get_irq(desc));
- eiointc_set_irq_affinity(irq_data, irq_data->common->affinity, 0);
- raw_spin_unlock(&desc->lock);
- }
- }
- }
}
static struct syscore_ops eiointc_syscore_ops = {
@@ -349,7 +394,7 @@ static int __init pch_msi_parse_madt(union acpi_subtable_headers *header,
int node;
if (cpu_has_flatmode)
- node = cpu_to_node(eiointc_priv[nr_pics - 1]->node * CORES_PER_EIO_NODE);
+ node = early_cpu_to_node(eiointc_priv[nr_pics - 1]->node * CORES_PER_EIO_NODE);
else
node = eiointc_priv[nr_pics - 1]->node;
@@ -369,6 +414,9 @@ static int __init acpi_cascade_irqdomain_init(void)
if (r < 0)
return r;
+ if (cpu_has_avecint)
+ return 0;
+
r = acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1);
if (r < 0)
return r;
@@ -379,7 +427,7 @@ static int __init acpi_cascade_irqdomain_init(void)
static int __init eiointc_init(struct eiointc_priv *priv, int parent_irq,
u64 node_map)
{
- int i;
+ int i, val;
node_map = node_map ? node_map : -1ULL;
for_each_possible_cpu(i) {
@@ -399,14 +447,28 @@ static int __init eiointc_init(struct eiointc_priv *priv, int parent_irq,
return -ENOMEM;
}
+ if (kvm_para_has_feature(KVM_FEATURE_VIRT_EXTIOI)) {
+ val = iocsr_read32(EXTIOI_VIRT_FEATURES);
+ /*
+		 * With EXTIOI_ENABLE_CPU_ENCODE set, interrupts
+		 * can be routed to up to 256 vCPUs.
+ */
+ if (val & EXTIOI_HAS_CPU_ENCODE) {
+ val = iocsr_read32(EXTIOI_VIRT_CONFIG);
+ val |= EXTIOI_ENABLE_CPU_ENCODE;
+ iocsr_write32(val, EXTIOI_VIRT_CONFIG);
+ priv->flags = EIOINTC_USE_CPU_ENCODE;
+ }
+ }
+
eiointc_priv[nr_pics++] = priv;
eiointc_router_init(0);
irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv);
if (nr_pics == 1) {
register_syscore_ops(&eiointc_syscore_ops);
- cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING,
- "irqchip/loongarch/intc:starting",
+ cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_EIOINTC_STARTING,
+ "irqchip/loongarch/eiointc:starting",
eiointc_router_init, NULL);
}
@@ -441,7 +503,7 @@ int __init eiointc_acpi_init(struct irq_domain *parent,
goto out_free_handle;
if (cpu_has_flatmode)
- node = cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE);
+ node = early_cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE);
else
node = acpi_eiointc->node;
acpi_set_vec_parent(node, priv->eiointc_domain, pch_group);
diff --git a/drivers/irqchip/irq-loongson-htvec.c b/drivers/irqchip/irq-loongson-htvec.c
index 0bff728b25e3..5da02c7ad0b3 100644
--- a/drivers/irqchip/irq-loongson-htvec.c
+++ b/drivers/irqchip/irq-loongson-htvec.c
@@ -17,6 +17,8 @@
#include <linux/of_irq.h>
#include <linux/syscore_ops.h>
+#include "irq-loongson.h"
+
/* Registers */
#define HTVEC_EN_OFF 0x20
#define HTVEC_MAX_PARENT_IRQ 8
diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c
index e4b33aed1c97..2b1bd4a96665 100644
--- a/drivers/irqchip/irq-loongson-liointc.c
+++ b/drivers/irqchip/irq-loongson-liointc.c
@@ -22,13 +22,15 @@
#include <asm/loongson.h>
#endif
+#include "irq-loongson.h"
+
#define LIOINTC_CHIP_IRQ 32
#define LIOINTC_NUM_PARENT 4
#define LIOINTC_NUM_CORES 4
#define LIOINTC_INTC_CHIP_START 0x20
-#define LIOINTC_REG_INTC_STATUS (LIOINTC_INTC_CHIP_START + 0x20)
+#define LIOINTC_REG_INTC_STATUS(core) (LIOINTC_INTC_CHIP_START + 0x20 + (core) * 8)
#define LIOINTC_REG_INTC_EN_STATUS (LIOINTC_INTC_CHIP_START + 0x04)
#define LIOINTC_REG_INTC_ENABLE (LIOINTC_INTC_CHIP_START + 0x08)
#define LIOINTC_REG_INTC_DISABLE (LIOINTC_INTC_CHIP_START + 0x0c)
@@ -217,7 +219,7 @@ static int liointc_init(phys_addr_t addr, unsigned long size, int revision,
goto out_free_priv;
for (i = 0; i < LIOINTC_NUM_CORES; i++)
- priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS;
+ priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS(i);
for (i = 0; i < LIOINTC_NUM_PARENT; i++)
priv->handler[i].parent_int_map = parent_int_map[i];
diff --git a/drivers/irqchip/irq-loongson-pch-lpc.c b/drivers/irqchip/irq-loongson-pch-lpc.c
index 9b35492fb6be..2d4c3ec128b8 100644
--- a/drivers/irqchip/irq-loongson-pch-lpc.c
+++ b/drivers/irqchip/irq-loongson-pch-lpc.c
@@ -15,6 +15,8 @@
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
+#include "irq-loongson.h"
+
/* Registers */
#define LPC_INT_CTL 0x00
#define LPC_INT_ENA 0x04
diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c
index 6e1e1f011bb2..bd337ecddb40 100644
--- a/drivers/irqchip/irq-loongson-pch-msi.c
+++ b/drivers/irqchip/irq-loongson-pch-msi.c
@@ -15,6 +15,9 @@
#include <linux/pci.h>
#include <linux/slab.h>
+#include "irq-msi-lib.h"
+#include "irq-loongson.h"
+
static int nr_pics;
struct pch_msi_data {
@@ -27,26 +30,6 @@ struct pch_msi_data {
static struct fwnode_handle *pch_msi_handle[MAX_IO_PICS];
-static void pch_msi_mask_msi_irq(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void pch_msi_unmask_msi_irq(struct irq_data *d)
-{
- irq_chip_unmask_parent(d);
- pci_msi_unmask_irq(d);
-}
-
-static struct irq_chip pch_msi_irq_chip = {
- .name = "PCH PCI MSI",
- .irq_mask = pch_msi_mask_msi_irq,
- .irq_unmask = pch_msi_unmask_msi_irq,
- .irq_ack = irq_chip_ack_parent,
- .irq_set_affinity = irq_chip_set_affinity_parent,
-};
-
static int pch_msi_allocate_hwirq(struct pch_msi_data *priv, int num_req)
{
int first;
@@ -85,12 +68,6 @@ static void pch_msi_compose_msi_msg(struct irq_data *data,
msg->data = data->hwirq;
}
-static struct msi_domain_info pch_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
- .chip = &pch_msi_irq_chip,
-};
-
static struct irq_chip middle_irq_chip = {
.name = "PCH MSI",
.irq_mask = irq_chip_mask_parent,
@@ -136,7 +113,7 @@ static int pch_msi_middle_domain_alloc(struct irq_domain *domain,
err_hwirq:
pch_msi_free_hwirq(priv, hwirq, nr_irqs);
- irq_domain_free_irqs_parent(domain, virq, i - 1);
+ irq_domain_free_irqs_parent(domain, virq, i);
return err;
}
@@ -155,13 +132,31 @@ static void pch_msi_middle_domain_free(struct irq_domain *domain,
static const struct irq_domain_ops pch_msi_middle_domain_ops = {
.alloc = pch_msi_middle_domain_alloc,
.free = pch_msi_middle_domain_free,
+ .select = msi_lib_irq_domain_select,
+};
+
+#define PCH_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT)
+
+#define PCH_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX | \
+ MSI_FLAG_MULTI_PCI_MSI)
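+
+/*
+ * With the msi-lib parent scheme the middle domain no longer creates a PCI
+ * MSI domain itself; it only advertises these flags and lets the generic
+ * code build per-device MSI domains on top of it.
+ */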
+
+static struct msi_parent_ops pch_msi_parent_ops = {
+ .required_flags = PCH_MSI_FLAGS_REQUIRED,
+ .supported_flags = PCH_MSI_FLAGS_SUPPORTED,
+ .bus_select_mask = MATCH_PCI_MSI,
+ .bus_select_token = DOMAIN_BUS_NEXUS,
+ .prefix = "PCH-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static int pch_msi_init_domains(struct pch_msi_data *priv,
struct irq_domain *parent,
struct fwnode_handle *domain_handle)
{
- struct irq_domain *middle_domain, *msi_domain;
+ struct irq_domain *middle_domain;
middle_domain = irq_domain_create_hierarchy(parent, 0, priv->num_irqs,
domain_handle,
@@ -174,14 +169,8 @@ static int pch_msi_init_domains(struct pch_msi_data *priv,
irq_domain_update_bus_token(middle_domain, DOMAIN_BUS_NEXUS);
- msi_domain = pci_msi_create_irq_domain(domain_handle,
- &pch_msi_domain_info,
- middle_domain);
- if (!msi_domain) {
- pr_err("Failed to create PCI MSI domain\n");
- irq_domain_remove(middle_domain);
- return -ENOMEM;
- }
+ middle_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
+ middle_domain->msi_parent_ops = &pch_msi_parent_ops;
return 0;
}
@@ -266,17 +255,17 @@ IRQCHIP_DECLARE(pch_msi, "loongson,pch-msi-1.0", pch_msi_of_init);
#ifdef CONFIG_ACPI
struct fwnode_handle *get_pch_msi_handle(int pci_segment)
{
- int i;
+ if (cpu_has_avecint)
+ return pch_msi_handle[0];
- for (i = 0; i < MAX_IO_PICS; i++) {
+ for (int i = 0; i < MAX_IO_PICS; i++) {
if (msi_group[i].pci_segment == pci_segment)
return pch_msi_handle[i];
}
- return NULL;
+ return pch_msi_handle[0];
}
-int __init pch_msi_acpi_init(struct irq_domain *parent,
- struct acpi_madt_msi_pic *acpi_pchmsi)
+int __init pch_msi_acpi_init(struct irq_domain *parent, struct acpi_madt_msi_pic *acpi_pchmsi)
{
int ret;
struct fwnode_handle *domain_handle;
@@ -289,4 +278,18 @@ int __init pch_msi_acpi_init(struct irq_domain *parent,
return ret;
}
+
+int __init pch_msi_acpi_init_avec(struct irq_domain *parent)
+{
+ if (pch_msi_handle[0])
+ return 0;
+
+ pch_msi_handle[0] = parent->fwnode;
+ irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
+
+ parent->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
+ parent->msi_parent_ops = &pch_msi_parent_ops;
+
+ return 0;
+}
#endif
diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c
index 63db8e2172e0..69efda35a8e7 100644
--- a/drivers/irqchip/irq-loongson-pch-pic.c
+++ b/drivers/irqchip/irq-loongson-pch-pic.c
@@ -17,6 +17,8 @@
#include <linux/of_irq.h>
#include <linux/syscore_ops.h>
+#include "irq-loongson.h"
+
/* Registers */
#define PCH_PIC_MASK 0x20
#define PCH_PIC_HTMSI_EN 0x40
@@ -33,6 +35,7 @@
#define PIC_COUNT (PIC_COUNT_PER_REG * PIC_REG_COUNT)
#define PIC_REG_IDX(irq_id) ((irq_id) / PIC_COUNT_PER_REG)
#define PIC_REG_BIT(irq_id) ((irq_id) % PIC_COUNT_PER_REG)
+#define PIC_UNDEF_VECTOR 255
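+
+/*
+ * Incoming hwirqs are remapped through a per-controller table so that only
+ * vectors which are actually requested consume one of the PIC_COUNT
+ * hardware slots; unused entries stay at PIC_UNDEF_VECTOR.
+ */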
static int nr_pics;
@@ -46,12 +49,19 @@ struct pch_pic {
u32 saved_vec_en[PIC_REG_COUNT];
u32 saved_vec_pol[PIC_REG_COUNT];
u32 saved_vec_edge[PIC_REG_COUNT];
+ u8 table[PIC_COUNT];
+ int inuse;
};
static struct pch_pic *pch_pic_priv[MAX_IO_PICS];
struct fwnode_handle *pch_pic_handle[MAX_IO_PICS];
+static inline u8 hwirq_to_bit(struct pch_pic *priv, int hirq)
+{
+ return priv->table[hirq];
+}
+
static void pch_pic_bitset(struct pch_pic *priv, int offset, int bit)
{
u32 reg;
@@ -80,45 +90,47 @@ static void pch_pic_mask_irq(struct irq_data *d)
{
struct pch_pic *priv = irq_data_get_irq_chip_data(d);
- pch_pic_bitset(priv, PCH_PIC_MASK, d->hwirq);
+ pch_pic_bitset(priv, PCH_PIC_MASK, hwirq_to_bit(priv, d->hwirq));
irq_chip_mask_parent(d);
}
static void pch_pic_unmask_irq(struct irq_data *d)
{
struct pch_pic *priv = irq_data_get_irq_chip_data(d);
+ int bit = hwirq_to_bit(priv, d->hwirq);
- writel(BIT(PIC_REG_BIT(d->hwirq)),
- priv->base + PCH_PIC_CLR + PIC_REG_IDX(d->hwirq) * 4);
+ writel(BIT(PIC_REG_BIT(bit)),
+ priv->base + PCH_PIC_CLR + PIC_REG_IDX(bit) * 4);
irq_chip_unmask_parent(d);
- pch_pic_bitclr(priv, PCH_PIC_MASK, d->hwirq);
+ pch_pic_bitclr(priv, PCH_PIC_MASK, bit);
}
static int pch_pic_set_type(struct irq_data *d, unsigned int type)
{
struct pch_pic *priv = irq_data_get_irq_chip_data(d);
+ int bit = hwirq_to_bit(priv, d->hwirq);
int ret = 0;
switch (type) {
case IRQ_TYPE_EDGE_RISING:
- pch_pic_bitset(priv, PCH_PIC_EDGE, d->hwirq);
- pch_pic_bitclr(priv, PCH_PIC_POL, d->hwirq);
+ pch_pic_bitset(priv, PCH_PIC_EDGE, bit);
+ pch_pic_bitclr(priv, PCH_PIC_POL, bit);
irq_set_handler_locked(d, handle_edge_irq);
break;
case IRQ_TYPE_EDGE_FALLING:
- pch_pic_bitset(priv, PCH_PIC_EDGE, d->hwirq);
- pch_pic_bitset(priv, PCH_PIC_POL, d->hwirq);
+ pch_pic_bitset(priv, PCH_PIC_EDGE, bit);
+ pch_pic_bitset(priv, PCH_PIC_POL, bit);
irq_set_handler_locked(d, handle_edge_irq);
break;
case IRQ_TYPE_LEVEL_HIGH:
- pch_pic_bitclr(priv, PCH_PIC_EDGE, d->hwirq);
- pch_pic_bitclr(priv, PCH_PIC_POL, d->hwirq);
+ pch_pic_bitclr(priv, PCH_PIC_EDGE, bit);
+ pch_pic_bitclr(priv, PCH_PIC_POL, bit);
irq_set_handler_locked(d, handle_level_irq);
break;
case IRQ_TYPE_LEVEL_LOW:
- pch_pic_bitclr(priv, PCH_PIC_EDGE, d->hwirq);
- pch_pic_bitset(priv, PCH_PIC_POL, d->hwirq);
+ pch_pic_bitclr(priv, PCH_PIC_EDGE, bit);
+ pch_pic_bitset(priv, PCH_PIC_POL, bit);
irq_set_handler_locked(d, handle_level_irq);
break;
default:
@@ -133,11 +145,12 @@ static void pch_pic_ack_irq(struct irq_data *d)
{
unsigned int reg;
struct pch_pic *priv = irq_data_get_irq_chip_data(d);
+ int bit = hwirq_to_bit(priv, d->hwirq);
- reg = readl(priv->base + PCH_PIC_EDGE + PIC_REG_IDX(d->hwirq) * 4);
- if (reg & BIT(PIC_REG_BIT(d->hwirq))) {
- writel(BIT(PIC_REG_BIT(d->hwirq)),
- priv->base + PCH_PIC_CLR + PIC_REG_IDX(d->hwirq) * 4);
+ reg = readl(priv->base + PCH_PIC_EDGE + PIC_REG_IDX(bit) * 4);
+ if (reg & BIT(PIC_REG_BIT(bit))) {
+ writel(BIT(PIC_REG_BIT(bit)),
+ priv->base + PCH_PIC_CLR + PIC_REG_IDX(bit) * 4);
}
irq_chip_ack_parent(d);
}
@@ -159,6 +172,8 @@ static int pch_pic_domain_translate(struct irq_domain *d,
{
struct pch_pic *priv = d->host_data;
struct device_node *of_node = to_of_node(fwspec->fwnode);
+ unsigned long flags;
+ int i;
if (of_node) {
if (fwspec->param_count < 2)
@@ -171,12 +186,33 @@ static int pch_pic_domain_translate(struct irq_domain *d,
return -EINVAL;
*hwirq = fwspec->param[0] - priv->gsi_base;
+
if (fwspec->param_count > 1)
*type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
else
*type = IRQ_TYPE_NONE;
}
+ raw_spin_lock_irqsave(&priv->pic_lock, flags);
+	/* Check the PIC table to see whether this hwirq has already been assigned */
+ for (i = 0; i < priv->inuse; i++) {
+ if (priv->table[i] == *hwirq) {
+ *hwirq = i;
+ break;
+ }
+ }
+ if (i == priv->inuse) {
+ /* Assign a new hwirq in pic-table */
+ if (priv->inuse >= PIC_COUNT) {
+ pr_err("pch-pic domain has no free vectors\n");
+ raw_spin_unlock_irqrestore(&priv->pic_lock, flags);
+ return -EINVAL;
+ }
+ priv->table[priv->inuse] = *hwirq;
+ *hwirq = priv->inuse++;
+ }
+ raw_spin_unlock_irqrestore(&priv->pic_lock, flags);
+
return 0;
}
@@ -194,6 +230,9 @@ static int pch_pic_alloc(struct irq_domain *domain, unsigned int virq,
if (err)
return err;
+ /* Write vector ID */
+ writeb(priv->ht_vec_base + hwirq, priv->base + PCH_INT_HTVEC(hwirq_to_bit(priv, hwirq)));
+
parent_fwspec.fwnode = domain->parent->fwnode;
parent_fwspec.param_count = 1;
parent_fwspec.param[0] = hwirq + priv->ht_vec_base;
@@ -222,7 +261,7 @@ static void pch_pic_reset(struct pch_pic *priv)
for (i = 0; i < PIC_COUNT; i++) {
/* Write vector ID */
- writeb(priv->ht_vec_base + i, priv->base + PCH_INT_HTVEC(i));
+ writeb(priv->ht_vec_base + i, priv->base + PCH_INT_HTVEC(hwirq_to_bit(priv, i)));
/* Hardcode route to HT0 Lo */
writeb(1, priv->base + PCH_INT_ROUTE(i));
}
@@ -284,6 +323,7 @@ static int pch_pic_init(phys_addr_t addr, unsigned long size, int vec_base,
u32 gsi_base)
{
struct pch_pic *priv;
+ int i;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -294,6 +334,10 @@ static int pch_pic_init(phys_addr_t addr, unsigned long size, int vec_base,
if (!priv->base)
goto free_priv;
+ priv->inuse = 0;
+ for (i = 0; i < PIC_COUNT; i++)
+ priv->table[i] = PIC_UNDEF_VECTOR;
+
priv->ht_vec_base = vec_base;
priv->vec_count = ((readq(priv->base) >> 48) & 0xff) + 1;
priv->gsi_base = gsi_base;
diff --git a/drivers/irqchip/irq-loongson.h b/drivers/irqchip/irq-loongson.h
new file mode 100644
index 000000000000..11fa138d1f44
--- /dev/null
+++ b/drivers/irqchip/irq-loongson.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2024 Loongson Technology Corporation Limited
+ */
+
+#ifndef _DRIVERS_IRQCHIP_IRQ_LOONGSON_H
+#define _DRIVERS_IRQCHIP_IRQ_LOONGSON_H
+
+int find_pch_pic(u32 gsi);
+
+int liointc_acpi_init(struct irq_domain *parent,
+ struct acpi_madt_lio_pic *acpi_liointc);
+int eiointc_acpi_init(struct irq_domain *parent,
+ struct acpi_madt_eio_pic *acpi_eiointc);
+int avecintc_acpi_init(struct irq_domain *parent);
+
+int htvec_acpi_init(struct irq_domain *parent,
+ struct acpi_madt_ht_pic *acpi_htvec);
+int pch_lpc_acpi_init(struct irq_domain *parent,
+ struct acpi_madt_lpc_pic *acpi_pchlpc);
+int pch_pic_acpi_init(struct irq_domain *parent,
+ struct acpi_madt_bio_pic *acpi_pchpic);
+int pch_msi_acpi_init(struct irq_domain *parent,
+ struct acpi_madt_msi_pic *acpi_pchmsi);
+int pch_msi_acpi_init_avec(struct irq_domain *parent);
+
+#endif /* _DRIVERS_IRQCHIP_IRQ_LOONGSON_H */
diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c
index 15cf80b46322..c0e1aafe468c 100644
--- a/drivers/irqchip/irq-ls-scfg-msi.c
+++ b/drivers/irqchip/irq-ls-scfg-msi.c
@@ -398,7 +398,7 @@ static int ls_scfg_msi_probe(struct platform_device *pdev)
return 0;
}
-static int ls_scfg_msi_remove(struct platform_device *pdev)
+static void ls_scfg_msi_remove(struct platform_device *pdev)
{
struct ls_scfg_msi *msi_data = platform_get_drvdata(pdev);
int i;
@@ -410,17 +410,15 @@ static int ls_scfg_msi_remove(struct platform_device *pdev)
irq_domain_remove(msi_data->parent);
platform_set_drvdata(pdev, NULL);
-
- return 0;
}
static struct platform_driver ls_scfg_msi_driver = {
.driver = {
- .name = "ls-scfg-msi",
- .of_match_table = ls_scfg_msi_id,
+ .name = "ls-scfg-msi",
+ .of_match_table = ls_scfg_msi_id,
},
- .probe = ls_scfg_msi_probe,
- .remove = ls_scfg_msi_remove,
+ .probe = ls_scfg_msi_probe,
+ .remove = ls_scfg_msi_remove,
};
module_platform_driver(ls_scfg_msi_driver);
diff --git a/drivers/irqchip/irq-madera.c b/drivers/irqchip/irq-madera.c
index 3eb1f8cdf674..b32982c11515 100644
--- a/drivers/irqchip/irq-madera.c
+++ b/drivers/irqchip/irq-madera.c
@@ -222,7 +222,7 @@ static int madera_irq_probe(struct platform_device *pdev)
return 0;
}
-static int madera_irq_remove(struct platform_device *pdev)
+static void madera_irq_remove(struct platform_device *pdev)
{
struct madera *madera = dev_get_drvdata(pdev->dev.parent);
@@ -232,13 +232,11 @@ static int madera_irq_remove(struct platform_device *pdev)
*/
madera->irq_dev = NULL;
regmap_del_irq_chip(madera->irq, madera->irq_data);
-
- return 0;
}
static struct platform_driver madera_irq_driver = {
- .probe = &madera_irq_probe,
- .remove = &madera_irq_remove,
+ .probe = madera_irq_probe,
+ .remove = madera_irq_remove,
.driver = {
.name = "madera-irq",
.pm = &madera_irq_pm_ops,
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index 58881d313979..6f69f4e5dbac 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -64,6 +64,20 @@ struct mbigen_device {
void __iomem *base;
};
+static inline unsigned int get_mbigen_node_offset(unsigned int nid)
+{
+ unsigned int offset = nid * MBIGEN_NODE_OFFSET;
+
+ /*
+	 * To avoid touching the clear registers unexpectedly, skip over their
+	 * block when accessing mbigen nodes with an ID of 10 or above.
+ */
+ if (nid >= (REG_MBIGEN_CLEAR_OFFSET / MBIGEN_NODE_OFFSET))
+ offset += MBIGEN_NODE_OFFSET;
+
+ return offset;
+}
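+
+/*
+ * Example, assuming the usual mbigen register map (0x1000 node stride,
+ * clear registers at 0xa000): node 9 maps to offset 0x9000, while node 10
+ * skips over the clear block and maps to 0xb000.
+ */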
+
static inline unsigned int get_mbigen_vec_reg(irq_hw_number_t hwirq)
{
unsigned int nid, pin;
@@ -72,8 +86,7 @@ static inline unsigned int get_mbigen_vec_reg(irq_hw_number_t hwirq)
nid = hwirq / IRQS_PER_MBIGEN_NODE + 1;
pin = hwirq % IRQS_PER_MBIGEN_NODE;
- return pin * 4 + nid * MBIGEN_NODE_OFFSET
- + REG_MBIGEN_VEC_OFFSET;
+ return pin * 4 + get_mbigen_node_offset(nid) + REG_MBIGEN_VEC_OFFSET;
}
static inline void get_mbigen_type_reg(irq_hw_number_t hwirq,
@@ -88,8 +101,7 @@ static inline void get_mbigen_type_reg(irq_hw_number_t hwirq,
*mask = 1 << (irq_ofst % 32);
ofst = irq_ofst / 32 * 4;
- *addr = ofst + nid * MBIGEN_NODE_OFFSET
- + REG_MBIGEN_TYPE_OFFSET;
+ *addr = ofst + get_mbigen_node_offset(nid) + REG_MBIGEN_TYPE_OFFSET;
}
static inline void get_mbigen_clear_reg(irq_hw_number_t hwirq,
@@ -135,24 +147,14 @@ static int mbigen_set_type(struct irq_data *data, unsigned int type)
return 0;
}
-static struct irq_chip mbigen_irq_chip = {
- .name = "mbigen-v2",
- .irq_mask = irq_chip_mask_parent,
- .irq_unmask = irq_chip_unmask_parent,
- .irq_eoi = mbigen_eoi_irq,
- .irq_set_type = mbigen_set_type,
- .irq_set_affinity = irq_chip_set_affinity_parent,
-};
-
-static void mbigen_write_msg(struct msi_desc *desc, struct msi_msg *msg)
+static void mbigen_write_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
- struct irq_data *d = irq_get_irq_data(desc->irq);
void __iomem *base = d->chip_data;
u32 val;
if (!msg->address_lo && !msg->address_hi)
return;
-
+
base += get_mbigen_vec_reg(d->hwirq);
val = readl_relaxed(base);
@@ -165,10 +167,8 @@ static void mbigen_write_msg(struct msi_desc *desc, struct msi_msg *msg)
writel_relaxed(val, base);
}
-static int mbigen_domain_translate(struct irq_domain *d,
- struct irq_fwspec *fwspec,
- unsigned long *hwirq,
- unsigned int *type)
+static int mbigen_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
+ unsigned long *hwirq, unsigned int *type)
{
if (is_of_node(fwspec->fwnode) || is_acpi_device_node(fwspec->fwnode)) {
if (fwspec->param_count != 2)
@@ -192,86 +192,69 @@ static int mbigen_domain_translate(struct irq_domain *d,
return -EINVAL;
}
-static int mbigen_irq_domain_alloc(struct irq_domain *domain,
- unsigned int virq,
- unsigned int nr_irqs,
- void *args)
+static void mbigen_domain_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
{
- struct irq_fwspec *fwspec = args;
- irq_hw_number_t hwirq;
- unsigned int type;
- struct mbigen_device *mgn_chip;
- int i, err;
-
- err = mbigen_domain_translate(domain, fwspec, &hwirq, &type);
- if (err)
- return err;
-
- err = platform_msi_device_domain_alloc(domain, virq, nr_irqs);
- if (err)
- return err;
+ arg->desc = desc;
+ arg->hwirq = (u32)desc->data.icookie.value;
+}
- mgn_chip = platform_msi_get_host_data(domain);
+static const struct msi_domain_template mbigen_msi_template = {
+ .chip = {
+ .name = "mbigen-v2",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = mbigen_eoi_irq,
+ .irq_set_type = mbigen_set_type,
+ .irq_write_msi_msg = mbigen_write_msi_msg,
+ },
- for (i = 0; i < nr_irqs; i++)
- irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
- &mbigen_irq_chip, mgn_chip->base);
+ .ops = {
+ .set_desc = mbigen_domain_set_desc,
+ .msi_translate = mbigen_domain_translate,
+ },
- return 0;
-}
+ .info = {
+ .bus_token = DOMAIN_BUS_WIRED_TO_MSI,
+ .flags = MSI_FLAG_USE_DEV_FWNODE,
+ },
+};
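+
+/*
+ * Each mbigen child now gets a per-device MSI domain built from the
+ * template above (DOMAIN_BUS_WIRED_TO_MSI); the hwirq is recovered from the
+ * stored interrupt cookie in mbigen_domain_set_desc() instead of a custom
+ * domain alloc/free path.
+ */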
-static void mbigen_irq_domain_free(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs)
+static bool mbigen_create_device_domain(struct device *dev, unsigned int size,
+ struct mbigen_device *mgn_chip)
{
- platform_msi_device_domain_free(domain, virq, nr_irqs);
-}
+ if (WARN_ON_ONCE(!dev->msi.domain))
+ return false;
-static const struct irq_domain_ops mbigen_domain_ops = {
- .translate = mbigen_domain_translate,
- .alloc = mbigen_irq_domain_alloc,
- .free = mbigen_irq_domain_free,
-};
+ return msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN,
+ &mbigen_msi_template, size,
+ NULL, mgn_chip->base);
+}
static int mbigen_of_create_domain(struct platform_device *pdev,
struct mbigen_device *mgn_chip)
{
struct platform_device *child;
- struct irq_domain *domain;
- struct device_node *np;
u32 num_pins;
- int ret = 0;
- for_each_child_of_node(pdev->dev.of_node, np) {
+ for_each_child_of_node_scoped(pdev->dev.of_node, np) {
if (!of_property_read_bool(np, "interrupt-controller"))
continue;
child = of_platform_device_create(np, NULL, NULL);
- if (!child) {
- ret = -ENOMEM;
- break;
- }
+ if (!child)
+ return -ENOMEM;
if (of_property_read_u32(child->dev.of_node, "num-pins",
&num_pins) < 0) {
dev_err(&pdev->dev, "No num-pins property\n");
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
- domain = platform_msi_create_device_domain(&child->dev, num_pins,
- mbigen_write_msg,
- &mbigen_domain_ops,
- mgn_chip);
- if (!domain) {
- ret = -ENOMEM;
- break;
- }
+ if (!mbigen_create_device_domain(&child->dev, num_pins, mgn_chip))
+ return -ENOMEM;
}
- if (ret)
- of_node_put(np);
-
- return ret;
+ return 0;
}
#ifdef CONFIG_ACPI
@@ -284,7 +267,6 @@ MODULE_DEVICE_TABLE(acpi, mbigen_acpi_match);
static int mbigen_acpi_create_domain(struct platform_device *pdev,
struct mbigen_device *mgn_chip)
{
- struct irq_domain *domain;
u32 num_pins = 0;
int ret;
@@ -315,11 +297,7 @@ static int mbigen_acpi_create_domain(struct platform_device *pdev,
if (ret || num_pins == 0)
return -EINVAL;
- domain = platform_msi_create_device_domain(&pdev->dev, num_pins,
- mbigen_write_msg,
- &mbigen_domain_ops,
- mgn_chip);
- if (!domain)
+ if (!mbigen_create_device_domain(&pdev->dev, num_pins, mgn_chip))
return -ENOMEM;
return 0;
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
index f88df39f4129..cd789fa51519 100644
--- a/drivers/irqchip/irq-meson-gpio.c
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -154,6 +154,10 @@ static const struct meson_gpio_irq_params c3_params = {
INIT_MESON_S4_COMMON_DATA(55)
};
+static const struct meson_gpio_irq_params t7_params = {
+ INIT_MESON_S4_COMMON_DATA(157)
+};
+
static const struct of_device_id meson_irq_gpio_matches[] __maybe_unused = {
{ .compatible = "amlogic,meson8-gpio-intc", .data = &meson8_params },
{ .compatible = "amlogic,meson8b-gpio-intc", .data = &meson8b_params },
@@ -165,6 +169,7 @@ static const struct of_device_id meson_irq_gpio_matches[] __maybe_unused = {
{ .compatible = "amlogic,meson-a1-gpio-intc", .data = &a1_params },
{ .compatible = "amlogic,meson-s4-gpio-intc", .data = &s4_params },
{ .compatible = "amlogic,c3-gpio-intc", .data = &c3_params },
+ { .compatible = "amlogic,t7-gpio-intc", .data = &t7_params },
{ }
};
@@ -173,7 +178,7 @@ struct meson_gpio_irq_controller {
void __iomem *base;
u32 channel_irqs[MAX_NUM_CHANNEL];
DECLARE_BITMAP(channel_map, MAX_NUM_CHANNEL);
- spinlock_t lock;
+ raw_spinlock_t lock;
};
static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl,
@@ -182,14 +187,14 @@ static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl,
unsigned long flags;
u32 tmp;
- spin_lock_irqsave(&ctl->lock, flags);
+ raw_spin_lock_irqsave(&ctl->lock, flags);
tmp = readl_relaxed(ctl->base + reg);
tmp &= ~mask;
tmp |= val;
writel_relaxed(tmp, ctl->base + reg);
- spin_unlock_irqrestore(&ctl->lock, flags);
+ raw_spin_unlock_irqrestore(&ctl->lock, flags);
}
static void meson_gpio_irq_init_dummy(struct meson_gpio_irq_controller *ctl)
@@ -239,12 +244,12 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
unsigned long flags;
unsigned int idx;
- spin_lock_irqsave(&ctl->lock, flags);
+ raw_spin_lock_irqsave(&ctl->lock, flags);
/* Find a free channel */
idx = find_first_zero_bit(ctl->channel_map, ctl->params->nr_channels);
if (idx >= ctl->params->nr_channels) {
- spin_unlock_irqrestore(&ctl->lock, flags);
+ raw_spin_unlock_irqrestore(&ctl->lock, flags);
pr_err("No channel available\n");
return -ENOSPC;
}
@@ -252,7 +257,7 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
/* Mark the channel as used */
set_bit(idx, ctl->channel_map);
- spin_unlock_irqrestore(&ctl->lock, flags);
+ raw_spin_unlock_irqrestore(&ctl->lock, flags);
/*
* Setup the mux of the channel to route the signal of the pad
@@ -562,7 +567,7 @@ static int meson_gpio_irq_of_init(struct device_node *node, struct device_node *
if (!ctl)
return -ENOMEM;
- spin_lock_init(&ctl->lock);
+ raw_spin_lock_init(&ctl->lock);
ctl->base = of_iomap(node, 0);
if (!ctl->base) {
@@ -603,5 +608,6 @@ IRQCHIP_MATCH("amlogic,meson-gpio-intc", meson_gpio_irq_of_init)
IRQCHIP_PLATFORM_DRIVER_END(meson_gpio_intc)
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_DESCRIPTION("Meson GPIO Interrupt Multiplexer driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:meson-gpio-intc");
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 76253e864f23..bca8053864b2 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -66,6 +66,87 @@ static struct gic_all_vpes_chip_data {
bool mask;
} gic_all_vpes_chip_data[GIC_NUM_LOCAL_INTRS];
+static int __gic_with_next_online_cpu(int prev)
+{
+ unsigned int cpu;
+
+ /* Discover the next online CPU */
+ cpu = cpumask_next(prev, cpu_online_mask);
+
+ /* If there isn't one, we're done */
+ if (cpu >= nr_cpu_ids)
+ return cpu;
+
+ /*
+	 * Move the access window to the next CPU's GIC local register block.
+ *
+ * Set GIC_VL_OTHER. Since the caller holds gic_lock nothing can
+ * clobber the written value.
+ */
+ write_gic_vl_other(mips_cm_vp_id(cpu));
+
+ return cpu;
+}
+
+static inline void gic_unlock_cluster(void)
+{
+ if (mips_cps_multicluster_cpus())
+ mips_cm_unlock_other();
+}
+
+/**
+ * for_each_online_cpu_gic() - Iterate over online CPUs, access local registers
+ * @cpu: An integer variable to hold the current CPU number
+ * @gic_lock: A pointer to the raw spinlock used as a guard
+ *
+ * Iterate over online CPUs & configure the other/redirect register region to
+ * access each CPU's GIC local register block, which can be accessed from the
+ * loop body using read_gic_vo_*() or write_gic_vo_*() accessor functions or
+ * their derivatives.
+ */
+#define for_each_online_cpu_gic(cpu, gic_lock) \
+ guard(raw_spinlock_irqsave)(gic_lock); \
+ for ((cpu) = __gic_with_next_online_cpu(-1); \
+ (cpu) < nr_cpu_ids; \
+ gic_unlock_cluster(), \
+ (cpu) = __gic_with_next_online_cpu(cpu))
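+
+/*
+ * Typical use, as in gic_mask_local_irq_all_vpes() below:
+ *
+ *	for_each_online_cpu_gic(cpu, &gic_lock)
+ *		write_gic_vo_rmask(BIT(intr));
+ */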
+
+/**
+ * gic_irq_lock_cluster() - Lock redirect block access to IRQ's cluster
+ * @d: struct irq_data corresponding to the interrupt we're interested in
+ *
+ * Locks redirect register block access to the global register block of the GIC
+ * within the remote cluster that the IRQ corresponding to @d is affine to,
+ * returning true when this redirect block setup & locking has been performed.
+ *
+ * If @d is affine to the local cluster then no locking is performed and this
+ * function will return false, indicating to the caller that it should access
+ * the local cluster's registers without the overhead of indirection through the
+ * redirect block.
+ *
+ * In summary, if this function returns true then the caller should access GIC
+ * registers using redirect register block accessors & then call
+ * mips_cm_unlock_other() when done. If this function returns false then the
+ * caller should trivially access GIC registers in the local cluster.
+ *
+ * Returns true if locking performed, else false.
+ */
+static bool gic_irq_lock_cluster(struct irq_data *d)
+{
+ unsigned int cpu, cl;
+
+ cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
+ BUG_ON(cpu >= NR_CPUS);
+
+ cl = cpu_cluster(&cpu_data[cpu]);
+ if (cl == cpu_cluster(&current_cpu_data))
+ return false;
+ if (mips_cps_numcores(cl) == 0)
+ return false;
+ mips_cm_lock_other(cl, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+ return true;
+}
+
static void gic_clear_pcpu_masks(unsigned int intr)
{
unsigned int i;
@@ -112,7 +193,12 @@ static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
{
irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));
- write_gic_wedge(GIC_WEDGE_RW | hwirq);
+ if (gic_irq_lock_cluster(d)) {
+ write_gic_redir_wedge(GIC_WEDGE_RW | hwirq);
+ mips_cm_unlock_other();
+ } else {
+ write_gic_wedge(GIC_WEDGE_RW | hwirq);
+ }
}
int gic_get_c0_compare_int(void)
@@ -180,7 +266,13 @@ static void gic_mask_irq(struct irq_data *d)
{
unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
- write_gic_rmask(intr);
+ if (gic_irq_lock_cluster(d)) {
+ write_gic_redir_rmask(intr);
+ mips_cm_unlock_other();
+ } else {
+ write_gic_rmask(intr);
+ }
+
gic_clear_pcpu_masks(intr);
}
@@ -189,7 +281,12 @@ static void gic_unmask_irq(struct irq_data *d)
unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
unsigned int cpu;
- write_gic_smask(intr);
+ if (gic_irq_lock_cluster(d)) {
+ write_gic_redir_smask(intr);
+ mips_cm_unlock_other();
+ } else {
+ write_gic_smask(intr);
+ }
gic_clear_pcpu_masks(intr);
cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
@@ -200,7 +297,12 @@ static void gic_ack_irq(struct irq_data *d)
{
unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
- write_gic_wedge(irq);
+ if (gic_irq_lock_cluster(d)) {
+ write_gic_redir_wedge(irq);
+ mips_cm_unlock_other();
+ } else {
+ write_gic_wedge(irq);
+ }
}
static int gic_set_type(struct irq_data *d, unsigned int type)
@@ -240,9 +342,16 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
break;
}
- change_gic_pol(irq, pol);
- change_gic_trig(irq, trig);
- change_gic_dual(irq, dual);
+ if (gic_irq_lock_cluster(d)) {
+ change_gic_redir_pol(irq, pol);
+ change_gic_redir_trig(irq, trig);
+ change_gic_redir_dual(irq, dual);
+ mips_cm_unlock_other();
+ } else {
+ change_gic_pol(irq, pol);
+ change_gic_trig(irq, trig);
+ change_gic_dual(irq, dual);
+ }
if (trig == GIC_TRIG_EDGE)
irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
@@ -260,25 +369,72 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
bool force)
{
unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
+ unsigned int cpu, cl, old_cpu, old_cl;
unsigned long flags;
- unsigned int cpu;
+ /*
+ * The GIC specifies that we can only route an interrupt to one VP(E),
+ * ie. CPU in Linux parlance, at a time. Therefore we always route to
+ * the first online CPU in the mask.
+ */
cpu = cpumask_first_and(cpumask, cpu_online_mask);
if (cpu >= NR_CPUS)
return -EINVAL;
- /* Assumption : cpumask refers to a single CPU */
- raw_spin_lock_irqsave(&gic_lock, flags);
+ old_cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
+ old_cl = cpu_cluster(&cpu_data[old_cpu]);
+ cl = cpu_cluster(&cpu_data[cpu]);
- /* Re-route this IRQ */
- write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu)));
+ raw_spin_lock_irqsave(&gic_lock, flags);
- /* Update the pcpu_masks */
- gic_clear_pcpu_masks(irq);
- if (read_gic_mask(irq))
- set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));
+ /*
+ * If we're moving affinity between clusters, stop routing the
+ * interrupt to any VP(E) in the old cluster.
+ */
+ if (cl != old_cl) {
+ if (gic_irq_lock_cluster(d)) {
+ write_gic_redir_map_vp(irq, 0);
+ mips_cm_unlock_other();
+ } else {
+ write_gic_map_vp(irq, 0);
+ }
+ }
+ /*
+ * Update effective affinity - after this gic_irq_lock_cluster() will
+ * begin operating on the new cluster.
+ */
irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+ /*
+ * If we're moving affinity between clusters, configure the interrupt
+ * trigger type in the new cluster.
+ */
+ if (cl != old_cl)
+ gic_set_type(d, irqd_get_trigger_type(d));
+
+ /* Route the interrupt to its new VP(E) */
+ if (gic_irq_lock_cluster(d)) {
+ write_gic_redir_map_pin(irq,
+ GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
+ write_gic_redir_map_vp(irq, BIT(mips_cm_vp_id(cpu)));
+
+ /* Update the pcpu_masks */
+ gic_clear_pcpu_masks(irq);
+ if (read_gic_redir_mask(irq))
+ set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));
+
+ mips_cm_unlock_other();
+ } else {
+ write_gic_map_pin(irq, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
+ write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu)));
+
+ /* Update the pcpu_masks */
+ gic_clear_pcpu_masks(irq);
+ if (read_gic_mask(irq))
+ set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));
+ }
+
raw_spin_unlock_irqrestore(&gic_lock, flags);
return IRQ_SET_MASK_OK;
@@ -350,37 +506,33 @@ static struct irq_chip gic_local_irq_controller = {
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
struct gic_all_vpes_chip_data *cd;
- unsigned long flags;
int intr, cpu;
+ if (!mips_cps_multicluster_cpus())
+ return;
+
intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
cd = irq_data_get_irq_chip_data(d);
cd->mask = false;
- raw_spin_lock_irqsave(&gic_lock, flags);
- for_each_online_cpu(cpu) {
- write_gic_vl_other(mips_cm_vp_id(cpu));
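+	/*
+	 * for_each_online_cpu_gic() is a helper introduced alongside this
+	 * change; its assumed semantics are that it takes @gic_lock and
+	 * selects each online CPU's VP(E) via the GIC "other" region,
+	 * replacing the explicit write_gic_vl_other() loop above.
+	 */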
+ for_each_online_cpu_gic(cpu, &gic_lock)
write_gic_vo_rmask(BIT(intr));
- }
- raw_spin_unlock_irqrestore(&gic_lock, flags);
}
static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
struct gic_all_vpes_chip_data *cd;
- unsigned long flags;
int intr, cpu;
+ if (!mips_cps_multicluster_cpus())
+ return;
+
intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
cd = irq_data_get_irq_chip_data(d);
cd->mask = true;
- raw_spin_lock_irqsave(&gic_lock, flags);
- for_each_online_cpu(cpu) {
- write_gic_vl_other(mips_cm_vp_id(cpu));
+ for_each_online_cpu_gic(cpu, &gic_lock)
write_gic_vo_smask(BIT(intr));
- }
- raw_spin_unlock_irqrestore(&gic_lock, flags);
}
static void gic_all_vpes_irq_cpu_online(void)
@@ -436,11 +588,21 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
unsigned long flags;
data = irq_get_irq_data(virq);
+ irq_data_update_effective_affinity(data, cpumask_of(cpu));
raw_spin_lock_irqsave(&gic_lock, flags);
- write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
- write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
- irq_data_update_effective_affinity(data, cpumask_of(cpu));
+
+ /* Route the interrupt to its VP(E) */
+ if (gic_irq_lock_cluster(data)) {
+ write_gic_redir_map_pin(intr,
+ GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
+ write_gic_redir_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
+ mips_cm_unlock_other();
+ } else {
+ write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
+ write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
+ }
+
raw_spin_unlock_irqrestore(&gic_lock, flags);
return 0;
@@ -469,7 +631,6 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
irq_hw_number_t hwirq)
{
struct gic_all_vpes_chip_data *cd;
- unsigned long flags;
unsigned int intr;
int err, cpu;
u32 map;
@@ -533,12 +694,10 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
if (!gic_local_irq_is_routable(intr))
return -EPERM;
- raw_spin_lock_irqsave(&gic_lock, flags);
- for_each_online_cpu(cpu) {
- write_gic_vl_other(mips_cm_vp_id(cpu));
- write_gic_vo_map(mips_gic_vx_map_reg(intr), map);
+ if (mips_cps_multicluster_cpus()) {
+ for_each_online_cpu_gic(cpu, &gic_lock)
+ write_gic_vo_map(mips_gic_vx_map_reg(intr), map);
}
- raw_spin_unlock_irqrestore(&gic_lock, flags);
return 0;
}
@@ -621,6 +780,9 @@ static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq,
if (ret)
goto error;
+ /* Set affinity to cpu. */
+ irq_data_update_effective_affinity(irq_get_irq_data(virq + i),
+ cpumask_of(cpu));
ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING);
if (ret)
goto error;
@@ -734,7 +896,7 @@ static int gic_cpu_startup(unsigned int cpu)
static int __init gic_of_init(struct device_node *node,
struct device_node *parent)
{
- unsigned int cpu_vec, i, gicconfig;
+ unsigned int cpu_vec, i, gicconfig, cl, nclusters;
unsigned long reserved;
phys_addr_t gic_base;
struct resource res;
@@ -815,11 +977,32 @@ static int __init gic_of_init(struct device_node *node,
board_bind_eic_interrupt = &gic_bind_eic_interrupt;
- /* Setup defaults */
- for (i = 0; i < gic_shared_intrs; i++) {
- change_gic_pol(i, GIC_POL_ACTIVE_HIGH);
- change_gic_trig(i, GIC_TRIG_LEVEL);
- write_gic_rmask(i);
+ /*
+ * Initialise each cluster's GIC shared registers to sane default
+ * values. Otherwise the IPI setup done here would be erased if this
+ * code were moved into gic_cpu_startup() and run for each CPU.
+ */
+ nclusters = mips_cps_numclusters();
+ for (cl = 0; cl < nclusters; cl++) {
+ if (cl == cpu_cluster(&current_cpu_data)) {
+ for (i = 0; i < gic_shared_intrs; i++) {
+ change_gic_pol(i, GIC_POL_ACTIVE_HIGH);
+ change_gic_trig(i, GIC_TRIG_LEVEL);
+ write_gic_rmask(i);
+ }
+ } else if (mips_cps_numcores(cl) != 0) {
+ mips_cm_lock_other(cl, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+ for (i = 0; i < gic_shared_intrs; i++) {
+ change_gic_redir_pol(i, GIC_POL_ACTIVE_HIGH);
+ change_gic_redir_trig(i, GIC_TRIG_LEVEL);
+ write_gic_redir_rmask(i);
+ }
+ mips_cm_unlock_other();
+ } else {
+ pr_warn("No CPU cores on cluster %d, skipping it\n", cl);
+ }
}
return cpuhp_setup_state(CPUHP_AP_IRQ_MIPS_GIC_STARTING,
diff --git a/drivers/irqchip/irq-mscc-ocelot.c b/drivers/irqchip/irq-mscc-ocelot.c
index 4d0c3532dbe7..3dc745b14caf 100644
--- a/drivers/irqchip/irq-mscc-ocelot.c
+++ b/drivers/irqchip/irq-mscc-ocelot.c
@@ -37,7 +37,7 @@ static struct chip_props ocelot_props = {
.reg_off_ena_clr = 0x1c,
.reg_off_ena_set = 0x20,
.reg_off_ident = 0x38,
- .reg_off_trigger = 0x5c,
+ .reg_off_trigger = 0x4,
.n_irq = 24,
};
@@ -70,7 +70,7 @@ static struct chip_props jaguar2_props = {
.reg_off_ena_clr = 0x1c,
.reg_off_ena_set = 0x20,
.reg_off_ident = 0x38,
- .reg_off_trigger = 0x5c,
+ .reg_off_trigger = 0x4,
.n_irq = 29,
};
@@ -84,6 +84,12 @@ static void ocelot_irq_unmask(struct irq_data *data)
u32 val;
irq_gc_lock(gc);
+ /*
+ * Clear sticky bits for edge mode interrupts.
+ * Serval has only one trigger register replication, but the adjacent
+ * register is always read as zero, so there's no need to handle this
+ * case separately.
+ */
val = irq_reg_readl(gc, ICPU_CFG_INTR_INTR_TRIGGER(p, 0)) |
irq_reg_readl(gc, ICPU_CFG_INTR_INTR_TRIGGER(p, 1));
if (!(val & mask))
diff --git a/drivers/irqchip/irq-msi-lib.c b/drivers/irqchip/irq-msi-lib.c
new file mode 100644
index 000000000000..d8e29fc0d406
--- /dev/null
+++ b/drivers/irqchip/irq-msi-lib.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2022 Linutronix GmbH
+// Copyright (C) 2022 Intel
+
+#include <linux/export.h>
+
+#include "irq-msi-lib.h"
+
+/**
+ * msi_lib_init_dev_msi_info - Domain info setup for MSI domains
+ * @dev: The device for which the domain is created
+ * @domain: The domain providing this callback
+ * @real_parent: The real parent domain of the domain to be initialized
+ * which might be a domain built on top of @domain or
+ * @domain itself
+ * @info: The domain info for the domain to be initialized
+ *
+ * This function is to be used for all types of MSI domains above the root
+ * parent domain and any intermediates. The topmost parent domain specific
+ * functionality is determined via @real_parent.
+ *
+ * All intermediate domains between the root and the device domain must
+ * have either msi_parent_ops.init_dev_msi_info = msi_parent_init_dev_msi_info
+ * or invoke it down the line.
+ */
+bool msi_lib_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *real_parent,
+ struct msi_domain_info *info)
+{
+ const struct msi_parent_ops *pops = real_parent->msi_parent_ops;
+ u32 required_flags;
+
+ /* Parent ops available? */
+ if (WARN_ON_ONCE(!pops))
+ return false;
+
+ /*
+ * MSI parent domain specific settings. For now there is only the
+ * root parent domain, e.g. NEXUS, acting as a MSI parent, but it is
+ * possible to stack MSI parents. See x86 vector -> irq remapping
+ */
+ if (domain->bus_token == pops->bus_select_token) {
+ if (WARN_ON_ONCE(domain != real_parent))
+ return false;
+ } else {
+ WARN_ON_ONCE(1);
+ return false;
+ }
+
+ required_flags = pops->required_flags;
+
+ /* Is the target domain bus token supported? */
+ switch (info->bus_token) {
+ case DOMAIN_BUS_PCI_DEVICE_MSI:
+ case DOMAIN_BUS_PCI_DEVICE_MSIX:
+ if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PCI_MSI)))
+ return false;
+
+ break;
+ case DOMAIN_BUS_DEVICE_MSI:
+ /*
+ * Per device MSI should never have any MSI feature bits
+ * set. Its sole purpose is to create a dumb interrupt
+ * chip which has a device specific irq_write_msi_msg()
+ * callback.
+ */
+ if (WARN_ON_ONCE(info->flags))
+ return false;
+
+ /* Core managed MSI descriptors */
+ info->flags = MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS | MSI_FLAG_FREE_MSI_DESCS;
+ fallthrough;
+ case DOMAIN_BUS_WIRED_TO_MSI:
+ /* Remove PCI specific flags */
+ required_flags &= ~MSI_FLAG_PCI_MSI_MASK_PARENT;
+ break;
+ default:
+ /*
+ * This should never be reached. See
+ * msi_lib_irq_domain_select()
+ */
+ WARN_ON_ONCE(1);
+ return false;
+ }
+
+ /*
+ * Mask out the domain specific MSI feature flags which are not
+ * supported by the real parent.
+ */
+ info->flags &= pops->supported_flags;
+ /* Enforce the required flags */
+ info->flags |= required_flags;
+
+ /* Chip updates for all child bus types */
+ if (!info->chip->irq_eoi)
+ info->chip->irq_eoi = irq_chip_eoi_parent;
+ if (!info->chip->irq_ack)
+ info->chip->irq_ack = irq_chip_ack_parent;
+
+ /*
+ * The device MSI domain can never have a set affinity callback. It
+ * always has to rely on the parent domain to handle affinity
+ * settings. The device MSI domain just has to write the resulting
+ * MSI message into the hardware, which is the whole purpose of the
+ * device MSI domain, aside from mask/unmask, which is provided e.g.
+ * by PCI/MSI device domains.
+ */
+ info->chip->irq_set_affinity = msi_domain_set_affinity;
+ return true;
+}
+EXPORT_SYMBOL_GPL(msi_lib_init_dev_msi_info);
+
+/**
+ * msi_lib_irq_domain_select - Shared select function for NEXUS domains
+ * @d: Pointer to the irq domain on which select is invoked
+ * @fwspec: Firmware spec describing what is searched
+ * @bus_token: The bus token for which a matching irq domain is looked up
+ *
+ * Returns: %0 if @d is not what is being looked for
+ *
+ * %1 if @d is either the domain which is directly searched for or
+ * if @d is providing the parent MSI domain for the functionality
+ * requested with @bus_token.
+ */
+int msi_lib_irq_domain_select(struct irq_domain *d, struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token)
+{
+ const struct msi_parent_ops *ops = d->msi_parent_ops;
+ u32 busmask = BIT(bus_token);
+
+ if (!ops)
+ return 0;
+
+ if (fwspec->fwnode != d->fwnode || fwspec->param_count != 0)
+ return 0;
+
+ /* Handle pure domain searches */
+ if (bus_token == ops->bus_select_token)
+ return 1;
+
+ return !!(ops->bus_select_mask & busmask);
+}
+EXPORT_SYMBOL_GPL(msi_lib_irq_domain_select);
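+
+/*
+ * Usage sketch (illustrative only; "foo" is a placeholder driver name,
+ * mirroring the mvebu-gicp conversion later in this series): a parent
+ * irqchip wires these helpers up by routing .select of its domain ops
+ * to msi_lib_irq_domain_select() and publishing msi_parent_ops whose
+ * .init_dev_msi_info is msi_lib_init_dev_msi_info():
+ *
+ *	static const struct irq_domain_ops foo_domain_ops = {
+ *		.select	= msi_lib_irq_domain_select,
+ *		.alloc	= foo_irq_domain_alloc,
+ *		.free	= foo_irq_domain_free,
+ *	};
+ *
+ *	static const struct msi_parent_ops foo_msi_parent_ops = {
+ *		.supported_flags	= MSI_GENERIC_FLAGS_MASK,
+ *		.required_flags		= MSI_FLAG_USE_DEF_DOM_OPS |
+ *					  MSI_FLAG_USE_DEF_CHIP_OPS,
+ *		.bus_select_token	= DOMAIN_BUS_GENERIC_MSI,
+ *		.bus_select_mask	= MATCH_PLATFORM_MSI,
+ *		.prefix			= "FOO-",
+ *		.init_dev_msi_info	= msi_lib_init_dev_msi_info,
+ *	};
+ *
+ * The parent domain is then flagged accordingly:
+ *
+ *	irq_domain_update_bus_token(domain, DOMAIN_BUS_GENERIC_MSI);
+ *	domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
+ *	domain->msi_parent_ops = &foo_msi_parent_ops;
+ */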
diff --git a/drivers/irqchip/irq-msi-lib.h b/drivers/irqchip/irq-msi-lib.h
new file mode 100644
index 000000000000..681ceabb7bc7
--- /dev/null
+++ b/drivers/irqchip/irq-msi-lib.h
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2022 Linutronix GmbH
+// Copyright (C) 2022 Intel
+
+#ifndef _DRIVERS_IRQCHIP_IRQ_MSI_LIB_H
+#define _DRIVERS_IRQCHIP_IRQ_MSI_LIB_H
+
+#include <linux/bits.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+
+#ifdef CONFIG_PCI_MSI
+#define MATCH_PCI_MSI BIT(DOMAIN_BUS_PCI_MSI)
+#else
+#define MATCH_PCI_MSI (0)
+#endif
+
+#define MATCH_PLATFORM_MSI BIT(DOMAIN_BUS_PLATFORM_MSI)
+
+int msi_lib_irq_domain_select(struct irq_domain *d, struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token);
+
+bool msi_lib_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *real_parent,
+ struct msi_domain_info *info);
+
+#endif /* _DRIVERS_IRQCHIP_IRQ_MSI_LIB_H */
diff --git a/drivers/irqchip/irq-mvebu-gicp.c b/drivers/irqchip/irq-mvebu-gicp.c
index c43a345061d5..2b6183919ea4 100644
--- a/drivers/irqchip/irq-mvebu-gicp.c
+++ b/drivers/irqchip/irq-mvebu-gicp.c
@@ -17,6 +17,8 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include "irq-msi-lib.h"
+
#include <dt-bindings/interrupt-controller/arm-gic.h>
#define GICP_SETSPI_NSR_OFFSET 0x0
@@ -145,32 +147,32 @@ static void gicp_irq_domain_free(struct irq_domain *domain,
}
static const struct irq_domain_ops gicp_domain_ops = {
+ .select = msi_lib_irq_domain_select,
.alloc = gicp_irq_domain_alloc,
.free = gicp_irq_domain_free,
};
-static struct irq_chip gicp_msi_irq_chip = {
- .name = "GICP",
- .irq_set_type = irq_chip_set_type_parent,
- .flags = IRQCHIP_SUPPORTS_LEVEL_MSI,
-};
+#define GICP_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS)
-static struct msi_domain_ops gicp_msi_ops = {
-};
+#define GICP_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_LEVEL_CAPABLE)
-static struct msi_domain_info gicp_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_LEVEL_CAPABLE),
- .ops = &gicp_msi_ops,
- .chip = &gicp_msi_irq_chip,
+static const struct msi_parent_ops gicp_msi_parent_ops = {
+ .supported_flags = GICP_MSI_FLAGS_SUPPORTED,
+ .required_flags = GICP_MSI_FLAGS_REQUIRED,
+ .bus_select_token = DOMAIN_BUS_GENERIC_MSI,
+ .bus_select_mask = MATCH_PLATFORM_MSI,
+ .prefix = "GICP-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static int mvebu_gicp_probe(struct platform_device *pdev)
{
- struct mvebu_gicp *gicp;
- struct irq_domain *inner_domain, *plat_domain, *parent_domain;
+ struct irq_domain *inner_domain, *parent_domain;
struct device_node *node = pdev->dev.of_node;
struct device_node *irq_parent_dn;
+ struct mvebu_gicp *gicp;
int ret, i;
gicp = devm_kzalloc(&pdev->dev, sizeof(*gicp), GFP_KERNEL);
@@ -234,17 +236,9 @@ static int mvebu_gicp_probe(struct platform_device *pdev)
if (!inner_domain)
return -ENOMEM;
-
- plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node),
- &gicp_msi_domain_info,
- inner_domain);
- if (!plat_domain) {
- irq_domain_remove(inner_domain);
- return -ENOMEM;
- }
-
- platform_set_drvdata(pdev, gicp);
-
+ irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_GENERIC_MSI);
+ inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
+ inner_domain->msi_parent_ops = &gicp_msi_parent_ops;
return 0;
}
diff --git a/drivers/irqchip/irq-mvebu-icu.c b/drivers/irqchip/irq-mvebu-icu.c
index 3c77acc7ec6a..4eebed39880a 100644
--- a/drivers/irqchip/irq-mvebu-icu.c
+++ b/drivers/irqchip/irq-mvebu-icu.c
@@ -20,6 +20,8 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include "irq-msi-lib.h"
+
#include <dt-bindings/interrupt-controller/mvebu-icu.h>
/* ICU registers */
@@ -60,99 +62,14 @@ struct mvebu_icu_msi_data {
const struct mvebu_icu_subset_data *subset_data;
};
-struct mvebu_icu_irq_data {
- struct mvebu_icu *icu;
- unsigned int icu_group;
- unsigned int type;
-};
-
static DEFINE_STATIC_KEY_FALSE(legacy_bindings);
-static void mvebu_icu_init(struct mvebu_icu *icu,
- struct mvebu_icu_msi_data *msi_data,
- struct msi_msg *msg)
-{
- const struct mvebu_icu_subset_data *subset = msi_data->subset_data;
-
- if (atomic_cmpxchg(&msi_data->initialized, false, true))
- return;
-
- /* Set 'SET' ICU SPI message address in AP */
- writel_relaxed(msg[0].address_hi, icu->base + subset->offset_set_ah);
- writel_relaxed(msg[0].address_lo, icu->base + subset->offset_set_al);
-
- if (subset->icu_group != ICU_GRP_NSR)
- return;
-
- /* Set 'CLEAR' ICU SPI message address in AP (level-MSI only) */
- writel_relaxed(msg[1].address_hi, icu->base + subset->offset_clr_ah);
- writel_relaxed(msg[1].address_lo, icu->base + subset->offset_clr_al);
-}
-
-static void mvebu_icu_write_msg(struct msi_desc *desc, struct msi_msg *msg)
-{
- struct irq_data *d = irq_get_irq_data(desc->irq);
- struct mvebu_icu_msi_data *msi_data = platform_msi_get_host_data(d->domain);
- struct mvebu_icu_irq_data *icu_irqd = d->chip_data;
- struct mvebu_icu *icu = icu_irqd->icu;
- unsigned int icu_int;
-
- if (msg->address_lo || msg->address_hi) {
- /* One off initialization per domain */
- mvebu_icu_init(icu, msi_data, msg);
- /* Configure the ICU with irq number & type */
- icu_int = msg->data | ICU_INT_ENABLE;
- if (icu_irqd->type & IRQ_TYPE_EDGE_RISING)
- icu_int |= ICU_IS_EDGE;
- icu_int |= icu_irqd->icu_group << ICU_GROUP_SHIFT;
- } else {
- /* De-configure the ICU */
- icu_int = 0;
- }
-
- writel_relaxed(icu_int, icu->base + ICU_INT_CFG(d->hwirq));
-
- /*
- * The SATA unit has 2 ports, and a dedicated ICU entry per
- * port. The ahci sata driver supports only one irq interrupt
- * per SATA unit. To solve this conflict, we configure the 2
- * SATA wired interrupts in the south bridge into 1 GIC
- * interrupt in the north bridge. Even if only a single port
- * is enabled, if sata node is enabled, both interrupts are
- * configured (regardless of which port is actually in use).
- */
- if (d->hwirq == ICU_SATA0_ICU_ID || d->hwirq == ICU_SATA1_ICU_ID) {
- writel_relaxed(icu_int,
- icu->base + ICU_INT_CFG(ICU_SATA0_ICU_ID));
- writel_relaxed(icu_int,
- icu->base + ICU_INT_CFG(ICU_SATA1_ICU_ID));
- }
-}
-
-static struct irq_chip mvebu_icu_nsr_chip = {
- .name = "ICU-NSR",
- .irq_mask = irq_chip_mask_parent,
- .irq_unmask = irq_chip_unmask_parent,
- .irq_eoi = irq_chip_eoi_parent,
- .irq_set_type = irq_chip_set_type_parent,
- .irq_set_affinity = irq_chip_set_affinity_parent,
-};
-
-static struct irq_chip mvebu_icu_sei_chip = {
- .name = "ICU-SEI",
- .irq_ack = irq_chip_ack_parent,
- .irq_mask = irq_chip_mask_parent,
- .irq_unmask = irq_chip_unmask_parent,
- .irq_set_type = irq_chip_set_type_parent,
- .irq_set_affinity = irq_chip_set_affinity_parent,
-};
-
-static int
-mvebu_icu_irq_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
+static int mvebu_icu_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
unsigned long *hwirq, unsigned int *type)
{
unsigned int param_count = static_branch_unlikely(&legacy_bindings) ? 3 : 2;
- struct mvebu_icu_msi_data *msi_data = platform_msi_get_host_data(d);
+ struct msi_domain_info *info = d->host_data;
+ struct mvebu_icu_msi_data *msi_data = info->chip_data;
struct mvebu_icu *icu = msi_data->icu;
/* Check the count of the parameters in dt */
@@ -192,81 +109,126 @@ mvebu_icu_irq_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
return 0;
}
-static int
-mvebu_icu_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs, void *args)
+static void mvebu_icu_init(struct mvebu_icu *icu,
+ struct mvebu_icu_msi_data *msi_data,
+ struct msi_msg *msg)
{
- int err;
- unsigned long hwirq;
- struct irq_fwspec *fwspec = args;
- struct mvebu_icu_msi_data *msi_data = platform_msi_get_host_data(domain);
- struct mvebu_icu *icu = msi_data->icu;
- struct mvebu_icu_irq_data *icu_irqd;
- struct irq_chip *chip = &mvebu_icu_nsr_chip;
+ const struct mvebu_icu_subset_data *subset = msi_data->subset_data;
- icu_irqd = kmalloc(sizeof(*icu_irqd), GFP_KERNEL);
- if (!icu_irqd)
- return -ENOMEM;
+ if (atomic_cmpxchg(&msi_data->initialized, false, true))
+ return;
- err = mvebu_icu_irq_domain_translate(domain, fwspec, &hwirq,
- &icu_irqd->type);
- if (err) {
- dev_err(icu->dev, "failed to translate ICU parameters\n");
- goto free_irqd;
- }
+ /* Set 'SET' ICU SPI message address in AP */
+ writel_relaxed(msg[0].address_hi, icu->base + subset->offset_set_ah);
+ writel_relaxed(msg[0].address_lo, icu->base + subset->offset_set_al);
- if (static_branch_unlikely(&legacy_bindings))
- icu_irqd->icu_group = fwspec->param[0];
- else
- icu_irqd->icu_group = msi_data->subset_data->icu_group;
- icu_irqd->icu = icu;
+ if (subset->icu_group != ICU_GRP_NSR)
+ return;
- err = platform_msi_device_domain_alloc(domain, virq, nr_irqs);
- if (err) {
- dev_err(icu->dev, "failed to allocate ICU interrupt in parent domain\n");
- goto free_irqd;
- }
+ /* Set 'CLEAR' ICU SPI message address in AP (level-MSI only) */
+ writel_relaxed(msg[1].address_hi, icu->base + subset->offset_clr_ah);
+ writel_relaxed(msg[1].address_lo, icu->base + subset->offset_clr_al);
+}
- /* Make sure there is no interrupt left pending by the firmware */
- err = irq_set_irqchip_state(virq, IRQCHIP_STATE_PENDING, false);
- if (err)
- goto free_msi;
+static int mvebu_icu_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
+ unsigned int virq, irq_hw_number_t hwirq, msi_alloc_info_t *arg)
+{
+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip, info->chip_data);
+ return irq_set_irqchip_state(virq, IRQCHIP_STATE_PENDING, false);
+}
- if (icu_irqd->icu_group == ICU_GRP_SEI)
- chip = &mvebu_icu_sei_chip;
+static void mvebu_icu_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
+{
+ arg->desc = desc;
+ arg->hwirq = (u32)desc->data.icookie.value;
+}
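+
+/*
+ * Cookie layout, inferred from the code here: the wired MSI descriptor's
+ * instance cookie is a 64-bit container whose bits [31:0] hold the ICU
+ * hwirq consumed above, while bits [63:32] hold the IRQ trigger type
+ * that mvebu_icu_write_msi_msg() extracts below.
+ */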
- err = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
- chip, icu_irqd);
- if (err) {
- dev_err(icu->dev, "failed to set the data to IRQ domain\n");
- goto free_msi;
+static void mvebu_icu_write_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ struct mvebu_icu_msi_data *msi_data = d->chip_data;
+ unsigned int icu_group = msi_data->subset_data->icu_group;
+ struct msi_desc *desc = irq_data_get_msi_desc(d);
+ struct mvebu_icu *icu = msi_data->icu;
+ unsigned int type;
+ u32 icu_int;
+
+ if (msg->address_lo || msg->address_hi) {
+ /* One off initialization per domain */
+ mvebu_icu_init(icu, msi_data, msg);
+ /* Configure the ICU with irq number & type */
+ icu_int = msg->data | ICU_INT_ENABLE;
+ type = (unsigned int)(desc->data.icookie.value >> 32);
+ if (type & IRQ_TYPE_EDGE_RISING)
+ icu_int |= ICU_IS_EDGE;
+ icu_int |= icu_group << ICU_GROUP_SHIFT;
+ } else {
+ /* De-configure the ICU */
+ icu_int = 0;
}
- return 0;
+ writel_relaxed(icu_int, icu->base + ICU_INT_CFG(d->hwirq));
-free_msi:
- platform_msi_device_domain_free(domain, virq, nr_irqs);
-free_irqd:
- kfree(icu_irqd);
- return err;
+ /*
+ * The SATA unit has 2 ports, and a dedicated ICU entry per
+ * port. The ahci sata driver supports only one interrupt
+ * per SATA unit. To solve this conflict, we configure the 2
+ * SATA wired interrupts in the south bridge into 1 GIC
+ * interrupt in the north bridge. Whenever the SATA node is
+ * enabled, both interrupts are configured, even when only a
+ * single port is actually in use.
+ */
+ if (d->hwirq == ICU_SATA0_ICU_ID || d->hwirq == ICU_SATA1_ICU_ID) {
+ writel_relaxed(icu_int, icu->base + ICU_INT_CFG(ICU_SATA0_ICU_ID));
+ writel_relaxed(icu_int, icu->base + ICU_INT_CFG(ICU_SATA1_ICU_ID));
+ }
}
-static void
-mvebu_icu_irq_domain_free(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs)
-{
- struct irq_data *d = irq_get_irq_data(virq);
- struct mvebu_icu_irq_data *icu_irqd = d->chip_data;
+static const struct msi_domain_template mvebu_icu_nsr_msi_template = {
+ .chip = {
+ .name = "ICU-NSR",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_write_msi_msg = mvebu_icu_write_msi_msg,
+ .flags = IRQCHIP_SUPPORTS_LEVEL_MSI,
+ },
- kfree(icu_irqd);
+ .ops = {
+ .msi_translate = mvebu_icu_translate,
+ .msi_init = mvebu_icu_msi_init,
+ .set_desc = mvebu_icu_set_desc,
+ },
- platform_msi_device_domain_free(domain, virq, nr_irqs);
-}
+ .info = {
+ .bus_token = DOMAIN_BUS_WIRED_TO_MSI,
+ .flags = MSI_FLAG_LEVEL_CAPABLE |
+ MSI_FLAG_USE_DEV_FWNODE,
+ },
+};
+
+static const struct msi_domain_template mvebu_icu_sei_msi_template = {
+ .chip = {
+ .name = "ICU-SEI",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_ack = irq_chip_ack_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_write_msi_msg = mvebu_icu_write_msi_msg,
+ .flags = IRQCHIP_SUPPORTS_LEVEL_MSI,
+ },
-static const struct irq_domain_ops mvebu_icu_domain_ops = {
- .translate = mvebu_icu_irq_domain_translate,
- .alloc = mvebu_icu_irq_domain_alloc,
- .free = mvebu_icu_irq_domain_free,
+ .ops = {
+ .msi_translate = mvebu_icu_translate,
+ .msi_init = mvebu_icu_msi_init,
+ .set_desc = mvebu_icu_set_desc,
+ },
+
+ .info = {
+ .bus_token = DOMAIN_BUS_WIRED_TO_MSI,
+ .flags = MSI_FLAG_LEVEL_CAPABLE |
+ MSI_FLAG_USE_DEV_FWNODE,
+ },
};
static const struct mvebu_icu_subset_data mvebu_icu_nsr_subset_data = {
@@ -297,10 +259,10 @@ static const struct of_device_id mvebu_icu_subset_of_match[] = {
static int mvebu_icu_subset_probe(struct platform_device *pdev)
{
+ const struct msi_domain_template *tmpl;
struct mvebu_icu_msi_data *msi_data;
- struct device_node *msi_parent_dn;
struct device *dev = &pdev->dev;
- struct irq_domain *irq_domain;
+ bool sei;
msi_data = devm_kzalloc(dev, sizeof(*msi_data), GFP_KERNEL);
if (!msi_data)
@@ -314,20 +276,18 @@ static int mvebu_icu_subset_probe(struct platform_device *pdev)
msi_data->subset_data = of_device_get_match_data(dev);
}
- dev->msi.domain = of_msi_get_domain(dev, dev->of_node,
- DOMAIN_BUS_PLATFORM_MSI);
+ dev->msi.domain = of_msi_get_domain(dev, dev->of_node, DOMAIN_BUS_PLATFORM_MSI);
if (!dev->msi.domain)
return -EPROBE_DEFER;
- msi_parent_dn = irq_domain_get_of_node(dev->msi.domain);
- if (!msi_parent_dn)
+ if (!irq_domain_get_of_node(dev->msi.domain))
return -ENODEV;
- irq_domain = platform_msi_create_device_tree_domain(dev, ICU_MAX_IRQS,
- mvebu_icu_write_msg,
- &mvebu_icu_domain_ops,
- msi_data);
- if (!irq_domain) {
+ sei = msi_data->subset_data->icu_group == ICU_GRP_SEI;
+ tmpl = sei ? &mvebu_icu_sei_msi_template : &mvebu_icu_nsr_msi_template;
+
+ if (!msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN, tmpl,
+ ICU_MAX_IRQS, NULL, msi_data)) {
dev_err(dev, "Failed to create ICU MSI domain\n");
return -ENOMEM;
}
diff --git a/drivers/irqchip/irq-mvebu-odmi.c b/drivers/irqchip/irq-mvebu-odmi.c
index 108091533e10..ff19bfd258dc 100644
--- a/drivers/irqchip/irq-mvebu-odmi.c
+++ b/drivers/irqchip/irq-mvebu-odmi.c
@@ -17,6 +17,9 @@
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/slab.h>
+
+#include "irq-msi-lib.h"
+
#include <dt-bindings/interrupt-controller/arm-gic.h>
#define GICP_ODMIN_SET 0x40
@@ -141,27 +144,29 @@ static void odmi_irq_domain_free(struct irq_domain *domain,
}
static const struct irq_domain_ops odmi_domain_ops = {
+ .select = msi_lib_irq_domain_select,
.alloc = odmi_irq_domain_alloc,
.free = odmi_irq_domain_free,
};
-static struct irq_chip odmi_msi_irq_chip = {
- .name = "ODMI",
-};
+#define ODMI_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS)
-static struct msi_domain_ops odmi_msi_ops = {
-};
+#define ODMI_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK)
-static struct msi_domain_info odmi_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
- .ops = &odmi_msi_ops,
- .chip = &odmi_msi_irq_chip,
+static const struct msi_parent_ops odmi_msi_parent_ops = {
+ .supported_flags = ODMI_MSI_FLAGS_SUPPORTED,
+ .required_flags = ODMI_MSI_FLAGS_REQUIRED,
+ .bus_select_token = DOMAIN_BUS_GENERIC_MSI,
+ .bus_select_mask = MATCH_PLATFORM_MSI,
+ .prefix = "ODMI-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static int __init mvebu_odmi_init(struct device_node *node,
struct device_node *parent)
{
- struct irq_domain *parent_domain, *inner_domain, *plat_domain;
+ struct irq_domain *parent_domain, *inner_domain;
int ret, i;
if (of_property_read_u32(node, "marvell,odmi-frames", &odmis_count))
@@ -208,18 +213,12 @@ static int __init mvebu_odmi_init(struct device_node *node,
goto err_unmap;
}
- plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node),
- &odmi_msi_domain_info,
- inner_domain);
- if (!plat_domain) {
- ret = -ENOMEM;
- goto err_remove_inner;
- }
+ irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_GENERIC_MSI);
+ inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
+ inner_domain->msi_parent_ops = &odmi_msi_parent_ops;
return 0;
-err_remove_inner:
- irq_domain_remove(inner_domain);
err_unmap:
for (i = 0; i < odmis_count; i++) {
struct odmi_data *odmi = &odmis[i];
diff --git a/drivers/irqchip/irq-mvebu-pic.c b/drivers/irqchip/irq-mvebu-pic.c
index ef3d3646ccc2..3888b7585981 100644
--- a/drivers/irqchip/irq-mvebu-pic.c
+++ b/drivers/irqchip/irq-mvebu-pic.c
@@ -71,7 +71,7 @@ static void mvebu_pic_print_chip(struct irq_data *d, struct seq_file *p)
{
struct mvebu_pic *pic = irq_data_get_irq_chip_data(d);
- seq_printf(p, dev_name(&pic->pdev->dev));
+ seq_puts(p, dev_name(&pic->pdev->dev));
}
static const struct irq_chip mvebu_pic_chip = {
@@ -167,14 +167,12 @@ static int mvebu_pic_probe(struct platform_device *pdev)
return 0;
}
-static int mvebu_pic_remove(struct platform_device *pdev)
+static void mvebu_pic_remove(struct platform_device *pdev)
{
struct mvebu_pic *pic = platform_get_drvdata(pdev);
on_each_cpu(mvebu_pic_disable_percpu_irq, pic, 1);
irq_domain_remove(pic->domain);
-
- return 0;
}
static const struct of_device_id mvebu_pic_of_match[] = {
@@ -184,17 +182,18 @@ static const struct of_device_id mvebu_pic_of_match[] = {
MODULE_DEVICE_TABLE(of, mvebu_pic_of_match);
static struct platform_driver mvebu_pic_driver = {
- .probe = mvebu_pic_probe,
- .remove = mvebu_pic_remove,
+ .probe = mvebu_pic_probe,
+ .remove = mvebu_pic_remove,
.driver = {
- .name = "mvebu-pic",
- .of_match_table = mvebu_pic_of_match,
+ .name = "mvebu-pic",
+ .of_match_table = mvebu_pic_of_match,
},
};
module_platform_driver(mvebu_pic_driver);
MODULE_AUTHOR("Yehuda Yitschak <yehuday@marvell.com>");
MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
+MODULE_DESCRIPTION("Marvell Armada 7K/8K PIC driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:mvebu_pic");
diff --git a/drivers/irqchip/irq-mvebu-sei.c b/drivers/irqchip/irq-mvebu-sei.c
index a48dbe91b036..065166ab5dbc 100644
--- a/drivers/irqchip/irq-mvebu-sei.c
+++ b/drivers/irqchip/irq-mvebu-sei.c
@@ -14,6 +14,8 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include "irq-msi-lib.h"
+
/* Cause register */
#define GICP_SECR(idx) (0x0 + ((idx) * 0x4))
/* Mask register */
@@ -303,25 +305,11 @@ static void mvebu_sei_cp_domain_free(struct irq_domain *domain,
}
static const struct irq_domain_ops mvebu_sei_cp_domain_ops = {
+ .select = msi_lib_irq_domain_select,
.alloc = mvebu_sei_cp_domain_alloc,
.free = mvebu_sei_cp_domain_free,
};
-static struct irq_chip mvebu_sei_msi_irq_chip = {
- .name = "SEI pMSI",
- .irq_ack = irq_chip_ack_parent,
- .irq_set_type = irq_chip_set_type_parent,
-};
-
-static struct msi_domain_ops mvebu_sei_msi_ops = {
-};
-
-static struct msi_domain_info mvebu_sei_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
- .ops = &mvebu_sei_msi_ops,
- .chip = &mvebu_sei_msi_irq_chip,
-};
-
static void mvebu_sei_handle_cascade_irq(struct irq_desc *desc)
{
struct mvebu_sei *sei = irq_desc_get_handler_data(desc);
@@ -360,10 +348,23 @@ static void mvebu_sei_reset(struct mvebu_sei *sei)
}
}
+#define SEI_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS)
+
+#define SEI_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK)
+
+static const struct msi_parent_ops sei_msi_parent_ops = {
+ .supported_flags = SEI_MSI_FLAGS_SUPPORTED,
+ .required_flags = SEI_MSI_FLAGS_REQUIRED,
+ .bus_select_mask = MATCH_PLATFORM_MSI,
+ .bus_select_token = DOMAIN_BUS_GENERIC_MSI,
+ .prefix = "SEI-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
+
static int mvebu_sei_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
- struct irq_domain *plat_domain;
struct mvebu_sei *sei;
u32 parent_irq;
int ret;
@@ -440,33 +441,20 @@ static int mvebu_sei_probe(struct platform_device *pdev)
}
irq_domain_update_bus_token(sei->cp_domain, DOMAIN_BUS_GENERIC_MSI);
-
- plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node),
- &mvebu_sei_msi_domain_info,
- sei->cp_domain);
- if (!plat_domain) {
- pr_err("Failed to create CPs MSI domain\n");
- ret = -ENOMEM;
- goto remove_cp_domain;
- }
+ sei->cp_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
+ sei->cp_domain->msi_parent_ops = &sei_msi_parent_ops;
mvebu_sei_reset(sei);
- irq_set_chained_handler_and_data(parent_irq,
- mvebu_sei_handle_cascade_irq,
- sei);
-
+ irq_set_chained_handler_and_data(parent_irq, mvebu_sei_handle_cascade_irq, sei);
return 0;
-remove_cp_domain:
- irq_domain_remove(sei->cp_domain);
remove_ap_domain:
irq_domain_remove(sei->ap_domain);
remove_sei_domain:
irq_domain_remove(sei->sei_domain);
dispose_irq:
irq_dispose_mapping(parent_irq);
-
return ret;
}
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index be9680645545..d67b5da38982 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -130,7 +130,7 @@ static struct irq_chip asm9260_icoll_chip = {
IRQCHIP_SKIP_SET_WAKE,
};
-asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs)
{
u32 irqnr;
diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c
index dc82162ba763..ad84a2f03368 100644
--- a/drivers/irqchip/irq-omap-intc.c
+++ b/drivers/irqchip/irq-omap-intc.c
@@ -325,8 +325,7 @@ static int __init omap_init_irq(u32 base, struct device_node *node)
return ret;
}
-static asmlinkage void __exception_irq_entry
-omap_intc_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry omap_intc_handle_irq(struct pt_regs *regs)
{
extern unsigned long irq_err_count;
u32 irqnr;
diff --git a/drivers/irqchip/irq-partition-percpu.c b/drivers/irqchip/irq-partition-percpu.c
index 8e76d2913e6b..4441ffe149ea 100644
--- a/drivers/irqchip/irq-partition-percpu.c
+++ b/drivers/irqchip/irq-partition-percpu.c
@@ -98,7 +98,7 @@ static void partition_irq_print_chip(struct irq_data *d, struct seq_file *p)
struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
- seq_printf(p, " %5s-%lu", chip->name, data->hwirq);
+ seq_printf(p, "%5s-%lu", chip->name, data->hwirq);
}
static struct irq_chip partition_irq_chip = {
diff --git a/drivers/irqchip/irq-pic32-evic.c b/drivers/irqchip/irq-pic32-evic.c
index 1d9bb28d13e5..eb6ca516a166 100644
--- a/drivers/irqchip/irq-pic32-evic.c
+++ b/drivers/irqchip/irq-pic32-evic.c
@@ -161,9 +161,9 @@ static int pic32_irq_domain_map(struct irq_domain *d, unsigned int virq,
return ret;
}
-int pic32_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq, unsigned int *out_type)
+static int pic32_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
struct evic_chip_data *priv = d->host_data;
@@ -190,13 +190,11 @@ static void __init pic32_ext_irq_of_init(struct irq_domain *domain)
{
struct device_node *node = irq_domain_get_of_node(domain);
struct evic_chip_data *priv = domain->host_data;
- struct property *prop;
- const __le32 *p;
u32 hwirq;
int i = 0;
const char *pname = "microchip,external-irqs";
- of_property_for_each_u32(node, pname, prop, p, hwirq) {
+ of_property_for_each_u32(node, pname, hwirq) {
if (i >= ARRAY_SIZE(priv->ext_irqs)) {
pr_warn("More than %d external irqs, skipping the rest\n",
ARRAY_SIZE(priv->ext_irqs));
diff --git a/drivers/irqchip/irq-pruss-intc.c b/drivers/irqchip/irq-pruss-intc.c
index 0f64ecb9b1f4..bee01980b463 100644
--- a/drivers/irqchip/irq-pruss-intc.c
+++ b/drivers/irqchip/irq-pruss-intc.c
@@ -599,7 +599,7 @@ fail_irq:
return ret;
}
-static int pruss_intc_remove(struct platform_device *pdev)
+static void pruss_intc_remove(struct platform_device *pdev)
{
struct pruss_intc *intc = platform_get_drvdata(pdev);
u8 max_system_events = intc->soc_config->num_system_events;
@@ -616,8 +616,6 @@ static int pruss_intc_remove(struct platform_device *pdev)
irq_dispose_mapping(irq_find_mapping(intc->domain, hwirq));
irq_domain_remove(intc->domain);
-
- return 0;
}
static const struct pruss_intc_match_data pruss_intc_data = {
@@ -645,12 +643,12 @@ MODULE_DEVICE_TABLE(of, pruss_intc_of_match);
static struct platform_driver pruss_intc_driver = {
.driver = {
- .name = "pruss-intc",
- .of_match_table = pruss_intc_of_match,
- .suppress_bind_attrs = true,
+ .name = "pruss-intc",
+ .of_match_table = pruss_intc_of_match,
+ .suppress_bind_attrs = true,
},
- .probe = pruss_intc_probe,
- .remove = pruss_intc_remove,
+ .probe = pruss_intc_probe,
+ .remove = pruss_intc_remove,
};
module_platform_driver(pruss_intc_driver);
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index fa19585f3dee..954419f2460d 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -561,14 +561,13 @@ err0:
return ret;
}
-static int intc_irqpin_remove(struct platform_device *pdev)
+static void intc_irqpin_remove(struct platform_device *pdev)
{
struct intc_irqpin_priv *p = platform_get_drvdata(pdev);
irq_domain_remove(p->irq_domain);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- return 0;
}
static int __maybe_unused intc_irqpin_suspend(struct device *dev)
@@ -587,9 +586,9 @@ static struct platform_driver intc_irqpin_device_driver = {
.probe = intc_irqpin_probe,
.remove = intc_irqpin_remove,
.driver = {
- .name = "renesas_intc_irqpin",
- .of_match_table = intc_irqpin_dt_ids,
- .pm = &intc_irqpin_pm_ops,
+ .name = "renesas_intc_irqpin",
+ .of_match_table = intc_irqpin_dt_ids,
+ .pm = &intc_irqpin_pm_ops,
}
};
diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
index 49b446b396f9..cbce8ffc7de4 100644
--- a/drivers/irqchip/irq-renesas-irqc.c
+++ b/drivers/irqchip/irq-renesas-irqc.c
@@ -218,14 +218,13 @@ err_runtime_pm_disable:
return ret;
}
-static int irqc_remove(struct platform_device *pdev)
+static void irqc_remove(struct platform_device *pdev)
{
struct irqc_priv *p = platform_get_drvdata(pdev);
irq_domain_remove(p->irq_domain);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- return 0;
}
static int __maybe_unused irqc_suspend(struct device *dev)
@@ -250,9 +249,9 @@ static struct platform_driver irqc_device_driver = {
.probe = irqc_probe,
.remove = irqc_remove,
.driver = {
- .name = "renesas_irqc",
+ .name = "renesas_irqc",
.of_match_table = irqc_dt_ids,
- .pm = &irqc_pm_ops,
+ .pm = &irqc_pm_ops,
}
};
diff --git a/drivers/irqchip/irq-renesas-rza1.c b/drivers/irqchip/irq-renesas-rza1.c
index e4c99c2e0373..d4e6a68889ec 100644
--- a/drivers/irqchip/irq-renesas-rza1.c
+++ b/drivers/irqchip/irq-renesas-rza1.c
@@ -244,12 +244,11 @@ out_put_node:
return ret;
}
-static int rza1_irqc_remove(struct platform_device *pdev)
+static void rza1_irqc_remove(struct platform_device *pdev)
{
struct rza1_irqc_priv *priv = platform_get_drvdata(pdev);
irq_domain_remove(priv->irq_domain);
- return 0;
}
static const struct of_device_id rza1_irqc_dt_ids[] = {
@@ -262,7 +261,7 @@ static struct platform_driver rza1_irqc_device_driver = {
.probe = rza1_irqc_probe,
.remove = rza1_irqc_remove,
.driver = {
- .name = "renesas_rza1_irqc",
+ .name = "renesas_rza1_irqc",
.of_match_table = rza1_irqc_dt_ids,
}
};
diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
index 9494fc26259c..99e27e01b0b1 100644
--- a/drivers/irqchip/irq-renesas-rzg2l.c
+++ b/drivers/irqchip/irq-renesas-rzg2l.c
@@ -8,6 +8,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -37,6 +38,8 @@
#define TSSEL_SHIFT(n) (8 * (n))
#define TSSEL_MASK GENMASK(7, 0)
#define IRQ_MASK 0x3
+#define IMSK 0x10010
+#define TMSK 0x10020
#define TSSR_OFFSET(n) ((n) % 4)
#define TSSR_INDEX(n) ((n) / 4)
@@ -69,12 +72,14 @@ struct rzg2l_irqc_reg_cache {
/**
* struct rzg2l_irqc_priv - IRQ controller private data structure
* @base: Controller's base address
+ * @irqchip: Pointer to struct irq_chip
* @fwspec: IRQ firmware specific data
* @lock: Lock to serialize access to hardware registers
* @cache: Registers cache for suspend/resume
*/
static struct rzg2l_irqc_priv {
void __iomem *base;
+ const struct irq_chip *irqchip;
struct irq_fwspec fwspec[IRQC_NUM_IRQ];
raw_spinlock_t lock;
struct rzg2l_irqc_reg_cache cache;
@@ -85,10 +90,9 @@ static struct rzg2l_irqc_priv *irq_data_to_priv(struct irq_data *data)
return data->domain->host_data;
}
-static void rzg2l_irq_eoi(struct irq_data *d)
+static void rzg2l_clear_irq_int(struct rzg2l_irqc_priv *priv, unsigned int hwirq)
{
- unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_IRQ_START;
- struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+ unsigned int hw_irq = hwirq - IRQC_IRQ_START;
u32 bit = BIT(hw_irq);
u32 iitsr, iscr;
@@ -99,20 +103,30 @@ static void rzg2l_irq_eoi(struct irq_data *d)
* ISCR can only be cleared if the type is falling-edge, rising-edge or
* falling/rising-edge.
*/
- if ((iscr & bit) && (iitsr & IITSR_IITSEL_MASK(hw_irq)))
+ if ((iscr & bit) && (iitsr & IITSR_IITSEL_MASK(hw_irq))) {
writel_relaxed(iscr & ~bit, priv->base + ISCR);
+ /*
+ * Enforce that the posted write is flushed to prevent that the
+ * just handled interrupt is raised again.
+ */
+ readl_relaxed(priv->base + ISCR);
+ }
}
-static void rzg2l_tint_eoi(struct irq_data *d)
+static void rzg2l_clear_tint_int(struct rzg2l_irqc_priv *priv, unsigned int hwirq)
{
- unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_TINT_START;
- struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
- u32 bit = BIT(hw_irq);
+ u32 bit = BIT(hwirq - IRQC_TINT_START);
u32 reg;
reg = readl_relaxed(priv->base + TSCR);
- if (reg & bit)
+ if (reg & bit) {
writel_relaxed(reg & ~bit, priv->base + TSCR);
+ /*
+ * Enforce that the posted write is flushed to prevent that the
+ * just handled interrupt is raised again.
+ */
+ readl_relaxed(priv->base + TSCR);
+ }
}
static void rzg2l_irqc_eoi(struct irq_data *d)
@@ -122,39 +136,123 @@ static void rzg2l_irqc_eoi(struct irq_data *d)
raw_spin_lock(&priv->lock);
if (hw_irq >= IRQC_IRQ_START && hw_irq <= IRQC_IRQ_COUNT)
- rzg2l_irq_eoi(d);
+ rzg2l_clear_irq_int(priv, hw_irq);
else if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ)
- rzg2l_tint_eoi(d);
+ rzg2l_clear_tint_int(priv, hw_irq);
raw_spin_unlock(&priv->lock);
irq_chip_eoi_parent(d);
}
-static void rzg2l_irqc_irq_disable(struct irq_data *d)
+static void rzfive_irqc_mask_irq_interrupt(struct rzg2l_irqc_priv *priv,
+ unsigned int hwirq)
{
- unsigned int hw_irq = irqd_to_hwirq(d);
+ u32 bit = BIT(hwirq - IRQC_IRQ_START);
- if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) {
- struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
- u32 offset = hw_irq - IRQC_TINT_START;
+ writel_relaxed(readl_relaxed(priv->base + IMSK) | bit, priv->base + IMSK);
+}
+
+static void rzfive_irqc_unmask_irq_interrupt(struct rzg2l_irqc_priv *priv,
+ unsigned int hwirq)
+{
+ u32 bit = BIT(hwirq - IRQC_IRQ_START);
+
+ writel_relaxed(readl_relaxed(priv->base + IMSK) & ~bit, priv->base + IMSK);
+}
+
+static void rzfive_irqc_mask_tint_interrupt(struct rzg2l_irqc_priv *priv,
+ unsigned int hwirq)
+{
+ u32 bit = BIT(hwirq - IRQC_TINT_START);
+
+ writel_relaxed(readl_relaxed(priv->base + TMSK) | bit, priv->base + TMSK);
+}
+
+static void rzfive_irqc_unmask_tint_interrupt(struct rzg2l_irqc_priv *priv,
+ unsigned int hwirq)
+{
+ u32 bit = BIT(hwirq - IRQC_TINT_START);
+
+ writel_relaxed(readl_relaxed(priv->base + TMSK) & ~bit, priv->base + TMSK);
+}
+
+static void rzfive_irqc_mask(struct irq_data *d)
+{
+ struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+ unsigned int hwirq = irqd_to_hwirq(d);
+
+ raw_spin_lock(&priv->lock);
+ if (hwirq >= IRQC_IRQ_START && hwirq <= IRQC_IRQ_COUNT)
+ rzfive_irqc_mask_irq_interrupt(priv, hwirq);
+ else if (hwirq >= IRQC_TINT_START && hwirq < IRQC_NUM_IRQ)
+ rzfive_irqc_mask_tint_interrupt(priv, hwirq);
+ raw_spin_unlock(&priv->lock);
+ irq_chip_mask_parent(d);
+}
+
+static void rzfive_irqc_unmask(struct irq_data *d)
+{
+ struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+ unsigned int hwirq = irqd_to_hwirq(d);
+
+ raw_spin_lock(&priv->lock);
+ if (hwirq >= IRQC_IRQ_START && hwirq <= IRQC_IRQ_COUNT)
+ rzfive_irqc_unmask_irq_interrupt(priv, hwirq);
+ else if (hwirq >= IRQC_TINT_START && hwirq < IRQC_NUM_IRQ)
+ rzfive_irqc_unmask_tint_interrupt(priv, hwirq);
+ raw_spin_unlock(&priv->lock);
+ irq_chip_unmask_parent(d);
+}
+
+static void rzfive_tint_irq_endisable(struct irq_data *d, bool enable)
+{
+ struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+ unsigned int hwirq = irqd_to_hwirq(d);
+
+ if (hwirq >= IRQC_TINT_START && hwirq < IRQC_NUM_IRQ) {
+ u32 offset = hwirq - IRQC_TINT_START;
u32 tssr_offset = TSSR_OFFSET(offset);
u8 tssr_index = TSSR_INDEX(offset);
u32 reg;
raw_spin_lock(&priv->lock);
+ if (enable)
+ rzfive_irqc_unmask_tint_interrupt(priv, hwirq);
+ else
+ rzfive_irqc_mask_tint_interrupt(priv, hwirq);
reg = readl_relaxed(priv->base + TSSR(tssr_index));
- reg &= ~(TSSEL_MASK << TSSEL_SHIFT(tssr_offset));
+ if (enable)
+ reg |= TIEN << TSSEL_SHIFT(tssr_offset);
+ else
+ reg &= ~(TIEN << TSSEL_SHIFT(tssr_offset));
writel_relaxed(reg, priv->base + TSSR(tssr_index));
raw_spin_unlock(&priv->lock);
+ } else {
+ raw_spin_lock(&priv->lock);
+ if (enable)
+ rzfive_irqc_unmask_irq_interrupt(priv, hwirq);
+ else
+ rzfive_irqc_mask_irq_interrupt(priv, hwirq);
+ raw_spin_unlock(&priv->lock);
}
+}
+
+static void rzfive_irqc_irq_disable(struct irq_data *d)
+{
irq_chip_disable_parent(d);
+ rzfive_tint_irq_endisable(d, false);
}
-static void rzg2l_irqc_irq_enable(struct irq_data *d)
+static void rzfive_irqc_irq_enable(struct irq_data *d)
+{
+ rzfive_tint_irq_endisable(d, true);
+ irq_chip_enable_parent(d);
+}
+
+static void rzg2l_tint_irq_endisable(struct irq_data *d, bool enable)
{
unsigned int hw_irq = irqd_to_hwirq(d);
if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) {
- unsigned long tint = (uintptr_t)irq_data_get_irq_chip_data(d);
struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
u32 offset = hw_irq - IRQC_TINT_START;
u32 tssr_offset = TSSR_OFFSET(offset);
@@ -163,17 +261,33 @@ static void rzg2l_irqc_irq_enable(struct irq_data *d)
raw_spin_lock(&priv->lock);
reg = readl_relaxed(priv->base + TSSR(tssr_index));
- reg |= (TIEN | tint) << TSSEL_SHIFT(tssr_offset);
+ if (enable)
+ reg |= TIEN << TSSEL_SHIFT(tssr_offset);
+ else
+ reg &= ~(TIEN << TSSEL_SHIFT(tssr_offset));
writel_relaxed(reg, priv->base + TSSR(tssr_index));
raw_spin_unlock(&priv->lock);
}
+}
+
+static void rzg2l_irqc_irq_disable(struct irq_data *d)
+{
+ irq_chip_disable_parent(d);
+ rzg2l_tint_irq_endisable(d, false);
+}
+
+static void rzg2l_irqc_irq_enable(struct irq_data *d)
+{
+ rzg2l_tint_irq_endisable(d, true);
irq_chip_enable_parent(d);
}
static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type)
{
- unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_IRQ_START;
struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+ unsigned int hwirq = irqd_to_hwirq(d);
+ u32 iitseln = hwirq - IRQC_IRQ_START;
+ bool clear_irq_int = false;
u16 sense, tmp;
switch (type & IRQ_TYPE_SENSE_MASK) {
@@ -183,14 +297,17 @@ static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type)
case IRQ_TYPE_EDGE_FALLING:
sense = IITSR_IITSEL_EDGE_FALLING;
+ clear_irq_int = true;
break;
case IRQ_TYPE_EDGE_RISING:
sense = IITSR_IITSEL_EDGE_RISING;
+ clear_irq_int = true;
break;
case IRQ_TYPE_EDGE_BOTH:
sense = IITSR_IITSEL_EDGE_BOTH;
+ clear_irq_int = true;
break;
default:
@@ -199,21 +316,40 @@ static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type)
raw_spin_lock(&priv->lock);
tmp = readl_relaxed(priv->base + IITSR);
- tmp &= ~IITSR_IITSEL_MASK(hw_irq);
- tmp |= IITSR_IITSEL(hw_irq, sense);
+ tmp &= ~IITSR_IITSEL_MASK(iitseln);
+ tmp |= IITSR_IITSEL(iitseln, sense);
+ if (clear_irq_int)
+ rzg2l_clear_irq_int(priv, hwirq);
writel_relaxed(tmp, priv->base + IITSR);
raw_spin_unlock(&priv->lock);
return 0;
}
+static u32 rzg2l_disable_tint_and_set_tint_source(struct irq_data *d, struct rzg2l_irqc_priv *priv,
+ u32 reg, u32 tssr_offset, u8 tssr_index)
+{
+ u32 tint = (u32)(uintptr_t)irq_data_get_irq_chip_data(d);
+ u32 tien = reg & (TIEN << TSSEL_SHIFT(tssr_offset));
+
+ /* Clear the relevant byte in reg */
+ reg &= ~(TSSEL_MASK << TSSEL_SHIFT(tssr_offset));
+ /* Set TINT and leave TIEN clear */
+ reg |= tint << TSSEL_SHIFT(tssr_offset);
+ writel_relaxed(reg, priv->base + TSSR(tssr_index));
+
+ return reg | tien;
+}
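+
+/*
+ * Rationale, as a sketch of the sequence implemented below: the TINT
+ * source is programmed with TIEN kept clear so that changing the edge
+ * cannot raise a spurious interrupt from a stale status bit;
+ * rzg2l_tint_set_edge() then updates TITSR, clears any pending status
+ * in TSCR and only afterwards writes back the saved TIEN state.
+ */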
+
static int rzg2l_tint_set_edge(struct irq_data *d, unsigned int type)
{
struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
unsigned int hwirq = irqd_to_hwirq(d);
u32 titseln = hwirq - IRQC_TINT_START;
+ u32 tssr_offset = TSSR_OFFSET(titseln);
+ u8 tssr_index = TSSR_INDEX(titseln);
u8 index, sense;
- u32 reg;
+ u32 reg, tssr;
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_RISING:
@@ -235,10 +371,14 @@ static int rzg2l_tint_set_edge(struct irq_data *d, unsigned int type)
}
raw_spin_lock(&priv->lock);
+ tssr = readl_relaxed(priv->base + TSSR(tssr_index));
+ tssr = rzg2l_disable_tint_and_set_tint_source(d, priv, tssr, tssr_offset, tssr_index);
reg = readl_relaxed(priv->base + TITSR(index));
reg &= ~(IRQ_MASK << (titseln * TITSEL_WIDTH));
reg |= sense << (titseln * TITSEL_WIDTH);
writel_relaxed(reg, priv->base + TITSR(index));
+ rzg2l_clear_tint_int(priv, hwirq);
+ writel_relaxed(tssr, priv->base + TSSR(tssr_index));
raw_spin_unlock(&priv->lock);
return 0;
@@ -291,7 +431,7 @@ static struct syscore_ops rzg2l_irqc_syscore_ops = {
.resume = rzg2l_irqc_irq_resume,
};
-static const struct irq_chip irqc_chip = {
+static const struct irq_chip rzg2l_irqc_chip = {
.name = "rzg2l-irqc",
.irq_eoi = rzg2l_irqc_eoi,
.irq_mask = irq_chip_mask_parent,
@@ -308,6 +448,23 @@ static const struct irq_chip irqc_chip = {
IRQCHIP_SKIP_SET_WAKE,
};
+static const struct irq_chip rzfive_irqc_chip = {
+ .name = "rzfive-irqc",
+ .irq_eoi = rzg2l_irqc_eoi,
+ .irq_mask = rzfive_irqc_mask,
+ .irq_unmask = rzfive_irqc_unmask,
+ .irq_disable = rzfive_irqc_irq_disable,
+ .irq_enable = rzfive_irqc_irq_enable,
+ .irq_get_irqchip_state = irq_chip_get_parent_state,
+ .irq_set_irqchip_state = irq_chip_set_parent_state,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_type = rzg2l_irqc_set_type,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .flags = IRQCHIP_MASK_ON_SUSPEND |
+ IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE,
+};
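+
+/*
+ * Unlike the RZ/G2L chip above, the RZ/Five variant additionally drives
+ * the IRQC-local IMSK/TMSK mask registers in its mask/unmask and
+ * enable/disable callbacks before forwarding to the parent GIC chip.
+ */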
+
static int rzg2l_irqc_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *arg)
{
@@ -339,7 +496,7 @@ static int rzg2l_irqc_alloc(struct irq_domain *domain, unsigned int virq,
if (hwirq > (IRQC_NUM_IRQ - 1))
return -EINVAL;
- ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &irqc_chip,
+ ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, priv->irqchip,
(void *)(uintptr_t)tint);
if (ret)
return ret;
@@ -371,14 +528,15 @@ static int rzg2l_irqc_parse_interrupts(struct rzg2l_irqc_priv *priv,
return 0;
}
-static int rzg2l_irqc_init(struct device_node *node, struct device_node *parent)
+static int rzg2l_irqc_common_init(struct device_node *node, struct device_node *parent,
+ const struct irq_chip *irq_chip)
{
+ struct platform_device *pdev = of_find_device_by_node(node);
+ struct device *dev __free(put_device) = pdev ? &pdev->dev : NULL;
struct irq_domain *irq_domain, *parent_domain;
- struct platform_device *pdev;
struct reset_control *resetn;
int ret;
- pdev = of_find_device_by_node(node);
if (!pdev)
return -ENODEV;
@@ -392,6 +550,8 @@ static int rzg2l_irqc_init(struct device_node *node, struct device_node *parent)
if (!rzg2l_irqc_data)
return -ENOMEM;
+ rzg2l_irqc_data->irqchip = irq_chip;
+
rzg2l_irqc_data->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL);
if (IS_ERR(rzg2l_irqc_data->base))
return PTR_ERR(rzg2l_irqc_data->base);
@@ -432,6 +592,17 @@ static int rzg2l_irqc_init(struct device_node *node, struct device_node *parent)
register_syscore_ops(&rzg2l_irqc_syscore_ops);
+ /*
+ * Prevent the cleanup function from invoking put_device by assigning
+ * NULL to dev.
+ *
+ * make coccicheck will complain about missing put_device calls, but
+ * those are false positives, as dev will be automatically "put" via
+ * __free_put_device on the failing path.
+ * On the successful path we don't actually want to "put" dev.
+ */
+ dev = NULL;
+
return 0;
pm_put:
@@ -442,8 +613,21 @@ pm_disable:
return ret;
}
+static int __init rzg2l_irqc_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return rzg2l_irqc_common_init(node, parent, &rzg2l_irqc_chip);
+}
+
+static int __init rzfive_irqc_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return rzg2l_irqc_common_init(node, parent, &rzfive_irqc_chip);
+}
+
IRQCHIP_PLATFORM_DRIVER_BEGIN(rzg2l_irqc)
IRQCHIP_MATCH("renesas,rzg2l-irqc", rzg2l_irqc_init)
+IRQCHIP_MATCH("renesas,r9a07g043f-irqc", rzfive_irqc_init)
IRQCHIP_PLATFORM_DRIVER_END(rzg2l_irqc)
MODULE_AUTHOR("Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>");
MODULE_DESCRIPTION("Renesas RZ/G2L IRQC Driver");
diff --git a/drivers/irqchip/irq-renesas-rzv2h.c b/drivers/irqchip/irq-renesas-rzv2h.c
new file mode 100644
index 000000000000..fe2d29e91026
--- /dev/null
+++ b/drivers/irqchip/irq-renesas-rzv2h.c
@@ -0,0 +1,513 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/V2H(P) ICU Driver
+ *
+ * Based on irq-renesas-rzg2l.c
+ *
+ * Copyright (C) 2024 Renesas Electronics Corporation.
+ *
+ * Author: Fabrizio Castro <fabrizio.castro.jz@renesas.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
+
+/* DT "interrupts" indexes */
+#define ICU_IRQ_START 1
+#define ICU_IRQ_COUNT 16
+#define ICU_TINT_START (ICU_IRQ_START + ICU_IRQ_COUNT)
+#define ICU_TINT_COUNT 32
+#define ICU_NUM_IRQ (ICU_TINT_START + ICU_TINT_COUNT)
+
+/* Registers */
+#define ICU_NSCNT 0x00
+#define ICU_NSCLR 0x04
+#define ICU_NITSR 0x08
+#define ICU_ISCTR 0x10
+#define ICU_ISCLR 0x14
+#define ICU_IITSR 0x18
+#define ICU_TSCTR 0x20
+#define ICU_TSCLR 0x24
+#define ICU_TITSR(k) (0x28 + (k) * 4)
+#define ICU_TSSR(k) (0x30 + (k) * 4)
+
+/* NMI */
+#define ICU_NMI_EDGE_FALLING 0
+#define ICU_NMI_EDGE_RISING 1
+
+#define ICU_NSCLR_NCLR BIT(0)
+
+/* IRQ */
+#define ICU_IRQ_LEVEL_LOW 0
+#define ICU_IRQ_EDGE_FALLING 1
+#define ICU_IRQ_EDGE_RISING 2
+#define ICU_IRQ_EDGE_BOTH 3
+
+#define ICU_IITSR_IITSEL_PREP(iitsel, n) ((iitsel) << ((n) * 2))
+#define ICU_IITSR_IITSEL_GET(iitsr, n) (((iitsr) >> ((n) * 2)) & 0x03)
+#define ICU_IITSR_IITSEL_MASK(n) ICU_IITSR_IITSEL_PREP(0x03, n)
+
+/* TINT */
+#define ICU_TINT_EDGE_RISING 0
+#define ICU_TINT_EDGE_FALLING 1
+#define ICU_TINT_LEVEL_HIGH 2
+#define ICU_TINT_LEVEL_LOW 3
+
+#define ICU_TSSR_K(tint_nr) ((tint_nr) / 4)
+#define ICU_TSSR_TSSEL_N(tint_nr) ((tint_nr) % 4)
+#define ICU_TSSR_TSSEL_PREP(tssel, n) ((tssel) << ((n) * 8))
+#define ICU_TSSR_TSSEL_MASK(n) ICU_TSSR_TSSEL_PREP(0x7F, n)
+#define ICU_TSSR_TIEN(n) (BIT(7) << ((n) * 8))
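+
+/*
+ * Each 32-bit TSSR register packs four one-byte TSSEL fields, with bit 7
+ * of every byte acting as the TIEN enable bit. For example, tint_nr 5
+ * lives in TSSR(1) (5 / 4), byte 1 (5 % 4), so its enable bit is
+ * ICU_TSSR_TIEN(1) == BIT(15).
+ */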
+
+#define ICU_TITSR_K(tint_nr) ((tint_nr) / 16)
+#define ICU_TITSR_TITSEL_N(tint_nr) ((tint_nr) % 16)
+#define ICU_TITSR_TITSEL_PREP(titsel, n) ICU_IITSR_IITSEL_PREP(titsel, n)
+#define ICU_TITSR_TITSEL_MASK(n) ICU_IITSR_IITSEL_MASK(n)
+#define ICU_TITSR_TITSEL_GET(titsr, n) ICU_IITSR_IITSEL_GET(titsr, n)
+
+#define ICU_TINT_EXTRACT_HWIRQ(x) FIELD_GET(GENMASK(15, 0), (x))
+#define ICU_TINT_EXTRACT_GPIOINT(x) FIELD_GET(GENMASK(31, 16), (x))
+#define ICU_PB5_TINT 0x55
+
+/**
+ * struct rzv2h_icu_priv - Interrupt Control Unit controller private data structure.
+ * @base: Controller's base address
+ * @irqchip: Pointer to struct irq_chip
+ * @fwspec: IRQ firmware specific data
+ * @lock: Lock to serialize access to hardware registers
+ */
+struct rzv2h_icu_priv {
+ void __iomem *base;
+ const struct irq_chip *irqchip;
+ struct irq_fwspec fwspec[ICU_NUM_IRQ];
+ raw_spinlock_t lock;
+};
+
+static inline struct rzv2h_icu_priv *irq_data_to_priv(struct irq_data *data)
+{
+ return data->domain->host_data;
+}
+
+static void rzv2h_icu_eoi(struct irq_data *d)
+{
+ struct rzv2h_icu_priv *priv = irq_data_to_priv(d);
+ unsigned int hw_irq = irqd_to_hwirq(d);
+ unsigned int tintirq_nr;
+ u32 bit;
+
+ scoped_guard(raw_spinlock, &priv->lock) {
+ if (hw_irq >= ICU_TINT_START) {
+ tintirq_nr = hw_irq - ICU_TINT_START;
+ bit = BIT(tintirq_nr);
+ if (!irqd_is_level_type(d))
+ writel_relaxed(bit, priv->base + ICU_TSCLR);
+ } else if (hw_irq >= ICU_IRQ_START) {
+ tintirq_nr = hw_irq - ICU_IRQ_START;
+ bit = BIT(tintirq_nr);
+ if (!irqd_is_level_type(d))
+ writel_relaxed(bit, priv->base + ICU_ISCLR);
+ } else {
+ writel_relaxed(ICU_NSCLR_NCLR, priv->base + ICU_NSCLR);
+ }
+ }
+
+ irq_chip_eoi_parent(d);
+}
+
+static void rzv2h_tint_irq_endisable(struct irq_data *d, bool enable)
+{
+ struct rzv2h_icu_priv *priv = irq_data_to_priv(d);
+ unsigned int hw_irq = irqd_to_hwirq(d);
+ u32 tint_nr, tssel_n, k, tssr;
+
+ if (hw_irq < ICU_TINT_START)
+ return;
+
+ tint_nr = hw_irq - ICU_TINT_START;
+ k = ICU_TSSR_K(tint_nr);
+ tssel_n = ICU_TSSR_TSSEL_N(tint_nr);
+
+ guard(raw_spinlock)(&priv->lock);
+ tssr = readl_relaxed(priv->base + ICU_TSSR(k));
+ if (enable)
+ tssr |= ICU_TSSR_TIEN(tssel_n);
+ else
+ tssr &= ~ICU_TSSR_TIEN(tssel_n);
+ writel_relaxed(tssr, priv->base + ICU_TSSR(k));
+}
+
+static void rzv2h_icu_irq_disable(struct irq_data *d)
+{
+ irq_chip_disable_parent(d);
+ rzv2h_tint_irq_endisable(d, false);
+}
+
+static void rzv2h_icu_irq_enable(struct irq_data *d)
+{
+ rzv2h_tint_irq_endisable(d, true);
+ irq_chip_enable_parent(d);
+}
+
+static int rzv2h_nmi_set_type(struct irq_data *d, unsigned int type)
+{
+ struct rzv2h_icu_priv *priv = irq_data_to_priv(d);
+ u32 sense;
+
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_FALLING:
+ sense = ICU_NMI_EDGE_FALLING;
+ break;
+
+ case IRQ_TYPE_EDGE_RISING:
+ sense = ICU_NMI_EDGE_RISING;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ writel_relaxed(sense, priv->base + ICU_NITSR);
+
+ return 0;
+}
+
+static void rzv2h_clear_irq_int(struct rzv2h_icu_priv *priv, unsigned int hwirq)
+{
+ unsigned int irq_nr = hwirq - ICU_IRQ_START;
+ u32 isctr, iitsr, iitsel;
+ u32 bit = BIT(irq_nr);
+
+ isctr = readl_relaxed(priv->base + ICU_ISCTR);
+ iitsr = readl_relaxed(priv->base + ICU_IITSR);
+ iitsel = ICU_IITSR_IITSEL_GET(iitsr, irq_nr);
+
+ /*
+	 * When level sensing is used, the interrupt flag is cleared automatically once the
+	 * interrupt signal is de-asserted by the source of the interrupt request; therefore,
+	 * clear the interrupt only for edge-triggered interrupts.
+ */
+ if ((isctr & bit) && (iitsel != ICU_IRQ_LEVEL_LOW))
+ writel_relaxed(bit, priv->base + ICU_ISCLR);
+}
+
+static int rzv2h_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct rzv2h_icu_priv *priv = irq_data_to_priv(d);
+ unsigned int hwirq = irqd_to_hwirq(d);
+ u32 irq_nr = hwirq - ICU_IRQ_START;
+ u32 iitsr, sense;
+
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_LEVEL_LOW:
+ sense = ICU_IRQ_LEVEL_LOW;
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ sense = ICU_IRQ_EDGE_FALLING;
+ break;
+
+ case IRQ_TYPE_EDGE_RISING:
+ sense = ICU_IRQ_EDGE_RISING;
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ sense = ICU_IRQ_EDGE_BOTH;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ guard(raw_spinlock)(&priv->lock);
+ iitsr = readl_relaxed(priv->base + ICU_IITSR);
+ iitsr &= ~ICU_IITSR_IITSEL_MASK(irq_nr);
+ iitsr |= ICU_IITSR_IITSEL_PREP(sense, irq_nr);
+ rzv2h_clear_irq_int(priv, hwirq);
+ writel_relaxed(iitsr, priv->base + ICU_IITSR);
+
+ return 0;
+}
+
+static void rzv2h_clear_tint_int(struct rzv2h_icu_priv *priv, unsigned int hwirq)
+{
+ unsigned int tint_nr = hwirq - ICU_TINT_START;
+ int titsel_n = ICU_TITSR_TITSEL_N(tint_nr);
+ u32 tsctr, titsr, titsel;
+ u32 bit = BIT(tint_nr);
+ int k = tint_nr / 16;
+
+ tsctr = readl_relaxed(priv->base + ICU_TSCTR);
+ titsr = readl_relaxed(priv->base + ICU_TITSR(k));
+ titsel = ICU_TITSR_TITSEL_GET(titsr, titsel_n);
+
+ /*
+	 * Writing 1 to the corresponding bit of the ICU_TSCLR register clears the flag in
+	 * ICU_TSCTR, but only if TSTATn = 1b and the interrupt is configured as rising-edge
+	 * or falling-edge triggered.
+ */
+ if ((tsctr & bit) && ((titsel == ICU_TINT_EDGE_RISING) ||
+ (titsel == ICU_TINT_EDGE_FALLING)))
+ writel_relaxed(bit, priv->base + ICU_TSCLR);
+}
+
+static int rzv2h_tint_set_type(struct irq_data *d, unsigned int type)
+{
+ u32 titsr, titsr_k, titsel_n, tien;
+ struct rzv2h_icu_priv *priv;
+ u32 tssr, tssr_k, tssel_n;
+ unsigned int hwirq;
+ u32 tint, sense;
+ int tint_nr;
+
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_LEVEL_LOW:
+ sense = ICU_TINT_LEVEL_LOW;
+ break;
+
+ case IRQ_TYPE_LEVEL_HIGH:
+ sense = ICU_TINT_LEVEL_HIGH;
+ break;
+
+ case IRQ_TYPE_EDGE_RISING:
+ sense = ICU_TINT_EDGE_RISING;
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ sense = ICU_TINT_EDGE_FALLING;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ tint = (u32)(uintptr_t)irq_data_get_irq_chip_data(d);
+ if (tint > ICU_PB5_TINT)
+ return -EINVAL;
+
+ priv = irq_data_to_priv(d);
+ hwirq = irqd_to_hwirq(d);
+
+ tint_nr = hwirq - ICU_TINT_START;
+
+ tssr_k = ICU_TSSR_K(tint_nr);
+ tssel_n = ICU_TSSR_TSSEL_N(tint_nr);
+
+ titsr_k = ICU_TITSR_K(tint_nr);
+ titsel_n = ICU_TITSR_TITSEL_N(tint_nr);
+ tien = ICU_TSSR_TIEN(titsel_n);
+
+ guard(raw_spinlock)(&priv->lock);
+
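+	/*
+	 * Reprogram the source select and the sense with TIEN cleared,
+	 * flush any stale pending state, and only then re-enable the
+	 * interrupt.
+	 */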
+ tssr = readl_relaxed(priv->base + ICU_TSSR(tssr_k));
+ tssr &= ~(ICU_TSSR_TSSEL_MASK(tssel_n) | tien);
+ tssr |= ICU_TSSR_TSSEL_PREP(tint, tssel_n);
+
+ writel_relaxed(tssr, priv->base + ICU_TSSR(tssr_k));
+
+ titsr = readl_relaxed(priv->base + ICU_TITSR(titsr_k));
+ titsr &= ~ICU_TITSR_TITSEL_MASK(titsel_n);
+ titsr |= ICU_TITSR_TITSEL_PREP(sense, titsel_n);
+
+ writel_relaxed(titsr, priv->base + ICU_TITSR(titsr_k));
+
+ rzv2h_clear_tint_int(priv, hwirq);
+
+ writel_relaxed(tssr | tien, priv->base + ICU_TSSR(tssr_k));
+
+ return 0;
+}
+
+static int rzv2h_icu_set_type(struct irq_data *d, unsigned int type)
+{
+ unsigned int hw_irq = irqd_to_hwirq(d);
+ int ret;
+
+ if (hw_irq >= ICU_TINT_START)
+ ret = rzv2h_tint_set_type(d, type);
+ else if (hw_irq >= ICU_IRQ_START)
+ ret = rzv2h_irq_set_type(d, type);
+ else
+ ret = rzv2h_nmi_set_type(d, type);
+
+ if (ret)
+ return ret;
+
+ return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
+}
+
+static const struct irq_chip rzv2h_icu_chip = {
+ .name = "rzv2h-icu",
+ .irq_eoi = rzv2h_icu_eoi,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_disable = rzv2h_icu_irq_disable,
+ .irq_enable = rzv2h_icu_irq_enable,
+ .irq_get_irqchip_state = irq_chip_get_parent_state,
+ .irq_set_irqchip_state = irq_chip_set_parent_state,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_type = rzv2h_icu_set_type,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .flags = IRQCHIP_SET_TYPE_MASKED,
+};
+
+static int rzv2h_icu_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs,
+ void *arg)
+{
+ struct rzv2h_icu_priv *priv = domain->host_data;
+ unsigned long tint = 0;
+ irq_hw_number_t hwirq;
+ unsigned int type;
+ int ret;
+
+ ret = irq_domain_translate_twocell(domain, arg, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ /*
+	 * For TINT interrupts, the hwirq and the GPIOINT (TINT source)
+	 * number are encoded in fwspec->param[0]:
+	 * the hwirq is embedded in bits 0-15 and
+	 * the GPIOINT number in bits 16-31.
+ */
+ if (hwirq >= ICU_TINT_START) {
+ tint = ICU_TINT_EXTRACT_GPIOINT(hwirq);
+ hwirq = ICU_TINT_EXTRACT_HWIRQ(hwirq);
+
+ if (hwirq < ICU_TINT_START)
+ return -EINVAL;
+ }
+
+ if (hwirq > (ICU_NUM_IRQ - 1))
+ return -EINVAL;
+
+ ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, priv->irqchip,
+ (void *)(uintptr_t)tint);
+ if (ret)
+ return ret;
+
+ return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &priv->fwspec[hwirq]);
+}
+
+static const struct irq_domain_ops rzv2h_icu_domain_ops = {
+ .alloc = rzv2h_icu_alloc,
+ .free = irq_domain_free_irqs_common,
+ .translate = irq_domain_translate_twocell,
+};
+
+static int rzv2h_icu_parse_interrupts(struct rzv2h_icu_priv *priv, struct device_node *np)
+{
+ struct of_phandle_args map;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ICU_NUM_IRQ; i++) {
+ ret = of_irq_parse_one(np, i, &map);
+ if (ret)
+ return ret;
+
+ of_phandle_args_to_fwspec(np, map.args, map.args_count, &priv->fwspec[i]);
+ }
+
+ return 0;
+}
+
+static int rzv2h_icu_init(struct device_node *node, struct device_node *parent)
+{
+ struct irq_domain *irq_domain, *parent_domain;
+ struct rzv2h_icu_priv *rzv2h_icu_data;
+ struct platform_device *pdev;
+ struct reset_control *resetn;
+ int ret;
+
+ pdev = of_find_device_by_node(node);
+ if (!pdev)
+ return -ENODEV;
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ dev_err(&pdev->dev, "cannot find parent domain\n");
+ ret = -ENODEV;
+ goto put_dev;
+ }
+
+ rzv2h_icu_data = devm_kzalloc(&pdev->dev, sizeof(*rzv2h_icu_data), GFP_KERNEL);
+ if (!rzv2h_icu_data) {
+ ret = -ENOMEM;
+ goto put_dev;
+ }
+
+ rzv2h_icu_data->irqchip = &rzv2h_icu_chip;
+
+ rzv2h_icu_data->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL);
+ if (IS_ERR(rzv2h_icu_data->base)) {
+ ret = PTR_ERR(rzv2h_icu_data->base);
+ goto put_dev;
+ }
+
+ ret = rzv2h_icu_parse_interrupts(rzv2h_icu_data, node);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot parse interrupts: %d\n", ret);
+ goto put_dev;
+ }
+
+ resetn = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(resetn)) {
+ ret = PTR_ERR(resetn);
+ goto put_dev;
+ }
+
+ ret = reset_control_deassert(resetn);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to deassert resetn pin, %d\n", ret);
+ goto put_dev;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "pm_runtime_resume_and_get failed: %d\n", ret);
+ goto pm_disable;
+ }
+
+ raw_spin_lock_init(&rzv2h_icu_data->lock);
+
+ irq_domain = irq_domain_add_hierarchy(parent_domain, 0, ICU_NUM_IRQ, node,
+ &rzv2h_icu_domain_ops, rzv2h_icu_data);
+ if (!irq_domain) {
+ dev_err(&pdev->dev, "failed to add irq domain\n");
+ ret = -ENOMEM;
+ goto pm_put;
+ }
+
+ /*
+ * coccicheck complains about a missing put_device call before returning, but it's a false
+ * positive. We still need &pdev->dev after successfully returning from this function.
+ */
+ return 0;
+
+pm_put:
+ pm_runtime_put(&pdev->dev);
+pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ reset_control_assert(resetn);
+put_dev:
+ put_device(&pdev->dev);
+
+ return ret;
+}
+
+IRQCHIP_PLATFORM_DRIVER_BEGIN(rzv2h_icu)
+IRQCHIP_MATCH("renesas,r9a09g057-icu", rzv2h_icu_init)
+IRQCHIP_PLATFORM_DRIVER_END(rzv2h_icu)
+MODULE_AUTHOR("Fabrizio Castro <fabrizio.castro.jz@renesas.com>");
+MODULE_DESCRIPTION("Renesas RZ/V2H(P) ICU Driver");
diff --git a/drivers/irqchip/irq-riscv-aplic-direct.c b/drivers/irqchip/irq-riscv-aplic-direct.c
new file mode 100644
index 000000000000..7cd6b646774b
--- /dev/null
+++ b/drivers/irqchip/irq-riscv-aplic-direct.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/riscv-aplic.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/printk.h>
+#include <linux/smp.h>
+
+#include "irq-riscv-aplic-main.h"
+
+#define APLIC_DISABLE_IDELIVERY 0
+#define APLIC_ENABLE_IDELIVERY 1
+#define APLIC_DISABLE_ITHRESHOLD 1
+#define APLIC_ENABLE_ITHRESHOLD 0
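+
+/*
+ * With a nonzero ithreshold P, only priorities strictly below P can fire;
+ * since the smallest priority is 1, a threshold of 1 masks everything.
+ * A threshold of 0 disables thresholding altogether.
+ */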
+
+struct aplic_direct {
+ struct aplic_priv priv;
+ struct irq_domain *irqdomain;
+ struct cpumask lmask;
+};
+
+struct aplic_idc {
+ unsigned int hart_index;
+ void __iomem *regs;
+ struct aplic_direct *direct;
+};
+
+static unsigned int aplic_direct_parent_irq;
+static DEFINE_PER_CPU(struct aplic_idc, aplic_idcs);
+
+static void aplic_direct_irq_eoi(struct irq_data *d)
+{
+ /*
+	 * The fasteoi handler requires an irq_eoi() callback, hence
+	 * provide a dummy (no-op) handler.
+ */
+}
+
+#ifdef CONFIG_SMP
+static int aplic_direct_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+ bool force)
+{
+ struct aplic_priv *priv = irq_data_get_irq_chip_data(d);
+ struct aplic_direct *direct = container_of(priv, struct aplic_direct, priv);
+ struct aplic_idc *idc;
+ unsigned int cpu, val;
+ void __iomem *target;
+
+ if (force)
+ cpu = cpumask_first_and(&direct->lmask, mask_val);
+ else
+ cpu = cpumask_first_and_and(&direct->lmask, mask_val, cpu_online_mask);
+
+ if (cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ idc = per_cpu_ptr(&aplic_idcs, cpu);
+ target = priv->regs + APLIC_TARGET_BASE + (d->hwirq - 1) * sizeof(u32);
+ val = FIELD_PREP(APLIC_TARGET_HART_IDX, idc->hart_index);
+ val |= FIELD_PREP(APLIC_TARGET_IPRIO, APLIC_DEFAULT_PRIORITY);
+ writel(val, target);
+
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+ return IRQ_SET_MASK_OK_DONE;
+}
+#endif
+
+static struct irq_chip aplic_direct_chip = {
+ .name = "APLIC-DIRECT",
+ .irq_mask = aplic_irq_mask,
+ .irq_unmask = aplic_irq_unmask,
+ .irq_set_type = aplic_irq_set_type,
+ .irq_eoi = aplic_direct_irq_eoi,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = aplic_direct_set_affinity,
+#endif
+ .flags = IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static int aplic_direct_irqdomain_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
+ unsigned long *hwirq, unsigned int *type)
+{
+ struct aplic_priv *priv = d->host_data;
+
+ return aplic_irqdomain_translate(fwspec, priv->gsi_base, hwirq, type);
+}
+
+static int aplic_direct_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct aplic_priv *priv = domain->host_data;
+ struct aplic_direct *direct = container_of(priv, struct aplic_direct, priv);
+ struct irq_fwspec *fwspec = arg;
+ irq_hw_number_t hwirq;
+ unsigned int type;
+ int i, ret;
+
+ ret = aplic_irqdomain_translate(fwspec, priv->gsi_base, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_domain_set_info(domain, virq + i, hwirq + i, &aplic_direct_chip,
+ priv, handle_fasteoi_irq, NULL, NULL);
+ irq_set_affinity(virq + i, &direct->lmask);
+ }
+
+ return 0;
+}
+
+static const struct irq_domain_ops aplic_direct_irqdomain_ops = {
+ .translate = aplic_direct_irqdomain_translate,
+ .alloc = aplic_direct_irqdomain_alloc,
+ .free = irq_domain_free_irqs_top,
+};
+
+/*
+ * To handle APLIC direct interrupts, we just read the CLAIMI register,
+ * which returns the highest-priority pending interrupt and clears its
+ * pending bit. This process is repeated until the CLAIMI register
+ * returns zero.
+ */
+static void aplic_direct_handle_irq(struct irq_desc *desc)
+{
+ struct aplic_idc *idc = this_cpu_ptr(&aplic_idcs);
+ struct irq_domain *irqdomain = idc->direct->irqdomain;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ irq_hw_number_t hw_irq;
+ int irq;
+
+ chained_irq_enter(chip, desc);
+
+ while ((hw_irq = readl(idc->regs + APLIC_IDC_CLAIMI))) {
+ hw_irq = hw_irq >> APLIC_IDC_TOPI_ID_SHIFT;
+ irq = irq_find_mapping(irqdomain, hw_irq);
+
+ if (unlikely(irq <= 0)) {
+ dev_warn_ratelimited(idc->direct->priv.dev,
+ "hw_irq %lu mapping not found\n", hw_irq);
+ } else {
+ generic_handle_irq(irq);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void aplic_idc_set_delivery(struct aplic_idc *idc, bool en)
+{
+ u32 de = (en) ? APLIC_ENABLE_IDELIVERY : APLIC_DISABLE_IDELIVERY;
+ u32 th = (en) ? APLIC_ENABLE_ITHRESHOLD : APLIC_DISABLE_ITHRESHOLD;
+
+ /* Priority must be less than threshold for interrupt triggering */
+ writel(th, idc->regs + APLIC_IDC_ITHRESHOLD);
+
+ /* Delivery must be set to 1 for interrupt triggering */
+ writel(de, idc->regs + APLIC_IDC_IDELIVERY);
+}
+
+static int aplic_direct_dying_cpu(unsigned int cpu)
+{
+ if (aplic_direct_parent_irq)
+ disable_percpu_irq(aplic_direct_parent_irq);
+
+ return 0;
+}
+
+static int aplic_direct_starting_cpu(unsigned int cpu)
+{
+ if (aplic_direct_parent_irq) {
+ enable_percpu_irq(aplic_direct_parent_irq,
+ irq_get_trigger_type(aplic_direct_parent_irq));
+ }
+
+ return 0;
+}
+
+static int aplic_direct_parse_parent_hwirq(struct device *dev, u32 index,
+ u32 *parent_hwirq, unsigned long *parent_hartid,
+ struct aplic_priv *priv)
+{
+ struct of_phandle_args parent;
+ unsigned long hartid;
+ int rc;
+
+ if (!is_of_node(dev->fwnode)) {
+ hartid = acpi_rintc_ext_parent_to_hartid(priv->acpi_aplic_id, index);
+ if (hartid == INVALID_HARTID)
+ return -ENODEV;
+
+ *parent_hartid = hartid;
+ *parent_hwirq = RV_IRQ_EXT;
+ return 0;
+ }
+
+ rc = of_irq_parse_one(to_of_node(dev->fwnode), index, &parent);
+ if (rc)
+ return rc;
+
+ rc = riscv_of_parent_hartid(parent.np, parent_hartid);
+ if (rc)
+ return rc;
+
+ *parent_hwirq = parent.args[0];
+ return 0;
+}
+
+int aplic_direct_setup(struct device *dev, void __iomem *regs)
+{
+ int i, j, rc, cpu, current_cpu, setup_count = 0;
+ struct aplic_direct *direct;
+ struct irq_domain *domain;
+ struct aplic_priv *priv;
+ struct aplic_idc *idc;
+ unsigned long hartid;
+ u32 v, hwirq;
+
+ direct = devm_kzalloc(dev, sizeof(*direct), GFP_KERNEL);
+ if (!direct)
+ return -ENOMEM;
+ priv = &direct->priv;
+
+ rc = aplic_setup_priv(priv, dev, regs);
+ if (rc) {
+ dev_err(dev, "failed to create APLIC context\n");
+ return rc;
+ }
+
+ /* Setup per-CPU IDC and target CPU mask */
+ current_cpu = get_cpu();
+ for (i = 0; i < priv->nr_idcs; i++) {
+ rc = aplic_direct_parse_parent_hwirq(dev, i, &hwirq, &hartid, priv);
+ if (rc) {
+ dev_warn(dev, "parent irq for IDC%d not found\n", i);
+ continue;
+ }
+
+ /*
+		 * Skip interrupts other than external interrupts for
+		 * the current privilege level.
+ */
+ if (hwirq != RV_IRQ_EXT)
+ continue;
+
+ cpu = riscv_hartid_to_cpuid(hartid);
+ if (cpu < 0) {
+ dev_warn(dev, "invalid cpuid for IDC%d\n", i);
+ continue;
+ }
+
+ cpumask_set_cpu(cpu, &direct->lmask);
+
+ idc = per_cpu_ptr(&aplic_idcs, cpu);
+ idc->hart_index = i;
+ idc->regs = priv->regs + APLIC_IDC_BASE + i * APLIC_IDC_SIZE;
+ idc->direct = direct;
+
+ aplic_idc_set_delivery(idc, true);
+
+ /*
+		 * The boot CPU might not have APLIC hart_index = 0, so
+		 * check and update the target registers of all interrupts.
+ */
+ if (cpu == current_cpu && idc->hart_index) {
+ v = FIELD_PREP(APLIC_TARGET_HART_IDX, idc->hart_index);
+ v |= FIELD_PREP(APLIC_TARGET_IPRIO, APLIC_DEFAULT_PRIORITY);
+ for (j = 1; j <= priv->nr_irqs; j++)
+ writel(v, priv->regs + APLIC_TARGET_BASE + (j - 1) * sizeof(u32));
+ }
+
+ setup_count++;
+ }
+ put_cpu();
+
+ /* Find parent domain and register chained handler */
+ domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(),
+ DOMAIN_BUS_ANY);
+ if (!aplic_direct_parent_irq && domain) {
+ aplic_direct_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
+ if (aplic_direct_parent_irq) {
+ irq_set_chained_handler(aplic_direct_parent_irq,
+ aplic_direct_handle_irq);
+
+ /*
+			 * Set up a CPUHP notifier to enable the parent
+			 * interrupt on all CPUs.
+ */
+ cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "irqchip/riscv/aplic:starting",
+ aplic_direct_starting_cpu,
+ aplic_direct_dying_cpu);
+ }
+ }
+
+ /* Fail if we were not able to setup IDC for any CPU */
+ if (!setup_count)
+ return -ENODEV;
+
+ /* Setup global config and interrupt delivery */
+ aplic_init_hw_global(priv, false);
+
+ /* Create irq domain instance for the APLIC */
+ direct->irqdomain = irq_domain_create_linear(dev->fwnode, priv->nr_irqs + 1,
+ &aplic_direct_irqdomain_ops, priv);
+ if (!direct->irqdomain) {
+ dev_err(dev, "failed to create direct irq domain\n");
+ return -ENOMEM;
+ }
+
+ /* Advertise the interrupt controller */
+ dev_info(dev, "%d interrupts directly connected to %d CPUs\n",
+ priv->nr_irqs, priv->nr_idcs);
+
+ return 0;
+}
diff --git a/drivers/irqchip/irq-riscv-aplic-main.c b/drivers/irqchip/irq-riscv-aplic-main.c
new file mode 100644
index 000000000000..93e7c51f944a
--- /dev/null
+++ b/drivers/irqchip/irq-riscv-aplic-main.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/irqchip/riscv-aplic.h>
+#include <linux/irqchip/riscv-imsic.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+
+#include "irq-riscv-aplic-main.h"
+
+void aplic_irq_unmask(struct irq_data *d)
+{
+ struct aplic_priv *priv = irq_data_get_irq_chip_data(d);
+
+ writel(d->hwirq, priv->regs + APLIC_SETIENUM);
+}
+
+void aplic_irq_mask(struct irq_data *d)
+{
+ struct aplic_priv *priv = irq_data_get_irq_chip_data(d);
+
+ writel(d->hwirq, priv->regs + APLIC_CLRIENUM);
+}
+
+int aplic_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct aplic_priv *priv = irq_data_get_irq_chip_data(d);
+ void __iomem *sourcecfg;
+ u32 val = 0;
+
+ switch (type) {
+ case IRQ_TYPE_NONE:
+ val = APLIC_SOURCECFG_SM_INACTIVE;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ val = APLIC_SOURCECFG_SM_LEVEL_LOW;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ val = APLIC_SOURCECFG_SM_LEVEL_HIGH;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ val = APLIC_SOURCECFG_SM_EDGE_FALL;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ val = APLIC_SOURCECFG_SM_EDGE_RISE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ sourcecfg = priv->regs + APLIC_SOURCECFG_BASE;
+ sourcecfg += (d->hwirq - 1) * sizeof(u32);
+ writel(val, sourcecfg);
+
+ return 0;
+}
+
+int aplic_irqdomain_translate(struct irq_fwspec *fwspec, u32 gsi_base,
+ unsigned long *hwirq, unsigned int *type)
+{
+ if (WARN_ON(fwspec->param_count < 2))
+ return -EINVAL;
+ if (WARN_ON(!fwspec->param[0]))
+ return -EINVAL;
+
+ /* For DT, gsi_base is always zero. */
+ *hwirq = fwspec->param[0] - gsi_base;
+ *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
+
+ WARN_ON(*type == IRQ_TYPE_NONE);
+
+ return 0;
+}
+
+void aplic_init_hw_global(struct aplic_priv *priv, bool msi_mode)
+{
+ u32 val;
+#ifdef CONFIG_RISCV_M_MODE
+ u32 valh;
+
+ if (msi_mode) {
+ val = lower_32_bits(priv->msicfg.base_ppn);
+ valh = FIELD_PREP(APLIC_xMSICFGADDRH_BAPPN, upper_32_bits(priv->msicfg.base_ppn));
+ valh |= FIELD_PREP(APLIC_xMSICFGADDRH_LHXW, priv->msicfg.lhxw);
+ valh |= FIELD_PREP(APLIC_xMSICFGADDRH_HHXW, priv->msicfg.hhxw);
+ valh |= FIELD_PREP(APLIC_xMSICFGADDRH_LHXS, priv->msicfg.lhxs);
+ valh |= FIELD_PREP(APLIC_xMSICFGADDRH_HHXS, priv->msicfg.hhxs);
+ writel(val, priv->regs + APLIC_xMSICFGADDR);
+ writel(valh, priv->regs + APLIC_xMSICFGADDRH);
+ }
+#endif
+
+ /* Setup APLIC domaincfg register */
+ val = readl(priv->regs + APLIC_DOMAINCFG);
+ val |= APLIC_DOMAINCFG_IE;
+ if (msi_mode)
+ val |= APLIC_DOMAINCFG_DM;
+ writel(val, priv->regs + APLIC_DOMAINCFG);
+ if (readl(priv->regs + APLIC_DOMAINCFG) != val)
+ dev_warn(priv->dev, "unable to write 0x%x in domaincfg\n", val);
+}
+
+static void aplic_init_hw_irqs(struct aplic_priv *priv)
+{
+ int i;
+
+ /* Disable all interrupts */
+ for (i = 0; i <= priv->nr_irqs; i += 32)
+ writel(-1U, priv->regs + APLIC_CLRIE_BASE + (i / 32) * sizeof(u32));
+
+ /* Set interrupt type and default priority for all interrupts */
+ for (i = 1; i <= priv->nr_irqs; i++) {
+ writel(0, priv->regs + APLIC_SOURCECFG_BASE + (i - 1) * sizeof(u32));
+ writel(APLIC_DEFAULT_PRIORITY,
+ priv->regs + APLIC_TARGET_BASE + (i - 1) * sizeof(u32));
+ }
+
+ /* Clear APLIC domaincfg */
+ writel(0, priv->regs + APLIC_DOMAINCFG);
+}
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id aplic_acpi_match[] = {
+ { "RSCV0002", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, aplic_acpi_match);
+#endif
+
+int aplic_setup_priv(struct aplic_priv *priv, struct device *dev, void __iomem *regs)
+{
+ struct device_node *np = to_of_node(dev->fwnode);
+ struct of_phandle_args parent;
+ int rc;
+
+ /* Save device pointer and register base */
+ priv->dev = dev;
+ priv->regs = regs;
+
+ if (np) {
+ /* Find out number of interrupt sources */
+ rc = of_property_read_u32(np, "riscv,num-sources", &priv->nr_irqs);
+ if (rc) {
+ dev_err(dev, "failed to get number of interrupt sources\n");
+ return rc;
+ }
+
+ /*
+ * Find out number of IDCs based on parent interrupts
+ *
+		 * If the "msi-parent" property is present, we ignore the
+		 * APLIC IDCs, which forces the APLIC driver into MSI mode.
+ */
+ if (!of_property_present(np, "msi-parent")) {
+ while (!of_irq_parse_one(np, priv->nr_idcs, &parent))
+ priv->nr_idcs++;
+ }
+ } else {
+ rc = riscv_acpi_get_gsi_info(dev->fwnode, &priv->gsi_base, &priv->acpi_aplic_id,
+ &priv->nr_irqs, &priv->nr_idcs);
+ if (rc) {
+ dev_err(dev, "failed to find GSI mapping\n");
+ return rc;
+ }
+ }
+
+	/* Set up the initial state of the APLIC interrupts */
+ aplic_init_hw_irqs(priv);
+
+ return 0;
+}
+
+static int aplic_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ bool msi_mode = false;
+ void __iomem *regs;
+ int rc;
+
+ /* Map the MMIO registers */
+ regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regs)) {
+		dev_err(dev, "failed to map MMIO registers\n");
+ return PTR_ERR(regs);
+ }
+
+ /*
+	 * If the msi-parent property is present then set up APLIC MSI
+	 * mode, otherwise set up APLIC direct mode.
+ */
+ if (is_of_node(dev->fwnode))
+ msi_mode = of_property_present(to_of_node(dev->fwnode), "msi-parent");
+ else
+		msi_mode = !!imsic_acpi_get_fwnode(NULL);
+
+ if (msi_mode)
+ rc = aplic_msi_setup(dev, regs);
+ else
+ rc = aplic_direct_setup(dev, regs);
+ if (rc)
+ dev_err_probe(dev, rc, "failed to setup APLIC in %s mode\n",
+ msi_mode ? "MSI" : "direct");
+
+#ifdef CONFIG_ACPI
+ if (!acpi_disabled)
+ acpi_dev_clear_dependencies(ACPI_COMPANION(dev));
+#endif
+
+ return rc;
+}
+
+static const struct of_device_id aplic_match[] = {
+ { .compatible = "riscv,aplic" },
+ {}
+};
+
+static struct platform_driver aplic_driver = {
+ .driver = {
+ .name = "riscv-aplic",
+ .of_match_table = aplic_match,
+ .acpi_match_table = ACPI_PTR(aplic_acpi_match),
+ },
+ .probe = aplic_probe,
+};
+builtin_platform_driver(aplic_driver);
diff --git a/drivers/irqchip/irq-riscv-aplic-main.h b/drivers/irqchip/irq-riscv-aplic-main.h
new file mode 100644
index 000000000000..b0ad8cde69b1
--- /dev/null
+++ b/drivers/irqchip/irq-riscv-aplic-main.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+
+#ifndef _IRQ_RISCV_APLIC_MAIN_H
+#define _IRQ_RISCV_APLIC_MAIN_H
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/fwnode.h>
+
+#define APLIC_DEFAULT_PRIORITY 1
+
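+/*
+ * Outgoing MSI address format parameters, mirroring the APLIC
+ * xmsicfgaddrh fields: lhxs/lhxw come from the IMSIC guest/hart index
+ * bits, hhxw/hhxs from its group index bits/shift (see aplic_msi_setup()).
+ */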
+struct aplic_msicfg {
+ phys_addr_t base_ppn;
+ u32 hhxs;
+ u32 hhxw;
+ u32 lhxs;
+ u32 lhxw;
+};
+
+struct aplic_priv {
+ struct device *dev;
+ u32 gsi_base;
+ u32 nr_irqs;
+ u32 nr_idcs;
+ u32 acpi_aplic_id;
+ void __iomem *regs;
+ struct aplic_msicfg msicfg;
+};
+
+void aplic_irq_unmask(struct irq_data *d);
+void aplic_irq_mask(struct irq_data *d);
+int aplic_irq_set_type(struct irq_data *d, unsigned int type);
+int aplic_irqdomain_translate(struct irq_fwspec *fwspec, u32 gsi_base,
+ unsigned long *hwirq, unsigned int *type);
+void aplic_init_hw_global(struct aplic_priv *priv, bool msi_mode);
+int aplic_setup_priv(struct aplic_priv *priv, struct device *dev, void __iomem *regs);
+int aplic_direct_setup(struct device *dev, void __iomem *regs);
+#ifdef CONFIG_RISCV_APLIC_MSI
+int aplic_msi_setup(struct device *dev, void __iomem *regs);
+#else
+static inline int aplic_msi_setup(struct device *dev, void __iomem *regs)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif
diff --git a/drivers/irqchip/irq-riscv-aplic-msi.c b/drivers/irqchip/irq-riscv-aplic-msi.c
new file mode 100644
index 000000000000..fb8d1838609f
--- /dev/null
+++ b/drivers/irqchip/irq-riscv-aplic-msi.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/riscv-aplic.h>
+#include <linux/irqchip/riscv-imsic.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/smp.h>
+
+#include "irq-riscv-aplic-main.h"
+
+static void aplic_msi_irq_mask(struct irq_data *d)
+{
+ aplic_irq_mask(d);
+ irq_chip_mask_parent(d);
+}
+
+static void aplic_msi_irq_unmask(struct irq_data *d)
+{
+ irq_chip_unmask_parent(d);
+ aplic_irq_unmask(d);
+}
+
+static void aplic_msi_irq_retrigger_level(struct irq_data *d)
+{
+ struct aplic_priv *priv = irq_data_get_irq_chip_data(d);
+
+ switch (irqd_get_trigger_type(d)) {
+ case IRQ_TYPE_LEVEL_LOW:
+ case IRQ_TYPE_LEVEL_HIGH:
+ /*
+ * The section "4.9.2 Special consideration for level-sensitive interrupt
+ * sources" of the RISC-V AIA specification says:
+ *
+ * A second option is for the interrupt service routine to write the
+ * APLIC’s source identity number for the interrupt to the domain’s
+ * setipnum register just before exiting. This will cause the interrupt’s
+ * pending bit to be set to one again if the source is still asserting
+ * an interrupt, but not if the source is not asserting an interrupt.
+ */
+ writel(d->hwirq, priv->regs + APLIC_SETIPNUM_LE);
+ break;
+ }
+}
+
+static void aplic_msi_irq_eoi(struct irq_data *d)
+{
+ /*
+ * EOI handling is required only for level-triggered interrupts
+ * when APLIC is in MSI mode.
+ */
+ aplic_msi_irq_retrigger_level(d);
+}
+
+static int aplic_msi_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ int rc = aplic_irq_set_type(d, type);
+
+ if (rc)
+ return rc;
+ /*
+ * Updating sourcecfg register for level-triggered interrupts
+ * requires interrupt retriggering when APLIC is in MSI mode.
+ */
+ aplic_msi_irq_retrigger_level(d);
+ return 0;
+}
+
+static void aplic_msi_write_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ unsigned int group_index, hart_index, guest_index, val;
+ struct aplic_priv *priv = irq_data_get_irq_chip_data(d);
+ struct aplic_msicfg *mc = &priv->msicfg;
+ phys_addr_t tppn, tbppn, msg_addr;
+ void __iomem *target;
+
+ /* For zeroed MSI, simply write zero into the target register */
+ if (!msg->address_hi && !msg->address_lo && !msg->data) {
+ target = priv->regs + APLIC_TARGET_BASE;
+ target += (d->hwirq - 1) * sizeof(u32);
+ writel(0, target);
+ return;
+ }
+
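+	/*
+	 * Decompose the MSI address back into the AIA group/hart/guest
+	 * index fields so that the APLIC, which regenerates target
+	 * addresses from its msicfgaddr registers, reaches the same IMSIC
+	 * file; the MSI data becomes the external interrupt identity (EIID).
+	 */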
+ /* Sanity check on message data */
+ WARN_ON(msg->data > APLIC_TARGET_EIID_MASK);
+
+ /* Compute target MSI address */
+ msg_addr = (((u64)msg->address_hi) << 32) | msg->address_lo;
+ tppn = msg_addr >> APLIC_xMSICFGADDR_PPN_SHIFT;
+
+ /* Compute target HART Base PPN */
+ tbppn = tppn;
+ tbppn &= ~APLIC_xMSICFGADDR_PPN_HART(mc->lhxs);
+ tbppn &= ~APLIC_xMSICFGADDR_PPN_LHX(mc->lhxw, mc->lhxs);
+ tbppn &= ~APLIC_xMSICFGADDR_PPN_HHX(mc->hhxw, mc->hhxs);
+ WARN_ON(tbppn != mc->base_ppn);
+
+ /* Compute target group and hart indexes */
+ group_index = (tppn >> APLIC_xMSICFGADDR_PPN_HHX_SHIFT(mc->hhxs)) &
+ APLIC_xMSICFGADDR_PPN_HHX_MASK(mc->hhxw);
+ hart_index = (tppn >> APLIC_xMSICFGADDR_PPN_LHX_SHIFT(mc->lhxs)) &
+ APLIC_xMSICFGADDR_PPN_LHX_MASK(mc->lhxw);
+ hart_index |= (group_index << mc->lhxw);
+ WARN_ON(hart_index > APLIC_TARGET_HART_IDX_MASK);
+
+ /* Compute target guest index */
+ guest_index = tppn & APLIC_xMSICFGADDR_PPN_HART(mc->lhxs);
+ WARN_ON(guest_index > APLIC_TARGET_GUEST_IDX_MASK);
+
+ /* Update IRQ TARGET register */
+ target = priv->regs + APLIC_TARGET_BASE;
+ target += (d->hwirq - 1) * sizeof(u32);
+ val = FIELD_PREP(APLIC_TARGET_HART_IDX, hart_index);
+ val |= FIELD_PREP(APLIC_TARGET_GUEST_IDX, guest_index);
+ val |= FIELD_PREP(APLIC_TARGET_EIID, msg->data);
+ writel(val, target);
+}
+
+static void aplic_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
+{
+ arg->desc = desc;
+ arg->hwirq = (u32)desc->data.icookie.value;
+}
+
+static int aplic_msi_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
+ unsigned long *hwirq, unsigned int *type)
+{
+ struct msi_domain_info *info = d->host_data;
+ struct aplic_priv *priv = info->data;
+
+ return aplic_irqdomain_translate(fwspec, priv->gsi_base, hwirq, type);
+}
+
+static const struct msi_domain_template aplic_msi_template = {
+ .chip = {
+ .name = "APLIC-MSI",
+ .irq_mask = aplic_msi_irq_mask,
+ .irq_unmask = aplic_msi_irq_unmask,
+ .irq_set_type = aplic_msi_irq_set_type,
+ .irq_eoi = aplic_msi_irq_eoi,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+#endif
+ .irq_write_msi_msg = aplic_msi_write_msg,
+ .flags = IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
+ },
+
+ .ops = {
+ .set_desc = aplic_msi_set_desc,
+ .msi_translate = aplic_msi_translate,
+ },
+
+ .info = {
+ .bus_token = DOMAIN_BUS_WIRED_TO_MSI,
+ .flags = MSI_FLAG_USE_DEV_FWNODE,
+ .handler = handle_fasteoi_irq,
+ .handler_name = "fasteoi",
+ },
+};
+
+int aplic_msi_setup(struct device *dev, void __iomem *regs)
+{
+ const struct imsic_global_config *imsic_global;
+ struct irq_domain *msi_domain;
+ struct aplic_priv *priv;
+ struct aplic_msicfg *mc;
+ phys_addr_t pa;
+ int rc;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ rc = aplic_setup_priv(priv, dev, regs);
+ if (rc) {
+ dev_err(dev, "failed to create APLIC context\n");
+ return rc;
+ }
+ mc = &priv->msicfg;
+
+ /*
+	 * The APLIC outgoing MSI config registers assume the target MSI
+	 * controller to be a RISC-V AIA IMSIC controller.
+ */
+ imsic_global = imsic_get_global_config();
+ if (!imsic_global) {
+ dev_err(dev, "IMSIC global config not found\n");
+ return -ENODEV;
+ }
+
+ /* Find number of guest index bits (LHXS) */
+ mc->lhxs = imsic_global->guest_index_bits;
+ if (APLIC_xMSICFGADDRH_LHXS_MASK < mc->lhxs) {
+		dev_err(dev, "IMSIC guest index bits too big for APLIC LHXS\n");
+ return -EINVAL;
+ }
+
+ /* Find number of HART index bits (LHXW) */
+ mc->lhxw = imsic_global->hart_index_bits;
+ if (APLIC_xMSICFGADDRH_LHXW_MASK < mc->lhxw) {
+		dev_err(dev, "IMSIC hart index bits too big for APLIC LHXW\n");
+ return -EINVAL;
+ }
+
+ /* Find number of group index bits (HHXW) */
+ mc->hhxw = imsic_global->group_index_bits;
+ if (APLIC_xMSICFGADDRH_HHXW_MASK < mc->hhxw) {
+		dev_err(dev, "IMSIC group index bits too big for APLIC HHXW\n");
+ return -EINVAL;
+ }
+
+ /* Find first bit position of group index (HHXS) */
+ mc->hhxs = imsic_global->group_index_shift;
+ if (mc->hhxs < (2 * APLIC_xMSICFGADDR_PPN_SHIFT)) {
+ dev_err(dev, "IMSIC group index shift should be >= %d\n",
+ (2 * APLIC_xMSICFGADDR_PPN_SHIFT));
+ return -EINVAL;
+ }
+ mc->hhxs -= (2 * APLIC_xMSICFGADDR_PPN_SHIFT);
+ if (APLIC_xMSICFGADDRH_HHXS_MASK < mc->hhxs) {
+		dev_err(dev, "IMSIC group index shift too big for APLIC HHXS\n");
+ return -EINVAL;
+ }
+
+ /* Compute PPN base */
+ mc->base_ppn = imsic_global->base_addr >> APLIC_xMSICFGADDR_PPN_SHIFT;
+ mc->base_ppn &= ~APLIC_xMSICFGADDR_PPN_HART(mc->lhxs);
+ mc->base_ppn &= ~APLIC_xMSICFGADDR_PPN_LHX(mc->lhxw, mc->lhxs);
+ mc->base_ppn &= ~APLIC_xMSICFGADDR_PPN_HHX(mc->hhxw, mc->hhxs);
+
+ /* Setup global config and interrupt delivery */
+ aplic_init_hw_global(priv, true);
+
+ /* Set the APLIC device MSI domain if not available */
+ if (!dev_get_msi_domain(dev)) {
+ /*
+		 * The device MSI domain for OF devices is only set at the
+		 * time the OF device is populated/created. If the device MSI
+		 * domain is discovered later, after the OF device is created,
+		 * then we need to set it explicitly before using any platform
+		 * MSI functions.
+		 *
+		 * In the case of the APLIC device, the parent MSI domain is
+		 * always the IMSIC, and the IMSIC MSI domains are created
+		 * later through platform driver probing, so we set it
+		 * explicitly here.
+ */
+ if (is_of_node(dev->fwnode)) {
+ of_msi_configure(dev, to_of_node(dev->fwnode));
+ } else {
+ msi_domain = irq_find_matching_fwnode(imsic_acpi_get_fwnode(dev),
+ DOMAIN_BUS_PLATFORM_MSI);
+ if (msi_domain)
+ dev_set_msi_domain(dev, msi_domain);
+ }
+
+ if (!dev_get_msi_domain(dev))
+ return -EPROBE_DEFER;
+ }
+
+ if (!msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN, &aplic_msi_template,
+ priv->nr_irqs + 1, priv, priv)) {
+ dev_err(dev, "failed to create MSI irq domain\n");
+ return -ENOMEM;
+ }
+
+ /* Advertise the interrupt controller */
+ pa = priv->msicfg.base_ppn << APLIC_xMSICFGADDR_PPN_SHIFT;
+ dev_info(dev, "%d interrupts forwarded to MSI base %pa\n", priv->nr_irqs, &pa);
+
+ return 0;
+}
diff --git a/drivers/irqchip/irq-riscv-imsic-early.c b/drivers/irqchip/irq-riscv-imsic-early.c
new file mode 100644
index 000000000000..275df5005705
--- /dev/null
+++ b/drivers/irqchip/irq-riscv-imsic-early.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+
+#define pr_fmt(fmt) "riscv-imsic: " fmt
+#include <linux/acpi.h>
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/riscv-imsic.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+
+#include "irq-riscv-imsic-state.h"
+
+static int imsic_parent_irq;
+
+#ifdef CONFIG_SMP
+static void imsic_ipi_send(unsigned int cpu)
+{
+ struct imsic_local_config *local = per_cpu_ptr(imsic->global.local, cpu);
+
+ writel(IMSIC_IPI_ID, local->msi_va);
+}
+
+static void imsic_ipi_starting_cpu(void)
+{
+ /* Enable IPIs for current CPU. */
+ __imsic_id_set_enable(IMSIC_IPI_ID);
+}
+
+static void imsic_ipi_dying_cpu(void)
+{
+ /* Disable IPIs for current CPU. */
+ __imsic_id_clear_enable(IMSIC_IPI_ID);
+}
+
+static int __init imsic_ipi_domain_init(void)
+{
+ int virq;
+
+ /* Create IMSIC IPI multiplexing */
+ virq = ipi_mux_create(IMSIC_NR_IPI, imsic_ipi_send);
+ if (virq <= 0)
+ return virq < 0 ? virq : -ENOMEM;
+
+ /* Set vIRQ range */
+ riscv_ipi_set_virq_range(virq, IMSIC_NR_IPI);
+
+ /* Announce that IMSIC is providing IPIs */
+ pr_info("%pfwP: providing IPIs using interrupt %d\n", imsic->fwnode, IMSIC_IPI_ID);
+
+ return 0;
+}
+#else
+static void imsic_ipi_starting_cpu(void) { }
+static void imsic_ipi_dying_cpu(void) { }
+static int __init imsic_ipi_domain_init(void) { return 0; }
+#endif
+
+/*
+ * To handle an interrupt, we read the TOPEI CSR and write zero to it in
+ * one instruction. If the TOPEI CSR is non-zero, we translate TOPEI.ID
+ * to a Linux interrupt number and let the Linux IRQ subsystem handle it.
+ */
+static void imsic_handle_irq(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ int err, cpu = smp_processor_id();
+ struct imsic_vector *vec;
+ unsigned long local_id;
+
+ chained_irq_enter(chip, desc);
+
+ while ((local_id = csr_swap(CSR_TOPEI, 0))) {
+ local_id >>= TOPEI_ID_SHIFT;
+
+ if (local_id == IMSIC_IPI_ID) {
+ if (IS_ENABLED(CONFIG_SMP))
+ ipi_mux_process();
+ continue;
+ }
+
+ if (unlikely(!imsic->base_domain))
+ continue;
+
+ vec = imsic_vector_from_local_id(cpu, local_id);
+ if (!vec) {
+ pr_warn_ratelimited("vector not found for local ID 0x%lx\n", local_id);
+ continue;
+ }
+
+ err = generic_handle_domain_irq(imsic->base_domain, vec->hwirq);
+ if (unlikely(err))
+ pr_warn_ratelimited("hwirq 0x%x mapping not found\n", vec->hwirq);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int imsic_starting_cpu(unsigned int cpu)
+{
+ /* Mark per-CPU IMSIC state as online */
+ imsic_state_online();
+
+ /* Enable per-CPU parent interrupt */
+ enable_percpu_irq(imsic_parent_irq, irq_get_trigger_type(imsic_parent_irq));
+
+ /* Setup IPIs */
+ imsic_ipi_starting_cpu();
+
+ /*
+	 * Interrupt identities might have been enabled/disabled while
+	 * this CPU was not running, so sync up the local enable/disable state.
+ */
+ imsic_local_sync_all();
+
+ /* Enable local interrupt delivery */
+ imsic_local_delivery(true);
+
+ return 0;
+}
+
+static int imsic_dying_cpu(unsigned int cpu)
+{
+ /* Cleanup IPIs */
+ imsic_ipi_dying_cpu();
+
+ /* Mark per-CPU IMSIC state as offline */
+ imsic_state_offline();
+
+ return 0;
+}
+
+static int __init imsic_early_probe(struct fwnode_handle *fwnode)
+{
+ struct irq_domain *domain;
+ int rc;
+
+ /* Find parent domain and register chained handler */
+ domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY);
+ if (!domain) {
+ pr_err("%pfwP: Failed to find INTC domain\n", fwnode);
+ return -ENOENT;
+ }
+ imsic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
+ if (!imsic_parent_irq) {
+ pr_err("%pfwP: Failed to create INTC mapping\n", fwnode);
+ return -ENOENT;
+ }
+
+ /* Initialize IPI domain */
+ rc = imsic_ipi_domain_init();
+ if (rc) {
+ pr_err("%pfwP: Failed to initialize IPI domain\n", fwnode);
+ return rc;
+ }
+
+	/* Set up the chained handler for the parent domain interrupt */
+ irq_set_chained_handler(imsic_parent_irq, imsic_handle_irq);
+
+ /*
+	 * Set up the cpuhp state (must be done after setting imsic_parent_irq).
+	 *
+	 * Don't disable the per-CPU IMSIC file when a CPU goes offline,
+	 * because that would affect IPIs; the masking/unmasking of
+	 * virtual IPIs is done via the generic IPI mux.
+ */
+ cpuhp_setup_state(CPUHP_AP_IRQ_RISCV_IMSIC_STARTING, "irqchip/riscv/imsic:starting",
+ imsic_starting_cpu, imsic_dying_cpu);
+
+ return 0;
+}
+
+static int __init imsic_early_dt_init(struct device_node *node, struct device_node *parent)
+{
+ struct fwnode_handle *fwnode = &node->fwnode;
+ int rc;
+
+ /* Setup IMSIC state */
+ rc = imsic_setup_state(fwnode, NULL);
+ if (rc) {
+ pr_err("%pfwP: failed to setup state (error %d)\n", fwnode, rc);
+ return rc;
+ }
+
+ /* Do early setup of IPIs */
+ rc = imsic_early_probe(fwnode);
+ if (rc)
+ return rc;
+
+ /* Ensure that OF platform device gets probed */
+ of_node_clear_flag(node, OF_POPULATED);
+ return 0;
+}
+
+IRQCHIP_DECLARE(riscv_imsic, "riscv,imsics", imsic_early_dt_init);
+
+#ifdef CONFIG_ACPI
+
+static struct fwnode_handle *imsic_acpi_fwnode;
+
+struct fwnode_handle *imsic_acpi_get_fwnode(struct device *dev)
+{
+ return imsic_acpi_fwnode;
+}
+
+static int __init imsic_early_acpi_init(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_imsic *imsic = (struct acpi_madt_imsic *)header;
+ int rc;
+
+ imsic_acpi_fwnode = irq_domain_alloc_named_fwnode("imsic");
+ if (!imsic_acpi_fwnode) {
+ pr_err("unable to allocate IMSIC FW node\n");
+ return -ENOMEM;
+ }
+
+ /* Setup IMSIC state */
+ rc = imsic_setup_state(imsic_acpi_fwnode, imsic);
+ if (rc) {
+ pr_err("%pfwP: failed to setup state (error %d)\n", imsic_acpi_fwnode, rc);
+ return rc;
+ }
+
+ /* Do early setup of IMSIC state and IPIs */
+ rc = imsic_early_probe(imsic_acpi_fwnode);
+ if (rc) {
+ irq_domain_free_fwnode(imsic_acpi_fwnode);
+ imsic_acpi_fwnode = NULL;
+ return rc;
+ }
+
+ rc = imsic_platform_acpi_probe(imsic_acpi_fwnode);
+
+#ifdef CONFIG_PCI
+ if (!rc)
+ pci_msi_register_fwnode_provider(&imsic_acpi_get_fwnode);
+#endif
+
+ if (rc)
+ pr_err("%pfwP: failed to register IMSIC for MSI functionality (error %d)\n",
+ imsic_acpi_fwnode, rc);
+
+ /*
+ * Even if imsic_platform_acpi_probe() fails, the IPI part of IMSIC can
+	 * continue to work, so there is no need to return failure. This is
+	 * similar to DT, where IPIs keep working even if the MSI probe fails.
+ */
+ return 0;
+}
+
+IRQCHIP_ACPI_DECLARE(riscv_imsic, ACPI_MADT_TYPE_IMSIC, NULL,
+ 1, imsic_early_acpi_init);
+#endif
diff --git a/drivers/irqchip/irq-riscv-imsic-platform.c b/drivers/irqchip/irq-riscv-imsic-platform.c
new file mode 100644
index 000000000000..c708780e8760
--- /dev/null
+++ b/drivers/irqchip/irq-riscv-imsic-platform.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+
+#define pr_fmt(fmt) "riscv-imsic: " fmt
+#include <linux/acpi.h>
+#include <linux/bitmap.h>
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+
+#include "irq-riscv-imsic-state.h"
+
+static bool imsic_cpu_page_phys(unsigned int cpu, unsigned int guest_index,
+ phys_addr_t *out_msi_pa)
+{
+ struct imsic_global_config *global;
+ struct imsic_local_config *local;
+
+ global = &imsic->global;
+ local = per_cpu_ptr(global->local, cpu);
+
+ if (BIT(global->guest_index_bits) <= guest_index)
+ return false;
+
+ if (out_msi_pa)
+ *out_msi_pa = local->msi_pa + (guest_index * IMSIC_MMIO_PAGE_SZ);
+
+ return true;
+}
+
+static void imsic_irq_mask(struct irq_data *d)
+{
+ imsic_vector_mask(irq_data_get_irq_chip_data(d));
+}
+
+static void imsic_irq_unmask(struct irq_data *d)
+{
+ imsic_vector_unmask(irq_data_get_irq_chip_data(d));
+}
+
+static int imsic_irq_retrigger(struct irq_data *d)
+{
+ struct imsic_vector *vec = irq_data_get_irq_chip_data(d);
+ struct imsic_local_config *local;
+
+ if (WARN_ON(!vec))
+ return -ENOENT;
+
+ local = per_cpu_ptr(imsic->global.local, vec->cpu);
+ writel_relaxed(vec->local_id, local->msi_va);
+ return 0;
+}
+
+static void imsic_irq_compose_vector_msg(struct imsic_vector *vec, struct msi_msg *msg)
+{
+ phys_addr_t msi_addr;
+
+ if (WARN_ON(!vec))
+ return;
+
+ if (WARN_ON(!imsic_cpu_page_phys(vec->cpu, 0, &msi_addr)))
+ return;
+
+ msg->address_hi = upper_32_bits(msi_addr);
+ msg->address_lo = lower_32_bits(msi_addr);
+ msg->data = vec->local_id;
+}
+
+static void imsic_irq_compose_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ imsic_irq_compose_vector_msg(irq_data_get_irq_chip_data(d), msg);
+}
+
+#ifdef CONFIG_SMP
+static void imsic_msi_update_msg(struct irq_data *d, struct imsic_vector *vec)
+{
+ struct msi_msg msg = { };
+
+ imsic_irq_compose_vector_msg(vec, &msg);
+ irq_data_get_irq_chip(d)->irq_write_msi_msg(d, &msg);
+}
+
+static int imsic_irq_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+ bool force)
+{
+ struct imsic_vector *old_vec, *new_vec;
+ struct irq_data *pd = d->parent_data;
+
+ old_vec = irq_data_get_irq_chip_data(pd);
+ if (WARN_ON(!old_vec))
+ return -ENOENT;
+
+ /* If old vector cpu belongs to the target cpumask then do nothing */
+ if (cpumask_test_cpu(old_vec->cpu, mask_val))
+ return IRQ_SET_MASK_OK_DONE;
+
+ /* If move is already in-flight then return failure */
+ if (imsic_vector_get_move(old_vec))
+ return -EBUSY;
+
+ /* Get a new vector on the desired set of CPUs */
+ new_vec = imsic_vector_alloc(old_vec->hwirq, mask_val);
+ if (!new_vec)
+ return -ENOSPC;
+
+ /* Point device to the new vector */
+ imsic_msi_update_msg(d, new_vec);
+
+ /* Update irq descriptors with the new vector */
+ pd->chip_data = new_vec;
+
+ /* Update effective affinity of parent irq data */
+ irq_data_update_effective_affinity(pd, cpumask_of(new_vec->cpu));
+
+ /* Move state of the old vector to the new vector */
+ imsic_vector_move(old_vec, new_vec);
+
+ return IRQ_SET_MASK_OK_DONE;
+}
+#endif
+
+static struct irq_chip imsic_irq_base_chip = {
+ .name = "IMSIC",
+ .irq_mask = imsic_irq_mask,
+ .irq_unmask = imsic_irq_unmask,
+ .irq_retrigger = imsic_irq_retrigger,
+ .irq_compose_msi_msg = imsic_irq_compose_msg,
+ .flags = IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static int imsic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct imsic_vector *vec;
+
+ /* Multi-MSI is not supported yet. */
+ if (nr_irqs > 1)
+ return -EOPNOTSUPP;
+
+ vec = imsic_vector_alloc(virq, cpu_online_mask);
+ if (!vec)
+ return -ENOSPC;
+
+ irq_domain_set_info(domain, virq, virq, &imsic_irq_base_chip, vec,
+ handle_simple_irq, NULL, NULL);
+ irq_set_noprobe(virq);
+ irq_set_affinity(virq, cpu_online_mask);
+ irq_data_update_effective_affinity(irq_get_irq_data(virq), cpumask_of(vec->cpu));
+
+ return 0;
+}
+
+static void imsic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+
+ imsic_vector_free(irq_data_get_irq_chip_data(d));
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+}
+
+static int imsic_irq_domain_select(struct irq_domain *domain, struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token)
+{
+ const struct msi_parent_ops *ops = domain->msi_parent_ops;
+ u32 busmask = BIT(bus_token);
+
+ if (fwspec->fwnode != domain->fwnode || fwspec->param_count != 0)
+ return 0;
+
+ /* Handle pure domain searches */
+ if (bus_token == ops->bus_select_token)
+ return 1;
+
+ return !!(ops->bus_select_mask & busmask);
+}
+
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+static void imsic_irq_debug_show(struct seq_file *m, struct irq_domain *d,
+ struct irq_data *irqd, int ind)
+{
+ if (!irqd) {
+ imsic_vector_debug_show_summary(m, ind);
+ return;
+ }
+
+ imsic_vector_debug_show(m, irq_data_get_irq_chip_data(irqd), ind);
+}
+#endif
+
+static const struct irq_domain_ops imsic_base_domain_ops = {
+ .alloc = imsic_irq_domain_alloc,
+ .free = imsic_irq_domain_free,
+ .select = imsic_irq_domain_select,
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+ .debug_show = imsic_irq_debug_show,
+#endif
+};
+
+#ifdef CONFIG_RISCV_IMSIC_PCI
+
+static void imsic_pci_mask_irq(struct irq_data *d)
+{
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
+
+static void imsic_pci_unmask_irq(struct irq_data *d)
+{
+ irq_chip_unmask_parent(d);
+ pci_msi_unmask_irq(d);
+}
+
+#define MATCH_PCI_MSI BIT(DOMAIN_BUS_PCI_MSI)
+
+#else
+
+#define MATCH_PCI_MSI 0
+
+#endif
+
+static bool imsic_init_dev_msi_info(struct device *dev,
+ struct irq_domain *domain,
+ struct irq_domain *real_parent,
+ struct msi_domain_info *info)
+{
+ const struct msi_parent_ops *pops = real_parent->msi_parent_ops;
+
+ /* MSI parent domain specific settings */
+ switch (real_parent->bus_token) {
+ case DOMAIN_BUS_NEXUS:
+ if (WARN_ON_ONCE(domain != real_parent))
+ return false;
+#ifdef CONFIG_SMP
+ info->chip->irq_set_affinity = imsic_irq_set_affinity;
+#endif
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return false;
+ }
+
+ /* Is the target supported? */
+ switch (info->bus_token) {
+#ifdef CONFIG_RISCV_IMSIC_PCI
+ case DOMAIN_BUS_PCI_DEVICE_MSI:
+ case DOMAIN_BUS_PCI_DEVICE_MSIX:
+ info->chip->irq_mask = imsic_pci_mask_irq;
+ info->chip->irq_unmask = imsic_pci_unmask_irq;
+ break;
+#endif
+ case DOMAIN_BUS_DEVICE_MSI:
+ /*
+ * Per-device MSI should never have any MSI feature bits
+		 * set. Its sole purpose is to create a dumb interrupt
+		 * chip which has a device-specific irq_write_msi_msg()
+ * callback.
+ */
+ if (WARN_ON_ONCE(info->flags))
+ return false;
+
+ /* Core managed MSI descriptors */
+ info->flags |= MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS |
+ MSI_FLAG_FREE_MSI_DESCS;
+ break;
+ case DOMAIN_BUS_WIRED_TO_MSI:
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return false;
+ }
+
+	/* Use hierarchical chip operations for retrigger */
+ info->chip->irq_retrigger = irq_chip_retrigger_hierarchy;
+
+ /*
+ * Mask out the domain specific MSI feature flags which are not
+ * supported by the real parent.
+ */
+ info->flags &= pops->supported_flags;
+
+ /* Enforce the required flags */
+ info->flags |= pops->required_flags;
+
+ return true;
+}
+
+#define MATCH_PLATFORM_MSI BIT(DOMAIN_BUS_PLATFORM_MSI)
+
+static const struct msi_parent_ops imsic_msi_parent_ops = {
+ .supported_flags = MSI_GENERIC_FLAGS_MASK |
+ MSI_FLAG_PCI_MSIX,
+ .required_flags = MSI_FLAG_USE_DEF_DOM_OPS |
+ MSI_FLAG_USE_DEF_CHIP_OPS,
+ .bus_select_token = DOMAIN_BUS_NEXUS,
+ .bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
+ .init_dev_msi_info = imsic_init_dev_msi_info,
+};
+
+int imsic_irqdomain_init(void)
+{
+ struct imsic_global_config *global;
+
+ if (!imsic || !imsic->fwnode) {
+ pr_err("early driver not probed\n");
+ return -ENODEV;
+ }
+
+ if (imsic->base_domain) {
+ pr_err("%pfwP: irq domain already created\n", imsic->fwnode);
+ return -ENODEV;
+ }
+
+ /* Create Base IRQ domain */
+ imsic->base_domain = irq_domain_create_tree(imsic->fwnode,
+ &imsic_base_domain_ops, imsic);
+ if (!imsic->base_domain) {
+ pr_err("%pfwP: failed to create IMSIC base domain\n", imsic->fwnode);
+ return -ENOMEM;
+ }
+ imsic->base_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
+ imsic->base_domain->msi_parent_ops = &imsic_msi_parent_ops;
+
+ irq_domain_update_bus_token(imsic->base_domain, DOMAIN_BUS_NEXUS);
+
+ global = &imsic->global;
+ pr_info("%pfwP: hart-index-bits: %d, guest-index-bits: %d\n",
+ imsic->fwnode, global->hart_index_bits, global->guest_index_bits);
+ pr_info("%pfwP: group-index-bits: %d, group-index-shift: %d\n",
+ imsic->fwnode, global->group_index_bits, global->group_index_shift);
+ pr_info("%pfwP: per-CPU IDs %d at base address %pa\n",
+ imsic->fwnode, global->nr_ids, &global->base_addr);
+ pr_info("%pfwP: total %d interrupts available\n",
+ imsic->fwnode, num_possible_cpus() * (global->nr_ids - 1));
+
+ return 0;
+}
+
+static int imsic_platform_probe_common(struct fwnode_handle *fwnode)
+{
+ if (imsic && imsic->fwnode != fwnode) {
+ pr_err("%pfwP: fwnode mismatch\n", fwnode);
+ return -ENODEV;
+ }
+
+ return imsic_irqdomain_init();
+}
+
+static int imsic_platform_dt_probe(struct platform_device *pdev)
+{
+ return imsic_platform_probe_common(pdev->dev.fwnode);
+}
+
+#ifdef CONFIG_ACPI
+
+/*
+ * On ACPI-based systems, PCI enumeration happens early during boot in
+ * acpi_scan_init(). PCI enumeration expects the MSI domain to be set up
+ * before it calls pci_set_msi_domain(). Hence, unlike in DT where the
+ * imsic-platform driver probe happens late during boot, ACPI-based
+ * systems need to set up the MSI domain early.
+ */
+int imsic_platform_acpi_probe(struct fwnode_handle *fwnode)
+{
+ return imsic_platform_probe_common(fwnode);
+}
+
+#endif
+
+static const struct of_device_id imsic_platform_match[] = {
+ { .compatible = "riscv,imsics" },
+ {}
+};
+
+static struct platform_driver imsic_platform_driver = {
+ .driver = {
+ .name = "riscv-imsic",
+ .of_match_table = imsic_platform_match,
+ },
+ .probe = imsic_platform_dt_probe,
+};
+builtin_platform_driver(imsic_platform_driver);
diff --git a/drivers/irqchip/irq-riscv-imsic-state.c b/drivers/irqchip/irq-riscv-imsic-state.c
new file mode 100644
index 000000000000..b97e6cd89ed7
--- /dev/null
+++ b/drivers/irqchip/irq-riscv-imsic-state.c
@@ -0,0 +1,891 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+
+#define pr_fmt(fmt) "riscv-imsic: " fmt
+#include <linux/acpi.h>
+#include <linux/cpu.h>
+#include <linux/bitmap.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <asm/hwcap.h>
+
+#include "irq-riscv-imsic-state.h"
+
+#define IMSIC_DISABLE_EIDELIVERY 0
+#define IMSIC_ENABLE_EIDELIVERY 1
+#define IMSIC_DISABLE_EITHRESHOLD 1
+#define IMSIC_ENABLE_EITHRESHOLD 0
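+
+/*
+ * A nonzero eithreshold P masks all interrupt identities >= P; since valid
+ * identities start at 1, a threshold of 1 masks everything, while 0
+ * disables thresholding.
+ */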
+
+static inline void imsic_csr_write(unsigned long reg, unsigned long val)
+{
+ csr_write(CSR_ISELECT, reg);
+ csr_write(CSR_IREG, val);
+}
+
+static inline unsigned long imsic_csr_read(unsigned long reg)
+{
+ csr_write(CSR_ISELECT, reg);
+ return csr_read(CSR_IREG);
+}
+
+static inline unsigned long imsic_csr_read_clear(unsigned long reg, unsigned long val)
+{
+ csr_write(CSR_ISELECT, reg);
+ return csr_read_clear(CSR_IREG, val);
+}
+
+static inline void imsic_csr_set(unsigned long reg, unsigned long val)
+{
+ csr_write(CSR_ISELECT, reg);
+ csr_set(CSR_IREG, val);
+}
+
+static inline void imsic_csr_clear(unsigned long reg, unsigned long val)
+{
+ csr_write(CSR_ISELECT, reg);
+ csr_clear(CSR_IREG, val);
+}
+
+struct imsic_priv *imsic;
+
+const struct imsic_global_config *imsic_get_global_config(void)
+{
+ return imsic ? &imsic->global : NULL;
+}
+EXPORT_SYMBOL_GPL(imsic_get_global_config);
+
+static bool __imsic_eix_read_clear(unsigned long id, bool pend)
+{
+ unsigned long isel, imask;
+
+ isel = id / BITS_PER_LONG;
+ isel *= BITS_PER_LONG / IMSIC_EIPx_BITS;
+ isel += pend ? IMSIC_EIP0 : IMSIC_EIE0;
+ imask = BIT(id & (__riscv_xlen - 1));
+
+ return !!(imsic_csr_read_clear(isel, imask) & imask);
+}
+
+static inline bool __imsic_id_read_clear_enabled(unsigned long id)
+{
+ return __imsic_eix_read_clear(id, false);
+}
+
+static inline bool __imsic_id_read_clear_pending(unsigned long id)
+{
+ return __imsic_eix_read_clear(id, true);
+}
+
+void __imsic_eix_update(unsigned long base_id, unsigned long num_id, bool pend, bool val)
+{
+ unsigned long id = base_id, last_id = base_id + num_id;
+ unsigned long i, isel, ireg;
+
+ while (id < last_id) {
+ isel = id / BITS_PER_LONG;
+ isel *= BITS_PER_LONG / IMSIC_EIPx_BITS;
+ isel += pend ? IMSIC_EIP0 : IMSIC_EIE0;
+
+ /*
+ * Prepare the ID mask to be programmed in the
+ * IMSIC EIEx and EIPx registers. These registers
+ * are XLEN-wide and we must not touch IDs which
+ * are < base_id or >= (base_id + num_id).
+ */
+ ireg = 0;
+ for (i = id & (__riscv_xlen - 1); id < last_id && i < __riscv_xlen; i++) {
+ ireg |= BIT(i);
+ id++;
+ }
+
+ /*
+ * The IMSIC EIEx and EIPx registers are accessed
+ * indirectly through the ISELECT and IREG CSRs, so we
+ * need to access these CSRs without getting preempted.
+ *
+ * All existing users of this function call this
+ * function with local IRQs disabled so we don't
+ * need to do anything special here.
+ */
+ if (val)
+ imsic_csr_set(isel, ireg);
+ else
+ imsic_csr_clear(isel, ireg);
+ }
+}
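
For reference, the select/mask arithmetic above can be factored out as follows. This is an illustrative sketch, not a helper that exists in this file; it assumes the same IMSIC_EIPx_BITS (32) and __riscv_xlen definitions the driver already uses:

	/* Locate the EIEx enable register and bit for one interrupt identity (sketch) */
	static inline void eix_locate(unsigned long id, unsigned long *isel, unsigned long *imask)
	{
		*isel = (id / BITS_PER_LONG) * (BITS_PER_LONG / IMSIC_EIPx_BITS) + IMSIC_EIE0;
		*imask = BIT(id & (__riscv_xlen - 1));
	}

	/* e.g. on RV64, id = 70 yields *isel = IMSIC_EIE0 + 2 and *imask = BIT(6) */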
+
+static void __imsic_local_sync(struct imsic_local_priv *lpriv)
+{
+ struct imsic_local_config *mlocal;
+ struct imsic_vector *vec, *mvec;
+ int i;
+
+ lockdep_assert_held(&lpriv->lock);
+
+ for_each_set_bit(i, lpriv->dirty_bitmap, imsic->global.nr_ids + 1) {
+ if (!i || i == IMSIC_IPI_ID)
+ goto skip;
+ vec = &lpriv->vectors[i];
+
+ if (READ_ONCE(vec->enable))
+ __imsic_id_set_enable(i);
+ else
+ __imsic_id_clear_enable(i);
+
+ /*
+ * If the ID was being moved to a new ID on some other CPU
+ * then we can get an MSI during the movement, so check the
+ * ID pending bit and re-trigger the new ID on the other
+ * CPU using an MMIO write.
+ */
+ mvec = READ_ONCE(vec->move);
+ WRITE_ONCE(vec->move, NULL);
+ if (mvec && mvec != vec) {
+ if (__imsic_id_read_clear_pending(i)) {
+ mlocal = per_cpu_ptr(imsic->global.local, mvec->cpu);
+ writel_relaxed(mvec->local_id, mlocal->msi_va);
+ }
+
+ imsic_vector_free(&lpriv->vectors[i]);
+ }
+
+skip:
+ bitmap_clear(lpriv->dirty_bitmap, i, 1);
+ }
+}
+
+void imsic_local_sync_all(void)
+{
+ struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&lpriv->lock, flags);
+ bitmap_fill(lpriv->dirty_bitmap, imsic->global.nr_ids + 1);
+ __imsic_local_sync(lpriv);
+ raw_spin_unlock_irqrestore(&lpriv->lock, flags);
+}
+
+void imsic_local_delivery(bool enable)
+{
+ if (enable) {
+ imsic_csr_write(IMSIC_EITHRESHOLD, IMSIC_ENABLE_EITHRESHOLD);
+ imsic_csr_write(IMSIC_EIDELIVERY, IMSIC_ENABLE_EIDELIVERY);
+ return;
+ }
+
+ imsic_csr_write(IMSIC_EIDELIVERY, IMSIC_DISABLE_EIDELIVERY);
+ imsic_csr_write(IMSIC_EITHRESHOLD, IMSIC_DISABLE_EITHRESHOLD);
+}
+
+#ifdef CONFIG_SMP
+static void imsic_local_timer_callback(struct timer_list *timer)
+{
+ struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&lpriv->lock, flags);
+ __imsic_local_sync(lpriv);
+ raw_spin_unlock_irqrestore(&lpriv->lock, flags);
+}
+
+static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu)
+{
+ lockdep_assert_held(&lpriv->lock);
+
+ /*
+ * The spinlock acquire/release semantics ensure that changes
+ * to vector enable, vector move and dirty bitmap are visible
+ * to the target CPU.
+ */
+
+ /*
+ * We schedule a timer on the target CPU if the target CPU is not
+ * the same as the current CPU. An offline CPU will unconditionally
+ * synchronize IDs through imsic_starting_cpu() when the
+ * CPU is brought up.
+ */
+ if (cpu_online(cpu)) {
+ if (cpu == smp_processor_id()) {
+ __imsic_local_sync(lpriv);
+ return;
+ }
+
+ if (!timer_pending(&lpriv->timer)) {
+ lpriv->timer.expires = jiffies + 1;
+ add_timer_on(&lpriv->timer, cpu);
+ }
+ }
+}
+#else
+static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu)
+{
+ lockdep_assert_held(&lpriv->lock);
+ __imsic_local_sync(lpriv);
+}
+#endif
+
+void imsic_vector_mask(struct imsic_vector *vec)
+{
+ struct imsic_local_priv *lpriv;
+
+ lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);
+ if (WARN_ON_ONCE(&lpriv->vectors[vec->local_id] != vec))
+ return;
+
+ /*
+ * This function is called through the Linux irq subsystem with
+ * irqs disabled, so there is no need to save/restore irq flags.
+ */
+
+ raw_spin_lock(&lpriv->lock);
+
+ WRITE_ONCE(vec->enable, false);
+ bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1);
+ __imsic_remote_sync(lpriv, vec->cpu);
+
+ raw_spin_unlock(&lpriv->lock);
+}
+
+void imsic_vector_unmask(struct imsic_vector *vec)
+{
+ struct imsic_local_priv *lpriv;
+
+ lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);
+ if (WARN_ON_ONCE(&lpriv->vectors[vec->local_id] != vec))
+ return;
+
+ /*
+ * This function is called through the Linux irq subsystem with
+ * irqs disabled, so there is no need to save/restore irq flags.
+ */
+
+ raw_spin_lock(&lpriv->lock);
+
+ WRITE_ONCE(vec->enable, true);
+ bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1);
+ __imsic_remote_sync(lpriv, vec->cpu);
+
+ raw_spin_unlock(&lpriv->lock);
+}
+
+static bool imsic_vector_move_update(struct imsic_local_priv *lpriv, struct imsic_vector *vec,
+ bool new_enable, struct imsic_vector *new_move)
+{
+ unsigned long flags;
+ bool enabled;
+
+ raw_spin_lock_irqsave(&lpriv->lock, flags);
+
+ /* Update enable and move details */
+ enabled = READ_ONCE(vec->enable);
+ WRITE_ONCE(vec->enable, new_enable);
+ WRITE_ONCE(vec->move, new_move);
+
+ /* Mark the vector as dirty and synchronize */
+ bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1);
+ __imsic_remote_sync(lpriv, vec->cpu);
+
+ raw_spin_unlock_irqrestore(&lpriv->lock, flags);
+
+ return enabled;
+}
+
+void imsic_vector_move(struct imsic_vector *old_vec, struct imsic_vector *new_vec)
+{
+ struct imsic_local_priv *old_lpriv, *new_lpriv;
+ bool enabled;
+
+ if (WARN_ON_ONCE(old_vec->cpu == new_vec->cpu))
+ return;
+
+ old_lpriv = per_cpu_ptr(imsic->lpriv, old_vec->cpu);
+ if (WARN_ON_ONCE(&old_lpriv->vectors[old_vec->local_id] != old_vec))
+ return;
+
+ new_lpriv = per_cpu_ptr(imsic->lpriv, new_vec->cpu);
+ if (WARN_ON_ONCE(&new_lpriv->vectors[new_vec->local_id] != new_vec))
+ return;
+
+ /*
+ * Move and re-trigger the new vector based on the pending
+ * state of the old vector, because we might get a device
+ * interrupt on the old vector while the device was being
+ * moved to the new vector.
+ */
+ enabled = imsic_vector_move_update(old_lpriv, old_vec, false, new_vec);
+ imsic_vector_move_update(new_lpriv, new_vec, enabled, new_vec);
+}
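
imsic_vector_move() is the back half of an interrupt affinity change. A minimal sketch of the expected caller follows; the platform half of the driver does the equivalent in its irq_set_affinity callback, and example_update_msi_msg() is a hypothetical helper standing in for whatever retargets the device's MSI write:

	static int example_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
	{
		struct imsic_vector *old_vec = irq_data_get_irq_chip_data(d);
		struct imsic_vector *new_vec;

		new_vec = imsic_vector_alloc(old_vec->hwirq, mask);
		if (!new_vec)
			return -ENOSPC;

		/* Retarget the device at the new (cpu, local_id) first */
		example_update_msi_msg(d, new_vec);
		d->chip_data = new_vec;
		irq_data_update_effective_affinity(d, cpumask_of(new_vec->cpu));

		/* Hand over enable/pending state; the old vector is freed lazily */
		imsic_vector_move(old_vec, new_vec);
		return IRQ_SET_MASK_OK_DONE;
	}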
+
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+void imsic_vector_debug_show(struct seq_file *m, struct imsic_vector *vec, int ind)
+{
+ struct imsic_local_priv *lpriv;
+ struct imsic_vector *mvec;
+ bool is_enabled;
+
+ lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);
+ if (WARN_ON_ONCE(&lpriv->vectors[vec->local_id] != vec))
+ return;
+
+ is_enabled = imsic_vector_isenabled(vec);
+ mvec = imsic_vector_get_move(vec);
+
+ seq_printf(m, "%*starget_cpu : %5u\n", ind, "", vec->cpu);
+ seq_printf(m, "%*starget_local_id : %5u\n", ind, "", vec->local_id);
+ seq_printf(m, "%*sis_reserved : %5u\n", ind, "",
+ (vec->local_id <= IMSIC_IPI_ID) ? 1 : 0);
+ seq_printf(m, "%*sis_enabled : %5u\n", ind, "", is_enabled ? 1 : 0);
+ seq_printf(m, "%*sis_move_pending : %5u\n", ind, "", mvec ? 1 : 0);
+ if (mvec) {
+ seq_printf(m, "%*smove_cpu : %5u\n", ind, "", mvec->cpu);
+ seq_printf(m, "%*smove_local_id : %5u\n", ind, "", mvec->local_id);
+ }
+}
+
+void imsic_vector_debug_show_summary(struct seq_file *m, int ind)
+{
+ irq_matrix_debug_show(m, imsic->matrix, ind);
+}
+#endif
+
+struct imsic_vector *imsic_vector_from_local_id(unsigned int cpu, unsigned int local_id)
+{
+ struct imsic_local_priv *lpriv = per_cpu_ptr(imsic->lpriv, cpu);
+
+ if (!lpriv || imsic->global.nr_ids < local_id)
+ return NULL;
+
+ return &lpriv->vectors[local_id];
+}
+
+struct imsic_vector *imsic_vector_alloc(unsigned int hwirq, const struct cpumask *mask)
+{
+ struct imsic_vector *vec = NULL;
+ struct imsic_local_priv *lpriv;
+ unsigned long flags;
+ unsigned int cpu;
+ int local_id;
+
+ raw_spin_lock_irqsave(&imsic->matrix_lock, flags);
+ local_id = irq_matrix_alloc(imsic->matrix, mask, false, &cpu);
+ raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags);
+ if (local_id < 0)
+ return NULL;
+
+ lpriv = per_cpu_ptr(imsic->lpriv, cpu);
+ vec = &lpriv->vectors[local_id];
+ vec->hwirq = hwirq;
+ vec->enable = false;
+ vec->move = NULL;
+
+ return vec;
+}
+
+void imsic_vector_free(struct imsic_vector *vec)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&imsic->matrix_lock, flags);
+ vec->hwirq = UINT_MAX;
+ irq_matrix_free(imsic->matrix, vec->cpu, vec->local_id, false);
+ raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags);
+}
+
+static void __init imsic_local_cleanup(void)
+{
+ struct imsic_local_priv *lpriv;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ lpriv = per_cpu_ptr(imsic->lpriv, cpu);
+
+ bitmap_free(lpriv->dirty_bitmap);
+ kfree(lpriv->vectors);
+ }
+
+ free_percpu(imsic->lpriv);
+}
+
+static int __init imsic_local_init(void)
+{
+ struct imsic_global_config *global = &imsic->global;
+ struct imsic_local_priv *lpriv;
+ struct imsic_vector *vec;
+ int cpu, i;
+
+ /* Allocate per-CPU private state */
+ imsic->lpriv = alloc_percpu(typeof(*imsic->lpriv));
+ if (!imsic->lpriv)
+ return -ENOMEM;
+
+ /* Setup per-CPU private state */
+ for_each_possible_cpu(cpu) {
+ lpriv = per_cpu_ptr(imsic->lpriv, cpu);
+
+ raw_spin_lock_init(&lpriv->lock);
+
+ /* Allocate dirty bitmap */
+ lpriv->dirty_bitmap = bitmap_zalloc(global->nr_ids + 1, GFP_KERNEL);
+ if (!lpriv->dirty_bitmap)
+ goto fail_local_cleanup;
+
+#ifdef CONFIG_SMP
+ /* Setup lazy timer for synchronization */
+ timer_setup(&lpriv->timer, imsic_local_timer_callback, TIMER_PINNED);
+#endif
+
+ /* Allocate vector array */
+ lpriv->vectors = kcalloc(global->nr_ids + 1, sizeof(*lpriv->vectors),
+ GFP_KERNEL);
+ if (!lpriv->vectors)
+ goto fail_local_cleanup;
+
+ /* Setup vector array */
+ for (i = 0; i <= global->nr_ids; i++) {
+ vec = &lpriv->vectors[i];
+ vec->cpu = cpu;
+ vec->local_id = i;
+ vec->hwirq = UINT_MAX;
+ }
+ }
+
+ return 0;
+
+fail_local_cleanup:
+ imsic_local_cleanup();
+ return -ENOMEM;
+}
+
+void imsic_state_online(void)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&imsic->matrix_lock, flags);
+ irq_matrix_online(imsic->matrix);
+ raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags);
+}
+
+void imsic_state_offline(void)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&imsic->matrix_lock, flags);
+ irq_matrix_offline(imsic->matrix);
+ raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags);
+
+#ifdef CONFIG_SMP
+ struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
+
+ raw_spin_lock_irqsave(&lpriv->lock, flags);
+ WARN_ON_ONCE(try_to_del_timer_sync(&lpriv->timer) < 0);
+ raw_spin_unlock_irqrestore(&lpriv->lock, flags);
+#endif
+}
+
+static int __init imsic_matrix_init(void)
+{
+ struct imsic_global_config *global = &imsic->global;
+
+ raw_spin_lock_init(&imsic->matrix_lock);
+ imsic->matrix = irq_alloc_matrix(global->nr_ids + 1,
+ 0, global->nr_ids + 1);
+ if (!imsic->matrix)
+ return -ENOMEM;
+
+ /* Reserve ID#0 because it is special and never implemented */
+ irq_matrix_assign_system(imsic->matrix, 0, false);
+
+ /* Reserve IPI ID because it is special and used internally */
+ irq_matrix_assign_system(imsic->matrix, IMSIC_IPI_ID, false);
+
+ return 0;
+}
+
+static int __init imsic_populate_global_dt(struct fwnode_handle *fwnode,
+ struct imsic_global_config *global,
+ u32 *nr_parent_irqs)
+{
+ int rc;
+
+ /* Find number of guest index bits in MSI address */
+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,guest-index-bits",
+ &global->guest_index_bits);
+ if (rc)
+ global->guest_index_bits = 0;
+
+ /* Find number of HART index bits */
+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,hart-index-bits",
+ &global->hart_index_bits);
+ if (rc) {
+ /* Assume default value */
+ global->hart_index_bits = __fls(*nr_parent_irqs);
+ if (BIT(global->hart_index_bits) < *nr_parent_irqs)
+ global->hart_index_bits++;
+ }
+
+ /* Find number of group index bits */
+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,group-index-bits",
+ &global->group_index_bits);
+ if (rc)
+ global->group_index_bits = 0;
+
+ /*
+ * Find the first bit position of the group index.
+ * If not specified, assume the default APLIC-IMSIC configuration.
+ */
+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,group-index-shift",
+ &global->group_index_shift);
+ if (rc)
+ global->group_index_shift = IMSIC_MMIO_PAGE_SHIFT * 2;
+
+ /* Find number of interrupt identities */
+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,num-ids",
+ &global->nr_ids);
+ if (rc) {
+ pr_err("%pfwP: number of interrupt identities not found\n", fwnode);
+ return rc;
+ }
+
+ /* Find number of guest interrupt identities */
+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,num-guest-ids",
+ &global->nr_guest_ids);
+ if (rc)
+ global->nr_guest_ids = global->nr_ids;
+
+ return 0;
+}
+
+static int __init imsic_populate_global_acpi(struct fwnode_handle *fwnode,
+ struct imsic_global_config *global,
+ u32 *nr_parent_irqs, void *opaque)
+{
+ struct acpi_madt_imsic *imsic = (struct acpi_madt_imsic *)opaque;
+
+ global->guest_index_bits = imsic->guest_index_bits;
+ global->hart_index_bits = imsic->hart_index_bits;
+ global->group_index_bits = imsic->group_index_bits;
+ global->group_index_shift = imsic->group_index_shift;
+ global->nr_ids = imsic->num_ids;
+ global->nr_guest_ids = imsic->num_guest_ids;
+ return 0;
+}
+
+static int __init imsic_get_parent_hartid(struct fwnode_handle *fwnode,
+ u32 index, unsigned long *hartid)
+{
+ struct of_phandle_args parent;
+ int rc;
+
+ if (!is_of_node(fwnode)) {
+ if (hartid)
+ *hartid = acpi_rintc_index_to_hartid(index);
+
+ if (!hartid || (*hartid == INVALID_HARTID))
+ return -EINVAL;
+
+ return 0;
+ }
+
+ rc = of_irq_parse_one(to_of_node(fwnode), index, &parent);
+ if (rc)
+ return rc;
+
+ /*
+ * Skip interrupts other than external interrupts for
+ * the current privilege level.
+ */
+ if (parent.args[0] != RV_IRQ_EXT)
+ return -EINVAL;
+
+ return riscv_of_parent_hartid(parent.np, hartid);
+}
+
+static int __init imsic_get_mmio_resource(struct fwnode_handle *fwnode,
+ u32 index, struct resource *res)
+{
+ if (!is_of_node(fwnode))
+ return acpi_rintc_get_imsic_mmio_info(index, res);
+
+ return of_address_to_resource(to_of_node(fwnode), index, res);
+}
+
+static int __init imsic_parse_fwnode(struct fwnode_handle *fwnode,
+ struct imsic_global_config *global,
+ u32 *nr_parent_irqs,
+ u32 *nr_mmios,
+ void *opaque)
+{
+ unsigned long hartid;
+ struct resource res;
+ int rc;
+ u32 i;
+
+ *nr_parent_irqs = 0;
+ *nr_mmios = 0;
+
+ /* Find number of parent interrupts */
+ while (!imsic_get_parent_hartid(fwnode, *nr_parent_irqs, &hartid))
+ (*nr_parent_irqs)++;
+ if (!*nr_parent_irqs) {
+ pr_err("%pfwP: no parent irqs available\n", fwnode);
+ return -EINVAL;
+ }
+
+ if (is_of_node(fwnode))
+ rc = imsic_populate_global_dt(fwnode, global, nr_parent_irqs);
+ else
+ rc = imsic_populate_global_acpi(fwnode, global, nr_parent_irqs, opaque);
+
+ if (rc)
+ return rc;
+
+ /* Sanity check guest index bits */
+ i = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT;
+ if (i < global->guest_index_bits) {
+ pr_err("%pfwP: guest index bits too big\n", fwnode);
+ return -EINVAL;
+ }
+
+ /* Sanity check HART index bits */
+ i = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT - global->guest_index_bits;
+ if (i < global->hart_index_bits) {
+ pr_err("%pfwP: HART index bits too big\n", fwnode);
+ return -EINVAL;
+ }
+
+ /* Sanity check group index bits */
+ i = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT -
+ global->guest_index_bits - global->hart_index_bits;
+ if (i < global->group_index_bits) {
+ pr_err("%pfwP: group index bits too big\n", fwnode);
+ return -EINVAL;
+ }
+
+ /* Sanity check group index shift */
+ i = global->group_index_bits + global->group_index_shift - 1;
+ if (i >= BITS_PER_LONG) {
+ pr_err("%pfwP: group index shift too big\n", fwnode);
+ return -EINVAL;
+ }
+
+ /* Sanity check number of interrupt identities */
+ if (global->nr_ids < IMSIC_MIN_ID ||
+ global->nr_ids >= IMSIC_MAX_ID ||
+ (global->nr_ids & IMSIC_MIN_ID) != IMSIC_MIN_ID) {
+ pr_err("%pfwP: invalid number of interrupt identities\n", fwnode);
+ return -EINVAL;
+ }
+
+ /* Sanity check number of guest interrupt identities */
+ if (global->nr_guest_ids < IMSIC_MIN_ID ||
+ global->nr_guest_ids >= IMSIC_MAX_ID ||
+ (global->nr_guest_ids & IMSIC_MIN_ID) != IMSIC_MIN_ID) {
+ pr_err("%pfwP: invalid number of guest interrupt identities\n", fwnode);
+ return -EINVAL;
+ }
+
+ /* Compute base address */
+ rc = imsic_get_mmio_resource(fwnode, 0, &res);
+ if (rc) {
+ pr_err("%pfwP: first MMIO resource not found\n", fwnode);
+ return -EINVAL;
+ }
+ global->base_addr = res.start;
+ global->base_addr &= ~(BIT(global->guest_index_bits +
+ global->hart_index_bits +
+ IMSIC_MMIO_PAGE_SHIFT) - 1);
+ global->base_addr &= ~((BIT(global->group_index_bits) - 1) <<
+ global->group_index_shift);
+
+ /* Find number of MMIO register sets */
+ while (!imsic_get_mmio_resource(fwnode, *nr_mmios, &res))
+ (*nr_mmios)++;
+
+ return 0;
+}
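
The masking above strips the group, hart, and guest index fields out of the first MMIO base so that any interrupt file's address can be rebuilt from it. As an illustration of the resulting AIA address layout (this helper does not exist in the driver), the MSI page for a given (group, hart, guest) tuple is:

	static phys_addr_t example_msi_page(const struct imsic_global_config *g,
					    u32 group, u32 hart, u32 guest)
	{
		return g->base_addr |
		       ((phys_addr_t)group << g->group_index_shift) |
		       ((phys_addr_t)hart << (g->guest_index_bits + IMSIC_MMIO_PAGE_SHIFT)) |
		       ((phys_addr_t)guest << IMSIC_MMIO_PAGE_SHIFT);
	}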
+
+int __init imsic_setup_state(struct fwnode_handle *fwnode, void *opaque)
+{
+ u32 i, j, index, nr_parent_irqs, nr_mmios, nr_handlers = 0;
+ struct imsic_global_config *global;
+ struct imsic_local_config *local;
+ void __iomem **mmios_va = NULL;
+ struct resource *mmios = NULL;
+ unsigned long reloff, hartid;
+ phys_addr_t base_addr;
+ int rc, cpu;
+
+ /*
+ * Only one IMSIC instance is allowed in a platform for a clean
+ * implementation of SMP IRQ affinity and per-CPU IPIs.
+ *
+ * This means on a multi-socket (or multi-die) platform we
+ * will have multiple MMIO regions for one IMSIC instance.
+ */
+ if (imsic) {
+ pr_err("%pfwP: already initialized hence ignoring\n", fwnode);
+ return -EALREADY;
+ }
+
+ if (!riscv_isa_extension_available(NULL, SxAIA)) {
+ pr_err("%pfwP: AIA support not available\n", fwnode);
+ return -ENODEV;
+ }
+
+ imsic = kzalloc(sizeof(*imsic), GFP_KERNEL);
+ if (!imsic)
+ return -ENOMEM;
+ imsic->fwnode = fwnode;
+ global = &imsic->global;
+
+ global->local = alloc_percpu(typeof(*global->local));
+ if (!global->local) {
+ rc = -ENOMEM;
+ goto out_free_priv;
+ }
+
+ /* Parse IMSIC fwnode */
+ rc = imsic_parse_fwnode(fwnode, global, &nr_parent_irqs, &nr_mmios, opaque);
+ if (rc)
+ goto out_free_local;
+
+ /* Allocate MMIO resource array */
+ mmios = kcalloc(nr_mmios, sizeof(*mmios), GFP_KERNEL);
+ if (!mmios) {
+ rc = -ENOMEM;
+ goto out_free_local;
+ }
+
+ /* Allocate MMIO virtual address array */
+ mmios_va = kcalloc(nr_mmios, sizeof(*mmios_va), GFP_KERNEL);
+ if (!mmios_va) {
+ rc = -ENOMEM;
+ goto out_iounmap;
+ }
+
+ /* Parse and map MMIO register sets */
+ for (i = 0; i < nr_mmios; i++) {
+ rc = imsic_get_mmio_resource(fwnode, i, &mmios[i]);
+ if (rc) {
+ pr_err("%pfwP: unable to parse MMIO regset %d\n", fwnode, i);
+ goto out_iounmap;
+ }
+
+ base_addr = mmios[i].start;
+ base_addr &= ~(BIT(global->guest_index_bits +
+ global->hart_index_bits +
+ IMSIC_MMIO_PAGE_SHIFT) - 1);
+ base_addr &= ~((BIT(global->group_index_bits) - 1) <<
+ global->group_index_shift);
+ if (base_addr != global->base_addr) {
+ rc = -EINVAL;
+ pr_err("%pfwP: address mismatch for regset %d\n", fwnode, i);
+ goto out_iounmap;
+ }
+
+ mmios_va[i] = ioremap(mmios[i].start, resource_size(&mmios[i]));
+ if (!mmios_va[i]) {
+ rc = -EIO;
+ pr_err("%pfwP: unable to map MMIO regset %d\n", fwnode, i);
+ goto out_iounmap;
+ }
+ }
+
+ /* Initialize local (or per-CPU) state */
+ rc = imsic_local_init();
+ if (rc) {
+ pr_err("%pfwP: failed to initialize local state\n",
+ fwnode);
+ goto out_iounmap;
+ }
+
+ /* Configure handlers for target CPUs */
+ for (i = 0; i < nr_parent_irqs; i++) {
+ rc = imsic_get_parent_hartid(fwnode, i, &hartid);
+ if (rc) {
+ pr_warn("%pfwP: hart ID for parent irq%d not found\n", fwnode, i);
+ continue;
+ }
+
+ cpu = riscv_hartid_to_cpuid(hartid);
+ if (cpu < 0) {
+ pr_warn("%pfwP: invalid cpuid for parent irq%d\n", fwnode, i);
+ continue;
+ }
+
+ /* Find MMIO location of MSI page */
+ index = nr_mmios;
+ reloff = i * BIT(global->guest_index_bits) *
+ IMSIC_MMIO_PAGE_SZ;
+ for (j = 0; j < nr_mmios; j++) {
+ if (reloff < resource_size(&mmios[j])) {
+ index = j;
+ break;
+ }
+
+ /*
+ * MMIO region size may not be aligned to
+ * BIT(global->guest_index_bits) * IMSIC_MMIO_PAGE_SZ
+ * if holes are present.
+ */
+ reloff -= ALIGN(resource_size(&mmios[j]),
+ BIT(global->guest_index_bits) * IMSIC_MMIO_PAGE_SZ);
+ }
+ if (index >= nr_mmios) {
+ pr_warn("%pfwP: MMIO not found for parent irq%d\n", fwnode, i);
+ continue;
+ }
+
+ local = per_cpu_ptr(global->local, cpu);
+ local->msi_pa = mmios[index].start + reloff;
+ local->msi_va = mmios_va[index] + reloff;
+
+ nr_handlers++;
+ }
+
+ /* If no CPU handlers were found then we can't take interrupts */
+ if (!nr_handlers) {
+ pr_err("%pfwP: No CPU handlers found\n", fwnode);
+ rc = -ENODEV;
+ goto out_local_cleanup;
+ }
+
+ /* Initialize matrix allocator */
+ rc = imsic_matrix_init();
+ if (rc) {
+ pr_err("%pfwP: failed to create matrix allocator\n", fwnode);
+ goto out_local_cleanup;
+ }
+
+ /* We don't need the MMIO arrays anymore, so free them up */
+ kfree(mmios_va);
+ kfree(mmios);
+
+ return 0;
+
+out_local_cleanup:
+ imsic_local_cleanup();
+out_iounmap:
+ /* mmios_va is NULL if its own allocation failed */
+ for (i = 0; mmios_va && i < nr_mmios; i++) {
+ if (mmios_va[i])
+ iounmap(mmios_va[i]);
+ }
+ kfree(mmios_va);
+ kfree(mmios);
+out_free_local:
+ free_percpu(imsic->global.local);
+out_free_priv:
+ kfree(imsic);
+ imsic = NULL;
+ return rc;
+}
diff --git a/drivers/irqchip/irq-riscv-imsic-state.h b/drivers/irqchip/irq-riscv-imsic-state.h
new file mode 100644
index 000000000000..391e44280827
--- /dev/null
+++ b/drivers/irqchip/irq-riscv-imsic-state.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+
+#ifndef _IRQ_RISCV_IMSIC_STATE_H
+#define _IRQ_RISCV_IMSIC_STATE_H
+
+#include <linux/irqchip/riscv-imsic.h>
+#include <linux/irqdomain.h>
+#include <linux/fwnode.h>
+#include <linux/timer.h>
+
+#define IMSIC_IPI_ID 1
+#define IMSIC_NR_IPI 8
+
+struct imsic_vector {
+ /* Fixed details of the vector */
+ unsigned int cpu;
+ unsigned int local_id;
+ /* Details saved by driver in the vector */
+ unsigned int hwirq;
+ /* Details accessed using local lock held */
+ bool enable;
+ struct imsic_vector *move;
+};
+
+struct imsic_local_priv {
+ /* Local lock to protect vector enable/move variables and dirty bitmap */
+ raw_spinlock_t lock;
+
+ /* Local dirty bitmap for synchronization */
+ unsigned long *dirty_bitmap;
+
+#ifdef CONFIG_SMP
+ /* Local timer for synchronization */
+ struct timer_list timer;
+#endif
+
+ /* Local vector table */
+ struct imsic_vector *vectors;
+};
+
+struct imsic_priv {
+ /* Device details */
+ struct fwnode_handle *fwnode;
+
+ /* Global configuration common for all HARTs */
+ struct imsic_global_config global;
+
+ /* Per-CPU state */
+ struct imsic_local_priv __percpu *lpriv;
+
+ /* State of IRQ matrix allocator */
+ raw_spinlock_t matrix_lock;
+ struct irq_matrix *matrix;
+
+ /* IRQ domains (created by platform driver) */
+ struct irq_domain *base_domain;
+};
+
+extern struct imsic_priv *imsic;
+
+void __imsic_eix_update(unsigned long base_id, unsigned long num_id, bool pend, bool val);
+
+static inline void __imsic_id_set_enable(unsigned long id)
+{
+ __imsic_eix_update(id, 1, false, true);
+}
+
+static inline void __imsic_id_clear_enable(unsigned long id)
+{
+ __imsic_eix_update(id, 1, false, false);
+}
+
+void imsic_local_sync_all(void);
+void imsic_local_delivery(bool enable);
+
+void imsic_vector_mask(struct imsic_vector *vec);
+void imsic_vector_unmask(struct imsic_vector *vec);
+
+static inline bool imsic_vector_isenabled(struct imsic_vector *vec)
+{
+ return READ_ONCE(vec->enable);
+}
+
+static inline struct imsic_vector *imsic_vector_get_move(struct imsic_vector *vec)
+{
+ return READ_ONCE(vec->move);
+}
+
+void imsic_vector_move(struct imsic_vector *old_vec, struct imsic_vector *new_vec);
+
+struct imsic_vector *imsic_vector_from_local_id(unsigned int cpu, unsigned int local_id);
+
+struct imsic_vector *imsic_vector_alloc(unsigned int hwirq, const struct cpumask *mask);
+void imsic_vector_free(struct imsic_vector *vector);
+
+void imsic_vector_debug_show(struct seq_file *m, struct imsic_vector *vec, int ind);
+void imsic_vector_debug_show_summary(struct seq_file *m, int ind);
+
+void imsic_state_online(void);
+void imsic_state_offline(void);
+int imsic_setup_state(struct fwnode_handle *fwnode, void *opaque);
+int imsic_irqdomain_init(void);
+
+#endif
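
Taken together, the vector API above implies the following lifecycle for one interrupt identity. This is an illustrative sketch of the call order, not code from the patch:

	struct imsic_vector *vec;

	vec = imsic_vector_alloc(hwirq, cpu_online_mask);	/* pick a (cpu, local_id) */
	if (!vec)
		return -ENOSPC;
	imsic_vector_unmask(vec);	/* set enable and sync the target CPU */
	/* ... device writes vec->local_id to the target CPU's MSI page ... */
	imsic_vector_mask(vec);
	imsic_vector_free(vec);		/* return the slot to the matrix allocator */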
diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c
index e8d01b14ccdd..f653c13de62b 100644
--- a/drivers/irqchip/irq-riscv-intc.c
+++ b/drivers/irqchip/irq-riscv-intc.c
@@ -17,17 +17,29 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/smp.h>
+#include <linux/soc/andes/irq.h>
+
+#include <asm/hwcap.h>
static struct irq_domain *intc_domain;
+static unsigned int riscv_intc_nr_irqs __ro_after_init = BITS_PER_LONG;
+static unsigned int riscv_intc_custom_base __ro_after_init = BITS_PER_LONG;
+static unsigned int riscv_intc_custom_nr_irqs __ro_after_init;
-static asmlinkage void riscv_intc_irq(struct pt_regs *regs)
+static void riscv_intc_irq(struct pt_regs *regs)
{
unsigned long cause = regs->cause & ~CAUSE_IRQ_FLAG;
- if (unlikely(cause >= BITS_PER_LONG))
- panic("unexpected interrupt cause");
+ if (generic_handle_domain_irq(intc_domain, cause))
+ pr_warn_ratelimited("Failed to handle interrupt (cause: %ld)\n", cause);
+}
- generic_handle_domain_irq(intc_domain, cause);
+static void riscv_intc_aia_irq(struct pt_regs *regs)
+{
+ unsigned long topi;
+
+ while ((topi = csr_read(CSR_TOPI)))
+ generic_handle_domain_irq(intc_domain, topi >> TOPI_IID_SHIFT);
}
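
With AIA present, each read of the (x)topi CSR reports the highest-priority pending-and-enabled local interrupt: the identity sits in the bits at and above TOPI_IID_SHIFT (16) and the priority in the low bits, and the loop drains causes until the CSR reads zero. For example, a pending S-mode external interrupt (identity 9) reads back as (9 << TOPI_IID_SHIFT) | prio, so topi >> TOPI_IID_SHIFT dispatches hwirq 9.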
/*
@@ -39,12 +51,43 @@ static asmlinkage void riscv_intc_irq(struct pt_regs *regs)
static void riscv_intc_irq_mask(struct irq_data *d)
{
- csr_clear(CSR_IE, BIT(d->hwirq));
+ if (IS_ENABLED(CONFIG_32BIT) && d->hwirq >= BITS_PER_LONG)
+ csr_clear(CSR_IEH, BIT(d->hwirq - BITS_PER_LONG));
+ else
+ csr_clear(CSR_IE, BIT(d->hwirq));
}
static void riscv_intc_irq_unmask(struct irq_data *d)
{
- csr_set(CSR_IE, BIT(d->hwirq));
+ if (IS_ENABLED(CONFIG_32BIT) && d->hwirq >= BITS_PER_LONG)
+ csr_set(CSR_IEH, BIT(d->hwirq - BITS_PER_LONG));
+ else
+ csr_set(CSR_IE, BIT(d->hwirq));
+}
+
+static void andes_intc_irq_mask(struct irq_data *d)
+{
+ /*
+ * Andes-specific S-mode local interrupt causes (hwirq)
+ * are defined as (256 + n) and controlled by the n-th
+ * bit of the SLIE CSR.
+ */
+ unsigned int mask = BIT(d->hwirq % BITS_PER_LONG);
+
+ if (d->hwirq < ANDES_SLI_CAUSE_BASE)
+ csr_clear(CSR_IE, mask);
+ else
+ csr_clear(ANDES_CSR_SLIE, mask);
+}
+
+static void andes_intc_irq_unmask(struct irq_data *d)
+{
+ unsigned int mask = BIT(d->hwirq % BITS_PER_LONG);
+
+ if (d->hwirq < ANDES_SLI_CAUSE_BASE)
+ csr_set(CSR_IE, mask);
+ else
+ csr_set(ANDES_CSR_SLIE, mask);
}
static void riscv_intc_irq_eoi(struct irq_data *d)
@@ -70,12 +113,21 @@ static struct irq_chip riscv_intc_chip = {
.irq_eoi = riscv_intc_irq_eoi,
};
+static struct irq_chip andes_intc_chip = {
+ .name = "RISC-V INTC",
+ .irq_mask = andes_intc_irq_mask,
+ .irq_unmask = andes_intc_irq_unmask,
+ .irq_eoi = riscv_intc_irq_eoi,
+};
+
static int riscv_intc_domain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
+ struct irq_chip *chip = d->host_data;
+
irq_set_percpu_devid(irq);
- irq_domain_set_info(d, irq, hwirq, &riscv_intc_chip, d->host_data,
- handle_percpu_devid_irq, NULL, NULL);
+ irq_domain_set_info(d, irq, hwirq, chip, NULL, handle_percpu_devid_irq,
+ NULL, NULL);
return 0;
}
@@ -93,6 +145,15 @@ static int riscv_intc_domain_alloc(struct irq_domain *domain,
if (ret)
return ret;
+ /*
+ * Only allow hwirqs for which we have a corresponding standard
+ * or custom interrupt enable register.
+ */
+ if (hwirq >= riscv_intc_nr_irqs &&
+ (hwirq < riscv_intc_custom_base ||
+ hwirq >= riscv_intc_custom_base + riscv_intc_custom_nr_irqs))
+ return -EINVAL;
+
for (i = 0; i < nr_irqs; i++) {
ret = riscv_intc_domain_map(domain, virq + i, hwirq + i);
if (ret)
@@ -113,18 +174,22 @@ static struct fwnode_handle *riscv_intc_hwnode(void)
return intc_domain->fwnode;
}
-static int __init riscv_intc_init_common(struct fwnode_handle *fn)
+static int __init riscv_intc_init_common(struct fwnode_handle *fn, struct irq_chip *chip)
{
int rc;
- intc_domain = irq_domain_create_linear(fn, BITS_PER_LONG,
- &riscv_intc_domain_ops, NULL);
+ intc_domain = irq_domain_create_tree(fn, &riscv_intc_domain_ops, chip);
if (!intc_domain) {
pr_err("unable to add IRQ domain\n");
return -ENXIO;
}
- rc = set_handle_irq(&riscv_intc_irq);
+ if (riscv_isa_extension_available(NULL, SxAIA)) {
+ riscv_intc_nr_irqs = 64;
+ rc = set_handle_irq(&riscv_intc_aia_irq);
+ } else {
+ rc = set_handle_irq(&riscv_intc_irq);
+ }
if (rc) {
pr_err("failed to set irq handler\n");
return rc;
@@ -132,7 +197,11 @@ static int __init riscv_intc_init_common(struct fwnode_handle *fn)
riscv_set_intc_hwnode_fn(riscv_intc_hwnode);
- pr_info("%d local interrupts mapped\n", BITS_PER_LONG);
+ pr_info("%d local interrupts mapped%s\n",
+ riscv_intc_nr_irqs,
+ riscv_isa_extension_available(NULL, SxAIA) ? " using AIA" : "");
+ if (riscv_intc_custom_nr_irqs)
+ pr_info("%d custom local interrupts mapped\n", riscv_intc_custom_nr_irqs);
return 0;
}
@@ -140,8 +209,9 @@ static int __init riscv_intc_init_common(struct fwnode_handle *fn)
static int __init riscv_intc_init(struct device_node *node,
struct device_node *parent)
{
- int rc;
+ struct irq_chip *chip = &riscv_intc_chip;
unsigned long hartid;
+ int rc;
rc = riscv_of_parent_hartid(node, &hartid);
if (rc < 0) {
@@ -166,20 +236,133 @@ static int __init riscv_intc_init(struct device_node *node,
return 0;
}
- return riscv_intc_init_common(of_node_to_fwnode(node));
+ if (of_device_is_compatible(node, "andestech,cpu-intc")) {
+ riscv_intc_custom_base = ANDES_SLI_CAUSE_BASE;
+ riscv_intc_custom_nr_irqs = ANDES_RV_IRQ_LAST;
+ chip = &andes_intc_chip;
+ }
+
+ return riscv_intc_init_common(of_node_to_fwnode(node), chip);
}
IRQCHIP_DECLARE(riscv, "riscv,cpu-intc", riscv_intc_init);
+IRQCHIP_DECLARE(andes, "andestech,cpu-intc", riscv_intc_init);
#ifdef CONFIG_ACPI
+struct rintc_data {
+ union {
+ u32 ext_intc_id;
+ struct {
+ u32 context_id : 16,
+ reserved : 8,
+ aplic_plic_id : 8;
+ };
+ };
+ unsigned long hart_id;
+ u64 imsic_addr;
+ u32 imsic_size;
+};
+
+static u32 nr_rintc;
+static struct rintc_data **rintc_acpi_data;
+
+#define for_each_matching_plic(_plic_id) \
+ unsigned int _plic; \
+ \
+ for (_plic = 0; _plic < nr_rintc; _plic++) \
+ if (rintc_acpi_data[_plic]->aplic_plic_id != _plic_id) \
+ continue; \
+ else
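
The trailing "continue; else" in the macro above is deliberate: the statement (or block) written after a for_each_matching_plic(plic_id) use binds to the else branch, so entries whose aplic_plic_id does not match are skipped and the caller's body runs only for matching RINTC entries, as in:

	for_each_matching_plic(plic_id)
		nctx++;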
+
+unsigned int acpi_rintc_get_plic_nr_contexts(unsigned int plic_id)
+{
+ unsigned int nctx = 0;
+
+ for_each_matching_plic(plic_id)
+ nctx++;
+
+ return nctx;
+}
+
+static struct rintc_data *get_plic_context(unsigned int plic_id, unsigned int ctxt_idx)
+{
+ unsigned int ctxt = 0;
+
+ for_each_matching_plic(plic_id) {
+ if (ctxt == ctxt_idx)
+ return rintc_acpi_data[_plic];
+
+ ctxt++;
+ }
+
+ return NULL;
+}
+
+unsigned long acpi_rintc_ext_parent_to_hartid(unsigned int plic_id, unsigned int ctxt_idx)
+{
+ struct rintc_data *data = get_plic_context(plic_id, ctxt_idx);
+
+ return data ? data->hart_id : INVALID_HARTID;
+}
+
+unsigned int acpi_rintc_get_plic_context(unsigned int plic_id, unsigned int ctxt_idx)
+{
+ struct rintc_data *data = get_plic_context(plic_id, ctxt_idx);
+
+ return data ? data->context_id : INVALID_CONTEXT;
+}
+
+unsigned long acpi_rintc_index_to_hartid(u32 index)
+{
+ return index >= nr_rintc ? INVALID_HARTID : rintc_acpi_data[index]->hart_id;
+}
+
+int acpi_rintc_get_imsic_mmio_info(u32 index, struct resource *res)
+{
+ if (index >= nr_rintc)
+ return -1;
+
+ res->start = rintc_acpi_data[index]->imsic_addr;
+ res->end = res->start + rintc_acpi_data[index]->imsic_size - 1;
+ res->flags = IORESOURCE_MEM;
+ return 0;
+}
+
+static int __init riscv_intc_acpi_match(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ return 0;
+}
+
static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header,
const unsigned long end)
{
- struct fwnode_handle *fn;
struct acpi_madt_rintc *rintc;
+ struct fwnode_handle *fn;
+ int count;
+ int rc;
+
+ if (!rintc_acpi_data) {
+ count = acpi_table_parse_madt(ACPI_MADT_TYPE_RINTC, riscv_intc_acpi_match, 0);
+ if (count <= 0)
+ return -EINVAL;
+
+ rintc_acpi_data = kcalloc(count, sizeof(*rintc_acpi_data), GFP_KERNEL);
+ if (!rintc_acpi_data)
+ return -ENOMEM;
+ }
rintc = (struct acpi_madt_rintc *)header;
+ rintc_acpi_data[nr_rintc] = kzalloc(sizeof(*rintc_acpi_data[0]), GFP_KERNEL);
+ if (!rintc_acpi_data[nr_rintc])
+ return -ENOMEM;
+
+ rintc_acpi_data[nr_rintc]->ext_intc_id = rintc->ext_intc_id;
+ rintc_acpi_data[nr_rintc]->hart_id = rintc->hart_id;
+ rintc_acpi_data[nr_rintc]->imsic_addr = rintc->imsic_addr;
+ rintc_acpi_data[nr_rintc]->imsic_size = rintc->imsic_size;
+ nr_rintc++;
/*
* The ACPI MADT will have one INTC for each CPU (or HART)
@@ -196,7 +379,13 @@ static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header,
return -ENOMEM;
}
- return riscv_intc_init_common(fn);
+ rc = riscv_intc_init_common(fn, &riscv_intc_chip);
+ if (rc)
+ irq_domain_free_fwnode(fn);
+ else
+ acpi_set_irq_model(ACPI_IRQ_MODEL_RINTC, riscv_acpi_get_gsi_domain_id);
+
+ return rc;
}
IRQCHIP_ACPI_DECLARE(riscv_intc, ACPI_MADT_TYPE_RINTC, NULL,
diff --git a/drivers/irqchip/irq-sa11x0.c b/drivers/irqchip/irq-sa11x0.c
index 31c202a1ae62..9d0b80271949 100644
--- a/drivers/irqchip/irq-sa11x0.c
+++ b/drivers/irqchip/irq-sa11x0.c
@@ -127,8 +127,7 @@ static int __init sa1100irq_init_devicefs(void)
device_initcall(sa1100irq_init_devicefs);
-static asmlinkage void __exception_irq_entry
-sa1100_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry sa1100_handle_irq(struct pt_regs *regs)
{
uint32_t icip, icmr, mask;
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index bf0b40b0fad4..bf69a4802b71 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -3,7 +3,8 @@
* Copyright (C) 2017 SiFive
* Copyright (C) 2018 Christoph Hellwig
*/
-#define pr_fmt(fmt) "plic: " fmt
+#define pr_fmt(fmt) "riscv-plic: " fmt
+#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -64,12 +65,15 @@
#define PLIC_QUIRK_EDGE_INTERRUPT 0
struct plic_priv {
+ struct fwnode_handle *fwnode;
struct cpumask lmask;
struct irq_domain *irqdomain;
void __iomem *regs;
unsigned long plic_quirks;
unsigned int nr_irqs;
unsigned long *prio_save;
+ u32 gsi_base;
+ int acpi_plic_id;
};
struct plic_handler {
@@ -85,7 +89,7 @@ struct plic_handler {
struct plic_priv *priv;
};
static int plic_parent_irq __ro_after_init;
-static bool plic_cpuhp_setup_done __ro_after_init;
+static bool plic_global_setup_done __ro_after_init;
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
static int plic_irq_set_type(struct irq_data *d, unsigned int type);
@@ -103,9 +107,11 @@ static void __plic_toggle(void __iomem *enable_base, int hwirq, int enable)
static void plic_toggle(struct plic_handler *handler, int hwirq, int enable)
{
- raw_spin_lock(&handler->enable_lock);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&handler->enable_lock, flags);
__plic_toggle(handler->enable_base, hwirq, enable);
- raw_spin_unlock(&handler->enable_lock);
+ raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
}
static inline void plic_irq_toggle(const struct cpumask *mask,
@@ -120,16 +126,6 @@ static inline void plic_irq_toggle(const struct cpumask *mask,
}
}
-static void plic_irq_enable(struct irq_data *d)
-{
- plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1);
-}
-
-static void plic_irq_disable(struct irq_data *d)
-{
- plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0);
-}
-
static void plic_irq_unmask(struct irq_data *d)
{
struct plic_priv *priv = irq_data_get_irq_chip_data(d);
@@ -144,6 +140,17 @@ static void plic_irq_mask(struct irq_data *d)
writel(0, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
}
+static void plic_irq_enable(struct irq_data *d)
+{
+ plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1);
+ plic_irq_unmask(d);
+}
+
+static void plic_irq_disable(struct irq_data *d)
+{
+ plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0);
+}
+
static void plic_irq_eoi(struct irq_data *d)
{
struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
@@ -162,15 +169,12 @@ static int plic_set_affinity(struct irq_data *d,
const struct cpumask *mask_val, bool force)
{
unsigned int cpu;
- struct cpumask amask;
struct plic_priv *priv = irq_data_get_irq_chip_data(d);
- cpumask_and(&amask, &priv->lmask, mask_val);
-
if (force)
- cpu = cpumask_first(&amask);
+ cpu = cpumask_first_and(&priv->lmask, mask_val);
else
- cpu = cpumask_any_and(&amask, cpu_online_mask);
+ cpu = cpumask_first_and_and(&priv->lmask, mask_val, cpu_online_mask);
if (cpu >= nr_cpu_ids)
return -EINVAL;
@@ -242,16 +246,16 @@ static int plic_irq_set_type(struct irq_data *d, unsigned int type)
static int plic_irq_suspend(void)
{
unsigned int i, cpu;
+ unsigned long flags;
u32 __iomem *reg;
struct plic_priv *priv;
priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;
- for (i = 0; i < priv->nr_irqs; i++)
- if (readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID))
- __set_bit(i, priv->prio_save);
- else
- __clear_bit(i, priv->prio_save);
+ for (i = 0; i < priv->nr_irqs; i++) {
+ __assign_bit(i, priv->prio_save,
+ readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID));
+ }
for_each_cpu(cpu, cpu_present_mask) {
struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
@@ -259,12 +263,12 @@ static int plic_irq_suspend(void)
if (!handler->present)
continue;
- raw_spin_lock(&handler->enable_lock);
+ raw_spin_lock_irqsave(&handler->enable_lock, flags);
for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
reg = handler->enable_base + i * sizeof(u32);
handler->enable_save[i] = readl(reg);
}
- raw_spin_unlock(&handler->enable_lock);
+ raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
}
return 0;
@@ -273,6 +277,7 @@ static int plic_irq_suspend(void)
static void plic_irq_resume(void)
{
unsigned int i, index, cpu;
+ unsigned long flags;
u32 __iomem *reg;
struct plic_priv *priv;
@@ -290,12 +295,12 @@ static void plic_irq_resume(void)
if (!handler->present)
continue;
- raw_spin_lock(&handler->enable_lock);
+ raw_spin_lock_irqsave(&handler->enable_lock, flags);
for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
reg = handler->enable_base + i * sizeof(u32);
writel(handler->enable_save[i], reg);
}
- raw_spin_unlock(&handler->enable_lock);
+ raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
}
}
@@ -323,6 +328,10 @@ static int plic_irq_domain_translate(struct irq_domain *d,
{
struct plic_priv *priv = d->host_data;
+ /* For DT, gsi_base is always zero. */
+ if (fwspec->param[0] >= priv->gsi_base)
+ fwspec->param[0] = fwspec->param[0] - priv->gsi_base;
+
if (test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks))
return irq_domain_translate_twocell(d, fwspec, hwirq, type);
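
To make the translation concrete: on a hypothetical ACPI system where this PLIC instance was assigned gsi_base = 64, a fwspec carrying GSI 74 is rewritten to hwirq 10 before the usual onecell/twocell translation runs; on DT, gsi_base stays 0 and the parameter passes through unchanged.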
@@ -376,9 +385,10 @@ static void plic_handle_irq(struct irq_desc *desc)
while ((hwirq = readl(claim))) {
int err = generic_handle_domain_irq(handler->priv->irqdomain,
hwirq);
- if (unlikely(err))
- pr_warn_ratelimited("can't find mapping for hwirq %lu\n",
- hwirq);
+ if (unlikely(err)) {
+ pr_warn_ratelimited("%pfwP: can't find mapping for hwirq %lu\n",
+ handler->priv->fwnode, hwirq);
+ }
}
chained_irq_exit(chip, desc);
@@ -406,71 +416,176 @@ static int plic_starting_cpu(unsigned int cpu)
enable_percpu_irq(plic_parent_irq,
irq_get_trigger_type(plic_parent_irq));
else
- pr_warn("cpu%d: parent irq not available\n", cpu);
+ pr_warn("%pfwP: cpu%d: parent irq not available\n",
+ handler->priv->fwnode, cpu);
plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);
return 0;
}
-static int __init __plic_init(struct device_node *node,
- struct device_node *parent,
- unsigned long plic_quirks)
+static const struct of_device_id plic_match[] = {
+ { .compatible = "sifive,plic-1.0.0" },
+ { .compatible = "riscv,plic0" },
+ { .compatible = "andestech,nceplic100",
+ .data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) },
+ { .compatible = "thead,c900-plic",
+ .data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) },
+ {}
+};
+
+#ifdef CONFIG_ACPI
+
+static const struct acpi_device_id plic_acpi_match[] = {
+ { "RSCV0001", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, plic_acpi_match);
+
+#endif
+static int plic_parse_nr_irqs_and_contexts(struct fwnode_handle *fwnode,
+ u32 *nr_irqs, u32 *nr_contexts,
+ u32 *gsi_base, u32 *id)
{
- int error = 0, nr_contexts, nr_handlers = 0, i;
- u32 nr_irqs;
- struct plic_priv *priv;
- struct plic_handler *handler;
- unsigned int cpu;
+ int rc;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ if (!is_of_node(fwnode)) {
+ rc = riscv_acpi_get_gsi_info(fwnode, gsi_base, id, nr_irqs, NULL);
+ if (rc) {
+ pr_err("%pfwP: failed to find GSI mapping\n", fwnode);
+ return rc;
+ }
- priv->plic_quirks = plic_quirks;
+ *nr_contexts = acpi_rintc_get_plic_nr_contexts(*id);
+ if (WARN_ON(!*nr_contexts)) {
+ pr_err("%pfwP: no PLIC context available\n", fwnode);
+ return -EINVAL;
+ }
- priv->regs = of_iomap(node, 0);
- if (WARN_ON(!priv->regs)) {
- error = -EIO;
- goto out_free_priv;
+ return 0;
}
- error = -EINVAL;
- of_property_read_u32(node, "riscv,ndev", &nr_irqs);
- if (WARN_ON(!nr_irqs))
- goto out_iounmap;
+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,ndev", nr_irqs);
+ if (rc) {
+ pr_err("%pfwP: riscv,ndev property not available\n", fwnode);
+ return rc;
+ }
- priv->nr_irqs = nr_irqs;
+ *nr_contexts = of_irq_count(to_of_node(fwnode));
+ if (WARN_ON(!(*nr_contexts))) {
+ pr_err("%pfwP: no PLIC context available\n", fwnode);
+ return -EINVAL;
+ }
- priv->prio_save = bitmap_alloc(nr_irqs, GFP_KERNEL);
- if (!priv->prio_save)
- goto out_free_priority_reg;
+ *gsi_base = 0;
+ *id = 0;
- nr_contexts = of_irq_count(node);
- if (WARN_ON(!nr_contexts))
- goto out_free_priority_reg;
+ return 0;
+}
+
+static int plic_parse_context_parent(struct fwnode_handle *fwnode, u32 context,
+ u32 *parent_hwirq, int *parent_cpu, u32 id)
+{
+ struct of_phandle_args parent;
+ unsigned long hartid;
+ int rc;
+
+ if (!is_of_node(fwnode)) {
+ hartid = acpi_rintc_ext_parent_to_hartid(id, context);
+ if (hartid == INVALID_HARTID)
+ return -EINVAL;
+
+ *parent_cpu = riscv_hartid_to_cpuid(hartid);
+ *parent_hwirq = RV_IRQ_EXT;
+ return 0;
+ }
- error = -ENOMEM;
- priv->irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
- &plic_irqdomain_ops, priv);
- if (WARN_ON(!priv->irqdomain))
- goto out_free_priority_reg;
+ rc = of_irq_parse_one(to_of_node(fwnode), context, &parent);
+ if (rc)
+ return rc;
- for (i = 0; i < nr_contexts; i++) {
- struct of_phandle_args parent;
- irq_hw_number_t hwirq;
- int cpu;
- unsigned long hartid;
+ rc = riscv_of_parent_hartid(parent.np, &hartid);
+ if (rc)
+ return rc;
- if (of_irq_parse_one(node, i, &parent)) {
- pr_err("failed to parse parent for context %d.\n", i);
+ *parent_hwirq = parent.args[0];
+ *parent_cpu = riscv_hartid_to_cpuid(hartid);
+ return 0;
+}
+
+static int plic_probe(struct fwnode_handle *fwnode)
+{
+ int error = 0, nr_contexts, nr_handlers = 0, cpu, i;
+ unsigned long plic_quirks = 0;
+ struct plic_handler *handler;
+ u32 nr_irqs, parent_hwirq;
+ struct plic_priv *priv;
+ irq_hw_number_t hwirq;
+ void __iomem *regs;
+ int id, context_id;
+ u32 gsi_base;
+
+ if (is_of_node(fwnode)) {
+ const struct of_device_id *id;
+
+ id = of_match_node(plic_match, to_of_node(fwnode));
+ if (id)
+ plic_quirks = (unsigned long)id->data;
+
+ regs = of_iomap(to_of_node(fwnode), 0);
+ if (!regs)
+ return -ENOMEM;
+ } else {
+ regs = devm_platform_ioremap_resource(to_platform_device(fwnode->dev), 0);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+ }
+
+ error = plic_parse_nr_irqs_and_contexts(fwnode, &nr_irqs, &nr_contexts, &gsi_base, &id);
+ if (error)
+ goto fail_free_regs;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ error = -ENOMEM;
+ goto fail_free_regs;
+ }
+
+ priv->fwnode = fwnode;
+ priv->plic_quirks = plic_quirks;
+ priv->nr_irqs = nr_irqs;
+ priv->regs = regs;
+ priv->gsi_base = gsi_base;
+ priv->acpi_plic_id = id;
+
+ priv->prio_save = bitmap_zalloc(nr_irqs, GFP_KERNEL);
+ if (!priv->prio_save) {
+ error = -ENOMEM;
+ goto fail_free_priv;
+ }
+
+ for (i = 0; i < nr_contexts; i++) {
+ error = plic_parse_context_parent(fwnode, i, &parent_hwirq, &cpu,
+ priv->acpi_plic_id);
+ if (error) {
+ pr_warn("%pfwP: hwirq for context%d not found\n", fwnode, i);
continue;
}
+ if (is_of_node(fwnode)) {
+ context_id = i;
+ } else {
+ context_id = acpi_rintc_get_plic_context(priv->acpi_plic_id, i);
+ if (context_id == INVALID_CONTEXT) {
+ pr_warn("%pfwP: invalid context id for context%d\n", fwnode, i);
+ continue;
+ }
+ }
+
/*
* Skip contexts other than external interrupts for our
* privilege level.
*/
- if (parent.args[0] != RV_IRQ_EXT) {
+ if (parent_hwirq != RV_IRQ_EXT) {
/* Disable S-mode enable bits if running in M-mode. */
if (IS_ENABLED(CONFIG_RISCV_M_MODE)) {
void __iomem *enable_base = priv->regs +
@@ -483,26 +598,11 @@ static int __init __plic_init(struct device_node *node,
continue;
}
- error = riscv_of_parent_hartid(parent.np, &hartid);
- if (error < 0) {
- pr_warn("failed to parse hart ID for context %d.\n", i);
- continue;
- }
-
- cpu = riscv_hartid_to_cpuid(hartid);
if (cpu < 0) {
- pr_warn("Invalid cpuid for context %d\n", i);
+ pr_warn("%pfwP: Invalid cpuid for context %d\n", fwnode, i);
continue;
}
- /* Find parent domain and register chained handler */
- if (!plic_parent_irq && irq_find_host(parent.np)) {
- plic_parent_irq = irq_of_parse_and_map(node, i);
- if (plic_parent_irq)
- irq_set_chained_handler(plic_parent_irq,
- plic_handle_irq);
- }
-
/*
* When running in M-mode we need to ignore the S-mode handler.
* Here we assume it always comes later, but that might be a
@@ -510,7 +610,7 @@ static int __init __plic_init(struct device_node *node,
*/
handler = per_cpu_ptr(&plic_handlers, cpu);
if (handler->present) {
- pr_warn("handler already present for context %d.\n", i);
+ pr_warn("%pfwP: handler already present for context %d.\n", fwnode, i);
plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
goto done;
}
@@ -518,16 +618,18 @@ static int __init __plic_init(struct device_node *node,
cpumask_set_cpu(cpu, &priv->lmask);
handler->present = true;
handler->hart_base = priv->regs + CONTEXT_BASE +
- i * CONTEXT_SIZE;
+ context_id * CONTEXT_SIZE;
raw_spin_lock_init(&handler->enable_lock);
handler->enable_base = priv->regs + CONTEXT_ENABLE_BASE +
- i * CONTEXT_ENABLE_SIZE;
+ context_id * CONTEXT_ENABLE_SIZE;
handler->priv = priv;
- handler->enable_save = kcalloc(DIV_ROUND_UP(nr_irqs, 32),
- sizeof(*handler->enable_save), GFP_KERNEL);
- if (!handler->enable_save)
- goto out_free_enable_reg;
+ handler->enable_save = kcalloc(DIV_ROUND_UP(nr_irqs, 32),
+ sizeof(*handler->enable_save), GFP_KERNEL);
+ if (!handler->enable_save) {
+ error = -ENOMEM;
+ goto fail_cleanup_contexts;
+ }
done:
for (hwirq = 1; hwirq <= nr_irqs; hwirq++) {
plic_toggle(handler, hwirq, 0);
@@ -537,52 +639,98 @@ done:
nr_handlers++;
}
+ priv->irqdomain = irq_domain_create_linear(fwnode, nr_irqs + 1,
+ &plic_irqdomain_ops, priv);
+ if (WARN_ON(!priv->irqdomain)) {
+ error = -ENOMEM;
+ goto fail_cleanup_contexts;
+ }
+
/*
- * We can have multiple PLIC instances so setup cpuhp state
- * and register syscore operations only when context handler
- * for current/boot CPU is present.
+ * We can have multiple PLIC instances, so set up global state
+ * and register syscore operations only once, after the context
+ * handlers of all online CPUs are initialized.
*/
- handler = this_cpu_ptr(&plic_handlers);
- if (handler->present && !plic_cpuhp_setup_done) {
- cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
- "irqchip/sifive/plic:starting",
- plic_starting_cpu, plic_dying_cpu);
- register_syscore_ops(&plic_irq_syscore_ops);
- plic_cpuhp_setup_done = true;
+ if (!plic_global_setup_done) {
+ struct irq_domain *domain;
+ bool global_setup = true;
+
+ for_each_online_cpu(cpu) {
+ handler = per_cpu_ptr(&plic_handlers, cpu);
+ if (!handler->present) {
+ global_setup = false;
+ break;
+ }
+ }
+
+ if (global_setup) {
+ /* Find parent domain and register chained handler */
+ domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY);
+ if (domain)
+ plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
+ if (plic_parent_irq)
+ irq_set_chained_handler(plic_parent_irq, plic_handle_irq);
+
+ cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
+ "irqchip/sifive/plic:starting",
+ plic_starting_cpu, plic_dying_cpu);
+ register_syscore_ops(&plic_irq_syscore_ops);
+ plic_global_setup_done = true;
+ }
}
- pr_info("%pOFP: mapped %d interrupts with %d handlers for"
- " %d contexts.\n", node, nr_irqs, nr_handlers, nr_contexts);
+#ifdef CONFIG_ACPI
+ if (!acpi_disabled)
+ acpi_dev_clear_dependencies(ACPI_COMPANION(fwnode->dev));
+#endif
+
+ pr_info("%pfwP: mapped %d interrupts with %d handlers for %d contexts.\n",
+ fwnode, nr_irqs, nr_handlers, nr_contexts);
return 0;
-out_free_enable_reg:
- for_each_cpu(cpu, cpu_present_mask) {
+fail_cleanup_contexts:
+ for (i = 0; i < nr_contexts; i++) {
+ if (plic_parse_context_parent(fwnode, i, &parent_hwirq, &cpu, priv->acpi_plic_id))
+ continue;
+ if (parent_hwirq != RV_IRQ_EXT || cpu < 0)
+ continue;
+
handler = per_cpu_ptr(&plic_handlers, cpu);
+ handler->present = false;
+ handler->hart_base = NULL;
+ handler->enable_base = NULL;
kfree(handler->enable_save);
+ handler->enable_save = NULL;
+ handler->priv = NULL;
}
-out_free_priority_reg:
- kfree(priv->prio_save);
-out_iounmap:
- iounmap(priv->regs);
-out_free_priv:
+ bitmap_free(priv->prio_save);
+fail_free_priv:
kfree(priv);
+fail_free_regs:
+ iounmap(regs);
return error;
}
-static int __init plic_init(struct device_node *node,
- struct device_node *parent)
+static int plic_platform_probe(struct platform_device *pdev)
{
- return __plic_init(node, parent, 0);
+ return plic_probe(pdev->dev.fwnode);
}
-IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init);
-IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */
+static struct platform_driver plic_driver = {
+ .driver = {
+ .name = "riscv-plic",
+ .of_match_table = plic_match,
+ .suppress_bind_attrs = true,
+ .acpi_match_table = ACPI_PTR(plic_acpi_match),
+ },
+ .probe = plic_platform_probe,
+};
+builtin_platform_driver(plic_driver);
-static int __init plic_edge_init(struct device_node *node,
- struct device_node *parent)
+static int __init plic_early_probe(struct device_node *node,
+ struct device_node *parent)
{
- return __plic_init(node, parent, BIT(PLIC_QUIRK_EDGE_INTERRUPT));
+ return plic_probe(&node->fwnode);
}
-IRQCHIP_DECLARE(andestech_nceplic100, "andestech,nceplic100", plic_edge_init);
-IRQCHIP_DECLARE(thead_c900_plic, "thead,c900-plic", plic_edge_init);
+IRQCHIP_DECLARE(riscv, "allwinner,sun20i-d1-plic", plic_early_probe);
diff --git a/drivers/irqchip/irq-starfive-jh8100-intc.c b/drivers/irqchip/irq-starfive-jh8100-intc.c
new file mode 100644
index 000000000000..0f5837176e53
--- /dev/null
+++ b/drivers/irqchip/irq-starfive-jh8100-intc.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * StarFive JH8100 External Interrupt Controller driver
+ *
+ * Copyright (C) 2023 StarFive Technology Co., Ltd.
+ *
+ * Author: Changhuang Liang <changhuang.liang@starfivetech.com>
+ */
+
+#define pr_fmt(fmt) "irq-starfive-jh8100: " fmt
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+#define STARFIVE_INTC_SRC0_CLEAR 0x10
+#define STARFIVE_INTC_SRC0_MASK 0x14
+#define STARFIVE_INTC_SRC0_INT 0x1c
+
+#define STARFIVE_INTC_SRC_IRQ_NUM 32
+
+struct starfive_irq_chip {
+ void __iomem *base;
+ struct irq_domain *domain;
+ raw_spinlock_t lock;
+};
+
+static void starfive_intc_bit_set(struct starfive_irq_chip *irqc,
+ u32 reg, u32 bit_mask)
+{
+ u32 value;
+
+ value = ioread32(irqc->base + reg);
+ value |= bit_mask;
+ iowrite32(value, irqc->base + reg);
+}
+
+static void starfive_intc_bit_clear(struct starfive_irq_chip *irqc,
+ u32 reg, u32 bit_mask)
+{
+ u32 value;
+
+ value = ioread32(irqc->base + reg);
+ value &= ~bit_mask;
+ iowrite32(value, irqc->base + reg);
+}
+
+static void starfive_intc_unmask(struct irq_data *d)
+{
+ struct starfive_irq_chip *irqc = irq_data_get_irq_chip_data(d);
+
+ raw_spin_lock(&irqc->lock);
+ starfive_intc_bit_clear(irqc, STARFIVE_INTC_SRC0_MASK, BIT(d->hwirq));
+ raw_spin_unlock(&irqc->lock);
+}
+
+static void starfive_intc_mask(struct irq_data *d)
+{
+ struct starfive_irq_chip *irqc = irq_data_get_irq_chip_data(d);
+
+ raw_spin_lock(&irqc->lock);
+ starfive_intc_bit_set(irqc, STARFIVE_INTC_SRC0_MASK, BIT(d->hwirq));
+ raw_spin_unlock(&irqc->lock);
+}
+
+static struct irq_chip intc_dev = {
+ .name = "StarFive JH8100 INTC",
+ .irq_unmask = starfive_intc_unmask,
+ .irq_mask = starfive_intc_mask,
+};
+
+static int starfive_intc_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_domain_set_info(d, irq, hwirq, &intc_dev, d->host_data,
+ handle_level_irq, NULL, NULL);
+
+ return 0;
+}
+
+static const struct irq_domain_ops starfive_intc_domain_ops = {
+ .xlate = irq_domain_xlate_onecell,
+ .map = starfive_intc_map,
+};
+
+static void starfive_intc_irq_handler(struct irq_desc *desc)
+{
+ struct starfive_irq_chip *irqc = irq_data_get_irq_handler_data(&desc->irq_data);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned long value;
+ int hwirq;
+
+ chained_irq_enter(chip, desc);
+
+ value = ioread32(irqc->base + STARFIVE_INTC_SRC0_INT);
+ while (value) {
+ hwirq = ffs(value) - 1;
+
+ generic_handle_domain_irq(irqc->domain, hwirq);
+
+ starfive_intc_bit_set(irqc, STARFIVE_INTC_SRC0_CLEAR, BIT(hwirq));
+ starfive_intc_bit_clear(irqc, STARFIVE_INTC_SRC0_CLEAR, BIT(hwirq));
+
+ __clear_bit(hwirq, &value);
+ }
+
+ chained_irq_exit(chip, desc);
+}
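
The back-to-back set and clear of the same bit in STARFIVE_INTC_SRC0_CLEAR above forms a write pulse that acknowledges the latched source before the next loop iteration; the bit apparently has to be dropped again for a subsequent event on the same hwirq to latch (behaviour inferred from the driver's usage rather than stated in a datasheet).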
+
+static int __init starfive_intc_init(struct device_node *intc,
+ struct device_node *parent)
+{
+ struct starfive_irq_chip *irqc;
+ struct reset_control *rst;
+ struct clk *clk;
+ int parent_irq;
+ int ret;
+
+ irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
+ if (!irqc)
+ return -ENOMEM;
+
+ irqc->base = of_iomap(intc, 0);
+ if (!irqc->base) {
+ pr_err("Unable to map registers\n");
+ ret = -ENXIO;
+ goto err_free;
+ }
+
+ rst = of_reset_control_get_exclusive(intc, NULL);
+ if (IS_ERR(rst)) {
+ pr_err("Unable to get reset control %pe\n", rst);
+ ret = PTR_ERR(rst);
+ goto err_unmap;
+ }
+
+ clk = of_clk_get(intc, 0);
+ if (IS_ERR(clk)) {
+ pr_err("Unable to get clock %pe\n", clk);
+ ret = PTR_ERR(clk);
+ goto err_reset_put;
+ }
+
+ ret = reset_control_deassert(rst);
+ if (ret)
+ goto err_clk_put;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto err_reset_assert;
+
+ raw_spin_lock_init(&irqc->lock);
+
+ irqc->domain = irq_domain_add_linear(intc, STARFIVE_INTC_SRC_IRQ_NUM,
+ &starfive_intc_domain_ops, irqc);
+ if (!irqc->domain) {
+ pr_err("Unable to create IRQ domain\n");
+ ret = -EINVAL;
+ goto err_clk_disable;
+ }
+
+ parent_irq = of_irq_get(intc, 0);
+ if (parent_irq < 0) {
+ pr_err("Failed to get main IRQ: %d\n", parent_irq);
+ ret = parent_irq;
+ goto err_remove_domain;
+ }
+
+ irq_set_chained_handler_and_data(parent_irq, starfive_intc_irq_handler,
+ irqc);
+
+ pr_info("Interrupt controller register, nr_irqs %d\n",
+ STARFIVE_INTC_SRC_IRQ_NUM);
+
+ return 0;
+
+err_remove_domain:
+ irq_domain_remove(irqc->domain);
+err_clk_disable:
+ clk_disable_unprepare(clk);
+err_reset_assert:
+ reset_control_assert(rst);
+err_clk_put:
+ clk_put(clk);
+err_reset_put:
+ reset_control_put(rst);
+err_unmap:
+ iounmap(irqc->base);
+err_free:
+ kfree(irqc);
+ return ret;
+}
+
+IRQCHIP_PLATFORM_DRIVER_BEGIN(starfive_intc)
+IRQCHIP_MATCH("starfive,jh8100-intc", starfive_intc_init)
+IRQCHIP_PLATFORM_DRIVER_END(starfive_intc)
+
+MODULE_DESCRIPTION("StarFive JH8100 External Interrupt Controller");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Changhuang Liang <changhuang.liang@starfivetech.com>");
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 971240e2e31b..7c6a0080c330 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -1,31 +1,21 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) Maxime Coquelin 2015
- * Copyright (C) STMicroelectronics 2017
+ * Copyright (C) STMicroelectronics 2017-2024
* Author: Maxime Coquelin <mcoquelin.stm32@gmail.com>
*/
#include <linux/bitops.h>
-#include <linux/delay.h>
-#include <linux/hwspinlock.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
-#include <linux/mod_devicetable.h>
-#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/platform_device.h>
-#include <linux/syscore_ops.h>
-#include <dt-bindings/interrupt-controller/arm-gic.h>
-
-#define IRQS_PER_BANK 32
-
-#define HWSPNLCK_TIMEOUT 1000 /* usec */
+#define IRQS_PER_BANK 32
struct stm32_exti_bank {
u32 imr_ofst;
@@ -34,12 +24,8 @@ struct stm32_exti_bank {
u32 ftsr_ofst;
u32 swier_ofst;
u32 rpr_ofst;
- u32 fpr_ofst;
- u32 trg_ofst;
};
-#define UNDEF_REG ~0
-
struct stm32_exti_drv_data {
const struct stm32_exti_bank **exti_banks;
const u8 *desc_irqs;
@@ -49,22 +35,20 @@ struct stm32_exti_drv_data {
struct stm32_exti_chip_data {
struct stm32_exti_host_data *host_data;
const struct stm32_exti_bank *reg_bank;
- struct raw_spinlock rlock;
u32 wake_active;
u32 mask_cache;
u32 rtsr_cache;
u32 ftsr_cache;
+ u32 event_reserved;
};
struct stm32_exti_host_data {
void __iomem *base;
+ struct device *dev;
struct stm32_exti_chip_data *chips_data;
const struct stm32_exti_drv_data *drv_data;
- struct hwspinlock *hwlock;
};
-static struct stm32_exti_host_data *stm32_host_data;
-
static const struct stm32_exti_bank stm32f4xx_exti_b1 = {
.imr_ofst = 0x00,
.emr_ofst = 0x04,
@@ -72,8 +56,6 @@ static const struct stm32_exti_bank stm32f4xx_exti_b1 = {
.ftsr_ofst = 0x0C,
.swier_ofst = 0x10,
.rpr_ofst = 0x14,
- .fpr_ofst = UNDEF_REG,
- .trg_ofst = UNDEF_REG,
};
static const struct stm32_exti_bank *stm32f4xx_exti_banks[] = {
@@ -92,8 +74,6 @@ static const struct stm32_exti_bank stm32h7xx_exti_b1 = {
.ftsr_ofst = 0x04,
.swier_ofst = 0x08,
.rpr_ofst = 0x88,
- .fpr_ofst = UNDEF_REG,
- .trg_ofst = UNDEF_REG,
};
static const struct stm32_exti_bank stm32h7xx_exti_b2 = {
@@ -103,8 +83,6 @@ static const struct stm32_exti_bank stm32h7xx_exti_b2 = {
.ftsr_ofst = 0x24,
.swier_ofst = 0x28,
.rpr_ofst = 0x98,
- .fpr_ofst = UNDEF_REG,
- .trg_ofst = UNDEF_REG,
};
static const struct stm32_exti_bank stm32h7xx_exti_b3 = {
@@ -114,8 +92,6 @@ static const struct stm32_exti_bank stm32h7xx_exti_b3 = {
.ftsr_ofst = 0x44,
.swier_ofst = 0x48,
.rpr_ofst = 0xA8,
- .fpr_ofst = UNDEF_REG,
- .trg_ofst = UNDEF_REG,
};
static const struct stm32_exti_bank *stm32h7xx_exti_banks[] = {
@@ -129,180 +105,12 @@ static const struct stm32_exti_drv_data stm32h7xx_drv_data = {
.bank_nr = ARRAY_SIZE(stm32h7xx_exti_banks),
};
-static const struct stm32_exti_bank stm32mp1_exti_b1 = {
- .imr_ofst = 0x80,
- .emr_ofst = UNDEF_REG,
- .rtsr_ofst = 0x00,
- .ftsr_ofst = 0x04,
- .swier_ofst = 0x08,
- .rpr_ofst = 0x0C,
- .fpr_ofst = 0x10,
- .trg_ofst = 0x3EC,
-};
-
-static const struct stm32_exti_bank stm32mp1_exti_b2 = {
- .imr_ofst = 0x90,
- .emr_ofst = UNDEF_REG,
- .rtsr_ofst = 0x20,
- .ftsr_ofst = 0x24,
- .swier_ofst = 0x28,
- .rpr_ofst = 0x2C,
- .fpr_ofst = 0x30,
- .trg_ofst = 0x3E8,
-};
-
-static const struct stm32_exti_bank stm32mp1_exti_b3 = {
- .imr_ofst = 0xA0,
- .emr_ofst = UNDEF_REG,
- .rtsr_ofst = 0x40,
- .ftsr_ofst = 0x44,
- .swier_ofst = 0x48,
- .rpr_ofst = 0x4C,
- .fpr_ofst = 0x50,
- .trg_ofst = 0x3E4,
-};
-
-static const struct stm32_exti_bank *stm32mp1_exti_banks[] = {
- &stm32mp1_exti_b1,
- &stm32mp1_exti_b2,
- &stm32mp1_exti_b3,
-};
-
-static struct irq_chip stm32_exti_h_chip;
-static struct irq_chip stm32_exti_h_chip_direct;
-
-#define EXTI_INVALID_IRQ U8_MAX
-#define STM32MP1_DESC_IRQ_SIZE (ARRAY_SIZE(stm32mp1_exti_banks) * IRQS_PER_BANK)
-
-/*
- * Use some intentionally tricky logic here to initialize the whole array to
- * EXTI_INVALID_IRQ, but then override certain fields, requiring us to indicate
- * that we "know" that there are overrides in this structure, and we'll need to
- * disable that warning from W=1 builds.
- */
-__diag_push();
-__diag_ignore_all("-Woverride-init",
- "logic to initialize all and then override some is OK");
-
-static const u8 stm32mp1_desc_irq[] = {
- /* default value */
- [0 ... (STM32MP1_DESC_IRQ_SIZE - 1)] = EXTI_INVALID_IRQ,
-
- [0] = 6,
- [1] = 7,
- [2] = 8,
- [3] = 9,
- [4] = 10,
- [5] = 23,
- [6] = 64,
- [7] = 65,
- [8] = 66,
- [9] = 67,
- [10] = 40,
- [11] = 42,
- [12] = 76,
- [13] = 77,
- [14] = 121,
- [15] = 127,
- [16] = 1,
- [19] = 3,
- [21] = 31,
- [22] = 33,
- [23] = 72,
- [24] = 95,
- [25] = 107,
- [26] = 37,
- [27] = 38,
- [28] = 39,
- [29] = 71,
- [30] = 52,
- [31] = 53,
- [32] = 82,
- [33] = 83,
- [46] = 151,
- [47] = 93,
- [48] = 138,
- [50] = 139,
- [52] = 140,
- [53] = 141,
- [54] = 135,
- [61] = 100,
- [65] = 144,
- [68] = 143,
- [70] = 62,
- [73] = 129,
-};
-
-static const u8 stm32mp13_desc_irq[] = {
- /* default value */
- [0 ... (STM32MP1_DESC_IRQ_SIZE - 1)] = EXTI_INVALID_IRQ,
-
- [0] = 6,
- [1] = 7,
- [2] = 8,
- [3] = 9,
- [4] = 10,
- [5] = 24,
- [6] = 65,
- [7] = 66,
- [8] = 67,
- [9] = 68,
- [10] = 41,
- [11] = 43,
- [12] = 77,
- [13] = 78,
- [14] = 106,
- [15] = 109,
- [16] = 1,
- [19] = 3,
- [21] = 32,
- [22] = 34,
- [23] = 73,
- [24] = 93,
- [25] = 114,
- [26] = 38,
- [27] = 39,
- [28] = 40,
- [29] = 72,
- [30] = 53,
- [31] = 54,
- [32] = 83,
- [33] = 84,
- [44] = 96,
- [47] = 92,
- [48] = 116,
- [50] = 117,
- [52] = 118,
- [53] = 119,
- [68] = 63,
- [70] = 98,
-};
-
-__diag_pop();
-
-static const struct stm32_exti_drv_data stm32mp1_drv_data = {
- .exti_banks = stm32mp1_exti_banks,
- .bank_nr = ARRAY_SIZE(stm32mp1_exti_banks),
- .desc_irqs = stm32mp1_desc_irq,
-};
-
-static const struct stm32_exti_drv_data stm32mp13_drv_data = {
- .exti_banks = stm32mp1_exti_banks,
- .bank_nr = ARRAY_SIZE(stm32mp1_exti_banks),
- .desc_irqs = stm32mp13_desc_irq,
-};
-
static unsigned long stm32_exti_pending(struct irq_chip_generic *gc)
{
struct stm32_exti_chip_data *chip_data = gc->private;
const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
- unsigned long pending;
-
- pending = irq_reg_readl(gc, stm32_bank->rpr_ofst);
- if (stm32_bank->fpr_ofst != UNDEF_REG)
- pending |= irq_reg_readl(gc, stm32_bank->fpr_ofst);
- return pending;
+ return irq_reg_readl(gc, stm32_bank->rpr_ofst);
}
static void stm32_irq_handler(struct irq_desc *desc)
@@ -322,7 +130,7 @@ static void stm32_irq_handler(struct irq_desc *desc)
while ((pending = stm32_exti_pending(gc))) {
for_each_set_bit(n, &pending, IRQS_PER_BANK)
generic_handle_domain_irq(domain, irq_base + n);
- }
+ }
}
chained_irq_exit(chip, desc);
@@ -358,33 +166,21 @@ static int stm32_irq_set_type(struct irq_data *d, unsigned int type)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct stm32_exti_chip_data *chip_data = gc->private;
const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
- struct hwspinlock *hwlock = chip_data->host_data->hwlock;
u32 rtsr, ftsr;
int err;
irq_gc_lock(gc);
- if (hwlock) {
- err = hwspin_lock_timeout_in_atomic(hwlock, HWSPNLCK_TIMEOUT);
- if (err) {
- pr_err("%s can't get hwspinlock (%d)\n", __func__, err);
- goto unlock;
- }
- }
-
rtsr = irq_reg_readl(gc, stm32_bank->rtsr_ofst);
ftsr = irq_reg_readl(gc, stm32_bank->ftsr_ofst);
err = stm32_exti_set_type(d, type, &rtsr, &ftsr);
if (err)
- goto unspinlock;
+ goto unlock;
irq_reg_writel(gc, rtsr, stm32_bank->rtsr_ofst);
irq_reg_writel(gc, ftsr, stm32_bank->ftsr_ofst);
-unspinlock:
- if (hwlock)
- hwspin_unlock_in_atomic(hwlock);
unlock:
irq_gc_unlock(gc);
@@ -472,282 +268,10 @@ static void stm32_irq_ack(struct irq_data *d)
irq_gc_lock(gc);
irq_reg_writel(gc, d->mask, stm32_bank->rpr_ofst);
- if (stm32_bank->fpr_ofst != UNDEF_REG)
- irq_reg_writel(gc, d->mask, stm32_bank->fpr_ofst);
irq_gc_unlock(gc);
}
-/* directly set the target bit without reading first. */
-static inline void stm32_exti_write_bit(struct irq_data *d, u32 reg)
-{
- struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
- void __iomem *base = chip_data->host_data->base;
- u32 val = BIT(d->hwirq % IRQS_PER_BANK);
-
- writel_relaxed(val, base + reg);
-}
-
-static inline u32 stm32_exti_set_bit(struct irq_data *d, u32 reg)
-{
- struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
- void __iomem *base = chip_data->host_data->base;
- u32 val;
-
- val = readl_relaxed(base + reg);
- val |= BIT(d->hwirq % IRQS_PER_BANK);
- writel_relaxed(val, base + reg);
-
- return val;
-}
-
-static inline u32 stm32_exti_clr_bit(struct irq_data *d, u32 reg)
-{
- struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
- void __iomem *base = chip_data->host_data->base;
- u32 val;
-
- val = readl_relaxed(base + reg);
- val &= ~BIT(d->hwirq % IRQS_PER_BANK);
- writel_relaxed(val, base + reg);
-
- return val;
-}
-
-static void stm32_exti_h_eoi(struct irq_data *d)
-{
- struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
- const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
-
- raw_spin_lock(&chip_data->rlock);
-
- stm32_exti_write_bit(d, stm32_bank->rpr_ofst);
- if (stm32_bank->fpr_ofst != UNDEF_REG)
- stm32_exti_write_bit(d, stm32_bank->fpr_ofst);
-
- raw_spin_unlock(&chip_data->rlock);
-
- if (d->parent_data->chip)
- irq_chip_eoi_parent(d);
-}
-
-static void stm32_exti_h_mask(struct irq_data *d)
-{
- struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
- const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
-
- raw_spin_lock(&chip_data->rlock);
- chip_data->mask_cache = stm32_exti_clr_bit(d, stm32_bank->imr_ofst);
- raw_spin_unlock(&chip_data->rlock);
-
- if (d->parent_data->chip)
- irq_chip_mask_parent(d);
-}
-
-static void stm32_exti_h_unmask(struct irq_data *d)
-{
- struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
- const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
-
- raw_spin_lock(&chip_data->rlock);
- chip_data->mask_cache = stm32_exti_set_bit(d, stm32_bank->imr_ofst);
- raw_spin_unlock(&chip_data->rlock);
-
- if (d->parent_data->chip)
- irq_chip_unmask_parent(d);
-}
-
-static int stm32_exti_h_set_type(struct irq_data *d, unsigned int type)
-{
- struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
- const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
- struct hwspinlock *hwlock = chip_data->host_data->hwlock;
- void __iomem *base = chip_data->host_data->base;
- u32 rtsr, ftsr;
- int err;
-
- raw_spin_lock(&chip_data->rlock);
-
- if (hwlock) {
- err = hwspin_lock_timeout_in_atomic(hwlock, HWSPNLCK_TIMEOUT);
- if (err) {
- pr_err("%s can't get hwspinlock (%d)\n", __func__, err);
- goto unlock;
- }
- }
-
- rtsr = readl_relaxed(base + stm32_bank->rtsr_ofst);
- ftsr = readl_relaxed(base + stm32_bank->ftsr_ofst);
-
- err = stm32_exti_set_type(d, type, &rtsr, &ftsr);
- if (err)
- goto unspinlock;
-
- writel_relaxed(rtsr, base + stm32_bank->rtsr_ofst);
- writel_relaxed(ftsr, base + stm32_bank->ftsr_ofst);
-
-unspinlock:
- if (hwlock)
- hwspin_unlock_in_atomic(hwlock);
-unlock:
- raw_spin_unlock(&chip_data->rlock);
-
- return err;
-}
-
-static int stm32_exti_h_set_wake(struct irq_data *d, unsigned int on)
-{
- struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
- u32 mask = BIT(d->hwirq % IRQS_PER_BANK);
-
- raw_spin_lock(&chip_data->rlock);
-
- if (on)
- chip_data->wake_active |= mask;
- else
- chip_data->wake_active &= ~mask;
-
- raw_spin_unlock(&chip_data->rlock);
-
- return 0;
-}
-
-static int stm32_exti_h_set_affinity(struct irq_data *d,
- const struct cpumask *dest, bool force)
-{
- if (d->parent_data->chip)
- return irq_chip_set_affinity_parent(d, dest, force);
-
- return IRQ_SET_MASK_OK_DONE;
-}
-
-static int __maybe_unused stm32_exti_h_suspend(void)
-{
- struct stm32_exti_chip_data *chip_data;
- int i;
-
- for (i = 0; i < stm32_host_data->drv_data->bank_nr; i++) {
- chip_data = &stm32_host_data->chips_data[i];
- raw_spin_lock(&chip_data->rlock);
- stm32_chip_suspend(chip_data, chip_data->wake_active);
- raw_spin_unlock(&chip_data->rlock);
- }
-
- return 0;
-}
-
-static void __maybe_unused stm32_exti_h_resume(void)
-{
- struct stm32_exti_chip_data *chip_data;
- int i;
-
- for (i = 0; i < stm32_host_data->drv_data->bank_nr; i++) {
- chip_data = &stm32_host_data->chips_data[i];
- raw_spin_lock(&chip_data->rlock);
- stm32_chip_resume(chip_data, chip_data->mask_cache);
- raw_spin_unlock(&chip_data->rlock);
- }
-}
-
-static struct syscore_ops stm32_exti_h_syscore_ops = {
-#ifdef CONFIG_PM_SLEEP
- .suspend = stm32_exti_h_suspend,
- .resume = stm32_exti_h_resume,
-#endif
-};
-
-static void stm32_exti_h_syscore_init(struct stm32_exti_host_data *host_data)
-{
- stm32_host_data = host_data;
- register_syscore_ops(&stm32_exti_h_syscore_ops);
-}
-
-static void stm32_exti_h_syscore_deinit(void)
-{
- unregister_syscore_ops(&stm32_exti_h_syscore_ops);
-}
-
-static int stm32_exti_h_retrigger(struct irq_data *d)
-{
- struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
- const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
- void __iomem *base = chip_data->host_data->base;
- u32 mask = BIT(d->hwirq % IRQS_PER_BANK);
-
- writel_relaxed(mask, base + stm32_bank->swier_ofst);
-
- return 0;
-}
-
-static struct irq_chip stm32_exti_h_chip = {
- .name = "stm32-exti-h",
- .irq_eoi = stm32_exti_h_eoi,
- .irq_mask = stm32_exti_h_mask,
- .irq_unmask = stm32_exti_h_unmask,
- .irq_retrigger = stm32_exti_h_retrigger,
- .irq_set_type = stm32_exti_h_set_type,
- .irq_set_wake = stm32_exti_h_set_wake,
- .flags = IRQCHIP_MASK_ON_SUSPEND,
- .irq_set_affinity = IS_ENABLED(CONFIG_SMP) ? stm32_exti_h_set_affinity : NULL,
-};
-
-static struct irq_chip stm32_exti_h_chip_direct = {
- .name = "stm32-exti-h-direct",
- .irq_eoi = irq_chip_eoi_parent,
- .irq_ack = irq_chip_ack_parent,
- .irq_mask = stm32_exti_h_mask,
- .irq_unmask = stm32_exti_h_unmask,
- .irq_retrigger = irq_chip_retrigger_hierarchy,
- .irq_set_type = irq_chip_set_type_parent,
- .irq_set_wake = stm32_exti_h_set_wake,
- .flags = IRQCHIP_MASK_ON_SUSPEND,
- .irq_set_affinity = IS_ENABLED(CONFIG_SMP) ? irq_chip_set_affinity_parent : NULL,
-};
-
-static int stm32_exti_h_domain_alloc(struct irq_domain *dm,
- unsigned int virq,
- unsigned int nr_irqs, void *data)
-{
- struct stm32_exti_host_data *host_data = dm->host_data;
- struct stm32_exti_chip_data *chip_data;
- u8 desc_irq;
- struct irq_fwspec *fwspec = data;
- struct irq_fwspec p_fwspec;
- irq_hw_number_t hwirq;
- int bank;
- u32 event_trg;
- struct irq_chip *chip;
-
- hwirq = fwspec->param[0];
- if (hwirq >= host_data->drv_data->bank_nr * IRQS_PER_BANK)
- return -EINVAL;
-
- bank = hwirq / IRQS_PER_BANK;
- chip_data = &host_data->chips_data[bank];
-
- event_trg = readl_relaxed(host_data->base + chip_data->reg_bank->trg_ofst);
- chip = (event_trg & BIT(hwirq % IRQS_PER_BANK)) ?
- &stm32_exti_h_chip : &stm32_exti_h_chip_direct;
-
- irq_domain_set_hwirq_and_chip(dm, virq, hwirq, chip, chip_data);
-
- if (!host_data->drv_data->desc_irqs)
- return -EINVAL;
-
- desc_irq = host_data->drv_data->desc_irqs[hwirq];
- if (desc_irq != EXTI_INVALID_IRQ) {
- p_fwspec.fwnode = dm->parent->fwnode;
- p_fwspec.param_count = 3;
- p_fwspec.param[0] = GIC_SPI;
- p_fwspec.param[1] = desc_irq;
- p_fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH;
-
- return irq_domain_alloc_irqs_parent(dm, virq, 1, &p_fwspec);
- }
-
- return 0;
-}
-
static struct
stm32_exti_host_data *stm32_exti_host_init(const struct stm32_exti_drv_data *dd,
struct device_node *node)
@@ -771,8 +295,6 @@ stm32_exti_host_data *stm32_exti_host_init(const struct stm32_exti_drv_data *dd,
goto free_chips_data;
}
- stm32_host_data = host_data;
-
return host_data;
free_chips_data:
@@ -797,15 +319,12 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data,
chip_data->host_data = h_data;
chip_data->reg_bank = stm32_bank;
- raw_spin_lock_init(&chip_data->rlock);
-
/*
* This IP has no reset, so after hot reboot we should
* clear registers to avoid residue
*/
writel_relaxed(0, base + stm32_bank->imr_ofst);
- if (stm32_bank->emr_ofst != UNDEF_REG)
- writel_relaxed(0, base + stm32_bank->emr_ofst);
+ writel_relaxed(0, base + stm32_bank->emr_ofst);
pr_info("%pOF: bank%d\n", node, bank_idx);
@@ -885,133 +404,6 @@ out_unmap:
return ret;
}
-static const struct irq_domain_ops stm32_exti_h_domain_ops = {
- .alloc = stm32_exti_h_domain_alloc,
- .free = irq_domain_free_irqs_common,
- .xlate = irq_domain_xlate_twocell,
-};
-
-static void stm32_exti_remove_irq(void *data)
-{
- struct irq_domain *domain = data;
-
- irq_domain_remove(domain);
-}
-
-static int stm32_exti_remove(struct platform_device *pdev)
-{
- stm32_exti_h_syscore_deinit();
- return 0;
-}
-
-static int stm32_exti_probe(struct platform_device *pdev)
-{
- int ret, i;
- struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
- struct irq_domain *parent_domain, *domain;
- struct stm32_exti_host_data *host_data;
- const struct stm32_exti_drv_data *drv_data;
-
- host_data = devm_kzalloc(dev, sizeof(*host_data), GFP_KERNEL);
- if (!host_data)
- return -ENOMEM;
-
- /* check for optional hwspinlock which may be not available yet */
- ret = of_hwspin_lock_get_id(np, 0);
- if (ret == -EPROBE_DEFER)
- /* hwspinlock framework not yet ready */
- return ret;
-
- if (ret >= 0) {
- host_data->hwlock = devm_hwspin_lock_request_specific(dev, ret);
- if (!host_data->hwlock) {
- dev_err(dev, "Failed to request hwspinlock\n");
- return -EINVAL;
- }
- } else if (ret != -ENOENT) {
- /* note: ENOENT is a valid case (means 'no hwspinlock') */
- dev_err(dev, "Failed to get hwspinlock\n");
- return ret;
- }
-
- /* initialize host_data */
- drv_data = of_device_get_match_data(dev);
- if (!drv_data) {
- dev_err(dev, "no of match data\n");
- return -ENODEV;
- }
- host_data->drv_data = drv_data;
-
- host_data->chips_data = devm_kcalloc(dev, drv_data->bank_nr,
- sizeof(*host_data->chips_data),
- GFP_KERNEL);
- if (!host_data->chips_data)
- return -ENOMEM;
-
- host_data->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(host_data->base))
- return PTR_ERR(host_data->base);
-
- for (i = 0; i < drv_data->bank_nr; i++)
- stm32_exti_chip_init(host_data, i, np);
-
- parent_domain = irq_find_host(of_irq_find_parent(np));
- if (!parent_domain) {
- dev_err(dev, "GIC interrupt-parent not found\n");
- return -EINVAL;
- }
-
- domain = irq_domain_add_hierarchy(parent_domain, 0,
- drv_data->bank_nr * IRQS_PER_BANK,
- np, &stm32_exti_h_domain_ops,
- host_data);
-
- if (!domain) {
- dev_err(dev, "Could not register exti domain\n");
- return -ENOMEM;
- }
-
- ret = devm_add_action_or_reset(dev, stm32_exti_remove_irq, domain);
- if (ret)
- return ret;
-
- stm32_exti_h_syscore_init(host_data);
-
- return 0;
-}
-
-/* platform driver only for MP1 */
-static const struct of_device_id stm32_exti_ids[] = {
- { .compatible = "st,stm32mp1-exti", .data = &stm32mp1_drv_data},
- { .compatible = "st,stm32mp13-exti", .data = &stm32mp13_drv_data},
- {},
-};
-MODULE_DEVICE_TABLE(of, stm32_exti_ids);
-
-static struct platform_driver stm32_exti_driver = {
- .probe = stm32_exti_probe,
- .remove = stm32_exti_remove,
- .driver = {
- .name = "stm32_exti",
- .of_match_table = stm32_exti_ids,
- },
-};
-
-static int __init stm32_exti_arch_init(void)
-{
- return platform_driver_register(&stm32_exti_driver);
-}
-
-static void __exit stm32_exti_arch_exit(void)
-{
- return platform_driver_unregister(&stm32_exti_driver);
-}
-
-arch_initcall(stm32_exti_arch_init);
-module_exit(stm32_exti_arch_exit);
-
-/* no platform driver for F4 and H7 */
static int __init stm32f4_exti_of_init(struct device_node *np,
struct device_node *parent)
{
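
With the hwspinlock coordination moved out to the new stm32mp driver, the legacy F4/H7 paths are reduced to a plain read-modify-write under the generic-chip lock. A minimal sketch of that remaining pattern, with an assumed register offset and only the two edge types handled:

#include <linux/irq.h>

#define EXAMPLE_RTSR_OFST	0x00	/* assumed offset for the sketch */

static int example_gc_set_type(struct irq_data *d, unsigned int type)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	u32 rtsr;

	if (type != IRQ_TYPE_EDGE_RISING && type != IRQ_TYPE_EDGE_FALLING)
		return -EINVAL;

	irq_gc_lock(gc);
	rtsr = irq_reg_readl(gc, EXAMPLE_RTSR_OFST);
	if (type == IRQ_TYPE_EDGE_RISING)
		rtsr |= d->mask;
	else
		rtsr &= ~d->mask;
	irq_reg_writel(gc, rtsr, EXAMPLE_RTSR_OFST);
	irq_gc_unlock(gc);

	return 0;
}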
diff --git a/drivers/irqchip/irq-stm32mp-exti.c b/drivers/irqchip/irq-stm32mp-exti.c
new file mode 100644
index 000000000000..cb83d6cc6113
--- /dev/null
+++ b/drivers/irqchip/irq-stm32mp-exti.c
@@ -0,0 +1,728 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) Maxime Coquelin 2015
+ * Copyright (C) STMicroelectronics 2017-2024
+ * Author: Maxime Coquelin <mcoquelin.stm32@gmail.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/hwspinlock.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define IRQS_PER_BANK 32
+
+#define HWSPNLCK_TIMEOUT 1000 /* usec */
+
+#define EXTI_EnCIDCFGR(n) (0x180 + (n) * 4)
+#define EXTI_HWCFGR1 0x3f0
+
+/* Register: EXTI_EnCIDCFGR(n) */
+#define EXTI_CIDCFGR_CFEN_MASK BIT(0)
+#define EXTI_CIDCFGR_CID_MASK GENMASK(6, 4)
+#define EXTI_CIDCFGR_CID_SHIFT 4
+
+/* Register: EXTI_HWCFGR1 */
+#define EXTI_HWCFGR1_CIDWIDTH_MASK GENMASK(27, 24)
+
+#define EXTI_CID1 1
+
+struct stm32mp_exti_bank {
+ u32 imr_ofst;
+ u32 rtsr_ofst;
+ u32 ftsr_ofst;
+ u32 swier_ofst;
+ u32 rpr_ofst;
+ u32 fpr_ofst;
+ u32 trg_ofst;
+ u32 seccfgr_ofst;
+};
+
+struct stm32mp_exti_drv_data {
+ const struct stm32mp_exti_bank **exti_banks;
+ const u8 *desc_irqs;
+ u32 bank_nr;
+};
+
+struct stm32mp_exti_chip_data {
+ struct stm32mp_exti_host_data *host_data;
+ const struct stm32mp_exti_bank *reg_bank;
+ struct raw_spinlock rlock;
+ u32 wake_active;
+ u32 mask_cache;
+ u32 rtsr_cache;
+ u32 ftsr_cache;
+ u32 event_reserved;
+};
+
+struct stm32mp_exti_host_data {
+ void __iomem *base;
+ struct device *dev;
+ struct stm32mp_exti_chip_data *chips_data;
+ const struct stm32mp_exti_drv_data *drv_data;
+ struct hwspinlock *hwlock;
+ /* skip internal desc_irqs array and get it from DT */
+ bool dt_has_irqs_desc;
+};
+
+static const struct stm32mp_exti_bank stm32mp_exti_b1 = {
+ .imr_ofst = 0x80,
+ .rtsr_ofst = 0x00,
+ .ftsr_ofst = 0x04,
+ .swier_ofst = 0x08,
+ .rpr_ofst = 0x0C,
+ .fpr_ofst = 0x10,
+ .trg_ofst = 0x3EC,
+ .seccfgr_ofst = 0x14,
+};
+
+static const struct stm32mp_exti_bank stm32mp_exti_b2 = {
+ .imr_ofst = 0x90,
+ .rtsr_ofst = 0x20,
+ .ftsr_ofst = 0x24,
+ .swier_ofst = 0x28,
+ .rpr_ofst = 0x2C,
+ .fpr_ofst = 0x30,
+ .trg_ofst = 0x3E8,
+ .seccfgr_ofst = 0x34,
+};
+
+static const struct stm32mp_exti_bank stm32mp_exti_b3 = {
+ .imr_ofst = 0xA0,
+ .rtsr_ofst = 0x40,
+ .ftsr_ofst = 0x44,
+ .swier_ofst = 0x48,
+ .rpr_ofst = 0x4C,
+ .fpr_ofst = 0x50,
+ .trg_ofst = 0x3E4,
+ .seccfgr_ofst = 0x54,
+};
+
+static const struct stm32mp_exti_bank *stm32mp_exti_banks[] = {
+ &stm32mp_exti_b1,
+ &stm32mp_exti_b2,
+ &stm32mp_exti_b3,
+};
+
+static struct irq_chip stm32mp_exti_chip;
+static struct irq_chip stm32mp_exti_chip_direct;
+
+#define EXTI_INVALID_IRQ U8_MAX
+#define STM32MP_DESC_IRQ_SIZE (ARRAY_SIZE(stm32mp_exti_banks) * IRQS_PER_BANK)
+
+/*
+ * Use some intentionally tricky logic here to initialize the whole array to
+ * EXTI_INVALID_IRQ, but then override certain fields, requiring us to indicate
+ * that we "know" that there are overrides in this structure, and we'll need to
+ * disable that warning from W=1 builds.
+ */
+__diag_push();
+__diag_ignore_all("-Woverride-init",
+ "logic to initialize all and then override some is OK");
+
+static const u8 stm32mp1_desc_irq[] = {
+ /* default value */
+ [0 ... (STM32MP_DESC_IRQ_SIZE - 1)] = EXTI_INVALID_IRQ,
+
+ [0] = 6,
+ [1] = 7,
+ [2] = 8,
+ [3] = 9,
+ [4] = 10,
+ [5] = 23,
+ [6] = 64,
+ [7] = 65,
+ [8] = 66,
+ [9] = 67,
+ [10] = 40,
+ [11] = 42,
+ [12] = 76,
+ [13] = 77,
+ [14] = 121,
+ [15] = 127,
+ [16] = 1,
+ [19] = 3,
+ [21] = 31,
+ [22] = 33,
+ [23] = 72,
+ [24] = 95,
+ [25] = 107,
+ [26] = 37,
+ [27] = 38,
+ [28] = 39,
+ [29] = 71,
+ [30] = 52,
+ [31] = 53,
+ [32] = 82,
+ [33] = 83,
+ [46] = 151,
+ [47] = 93,
+ [48] = 138,
+ [50] = 139,
+ [52] = 140,
+ [53] = 141,
+ [54] = 135,
+ [61] = 100,
+ [65] = 144,
+ [68] = 143,
+ [70] = 62,
+ [73] = 129,
+};
+
+static const u8 stm32mp13_desc_irq[] = {
+ /* default value */
+ [0 ... (STM32MP_DESC_IRQ_SIZE - 1)] = EXTI_INVALID_IRQ,
+
+ [0] = 6,
+ [1] = 7,
+ [2] = 8,
+ [3] = 9,
+ [4] = 10,
+ [5] = 24,
+ [6] = 65,
+ [7] = 66,
+ [8] = 67,
+ [9] = 68,
+ [10] = 41,
+ [11] = 43,
+ [12] = 77,
+ [13] = 78,
+ [14] = 106,
+ [15] = 109,
+ [16] = 1,
+ [19] = 3,
+ [21] = 32,
+ [22] = 34,
+ [23] = 73,
+ [24] = 93,
+ [25] = 114,
+ [26] = 38,
+ [27] = 39,
+ [28] = 40,
+ [29] = 72,
+ [30] = 53,
+ [31] = 54,
+ [32] = 83,
+ [33] = 84,
+ [44] = 96,
+ [47] = 92,
+ [48] = 116,
+ [50] = 117,
+ [52] = 118,
+ [53] = 119,
+ [68] = 63,
+ [70] = 98,
+};
+
+__diag_pop();
+
+static const struct stm32mp_exti_drv_data stm32mp1_drv_data = {
+ .exti_banks = stm32mp_exti_banks,
+ .bank_nr = ARRAY_SIZE(stm32mp_exti_banks),
+ .desc_irqs = stm32mp1_desc_irq,
+};
+
+static const struct stm32mp_exti_drv_data stm32mp13_drv_data = {
+ .exti_banks = stm32mp_exti_banks,
+ .bank_nr = ARRAY_SIZE(stm32mp_exti_banks),
+ .desc_irqs = stm32mp13_desc_irq,
+};
+
+static int stm32mp_exti_convert_type(struct irq_data *d, unsigned int type, u32 *rtsr, u32 *ftsr)
+{
+ u32 mask = BIT(d->hwirq % IRQS_PER_BANK);
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ *rtsr |= mask;
+ *ftsr &= ~mask;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ *rtsr &= ~mask;
+ *ftsr |= mask;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ *rtsr |= mask;
+ *ftsr |= mask;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void stm32mp_chip_suspend(struct stm32mp_exti_chip_data *chip_data, u32 wake_active)
+{
+ const struct stm32mp_exti_bank *bank = chip_data->reg_bank;
+ void __iomem *base = chip_data->host_data->base;
+
+ /* save rtsr, ftsr registers */
+ chip_data->rtsr_cache = readl_relaxed(base + bank->rtsr_ofst);
+ chip_data->ftsr_cache = readl_relaxed(base + bank->ftsr_ofst);
+
+ writel_relaxed(wake_active, base + bank->imr_ofst);
+}
+
+static void stm32mp_chip_resume(struct stm32mp_exti_chip_data *chip_data, u32 mask_cache)
+{
+ const struct stm32mp_exti_bank *bank = chip_data->reg_bank;
+ void __iomem *base = chip_data->host_data->base;
+
+ /* restore rtsr, ftsr, registers */
+ writel_relaxed(chip_data->rtsr_cache, base + bank->rtsr_ofst);
+ writel_relaxed(chip_data->ftsr_cache, base + bank->ftsr_ofst);
+
+ writel_relaxed(mask_cache, base + bank->imr_ofst);
+}
+
+/* directly set the target bit without reading first. */
+static inline void stm32mp_exti_write_bit(struct irq_data *d, u32 reg)
+{
+ struct stm32mp_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+ void __iomem *base = chip_data->host_data->base;
+ u32 val = BIT(d->hwirq % IRQS_PER_BANK);
+
+ writel_relaxed(val, base + reg);
+}
+
+static inline u32 stm32mp_exti_set_bit(struct irq_data *d, u32 reg)
+{
+ struct stm32mp_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+ void __iomem *base = chip_data->host_data->base;
+ u32 val;
+
+ val = readl_relaxed(base + reg);
+ val |= BIT(d->hwirq % IRQS_PER_BANK);
+ writel_relaxed(val, base + reg);
+
+ return val;
+}
+
+static inline u32 stm32mp_exti_clr_bit(struct irq_data *d, u32 reg)
+{
+ struct stm32mp_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+ void __iomem *base = chip_data->host_data->base;
+ u32 val;
+
+ val = readl_relaxed(base + reg);
+ val &= ~BIT(d->hwirq % IRQS_PER_BANK);
+ writel_relaxed(val, base + reg);
+
+ return val;
+}
+
+static void stm32mp_exti_eoi(struct irq_data *d)
+{
+ struct stm32mp_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+ const struct stm32mp_exti_bank *bank = chip_data->reg_bank;
+
+ raw_spin_lock(&chip_data->rlock);
+
+ stm32mp_exti_write_bit(d, bank->rpr_ofst);
+ stm32mp_exti_write_bit(d, bank->fpr_ofst);
+
+ raw_spin_unlock(&chip_data->rlock);
+
+ if (d->parent_data->chip)
+ irq_chip_eoi_parent(d);
+}
+
+static void stm32mp_exti_mask(struct irq_data *d)
+{
+ struct stm32mp_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+ const struct stm32mp_exti_bank *bank = chip_data->reg_bank;
+
+ raw_spin_lock(&chip_data->rlock);
+ chip_data->mask_cache = stm32mp_exti_clr_bit(d, bank->imr_ofst);
+ raw_spin_unlock(&chip_data->rlock);
+
+ if (d->parent_data->chip)
+ irq_chip_mask_parent(d);
+}
+
+static void stm32mp_exti_unmask(struct irq_data *d)
+{
+ struct stm32mp_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+ const struct stm32mp_exti_bank *bank = chip_data->reg_bank;
+
+ raw_spin_lock(&chip_data->rlock);
+ chip_data->mask_cache = stm32mp_exti_set_bit(d, bank->imr_ofst);
+ raw_spin_unlock(&chip_data->rlock);
+
+ if (d->parent_data->chip)
+ irq_chip_unmask_parent(d);
+}
+
+static int stm32mp_exti_set_type(struct irq_data *d, unsigned int type)
+{
+ struct stm32mp_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+ const struct stm32mp_exti_bank *bank = chip_data->reg_bank;
+ struct hwspinlock *hwlock = chip_data->host_data->hwlock;
+ void __iomem *base = chip_data->host_data->base;
+ u32 rtsr, ftsr;
+ int err;
+
+ raw_spin_lock(&chip_data->rlock);
+
+ if (hwlock) {
+ err = hwspin_lock_timeout_in_atomic(hwlock, HWSPNLCK_TIMEOUT);
+ if (err) {
+ pr_err("%s can't get hwspinlock (%d)\n", __func__, err);
+ goto unlock;
+ }
+ }
+
+ rtsr = readl_relaxed(base + bank->rtsr_ofst);
+ ftsr = readl_relaxed(base + bank->ftsr_ofst);
+
+ err = stm32mp_exti_convert_type(d, type, &rtsr, &ftsr);
+ if (!err) {
+ writel_relaxed(rtsr, base + bank->rtsr_ofst);
+ writel_relaxed(ftsr, base + bank->ftsr_ofst);
+ }
+
+ if (hwlock)
+ hwspin_unlock_in_atomic(hwlock);
+unlock:
+ raw_spin_unlock(&chip_data->rlock);
+ return err;
+}
+
+static int stm32mp_exti_set_wake(struct irq_data *d, unsigned int on)
+{
+ struct stm32mp_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+ u32 mask = BIT(d->hwirq % IRQS_PER_BANK);
+
+ raw_spin_lock(&chip_data->rlock);
+
+ if (on)
+ chip_data->wake_active |= mask;
+ else
+ chip_data->wake_active &= ~mask;
+
+ raw_spin_unlock(&chip_data->rlock);
+
+ return 0;
+}
+
+static int stm32mp_exti_set_affinity(struct irq_data *d, const struct cpumask *dest, bool force)
+{
+ if (d->parent_data->chip)
+ return irq_chip_set_affinity_parent(d, dest, force);
+
+ return IRQ_SET_MASK_OK_DONE;
+}
+
+static int stm32mp_exti_suspend(struct device *dev)
+{
+ struct stm32mp_exti_host_data *host_data = dev_get_drvdata(dev);
+ struct stm32mp_exti_chip_data *chip_data;
+ int i;
+
+ for (i = 0; i < host_data->drv_data->bank_nr; i++) {
+ chip_data = &host_data->chips_data[i];
+ stm32mp_chip_suspend(chip_data, chip_data->wake_active);
+ }
+
+ return 0;
+}
+
+static int stm32mp_exti_resume(struct device *dev)
+{
+ struct stm32mp_exti_host_data *host_data = dev_get_drvdata(dev);
+ struct stm32mp_exti_chip_data *chip_data;
+ int i;
+
+ for (i = 0; i < host_data->drv_data->bank_nr; i++) {
+ chip_data = &host_data->chips_data[i];
+ stm32mp_chip_resume(chip_data, chip_data->mask_cache);
+ }
+
+ return 0;
+}
+
+static int stm32mp_exti_retrigger(struct irq_data *d)
+{
+ struct stm32mp_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+ const struct stm32mp_exti_bank *bank = chip_data->reg_bank;
+ void __iomem *base = chip_data->host_data->base;
+ u32 mask = BIT(d->hwirq % IRQS_PER_BANK);
+
+ writel_relaxed(mask, base + bank->swier_ofst);
+
+ return 0;
+}
+
+static struct irq_chip stm32mp_exti_chip = {
+ .name = "stm32mp-exti",
+ .irq_eoi = stm32mp_exti_eoi,
+ .irq_mask = stm32mp_exti_mask,
+ .irq_unmask = stm32mp_exti_unmask,
+ .irq_retrigger = stm32mp_exti_retrigger,
+ .irq_set_type = stm32mp_exti_set_type,
+ .irq_set_wake = stm32mp_exti_set_wake,
+ .flags = IRQCHIP_MASK_ON_SUSPEND,
+ .irq_set_affinity = IS_ENABLED(CONFIG_SMP) ? stm32mp_exti_set_affinity : NULL,
+};
+
+static struct irq_chip stm32mp_exti_chip_direct = {
+ .name = "stm32mp-exti-direct",
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_ack = irq_chip_ack_parent,
+ .irq_mask = stm32mp_exti_mask,
+ .irq_unmask = stm32mp_exti_unmask,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_set_wake = stm32mp_exti_set_wake,
+ .flags = IRQCHIP_MASK_ON_SUSPEND,
+ .irq_set_affinity = IS_ENABLED(CONFIG_SMP) ? irq_chip_set_affinity_parent : NULL,
+};
+
+static int stm32mp_exti_domain_alloc(struct irq_domain *dm,
+ unsigned int virq,
+ unsigned int nr_irqs, void *data)
+{
+ struct stm32mp_exti_host_data *host_data = dm->host_data;
+ struct stm32mp_exti_chip_data *chip_data;
+ struct irq_fwspec *fwspec = data;
+ struct irq_fwspec p_fwspec;
+ irq_hw_number_t hwirq;
+ struct irq_chip *chip;
+ u32 event_trg;
+ u8 desc_irq;
+ int bank;
+
+ hwirq = fwspec->param[0];
+ if (hwirq >= host_data->drv_data->bank_nr * IRQS_PER_BANK)
+ return -EINVAL;
+
+ bank = hwirq / IRQS_PER_BANK;
+ chip_data = &host_data->chips_data[bank];
+
+ /* Check if event is reserved (Secure) */
+ if (chip_data->event_reserved & BIT(hwirq % IRQS_PER_BANK)) {
+ dev_err(host_data->dev, "event %lu is reserved, secure\n", hwirq);
+ return -EPERM;
+ }
+
+ event_trg = readl_relaxed(host_data->base + chip_data->reg_bank->trg_ofst);
+ chip = (event_trg & BIT(hwirq % IRQS_PER_BANK)) ?
+ &stm32mp_exti_chip : &stm32mp_exti_chip_direct;
+
+ irq_domain_set_hwirq_and_chip(dm, virq, hwirq, chip, chip_data);
+
+ if (host_data->dt_has_irqs_desc) {
+ struct of_phandle_args out_irq;
+ int ret;
+
+ ret = of_irq_parse_one(host_data->dev->of_node, hwirq, &out_irq);
+ if (ret)
+ return ret;
+ /* we only support one parent, so far */
+ if (of_node_to_fwnode(out_irq.np) != dm->parent->fwnode)
+ return -EINVAL;
+
+ of_phandle_args_to_fwspec(out_irq.np, out_irq.args,
+ out_irq.args_count, &p_fwspec);
+
+ return irq_domain_alloc_irqs_parent(dm, virq, 1, &p_fwspec);
+ }
+
+ if (!host_data->drv_data->desc_irqs)
+ return -EINVAL;
+
+ desc_irq = host_data->drv_data->desc_irqs[hwirq];
+ if (desc_irq != EXTI_INVALID_IRQ) {
+ p_fwspec.fwnode = dm->parent->fwnode;
+ p_fwspec.param_count = 3;
+ p_fwspec.param[0] = GIC_SPI;
+ p_fwspec.param[1] = desc_irq;
+ p_fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH;
+
+ return irq_domain_alloc_irqs_parent(dm, virq, 1, &p_fwspec);
+ }
+
+ return 0;
+}
+
+static struct stm32mp_exti_chip_data *stm32mp_exti_chip_init(struct stm32mp_exti_host_data *h_data,
+ u32 bank_idx, struct device_node *node)
+{
+ struct stm32mp_exti_chip_data *chip_data;
+ const struct stm32mp_exti_bank *bank;
+ void __iomem *base = h_data->base;
+
+ bank = h_data->drv_data->exti_banks[bank_idx];
+ chip_data = &h_data->chips_data[bank_idx];
+ chip_data->host_data = h_data;
+ chip_data->reg_bank = bank;
+
+ raw_spin_lock_init(&chip_data->rlock);
+
+ /*
+ * This IP has no reset, so after hot reboot we should
+ * clear registers to avoid residue
+ */
+ writel_relaxed(0, base + bank->imr_ofst);
+
+ /* reserve Secure events */
+ chip_data->event_reserved = readl_relaxed(base + bank->seccfgr_ofst);
+
+ pr_info("%pOF: bank%d\n", node, bank_idx);
+
+ return chip_data;
+}
+
+static const struct irq_domain_ops stm32mp_exti_domain_ops = {
+ .alloc = stm32mp_exti_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static void stm32mp_exti_check_rif(struct stm32mp_exti_host_data *host_data)
+{
+ unsigned int bank, i, event;
+ u32 cid, cidcfgr, hwcfgr1;
+
+ /* bail out if CID filtering is not supported */
+ hwcfgr1 = readl_relaxed(host_data->base + EXTI_HWCFGR1);
+ if ((hwcfgr1 & EXTI_HWCFGR1_CIDWIDTH_MASK) == 0)
+ return;
+
+ for (bank = 0; bank < host_data->drv_data->bank_nr; bank++) {
+ for (i = 0; i < IRQS_PER_BANK; i++) {
+ event = bank * IRQS_PER_BANK + i;
+ cidcfgr = readl_relaxed(host_data->base + EXTI_EnCIDCFGR(event));
+ cid = (cidcfgr & EXTI_CIDCFGR_CID_MASK) >> EXTI_CIDCFGR_CID_SHIFT;
+ if ((cidcfgr & EXTI_CIDCFGR_CFEN_MASK) && cid != EXTI_CID1)
+ host_data->chips_data[bank].event_reserved |= BIT(i);
+ }
+ }
+}
+
+static void stm32mp_exti_remove_irq(void *data)
+{
+ struct irq_domain *domain = data;
+
+ irq_domain_remove(domain);
+}
+
+static int stm32mp_exti_probe(struct platform_device *pdev)
+{
+ const struct stm32mp_exti_drv_data *drv_data;
+ struct irq_domain *parent_domain, *domain;
+ struct stm32mp_exti_host_data *host_data;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ int ret, i;
+
+ host_data = devm_kzalloc(dev, sizeof(*host_data), GFP_KERNEL);
+ if (!host_data)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, host_data);
+ host_data->dev = dev;
+
+ /* check for optional hwspinlock which may be not available yet */
+ ret = of_hwspin_lock_get_id(np, 0);
+ if (ret == -EPROBE_DEFER)
+ /* hwspinlock framework not yet ready */
+ return ret;
+
+ if (ret >= 0) {
+ host_data->hwlock = devm_hwspin_lock_request_specific(dev, ret);
+ if (!host_data->hwlock) {
+ dev_err(dev, "Failed to request hwspinlock\n");
+ return -EINVAL;
+ }
+ } else if (ret != -ENOENT) {
+ /* note: ENOENT is a valid case (means 'no hwspinlock') */
+ dev_err(dev, "Failed to get hwspinlock\n");
+ return ret;
+ }
+
+ /* initialize host_data */
+ drv_data = of_device_get_match_data(dev);
+ if (!drv_data) {
+ dev_err(dev, "no of match data\n");
+ return -ENODEV;
+ }
+ host_data->drv_data = drv_data;
+
+ host_data->chips_data = devm_kcalloc(dev, drv_data->bank_nr,
+ sizeof(*host_data->chips_data),
+ GFP_KERNEL);
+ if (!host_data->chips_data)
+ return -ENOMEM;
+
+ host_data->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(host_data->base))
+ return PTR_ERR(host_data->base);
+
+ for (i = 0; i < drv_data->bank_nr; i++)
+ stm32mp_exti_chip_init(host_data, i, np);
+
+ stm32mp_exti_check_rif(host_data);
+
+ parent_domain = irq_find_host(of_irq_find_parent(np));
+ if (!parent_domain) {
+ dev_err(dev, "GIC interrupt-parent not found\n");
+ return -EINVAL;
+ }
+
+ domain = irq_domain_add_hierarchy(parent_domain, 0,
+ drv_data->bank_nr * IRQS_PER_BANK,
+ np, &stm32mp_exti_domain_ops,
+ host_data);
+
+ if (!domain) {
+ dev_err(dev, "Could not register exti domain\n");
+ return -ENOMEM;
+ }
+
+ ret = devm_add_action_or_reset(dev, stm32mp_exti_remove_irq, domain);
+ if (ret)
+ return ret;
+
+ host_data->dt_has_irqs_desc = of_property_present(np, "interrupts-extended");
+
+ return 0;
+}
+
+static const struct of_device_id stm32mp_exti_ids[] = {
+ { .compatible = "st,stm32mp1-exti", .data = &stm32mp1_drv_data},
+ { .compatible = "st,stm32mp13-exti", .data = &stm32mp13_drv_data},
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32mp_exti_ids);
+
+static const struct dev_pm_ops stm32mp_exti_dev_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(stm32mp_exti_suspend, stm32mp_exti_resume)
+};
+
+static struct platform_driver stm32mp_exti_driver = {
+ .probe = stm32mp_exti_probe,
+ .driver = {
+ .name = "stm32mp_exti",
+ .of_match_table = stm32mp_exti_ids,
+ .pm = &stm32mp_exti_dev_pm_ops,
+ },
+};
+
+module_platform_driver(stm32mp_exti_driver);
+
+MODULE_AUTHOR("Maxime Coquelin <mcoquelin.stm32@gmail.com>");
+MODULE_DESCRIPTION("STM32MP EXTI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/irqchip/irq-sun6i-r.c b/drivers/irqchip/irq-sun6i-r.c
index a01e44049415..99958d470d62 100644
--- a/drivers/irqchip/irq-sun6i-r.c
+++ b/drivers/irqchip/irq-sun6i-r.c
@@ -270,7 +270,7 @@ static const struct irq_domain_ops sun6i_r_intc_domain_ops = {
static int sun6i_r_intc_suspend(void)
{
- u32 buf[BITS_TO_U32(max(SUN6I_NR_TOP_LEVEL_IRQS, SUN6I_NR_MUX_BITS))];
+ u32 buf[BITS_TO_U32(MAX(SUN6I_NR_TOP_LEVEL_IRQS, SUN6I_NR_MUX_BITS))];
int i;
/* Wake IRQs are enabled during system sleep and shutdown. */
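
The max() -> MAX() switch likely matters because the value is used as an array bound: after the minmax rework, lower-case max() is no longer guaranteed to be a compile-time constant expression, whereas the dedicated MAX() macro is. A sketch with assumed constants:

#include <linux/bitops.h>
#include <linux/minmax.h>
#include <linux/types.h>

#define EXAMPLE_A	16
#define EXAMPLE_B	40

/* MAX() folds to a constant, so this is a fixed-size array: */
static u32 example_buf[BITS_TO_U32(MAX(EXAMPLE_A, EXAMPLE_B))];

/*
 * With lower-case max() the bound is not a constant expression: at
 * file scope it fails to compile, and on a function stack it would
 * silently become a variable-length array.
 */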
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
index e760b1278143..0b4312152024 100644
--- a/drivers/irqchip/irq-sunxi-nmi.c
+++ b/drivers/irqchip/irq-sunxi-nmi.c
@@ -186,13 +186,13 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
gc->chip_types[0].chip.irq_eoi = irq_gc_ack_set_bit;
gc->chip_types[0].chip.irq_set_type = sunxi_sc_nmi_set_type;
- gc->chip_types[0].chip.flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED;
+ gc->chip_types[0].chip.flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED |
+ IRQCHIP_SKIP_SET_WAKE;
gc->chip_types[0].regs.ack = reg_offs->pend;
gc->chip_types[0].regs.mask = reg_offs->enable;
gc->chip_types[0].regs.type = reg_offs->ctrl;
gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH;
- gc->chip_types[1].chip.name = gc->chip_types[0].chip.name;
gc->chip_types[1].chip.irq_ack = irq_gc_ack_set_bit;
gc->chip_types[1].chip.irq_mask = irq_gc_mask_clr_bit;
gc->chip_types[1].chip.irq_unmask = irq_gc_mask_set_bit;
diff --git a/drivers/irqchip/irq-tb10x.c b/drivers/irqchip/irq-tb10x.c
index 680586354d12..d59bfbe8c6d0 100644
--- a/drivers/irqchip/irq-tb10x.c
+++ b/drivers/irqchip/irq-tb10x.c
@@ -150,7 +150,6 @@ static int __init of_tb10x_init_irq(struct device_node *ictl,
gc->chip_types[0].regs.mask = AB_IRQCTL_INT_ENABLE;
gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH;
- gc->chip_types[1].chip.name = gc->chip_types[0].chip.name;
gc->chip_types[1].chip.irq_ack = irq_gc_ack_set_bit;
gc->chip_types[1].chip.irq_mask = irq_gc_mask_clr_bit;
gc->chip_types[1].chip.irq_unmask = irq_gc_mask_set_bit;
diff --git a/drivers/irqchip/irq-thead-c900-aclint-sswi.c b/drivers/irqchip/irq-thead-c900-aclint-sswi.c
new file mode 100644
index 000000000000..8ff6e7a1363b
--- /dev/null
+++ b/drivers/irqchip/irq-thead-c900-aclint-sswi.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@gmail.com>
+ */
+
+#define pr_fmt(fmt) "thead-c900-aclint-sswi: " fmt
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/string_choices.h>
+#include <asm/sbi.h>
+#include <asm/vendorid_list.h>
+
+#define THEAD_ACLINT_xSWI_REGISTER_SIZE 4
+
+#define THEAD_C9XX_CSR_SXSTATUS 0x5c0
+#define THEAD_C9XX_SXSTATUS_CLINTEE BIT(17)
+
+static int sswi_ipi_virq __ro_after_init;
+static DEFINE_PER_CPU(void __iomem *, sswi_cpu_regs);
+
+static void thead_aclint_sswi_ipi_send(unsigned int cpu)
+{
+ writel(0x1, per_cpu(sswi_cpu_regs, cpu));
+}
+
+static void thead_aclint_sswi_ipi_clear(void)
+{
+ writel_relaxed(0x0, this_cpu_read(sswi_cpu_regs));
+}
+
+static void thead_aclint_sswi_ipi_handle(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ chained_irq_enter(chip, desc);
+
+ csr_clear(CSR_IP, IE_SIE);
+ thead_aclint_sswi_ipi_clear();
+
+ ipi_mux_process();
+
+ chained_irq_exit(chip, desc);
+}
+
+static int thead_aclint_sswi_starting_cpu(unsigned int cpu)
+{
+ enable_percpu_irq(sswi_ipi_virq, irq_get_trigger_type(sswi_ipi_virq));
+
+ return 0;
+}
+
+static int thead_aclint_sswi_dying_cpu(unsigned int cpu)
+{
+ thead_aclint_sswi_ipi_clear();
+
+ disable_percpu_irq(sswi_ipi_virq);
+
+ return 0;
+}
+
+static int __init thead_aclint_sswi_parse_irq(struct fwnode_handle *fwnode,
+ void __iomem *reg)
+{
+ struct of_phandle_args parent;
+ unsigned long hartid;
+ u32 contexts, i;
+ int rc, cpu;
+
+ contexts = of_irq_count(to_of_node(fwnode));
+ if (!contexts) {
+ pr_err("%pfwP: no ACLINT SSWI context available\n", fwnode);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < contexts; i++) {
+ rc = of_irq_parse_one(to_of_node(fwnode), i, &parent);
+ if (rc)
+ return rc;
+
+ rc = riscv_of_parent_hartid(parent.np, &hartid);
+ if (rc)
+ return rc;
+
+ if (parent.args[0] != RV_IRQ_SOFT)
+ return -ENOTSUPP;
+
+ cpu = riscv_hartid_to_cpuid(hartid);
+
+ per_cpu(sswi_cpu_regs, cpu) = reg + i * THEAD_ACLINT_xSWI_REGISTER_SIZE;
+ }
+
+ pr_info("%pfwP: register %u CPU%s\n", fwnode, contexts, str_plural(contexts));
+
+ return 0;
+}
+
+static int __init thead_aclint_sswi_probe(struct fwnode_handle *fwnode)
+{
+ struct irq_domain *domain;
+ void __iomem *reg;
+ int virq, rc;
+
+ /* If this is a T-HEAD CPU, check whether SSWI is enabled */
+ if (riscv_cached_mvendorid(0) == THEAD_VENDOR_ID &&
+ !(csr_read(THEAD_C9XX_CSR_SXSTATUS) & THEAD_C9XX_SXSTATUS_CLINTEE))
+ return -ENOTSUPP;
+
+ if (!is_of_node(fwnode))
+ return -EINVAL;
+
+ reg = of_iomap(to_of_node(fwnode), 0);
+ if (!reg)
+ return -ENOMEM;
+
+ /* Parse SSWI setting */
+ rc = thead_aclint_sswi_parse_irq(fwnode, reg);
+ if (rc < 0)
+ return rc;
+
+ /* If multiple SSWI devices are present, do not register the IRQ again */
+ if (sswi_ipi_virq)
+ return 0;
+
+ /* Find riscv intc domain and create IPI irq mapping */
+ domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY);
+ if (!domain) {
+ pr_err("%pfwP: Failed to find INTC domain\n", fwnode);
+ return -ENOENT;
+ }
+
+ sswi_ipi_virq = irq_create_mapping(domain, RV_IRQ_SOFT);
+ if (!sswi_ipi_virq) {
+ pr_err("unable to create ACLINT SSWI IRQ mapping\n");
+ return -ENOMEM;
+ }
+
+ /* Register SSWI irq and handler */
+ virq = ipi_mux_create(BITS_PER_BYTE, thead_aclint_sswi_ipi_send);
+ if (virq <= 0) {
+ pr_err("unable to create muxed IPIs\n");
+ irq_dispose_mapping(sswi_ipi_virq);
+ return virq < 0 ? virq : -ENOMEM;
+ }
+
+ irq_set_chained_handler(sswi_ipi_virq, thead_aclint_sswi_ipi_handle);
+
+ cpuhp_setup_state(CPUHP_AP_IRQ_THEAD_ACLINT_SSWI_STARTING,
+ "irqchip/thead-aclint-sswi:starting",
+ thead_aclint_sswi_starting_cpu,
+ thead_aclint_sswi_dying_cpu);
+
+ riscv_ipi_set_virq_range(virq, BITS_PER_BYTE);
+
+ /* Announce that SSWI is providing IPIs */
+ pr_info("providing IPIs using THEAD ACLINT SSWI\n");
+
+ return 0;
+}
+
+static int __init thead_aclint_sswi_early_probe(struct device_node *node,
+ struct device_node *parent)
+{
+ return thead_aclint_sswi_probe(&node->fwnode);
+}
+IRQCHIP_DECLARE(thead_aclint_sswi, "thead,c900-aclint-sswi", thead_aclint_sswi_early_probe);
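
The SSWI driver keys everything off a per-CPU MMIO slot: the sender writes 1 to the target CPU's word, the receiver clears its own. The bare pattern, with illustrative names:

#include <linux/io.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(void __iomem *, example_regs);

static void example_ipi_send(unsigned int target_cpu)
{
	writel(1, per_cpu(example_regs, target_cpu));	/* raise */
}

static void example_ipi_clear(void)
{
	writel_relaxed(0, this_cpu_read(example_regs));	/* ack own slot */
}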
diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
index b83f5cbab123..a887efba262c 100644
--- a/drivers/irqchip/irq-ti-sci-inta.c
+++ b/drivers/irqchip/irq-ti-sci-inta.c
@@ -743,3 +743,4 @@ module_platform_driver(ti_sci_inta_irq_domain_driver);
MODULE_AUTHOR("Lokesh Vutla <lokeshvutla@ti.com>");
MODULE_DESCRIPTION("K3 Interrupt Aggregator driver over TI SCI protocol");
+MODULE_LICENSE("GPL");
diff --git a/drivers/irqchip/irq-ti-sci-intr.c b/drivers/irqchip/irq-ti-sci-intr.c
index c027cd9e4a69..b49a73106c69 100644
--- a/drivers/irqchip/irq-ti-sci-intr.c
+++ b/drivers/irqchip/irq-ti-sci-intr.c
@@ -303,3 +303,4 @@ module_platform_driver(ti_sci_intr_irq_domain_driver);
MODULE_AUTHOR("Lokesh Vutla <lokeshvutla@ticom>");
MODULE_DESCRIPTION("K3 Interrupt Router driver over TI SCI protocol");
+MODULE_LICENSE("GPL");
diff --git a/drivers/irqchip/irq-ts4800.c b/drivers/irqchip/irq-ts4800.c
index b2d61d4f6fe6..960c343d5781 100644
--- a/drivers/irqchip/irq-ts4800.c
+++ b/drivers/irqchip/irq-ts4800.c
@@ -52,7 +52,7 @@ static void ts4800_irq_print_chip(struct irq_data *d, struct seq_file *p)
{
struct ts4800_irq_data *data = irq_data_get_irq_chip_data(d);
- seq_printf(p, "%s", dev_name(&data->pdev->dev));
+ seq_puts(p, dev_name(&data->pdev->dev));
}
static const struct irq_chip ts4800_chip = {
@@ -139,13 +139,11 @@ static int ts4800_ic_probe(struct platform_device *pdev)
return 0;
}
-static int ts4800_ic_remove(struct platform_device *pdev)
+static void ts4800_ic_remove(struct platform_device *pdev)
{
struct ts4800_irq_data *data = platform_get_drvdata(pdev);
irq_domain_remove(data->domain);
-
- return 0;
}
static const struct of_device_id ts4800_ic_of_match[] = {
@@ -155,15 +153,16 @@ static const struct of_device_id ts4800_ic_of_match[] = {
MODULE_DEVICE_TABLE(of, ts4800_ic_of_match);
static struct platform_driver ts4800_ic_driver = {
- .probe = ts4800_ic_probe,
- .remove = ts4800_ic_remove,
+ .probe = ts4800_ic_probe,
+ .remove = ts4800_ic_remove,
.driver = {
- .name = "ts4800-irqc",
- .of_match_table = ts4800_ic_of_match,
+ .name = "ts4800-irqc",
+ .of_match_table = ts4800_ic_of_match,
},
};
module_platform_driver(ts4800_ic_driver);
MODULE_AUTHOR("Damien Riegel <damien.riegel@savoirfairelinux.com>");
+MODULE_DESCRIPTION("Multiplexed-IRQs driver for TS-4800's FPGA");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ts4800_irqc");
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
index 5018a06060e6..0abc8934c2ee 100644
--- a/drivers/irqchip/irq-versatile-fpga.c
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -69,7 +69,7 @@ static void fpga_irq_print_chip(struct irq_data *d, struct seq_file *p)
{
struct fpga_irq_data *f = irq_data_get_irq_chip_data(d);
- seq_printf(p, irq_domain_get_of_node(f->domain)->name);
+ seq_puts(p, irq_domain_get_of_node(f->domain)->name);
}
static const struct irq_chip fpga_chip = {
@@ -128,7 +128,7 @@ static int handle_one_fpga(struct fpga_irq_data *f, struct pt_regs *regs)
* Keep iterating over all registered FPGA IRQ controllers until there are
* no pending interrupts.
*/
-static asmlinkage void __exception_irq_entry fpga_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry fpga_handle_irq(struct pt_regs *regs)
{
int i, handled;
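
This change and the ts4800 one above both move to seq_puts(), but for different reasons: versatile-fpga passed the node name as the format string, so any '%' in it would be parsed as a conversion specifier, while ts4800 merely shortens a "%s" round trip. Minimal illustration:

#include <linux/seq_file.h>

static void example_print_chip(struct seq_file *p, const char *name)
{
	/* seq_printf(p, name) would interpret '%' inside "name" */
	seq_puts(p, name);
}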
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index 9e3d5561e04e..ea93e7236c4a 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -47,9 +47,8 @@
/**
* struct vic_device - VIC PM device
- * @parent_irq: The parent IRQ number of the VIC if cascaded, or 0.
- * @irq: The IRQ number for the base of the VIC.
* @base: The register base for the VIC.
+ * @irq: The IRQ number for the base of the VIC.
* @valid_sources: A bitmask of valid interrupts
* @resume_sources: A bitmask of interrupts for resume.
* @resume_irqs: The IRQs enabled for resume.
diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c
index 238d3d344949..7e08714d507f 100644
--- a/drivers/irqchip/irq-xilinx-intc.c
+++ b/drivers/irqchip/irq-xilinx-intc.c
@@ -189,7 +189,7 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
irqc->intr_mask = 0;
}
- if (irqc->intr_mask >> irqc->nr_irq)
+ if ((u64)irqc->intr_mask >> irqc->nr_irq)
pr_warn("irq-xilinx: mismatch in kind-of-intr param\n");
pr_info("irq-xilinx: %pOF: num_irq=%d, edge=0x%x\n",
diff --git a/drivers/irqchip/irqchip.c b/drivers/irqchip/irqchip.c
index 1eeb0d0156ce..0ee7b6b71f5f 100644
--- a/drivers/irqchip/irqchip.c
+++ b/drivers/irqchip/irqchip.c
@@ -35,11 +35,10 @@ void __init irqchip_init(void)
int platform_irqchip_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct device_node *par_np = of_irq_find_parent(np);
+ struct device_node *par_np __free(device_node) = of_irq_find_parent(np);
of_irq_init_cb_t irq_init_cb = of_device_get_match_data(&pdev->dev);
if (!irq_init_cb) {
- of_node_put(par_np);
return -EINVAL;
}
@@ -55,7 +54,6 @@ int platform_irqchip_probe(struct platform_device *pdev)
* interrupt controller can check for specific domains as necessary.
*/
if (par_np && !irq_find_matching_host(par_np, DOMAIN_BUS_ANY)) {
- of_node_put(par_np);
return -EPROBE_DEFER;
}
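
The __free(device_node) annotation attaches scope-based cleanup (linux/cleanup.h) to par_np, which is why both explicit of_node_put() calls can be dropped: the put now runs automatically on every return path. The idiom in isolation; of_get_parent() stands in for any lookup returning a refcounted node:

#include <linux/cleanup.h>
#include <linux/of.h>

static int example_probe_step(struct device_node *start)
{
	struct device_node *np __free(device_node) = of_get_parent(start);

	if (!np)
		return -ENODEV;

	if (!of_device_is_available(np))
		return -ENODEV;	/* of_node_put(np) runs automatically */

	return 0;		/* ...and here as well */
}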
diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c
index 74b2f124116e..52d77546aacb 100644
--- a/drivers/irqchip/qcom-pdc.c
+++ b/drivers/irqchip/qcom-pdc.c
@@ -21,9 +21,11 @@
#include <linux/types.h>
#define PDC_MAX_GPIO_IRQS 256
+#define PDC_DRV_OFFSET 0x10000
/* Valid only on HW version < 3.2 */
#define IRQ_ENABLE_BANK 0x10
+#define IRQ_ENABLE_BANK_MAX (IRQ_ENABLE_BANK + BITS_TO_BYTES(PDC_MAX_GPIO_IRQS))
#define IRQ_i_CFG 0x110
/* Valid only on HW version >= 3.2 */
@@ -46,13 +48,20 @@ struct pdc_pin_region {
static DEFINE_RAW_SPINLOCK(pdc_lock);
static void __iomem *pdc_base;
+static void __iomem *pdc_prev_base;
static struct pdc_pin_region *pdc_region;
static int pdc_region_cnt;
static unsigned int pdc_version;
+static bool pdc_x1e_quirk;
+
+static void pdc_base_reg_write(void __iomem *base, int reg, u32 i, u32 val)
+{
+ writel_relaxed(val, base + reg + i * sizeof(u32));
+}
static void pdc_reg_write(int reg, u32 i, u32 val)
{
- writel_relaxed(val, pdc_base + reg + i * sizeof(u32));
+ pdc_base_reg_write(pdc_base, reg, i, val);
}
static u32 pdc_reg_read(int reg, u32 i)
@@ -60,6 +69,34 @@ static u32 pdc_reg_read(int reg, u32 i)
return readl_relaxed(pdc_base + reg + i * sizeof(u32));
}
+static void pdc_x1e_irq_enable_write(u32 bank, u32 enable)
+{
+ void __iomem *base;
+
+ /* Remap the write access to work around a hardware bug on X1E */
+ switch (bank) {
+ case 0 ... 1:
+ /* Use previous DRV (client) region and shift to bank 3-4 */
+ base = pdc_prev_base;
+ bank += 3;
+ break;
+ case 2 ... 4:
+ /* Use our own region and shift to bank 0-2 */
+ base = pdc_base;
+ bank -= 2;
+ break;
+ case 5:
+ /* No fixup required for bank 5 */
+ base = pdc_base;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ pdc_base_reg_write(base, IRQ_ENABLE_BANK, bank, enable);
+}
+
static void __pdc_enable_intr(int pin_out, bool on)
{
unsigned long enable;
@@ -72,7 +109,11 @@ static void __pdc_enable_intr(int pin_out, bool on)
enable = pdc_reg_read(IRQ_ENABLE_BANK, index);
__assign_bit(mask, &enable, on);
- pdc_reg_write(IRQ_ENABLE_BANK, index, enable);
+
+ if (pdc_x1e_quirk)
+ pdc_x1e_irq_enable_write(index, enable);
+ else
+ pdc_reg_write(IRQ_ENABLE_BANK, index, enable);
} else {
enable = pdc_reg_read(IRQ_i_CFG, pin_out);
__assign_bit(IRQ_i_CFG_IRQ_ENABLE, &enable, on);
@@ -324,10 +365,29 @@ static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
if (res_size > resource_size(&res))
pr_warn("%pOF: invalid reg size, please fix DT\n", node);
+ /*
+ * PDC has multiple DRV regions, each one provides the same set of
+ * registers for a particular client in the system. Due to a hardware
+ * bug on X1E, some writes to the IRQ_ENABLE_BANK register must be
+ * issued inside the previous region. This region belongs to
+ * a different client and is not described in the device tree. Map the
+ * region with the expected offset to preserve support for old DTs.
+ */
+ if (of_device_is_compatible(node, "qcom,x1e80100-pdc")) {
+ pdc_prev_base = ioremap(res.start - PDC_DRV_OFFSET, IRQ_ENABLE_BANK_MAX);
+ if (!pdc_prev_base) {
+ pr_err("%pOF: unable to map previous PDC DRV region\n", node);
+ return -ENXIO;
+ }
+
+ pdc_x1e_quirk = true;
+ }
+
pdc_base = ioremap(res.start, res_size);
if (!pdc_base) {
pr_err("%pOF: unable to map PDC registers\n", node);
- return -ENXIO;
+ ret = -ENXIO;
+ goto fail;
}
pdc_version = pdc_reg_read(PDC_VERSION_REG, 0);
@@ -363,6 +423,7 @@ static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
fail:
kfree(pdc_region);
iounmap(pdc_base);
+ iounmap(pdc_prev_base);
return ret;
}
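
The X1E quirk boils down to a fixed bank remap between two DRV regions. The same mapping as a standalone helper, mirroring the switch in pdc_x1e_irq_enable_write():

#include <linux/errno.h>
#include <linux/types.h>

static int example_x1e_remap(u32 bank, bool *use_prev_drv)
{
	switch (bank) {
	case 0 ... 1:		/* previous client's region, banks 3-4 */
		*use_prev_drv = true;
		return bank + 3;
	case 2 ... 4:		/* own region, banks 0-2 */
		*use_prev_drv = false;
		return bank - 2;
	case 5:			/* bank 5 needs no fixup */
		*use_prev_drv = false;
		return 5;
	default:
		return -EINVAL;
	}
}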