Diffstat (limited to 'drivers/irqchip')
165 files changed, 32321 insertions, 8099 deletions
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 3d1e60779078..f334f49c9791 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -1,24 +1,24 @@ +# SPDX-License-Identifier: GPL-2.0-only menu "IRQ chip support" config IRQCHIP def_bool y - depends on OF_IRQ + depends on (OF_IRQ || ACPI_GENERIC_GSI) config ARM_GIC bool - select IRQ_DOMAIN + depends on OF select IRQ_DOMAIN_HIERARCHY - select GENERIC_IRQ_MULTI_HANDLER - select GENERIC_IRQ_EFFECTIVE_AFF_MASK + select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP config ARM_GIC_PM bool depends on PM select ARM_GIC - select PM_CLK config ARM_GIC_MAX_NR int + depends on ARM_GIC default 2 if ARCH_REALVIEW default 1 @@ -26,30 +26,30 @@ config ARM_GIC_V2M bool depends on PCI select ARM_GIC + select IRQ_MSI_LIB select PCI_MSI + select IRQ_MSI_IOMMU config GIC_NON_BANKED bool config ARM_GIC_V3 bool - select IRQ_DOMAIN - select GENERIC_IRQ_MULTI_HANDLER select IRQ_DOMAIN_HIERARCHY - select PARTITION_PERCPU - select GENERIC_IRQ_EFFECTIVE_AFF_MASK + select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP + select HAVE_ARM_SMCCC_DISCOVERY + select IRQ_MSI_IOMMU -config ARM_GIC_V3_ITS +config ARM_GIC_ITS_PARENT bool - select GENERIC_MSI_IRQ_DOMAIN - default ARM_GIC_V3 -config ARM_GIC_V3_ITS_PCI +config ARM_GIC_V3_ITS bool - depends on ARM_GIC_V3_ITS - depends on PCI - depends on PCI_MSI - default ARM_GIC_V3_ITS + select GENERIC_MSI_IRQ + select IRQ_MSI_LIB + select ARM_GIC_ITS_PARENT + default ARM_GIC_V3 + select IRQ_MSI_IOMMU config ARM_GIC_V3_ITS_FSL_MC bool @@ -57,16 +57,22 @@ config ARM_GIC_V3_ITS_FSL_MC depends on FSL_MC_BUS default ARM_GIC_V3_ITS +config ARM_GIC_V5 + bool + select IRQ_DOMAIN_HIERARCHY + select GENERIC_IRQ_EFFECTIVE_AFF_MASK + select GENERIC_MSI_IRQ + select IRQ_MSI_LIB + select ARM_GIC_ITS_PARENT + config ARM_NVIC bool - select IRQ_DOMAIN select IRQ_DOMAIN_HIERARCHY select GENERIC_IRQ_CHIP config ARM_VIC bool select IRQ_DOMAIN - select GENERIC_IRQ_MULTI_HANDLER config ARM_VIC_NR int @@ -77,54 +83,94 @@ config ARM_VIC_NR The maximum number of VICs available in the system, for power management. +config IRQ_MSI_LIB + bool + select GENERIC_MSI_IRQ + config ARMADA_370_XP_IRQ bool select GENERIC_IRQ_CHIP select PCI_MSI if PCI - select GENERIC_IRQ_EFFECTIVE_AFF_MASK + select IRQ_MSI_LIB if PCI + select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP config ALPINE_MSI bool depends on PCI select PCI_MSI + select IRQ_MSI_LIB select GENERIC_IRQ_CHIP +config AL_FIC + bool "Amazon's Annapurna Labs Fabric Interrupt Controller" + depends on OF + depends on HAS_IOMEM + select GENERIC_IRQ_CHIP + select IRQ_DOMAIN + help + Support Amazon's Annapurna Labs Fabric Interrupt Controller. + config ATMEL_AIC_IRQ bool select GENERIC_IRQ_CHIP select IRQ_DOMAIN - select GENERIC_IRQ_MULTI_HANDLER select SPARSE_IRQ config ATMEL_AIC5_IRQ bool select GENERIC_IRQ_CHIP select IRQ_DOMAIN - select GENERIC_IRQ_MULTI_HANDLER select SPARSE_IRQ config I8259 bool select IRQ_DOMAIN +config BCM2712_MIP + tristate "Broadcom BCM2712 MSI-X Interrupt Peripheral support" + depends on ARCH_BRCMSTB || ARCH_BCM2835 || COMPILE_TEST + default m if ARCH_BRCMSTB || ARCH_BCM2835 + depends on ARM_GIC + select GENERIC_IRQ_CHIP + select IRQ_DOMAIN_HIERARCHY + select GENERIC_MSI_IRQ + select IRQ_MSI_LIB + help + Enable support for the Broadcom BCM2712 MSI-X target peripheral + (MIP) needed by brcmstb PCIe to handle MSI-X interrupts on + Raspberry Pi 5. + + If unsure say n. 
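A recurring theme in this hunk is the new IRQ_MSI_LIB symbol: MSI controller drivers no longer hand-roll a leaf MSI domain with their own struct msi_domain_info, they register an MSI parent domain and let the library build per-device domains. A minimal sketch of the pattern, with hypothetical names (the irq-alpine-msi.c hunk later in this diff is a complete conversion):

    #include <linux/irqchip/irq-msi-lib.h>
    #include <linux/msi.h>

    static const struct irq_domain_ops my_middle_domain_ops = {
    	.select	= msi_lib_irq_domain_select,	/* lets the lib match MSI bus tokens */
    	.alloc	= my_middle_domain_alloc,	/* hypothetical */
    	.free	= my_middle_domain_free,	/* hypothetical */
    };

    static const struct msi_parent_ops my_msi_parent_ops = {
    	.supported_flags	= MSI_GENERIC_FLAGS_MASK | MSI_FLAG_PCI_MSIX,
    	.required_flags		= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
    	.bus_select_token	= DOMAIN_BUS_NEXUS,
    	.bus_select_mask	= MATCH_PCI_MSI,
    	.prefix			= "MY-",
    	.init_dev_msi_info	= msi_lib_init_dev_msi_info,
    };

    /* Instead of a hand-rolled middle domain plus pci_msi_create_irq_domain(): */
    struct irq_domain_info info = {
    	.fwnode	= fwnode,
    	.ops	= &my_middle_domain_ops,
    	.parent	= parent_domain,	/* e.g. the GIC domain */
    };

    if (!msi_create_parent_irq_domain(&info, &my_msi_parent_ops))
    	return -ENOMEM;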
+ config BCM6345_L1_IRQ bool select GENERIC_IRQ_CHIP select IRQ_DOMAIN - select GENERIC_IRQ_EFFECTIVE_AFF_MASK + select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP config BCM7038_L1_IRQ - bool + tristate "Broadcom STB 7038-style L1/L2 interrupt controller driver" + depends on ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST + default ARCH_BRCMSTB || BMIPS_GENERIC select GENERIC_IRQ_CHIP select IRQ_DOMAIN - select GENERIC_IRQ_EFFECTIVE_AFF_MASK + select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP config BCM7120_L2_IRQ - bool + tristate "Broadcom STB 7120-style L2 interrupt controller driver" + depends on ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST + default ARCH_BRCMSTB || BMIPS_GENERIC select GENERIC_IRQ_CHIP select IRQ_DOMAIN config BRCMSTB_L2_IRQ + tristate "Broadcom STB generic L2 interrupt controller driver" + depends on ARCH_BCM2835 || ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST + default ARCH_BCM2835 || ARCH_BRCMSTB || BMIPS_GENERIC + select GENERIC_IRQ_CHIP + select IRQ_DOMAIN + +config DAVINCI_CP_INTC bool select GENERIC_IRQ_CHIP select IRQ_DOMAIN @@ -132,12 +178,16 @@ config BRCMSTB_L2_IRQ config DW_APB_ICTL bool select GENERIC_IRQ_CHIP + select IRQ_DOMAIN_HIERARCHY + +config ECONET_EN751221_INTC + bool + select GENERIC_IRQ_CHIP select IRQ_DOMAIN config FARADAY_FTINTC010 bool select IRQ_DOMAIN - select GENERIC_IRQ_MULTI_HANDLER select SPARSE_IRQ config HISILICON_IRQ_MBIGEN @@ -150,22 +200,38 @@ config IMGPDC_IRQ select GENERIC_IRQ_CHIP select IRQ_DOMAIN +config IXP4XX_IRQ + bool + select IRQ_DOMAIN + select SPARSE_IRQ + +config LAN966X_OIC + tristate "Microchip LAN966x OIC Support" + depends on MCHP_LAN966X_PCI || COMPILE_TEST + select GENERIC_IRQ_CHIP + select IRQ_DOMAIN + help + Enable support for the LAN966x Outbound Interrupt Controller. + This controller is present on the Microchip LAN966x PCI device and + maps the internal interrupt sources to PCIe interrupts. + + To compile this driver as a module, choose M here: the module + will be called irq-lan966x-oic. + config MADERA_IRQ tristate config IRQ_MIPS_CPU bool select GENERIC_IRQ_CHIP - select GENERIC_IRQ_IPI if SYS_SUPPORTS_MULTITHREADING + select GENERIC_IRQ_IPI if SMP && SYS_SUPPORTS_MULTITHREADING select IRQ_DOMAIN - select IRQ_DOMAIN_HIERARCHY if GENERIC_IRQ_IPI - select GENERIC_IRQ_EFFECTIVE_AFF_MASK + select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP config CLPS711X_IRQCHIP bool depends on ARCH_CLPS711X select IRQ_DOMAIN - select GENERIC_IRQ_MULTI_HANDLER select SPARSE_IRQ default y @@ -184,7 +250,6 @@ config OMAP_IRQCHIP config ORION_IRQCHIP bool select IRQ_DOMAIN - select GENERIC_IRQ_MULTI_HANDLER config PIC32_EVIC bool @@ -203,13 +268,49 @@ config RDA_INTC select IRQ_DOMAIN config RENESAS_INTC_IRQPIN - bool + bool "Renesas INTC External IRQ Pin Support" if COMPILE_TEST select IRQ_DOMAIN + help + Enable support for the Renesas Interrupt Controller for external + interrupt pins, as found on SH/R-Mobile and R-Car Gen1 SoCs. config RENESAS_IRQC - bool + bool "Renesas R-Mobile APE6, R-Car Gen{2,3} and RZ/G{1,2} IRQC support" if COMPILE_TEST select GENERIC_IRQ_CHIP select IRQ_DOMAIN + help + Enable support for the Renesas Interrupt Controller for external + devices, as found on R-Mobile APE6, R-Car Gen{2,3} and RZ/G{1,2} SoCs. + +config RENESAS_RZA1_IRQC + bool "Renesas RZ/A1 IRQC support" if COMPILE_TEST + select IRQ_DOMAIN_HIERARCHY + help + Enable support for the Renesas RZ/A1 Interrupt Controller, to use up + to 8 external interrupts with configurable sense select. 
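Most of the small controllers in this range (the Broadcom L1/L2 blocks, ECONET_EN751221_INTC, LAN966X_OIC, ...) select GENERIC_IRQ_CHIP and describe their mask/ack registers instead of open-coding irq_chip callbacks. Roughly, with hypothetical register offsets (irq-al-fic.c, added in full further down, is a real instance of this pattern):

    domain = irq_domain_create_linear(fwnode, 32, &irq_generic_chip_ops, priv);

    ret = irq_alloc_domain_generic_chips(domain, 32, 1, "my-intc",
    				     handle_level_irq, 0, 0,
    				     IRQ_GC_INIT_MASK_CACHE);

    gc = irq_get_domain_generic_chip(domain, 0);
    gc->reg_base			= mmio_base;
    gc->chip_types->regs.mask	= MY_INTC_MASK;		/* hypothetical offset */
    gc->chip_types->regs.ack	= MY_INTC_CAUSE;	/* hypothetical offset */
    gc->chip_types->chip.irq_mask	= irq_gc_mask_set_bit;
    gc->chip_types->chip.irq_unmask	= irq_gc_mask_clr_bit;
    gc->chip_types->chip.irq_ack	= irq_gc_ack_clr_bit;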
+ +config RENESAS_RZG2L_IRQC + bool "Renesas RZ/G2L (and alike SoC) IRQC support" if COMPILE_TEST + select GENERIC_IRQ_CHIP + select IRQ_DOMAIN_HIERARCHY + help + Enable support for the Renesas RZ/G2L (and alike SoC) Interrupt Controller + for external devices. + +config RENESAS_RZV2H_ICU + bool "Renesas RZ/V2H(P) ICU support" if COMPILE_TEST + select GENERIC_IRQ_CHIP + select IRQ_DOMAIN_HIERARCHY + help + Enable support for the Renesas RZ/V2H(P) Interrupt Control Unit (ICU) + +config SL28CPLD_INTC + bool "Kontron sl28cpld IRQ controller" + depends on MFD_SL28CPLD=y || COMPILE_TEST + select REGMAP_IRQ + help + Interrupt controller driver for the board management controller + found on the Kontron sl28 CPLD. config ST_IRQCHIP bool @@ -218,9 +319,16 @@ config ST_IRQCHIP help Enables SysCfg Controlled IRQs on STi based platforms. -config TANGO_IRQ +config SUN4I_INTC + bool + +config SUN6I_R_INTC + bool + select IRQ_DOMAIN_HIERARCHY + select IRQ_FASTEOI_HIERARCHY_HANDLERS + +config SUNXI_NMI_INTC bool - select IRQ_DOMAIN select GENERIC_IRQ_CHIP config TB10X_IRQC @@ -248,11 +356,16 @@ config VERSATILE_FPGA_IRQ_NR config XTENSA_MX bool select IRQ_DOMAIN - select GENERIC_IRQ_EFFECTIVE_AFF_MASK + select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP config XILINX_INTC - bool + bool "Xilinx Interrupt Controller IP" + depends on OF_ADDRESS select IRQ_DOMAIN + help + Support for the Xilinx Interrupt Controller IP core. + This is used as a primary controller with MicroBlaze and can also + be used as a secondary chained controller on other platforms. config IRQ_CROSSBAR bool @@ -271,7 +384,8 @@ config KEYSTONE_IRQ config MIPS_GIC bool - select GENERIC_IRQ_IPI + select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP + select GENERIC_IRQ_IPI if SMP select IRQ_DOMAIN_HIERARCHY select MIPS_CM @@ -280,13 +394,17 @@ config INGENIC_IRQ depends on MACH_INGENIC default y -config RENESAS_H8300H_INTC - bool - select IRQ_DOMAIN +config INGENIC_TCU_IRQ + bool "Ingenic JZ47xx TCU interrupt controller" + default MACH_INGENIC + depends on MIPS || COMPILE_TEST + select MFD_SYSCON + select GENERIC_IRQ_CHIP + help + Support for interrupts in the Timer/Counter Unit (TCU) of the Ingenic + JZ47xx SoCs. -config RENESAS_H8S_INTC - bool - select IRQ_DOMAIN + If unsure, say N. config IMX_GPCV2 bool @@ -305,6 +423,7 @@ config MSCC_OCELOT_IRQ select GENERIC_IRQ_CHIP config MVEBU_GICP + select IRQ_MSI_LIB bool config MVEBU_ICU @@ -312,7 +431,8 @@ config MVEBU_ICU config MVEBU_ODMI bool - select GENERIC_MSI_IRQ_DOMAIN + select IRQ_MSI_LIB + select GENERIC_MSI_IRQ config MVEBU_PIC bool @@ -320,19 +440,24 @@ config MVEBU_PIC config MVEBU_SEI bool -config LS_SCFG_MSI +config LS_EXTIRQ def_bool y if SOC_LS1021A || ARCH_LAYERSCAPE - depends on PCI && PCI_MSI + select MFD_SYSCON -config PARTITION_PERCPU - bool +config LS_SCFG_MSI + def_bool y if SOC_LS1021A || ARCH_LAYERSCAPE + select IRQ_MSI_IOMMU + depends on PCI_MSI + select IRQ_MSI_LIB -config EZNPS_GIC - bool "NPS400 Global Interrupt Manager (GIM)" - depends on ARC || (COMPILE_TEST && !64BIT) - select IRQ_DOMAIN +config STM32MP_EXTI + tristate "STM32MP extended interrupts and event controller" + depends on (ARCH_STM32 && !ARM_SINGLE_ARMV7M) || COMPILE_TEST + default ARCH_STM32 && !ARM_SINGLE_ARMV7M + select IRQ_DOMAIN_HIERARCHY + select GENERIC_IRQ_CHIP help - Support the EZchip NPS400 global interrupt controller + Support STM32MP EXTI (extended interrupts and event) controller. 
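Several entries in these hunks (BCM7038_L1_IRQ, BCM7120_L2_IRQ, BRCMSTB_L2_IRQ, LAN966X_OIC, STM32MP_EXTI) turn from bool into tristate. A loadable irqchip cannot use the built-in IRQCHIP_DECLARE() table; the usual shape, assuming the IRQCHIP_PLATFORM_DRIVER helpers from <linux/irqchip.h> that existing modular irqchips such as QCOM_PDC use (names here are hypothetical):

    #include <linux/irqchip.h>
    #include <linux/module.h>

    /* Same init signature an IRQCHIP_DECLARE() entry would use */
    static int my_intc_init(struct device_node *node, struct device_node *parent)
    {
    	return 0; /* probe the hardware, create the irq domain, ... */
    }

    IRQCHIP_PLATFORM_DRIVER_BEGIN(my_intc)
    IRQCHIP_MATCH("vendor,my-intc", my_intc_init)
    IRQCHIP_PLATFORM_DRIVER_END(my_intc)
    MODULE_DESCRIPTION("Hypothetical modular interrupt controller driver");
    MODULE_LICENSE("GPL");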
config STM32_EXTI bool @@ -342,7 +467,6 @@ config QCOM_IRQ_COMBINER bool "QCOM IRQ combiner support" depends on ARCH_QCOM && ACPI - select IRQ_DOMAIN select IRQ_DOMAIN_HIERARCHY help Say yes here to add support for the IRQ combiner devices embedded @@ -357,9 +481,9 @@ config IRQ_UNIPHIER_AIDET Support for the UniPhier AIDET (ARM Interrupt Detector). config MESON_IRQ_GPIO - bool "Meson GPIO Interrupt Multiplexer" - depends on ARCH_MESON - select IRQ_DOMAIN + tristate "Meson GPIO Interrupt Multiplexer" + depends on ARCH_MESON || COMPILE_TEST + default ARCH_MESON select IRQ_DOMAIN_HIERARCHY help Support Meson SoC Family GPIO Interrupt Multiplexer @@ -367,27 +491,36 @@ config GOLDFISH_PIC bool "Goldfish programmable interrupt controller" depends on MIPS && (GOLDFISH || COMPILE_TEST) + select GENERIC_IRQ_CHIP select IRQ_DOMAIN help Say yes here to enable Goldfish interrupt controller driver used for Goldfish based virtual platforms. config QCOM_PDC - bool "QCOM PDC" + tristate "QCOM PDC" depends on ARCH_QCOM - select IRQ_DOMAIN select IRQ_DOMAIN_HIERARCHY help Power Domain Controller driver to manage and configure wakeup IRQs for Qualcomm Technologies Inc (QTI) mobile chips. +config QCOM_MPM + tristate "QCOM MPM" + depends on ARCH_QCOM + depends on MAILBOX + select IRQ_DOMAIN_HIERARCHY + help + MSM Power Manager driver to manage and configure wakeup + IRQs for Qualcomm Technologies Inc (QTI) mobile chips. + config CSKY_MPINTC - bool "C-SKY Multi Processor Interrupt Controller" + bool depends on CSKY help Say yes here to enable C-SKY SMP interrupt controller driver used for C-SKY SMP system. - In fact it's not mmio map in hw and it use ld/st to visit the + In fact it's not mmio map in hardware and it uses ld/st to visit the controller's register inside CPU. config CSKY_APB_INTC @@ -395,7 +528,7 @@ config CSKY_APB_INTC depends on CSKY help Say yes here to enable C-SKY APB interrupt controller driver used - by C-SKY single core SOC system. It use mmio map apb-bus to visit + by C-SKY single core SOC system. It uses mmio map apb-bus to visit the controller's register. config IMX_IRQSTEER @@ -406,16 +539,284 @@ config IMX_IRQSTEER help Support for the i.MX IRQSTEER interrupt multiplexer/remapper. -endmenu +config IMX_INTMUX + bool "i.MX INTMUX support" if COMPILE_TEST + default y if ARCH_MXC + select IRQ_DOMAIN + help + Support for the i.MX INTMUX interrupt multiplexer. + +config IMX_MU_MSI + tristate "i.MX MU used as MSI controller" + depends on OF && HAS_IOMEM + depends on ARCH_MXC || COMPILE_TEST + depends on ARM || ARM64 + default m if ARCH_MXC + select IRQ_DOMAIN + select IRQ_DOMAIN_HIERARCHY + select GENERIC_MSI_IRQ + select IRQ_MSI_LIB + help + Provide a driver for the i.MX Messaging Unit block used as a + CPU-to-CPU MSI controller. This requires a specially crafted DT + to make use of this driver. + + If unsure, say N + +config LS1X_IRQ + bool "Loongson-1 Interrupt Controller" + depends on MACH_LOONGSON32 + default y + select IRQ_DOMAIN + select GENERIC_IRQ_CHIP + help + Support for the Loongson-1 platform Interrupt Controller. + +config TI_SCI_INTR_IRQCHIP + tristate "TI SCI INTR Interrupt Controller" + depends on TI_SCI_PROTOCOL + depends on ARCH_K3 || COMPILE_TEST + select IRQ_DOMAIN_HIERARCHY + help + This enables irqchip driver support for the K3 Interrupt router + over the TI System Control Interface available on some newer TI SoCs. + If you wish to use interrupt router irq resources managed by the + TI System Controller, say Y here. Otherwise, say N. 
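TI_SCI_INTR_IRQCHIP, like most new entries here, selects IRQ_DOMAIN_HIERARCHY: the router installs its own chip for a virq and then allocates the backing interrupt from the parent domain (the GIC) in the same operation. The core of such an .alloc callback looks roughly like this, with hypothetical helpers (the alpine_msix_middle_domain_alloc() hunk below is a concrete example):

    static int my_domain_alloc(struct irq_domain *domain, unsigned int virq,
    			   unsigned int nr_irqs, void *arg)
    {
    	struct irq_fwspec parent_fwspec;
    	irq_hw_number_t hwirq;
    	int i, ret;

    	/* hypothetical: pick free router outputs for this allocation */
    	hwirq = my_pick_free_hwirq(domain->host_data, nr_irqs);

    	for (i = 0; i < nr_irqs; i++) {
    		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
    					      &my_irq_chip, domain->host_data);

    		/* hypothetical: describe the parent (GIC) input backing it */
    		my_fill_parent_fwspec(&parent_fwspec, domain->parent, hwirq + i);
    		ret = irq_domain_alloc_irqs_parent(domain, virq + i, 1,
    						   &parent_fwspec);
    		if (ret)
    			return ret;
    	}
    	return 0;
    }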
+ +config TI_SCI_INTA_IRQCHIP + tristate "TI SCI INTA Interrupt Controller" + depends on TI_SCI_PROTOCOL + depends on ARCH_K3 || (COMPILE_TEST && ARM64) + select IRQ_DOMAIN_HIERARCHY + select TI_SCI_INTA_MSI_DOMAIN + help + This enables irqchip driver support for the K3 Interrupt aggregator + over the TI System Control Interface available on some newer TI SoCs. + If you wish to use interrupt aggregator irq resources managed by the + TI System Controller, say Y here. Otherwise, say N. + +config TI_PRUSS_INTC + tristate + depends on TI_PRUSS + default TI_PRUSS + select IRQ_DOMAIN + help + This enables support for the PRU-ICSS Local Interrupt Controller + present within a PRU-ICSS subsystem on various TI SoCs. + The PRUSS INTC enables various interrupts to be routed to multiple + different processors within the SoC. + +config RISCV_INTC + bool + depends on RISCV + select IRQ_DOMAIN_HIERARCHY + +config RISCV_APLIC + bool + depends on RISCV + select IRQ_DOMAIN_HIERARCHY + +config RISCV_APLIC_MSI + bool + depends on RISCV_APLIC + select GENERIC_MSI_IRQ + default RISCV_APLIC + +config RISCV_IMSIC + bool + depends on RISCV + select IRQ_DOMAIN_HIERARCHY + select GENERIC_IRQ_MATRIX_ALLOCATOR + select GENERIC_MSI_IRQ + select IRQ_MSI_LIB + +config RISCV_RPMI_SYSMSI + bool + depends on RISCV && MAILBOX + select IRQ_DOMAIN_HIERARCHY + select GENERIC_MSI_IRQ + default RISCV config SIFIVE_PLIC - bool "SiFive Platform-Level Interrupt Controller" + bool depends on RISCV + select IRQ_DOMAIN_HIERARCHY + select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP + +config STARFIVE_JH8100_INTC + bool "StarFive JH8100 External Interrupt Controller" + depends on ARCH_STARFIVE || COMPILE_TEST + default ARCH_STARFIVE + select IRQ_DOMAIN_HIERARCHY + help + This enables support for the INTC chip found in the StarFive JH8100 + SoC. + + If you don't know what to do here, say Y. + +config ACLINT_SSWI + bool "RISC-V ACLINT S-mode IPI Interrupt Controller" + depends on RISCV + depends on SMP + select IRQ_DOMAIN_HIERARCHY + select GENERIC_IRQ_IPI_MUX + help + This enables support for variants of the RISC-V ACLINT-SSWI device. + Supported variants are: + - T-HEAD, with compatible "thead,c900-aclint-sswi" + - MIPS P8700, with compatible "mips,p8700-aclint-sswi" + + If you don't know what to do here, say Y. + +# Backwards compatibility so oldconfig does not drop it. +config THEAD_C900_ACLINT_SSWI + bool + select ACLINT_SSWI + +config EXYNOS_IRQ_COMBINER + bool "Samsung Exynos IRQ combiner support" if COMPILE_TEST + depends on (ARCH_EXYNOS && ARM) || COMPILE_TEST + help + Say yes here to add support for the IRQ combiner devices embedded + in Samsung Exynos chips. + +config IRQ_LOONGARCH_CPU + bool + select GENERIC_IRQ_CHIP + select IRQ_DOMAIN + select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP + select LOONGSON_HTVEC + select LOONGSON_LIOINTC + select LOONGSON_EIOINTC + select LOONGSON_PCH_PIC + select LOONGSON_PCH_MSI + select LOONGSON_PCH_LPC + help + Support for the LoongArch CPU Interrupt Controller. For details of + irq chip hierarchy on LoongArch platforms please read the document + Documentation/arch/loongarch/irq-chip-model.rst. + +config LOONGSON_LIOINTC + bool "Loongson Local I/O Interrupt Controller" + depends on MACH_LOONGSON64 + default y + select IRQ_DOMAIN + select GENERIC_IRQ_CHIP + help + Support for the Loongson Local I/O Interrupt Controller. 
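ACLINT_SSWI (like APPLE_AIC below) selects GENERIC_IRQ_IPI_MUX, which multiplexes all of the kernel's IPIs onto a single per-CPU hardware interrupt. In outline, using the names from the irq-aclint-sswi.c file this diff adds:

    /* Tell the mux how to kick a CPU; get BITS_PER_BYTE muxed IPI virqs back */
    virq = ipi_mux_create(BITS_PER_BYTE, aclint_sswi_ipi_send);

    /* The single hardware IPI demultiplexes into the muxed IPIs */
    static void aclint_sswi_ipi_handle(struct irq_desc *desc)
    {
    	struct irq_chip *chip = irq_desc_get_chip(desc);

    	chained_irq_enter(chip, desc);
    	aclint_sswi_ipi_clear();	/* ack the per-CPU doorbell register */
    	ipi_mux_process();		/* dispatch whichever muxed IPIs are pending */
    	chained_irq_exit(chip, desc);
    }

    irq_set_chained_handler(sswi_ipi_virq, aclint_sswi_ipi_handle);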
+ +config LOONGSON_EIOINTC + bool "Loongson Extend I/O Interrupt Controller" + depends on LOONGARCH + depends on MACH_LOONGSON64 + default MACH_LOONGSON64 + select IRQ_DOMAIN_HIERARCHY + select GENERIC_IRQ_CHIP + help + Support for the Loongson3 Extend I/O Interrupt Vector Controller. + +config LOONGSON_HTPIC + bool "Loongson3 HyperTransport PIC Controller" + depends on MACH_LOONGSON64 && MIPS + default y + select IRQ_DOMAIN + select GENERIC_IRQ_CHIP + help + Support for the Loongson-3 HyperTransport PIC Controller. + +config LOONGSON_HTVEC + bool "Loongson HyperTransport Interrupt Vector Controller" + depends on MACH_LOONGSON64 + default MACH_LOONGSON64 + select IRQ_DOMAIN_HIERARCHY + help + Support for the Loongson HyperTransport Interrupt Vector Controller. + +config LOONGSON_PCH_PIC + bool "Loongson PCH PIC Controller" + depends on MACH_LOONGSON64 + default MACH_LOONGSON64 + select IRQ_DOMAIN_HIERARCHY + select IRQ_FASTEOI_HIERARCHY_HANDLERS + help + Support for the Loongson PCH PIC Controller. + +config LOONGSON_PCH_MSI + bool "Loongson PCH MSI Controller" + depends on MACH_LOONGSON64 + depends on PCI + default MACH_LOONGSON64 + select IRQ_DOMAIN_HIERARCHY + select IRQ_MSI_LIB + select PCI_MSI help - This enables support for the PLIC chip found in SiFive (and - potentially other) RISC-V systems. The PLIC controls devices - interrupts and connects them to each core's local interrupt - controller. Aside from timer and software interrupts, all other - interrupt sources are subordinate to the PLIC. + Support for the Loongson PCH MSI Controller. - If you don't know what to do here, say Y. +config LOONGSON_PCH_LPC + bool "Loongson PCH LPC Controller" + depends on LOONGARCH + depends on MACH_LOONGSON64 + default MACH_LOONGSON64 + select IRQ_DOMAIN_HIERARCHY + help + Support for the Loongson PCH LPC Controller. + +config MST_IRQ + bool "MStar Interrupt Controller" + depends on ARCH_MEDIATEK || ARCH_MSTARV7 || COMPILE_TEST + default ARCH_MEDIATEK + select IRQ_DOMAIN + select IRQ_DOMAIN_HIERARCHY + help + Support MStar Interrupt Controller. + +config WPCM450_AIC + bool "Nuvoton WPCM450 Advanced Interrupt Controller" + depends on ARCH_WPCM450 + help + Support for the interrupt controller in the Nuvoton WPCM450 BMC SoC. + +config IRQ_IDT3243X + bool + select GENERIC_IRQ_CHIP + select IRQ_DOMAIN + +config APPLE_AIC + bool "Apple Interrupt Controller (AIC)" + depends on ARM64 + depends on ARCH_APPLE || COMPILE_TEST + select GENERIC_IRQ_IPI_MUX + help + Support for the Apple Interrupt Controller found on Apple Silicon SoCs, + such as the M1. + +config MCHP_EIC + bool "Microchip External Interrupt Controller" + depends on ARCH_AT91 || COMPILE_TEST + select IRQ_DOMAIN + select IRQ_DOMAIN_HIERARCHY + help + Support for Microchip External Interrupt Controller. + +config SOPHGO_SG2042_MSI + bool "Sophgo SG2042 MSI Controller" + depends on ARCH_SOPHGO || COMPILE_TEST + depends on PCI + select IRQ_DOMAIN_HIERARCHY + select IRQ_MSI_LIB + select PCI_MSI + help + Support for the Sophgo SG2042 MSI Controller. + This on-chip interrupt controller enables MSI sources to be + routed to the primary PLIC controller on the SoC. + +config SUNPLUS_SP7021_INTC + bool "Sunplus SP7021 interrupt controller" if COMPILE_TEST + default SOC_SP7021 + help + Support for the Sunplus SP7021 Interrupt Controller IP core. + SP7021 SoC has 2 Chips: C-Chip & P-Chip. This is used as a + chained controller, routing all interrupt sources in P-Chip to + the primary controller on C-Chip. 
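Many of the controllers above (SUNPLUS_SP7021_INTC, MST_IRQ, the Loongson parts) are secondary controllers cascaded off one input of the primary controller and are driven as chained handlers. The canonical shape, with a hypothetical status register (exynos-combiner.c and irq-al-fic.c later in this diff both follow it):

    static void my_l2_handler(struct irq_desc *desc)
    {
    	struct my_intc *priv = irq_desc_get_handler_data(desc);
    	struct irq_chip *chip = irq_desc_get_chip(desc);
    	unsigned long pending;
    	u32 hwirq;

    	chained_irq_enter(chip, desc);

    	pending = readl_relaxed(priv->base + MY_INTC_STATUS);	/* hypothetical */
    	for_each_set_bit(hwirq, &pending, 32)
    		generic_handle_domain_irq(priv->domain, hwirq);

    	chained_irq_exit(chip, desc);
    }

    irq_set_chained_handler_and_data(parent_irq, my_l2_handler, priv);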
+ +endmenu diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index c93713d24b86..6a229443efe0 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -1,37 +1,43 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_IRQCHIP) += irqchip.o +obj-$(CONFIG_AL_FIC) += irq-al-fic.o obj-$(CONFIG_ALPINE_MSI) += irq-alpine-msi.o obj-$(CONFIG_ATH79) += irq-ath79-cpu.o obj-$(CONFIG_ATH79) += irq-ath79-misc.o obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2836.o -obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o +obj-$(CONFIG_ARCH_ACTIONS) += irq-owl-sirq.o +obj-$(CONFIG_DAVINCI_CP_INTC) += irq-davinci-cp-intc.o +obj-$(CONFIG_EXYNOS_IRQ_COMBINER) += exynos-combiner.o +obj-$(CONFIG_ECONET_EN751221_INTC) += irq-econet-en751221.o obj-$(CONFIG_FARADAY_FTINTC010) += irq-ftintc010.o obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o obj-$(CONFIG_ARCH_LPC32XX) += irq-lpc32xx.o obj-$(CONFIG_ARCH_MMP) += irq-mmp.o obj-$(CONFIG_IRQ_MXS) += irq-mxs.o obj-$(CONFIG_ARCH_TEGRA) += irq-tegra.o -obj-$(CONFIG_ARCH_S3C24XX) += irq-s3c24xx.o obj-$(CONFIG_DW_APB_ICTL) += irq-dw-apb-ictl.o obj-$(CONFIG_CLPS711X_IRQCHIP) += irq-clps711x.o obj-$(CONFIG_OMPIC) += irq-ompic.o obj-$(CONFIG_OR1K_PIC) += irq-or1k-pic.o obj-$(CONFIG_ORION_IRQCHIP) += irq-orion.o obj-$(CONFIG_OMAP_IRQCHIP) += irq-omap-intc.o -obj-$(CONFIG_ARCH_SUNXI) += irq-sun4i.o -obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi-nmi.o +obj-$(CONFIG_SUN4I_INTC) += irq-sun4i.o +obj-$(CONFIG_SUN6I_R_INTC) += irq-sun6i-r.o +obj-$(CONFIG_SUNXI_NMI_INTC) += irq-sunxi-nmi.o obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o obj-$(CONFIG_ARM_GIC_PM) += irq-gic-pm.o obj-$(CONFIG_ARCH_REALVIEW) += irq-gic-realview.o +obj-$(CONFIG_IRQ_MSI_LIB) += irq-msi-lib.o obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-v3-mbi.o irq-gic-common.o -obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-platform-msi.o irq-gic-v4.o -obj-$(CONFIG_ARM_GIC_V3_ITS_PCI) += irq-gic-v3-its-pci-msi.o +obj-$(CONFIG_ARM_GIC_ITS_PARENT) += irq-gic-its-msi-parent.o +obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v4.o obj-$(CONFIG_ARM_GIC_V3_ITS_FSL_MC) += irq-gic-v3-its-fsl-mc-msi.o -obj-$(CONFIG_PARTITION_PERCPU) += irq-partition-percpu.o +obj-$(CONFIG_ARM_GIC_V5) += irq-gic-v5.o irq-gic-v5-irs.o irq-gic-v5-its.o \ + irq-gic-v5-iwb.o obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o obj-$(CONFIG_ARM_NVIC) += irq-nvic.o obj-$(CONFIG_ARM_VIC) += irq-vic.o @@ -41,16 +47,18 @@ obj-$(CONFIG_ATMEL_AIC5_IRQ) += irq-atmel-aic-common.o irq-atmel-aic5.o obj-$(CONFIG_I8259) += irq-i8259.o obj-$(CONFIG_IMGPDC_IRQ) += irq-imgpdc.o obj-$(CONFIG_IRQ_MIPS_CPU) += irq-mips-cpu.o -obj-$(CONFIG_SIRF_IRQ) += irq-sirfsoc.o +obj-$(CONFIG_IXP4XX_IRQ) += irq-ixp4xx.o obj-$(CONFIG_JCORE_AIC) += irq-jcore-aic.o obj-$(CONFIG_RDA_INTC) += irq-rda-intc.o obj-$(CONFIG_RENESAS_INTC_IRQPIN) += irq-renesas-intc-irqpin.o obj-$(CONFIG_RENESAS_IRQC) += irq-renesas-irqc.o +obj-$(CONFIG_RENESAS_RZA1_IRQC) += irq-renesas-rza1.o +obj-$(CONFIG_RENESAS_RZG2L_IRQC) += irq-renesas-rzg2l.o +obj-$(CONFIG_RENESAS_RZV2H_ICU) += irq-renesas-rzv2h.o obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o obj-$(CONFIG_ARCH_NSPIRE) += irq-zevio.o obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o obj-$(CONFIG_ST_IRQCHIP) += irq-st.o -obj-$(CONFIG_TANGO_IRQ) += irq-tango.o obj-$(CONFIG_TB10X_IRQC) += irq-tb10x.o obj-$(CONFIG_TS4800_IRQ) += irq-ts4800.o obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o @@ -58,6 +66,7 @@ 
obj-$(CONFIG_XTENSA_MX) += irq-xtensa-mx.o obj-$(CONFIG_XILINX_INTC) += irq-xilinx-intc.o obj-$(CONFIG_IRQ_CROSSBAR) += irq-crossbar.o obj-$(CONFIG_SOC_VF610) += irq-vf610-mscm-ir.o +obj-$(CONFIG_BCM2712_MIP) += irq-bcm2712-mip.o obj-$(CONFIG_BCM6345_L1_IRQ) += irq-bcm6345-l1.o obj-$(CONFIG_BCM7038_L1_IRQ) += irq-bcm7038-l1.o obj-$(CONFIG_BCM7120_L2_IRQ) += irq-bcm7120-l2.o @@ -66,10 +75,9 @@ obj-$(CONFIG_KEYSTONE_IRQ) += irq-keystone.o obj-$(CONFIG_MIPS_GIC) += irq-mips-gic.o obj-$(CONFIG_ARCH_MEDIATEK) += irq-mtk-sysirq.o irq-mtk-cirq.o obj-$(CONFIG_ARCH_DIGICOLOR) += irq-digicolor.o -obj-$(CONFIG_RENESAS_H8300H_INTC) += irq-renesas-h8300h.o -obj-$(CONFIG_RENESAS_H8S_INTC) += irq-renesas-h8s.o obj-$(CONFIG_ARCH_SA1100) += irq-sa11x0.o obj-$(CONFIG_INGENIC_IRQ) += irq-ingenic.o +obj-$(CONFIG_INGENIC_TCU_IRQ) += irq-ingenic-tcu.o obj-$(CONFIG_IMX_GPCV2) += irq-imx-gpcv2.o obj-$(CONFIG_PIC32_EVIC) += irq-pic32-evic.o obj-$(CONFIG_MSCC_OCELOT_IRQ) += irq-mscc-ocelot.o @@ -78,19 +86,52 @@ obj-$(CONFIG_MVEBU_ICU) += irq-mvebu-icu.o obj-$(CONFIG_MVEBU_ODMI) += irq-mvebu-odmi.o obj-$(CONFIG_MVEBU_PIC) += irq-mvebu-pic.o obj-$(CONFIG_MVEBU_SEI) += irq-mvebu-sei.o +obj-$(CONFIG_LS_EXTIRQ) += irq-ls-extirq.o obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o -obj-$(CONFIG_EZNPS_GIC) += irq-eznps.o -obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o +obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o irq-aspeed-scu-ic.o +obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-intc.o +obj-$(CONFIG_STM32MP_EXTI) += irq-stm32mp-exti.o obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o obj-$(CONFIG_ARCH_SYNQUACER) += irq-sni-exiu.o obj-$(CONFIG_MESON_IRQ_GPIO) += irq-meson-gpio.o obj-$(CONFIG_GOLDFISH_PIC) += irq-goldfish-pic.o -obj-$(CONFIG_NDS32) += irq-ativic32.o obj-$(CONFIG_QCOM_PDC) += qcom-pdc.o +obj-$(CONFIG_QCOM_MPM) += irq-qcom-mpm.o obj-$(CONFIG_CSKY_MPINTC) += irq-csky-mpintc.o obj-$(CONFIG_CSKY_APB_INTC) += irq-csky-apb-intc.o +obj-$(CONFIG_RISCV_INTC) += irq-riscv-intc.o +obj-$(CONFIG_RISCV_APLIC) += irq-riscv-aplic-main.o irq-riscv-aplic-direct.o +obj-$(CONFIG_RISCV_APLIC_MSI) += irq-riscv-aplic-msi.o +obj-$(CONFIG_RISCV_IMSIC) += irq-riscv-imsic-state.o irq-riscv-imsic-early.o irq-riscv-imsic-platform.o +obj-$(CONFIG_RISCV_RPMI_SYSMSI) += irq-riscv-rpmi-sysmsi.o obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o +obj-$(CONFIG_STARFIVE_JH8100_INTC) += irq-starfive-jh8100-intc.o +obj-$(CONFIG_ACLINT_SSWI) += irq-aclint-sswi.o obj-$(CONFIG_IMX_IRQSTEER) += irq-imx-irqsteer.o +obj-$(CONFIG_IMX_INTMUX) += irq-imx-intmux.o +obj-$(CONFIG_IMX_MU_MSI) += irq-imx-mu-msi.o obj-$(CONFIG_MADERA_IRQ) += irq-madera.o +obj-$(CONFIG_LAN966X_OIC) += irq-lan966x-oic.o +obj-$(CONFIG_LS1X_IRQ) += irq-ls1x.o +obj-$(CONFIG_TI_SCI_INTR_IRQCHIP) += irq-ti-sci-intr.o +obj-$(CONFIG_TI_SCI_INTA_IRQCHIP) += irq-ti-sci-inta.o +obj-$(CONFIG_TI_PRUSS_INTC) += irq-pruss-intc.o +obj-$(CONFIG_IRQ_LOONGARCH_CPU) += irq-loongarch-cpu.o irq-loongarch-avec.o +obj-$(CONFIG_LOONGSON_LIOINTC) += irq-loongson-liointc.o +obj-$(CONFIG_LOONGSON_EIOINTC) += irq-loongson-eiointc.o +obj-$(CONFIG_LOONGSON_HTPIC) += irq-loongson-htpic.o +obj-$(CONFIG_LOONGSON_HTVEC) += irq-loongson-htvec.o +obj-$(CONFIG_LOONGSON_PCH_PIC) += irq-loongson-pch-pic.o +obj-$(CONFIG_LOONGSON_PCH_MSI) += irq-loongson-pch-msi.o +obj-$(CONFIG_LOONGSON_PCH_LPC) += irq-loongson-pch-lpc.o +obj-$(CONFIG_MST_IRQ) += irq-mst-intc.o 
+obj-$(CONFIG_SL28CPLD_INTC) += irq-sl28cpld.o +obj-$(CONFIG_MACH_REALTEK_RTL) += irq-realtek-rtl.o +obj-$(CONFIG_WPCM450_AIC) += irq-wpcm450-aic.o +obj-$(CONFIG_IRQ_IDT3243X) += irq-idt3243x.o +obj-$(CONFIG_APPLE_AIC) += irq-apple-aic.o +obj-$(CONFIG_MCHP_EIC) += irq-mchp-eic.o +obj-$(CONFIG_SOPHGO_SG2042_MSI) += irq-sg2042-msi.o +obj-$(CONFIG_SUNPLUS_SP7021_INTC) += irq-sp7021-intc.o diff --git a/drivers/irqchip/alphascale_asm9260-icoll.h b/drivers/irqchip/alphascale_asm9260-icoll.h index 5cec108ee204..1ec59483afa1 100644 --- a/drivers/irqchip/alphascale_asm9260-icoll.h +++ b/drivers/irqchip/alphascale_asm9260-icoll.h @@ -1,10 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2014 Oleksij Rempel <linux@rempel-privat.de> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #ifndef _ALPHASCALE_ASM9260_ICOLL_H diff --git a/drivers/irqchip/exynos-combiner.c b/drivers/irqchip/exynos-combiner.c index b78a169c9c83..495848442b35 100644 --- a/drivers/irqchip/exynos-combiner.c +++ b/drivers/irqchip/exynos-combiner.c @@ -1,12 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd. * http://www.samsung.com * * Combiner irqchip for EXYNOS - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include <linux/err.h> #include <linux/export.h> @@ -69,8 +66,9 @@ static void combiner_handle_cascade_irq(struct irq_desc *desc) { struct combiner_chip_data *chip_data = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); - unsigned int cascade_irq, combiner_irq; + unsigned int combiner_irq; unsigned long status; + int ret; chained_irq_enter(chip, desc); @@ -83,12 +81,9 @@ static void combiner_handle_cascade_irq(struct irq_desc *desc) goto out; combiner_irq = chip_data->hwirq_offset + __ffs(status); - cascade_irq = irq_find_mapping(combiner_irq_domain, combiner_irq); - - if (unlikely(!cascade_irq)) + ret = generic_handle_domain_irq(combiner_irq_domain, combiner_irq); + if (unlikely(ret)) handle_bad_irq(desc); - else - generic_handle_irq(cascade_irq); out: chained_irq_exit(chip, desc); @@ -182,12 +177,10 @@ static void __init combiner_init(void __iomem *combiner_base, nr_irq = max_nr * IRQ_IN_COMBINER; combiner_data = kcalloc(max_nr, sizeof (*combiner_data), GFP_KERNEL); - if (!combiner_data) { - pr_warn("%s: could not allocate combiner data\n", __func__); + if (!combiner_data) return; - } - combiner_irq_domain = irq_domain_add_linear(np, nr_irq, + combiner_irq_domain = irq_domain_create_linear(of_fwnode_handle(np), nr_irq, &combiner_irq_domain_ops, combiner_data); if (WARN_ON(!combiner_irq_domain)) { pr_warn("%s: irq domain init failed\n", __func__); @@ -207,12 +200,13 @@ static void __init combiner_init(void __iomem *combiner_base, /** * combiner_suspend - save interrupt combiner state before suspend + * @data: syscore context * * Save the interrupt enable set register for all combiner groups since * the state is lost when the system enters into a sleep state. 
* */ -static int combiner_suspend(void) +static int combiner_suspend(void *data) { int i; @@ -225,12 +219,13 @@ static int combiner_suspend(void) /** * combiner_resume - restore interrupt combiner state after resume + * @data: syscore context * * Restore the interrupt enable set register for all combiner groups since * the state is lost when the system enters into a sleep state on suspend. * */ -static void combiner_resume(void) +static void combiner_resume(void *data) { int i; @@ -247,11 +242,15 @@ static void combiner_resume(void) #define combiner_resume NULL #endif -static struct syscore_ops combiner_syscore_ops = { +static const struct syscore_ops combiner_syscore_ops = { .suspend = combiner_suspend, .resume = combiner_resume, }; +static struct syscore combiner_syscore = { + .ops = &combiner_syscore_ops, +}; + static int __init combiner_of_init(struct device_node *np, struct device_node *parent) { @@ -271,7 +270,7 @@ static int __init combiner_of_init(struct device_node *np, combiner_init(combiner_base, np); - register_syscore_ops(&combiner_syscore_ops); + register_syscore(&combiner_syscore); return 0; } diff --git a/drivers/irqchip/irq-aclint-sswi.c b/drivers/irqchip/irq-aclint-sswi.c new file mode 100644 index 000000000000..fee30f3bc5ac --- /dev/null +++ b/drivers/irqchip/irq-aclint-sswi.c @@ -0,0 +1,209 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2024 Inochi Amaoto <inochiama@gmail.com> + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/cpu.h> +#include <linux/interrupt.h> +#include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/of_address.h> +#include <linux/spinlock.h> +#include <linux/smp.h> +#include <linux/string_choices.h> +#include <asm/sbi.h> +#include <asm/vendorid_list.h> + +static int sswi_ipi_virq __ro_after_init; +static DEFINE_PER_CPU(void __iomem *, sswi_cpu_regs); + +static void aclint_sswi_ipi_send(unsigned int cpu) +{ + writel(0x1, per_cpu(sswi_cpu_regs, cpu)); +} + +static void aclint_sswi_ipi_clear(void) +{ + writel_relaxed(0x0, this_cpu_read(sswi_cpu_regs)); +} + +static void aclint_sswi_ipi_handle(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + + chained_irq_enter(chip, desc); + + csr_clear(CSR_IP, IE_SIE); + aclint_sswi_ipi_clear(); + + ipi_mux_process(); + + chained_irq_exit(chip, desc); +} + +static int aclint_sswi_starting_cpu(unsigned int cpu) +{ + enable_percpu_irq(sswi_ipi_virq, irq_get_trigger_type(sswi_ipi_virq)); + + return 0; +} + +static int aclint_sswi_dying_cpu(unsigned int cpu) +{ + aclint_sswi_ipi_clear(); + + disable_percpu_irq(sswi_ipi_virq); + + return 0; +} + +static int __init aclint_sswi_parse_irq(struct fwnode_handle *fwnode, void __iomem *reg) +{ + u32 contexts = of_irq_count(to_of_node(fwnode)); + + if (!(contexts)) { + pr_err("%pfwP: no ACLINT SSWI context available\n", fwnode); + return -EINVAL; + } + + for (u32 i = 0; i < contexts; i++) { + struct of_phandle_args parent; + unsigned long hartid; + u32 hart_index; + int rc, cpu; + + rc = of_irq_parse_one(to_of_node(fwnode), i, &parent); + if (rc) + return rc; + + rc = riscv_of_parent_hartid(parent.np, &hartid); + if (rc) + return rc; + + if (parent.args[0] != RV_IRQ_SOFT) + return -ENOTSUPP; + + cpu = riscv_hartid_to_cpuid(hartid); + + rc = riscv_get_hart_index(fwnode, i, &hart_index); + if (rc) { + pr_warn("%pfwP: hart index [%d] not found\n", fwnode, i); + return -EINVAL; + } + per_cpu(sswi_cpu_regs, cpu) = reg + hart_index * 4; + } + + pr_info("%pfwP: register %u CPU%s\n", fwnode, 
contexts, str_plural(contexts)); + + return 0; +} + +static int __init aclint_sswi_probe(struct fwnode_handle *fwnode) +{ + struct irq_domain *domain; + void __iomem *reg; + int virq, rc; + + if (!is_of_node(fwnode)) + return -EINVAL; + + reg = of_iomap(to_of_node(fwnode), 0); + if (!reg) + return -ENOMEM; + + /* Parse SSWI setting */ + rc = aclint_sswi_parse_irq(fwnode, reg); + if (rc < 0) + return rc; + + /* If multiple SSWI devices are present, do not register irq again */ + if (sswi_ipi_virq) + return 0; + + /* Find riscv intc domain and create IPI irq mapping */ + domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY); + if (!domain) { + pr_err("%pfwP: Failed to find INTC domain\n", fwnode); + return -ENOENT; + } + + sswi_ipi_virq = irq_create_mapping(domain, RV_IRQ_SOFT); + if (!sswi_ipi_virq) { + pr_err("unable to create ACLINT SSWI IRQ mapping\n"); + return -ENOMEM; + } + + /* Register SSWI irq and handler */ + virq = ipi_mux_create(BITS_PER_BYTE, aclint_sswi_ipi_send); + if (virq <= 0) { + pr_err("unable to create muxed IPIs\n"); + irq_dispose_mapping(sswi_ipi_virq); + return virq < 0 ? virq : -ENOMEM; + } + + irq_set_chained_handler(sswi_ipi_virq, aclint_sswi_ipi_handle); + + cpuhp_setup_state(CPUHP_AP_IRQ_ACLINT_SSWI_STARTING, + "irqchip/aclint-sswi:starting", + aclint_sswi_starting_cpu, + aclint_sswi_dying_cpu); + + riscv_ipi_set_virq_range(virq, BITS_PER_BYTE); + + return 0; +} + +/* generic/MIPS variant */ +static int __init generic_aclint_sswi_probe(struct fwnode_handle *fwnode) +{ + int rc; + + rc = aclint_sswi_probe(fwnode); + if (rc) + return rc; + + /* Announce that SSWI is providing IPIs */ + pr_info("providing IPIs using ACLINT SSWI\n"); + + return 0; +} + +static int __init generic_aclint_sswi_early_probe(struct device_node *node, + struct device_node *parent) +{ + return generic_aclint_sswi_probe(&node->fwnode); +} +IRQCHIP_DECLARE(mips_p8700_sswi, "mips,p8700-aclint-sswi", generic_aclint_sswi_early_probe); +IRQCHIP_DECLARE(nuclei_ux900_sswi, "nuclei,ux900-aclint-sswi", generic_aclint_sswi_early_probe); + +/* THEAD variant */ +#define THEAD_C9XX_CSR_SXSTATUS 0x5c0 +#define THEAD_C9XX_SXSTATUS_CLINTEE BIT(17) + +static int __init thead_aclint_sswi_probe(struct fwnode_handle *fwnode) +{ + int rc; + + /* If it is a T-HEAD CPU, check whether SSWI is enabled */ + if (riscv_cached_mvendorid(0) == THEAD_VENDOR_ID && + !(csr_read(THEAD_C9XX_CSR_SXSTATUS) & THEAD_C9XX_SXSTATUS_CLINTEE)) + return -ENOTSUPP; + + rc = aclint_sswi_probe(fwnode); + if (rc) + return rc; + + /* Announce that SSWI is providing IPIs */ + pr_info("providing IPIs using THEAD ACLINT SSWI\n"); + + return 0; +} + +static int __init thead_aclint_sswi_early_probe(struct device_node *node, + struct device_node *parent) +{ + return thead_aclint_sswi_probe(&node->fwnode); +} +IRQCHIP_DECLARE(thead_aclint_sswi, "thead,c900-aclint-sswi", thead_aclint_sswi_early_probe); diff --git a/drivers/irqchip/irq-al-fic.c b/drivers/irqchip/irq-al-fic.c new file mode 100644 index 000000000000..8f300843bbca --- /dev/null +++ b/drivers/irqchip/irq-al-fic.c @@ -0,0 +1,278 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ */ + +#include <linux/bitfield.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> + +/* FIC Registers */ +#define AL_FIC_CAUSE 0x00 +#define AL_FIC_SET_CAUSE 0x08 +#define AL_FIC_MASK 0x10 +#define AL_FIC_CONTROL 0x28 + +#define CONTROL_TRIGGER_RISING BIT(3) +#define CONTROL_MASK_MSI_X BIT(5) + +#define NR_FIC_IRQS 32 + +MODULE_AUTHOR("Talel Shenhar"); +MODULE_DESCRIPTION("Amazon's Annapurna Labs Interrupt Controller Driver"); + +enum al_fic_state { + AL_FIC_UNCONFIGURED = 0, + AL_FIC_CONFIGURED_LEVEL, + AL_FIC_CONFIGURED_RISING_EDGE, +}; + +struct al_fic { + void __iomem *base; + struct irq_domain *domain; + const char *name; + unsigned int parent_irq; + enum al_fic_state state; +}; + +static void al_fic_set_trigger(struct al_fic *fic, + struct irq_chip_generic *gc, + enum al_fic_state new_state) +{ + irq_flow_handler_t handler; + u32 control = readl_relaxed(fic->base + AL_FIC_CONTROL); + + if (new_state == AL_FIC_CONFIGURED_LEVEL) { + handler = handle_level_irq; + control &= ~CONTROL_TRIGGER_RISING; + } else { + handler = handle_edge_irq; + control |= CONTROL_TRIGGER_RISING; + } + gc->chip_types->handler = handler; + fic->state = new_state; + writel_relaxed(control, fic->base + AL_FIC_CONTROL); +} + +static int al_fic_irq_set_type(struct irq_data *data, unsigned int flow_type) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); + struct al_fic *fic = gc->private; + enum al_fic_state new_state; + + guard(raw_spinlock)(&gc->lock); + + if (((flow_type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_LEVEL_HIGH) && + ((flow_type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_EDGE_RISING)) { + pr_debug("fic doesn't support flow type %d\n", flow_type); + return -EINVAL; + } + + new_state = (flow_type & IRQ_TYPE_LEVEL_HIGH) ? + AL_FIC_CONFIGURED_LEVEL : AL_FIC_CONFIGURED_RISING_EDGE; + + /* + * A given FIC instance can be either all level or all edge triggered. + * This is generally fixed depending on what pieces of HW it's wired up + * to. + * + * We configure it based on the sensitivity of the first source + * being setup, and reject any subsequent attempt at configuring it in a + * different way. 
+ */ + if (fic->state == AL_FIC_UNCONFIGURED) { + al_fic_set_trigger(fic, gc, new_state); + } else if (fic->state != new_state) { + pr_debug("fic %s state already configured to %d\n", fic->name, fic->state); + return -EINVAL; + } + return 0; +} + +static void al_fic_irq_handler(struct irq_desc *desc) +{ + struct al_fic *fic = irq_desc_get_handler_data(desc); + struct irq_domain *domain = fic->domain; + struct irq_chip *irqchip = irq_desc_get_chip(desc); + struct irq_chip_generic *gc = irq_get_domain_generic_chip(domain, 0); + unsigned long pending; + u32 hwirq; + + chained_irq_enter(irqchip, desc); + + pending = readl_relaxed(fic->base + AL_FIC_CAUSE); + pending &= ~gc->mask_cache; + + for_each_set_bit(hwirq, &pending, NR_FIC_IRQS) + generic_handle_domain_irq(domain, hwirq); + + chained_irq_exit(irqchip, desc); +} + +static int al_fic_irq_retrigger(struct irq_data *data) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); + struct al_fic *fic = gc->private; + + writel_relaxed(BIT(data->hwirq), fic->base + AL_FIC_SET_CAUSE); + + return 1; +} + +static int al_fic_register(struct device_node *node, + struct al_fic *fic) +{ + struct irq_chip_generic *gc; + int ret; + + fic->domain = irq_domain_create_linear(of_fwnode_handle(node), + NR_FIC_IRQS, + &irq_generic_chip_ops, + fic); + if (!fic->domain) { + pr_err("fail to add irq domain\n"); + return -ENOMEM; + } + + ret = irq_alloc_domain_generic_chips(fic->domain, + NR_FIC_IRQS, + 1, fic->name, + handle_level_irq, + 0, 0, IRQ_GC_INIT_MASK_CACHE); + if (ret) { + pr_err("fail to allocate generic chip (%d)\n", ret); + goto err_domain_remove; + } + + gc = irq_get_domain_generic_chip(fic->domain, 0); + gc->reg_base = fic->base; + gc->chip_types->regs.mask = AL_FIC_MASK; + gc->chip_types->regs.ack = AL_FIC_CAUSE; + gc->chip_types->chip.irq_mask = irq_gc_mask_set_bit; + gc->chip_types->chip.irq_unmask = irq_gc_mask_clr_bit; + gc->chip_types->chip.irq_ack = irq_gc_ack_clr_bit; + gc->chip_types->chip.irq_set_type = al_fic_irq_set_type; + gc->chip_types->chip.irq_retrigger = al_fic_irq_retrigger; + gc->chip_types->chip.flags = IRQCHIP_SKIP_SET_WAKE; + gc->private = fic; + + irq_set_chained_handler_and_data(fic->parent_irq, + al_fic_irq_handler, + fic); + return 0; + +err_domain_remove: + irq_domain_remove(fic->domain); + + return ret; +} + +/* + * al_fic_wire_init() - initialize and configure fic in wire mode + * @node: optional pointer to interrupt controller's device tree node. + * @base: mmio to fic register + * @name: name of the fic + * @parent_irq: interrupt of parent + * + * This API will configure the fic hardware to work in wire mode. + * In wire mode, the fic hardware generates a wire ("wired") interrupt. + * Interrupts can be generated on a positive edge or by level - the configuration + * is determined by the hardware connected to this fic. 
+ */ +static struct al_fic *al_fic_wire_init(struct device_node *node, + void __iomem *base, + const char *name, + unsigned int parent_irq) +{ + struct al_fic *fic; + int ret; + u32 control = CONTROL_MASK_MSI_X; + + fic = kzalloc(sizeof(*fic), GFP_KERNEL); + if (!fic) + return ERR_PTR(-ENOMEM); + + fic->base = base; + fic->parent_irq = parent_irq; + fic->name = name; + + /* mask out all interrupts */ + writel_relaxed(0xFFFFFFFF, fic->base + AL_FIC_MASK); + + /* clear any pending interrupt */ + writel_relaxed(0, fic->base + AL_FIC_CAUSE); + + writel_relaxed(control, fic->base + AL_FIC_CONTROL); + + ret = al_fic_register(node, fic); + if (ret) { + pr_err("fail to register irqchip\n"); + goto err_free; + } + + pr_debug("%s initialized successfully in Legacy mode (parent-irq=%u)\n", + fic->name, parent_irq); + + return fic; + +err_free: + kfree(fic); + return ERR_PTR(ret); +} + +static int __init al_fic_init_dt(struct device_node *node, + struct device_node *parent) +{ + int ret; + void __iomem *base; + unsigned int parent_irq; + struct al_fic *fic; + + if (!parent) { + pr_err("%s: unsupported - device requires a parent\n", + node->name); + return -EINVAL; + } + + base = of_iomap(node, 0); + if (!base) { + pr_err("%s: fail to map memory\n", node->name); + return -ENOMEM; + } + + parent_irq = irq_of_parse_and_map(node, 0); + if (!parent_irq) { + pr_err("%s: fail to map irq\n", node->name); + ret = -EINVAL; + goto err_unmap; + } + + fic = al_fic_wire_init(node, + base, + node->name, + parent_irq); + if (IS_ERR(fic)) { + pr_err("%s: fail to initialize irqchip (%lu)\n", + node->name, + PTR_ERR(fic)); + ret = PTR_ERR(fic); + goto err_irq_dispose; + } + + return 0; + +err_irq_dispose: + irq_dispose_mapping(parent_irq); +err_unmap: + iounmap(base); + + return ret; +} + +IRQCHIP_DECLARE(al_fic, "amazon,al-fic", al_fic_init_dt); diff --git a/drivers/irqchip/irq-alpine-msi.c b/drivers/irqchip/irq-alpine-msi.c index 23a3b877f7f1..159d9ec7c0dd 100644 --- a/drivers/irqchip/irq-alpine-msi.c +++ b/drivers/irqchip/irq-alpine-msi.c @@ -14,6 +14,7 @@ #include <linux/irqchip.h> #include <linux/irqchip/arm-gic.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/msi.h> #include <linux/of.h> #include <linux/of_address.h> @@ -29,84 +30,45 @@ #define ALPINE_MSIX_SPI_TARGET_CLUSTER0 BIT(16) struct alpine_msix_data { - spinlock_t msi_map_lock; - phys_addr_t addr; - u32 spi_first; /* The SGI number that MSIs start */ - u32 num_spis; /* The number of SGIs for MSIs */ - unsigned long *msi_map; -}; - -static void alpine_msix_mask_msi_irq(struct irq_data *d) -{ - pci_msi_mask_irq(d); - irq_chip_mask_parent(d); -} - -static void alpine_msix_unmask_msi_irq(struct irq_data *d) -{ - pci_msi_unmask_irq(d); - irq_chip_unmask_parent(d); -} - -static struct irq_chip alpine_msix_irq_chip = { - .name = "MSIx", - .irq_mask = alpine_msix_mask_msi_irq, - .irq_unmask = alpine_msix_unmask_msi_irq, - .irq_eoi = irq_chip_eoi_parent, - .irq_set_affinity = irq_chip_set_affinity_parent, + spinlock_t msi_map_lock; + phys_addr_t addr; + u32 spi_first; /* The SGI number that MSIs start */ + u32 num_spis; /* The number of SGIs for MSIs */ + unsigned long *msi_map; }; static int alpine_msix_allocate_sgi(struct alpine_msix_data *priv, int num_req) { int first; - spin_lock(&priv->msi_map_lock); - - first = bitmap_find_next_zero_area(priv->msi_map, priv->num_spis, 0, - num_req, 0); - if (first >= priv->num_spis) { - spin_unlock(&priv->msi_map_lock); + guard(spinlock)(&priv->msi_map_lock); + first = bitmap_find_next_zero_area(priv->msi_map, 
priv->num_spis, 0, num_req, 0); + if (first >= priv->num_spis) return -ENOSPC; - } bitmap_set(priv->msi_map, first, num_req); - - spin_unlock(&priv->msi_map_lock); - return priv->spi_first + first; } -static void alpine_msix_free_sgi(struct alpine_msix_data *priv, unsigned sgi, - int num_req) +static void alpine_msix_free_sgi(struct alpine_msix_data *priv, unsigned int sgi, int num_req) { int first = sgi - priv->spi_first; - spin_lock(&priv->msi_map_lock); - + guard(spinlock)(&priv->msi_map_lock); bitmap_clear(priv->msi_map, first, num_req); - - spin_unlock(&priv->msi_map_lock); } -static void alpine_msix_compose_msi_msg(struct irq_data *data, - struct msi_msg *msg) +static void alpine_msix_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct alpine_msix_data *priv = irq_data_get_irq_chip_data(data); phys_addr_t msg_addr = priv->addr; msg_addr |= (data->hwirq << 3); - msg->address_hi = upper_32_bits(msg_addr); msg->address_lo = lower_32_bits(msg_addr); msg->data = 0; } -static struct msi_domain_info alpine_msix_domain_info = { - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_PCI_MSIX, - .chip = &alpine_msix_irq_chip, -}; - static struct irq_chip middle_irq_chip = { .name = "alpine_msix_middle", .irq_mask = irq_chip_mask_parent, @@ -116,8 +78,7 @@ static struct irq_chip middle_irq_chip = { .irq_compose_msi_msg = alpine_msix_compose_msi_msg, }; -static int alpine_msix_gic_domain_alloc(struct irq_domain *domain, - unsigned int virq, int sgi) +static int alpine_msix_gic_domain_alloc(struct irq_domain *domain, unsigned int virq, int sgi) { struct irq_fwspec fwspec; struct irq_data *d; @@ -138,12 +99,10 @@ static int alpine_msix_gic_domain_alloc(struct irq_domain *domain, d = irq_domain_get_irq_data(domain->parent, virq); d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING); - return 0; } -static int alpine_msix_middle_domain_alloc(struct irq_domain *domain, - unsigned int virq, +static int alpine_msix_middle_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *args) { struct alpine_msix_data *priv = domain->host_data; @@ -161,18 +120,15 @@ static int alpine_msix_middle_domain_alloc(struct irq_domain *domain, irq_domain_set_hwirq_and_chip(domain, virq + i, sgi + i, &middle_irq_chip, priv); } - return 0; err_sgi: - while (--i >= 0) - irq_domain_free_irqs_parent(domain, virq, i); + irq_domain_free_irqs_parent(domain, virq, i); alpine_msix_free_sgi(priv, sgi, nr_irqs); return err; } -static void alpine_msix_middle_domain_free(struct irq_domain *domain, - unsigned int virq, +static void alpine_msix_middle_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct irq_data *d = irq_domain_get_irq_data(domain, virq); @@ -183,14 +139,35 @@ static void alpine_msix_middle_domain_free(struct irq_domain *domain, } static const struct irq_domain_ops alpine_msix_middle_domain_ops = { + .select = msi_lib_irq_domain_select, .alloc = alpine_msix_middle_domain_alloc, .free = alpine_msix_middle_domain_free, }; -static int alpine_msix_init_domains(struct alpine_msix_data *priv, - struct device_node *node) +#define ALPINE_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS | \ + MSI_FLAG_PCI_MSI_MASK_PARENT) + +#define ALPINE_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \ + MSI_FLAG_PCI_MSIX) + +static struct msi_parent_ops alpine_msi_parent_ops = { + .supported_flags = ALPINE_MSI_FLAGS_SUPPORTED, + .required_flags = ALPINE_MSI_FLAGS_REQUIRED, + .chip_flags = MSI_CHIP_FLAG_SET_EOI, + 
.bus_select_token = DOMAIN_BUS_NEXUS, + .bus_select_mask = MATCH_PCI_MSI, + .prefix = "ALPINE-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, +}; + +static int alpine_msix_init_domains(struct alpine_msix_data *priv, struct device_node *node) { - struct irq_domain *middle_domain, *msi_domain, *gic_domain; + struct irq_domain_info info = { + .fwnode = of_fwnode_handle(node), + .ops = &alpine_msix_middle_domain_ops, + .host_data = priv, + }; struct device_node *gic_node; gic_node = of_irq_find_parent(node); @@ -199,42 +176,26 @@ static int alpine_msix_init_domains(struct alpine_msix_data *priv, return -ENODEV; } - gic_domain = irq_find_host(gic_node); - if (!gic_domain) { + info.parent = irq_find_host(gic_node); + of_node_put(gic_node); + if (!info.parent) { pr_err("Failed to find the GIC domain\n"); return -ENXIO; } - middle_domain = irq_domain_add_tree(NULL, - &alpine_msix_middle_domain_ops, - priv); - if (!middle_domain) { - pr_err("Failed to create the MSIX middle domain\n"); - return -ENOMEM; - } - - middle_domain->parent = gic_domain; - - msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node), - &alpine_msix_domain_info, - middle_domain); - if (!msi_domain) { + if (!msi_create_parent_irq_domain(&info, &alpine_msi_parent_ops)) { pr_err("Failed to create MSI domain\n"); - irq_domain_remove(middle_domain); return -ENOMEM; } - return 0; } -static int alpine_msix_init(struct device_node *node, - struct device_node *parent) +static int alpine_msix_init(struct device_node *node, struct device_node *parent) { - struct alpine_msix_data *priv; + struct alpine_msix_data *priv __free(kfree) = kzalloc(sizeof(*priv), GFP_KERNEL); struct resource res; int ret; - priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; @@ -243,7 +204,7 @@ static int alpine_msix_init(struct device_node *node, ret = of_address_to_resource(node, 0, &res); if (ret) { pr_err("Failed to allocate resource\n"); - goto err_priv; + return ret; } /* @@ -258,37 +219,28 @@ static int alpine_msix_init(struct device_node *node, if (of_property_read_u32(node, "al,msi-base-spi", &priv->spi_first)) { pr_err("Unable to parse MSI base\n"); - ret = -EINVAL; - goto err_priv; + return -EINVAL; } if (of_property_read_u32(node, "al,msi-num-spis", &priv->num_spis)) { pr_err("Unable to parse MSI numbers\n"); - ret = -EINVAL; - goto err_priv; + return -EINVAL; } - priv->msi_map = kcalloc(BITS_TO_LONGS(priv->num_spis), - sizeof(*priv->msi_map), - GFP_KERNEL); - if (!priv->msi_map) { - ret = -ENOMEM; - goto err_priv; - } + unsigned long *msi_map __free(kfree) = bitmap_zalloc(priv->num_spis, GFP_KERNEL); - pr_debug("Registering %d msixs, starting at %d\n", - priv->num_spis, priv->spi_first); + if (!msi_map) + return -ENOMEM; + priv->msi_map = msi_map; + + pr_debug("Registering %d msixs, starting at %d\n", priv->num_spis, priv->spi_first); ret = alpine_msix_init_domains(priv, node); if (ret) - goto err_map; + return ret; + retain_and_null_ptr(priv); + retain_and_null_ptr(msi_map); return 0; - -err_map: - kfree(priv->msi_map); -err_priv: - kfree(priv); - return ret; } IRQCHIP_DECLARE(alpine_msix, "al,alpine-msix", alpine_msix_init); diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c new file mode 100644 index 000000000000..3c70364e7cdd --- /dev/null +++ b/drivers/irqchip/irq-apple-aic.c @@ -0,0 +1,1118 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright The Asahi Linux Contributors + * + * Based on irq-lpc32xx: + * Copyright 2015-2016 Vladimir Zapolskiy <vz@mleia.com> + * Based on 
irq-bcm2836: + * Copyright 2015 Broadcom + */ + +/* + * AIC is a fairly simple interrupt controller with the following features: + * + * - 896 level-triggered hardware IRQs + * - Single mask bit per IRQ + * - Per-IRQ affinity setting + * - Automatic masking on event delivery (auto-ack) + * - Software triggering (ORed with hw line) + * - 2 per-CPU IPIs (meant as "self" and "other", but they are + * interchangeable if not symmetric) + * - Automatic prioritization (single event/ack register per CPU, lower IRQs = + * higher priority) + * - Automatic masking on ack + * - Default "this CPU" register view and explicit per-CPU views + * + * In addition, this driver also handles FIQs, as these are routed to the same + * IRQ vector. These are used for Fast IPIs, the ARMv8 timer IRQs, and + * performance counters (TODO). + * + * Implementation notes: + * + * - This driver creates two IRQ domains, one for HW IRQs and internal FIQs, + * and one for IPIs. + * - Since Linux needs more than 2 IPIs, we implement a software IRQ controller + * and funnel all IPIs into one per-CPU IPI (the second "self" IPI is unused). + * - FIQ hwirq numbers are assigned after true hwirqs, and are per-cpu. + * - DT bindings use 3-cell form (like GIC): + * - <0 nr flags> - hwirq #nr + * - <1 nr flags> - FIQ #nr + * - nr=0 Physical HV timer + * - nr=1 Virtual HV timer + * - nr=2 Physical guest timer + * - nr=3 Virtual guest timer + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/bits.h> +#include <linux/bitfield.h> +#include <linux/cpuhotplug.h> +#include <linux/io.h> +#include <linux/irqchip.h> +#include <linux/irqchip/arm-vgic-info.h> +#include <linux/irqdomain.h> +#include <linux/jump_label.h> +#include <linux/limits.h> +#include <linux/of_address.h> +#include <linux/slab.h> +#include <asm/apple_m1_pmu.h> +#include <asm/cputype.h> +#include <asm/exception.h> +#include <asm/sysreg.h> +#include <asm/virt.h> + +#include <dt-bindings/interrupt-controller/apple-aic.h> + +/* + * AIC v1 registers (MMIO) + */ + +#define AIC_INFO 0x0004 +#define AIC_INFO_NR_IRQ GENMASK(15, 0) + +#define AIC_CONFIG 0x0010 + +#define AIC_WHOAMI 0x2000 +#define AIC_EVENT 0x2004 +#define AIC_EVENT_DIE GENMASK(31, 24) +#define AIC_EVENT_TYPE GENMASK(23, 16) +#define AIC_EVENT_NUM GENMASK(15, 0) + +#define AIC_EVENT_TYPE_FIQ 0 /* Software use */ +#define AIC_EVENT_TYPE_IRQ 1 +#define AIC_EVENT_TYPE_IPI 4 +#define AIC_EVENT_IPI_OTHER 1 +#define AIC_EVENT_IPI_SELF 2 + +#define AIC_IPI_SEND 0x2008 +#define AIC_IPI_ACK 0x200c +#define AIC_IPI_MASK_SET 0x2024 +#define AIC_IPI_MASK_CLR 0x2028 + +#define AIC_IPI_SEND_CPU(cpu) BIT(cpu) + +#define AIC_IPI_OTHER BIT(0) +#define AIC_IPI_SELF BIT(31) + +#define AIC_TARGET_CPU 0x3000 + +#define AIC_CPU_IPI_SET(cpu) (0x5008 + ((cpu) << 7)) +#define AIC_CPU_IPI_CLR(cpu) (0x500c + ((cpu) << 7)) +#define AIC_CPU_IPI_MASK_SET(cpu) (0x5024 + ((cpu) << 7)) +#define AIC_CPU_IPI_MASK_CLR(cpu) (0x5028 + ((cpu) << 7)) + +#define AIC_MAX_IRQ 0x400 + +/* + * AIC v2 registers (MMIO) + */ + +#define AIC2_VERSION 0x0000 +#define AIC2_VERSION_VER GENMASK(7, 0) + +#define AIC2_INFO1 0x0004 +#define AIC2_INFO1_NR_IRQ GENMASK(15, 0) +#define AIC2_INFO1_LAST_DIE GENMASK(27, 24) + +#define AIC2_INFO2 0x0008 + +#define AIC2_INFO3 0x000c +#define AIC2_INFO3_MAX_IRQ GENMASK(15, 0) +#define AIC2_INFO3_MAX_DIE GENMASK(27, 24) + +#define AIC2_RESET 0x0010 +#define AIC2_RESET_RESET BIT(0) + +#define AIC2_CONFIG 0x0014 +#define AIC2_CONFIG_ENABLE BIT(0) +#define AIC2_CONFIG_PREFER_PCPU BIT(28) + +#define AIC2_TIMEOUT 0x0028 
+#define AIC2_CLUSTER_PRIO 0x0030 +#define AIC2_DELAY_GROUPS 0x0100 + +#define AIC2_IRQ_CFG 0x2000 + +/* + * AIC2 registers are laid out like this, starting at AIC2_IRQ_CFG: + * + * Repeat for each die: + * IRQ_CFG: u32 * MAX_IRQS + * SW_SET: u32 * (MAX_IRQS / 32) + * SW_CLR: u32 * (MAX_IRQS / 32) + * MASK_SET: u32 * (MAX_IRQS / 32) + * MASK_CLR: u32 * (MAX_IRQS / 32) + * HW_STATE: u32 * (MAX_IRQS / 32) + * + * This is followed by a set of event registers, each 16K page aligned. + * The first one is the AP event register we will use. Unfortunately, + * the actual implemented die count is not specified anywhere in the + * capability registers, so we have to explicitly specify the event + * register as a second reg entry in the device tree to remain + * forward-compatible. + */ + +#define AIC2_IRQ_CFG_TARGET GENMASK(3, 0) +#define AIC2_IRQ_CFG_DELAY_IDX GENMASK(7, 5) + +#define MASK_REG(x) (4 * ((x) >> 5)) +#define MASK_BIT(x) BIT((x) & GENMASK(4, 0)) + +/* + * IMP-DEF sysregs that control FIQ sources + */ + +/* IPI request registers */ +#define SYS_IMP_APL_IPI_RR_LOCAL_EL1 sys_reg(3, 5, 15, 0, 0) +#define SYS_IMP_APL_IPI_RR_GLOBAL_EL1 sys_reg(3, 5, 15, 0, 1) +#define IPI_RR_CPU GENMASK(7, 0) +/* Cluster only used for the GLOBAL register */ +#define IPI_RR_CLUSTER GENMASK(23, 16) +#define IPI_RR_TYPE GENMASK(29, 28) +#define IPI_RR_IMMEDIATE 0 +#define IPI_RR_RETRACT 1 +#define IPI_RR_DEFERRED 2 +#define IPI_RR_NOWAKE 3 + +/* IPI status register */ +#define SYS_IMP_APL_IPI_SR_EL1 sys_reg(3, 5, 15, 1, 1) +#define IPI_SR_PENDING BIT(0) + +/* Guest timer FIQ enable register */ +#define SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2 sys_reg(3, 5, 15, 1, 3) +#define VM_TMR_FIQ_ENABLE_V BIT(0) +#define VM_TMR_FIQ_ENABLE_P BIT(1) + +/* Deferred IPI countdown register */ +#define SYS_IMP_APL_IPI_CR_EL1 sys_reg(3, 5, 15, 3, 1) + +/* Uncore PMC control register */ +#define SYS_IMP_APL_UPMCR0_EL1 sys_reg(3, 7, 15, 0, 4) +#define UPMCR0_IMODE GENMASK(18, 16) +#define UPMCR0_IMODE_OFF 0 +#define UPMCR0_IMODE_AIC 2 +#define UPMCR0_IMODE_HALT 3 +#define UPMCR0_IMODE_FIQ 4 + +/* Uncore PMC status register */ +#define SYS_IMP_APL_UPMSR_EL1 sys_reg(3, 7, 15, 6, 4) +#define UPMSR_IACT BIT(0) + +/* MPIDR fields */ +#define MPIDR_CPU(x) MPIDR_AFFINITY_LEVEL(x, 0) +#define MPIDR_CLUSTER(x) MPIDR_AFFINITY_LEVEL(x, 1) + +#define AIC_IRQ_HWIRQ(die, irq) (FIELD_PREP(AIC_EVENT_DIE, die) | \ + FIELD_PREP(AIC_EVENT_TYPE, AIC_EVENT_TYPE_IRQ) | \ + FIELD_PREP(AIC_EVENT_NUM, irq)) +#define AIC_FIQ_HWIRQ(x) (FIELD_PREP(AIC_EVENT_TYPE, AIC_EVENT_TYPE_FIQ) | \ + FIELD_PREP(AIC_EVENT_NUM, x)) +#define AIC_HWIRQ_IRQ(x) FIELD_GET(AIC_EVENT_NUM, x) +#define AIC_HWIRQ_DIE(x) FIELD_GET(AIC_EVENT_DIE, x) +#define AIC_NR_SWIPI 32 + +/* + * FIQ hwirq index definitions: FIQ sources use the DT binding defines + * directly, except that timers are special. At the irqchip level, the + * two timer types are represented by their access method: _EL0 registers + * or _EL02 registers. In the DT binding, the timers are represented + * by their purpose (HV or guest). This mapping is for when the kernel is + * running at EL2 (with VHE). When the kernel is running at EL1, the + * mapping differs and aic_irq_domain_translate() performs the remapping. 
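+ *
+ * For example, at EL1 a <AIC_FIQ AIC_TMR_GUEST_PHYS ...> specifier is
+ * remapped to AIC_TMR_EL0_PHYS (and GUEST_VIRT to EL0_VIRT), while the
+ * AIC_TMR_HV_* specifiers fail translation with -ENOENT, since the EL2
+ * timer registers are not usable there.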
+ */ +enum fiq_hwirq { + /* Must be ordered as in apple-aic.h */ + AIC_TMR_EL0_PHYS = AIC_TMR_HV_PHYS, + AIC_TMR_EL0_VIRT = AIC_TMR_HV_VIRT, + AIC_TMR_EL02_PHYS = AIC_TMR_GUEST_PHYS, + AIC_TMR_EL02_VIRT = AIC_TMR_GUEST_VIRT, + AIC_CPU_PMU_Effi = AIC_CPU_PMU_E, + AIC_CPU_PMU_Perf = AIC_CPU_PMU_P, + /* No need for this to be discovered from DT */ + AIC_VGIC_MI, + AIC_NR_FIQ +}; + +/* True if UNCORE/UNCORE2 and Sn_... IPI registers are present and used (A11+) */ +static DEFINE_STATIC_KEY_TRUE(use_fast_ipi); +/* True if SYS_IMP_APL_IPI_RR_LOCAL_EL1 exists for local fast IPIs (M1+) */ +static DEFINE_STATIC_KEY_TRUE(use_local_fast_ipi); + +struct aic_info { + int version; + + /* Register offsets */ + u32 event; + u32 target_cpu; + u32 irq_cfg; + u32 sw_set; + u32 sw_clr; + u32 mask_set; + u32 mask_clr; + + u32 die_stride; + + /* Features */ + bool fast_ipi; + bool local_fast_ipi; +}; + +static const struct aic_info aic1_info __initconst = { + .version = 1, + + .event = AIC_EVENT, + .target_cpu = AIC_TARGET_CPU, +}; + +static const struct aic_info aic1_fipi_info __initconst = { + .version = 1, + + .event = AIC_EVENT, + .target_cpu = AIC_TARGET_CPU, + + .fast_ipi = true, +}; + +static const struct aic_info aic1_local_fipi_info __initconst = { + .version = 1, + + .event = AIC_EVENT, + .target_cpu = AIC_TARGET_CPU, + + .fast_ipi = true, + .local_fast_ipi = true, +}; + +static const struct aic_info aic2_info __initconst = { + .version = 2, + + .irq_cfg = AIC2_IRQ_CFG, + + .fast_ipi = true, + .local_fast_ipi = true, +}; + +static const struct of_device_id aic_info_match[] = { + { + .compatible = "apple,t8103-aic", + .data = &aic1_local_fipi_info, + }, + { + .compatible = "apple,t8015-aic", + .data = &aic1_fipi_info, + }, + { + .compatible = "apple,aic", + .data = &aic1_info, + }, + { + .compatible = "apple,aic2", + .data = &aic2_info, + }, + {} +}; + +struct aic_irq_chip { + void __iomem *base; + void __iomem *event; + struct irq_domain *hw_domain; + struct { + cpumask_t aff; + } *fiq_aff[AIC_NR_FIQ]; + + int nr_irq; + int max_irq; + int nr_die; + int max_die; + + struct aic_info info; +}; + +static DEFINE_PER_CPU(uint32_t, aic_fiq_unmasked); + +static struct aic_irq_chip *aic_irqc; + +static void aic_handle_ipi(struct pt_regs *regs); + +static u32 aic_ic_read(struct aic_irq_chip *ic, u32 reg) +{ + return readl_relaxed(ic->base + reg); +} + +static void aic_ic_write(struct aic_irq_chip *ic, u32 reg, u32 val) +{ + writel_relaxed(val, ic->base + reg); +} + +/* + * IRQ irqchip + */ + +static void aic_irq_mask(struct irq_data *d) +{ + irq_hw_number_t hwirq = irqd_to_hwirq(d); + struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d); + + u32 off = AIC_HWIRQ_DIE(hwirq) * ic->info.die_stride; + u32 irq = AIC_HWIRQ_IRQ(hwirq); + + aic_ic_write(ic, ic->info.mask_set + off + MASK_REG(irq), MASK_BIT(irq)); +} + +static void aic_irq_unmask(struct irq_data *d) +{ + irq_hw_number_t hwirq = irqd_to_hwirq(d); + struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d); + + u32 off = AIC_HWIRQ_DIE(hwirq) * ic->info.die_stride; + u32 irq = AIC_HWIRQ_IRQ(hwirq); + + aic_ic_write(ic, ic->info.mask_clr + off + MASK_REG(irq), MASK_BIT(irq)); +} + +static void aic_irq_eoi(struct irq_data *d) +{ + /* + * Reading the interrupt reason automatically acknowledges and masks + * the IRQ, so we just unmask it here if needed. 
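+ * (The ack happens implicitly when aic_handle_irq() reads the event
+ * register, so the handler always runs with the source masked and this
+ * EOI is the point where delivery is re-enabled.)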
+ */ + if (!irqd_irq_masked(d)) + aic_irq_unmask(d); +} + +static void __exception_irq_entry aic_handle_irq(struct pt_regs *regs) +{ + struct aic_irq_chip *ic = aic_irqc; + u32 event, type, irq; + + do { + /* + * We cannot use a relaxed read here, as reads from DMA buffers + * need to be ordered after the IRQ fires. + */ + event = readl(ic->event + ic->info.event); + type = FIELD_GET(AIC_EVENT_TYPE, event); + irq = FIELD_GET(AIC_EVENT_NUM, event); + + if (type == AIC_EVENT_TYPE_IRQ) + generic_handle_domain_irq(aic_irqc->hw_domain, event); + else if (type == AIC_EVENT_TYPE_IPI && irq == 1) + aic_handle_ipi(regs); + else if (event != 0) + pr_err_ratelimited("Unknown IRQ event %d, %d\n", type, irq); + } while (event); + + /* + * vGIC maintenance interrupts end up here too, so we need to check + * for them separately. It should however only trigger when NV is + * in use, and be cleared when coming back from the handler. + */ + if (is_kernel_in_hyp_mode() && + (read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EL2_En) && + read_sysreg_s(SYS_ICH_MISR_EL2) != 0) { + u64 val; + + generic_handle_domain_irq(aic_irqc->hw_domain, + AIC_FIQ_HWIRQ(AIC_VGIC_MI)); + + if (unlikely((read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EL2_En) && + (val = read_sysreg_s(SYS_ICH_MISR_EL2)))) { + pr_err_ratelimited("vGIC IRQ fired and not handled by KVM (MISR=%llx), disabling.\n", + val); + sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EL2_En, 0); + } + } +} + +static int aic_irq_set_affinity(struct irq_data *d, + const struct cpumask *mask_val, bool force) +{ + irq_hw_number_t hwirq = irqd_to_hwirq(d); + struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d); + int cpu; + + BUG_ON(!ic->info.target_cpu); + + if (force) + cpu = cpumask_first(mask_val); + else + cpu = cpumask_any_and(mask_val, cpu_online_mask); + + aic_ic_write(ic, ic->info.target_cpu + AIC_HWIRQ_IRQ(hwirq) * 4, BIT(cpu)); + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + + return IRQ_SET_MASK_OK; +} + +static int aic_irq_set_type(struct irq_data *d, unsigned int type) +{ + /* + * Some IRQs (e.g. MSIs) implicitly have edge semantics, and we don't + * have a way to find out the type of any given IRQ, so just allow both. + */ + return (type == IRQ_TYPE_LEVEL_HIGH || type == IRQ_TYPE_EDGE_RISING) ? 0 : -EINVAL; +} + +static struct irq_chip aic_chip = { + .name = "AIC", + .irq_mask = aic_irq_mask, + .irq_unmask = aic_irq_unmask, + .irq_eoi = aic_irq_eoi, + .irq_set_affinity = aic_irq_set_affinity, + .irq_set_type = aic_irq_set_type, +}; + +static struct irq_chip aic2_chip = { + .name = "AIC2", + .irq_mask = aic_irq_mask, + .irq_unmask = aic_irq_unmask, + .irq_eoi = aic_irq_eoi, + .irq_set_type = aic_irq_set_type, +}; + +/* + * FIQ irqchip + */ + +static unsigned long aic_fiq_get_idx(struct irq_data *d) +{ + return AIC_HWIRQ_IRQ(irqd_to_hwirq(d)); +} + +static void aic_fiq_set_mask(struct irq_data *d) +{ + /* Only the guest timers have real mask bits, unfortunately. 
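+ * For the remaining FIQ sources the mask state is tracked purely in
+ * software, via the per-CPU aic_fiq_unmasked bitmap, and aic_fiq_eoi()
+ * consults that bitmap before re-enabling a source.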
*/ + switch (aic_fiq_get_idx(d)) { + case AIC_TMR_EL02_PHYS: + sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, VM_TMR_FIQ_ENABLE_P, 0); + isb(); + break; + case AIC_TMR_EL02_VIRT: + sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, VM_TMR_FIQ_ENABLE_V, 0); + isb(); + break; + default: + break; + } +} + +static void aic_fiq_clear_mask(struct irq_data *d) +{ + switch (aic_fiq_get_idx(d)) { + case AIC_TMR_EL02_PHYS: + sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, 0, VM_TMR_FIQ_ENABLE_P); + isb(); + break; + case AIC_TMR_EL02_VIRT: + sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, 0, VM_TMR_FIQ_ENABLE_V); + isb(); + break; + default: + break; + } +} + +static void aic_fiq_mask(struct irq_data *d) +{ + aic_fiq_set_mask(d); + __this_cpu_and(aic_fiq_unmasked, ~BIT(aic_fiq_get_idx(d))); +} + +static void aic_fiq_unmask(struct irq_data *d) +{ + aic_fiq_clear_mask(d); + __this_cpu_or(aic_fiq_unmasked, BIT(aic_fiq_get_idx(d))); +} + +static void aic_fiq_eoi(struct irq_data *d) +{ + /* We mask to ack (where we can), so we need to unmask at EOI. */ + if (__this_cpu_read(aic_fiq_unmasked) & BIT(aic_fiq_get_idx(d))) + aic_fiq_clear_mask(d); +} + +#define TIMER_FIRING(x) \ + (((x) & (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_MASK | \ + ARCH_TIMER_CTRL_IT_STAT)) == \ + (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT)) + +static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs) +{ + /* + * It would be really nice if we had a system register that lets us get + * the FIQ source state without having to peek down into sources... + * but such a register does not seem to exist. + * + * So, we have these potential sources to test for: + * - Fast IPIs (not yet used) + * - The 4 timers (CNTP, CNTV for each of HV and guest) + * - Per-core PMCs (not yet supported) + * - Per-cluster uncore PMCs (not yet supported) + * + * Since not dealing with any of these results in a FIQ storm, + * we check for everything here, even things we don't support yet. + */ + + if (static_branch_likely(&use_fast_ipi) && + (read_sysreg_s(SYS_IMP_APL_IPI_SR_EL1) & IPI_SR_PENDING)) + aic_handle_ipi(regs); + + if (TIMER_FIRING(read_sysreg(cntp_ctl_el0))) + generic_handle_domain_irq(aic_irqc->hw_domain, + AIC_FIQ_HWIRQ(AIC_TMR_EL0_PHYS)); + + if (TIMER_FIRING(read_sysreg(cntv_ctl_el0))) + generic_handle_domain_irq(aic_irqc->hw_domain, + AIC_FIQ_HWIRQ(AIC_TMR_EL0_VIRT)); + + if (is_kernel_in_hyp_mode()) { + uint64_t enabled = read_sysreg_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2); + + if ((enabled & VM_TMR_FIQ_ENABLE_P) && + TIMER_FIRING(read_sysreg_s(SYS_CNTP_CTL_EL02))) + generic_handle_domain_irq(aic_irqc->hw_domain, + AIC_FIQ_HWIRQ(AIC_TMR_EL02_PHYS)); + + if ((enabled & VM_TMR_FIQ_ENABLE_V) && + TIMER_FIRING(read_sysreg_s(SYS_CNTV_CTL_EL02))) + generic_handle_domain_irq(aic_irqc->hw_domain, + AIC_FIQ_HWIRQ(AIC_TMR_EL02_VIRT)); + } + + if ((read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & (PMCR0_IMODE | PMCR0_IACT)) == + (FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_FIQ) | PMCR0_IACT)) + generic_handle_domain_irq(aic_irqc->hw_domain, + AIC_FIQ_HWIRQ(AIC_CPU_PMU_P)); + + if (static_branch_likely(&use_fast_ipi) && + (FIELD_GET(UPMCR0_IMODE, read_sysreg_s(SYS_IMP_APL_UPMCR0_EL1)) == UPMCR0_IMODE_FIQ) && + (read_sysreg_s(SYS_IMP_APL_UPMSR_EL1) & UPMSR_IACT)) { + /* Same story with uncore PMCs */ + pr_err_ratelimited("Uncore PMC FIQ fired. 
Masking.\n"); + sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE, + FIELD_PREP(UPMCR0_IMODE, UPMCR0_IMODE_OFF)); + } +} + +static int aic_fiq_set_type(struct irq_data *d, unsigned int type) +{ + return (type == IRQ_TYPE_LEVEL_HIGH) ? 0 : -EINVAL; +} + +static struct irq_chip fiq_chip = { + .name = "AIC-FIQ", + .irq_mask = aic_fiq_mask, + .irq_unmask = aic_fiq_unmask, + .irq_ack = aic_fiq_set_mask, + .irq_eoi = aic_fiq_eoi, + .irq_set_type = aic_fiq_set_type, +}; + +/* + * Main IRQ domain + */ + +static int aic_irq_domain_map(struct irq_domain *id, unsigned int irq, + irq_hw_number_t hw) +{ + struct aic_irq_chip *ic = id->host_data; + u32 type = FIELD_GET(AIC_EVENT_TYPE, hw); + struct irq_chip *chip = &aic_chip; + + if (ic->info.version == 2) + chip = &aic2_chip; + + if (type == AIC_EVENT_TYPE_IRQ) { + irq_domain_set_info(id, irq, hw, chip, id->host_data, + handle_fasteoi_irq, NULL, NULL); + irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq))); + } else { + irq_set_percpu_devid(irq); + irq_domain_set_info(id, irq, hw, &fiq_chip, id->host_data, + handle_percpu_devid_irq, NULL, NULL); + } + + return 0; +} + +static int aic_irq_get_fwspec_info(struct irq_fwspec *fwspec, struct irq_fwspec_info *info) +{ + const struct cpumask *mask; + u32 intid; + + info->flags = 0; + info->affinity = NULL; + + if (fwspec->param[0] != AIC_FIQ) + return 0; + + if (fwspec->param_count == 3) + intid = fwspec->param[1]; + else + intid = fwspec->param[2]; + + if (aic_irqc->fiq_aff[intid]) + mask = &aic_irqc->fiq_aff[intid]->aff; + else + mask = cpu_possible_mask; + + info->affinity = mask; + info->flags = IRQ_FWSPEC_INFO_AFFINITY_VALID; + + return 0; +} + +static int aic_irq_domain_translate(struct irq_domain *id, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int *type) +{ + struct aic_irq_chip *ic = id->host_data; + u32 *args; + u32 die = 0; + + if (fwspec->param_count < 3 || fwspec->param_count > 4 || + !is_of_node(fwspec->fwnode)) + return -EINVAL; + + args = &fwspec->param[1]; + + if (fwspec->param_count == 4) { + die = args[0]; + args++; + } + + switch (fwspec->param[0]) { + case AIC_IRQ: + if (die >= ic->nr_die) + return -EINVAL; + if (args[0] >= ic->nr_irq) + return -EINVAL; + *hwirq = AIC_IRQ_HWIRQ(die, args[0]); + break; + case AIC_FIQ: + if (die != 0) + return -EINVAL; + if (args[0] >= AIC_NR_FIQ) + return -EINVAL; + *hwirq = AIC_FIQ_HWIRQ(args[0]); + + /* + * In EL1 the non-redirected registers are the guest's, + * not EL2's, so remap the hwirqs to match. 
+ */ + if (!is_kernel_in_hyp_mode()) { + switch (args[0]) { + case AIC_TMR_GUEST_PHYS: + *hwirq = AIC_FIQ_HWIRQ(AIC_TMR_EL0_PHYS); + break; + case AIC_TMR_GUEST_VIRT: + *hwirq = AIC_FIQ_HWIRQ(AIC_TMR_EL0_VIRT); + break; + case AIC_TMR_HV_PHYS: + case AIC_TMR_HV_VIRT: + return -ENOENT; + default: + break; + } + } + + /* Merge the two PMUs on a single interrupt */ + if (*hwirq == AIC_CPU_PMU_E) + *hwirq = AIC_CPU_PMU_P; + break; + default: + return -EINVAL; + } + + *type = args[1] & IRQ_TYPE_SENSE_MASK; + + return 0; +} + +static int aic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + unsigned int type = IRQ_TYPE_NONE; + struct irq_fwspec *fwspec = arg; + irq_hw_number_t hwirq; + int i, ret; + + ret = aic_irq_domain_translate(domain, fwspec, &hwirq, &type); + if (ret) + return ret; + + for (i = 0; i < nr_irqs; i++) { + ret = aic_irq_domain_map(domain, virq + i, hwirq + i); + if (ret) + return ret; + } + + return 0; +} + +static void aic_irq_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + int i; + + for (i = 0; i < nr_irqs; i++) { + struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); + + irq_set_handler(virq + i, NULL); + irq_domain_reset_irq_data(d); + } +} + +static const struct irq_domain_ops aic_irq_domain_ops = { + .translate = aic_irq_domain_translate, + .alloc = aic_irq_domain_alloc, + .free = aic_irq_domain_free, + .get_fwspec_info = aic_irq_get_fwspec_info, +}; + +/* + * IPI irqchip + */ + +static void aic_ipi_send_fast(int cpu) +{ + u64 mpidr = cpu_logical_map(cpu); + u64 my_mpidr = read_cpuid_mpidr(); + u64 cluster = MPIDR_CLUSTER(mpidr); + u64 idx = MPIDR_CPU(mpidr); + + if (static_branch_likely(&use_local_fast_ipi) && MPIDR_CLUSTER(my_mpidr) == cluster) { + write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx), SYS_IMP_APL_IPI_RR_LOCAL_EL1); + } else { + write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx) | FIELD_PREP(IPI_RR_CLUSTER, cluster), + SYS_IMP_APL_IPI_RR_GLOBAL_EL1); + } + isb(); +} + +static void aic_handle_ipi(struct pt_regs *regs) +{ + /* + * Ack the IPI. We need to order this after the AIC event read, but + * that is enforced by normal MMIO ordering guarantees. + * + * For the Fast IPI case, this needs to be ordered before the vIPI + * handling below, so we need to isb(); + */ + if (static_branch_likely(&use_fast_ipi)) { + write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1); + isb(); + } else { + aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_OTHER); + } + + ipi_mux_process(); + + /* + * No ordering needed here; at worst this just changes the timing of + * when the next IPI will be delivered. 
+ */ + if (!static_branch_likely(&use_fast_ipi)) + aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER); +} + +static void aic_ipi_send_single(unsigned int cpu) +{ + if (static_branch_likely(&use_fast_ipi)) + aic_ipi_send_fast(cpu); + else + aic_ic_write(aic_irqc, AIC_IPI_SEND, AIC_IPI_SEND_CPU(cpu)); +} + +static int __init aic_init_smp(struct aic_irq_chip *irqc, struct device_node *node) +{ + int base_ipi; + + base_ipi = ipi_mux_create(AIC_NR_SWIPI, aic_ipi_send_single); + if (WARN_ON(base_ipi <= 0)) + return -ENODEV; + + set_smp_ipi_range(base_ipi, AIC_NR_SWIPI); + + return 0; +} + +static int aic_init_cpu(unsigned int cpu) +{ + /* Mask all hard-wired per-CPU IRQ/FIQ sources */ + + /* Pending Fast IPI FIQs */ + if (static_branch_likely(&use_fast_ipi)) + write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1); + + /* Timer FIQs */ + sysreg_clear_set(cntp_ctl_el0, 0, ARCH_TIMER_CTRL_IT_MASK); + sysreg_clear_set(cntv_ctl_el0, 0, ARCH_TIMER_CTRL_IT_MASK); + + /* EL2-only (VHE mode) IRQ sources */ + if (is_kernel_in_hyp_mode()) { + /* Guest timers */ + sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, + VM_TMR_FIQ_ENABLE_V | VM_TMR_FIQ_ENABLE_P, 0); + + /* vGIC maintenance IRQ */ + sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EL2_En, 0); + } + + /* PMC FIQ */ + sysreg_clear_set_s(SYS_IMP_APL_PMCR0_EL1, PMCR0_IMODE | PMCR0_IACT, + FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_OFF)); + + /* Uncore PMC FIQ */ + if (static_branch_likely(&use_fast_ipi)) { + sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE, + FIELD_PREP(UPMCR0_IMODE, UPMCR0_IMODE_OFF)); + } + + /* Commit all of the above */ + isb(); + + if (aic_irqc->info.version == 1) { + /* + * Make sure the kernel's idea of logical CPU order is the same as AIC's + * If we ever end up with a mismatch here, we will have to introduce + * a mapping table similar to what other irqchip drivers do. + */ + WARN_ON(aic_ic_read(aic_irqc, AIC_WHOAMI) != smp_processor_id()); + + /* + * Always keep IPIs unmasked at the hardware level (except auto-masking + * by AIC during processing). We manage masks at the vIPI level. + * These registers only exist on AICv1, AICv2 always uses fast IPIs. 
+ */ + aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_SELF | AIC_IPI_OTHER); + if (static_branch_likely(&use_fast_ipi)) { + aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF | AIC_IPI_OTHER); + } else { + aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF); + aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER); + } + } + + /* Initialize the local mask state */ + __this_cpu_write(aic_fiq_unmasked, 0); + + return 0; +} + +static struct gic_kvm_info vgic_info __initdata = { + .type = GIC_V3, + .no_maint_irq_mask = true, + .no_hw_deactivation = true, +}; + +static void build_fiq_affinity(struct aic_irq_chip *ic, struct device_node *aff) +{ + int i, n; + u32 fiq; + + if (of_property_read_u32(aff, "apple,fiq-index", &fiq) || + WARN_ON(fiq >= AIC_NR_FIQ) || ic->fiq_aff[fiq]) + return; + + n = of_property_count_elems_of_size(aff, "cpus", sizeof(u32)); + if (WARN_ON(n < 0)) + return; + + ic->fiq_aff[fiq] = kzalloc(sizeof(*ic->fiq_aff[fiq]), GFP_KERNEL); + if (!ic->fiq_aff[fiq]) + return; + + for (i = 0; i < n; i++) { + struct device_node *cpu_node; + u32 cpu_phandle; + int cpu; + + if (of_property_read_u32_index(aff, "cpus", i, &cpu_phandle)) + continue; + + cpu_node = of_find_node_by_phandle(cpu_phandle); + if (WARN_ON(!cpu_node)) + continue; + + cpu = of_cpu_node_to_id(cpu_node); + of_node_put(cpu_node); + if (WARN_ON(cpu < 0)) + continue; + + cpumask_set_cpu(cpu, &ic->fiq_aff[fiq]->aff); + } +} + +static int __init aic_of_ic_init(struct device_node *node, struct device_node *parent) +{ + int i, die; + u32 off, start_off; + void __iomem *regs; + struct aic_irq_chip *irqc; + struct device_node *affs; + const struct of_device_id *match; + + regs = of_iomap(node, 0); + if (WARN_ON(!regs)) + return -EIO; + + irqc = kzalloc(sizeof(*irqc), GFP_KERNEL); + if (!irqc) { + iounmap(regs); + return -ENOMEM; + } + + irqc->base = regs; + + match = of_match_node(aic_info_match, node); + if (!match) + goto err_unmap; + + irqc->info = *(struct aic_info *)match->data; + + aic_irqc = irqc; + + switch (irqc->info.version) { + case 1: { + u32 info; + + info = aic_ic_read(irqc, AIC_INFO); + irqc->nr_irq = FIELD_GET(AIC_INFO_NR_IRQ, info); + irqc->max_irq = AIC_MAX_IRQ; + irqc->nr_die = irqc->max_die = 1; + + off = start_off = irqc->info.target_cpu; + off += sizeof(u32) * irqc->max_irq; /* TARGET_CPU */ + + irqc->event = irqc->base; + + break; + } + case 2: { + u32 info1, info3; + + info1 = aic_ic_read(irqc, AIC2_INFO1); + info3 = aic_ic_read(irqc, AIC2_INFO3); + + irqc->nr_irq = FIELD_GET(AIC2_INFO1_NR_IRQ, info1); + irqc->max_irq = FIELD_GET(AIC2_INFO3_MAX_IRQ, info3); + irqc->nr_die = FIELD_GET(AIC2_INFO1_LAST_DIE, info1) + 1; + irqc->max_die = FIELD_GET(AIC2_INFO3_MAX_DIE, info3); + + off = start_off = irqc->info.irq_cfg; + off += sizeof(u32) * irqc->max_irq; /* IRQ_CFG */ + + irqc->event = of_iomap(node, 1); + if (WARN_ON(!irqc->event)) + goto err_unmap; + + break; + } + } + + irqc->info.sw_set = off; + off += sizeof(u32) * (irqc->max_irq >> 5); /* SW_SET */ + irqc->info.sw_clr = off; + off += sizeof(u32) * (irqc->max_irq >> 5); /* SW_CLR */ + irqc->info.mask_set = off; + off += sizeof(u32) * (irqc->max_irq >> 5); /* MASK_SET */ + irqc->info.mask_clr = off; + off += sizeof(u32) * (irqc->max_irq >> 5); /* MASK_CLR */ + off += sizeof(u32) * (irqc->max_irq >> 5); /* HW_STATE */ + + if (!irqc->info.fast_ipi) + static_branch_disable(&use_fast_ipi); + + if (!irqc->info.local_fast_ipi) + static_branch_disable(&use_local_fast_ipi); + + irqc->info.die_stride = off - start_off; + + irqc->hw_domain = 
irq_domain_create_tree(of_fwnode_handle(node), + &aic_irq_domain_ops, irqc); + if (WARN_ON(!irqc->hw_domain)) + goto err_unmap; + + irq_domain_update_bus_token(irqc->hw_domain, DOMAIN_BUS_WIRED); + + if (aic_init_smp(irqc, node)) + goto err_remove_domain; + + affs = of_get_child_by_name(node, "affinities"); + if (affs) { + struct device_node *chld; + + for_each_child_of_node(affs, chld) + build_fiq_affinity(irqc, chld); + } + of_node_put(affs); + + set_handle_irq(aic_handle_irq); + set_handle_fiq(aic_handle_fiq); + + off = 0; + for (die = 0; die < irqc->nr_die; die++) { + for (i = 0; i < BITS_TO_U32(irqc->nr_irq); i++) + aic_ic_write(irqc, irqc->info.mask_set + off + i * 4, U32_MAX); + for (i = 0; i < BITS_TO_U32(irqc->nr_irq); i++) + aic_ic_write(irqc, irqc->info.sw_clr + off + i * 4, U32_MAX); + if (irqc->info.target_cpu) + for (i = 0; i < irqc->nr_irq; i++) + aic_ic_write(irqc, irqc->info.target_cpu + off + i * 4, 1); + off += irqc->info.die_stride; + } + + if (irqc->info.version == 2) { + u32 config = aic_ic_read(irqc, AIC2_CONFIG); + + config |= AIC2_CONFIG_ENABLE; + aic_ic_write(irqc, AIC2_CONFIG, config); + } + + if (!is_kernel_in_hyp_mode()) + pr_info("Kernel running in EL1, mapping interrupts"); + + if (static_branch_likely(&use_fast_ipi)) + pr_info("Using Fast IPIs"); + + cpuhp_setup_state(CPUHP_AP_IRQ_APPLE_AIC_STARTING, + "irqchip/apple-aic/ipi:starting", + aic_init_cpu, NULL); + + if (is_kernel_in_hyp_mode()) { + struct irq_fwspec mi = { + .fwnode = of_fwnode_handle(node), + .param_count = 3, + .param = { + [0] = AIC_FIQ, /* This is a lie */ + [1] = AIC_VGIC_MI, + [2] = IRQ_TYPE_LEVEL_HIGH, + }, + }; + + vgic_info.maint_irq = irq_create_fwspec_mapping(&mi); + WARN_ON(!vgic_info.maint_irq); + } + + vgic_set_kvm_info(&vgic_info); + + pr_info("Initialized with %d/%d IRQs * %d/%d die(s), %d FIQs, %d vIPIs", + irqc->nr_irq, irqc->max_irq, irqc->nr_die, irqc->max_die, AIC_NR_FIQ, AIC_NR_SWIPI); + + return 0; + +err_remove_domain: + irq_domain_remove(irqc->hw_domain); +err_unmap: + if (irqc->event && irqc->event != irqc->base) + iounmap(irqc->event); + iounmap(irqc->base); + kfree(irqc); + return -ENODEV; +} + +IRQCHIP_DECLARE(apple_aic, "apple,aic", aic_of_ic_init); +IRQCHIP_DECLARE(apple_aic2, "apple,aic2", aic_of_ic_init); diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c index c9bdc5221b82..a4d03a2d1569 100644 --- a/drivers/irqchip/irq-armada-370-xp.c +++ b/drivers/irqchip/irq-armada-370-xp.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Marvell Armada 370 and Armada XP SoC IRQ handling * @@ -7,12 +8,11 @@ * Gregory CLEMENT <gregory.clement@free-electrons.com> * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> * Ben Dooks <ben.dooks@codethink.co.uk> - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. 
*/ +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/err.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> @@ -20,6 +20,7 @@ #include <linux/interrupt.h> #include <linux/irqchip.h> #include <linux/irqchip/chained_irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/cpu.h> #include <linux/io.h> #include <linux/of_address.h> @@ -29,6 +30,7 @@ #include <linux/slab.h> #include <linux/syscore_ops.h> #include <linux/msi.h> +#include <linux/types.h> #include <asm/mach/arch.h> #include <asm/exception.h> #include <asm/smp_plat.h> @@ -64,19 +66,17 @@ * device * * The "global interrupt mask/unmask" is modified using the - * ARMADA_370_XP_INT_SET_ENABLE_OFFS and - * ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS registers, which are relative - * to "main_int_base". + * MPIC_INT_SET_ENABLE and MPIC_INT_CLEAR_ENABLE + * registers, which are relative to "mpic->base". * - * The "per-CPU mask/unmask" is modified using the - * ARMADA_370_XP_INT_SET_MASK_OFFS and - * ARMADA_370_XP_INT_CLEAR_MASK_OFFS registers, which are relative to - * "per_cpu_int_base". This base address points to a special address, + * The "per-CPU mask/unmask" is modified using the MPIC_INT_SET_MASK + * and MPIC_INT_CLEAR_MASK registers, which are relative to + * "mpic->per_cpu". This base address points to a special address, * which automatically accesses the registers of the current CPU. * * The per-CPU mask/unmask can also be adjusted using the global - * per-interrupt ARMADA_370_XP_INT_SOURCE_CTL register, which we use - * to configure interrupt affinity. + * per-interrupt MPIC_INT_SOURCE_CTL register, which we use to + * configure interrupt affinity. * * Due to this model, all interrupts need to be mask/unmasked at two * different levels: at the global level and at the per-CPU level. @@ -90,9 +90,8 @@ * the current CPU, running the ->map() code. This allows to have * the interrupt unmasked at this level in non-SMP * configurations. In SMP configurations, the ->set_affinity() - * callback is called, which using the - * ARMADA_370_XP_INT_SOURCE_CTL() readjusts the per-CPU mask/unmask - * for the interrupt. + * callback is called, which using the MPIC_INT_SOURCE_CTL() + * readjusts the per-CPU mask/unmask for the interrupt. * * The ->mask() and ->unmask() operations only mask/unmask the * interrupt at the "global" level. @@ -114,54 +113,95 @@ * at the per-CPU level. 
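 *
 * For example, a global (shared) interrupt N is enabled with
 *
 *   writel(N, mpic->base + MPIC_INT_SET_ENABLE);
 *
 * and additionally unmasked on each CPU with
 *
 *   writel(N, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
 *
 * the latter being done at ->map() time for non-per-CPU interrupts.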
*/ -/* Registers relative to main_int_base */ -#define ARMADA_370_XP_INT_CONTROL (0x00) -#define ARMADA_370_XP_SW_TRIG_INT_OFFS (0x04) -#define ARMADA_370_XP_INT_SET_ENABLE_OFFS (0x30) -#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34) -#define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4) -#define ARMADA_370_XP_INT_SOURCE_CPU_MASK 0xF -#define ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid) ((BIT(0) | BIT(8)) << cpuid) - -/* Registers relative to per_cpu_int_base */ -#define ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS (0x08) -#define ARMADA_370_XP_IN_DRBEL_MSK_OFFS (0x0c) -#define ARMADA_375_PPI_CAUSE (0x10) -#define ARMADA_370_XP_CPU_INTACK_OFFS (0x44) -#define ARMADA_370_XP_INT_SET_MASK_OFFS (0x48) -#define ARMADA_370_XP_INT_CLEAR_MASK_OFFS (0x4C) -#define ARMADA_370_XP_INT_FABRIC_MASK_OFFS (0x54) -#define ARMADA_370_XP_INT_CAUSE_PERF(cpu) (1 << cpu) - -#define ARMADA_370_XP_MAX_PER_CPU_IRQS (28) - -#define IPI_DOORBELL_START (0) -#define IPI_DOORBELL_END (8) -#define IPI_DOORBELL_MASK 0xFF -#define PCI_MSI_DOORBELL_START (16) -#define PCI_MSI_DOORBELL_NR (16) -#define PCI_MSI_DOORBELL_END (32) -#define PCI_MSI_DOORBELL_MASK 0xFFFF0000 - -static void __iomem *per_cpu_int_base; -static void __iomem *main_int_base; -static struct irq_domain *armada_370_xp_mpic_domain; -static u32 doorbell_mask_reg; -static int parent_irq; +/* Registers relative to mpic->base */ +#define MPIC_INT_CONTROL 0x00 +#define MPIC_INT_CONTROL_NUMINT_MASK GENMASK(12, 2) +#define MPIC_SW_TRIG_INT 0x04 +#define MPIC_INT_SET_ENABLE 0x30 +#define MPIC_INT_CLEAR_ENABLE 0x34 +#define MPIC_INT_SOURCE_CTL(hwirq) (0x100 + (hwirq) * 4) +#define MPIC_INT_SOURCE_CPU_MASK GENMASK(3, 0) +#define MPIC_INT_IRQ_FIQ_MASK(cpuid) ((BIT(0) | BIT(8)) << (cpuid)) + +/* Registers relative to mpic->per_cpu */ +#define MPIC_IN_DRBEL_CAUSE 0x08 +#define MPIC_IN_DRBEL_MASK 0x0c +#define MPIC_PPI_CAUSE 0x10 +#define MPIC_CPU_INTACK 0x44 +#define MPIC_CPU_INTACK_IID_MASK GENMASK(9, 0) +#define MPIC_INT_SET_MASK 0x48 +#define MPIC_INT_CLEAR_MASK 0x4C +#define MPIC_INT_FABRIC_MASK 0x54 +#define MPIC_INT_CAUSE_PERF(cpu) BIT(cpu) + +#define MPIC_PER_CPU_IRQS_NR 29 + +/* IPI and MSI interrupt definitions for IPI platforms */ +#define IPI_DOORBELL_NR 8 +#define IPI_DOORBELL_MASK GENMASK(7, 0) +#define PCI_MSI_DOORBELL_START 16 +#define PCI_MSI_DOORBELL_NR 16 +#define PCI_MSI_DOORBELL_MASK GENMASK(31, 16) + +/* MSI interrupt definitions for non-IPI platforms */ +#define PCI_MSI_FULL_DOORBELL_START 0 +#define PCI_MSI_FULL_DOORBELL_NR 32 +#define PCI_MSI_FULL_DOORBELL_MASK GENMASK(31, 0) +#define PCI_MSI_FULL_DOORBELL_SRC0_MASK GENMASK(15, 0) +#define PCI_MSI_FULL_DOORBELL_SRC1_MASK GENMASK(31, 16) + +/** + * struct mpic - MPIC private data structure + * @base: MPIC registers base address + * @per_cpu: per-CPU registers base address + * @parent_irq: parent IRQ if MPIC is not top-level interrupt controller + * @domain: MPIC main interrupt domain + * @ipi_domain: IPI domain + * @msi_inner_domain: MSI inner domain + * @msi_used: bitmap of used MSI numbers + * @msi_lock: mutex serializing access to @msi_used + * @msi_doorbell_addr: physical address of MSI doorbell register + * @msi_doorbell_mask: mask of available doorbell bits for MSIs (either PCI_MSI_DOORBELL_MASK or + * PCI_MSI_FULL_DOORBELL_MASK) + * @msi_doorbell_start: first set bit in @msi_doorbell_mask + * @msi_doorbell_size: number of set bits in @msi_doorbell_mask + * @doorbell_mask: doorbell mask of MSIs and IPIs, stored on suspend, restored on resume + */ +struct mpic { + void __iomem *base; + void __iomem 
*per_cpu; + int parent_irq; + struct irq_domain *domain; +#ifdef CONFIG_SMP + struct irq_domain *ipi_domain; +#endif #ifdef CONFIG_PCI_MSI -static struct irq_domain *armada_370_xp_msi_domain; -static struct irq_domain *armada_370_xp_msi_inner_domain; -static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR); -static DEFINE_MUTEX(msi_used_lock); -static phys_addr_t msi_doorbell_addr; + struct irq_domain *msi_inner_domain; + DECLARE_BITMAP(msi_used, PCI_MSI_FULL_DOORBELL_NR); + struct mutex msi_lock; + phys_addr_t msi_doorbell_addr; + u32 msi_doorbell_mask; + unsigned int msi_doorbell_start, msi_doorbell_size; #endif + u32 doorbell_mask; +}; + +static struct mpic *mpic_data __ro_after_init; -static inline bool is_percpu_irq(irq_hw_number_t irq) +static inline bool mpic_is_ipi_available(struct mpic *mpic) { - if (irq <= ARMADA_370_XP_MAX_PER_CPU_IRQS) - return true; + /* + * We distinguish IPI availability in the IC by the IC not having a + * parent irq defined. If a parent irq is defined, there is a parent + * interrupt controller (e.g. GIC) that takes care of inter-processor + * interrupts. + */ + return mpic->parent_irq <= 0; +} - return false; +static inline bool mpic_is_percpu_irq(irq_hw_number_t hwirq) +{ + return hwirq < MPIC_PER_CPU_IRQS_NR; } /* @@ -169,532 +209,695 @@ static inline bool is_percpu_irq(irq_hw_number_t irq) * For shared global interrupts, mask/unmask global enable bit * For CPU interrupts, mask/unmask the calling CPU's bit */ -static void armada_370_xp_irq_mask(struct irq_data *d) +static void mpic_irq_mask(struct irq_data *d) { + struct mpic *mpic = irq_data_get_irq_chip_data(d); irq_hw_number_t hwirq = irqd_to_hwirq(d); - if (!is_percpu_irq(hwirq)) - writel(hwirq, main_int_base + - ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS); + if (!mpic_is_percpu_irq(hwirq)) + writel(hwirq, mpic->base + MPIC_INT_CLEAR_ENABLE); else - writel(hwirq, per_cpu_int_base + - ARMADA_370_XP_INT_SET_MASK_OFFS); + writel(hwirq, mpic->per_cpu + MPIC_INT_SET_MASK); } -static void armada_370_xp_irq_unmask(struct irq_data *d) +static void mpic_irq_unmask(struct irq_data *d) { + struct mpic *mpic = irq_data_get_irq_chip_data(d); irq_hw_number_t hwirq = irqd_to_hwirq(d); - if (!is_percpu_irq(hwirq)) - writel(hwirq, main_int_base + - ARMADA_370_XP_INT_SET_ENABLE_OFFS); + if (!mpic_is_percpu_irq(hwirq)) + writel(hwirq, mpic->base + MPIC_INT_SET_ENABLE); else - writel(hwirq, per_cpu_int_base + - ARMADA_370_XP_INT_CLEAR_MASK_OFFS); + writel(hwirq, mpic->per_cpu + MPIC_INT_CLEAR_MASK); } #ifdef CONFIG_PCI_MSI -static struct irq_chip armada_370_xp_msi_irq_chip = { - .name = "MPIC MSI", - .irq_mask = pci_msi_mask_irq, - .irq_unmask = pci_msi_unmask_irq, -}; - -static struct msi_domain_info armada_370_xp_msi_domain_info = { - .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX), - .chip = &armada_370_xp_msi_irq_chip, -}; - -static void armada_370_xp_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) +static void mpic_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) { - msg->address_lo = lower_32_bits(msi_doorbell_addr); - msg->address_hi = upper_32_bits(msi_doorbell_addr); - msg->data = 0xf00 | (data->hwirq + PCI_MSI_DOORBELL_START); + unsigned int cpu = cpumask_first(irq_data_get_effective_affinity_mask(d)); + struct mpic *mpic = irq_data_get_irq_chip_data(d); + + msg->address_lo = lower_32_bits(mpic->msi_doorbell_addr); + msg->address_hi = upper_32_bits(mpic->msi_doorbell_addr); + msg->data = BIT(cpu + 8) | (d->hwirq + 
mpic->msi_doorbell_start); } -static int armada_370_xp_msi_set_affinity(struct irq_data *irq_data, - const struct cpumask *mask, bool force) +static int mpic_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force) { - return -EINVAL; + unsigned int cpu; + + if (!force) + cpu = cpumask_any_and(mask, cpu_online_mask); + else + cpu = cpumask_first(mask); + + if (cpu >= nr_cpu_ids) + return -EINVAL; + + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + + return IRQ_SET_MASK_OK; } -static struct irq_chip armada_370_xp_msi_bottom_irq_chip = { +static struct irq_chip mpic_msi_bottom_irq_chip = { .name = "MPIC MSI", - .irq_compose_msi_msg = armada_370_xp_compose_msi_msg, - .irq_set_affinity = armada_370_xp_msi_set_affinity, + .irq_compose_msi_msg = mpic_compose_msi_msg, + .irq_set_affinity = mpic_msi_set_affinity, }; -static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq, - unsigned int nr_irqs, void *args) +static int mpic_msi_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, + void *args) { - int hwirq, i; + struct mpic *mpic = domain->host_data; + int hwirq; - mutex_lock(&msi_used_lock); + mutex_lock(&mpic->msi_lock); + hwirq = bitmap_find_free_region(mpic->msi_used, mpic->msi_doorbell_size, + order_base_2(nr_irqs)); + mutex_unlock(&mpic->msi_lock); - hwirq = bitmap_find_next_zero_area(msi_used, PCI_MSI_DOORBELL_NR, - 0, nr_irqs, 0); - if (hwirq >= PCI_MSI_DOORBELL_NR) { - mutex_unlock(&msi_used_lock); + if (hwirq < 0) return -ENOSPC; - } - bitmap_set(msi_used, hwirq, nr_irqs); - mutex_unlock(&msi_used_lock); - - for (i = 0; i < nr_irqs; i++) { + for (unsigned int i = 0; i < nr_irqs; i++) { irq_domain_set_info(domain, virq + i, hwirq + i, - &armada_370_xp_msi_bottom_irq_chip, + &mpic_msi_bottom_irq_chip, domain->host_data, handle_simple_irq, NULL, NULL); } - return hwirq; + return 0; } -static void armada_370_xp_msi_free(struct irq_domain *domain, - unsigned int virq, unsigned int nr_irqs) +static void mpic_msi_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct irq_data *d = irq_domain_get_irq_data(domain, virq); + struct mpic *mpic = domain->host_data; - mutex_lock(&msi_used_lock); - bitmap_clear(msi_used, d->hwirq, nr_irqs); - mutex_unlock(&msi_used_lock); + mutex_lock(&mpic->msi_lock); + bitmap_release_region(mpic->msi_used, d->hwirq, order_base_2(nr_irqs)); + mutex_unlock(&mpic->msi_lock); } -static const struct irq_domain_ops armada_370_xp_msi_domain_ops = { - .alloc = armada_370_xp_msi_alloc, - .free = armada_370_xp_msi_free, +static const struct irq_domain_ops mpic_msi_domain_ops = { + .select = msi_lib_irq_domain_select, + .alloc = mpic_msi_alloc, + .free = mpic_msi_free, }; -static int armada_370_xp_msi_init(struct device_node *node, - phys_addr_t main_int_phys_base) +static void mpic_msi_reenable_percpu(struct mpic *mpic) { u32 reg; - msi_doorbell_addr = main_int_phys_base + - ARMADA_370_XP_SW_TRIG_INT_OFFS; + /* Enable MSI doorbell mask and combined cpu local interrupt */ + reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK); + reg |= mpic->msi_doorbell_mask; + writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK); - armada_370_xp_msi_inner_domain = - irq_domain_add_linear(NULL, PCI_MSI_DOORBELL_NR, - &armada_370_xp_msi_domain_ops, NULL); - if (!armada_370_xp_msi_inner_domain) - return -ENOMEM; + /* Unmask local doorbell interrupt */ + writel(1, mpic->per_cpu + MPIC_INT_CLEAR_MASK); +} - armada_370_xp_msi_domain = - pci_msi_create_irq_domain(of_node_to_fwnode(node), - 
&armada_370_xp_msi_domain_info, - armada_370_xp_msi_inner_domain); - if (!armada_370_xp_msi_domain) { - irq_domain_remove(armada_370_xp_msi_inner_domain); - return -ENOMEM; +#define MPIC_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS) +#define MPIC_MSI_FLAGS_SUPPORTED (MSI_FLAG_MULTI_PCI_MSI | \ + MSI_FLAG_PCI_MSIX | \ + MSI_GENERIC_FLAGS_MASK) + +static const struct msi_parent_ops mpic_msi_parent_ops = { + .required_flags = MPIC_MSI_FLAGS_REQUIRED, + .supported_flags = MPIC_MSI_FLAGS_SUPPORTED, + .bus_select_token = DOMAIN_BUS_NEXUS, + .bus_select_mask = MATCH_PCI_MSI, + .prefix = "MPIC-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, +}; + +static int __init mpic_msi_init(struct mpic *mpic, struct device_node *node, + phys_addr_t main_int_phys_base) +{ + mpic->msi_doorbell_addr = main_int_phys_base + MPIC_SW_TRIG_INT; + + mutex_init(&mpic->msi_lock); + + if (mpic_is_ipi_available(mpic)) { + mpic->msi_doorbell_start = PCI_MSI_DOORBELL_START; + mpic->msi_doorbell_size = PCI_MSI_DOORBELL_NR; + mpic->msi_doorbell_mask = PCI_MSI_DOORBELL_MASK; + } else { + mpic->msi_doorbell_start = PCI_MSI_FULL_DOORBELL_START; + mpic->msi_doorbell_size = PCI_MSI_FULL_DOORBELL_NR; + mpic->msi_doorbell_mask = PCI_MSI_FULL_DOORBELL_MASK; } - reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS) - | PCI_MSI_DOORBELL_MASK; + struct irq_domain_info info = { + .fwnode = of_fwnode_handle(node), + .ops = &mpic_msi_domain_ops, + .host_data = mpic, + .size = mpic->msi_doorbell_size, + }; - writel(reg, per_cpu_int_base + - ARMADA_370_XP_IN_DRBEL_MSK_OFFS); + mpic->msi_inner_domain = msi_create_parent_irq_domain(&info, &mpic_msi_parent_ops); + if (!mpic->msi_inner_domain) + return -ENOMEM; - /* Unmask IPI interrupt */ - writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); + mpic_msi_reenable_percpu(mpic); + + /* Unmask low 16 MSI irqs on non-IPI platforms */ + if (!mpic_is_ipi_available(mpic)) + writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK); return 0; } #else -static inline int armada_370_xp_msi_init(struct device_node *node, - phys_addr_t main_int_phys_base) +static __maybe_unused void mpic_msi_reenable_percpu(struct mpic *mpic) {} + +static inline int mpic_msi_init(struct mpic *mpic, struct device_node *node, + phys_addr_t main_int_phys_base) { return 0; } #endif +static void mpic_perf_init(struct mpic *mpic) +{ + u32 cpuid; + + /* + * This Performance Counter Overflow interrupt is specific for + * Armada 370 and XP. It is not available on Armada 375, 38x and 39x. 
+ */ + if (!of_machine_is_compatible("marvell,armada-370-xp")) + return; + + cpuid = cpu_logical_map(smp_processor_id()); + + /* Enable Performance Counter Overflow interrupts */ + writel(MPIC_INT_CAUSE_PERF(cpuid), mpic->per_cpu + MPIC_INT_FABRIC_MASK); +} + #ifdef CONFIG_SMP -static DEFINE_RAW_SPINLOCK(irq_controller_lock); +static void mpic_ipi_mask(struct irq_data *d) +{ + struct mpic *mpic = irq_data_get_irq_chip_data(d); + u32 reg; -static int armada_xp_set_affinity(struct irq_data *d, - const struct cpumask *mask_val, bool force) + reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK); + reg &= ~BIT(d->hwirq); + writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK); +} + +static void mpic_ipi_unmask(struct irq_data *d) { - irq_hw_number_t hwirq = irqd_to_hwirq(d); - unsigned long reg, mask; - int cpu; + struct mpic *mpic = irq_data_get_irq_chip_data(d); + u32 reg; - /* Select a single core from the affinity mask which is online */ - cpu = cpumask_any_and(mask_val, cpu_online_mask); - mask = 1UL << cpu_logical_map(cpu); + reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK); + reg |= BIT(d->hwirq); + writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK); +} - raw_spin_lock(&irq_controller_lock); - reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq)); - reg = (reg & (~ARMADA_370_XP_INT_SOURCE_CPU_MASK)) | mask; - writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq)); - raw_spin_unlock(&irq_controller_lock); +static void mpic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask) +{ + struct mpic *mpic = irq_data_get_irq_chip_data(d); + unsigned int cpu; + u32 map = 0; - irq_data_update_effective_affinity(d, cpumask_of(cpu)); + /* Convert our logical CPU mask into a physical one. */ + for_each_cpu(cpu, mask) + map |= BIT(cpu_logical_map(cpu)); - return IRQ_SET_MASK_OK; + /* + * Ensure that stores to Normal memory are visible to the + * other CPUs before issuing the IPI. 
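+ * The doorbell write below encodes the physical CPU map in bits 15:8
+ * and the IPI number in bits 7:0 of MPIC_SW_TRIG_INT; e.g. a value of
+ * (BIT(1) << 8) | 3 raises IPI 3 on the CPU with physical id 1.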
+ */ + dsb(); + + /* submit softirq */ + writel((map << 8) | d->hwirq, mpic->base + MPIC_SW_TRIG_INT); } -#endif -static struct irq_chip armada_370_xp_irq_chip = { - .name = "MPIC", - .irq_mask = armada_370_xp_irq_mask, - .irq_mask_ack = armada_370_xp_irq_mask, - .irq_unmask = armada_370_xp_irq_unmask, -#ifdef CONFIG_SMP - .irq_set_affinity = armada_xp_set_affinity, -#endif - .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND, +static void mpic_ipi_ack(struct irq_data *d) +{ + struct mpic *mpic = irq_data_get_irq_chip_data(d); + + writel(~BIT(d->hwirq), mpic->per_cpu + MPIC_IN_DRBEL_CAUSE); +} + +static struct irq_chip mpic_ipi_irqchip = { + .name = "IPI", + .irq_ack = mpic_ipi_ack, + .irq_mask = mpic_ipi_mask, + .irq_unmask = mpic_ipi_unmask, + .ipi_send_mask = mpic_ipi_send_mask, }; -static int armada_370_xp_mpic_irq_map(struct irq_domain *h, - unsigned int virq, irq_hw_number_t hw) +static int mpic_ipi_alloc(struct irq_domain *d, unsigned int virq, + unsigned int nr_irqs, void *args) { - armada_370_xp_irq_mask(irq_get_irq_data(virq)); - if (!is_percpu_irq(hw)) - writel(hw, per_cpu_int_base + - ARMADA_370_XP_INT_CLEAR_MASK_OFFS); - else - writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS); - irq_set_status_flags(virq, IRQ_LEVEL); - - if (is_percpu_irq(hw)) { - irq_set_percpu_devid(virq); - irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip, - handle_percpu_devid_irq); - } else { - irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip, - handle_level_irq); - irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq))); + for (unsigned int i = 0; i < nr_irqs; i++) { + irq_set_percpu_devid(virq + i); + irq_domain_set_info(d, virq + i, i, &mpic_ipi_irqchip, d->host_data, + handle_percpu_devid_irq, NULL, NULL); } - irq_set_probe(virq); return 0; } -static void armada_xp_mpic_smp_cpu_init(void) +static void mpic_ipi_free(struct irq_domain *d, unsigned int virq, + unsigned int nr_irqs) { - u32 control; - int nr_irqs, i; - - control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL); - nr_irqs = (control >> 2) & 0x3ff; + /* Not freeing IPIs */ +} - for (i = 0; i < nr_irqs; i++) - writel(i, per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS); +static const struct irq_domain_ops mpic_ipi_domain_ops = { + .alloc = mpic_ipi_alloc, + .free = mpic_ipi_free, +}; - /* Clear pending IPIs */ - writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS); +static void mpic_ipi_resume(struct mpic *mpic) +{ + for (irq_hw_number_t i = 0; i < IPI_DOORBELL_NR; i++) { + unsigned int virq = irq_find_mapping(mpic->ipi_domain, i); + struct irq_data *d; - /* Enable first 8 IPIs */ - writel(IPI_DOORBELL_MASK, per_cpu_int_base + - ARMADA_370_XP_IN_DRBEL_MSK_OFFS); + if (!virq || !irq_percpu_is_enabled(virq)) + continue; - /* Unmask IPI interrupt */ - writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); + d = irq_domain_get_irq_data(mpic->ipi_domain, virq); + mpic_ipi_unmask(d); + } } -static void armada_xp_mpic_perf_init(void) +static int __init mpic_ipi_init(struct mpic *mpic, struct device_node *node) { - unsigned long cpuid = cpu_logical_map(smp_processor_id()); + int base_ipi; - /* Enable Performance Counter Overflow interrupts */ - writel(ARMADA_370_XP_INT_CAUSE_PERF(cpuid), - per_cpu_int_base + ARMADA_370_XP_INT_FABRIC_MASK_OFFS); + mpic->ipi_domain = irq_domain_create_linear(of_fwnode_handle(node), IPI_DOORBELL_NR, + &mpic_ipi_domain_ops, mpic); + if (WARN_ON(!mpic->ipi_domain)) + return -ENOMEM; + + irq_domain_update_bus_token(mpic->ipi_domain, DOMAIN_BUS_IPI); + 
base_ipi = irq_domain_alloc_irqs(mpic->ipi_domain, IPI_DOORBELL_NR, NUMA_NO_NODE, NULL); + if (WARN_ON(!base_ipi)) + return -ENOMEM; + + set_smp_ipi_range(base_ipi, IPI_DOORBELL_NR); + + return 0; } -#ifdef CONFIG_SMP -static void armada_mpic_send_doorbell(const struct cpumask *mask, - unsigned int irq) +static int mpic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force) { - int cpu; - unsigned long map = 0; + struct mpic *mpic = irq_data_get_irq_chip_data(d); + irq_hw_number_t hwirq = irqd_to_hwirq(d); + unsigned int cpu; - /* Convert our logical CPU mask into a physical one. */ - for_each_cpu(cpu, mask) - map |= 1 << cpu_logical_map(cpu); + /* Select a single core from the affinity mask which is online */ + cpu = cpumask_any_and(mask_val, cpu_online_mask); - /* - * Ensure that stores to Normal memory are visible to the - * other CPUs before issuing the IPI. - */ - dsb(); + atomic_io_modify(mpic->base + MPIC_INT_SOURCE_CTL(hwirq), + MPIC_INT_SOURCE_CPU_MASK, BIT(cpu_logical_map(cpu))); - /* submit softirq */ - writel((map << 8) | irq, main_int_base + - ARMADA_370_XP_SW_TRIG_INT_OFFS); + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + + return IRQ_SET_MASK_OK; } -static void armada_xp_mpic_reenable_percpu(void) +static void mpic_smp_cpu_init(struct mpic *mpic) { - unsigned int irq; + for (irq_hw_number_t i = 0; i < mpic->domain->hwirq_max; i++) + writel(i, mpic->per_cpu + MPIC_INT_SET_MASK); - /* Re-enable per-CPU interrupts that were enabled before suspend */ - for (irq = 0; irq < ARMADA_370_XP_MAX_PER_CPU_IRQS; irq++) { - struct irq_data *data; - int virq; + if (!mpic_is_ipi_available(mpic)) + return; - virq = irq_linear_revmap(armada_370_xp_mpic_domain, irq); - if (virq == 0) - continue; + /* Disable all IPIs */ + writel(0, mpic->per_cpu + MPIC_IN_DRBEL_MASK); - data = irq_get_irq_data(virq); + /* Clear pending IPIs */ + writel(0, mpic->per_cpu + MPIC_IN_DRBEL_CAUSE); + + /* Unmask IPI interrupt */ + writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK); +} - if (!irq_percpu_is_enabled(virq)) +static void mpic_reenable_percpu(struct mpic *mpic) +{ + /* Re-enable per-CPU interrupts that were enabled before suspend */ + for (irq_hw_number_t i = 0; i < MPIC_PER_CPU_IRQS_NR; i++) { + unsigned int virq = irq_find_mapping(mpic->domain, i); + struct irq_data *d; + + if (!virq || !irq_percpu_is_enabled(virq)) continue; - armada_370_xp_irq_unmask(data); + d = irq_get_irq_data(virq); + mpic_irq_unmask(d); } + + if (mpic_is_ipi_available(mpic)) + mpic_ipi_resume(mpic); + + mpic_msi_reenable_percpu(mpic); } -static int armada_xp_mpic_starting_cpu(unsigned int cpu) +static int mpic_starting_cpu(unsigned int cpu) { - armada_xp_mpic_perf_init(); - armada_xp_mpic_smp_cpu_init(); - armada_xp_mpic_reenable_percpu(); + struct mpic *mpic = irq_get_default_domain()->host_data; + + mpic_perf_init(mpic); + mpic_smp_cpu_init(mpic); + mpic_reenable_percpu(mpic); + return 0; } static int mpic_cascaded_starting_cpu(unsigned int cpu) { - armada_xp_mpic_perf_init(); - armada_xp_mpic_reenable_percpu(); - enable_percpu_irq(parent_irq, IRQ_TYPE_NONE); + struct mpic *mpic = mpic_data; + + mpic_perf_init(mpic); + mpic_reenable_percpu(mpic); + enable_percpu_irq(mpic->parent_irq, IRQ_TYPE_NONE); + return 0; } +#else +static void mpic_smp_cpu_init(struct mpic *mpic) {} +static void mpic_ipi_resume(struct mpic *mpic) {} #endif -static const struct irq_domain_ops armada_370_xp_mpic_irq_ops = { - .map = armada_370_xp_mpic_irq_map, - .xlate = irq_domain_xlate_onecell, +static struct irq_chip 
mpic_irq_chip = { + .name = "MPIC", + .irq_mask = mpic_irq_mask, + .irq_mask_ack = mpic_irq_mask, + .irq_unmask = mpic_irq_unmask, +#ifdef CONFIG_SMP + .irq_set_affinity = mpic_set_affinity, +#endif + .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND, +}; + +static int mpic_irq_map(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq) +{ + struct mpic *mpic = domain->host_data; + + /* IRQs 0 and 1 cannot be mapped, they are handled internally */ + if (hwirq <= 1) + return -EINVAL; + + irq_set_chip_data(virq, mpic); + + mpic_irq_mask(irq_get_irq_data(virq)); + if (!mpic_is_percpu_irq(hwirq)) + writel(hwirq, mpic->per_cpu + MPIC_INT_CLEAR_MASK); + else + writel(hwirq, mpic->base + MPIC_INT_SET_ENABLE); + irq_set_status_flags(virq, IRQ_LEVEL); + + if (mpic_is_percpu_irq(hwirq)) { + irq_set_percpu_devid(virq); + irq_set_chip_and_handler(virq, &mpic_irq_chip, handle_percpu_devid_irq); + } else { + irq_set_chip_and_handler(virq, &mpic_irq_chip, handle_level_irq); + irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq))); + } + irq_set_probe(virq); + return 0; +} + +static const struct irq_domain_ops mpic_irq_ops = { + .map = mpic_irq_map, + .xlate = irq_domain_xlate_onecell, }; #ifdef CONFIG_PCI_MSI -static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained) +static void mpic_handle_msi_irq(struct mpic *mpic) { - u32 msimask, msinr; + unsigned long cause; + unsigned int i; - msimask = readl_relaxed(per_cpu_int_base + - ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS) - & PCI_MSI_DOORBELL_MASK; + cause = readl_relaxed(mpic->per_cpu + MPIC_IN_DRBEL_CAUSE); + cause &= mpic->msi_doorbell_mask; + writel(~cause, mpic->per_cpu + MPIC_IN_DRBEL_CAUSE); - writel(~msimask, per_cpu_int_base + - ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS); + for_each_set_bit(i, &cause, BITS_PER_LONG) + generic_handle_domain_irq(mpic->msi_inner_domain, i - mpic->msi_doorbell_start); +} +#else +static void mpic_handle_msi_irq(struct mpic *mpic) {} +#endif - for (msinr = PCI_MSI_DOORBELL_START; - msinr < PCI_MSI_DOORBELL_END; msinr++) { - int irq; +#ifdef CONFIG_SMP +static void mpic_handle_ipi_irq(struct mpic *mpic) +{ + unsigned long cause; + irq_hw_number_t i; - if (!(msimask & BIT(msinr))) - continue; + cause = readl_relaxed(mpic->per_cpu + MPIC_IN_DRBEL_CAUSE); + cause &= IPI_DOORBELL_MASK; - if (is_chained) { - irq = irq_find_mapping(armada_370_xp_msi_inner_domain, - msinr - PCI_MSI_DOORBELL_START); - generic_handle_irq(irq); - } else { - irq = msinr - PCI_MSI_DOORBELL_START; - handle_domain_irq(armada_370_xp_msi_inner_domain, - irq, regs); - } - } + for_each_set_bit(i, &cause, IPI_DOORBELL_NR) + generic_handle_domain_irq(mpic->ipi_domain, i); } #else -static void armada_370_xp_handle_msi_irq(struct pt_regs *r, bool b) {} +static inline void mpic_handle_ipi_irq(struct mpic *mpic) {} #endif -static void armada_370_xp_mpic_handle_cascade_irq(struct irq_desc *desc) +static void mpic_handle_cascade_irq(struct irq_desc *desc) { + struct mpic *mpic = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); - unsigned long irqmap, irqn, irqsrc, cpuid; - unsigned int cascade_irq; + unsigned long cause; + u32 irqsrc, cpuid; + irq_hw_number_t i; chained_irq_enter(chip, desc); - irqmap = readl_relaxed(per_cpu_int_base + ARMADA_375_PPI_CAUSE); + cause = readl_relaxed(mpic->per_cpu + MPIC_PPI_CAUSE); cpuid = cpu_logical_map(smp_processor_id()); - for_each_set_bit(irqn, &irqmap, BITS_PER_LONG) { - irqsrc = readl_relaxed(main_int_base + - ARMADA_370_XP_INT_SOURCE_CTL(irqn)); + 
for_each_set_bit(i, &cause, MPIC_PER_CPU_IRQS_NR) { + irqsrc = readl_relaxed(mpic->base + MPIC_INT_SOURCE_CTL(i)); /* Check if the interrupt is not masked on current CPU. * Test IRQ (0-1) and FIQ (8-9) mask bits. */ - if (!(irqsrc & ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid))) + if (!(irqsrc & MPIC_INT_IRQ_FIQ_MASK(cpuid))) continue; - if (irqn == 1) { - armada_370_xp_handle_msi_irq(NULL, true); + if (i == 0 || i == 1) { + mpic_handle_msi_irq(mpic); continue; } - cascade_irq = irq_find_mapping(armada_370_xp_mpic_domain, irqn); - generic_handle_irq(cascade_irq); + generic_handle_domain_irq(mpic->domain, i); } chained_irq_exit(chip, desc); } -static void __exception_irq_entry -armada_370_xp_handle_irq(struct pt_regs *regs) +static void __exception_irq_entry mpic_handle_irq(struct pt_regs *regs) { - u32 irqstat, irqnr; + struct mpic *mpic = irq_get_default_domain()->host_data; + irq_hw_number_t i; + u32 irqstat; do { - irqstat = readl_relaxed(per_cpu_int_base + - ARMADA_370_XP_CPU_INTACK_OFFS); - irqnr = irqstat & 0x3FF; + irqstat = readl_relaxed(mpic->per_cpu + MPIC_CPU_INTACK); + i = FIELD_GET(MPIC_CPU_INTACK_IID_MASK, irqstat); - if (irqnr > 1022) + if (i > 1022) break; - if (irqnr > 1) { - handle_domain_irq(armada_370_xp_mpic_domain, - irqnr, regs); - continue; - } + if (i > 1) + generic_handle_domain_irq(mpic->domain, i); /* MSI handling */ - if (irqnr == 1) - armada_370_xp_handle_msi_irq(regs, false); + if (i == 1) + mpic_handle_msi_irq(mpic); -#ifdef CONFIG_SMP /* IPI Handling */ - if (irqnr == 0) { - u32 ipimask, ipinr; - - ipimask = readl_relaxed(per_cpu_int_base + - ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS) - & IPI_DOORBELL_MASK; - - writel(~ipimask, per_cpu_int_base + - ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS); - - /* Handle all pending doorbells */ - for (ipinr = IPI_DOORBELL_START; - ipinr < IPI_DOORBELL_END; ipinr++) { - if (ipimask & (0x1 << ipinr)) - handle_IPI(ipinr, regs); - } - continue; - } -#endif - + if (i == 0) + mpic_handle_ipi_irq(mpic); } while (1); } -static int armada_370_xp_mpic_suspend(void) +static int mpic_suspend(void *data) { - doorbell_mask_reg = readl(per_cpu_int_base + - ARMADA_370_XP_IN_DRBEL_MSK_OFFS); + struct mpic *mpic = mpic_data; + + mpic->doorbell_mask = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK); + return 0; } -static void armada_370_xp_mpic_resume(void) +static void mpic_resume(void *data) { - int nirqs; - irq_hw_number_t irq; + struct mpic *mpic = mpic_data; + bool src0, src1; /* Re-enable interrupts */ - nirqs = (readl(main_int_base + ARMADA_370_XP_INT_CONTROL) >> 2) & 0x3ff; - for (irq = 0; irq < nirqs; irq++) { - struct irq_data *data; - int virq; + for (irq_hw_number_t i = 0; i < mpic->domain->hwirq_max; i++) { + unsigned int virq = irq_find_mapping(mpic->domain, i); + struct irq_data *d; - virq = irq_linear_revmap(armada_370_xp_mpic_domain, irq); - if (virq == 0) + if (!virq) continue; - data = irq_get_irq_data(virq); + d = irq_get_irq_data(virq); - if (!is_percpu_irq(irq)) { + if (!mpic_is_percpu_irq(i)) { /* Non per-CPU interrupts */ - writel(irq, per_cpu_int_base + - ARMADA_370_XP_INT_CLEAR_MASK_OFFS); - if (!irqd_irq_disabled(data)) - armada_370_xp_irq_unmask(data); + writel(i, mpic->per_cpu + MPIC_INT_CLEAR_MASK); + if (!irqd_irq_disabled(d)) + mpic_irq_unmask(d); } else { /* Per-CPU interrupts */ - writel(irq, main_int_base + - ARMADA_370_XP_INT_SET_ENABLE_OFFS); + writel(i, mpic->base + MPIC_INT_SET_ENABLE); /* - * Re-enable on the current CPU, - * armada_xp_mpic_reenable_percpu() will take - * care of secondary CPUs when they come up. 
+ * Re-enable on the current CPU, mpic_reenable_percpu() + * will take care of secondary CPUs when they come up. */ if (irq_percpu_is_enabled(virq)) - armada_370_xp_irq_unmask(data); + mpic_irq_unmask(d); } } /* Reconfigure doorbells for IPIs and MSIs */ - writel(doorbell_mask_reg, - per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS); - if (doorbell_mask_reg & IPI_DOORBELL_MASK) - writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); - if (doorbell_mask_reg & PCI_MSI_DOORBELL_MASK) - writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); + writel(mpic->doorbell_mask, mpic->per_cpu + MPIC_IN_DRBEL_MASK); + + if (mpic_is_ipi_available(mpic)) { + src0 = mpic->doorbell_mask & IPI_DOORBELL_MASK; + src1 = mpic->doorbell_mask & PCI_MSI_DOORBELL_MASK; + } else { + src0 = mpic->doorbell_mask & PCI_MSI_FULL_DOORBELL_SRC0_MASK; + src1 = mpic->doorbell_mask & PCI_MSI_FULL_DOORBELL_SRC1_MASK; + } + + if (src0) + writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK); + if (src1) + writel(1, mpic->per_cpu + MPIC_INT_CLEAR_MASK); + + if (mpic_is_ipi_available(mpic)) + mpic_ipi_resume(mpic); } -static struct syscore_ops armada_370_xp_mpic_syscore_ops = { - .suspend = armada_370_xp_mpic_suspend, - .resume = armada_370_xp_mpic_resume, +static const struct syscore_ops mpic_syscore_ops = { + .suspend = mpic_suspend, + .resume = mpic_resume, +}; + +static struct syscore mpic_syscore = { + .ops = &mpic_syscore_ops, }; -static int __init armada_370_xp_mpic_of_init(struct device_node *node, - struct device_node *parent) +static int __init mpic_map_region(struct device_node *np, int index, + void __iomem **base, phys_addr_t *phys_base) { - struct resource main_int_res, per_cpu_int_res; - int nr_irqs, i; - u32 control; + struct resource res; + int err; - BUG_ON(of_address_to_resource(node, 0, &main_int_res)); - BUG_ON(of_address_to_resource(node, 1, &per_cpu_int_res)); + err = of_address_to_resource(np, index, &res); + if (WARN_ON(err)) + goto fail; - BUG_ON(!request_mem_region(main_int_res.start, - resource_size(&main_int_res), - node->full_name)); - BUG_ON(!request_mem_region(per_cpu_int_res.start, - resource_size(&per_cpu_int_res), - node->full_name)); + if (WARN_ON(!request_mem_region(res.start, resource_size(&res), np->full_name))) { + err = -EBUSY; + goto fail; + } - main_int_base = ioremap(main_int_res.start, - resource_size(&main_int_res)); - BUG_ON(!main_int_base); + *base = ioremap(res.start, resource_size(&res)); + if (WARN_ON(!*base)) { + err = -ENOMEM; + goto fail; + } - per_cpu_int_base = ioremap(per_cpu_int_res.start, - resource_size(&per_cpu_int_res)); - BUG_ON(!per_cpu_int_base); + if (phys_base) + *phys_base = res.start; - control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL); - nr_irqs = (control >> 2) & 0x3ff; + return 0; - for (i = 0; i < nr_irqs; i++) - writel(i, main_int_base + ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS); +fail: + pr_err("%pOF: Unable to map resource %d: %pE\n", np, index, ERR_PTR(err)); + return err; +} - armada_370_xp_mpic_domain = - irq_domain_add_linear(node, nr_irqs, - &armada_370_xp_mpic_irq_ops, NULL); - BUG_ON(!armada_370_xp_mpic_domain); - irq_domain_update_bus_token(armada_370_xp_mpic_domain, DOMAIN_BUS_WIRED); +static int __init mpic_of_init(struct device_node *node, struct device_node *parent) +{ + phys_addr_t phys_base; + unsigned int nr_irqs; + struct mpic *mpic; + int err; + + mpic = kzalloc(sizeof(*mpic), GFP_KERNEL); + if (WARN_ON(!mpic)) + return -ENOMEM; + + mpic_data = mpic; + + err = mpic_map_region(node, 0, &mpic->base, &phys_base); + if 
(err) + return err; + + err = mpic_map_region(node, 1, &mpic->per_cpu, NULL); + if (err) + return err; + + nr_irqs = FIELD_GET(MPIC_INT_CONTROL_NUMINT_MASK, readl(mpic->base + MPIC_INT_CONTROL)); + + for (irq_hw_number_t i = 0; i < nr_irqs; i++) + writel(i, mpic->base + MPIC_INT_CLEAR_ENABLE); + + /* + * Initialize mpic->parent_irq before calling any other functions, since + * it is used to distinguish between IPI and non-IPI platforms. + */ + mpic->parent_irq = irq_of_parse_and_map(node, 0); + + /* + * On non-IPI platforms the driver currently supports only the per-CPU + * interrupts (the first 29 interrupts). See mpic_handle_cascade_irq(). + */ + if (!mpic_is_ipi_available(mpic)) + nr_irqs = MPIC_PER_CPU_IRQS_NR; + + mpic->domain = irq_domain_create_linear(of_fwnode_handle(node), nr_irqs, &mpic_irq_ops, mpic); + if (!mpic->domain) { + pr_err("%pOF: Unable to add IRQ domain\n", node); + return -ENOMEM; + } + + irq_domain_update_bus_token(mpic->domain, DOMAIN_BUS_WIRED); /* Setup for the boot CPU */ - armada_xp_mpic_perf_init(); - armada_xp_mpic_smp_cpu_init(); + mpic_perf_init(mpic); + mpic_smp_cpu_init(mpic); - armada_370_xp_msi_init(node, main_int_res.start); + err = mpic_msi_init(mpic, node, phys_base); + if (err) { + pr_err("%pOF: Unable to initialize MSI domain\n", node); + return err; + } - parent_irq = irq_of_parse_and_map(node, 0); - if (parent_irq <= 0) { - irq_set_default_host(armada_370_xp_mpic_domain); - set_handle_irq(armada_370_xp_handle_irq); + if (mpic_is_ipi_available(mpic)) { + irq_set_default_domain(mpic->domain); + set_handle_irq(mpic_handle_irq); #ifdef CONFIG_SMP - set_smp_cross_call(armada_mpic_send_doorbell); + err = mpic_ipi_init(mpic, node); + if (err) { + pr_err("%pOF: Unable to initialize IPI domain\n", node); + return err; + } + cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_XP_STARTING, "irqchip/armada/ipi:starting", - armada_xp_mpic_starting_cpu, NULL); + mpic_starting_cpu, NULL); #endif } else { #ifdef CONFIG_SMP @@ -702,13 +905,13 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node, "irqchip/armada/cascade:starting", mpic_cascaded_starting_cpu, NULL); #endif - irq_set_chained_handler(parent_irq, - armada_370_xp_mpic_handle_cascade_irq); + irq_set_chained_handler_and_data(mpic->parent_irq, + mpic_handle_cascade_irq, mpic); } - register_syscore_ops(&armada_370_xp_mpic_syscore_ops); + register_syscore(&mpic_syscore); return 0; } -IRQCHIP_DECLARE(armada_370_xp_mpic, "marvell,mpic", armada_370_xp_mpic_of_init); +IRQCHIP_DECLARE(marvell_mpic, "marvell,mpic", mpic_of_init); diff --git a/drivers/irqchip/irq-aspeed-i2c-ic.c b/drivers/irqchip/irq-aspeed-i2c-ic.c index f20200af0992..87c1feb999ff 100644 --- a/drivers/irqchip/irq-aspeed-i2c-ic.c +++ b/drivers/irqchip/irq-aspeed-i2c-ic.c @@ -1,13 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Aspeed 24XX/25XX I2C Interrupt Controller. * * Copyright (C) 2012-2017 ASPEED Technology Inc. * Copyright 2017 IBM Corporation * Copyright 2017 Google, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ #include <linux/irq.h> @@ -37,14 +34,12 @@ static void aspeed_i2c_ic_irq_handler(struct irq_desc *desc) struct aspeed_i2c_ic *i2c_ic = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); unsigned long bit, status; - unsigned int bus_irq; chained_irq_enter(chip, desc); status = readl(i2c_ic->base); - for_each_set_bit(bit, &status, ASPEED_I2C_IC_NUM_BUS) { - bus_irq = irq_find_mapping(i2c_ic->irq_domain, bit); - generic_handle_irq(bus_irq); - } + for_each_set_bit(bit, &status, ASPEED_I2C_IC_NUM_BUS) + generic_handle_domain_irq(i2c_ic->irq_domain, bit); + chained_irq_exit(chip, desc); } @@ -82,12 +77,12 @@ static int __init aspeed_i2c_ic_of_init(struct device_node *node, } i2c_ic->parent_irq = irq_of_parse_and_map(node, 0); - if (i2c_ic->parent_irq < 0) { - ret = i2c_ic->parent_irq; + if (!i2c_ic->parent_irq) { + ret = -EINVAL; goto err_iounmap; } - i2c_ic->irq_domain = irq_domain_add_linear(node, ASPEED_I2C_IC_NUM_BUS, + i2c_ic->irq_domain = irq_domain_create_linear(of_fwnode_handle(node), ASPEED_I2C_IC_NUM_BUS, &aspeed_i2c_ic_irq_domain_ops, NULL); if (!i2c_ic->irq_domain) { diff --git a/drivers/irqchip/irq-aspeed-intc.c b/drivers/irqchip/irq-aspeed-intc.c new file mode 100644 index 000000000000..8330221799a0 --- /dev/null +++ b/drivers/irqchip/irq-aspeed-intc.c @@ -0,0 +1,139 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Aspeed Interrupt Controller. + * + * Copyright (C) 2023 ASPEED Technology Inc. + */ + +#include <linux/bitops.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/io.h> +#include <linux/spinlock.h> + +#define INTC_INT_ENABLE_REG 0x00 +#define INTC_INT_STATUS_REG 0x04 +#define INTC_IRQS_PER_WORD 32 + +struct aspeed_intc_ic { + void __iomem *base; + raw_spinlock_t gic_lock; + raw_spinlock_t intc_lock; + struct irq_domain *irq_domain; +}; + +static void aspeed_intc_ic_irq_handler(struct irq_desc *desc) +{ + struct aspeed_intc_ic *intc_ic = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + + chained_irq_enter(chip, desc); + + scoped_guard(raw_spinlock, &intc_ic->gic_lock) { + unsigned long bit, status; + + status = readl(intc_ic->base + INTC_INT_STATUS_REG); + for_each_set_bit(bit, &status, INTC_IRQS_PER_WORD) { + generic_handle_domain_irq(intc_ic->irq_domain, bit); + writel(BIT(bit), intc_ic->base + INTC_INT_STATUS_REG); + } + } + + chained_irq_exit(chip, desc); +} + +static void aspeed_intc_irq_mask(struct irq_data *data) +{ + struct aspeed_intc_ic *intc_ic = irq_data_get_irq_chip_data(data); + unsigned int mask = readl(intc_ic->base + INTC_INT_ENABLE_REG) & ~BIT(data->hwirq); + + guard(raw_spinlock)(&intc_ic->intc_lock); + writel(mask, intc_ic->base + INTC_INT_ENABLE_REG); +} + +static void aspeed_intc_irq_unmask(struct irq_data *data) +{ + struct aspeed_intc_ic *intc_ic = irq_data_get_irq_chip_data(data); + unsigned int unmask = readl(intc_ic->base + INTC_INT_ENABLE_REG) | BIT(data->hwirq); + + guard(raw_spinlock)(&intc_ic->intc_lock); + writel(unmask, intc_ic->base + INTC_INT_ENABLE_REG); +} + +static struct irq_chip aspeed_intc_chip = { + .name = "ASPEED INTC", + .irq_mask = aspeed_intc_irq_mask, + .irq_unmask = aspeed_intc_irq_unmask, +}; + +static int aspeed_intc_ic_map_irq_domain(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &aspeed_intc_chip, handle_level_irq); + irq_set_chip_data(irq, 
domain->host_data); + + return 0; +} + +static const struct irq_domain_ops aspeed_intc_ic_irq_domain_ops = { + .map = aspeed_intc_ic_map_irq_domain, +}; + +static int __init aspeed_intc_ic_of_init(struct device_node *node, + struct device_node *parent) +{ + struct aspeed_intc_ic *intc_ic; + int irq, i, ret = 0; + + intc_ic = kzalloc(sizeof(*intc_ic), GFP_KERNEL); + if (!intc_ic) + return -ENOMEM; + + intc_ic->base = of_iomap(node, 0); + if (!intc_ic->base) { + pr_err("Failed to iomap intc_ic base\n"); + ret = -ENOMEM; + goto err_free_ic; + } + writel(0xffffffff, intc_ic->base + INTC_INT_STATUS_REG); + writel(0x0, intc_ic->base + INTC_INT_ENABLE_REG); + + intc_ic->irq_domain = irq_domain_create_linear(of_fwnode_handle(node), INTC_IRQS_PER_WORD, + &aspeed_intc_ic_irq_domain_ops, intc_ic); + if (!intc_ic->irq_domain) { + ret = -ENOMEM; + goto err_iounmap; + } + + raw_spin_lock_init(&intc_ic->gic_lock); + raw_spin_lock_init(&intc_ic->intc_lock); + + /* Check that all the IRQ numbers are valid; if any is not, unmap the base and free the data. */ + for (i = 0; i < of_irq_count(node); i++) { + irq = irq_of_parse_and_map(node, i); + if (!irq) { + pr_err("Failed to get irq number\n"); + ret = -EINVAL; + goto err_iounmap; + } + } + + for (i = 0; i < of_irq_count(node); i++) { + irq = irq_of_parse_and_map(node, i); + irq_set_chained_handler_and_data(irq, aspeed_intc_ic_irq_handler, intc_ic); + } + + return 0; + +err_iounmap: + iounmap(intc_ic->base); +err_free_ic: + kfree(intc_ic); + return ret; +} + +IRQCHIP_DECLARE(ast2700_intc_ic, "aspeed,ast2700-intc-ic", aspeed_intc_ic_of_init); diff --git a/drivers/irqchip/irq-aspeed-scu-ic.c b/drivers/irqchip/irq-aspeed-scu-ic.c new file mode 100644 index 000000000000..bee59c8c4c93 --- /dev/null +++ b/drivers/irqchip/irq-aspeed-scu-ic.c @@ -0,0 +1,294 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Aspeed AST24XX, AST25XX, AST26XX, and AST27XX SCU Interrupt Controller + * Copyright 2019 IBM Corporation + * + * Eddie James <eajames@linux.ibm.com> + */ + +#include <linux/bitops.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> + +#define ASPEED_SCU_IC_STATUS GENMASK(28, 16) +#define ASPEED_SCU_IC_STATUS_SHIFT 16 +#define AST2700_SCU_IC_STATUS GENMASK(15, 0) + +struct aspeed_scu_ic_variant { + const char *compatible; + unsigned long irq_enable; + unsigned long irq_shift; + unsigned int num_irqs; + unsigned long ier; + unsigned long isr; +}; + +#define SCU_VARIANT(_compat, _shift, _enable, _num, _ier, _isr) { \ + .compatible = _compat, \ + .irq_shift = _shift, \ + .irq_enable = _enable, \ + .num_irqs = _num, \ + .ier = _ier, \ + .isr = _isr, \ +} + +static const struct aspeed_scu_ic_variant scu_ic_variants[] __initconst = { + SCU_VARIANT("aspeed,ast2400-scu-ic", 0, GENMASK(15, 0), 7, 0x00, 0x00), + SCU_VARIANT("aspeed,ast2500-scu-ic", 0, GENMASK(15, 0), 7, 0x00, 0x00), + SCU_VARIANT("aspeed,ast2600-scu-ic0", 0, GENMASK(5, 0), 6, 0x00, 0x00), + SCU_VARIANT("aspeed,ast2600-scu-ic1", 4, GENMASK(5, 4), 2, 0x00, 0x00), + SCU_VARIANT("aspeed,ast2700-scu-ic0", 0, GENMASK(3, 0), 4, 0x00, 0x04), + SCU_VARIANT("aspeed,ast2700-scu-ic1", 0, GENMASK(3, 0), 4, 0x00, 0x04), + SCU_VARIANT("aspeed,ast2700-scu-ic2", 0, GENMASK(3, 0), 4, 0x04, 0x00), + SCU_VARIANT("aspeed,ast2700-scu-ic3", 0, GENMASK(1, 0), 2, 0x04, 0x00), +}; + +struct aspeed_scu_ic { + unsigned long irq_enable; + unsigned long irq_shift; + unsigned int num_irqs;
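	/*
	 * Note on the ier/isr fields: per the SCU_VARIANT table above, the
	 * AST2700 parts split the enable (ier) and status (isr) registers at
	 * different offsets, while the older SoCs fold both into a single
	 * register (ier == isr == 0x00); scu_has_split_isr() below keys off
	 * exactly that difference.
	 */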
+ void __iomem *base; + struct irq_domain *irq_domain; + unsigned long ier; + unsigned long isr; +}; + +static inline bool scu_has_split_isr(struct aspeed_scu_ic *scu) +{ + return scu->ier != scu->isr; +} + +static void aspeed_scu_ic_irq_handler_combined(struct irq_desc *desc) +{ + struct aspeed_scu_ic *scu_ic = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + unsigned long bit, enabled, max, status; + unsigned int sts, mask; + + chained_irq_enter(chip, desc); + + mask = scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT; + /* + * The SCU IC has just one register to control its operation and read + * status. The interrupt enable bits occupy the lower 16 bits of the + * register, while the interrupt status bits occupy the upper 16 bits. + * The status bit for a given interrupt is always 16 bits shifted from + * the enable bit for the same interrupt. + * Therefore, perform the IRQ operations in the enable bit space by + * shifting the status down to get the mapping and then back up to + * clear the bit. + */ + sts = readl(scu_ic->base); + enabled = sts & scu_ic->irq_enable; + status = (sts >> ASPEED_SCU_IC_STATUS_SHIFT) & enabled; + + bit = scu_ic->irq_shift; + max = scu_ic->num_irqs + bit; + + for_each_set_bit_from(bit, &status, max) { + generic_handle_domain_irq(scu_ic->irq_domain, bit - scu_ic->irq_shift); + writel((readl(scu_ic->base) & ~mask) | BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT), + scu_ic->base); + } + + chained_irq_exit(chip, desc); +} + +static void aspeed_scu_ic_irq_handler_split(struct irq_desc *desc) +{ + struct aspeed_scu_ic *scu_ic = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + unsigned long bit, enabled, max, status; + unsigned int sts, mask; + + chained_irq_enter(chip, desc); + + mask = scu_ic->irq_enable; + sts = readl(scu_ic->base + scu_ic->ier); + enabled = sts & scu_ic->irq_enable; + sts = readl(scu_ic->base + scu_ic->isr); + status = sts & enabled; + + bit = scu_ic->irq_shift; + max = scu_ic->num_irqs + bit; + + for_each_set_bit_from(bit, &status, max) { + generic_handle_domain_irq(scu_ic->irq_domain, bit - scu_ic->irq_shift); + /* Clear interrupt */ + writel(BIT(bit), scu_ic->base + scu_ic->isr); + } + + chained_irq_exit(chip, desc); +} + +static void aspeed_scu_ic_irq_mask_combined(struct irq_data *data) +{ + struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data); + unsigned int bit = BIT(data->hwirq + scu_ic->irq_shift); + unsigned int mask = bit | (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT); + + /* + * Status bits are cleared by writing 1. In order to prevent the mask + * operation from clearing the status bits, they are kept inside the + * mask and written as 0. + */ + writel(readl(scu_ic->base) & ~mask, scu_ic->base); +} + +static void aspeed_scu_ic_irq_unmask_combined(struct irq_data *data) +{ + struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data); + unsigned int bit = BIT(data->hwirq + scu_ic->irq_shift); + unsigned int mask = bit | (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT); + + /* + * Status bits are cleared by writing 1. In order to prevent the unmask + * operation from clearing the status bits, they are kept inside the + * mask and written as 0.
+ */ + writel((readl(scu_ic->base) & ~mask) | bit, scu_ic->base); +} + +static void aspeed_scu_ic_irq_mask_split(struct irq_data *data) +{ + struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data); + unsigned int mask = BIT(data->hwirq + scu_ic->irq_shift); + + writel(readl(scu_ic->base + scu_ic->ier) & ~mask, scu_ic->base + scu_ic->ier); +} + +static void aspeed_scu_ic_irq_unmask_split(struct irq_data *data) +{ + struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data); + unsigned int bit = BIT(data->hwirq + scu_ic->irq_shift); + + writel(readl(scu_ic->base + scu_ic->ier) | bit, scu_ic->base + scu_ic->ier); +} + +static int aspeed_scu_ic_irq_set_affinity(struct irq_data *data, + const struct cpumask *dest, + bool force) +{ + return -EINVAL; +} + +static struct irq_chip aspeed_scu_ic_chip_combined = { + .name = "aspeed-scu-ic", + .irq_mask = aspeed_scu_ic_irq_mask_combined, + .irq_unmask = aspeed_scu_ic_irq_unmask_combined, + .irq_set_affinity = aspeed_scu_ic_irq_set_affinity, +}; + +static struct irq_chip aspeed_scu_ic_chip_split = { + .name = "ast2700-scu-ic", + .irq_mask = aspeed_scu_ic_irq_mask_split, + .irq_unmask = aspeed_scu_ic_irq_unmask_split, + .irq_set_affinity = aspeed_scu_ic_irq_set_affinity, +}; + +static int aspeed_scu_ic_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + struct aspeed_scu_ic *scu_ic = domain->host_data; + + if (scu_has_split_isr(scu_ic)) + irq_set_chip_and_handler(irq, &aspeed_scu_ic_chip_split, handle_level_irq); + else + irq_set_chip_and_handler(irq, &aspeed_scu_ic_chip_combined, handle_level_irq); + irq_set_chip_data(irq, domain->host_data); + + return 0; +} + +static const struct irq_domain_ops aspeed_scu_ic_domain_ops = { + .map = aspeed_scu_ic_map, +}; + +static int aspeed_scu_ic_of_init_common(struct aspeed_scu_ic *scu_ic, + struct device_node *node) +{ + int irq, rc = 0; + + scu_ic->base = of_iomap(node, 0); + if (!scu_ic->base) { + rc = -ENOMEM; + goto err; + } + + if (scu_has_split_isr(scu_ic)) { + writel(AST2700_SCU_IC_STATUS, scu_ic->base + scu_ic->isr); + writel(0, scu_ic->base + scu_ic->ier); + } else { + writel(ASPEED_SCU_IC_STATUS, scu_ic->base); + writel(0, scu_ic->base); + } + + irq = irq_of_parse_and_map(node, 0); + if (!irq) { + rc = -EINVAL; + goto err; + } + + scu_ic->irq_domain = irq_domain_create_linear(of_fwnode_handle(node), scu_ic->num_irqs, + &aspeed_scu_ic_domain_ops, scu_ic); + if (!scu_ic->irq_domain) { + rc = -ENOMEM; + goto err; + } + + irq_set_chained_handler_and_data(irq, scu_has_split_isr(scu_ic) ?
+ aspeed_scu_ic_irq_handler_split : + aspeed_scu_ic_irq_handler_combined, + scu_ic); + + return 0; + +err: + kfree(scu_ic); + return rc; +} + +static const struct aspeed_scu_ic_variant *aspeed_scu_ic_find_variant(struct device_node *np) +{ + for (int i = 0; i < ARRAY_SIZE(scu_ic_variants); i++) { + if (of_device_is_compatible(np, scu_ic_variants[i].compatible)) + return &scu_ic_variants[i]; + } + return NULL; +} + +static int __init aspeed_scu_ic_of_init(struct device_node *node, struct device_node *parent) +{ + const struct aspeed_scu_ic_variant *variant; + struct aspeed_scu_ic *scu_ic; + + variant = aspeed_scu_ic_find_variant(node); + if (!variant) + return -ENODEV; + + scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL); + if (!scu_ic) + return -ENOMEM; + + scu_ic->irq_enable = variant->irq_enable; + scu_ic->irq_shift = variant->irq_shift; + scu_ic->num_irqs = variant->num_irqs; + scu_ic->ier = variant->ier; + scu_ic->isr = variant->isr; + + return aspeed_scu_ic_of_init_common(scu_ic, node); +} + +IRQCHIP_DECLARE(ast2400_scu_ic, "aspeed,ast2400-scu-ic", aspeed_scu_ic_of_init); +IRQCHIP_DECLARE(ast2500_scu_ic, "aspeed,ast2500-scu-ic", aspeed_scu_ic_of_init); +IRQCHIP_DECLARE(ast2600_scu_ic0, "aspeed,ast2600-scu-ic0", aspeed_scu_ic_of_init); +IRQCHIP_DECLARE(ast2600_scu_ic1, "aspeed,ast2600-scu-ic1", aspeed_scu_ic_of_init); +IRQCHIP_DECLARE(ast2700_scu_ic0, "aspeed,ast2700-scu-ic0", aspeed_scu_ic_of_init); +IRQCHIP_DECLARE(ast2700_scu_ic1, "aspeed,ast2700-scu-ic1", aspeed_scu_ic_of_init); +IRQCHIP_DECLARE(ast2700_scu_ic2, "aspeed,ast2700-scu-ic2", aspeed_scu_ic_of_init); +IRQCHIP_DECLARE(ast2700_scu_ic3, "aspeed,ast2700-scu-ic3", aspeed_scu_ic_of_init); diff --git a/drivers/irqchip/irq-aspeed-vic.c b/drivers/irqchip/irq-aspeed-vic.c index 03ba477ea0d0..9b665b5bb531 100644 --- a/drivers/irqchip/irq-aspeed-vic.c +++ b/drivers/irqchip/irq-aspeed-vic.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2015 - Ben Herrenschmidt, IBM Corp. * @@ -7,17 +8,6 @@ * * Copyright (C) 1999 - 2003 ARM Limited * Copyright (C) 2000 Deep Blue Solutions Ltd - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * */ #include <linux/export.h> @@ -81,7 +71,7 @@ static void vic_init_hw(struct aspeed_vic *vic) writel(0, vic->base + AVIC_INT_SELECT); writel(0, vic->base + AVIC_INT_SELECT + 4); - /* Some interrupts have a programable high/low level trigger + /* Some interrupts have a programmable high/low level trigger * (4 GPIO direct inputs), for now we assume this was configured * by firmware. We read which ones are edge now. 
*/ @@ -110,7 +100,7 @@ static void __exception_irq_entry avic_handle_irq(struct pt_regs *regs) if (stat == 0) break; irq += ffs(stat) - 1; - handle_domain_irq(vic->dom, irq, regs); + generic_handle_domain_irq(vic->dom, irq); } } @@ -213,7 +203,7 @@ static int __init avic_of_init(struct device_node *node, } vic->base = regs; - /* Initialize soures, all masked */ + /* Initialize sources, all masked */ vic_init_hw(vic); /* Ready to receive interrupts */ @@ -221,8 +211,8 @@ static int __init avic_of_init(struct device_node *node, set_handle_irq(avic_handle_irq); /* Register our domain */ - vic->dom = irq_domain_add_simple(node, NUM_IRQS, 0, - &avic_dom_ops, vic); + vic->dom = irq_domain_create_simple(of_fwnode_handle(node), NUM_IRQS, 0, + &avic_dom_ops, vic); return 0; } diff --git a/drivers/irqchip/irq-ath79-cpu.c b/drivers/irqchip/irq-ath79-cpu.c index befe93c5a51a..923e4bba3776 100644 --- a/drivers/irqchip/irq-ath79-cpu.c +++ b/drivers/irqchip/irq-ath79-cpu.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Atheros AR71xx/AR724x/AR913x specific interrupt handling * @@ -7,10 +8,6 @@ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> * * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. */ #include <linux/interrupt.h> diff --git a/drivers/irqchip/irq-ath79-misc.c b/drivers/irqchip/irq-ath79-misc.c index aa7290784636..258b8e9a2d57 100644 --- a/drivers/irqchip/irq-ath79-misc.c +++ b/drivers/irqchip/irq-ath79-misc.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Atheros AR71xx/AR724x/AR913x MISC interrupt controller * @@ -7,10 +8,6 @@ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> * * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
*/ #include <linux/irqchip.h> @@ -18,10 +15,21 @@ #include <linux/of_address.h> #include <linux/of_irq.h> +#include <asm/time.h> + #define AR71XX_RESET_REG_MISC_INT_STATUS 0 #define AR71XX_RESET_REG_MISC_INT_ENABLE 4 #define ATH79_MISC_IRQ_COUNT 32 +#define ATH79_MISC_PERF_IRQ 5 + +static int ath79_perfcount_irq; + +int get_c0_perfcount_int(void) +{ + return ath79_perfcount_irq; +} +EXPORT_SYMBOL_GPL(get_c0_perfcount_int); static void ath79_misc_irq_handler(struct irq_desc *desc) { @@ -44,7 +52,7 @@ static void ath79_misc_irq_handler(struct irq_desc *desc) while (pending) { int bit = __ffs(pending); - generic_handle_irq(irq_linear_revmap(domain, bit)); + generic_handle_domain_irq(domain, bit); pending &= ~BIT(bit); } @@ -113,6 +121,8 @@ static void __init ath79_misc_intc_domain_init( { void __iomem *base = domain->host_data; + ath79_perfcount_irq = irq_create_mapping(domain, ATH79_MISC_PERF_IRQ); + /* Disable and clear all interrupts */ __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_ENABLE); __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS); @@ -139,7 +149,7 @@ static int __init ath79_misc_intc_of_init( return -ENOMEM; } - domain = irq_domain_add_linear(node, ATH79_MISC_IRQ_COUNT, + domain = irq_domain_create_linear(of_fwnode_handle(node), ATH79_MISC_IRQ_COUNT, &misc_irq_domain_ops, base); if (!domain) { pr_err("Failed to add MISC irqdomain\n"); @@ -169,21 +179,3 @@ static int __init ar7240_misc_intc_of_init( IRQCHIP_DECLARE(ar7240_misc_intc, "qca,ar7240-misc-intc", ar7240_misc_intc_of_init); - -void __init ath79_misc_irq_init(void __iomem *regs, int irq, - int irq_base, bool is_ar71xx) -{ - struct irq_domain *domain; - - if (is_ar71xx) - ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask; - else - ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack; - - domain = irq_domain_add_legacy(NULL, ATH79_MISC_IRQ_COUNT, - irq_base, 0, &misc_irq_domain_ops, regs); - if (!domain) - panic("Failed to create MISC irqdomain"); - - ath79_misc_intc_domain_init(domain, irq); -} diff --git a/drivers/irqchip/irq-ativic32.c b/drivers/irqchip/irq-ativic32.c deleted file mode 100644 index 85cf6e0e0e52..000000000000 --- a/drivers/irqchip/irq-ativic32.c +++ /dev/null @@ -1,138 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -// Copyright (C) 2005-2017 Andes Technology Corporation - -#include <linux/irq.h> -#include <linux/of.h> -#include <linux/of_irq.h> -#include <linux/of_address.h> -#include <linux/interrupt.h> -#include <linux/irqdomain.h> -#include <linux/irqchip.h> -#include <nds32_intrinsic.h> - -unsigned long wake_mask; - -static void ativic32_ack_irq(struct irq_data *data) -{ - __nds32__mtsr_dsb(BIT(data->hwirq), NDS32_SR_INT_PEND2); -} - -static void ativic32_mask_irq(struct irq_data *data) -{ - unsigned long int_mask2 = __nds32__mfsr(NDS32_SR_INT_MASK2); - __nds32__mtsr_dsb(int_mask2 & (~(BIT(data->hwirq))), NDS32_SR_INT_MASK2); -} - -static void ativic32_unmask_irq(struct irq_data *data) -{ - unsigned long int_mask2 = __nds32__mfsr(NDS32_SR_INT_MASK2); - __nds32__mtsr_dsb(int_mask2 | (BIT(data->hwirq)), NDS32_SR_INT_MASK2); -} - -static int nointc_set_wake(struct irq_data *data, unsigned int on) -{ - unsigned long int_mask = __nds32__mfsr(NDS32_SR_INT_MASK); - static unsigned long irq_orig_bit; - u32 bit = 1 << data->hwirq; - - if (on) { - if (int_mask & bit) - __assign_bit(data->hwirq, &irq_orig_bit, true); - else - __assign_bit(data->hwirq, &irq_orig_bit, false); - - __assign_bit(data->hwirq, &int_mask, true); - __assign_bit(data->hwirq, &wake_mask, true); - - } else { - if (!(irq_orig_bit 
& bit)) - __assign_bit(data->hwirq, &int_mask, false); - - __assign_bit(data->hwirq, &wake_mask, false); - __assign_bit(data->hwirq, &irq_orig_bit, false); - } - - __nds32__mtsr_dsb(int_mask, NDS32_SR_INT_MASK); - - return 0; -} - -static struct irq_chip ativic32_chip = { - .name = "ativic32", - .irq_ack = ativic32_ack_irq, - .irq_mask = ativic32_mask_irq, - .irq_unmask = ativic32_unmask_irq, - .irq_set_wake = nointc_set_wake, -}; - -static unsigned int __initdata nivic_map[6] = { 6, 2, 10, 16, 24, 32 }; - -static struct irq_domain *root_domain; -static int ativic32_irq_domain_map(struct irq_domain *id, unsigned int virq, - irq_hw_number_t hw) -{ - - unsigned long int_trigger_type; - u32 type; - struct irq_data *irq_data; - int_trigger_type = __nds32__mfsr(NDS32_SR_INT_TRIGGER); - irq_data = irq_get_irq_data(virq); - if (!irq_data) - return -EINVAL; - - if (int_trigger_type & (BIT(hw))) { - irq_set_chip_and_handler(virq, &ativic32_chip, handle_edge_irq); - type = IRQ_TYPE_EDGE_RISING; - } else { - irq_set_chip_and_handler(virq, &ativic32_chip, handle_level_irq); - type = IRQ_TYPE_LEVEL_HIGH; - } - - irqd_set_trigger_type(irq_data, type); - return 0; -} - -static struct irq_domain_ops ativic32_ops = { - .map = ativic32_irq_domain_map, - .xlate = irq_domain_xlate_onecell -}; - -static irq_hw_number_t get_intr_src(void) -{ - return ((__nds32__mfsr(NDS32_SR_ITYPE) & ITYPE_mskVECTOR) >> ITYPE_offVECTOR) - - NDS32_VECTOR_offINTERRUPT; -} - -asmlinkage void asm_do_IRQ(struct pt_regs *regs) -{ - irq_hw_number_t hwirq = get_intr_src(); - handle_domain_irq(root_domain, hwirq, regs); -} - -int __init ativic32_init_irq(struct device_node *node, struct device_node *parent) -{ - unsigned long int_vec_base, nivic, nr_ints; - - if (WARN(parent, "non-root ativic32 are not supported")) - return -EINVAL; - - int_vec_base = __nds32__mfsr(NDS32_SR_IVB); - - if (((int_vec_base & IVB_mskIVIC_VER) >> IVB_offIVIC_VER) == 0) - panic("Unable to use atcivic32 for this cpu.\n"); - - nivic = (int_vec_base & IVB_mskNIVIC) >> IVB_offNIVIC; - if (nivic >= ARRAY_SIZE(nivic_map)) - panic("The number of input for ativic32 is not supported.\n"); - - nr_ints = nivic_map[nivic]; - - root_domain = irq_domain_add_linear(node, nr_ints, - &ativic32_ops, NULL); - - if (!root_domain) - panic("%s: unable to create IRQ domain\n", node->full_name); - - return 0; -} -IRQCHIP_DECLARE(ativic32, "andestech,ativic32", ativic32_init_irq); diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c index 072bd227b6c6..e68853815c7a 100644 --- a/drivers/irqchip/irq-atmel-aic-common.c +++ b/drivers/irqchip/irq-atmel-aic-common.c @@ -111,8 +111,6 @@ static void __init aic_common_ext_irq_of_init(struct irq_domain *domain) struct device_node *node = irq_domain_get_of_node(domain); struct irq_chip_generic *gc; struct aic_chip_data *aic; - struct property *prop; - const __be32 *p; u32 hwirq; gc = irq_get_domain_generic_chip(domain, 0); @@ -120,7 +118,7 @@ static void __init aic_common_ext_irq_of_init(struct irq_domain *domain) aic = gc->private; aic->ext_irqs |= 1; - of_property_for_each_u32(node, "atmel,external-irqs", prop, p, hwirq) { + of_property_for_each_u32(node, "atmel,external-irqs", hwirq) { gc = irq_get_domain_generic_chip(domain, hwirq); if (!gc) { pr_warn("AIC: external irq %d >= %d skip it\n", @@ -189,20 +187,11 @@ void __init aic_common_rtt_irq_fixup(void) static void __init aic_common_irq_fixup(const struct of_device_id *matches) { - struct device_node *root = of_find_node_by_path("/"); - const struct 
of_device_id *match; + void (*fixup)(void); - if (!root) - return; - - match = of_match_node(matches, root); - - if (match) { - void (*fixup)(void) = match->data; + fixup = of_machine_get_match_data(matches); + if (fixup) fixup(); - } - - of_node_put(root); } struct irq_domain *__init aic_common_of_init(struct device_node *node, @@ -230,7 +219,7 @@ struct irq_domain *__init aic_common_of_init(struct device_node *node, goto err_iounmap; } - domain = irq_domain_add_linear(node, nchips * 32, ops, aic); + domain = irq_domain_create_linear(of_fwnode_handle(node), nchips * 32, ops, aic); if (!domain) { ret = -ENOMEM; goto err_free_aic; diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c index bb1ad451392f..1dcc52760eca 100644 --- a/drivers/irqchip/irq-atmel-aic.c +++ b/drivers/irqchip/irq-atmel-aic.c @@ -57,8 +57,7 @@ static struct irq_domain *aic_domain; -static asmlinkage void __exception_irq_entry -aic_handle(struct pt_regs *regs) +static void __exception_irq_entry aic_handle(struct pt_regs *regs) { struct irq_domain_chip_generic *dgc = aic_domain->gc; struct irq_chip_generic *gc = dgc->gc[0]; @@ -71,7 +70,7 @@ aic_handle(struct pt_regs *regs) if (!irqstat) irq_reg_writel(gc, 0, AT91_AIC_EOICR); else - handle_domain_irq(aic_domain, irqnr, regs); + generic_handle_domain_irq(aic_domain, irqnr); } static int aic_retrigger(struct irq_data *d) @@ -79,11 +78,10 @@ static int aic_retrigger(struct irq_data *d) struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); /* Enable interrupt on AIC5 */ - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); irq_reg_writel(gc, d->mask, AT91_AIC_ISCR); - irq_gc_unlock(gc); - return 0; + return 1; } static int aic_set_type(struct irq_data *d, unsigned type) @@ -107,30 +105,27 @@ static void aic_suspend(struct irq_data *d) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); irq_reg_writel(gc, gc->mask_cache, AT91_AIC_IDCR); irq_reg_writel(gc, gc->wake_active, AT91_AIC_IECR); - irq_gc_unlock(gc); } static void aic_resume(struct irq_data *d) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); irq_reg_writel(gc, gc->wake_active, AT91_AIC_IDCR); irq_reg_writel(gc, gc->mask_cache, AT91_AIC_IECR); - irq_gc_unlock(gc); } static void aic_pm_shutdown(struct irq_data *d) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); irq_reg_writel(gc, 0xffffffff, AT91_AIC_IDCR); irq_reg_writel(gc, 0xffffffff, AT91_AIC_ICCR); - irq_gc_unlock(gc); } #else #define aic_suspend NULL @@ -176,10 +171,8 @@ static int aic_irq_domain_xlate(struct irq_domain *d, { struct irq_domain_chip_generic *dgc = d->gc; struct irq_chip_generic *gc; - unsigned long flags; unsigned smr; - int idx; - int ret; + int idx, ret; if (!dgc) return -EINVAL; @@ -195,11 +188,10 @@ static int aic_irq_domain_xlate(struct irq_domain *d, gc = dgc->gc[idx]; - irq_gc_lock_irqsave(gc, flags); + guard(raw_spinlock_irqsave)(&gc->lock); smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq)); aic_common_set_priority(intspec[2], &smr); irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq)); - irq_gc_unlock_irqrestore(gc, flags); return ret; } diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c index 6acad2ea0fb3..1f14b401f71d 100644 --- a/drivers/irqchip/irq-atmel-aic5.c +++ b/drivers/irqchip/irq-atmel-aic5.c @@ -67,8 +67,7 @@ static struct irq_domain *aic5_domain; -static asmlinkage void 
__exception_irq_entry -aic5_handle(struct pt_regs *regs) +static void __exception_irq_entry aic5_handle(struct pt_regs *regs) { struct irq_chip_generic *bgc = irq_get_domain_generic_chip(aic5_domain, 0); u32 irqnr; @@ -80,7 +79,7 @@ aic5_handle(struct pt_regs *regs) if (!irqstat) irq_reg_writel(bgc, 0, AT91_AIC5_EOICR); else - handle_domain_irq(aic5_domain, irqnr, regs); + generic_handle_domain_irq(aic5_domain, irqnr); } static void aic5_mask(struct irq_data *d) @@ -93,11 +92,10 @@ static void aic5_mask(struct irq_data *d) * Disable interrupt on AIC5. We always take the lock of the * first irq chip as all chips share the same registers. */ - irq_gc_lock(bgc); + guard(raw_spinlock)(&bgc->lock); irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); irq_reg_writel(gc, 1, AT91_AIC5_IDCR); gc->mask_cache &= ~d->mask; - irq_gc_unlock(bgc); } static void aic5_unmask(struct irq_data *d) @@ -110,11 +108,10 @@ static void aic5_unmask(struct irq_data *d) * Enable interrupt on AIC5. We always take the lock of the * first irq chip as all chips share the same registers. */ - irq_gc_lock(bgc); + guard(raw_spinlock)(&bgc->lock); irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); irq_reg_writel(gc, 1, AT91_AIC5_IECR); gc->mask_cache |= d->mask; - irq_gc_unlock(bgc); } static int aic5_retrigger(struct irq_data *d) @@ -123,12 +120,10 @@ static int aic5_retrigger(struct irq_data *d) struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0); /* Enable interrupt on AIC5 */ - irq_gc_lock(bgc); + guard(raw_spinlock)(&bgc->lock); irq_reg_writel(bgc, d->hwirq, AT91_AIC5_SSR); irq_reg_writel(bgc, 1, AT91_AIC5_ISCR); - irq_gc_unlock(bgc); - - return 0; + return 1; } static int aic5_set_type(struct irq_data *d, unsigned type) @@ -138,14 +133,12 @@ static int aic5_set_type(struct irq_data *d, unsigned type) unsigned int smr; int ret; - irq_gc_lock(bgc); + guard(raw_spinlock)(&bgc->lock); irq_reg_writel(bgc, d->hwirq, AT91_AIC5_SSR); smr = irq_reg_readl(bgc, AT91_AIC5_SMR); ret = aic_common_set_type(d, type, &smr); if (!ret) irq_reg_writel(bgc, smr, AT91_AIC5_SMR); - irq_gc_unlock(bgc); - return ret; } @@ -167,7 +160,7 @@ static void aic5_suspend(struct irq_data *d) smr_cache[i] = irq_reg_readl(bgc, AT91_AIC5_SMR); } - irq_gc_lock(bgc); + guard(raw_spinlock)(&bgc->lock); for (i = 0; i < dgc->irqs_per_chip; i++) { mask = 1 << i; if ((mask & gc->mask_cache) == (mask & gc->wake_active)) @@ -179,7 +172,6 @@ static void aic5_suspend(struct irq_data *d) else irq_reg_writel(bgc, 1, AT91_AIC5_IDCR); } - irq_gc_unlock(bgc); } static void aic5_resume(struct irq_data *d) @@ -191,7 +183,7 @@ static void aic5_resume(struct irq_data *d) int i; u32 mask; - irq_gc_lock(bgc); + guard(raw_spinlock)(&bgc->lock); if (smr_cache) { irq_reg_writel(bgc, 0xffffffff, AT91_AIC5_SPU); @@ -215,7 +207,6 @@ static void aic5_resume(struct irq_data *d) else irq_reg_writel(bgc, 1, AT91_AIC5_IDCR); } - irq_gc_unlock(bgc); } static void aic5_pm_shutdown(struct irq_data *d) @@ -226,13 +217,12 @@ static void aic5_pm_shutdown(struct irq_data *d) struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); int i; - irq_gc_lock(bgc); + guard(raw_spinlock)(&bgc->lock); for (i = 0; i < dgc->irqs_per_chip; i++) { irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR); irq_reg_writel(bgc, 1, AT91_AIC5_IDCR); irq_reg_writel(bgc, 1, AT91_AIC5_ICCR); } - irq_gc_unlock(bgc); } #else #define aic5_suspend NULL @@ -278,7 +268,6 @@ static int aic5_irq_domain_xlate(struct irq_domain *d, unsigned int *out_type) { struct irq_chip_generic *bgc = irq_get_domain_generic_chip(d, 0); 
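The pattern behind these AIC and AIC5 hunks: each irq_gc_lock()/irq_gc_unlock() pair becomes a scope-based guard from <linux/cleanup.h>, which drops the lock automatically on every exit from the enclosing scope (the raw_spinlock_irqsave variant used in the xlate paths additionally restores the interrupt state). A minimal sketch of the idiom, with hypothetical demo_* names:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);
static unsigned int demo_count;

static int demo_add(unsigned int limit)
{
	guard(raw_spinlock)(&demo_lock);	/* held until scope exit */

	if (demo_count >= limit)
		return -ENOSPC;			/* lock dropped here too */

	demo_count++;
	return 0;				/* and here */
}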
- unsigned long flags; unsigned smr; int ret; @@ -290,13 +279,11 @@ static int aic5_irq_domain_xlate(struct irq_domain *d, if (ret) return ret; - irq_gc_lock_irqsave(bgc, flags); + guard(raw_spinlock_irqsave)(&bgc->lock); irq_reg_writel(bgc, *out_hwirq, AT91_AIC5_SSR); smr = irq_reg_readl(bgc, AT91_AIC5_SMR); aic_common_set_priority(intspec[2], &smr); irq_reg_writel(bgc, smr, AT91_AIC5_SMR); - irq_gc_unlock_irqrestore(bgc, flags); - return ret; } @@ -310,9 +297,17 @@ static void __init sama5d3_aic_irq_fixup(void) aic_common_rtc_irq_fixup(); } +static void __init sam9x60_aic_irq_fixup(void) +{ + aic_common_rtc_irq_fixup(); + aic_common_rtt_irq_fixup(); +} + static const struct of_device_id aic5_irq_fixups[] __initconst = { { .compatible = "atmel,sama5d3", .data = sama5d3_aic_irq_fixup }, { .compatible = "atmel,sama5d4", .data = sama5d3_aic_irq_fixup }, + { .compatible = "microchip,sam9x60", .data = sam9x60_aic_irq_fixup }, + { .compatible = "microchip,sam9x7", .data = sam9x60_aic_irq_fixup }, { /* sentinel */ }, }; @@ -390,3 +385,20 @@ static int __init sama5d4_aic5_of_init(struct device_node *node, return aic5_of_init(node, parent, NR_SAMA5D4_IRQS); } IRQCHIP_DECLARE(sama5d4_aic5, "atmel,sama5d4-aic", sama5d4_aic5_of_init); + +#define NR_SAM9X60_IRQS 50 + +static int __init sam9x60_aic5_of_init(struct device_node *node, + struct device_node *parent) +{ + return aic5_of_init(node, parent, NR_SAM9X60_IRQS); +} +IRQCHIP_DECLARE(sam9x60_aic5, "microchip,sam9x60-aic", sam9x60_aic5_of_init); + +#define NR_SAM9X7_IRQS 70 + +static int __init sam9x7_aic5_of_init(struct device_node *node, struct device_node *parent) +{ + return aic5_of_init(node, parent, NR_SAM9X7_IRQS); +} +IRQCHIP_DECLARE(sam9x7_aic5, "microchip,sam9x7-aic", sam9x7_aic5_of_init); diff --git a/drivers/irqchip/irq-bcm2712-mip.c b/drivers/irqchip/irq-bcm2712-mip.c new file mode 100644 index 000000000000..4761974ad650 --- /dev/null +++ b/drivers/irqchip/irq-bcm2712-mip.c @@ -0,0 +1,288 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2024 Raspberry Pi Ltd., All Rights Reserved. 
+ * Copyright (c) 2024 SUSE + */ + +#include <linux/bitmap.h> +#include <linux/irqchip.h> +#include <linux/irqdomain.h> +#include <linux/msi.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> + +#include <linux/irqchip/irq-msi-lib.h> + +#define MIP_INT_RAISE 0x00 +#define MIP_INT_CLEAR 0x10 +#define MIP_INT_CFGL_HOST 0x20 +#define MIP_INT_CFGH_HOST 0x30 +#define MIP_INT_MASKL_HOST 0x40 +#define MIP_INT_MASKH_HOST 0x50 +#define MIP_INT_MASKL_VPU 0x60 +#define MIP_INT_MASKH_VPU 0x70 +#define MIP_INT_STATUSL_HOST 0x80 +#define MIP_INT_STATUSH_HOST 0x90 +#define MIP_INT_STATUSL_VPU 0xa0 +#define MIP_INT_STATUSH_VPU 0xb0 + +/** + * struct mip_priv - MSI-X interrupt controller data + * @lock: Used to protect bitmap alloc/free + * @base: Base address of MMIO area + * @msg_addr: PCIe MSI-X address + * @msi_base: MSI base + * @num_msis: Count of MSIs + * @msi_offset: MSI offset + * @bitmap: A bitmap for hwirqs + * @parent: Parent domain (GIC) + * @dev: A device pointer + */ +struct mip_priv { + spinlock_t lock; + void __iomem *base; + u64 msg_addr; + u32 msi_base; + u32 num_msis; + u32 msi_offset; + unsigned long *bitmap; + struct irq_domain *parent; + struct device *dev; +}; + +static void mip_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) +{ + struct mip_priv *mip = irq_data_get_irq_chip_data(d); + + msg->address_hi = upper_32_bits(mip->msg_addr); + msg->address_lo = lower_32_bits(mip->msg_addr); + msg->data = d->hwirq; +} + +static struct irq_chip mip_middle_irq_chip = { + .name = "MIP", + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_affinity = irq_chip_set_affinity_parent, + .irq_set_type = irq_chip_set_type_parent, + .irq_compose_msi_msg = mip_compose_msi_msg, +}; + +static int mip_alloc_hwirq(struct mip_priv *mip, unsigned int nr_irqs) +{ + guard(spinlock)(&mip->lock); + return bitmap_find_free_region(mip->bitmap, mip->num_msis, ilog2(nr_irqs)); +} + +static void mip_free_hwirq(struct mip_priv *mip, unsigned int hwirq, + unsigned int nr_irqs) +{ + guard(spinlock)(&mip->lock); + bitmap_release_region(mip->bitmap, hwirq, ilog2(nr_irqs)); +} + +static int mip_middle_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + struct mip_priv *mip = domain->host_data; + struct irq_fwspec fwspec = {0}; + unsigned int hwirq, i; + struct irq_data *irqd; + int irq, ret; + + irq = mip_alloc_hwirq(mip, nr_irqs); + if (irq < 0) + return irq; + + hwirq = irq + mip->msi_offset; + + fwspec.fwnode = domain->parent->fwnode; + fwspec.param_count = 3; + fwspec.param[0] = 0; + fwspec.param[1] = hwirq + mip->msi_base; + fwspec.param[2] = IRQ_TYPE_EDGE_RISING; + + ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &fwspec); + if (ret) + goto err_free_hwirq; + + for (i = 0; i < nr_irqs; i++) { + irqd = irq_domain_get_irq_data(domain->parent, virq + i); + irqd->chip->irq_set_type(irqd, IRQ_TYPE_EDGE_RISING); + + ret = irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, + &mip_middle_irq_chip, mip); + if (ret) + goto err_free; + + irqd = irq_get_irq_data(virq + i); + irqd_set_single_target(irqd); + irqd_set_affinity_on_activate(irqd); + } + + return 0; + +err_free: + irq_domain_free_irqs_parent(domain, virq, nr_irqs); +err_free_hwirq: + mip_free_hwirq(mip, irq, nr_irqs); + return ret; +} + +static void mip_middle_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + struct irq_data *irqd = irq_domain_get_irq_data(domain, virq); + 
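mip_middle_domain_alloc() above is the standard middle-domain recipe: reserve a slot in the local bitmap, then translate it into a three-cell fwspec (type, number, trigger) for the GIC parent. A reduced sketch of just that translation step, assuming a three-cell GIC binding; demo_alloc_parent_spi() is hypothetical:

#include <linux/irq.h>
#include <linux/irqdomain.h>

static int demo_alloc_parent_spi(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, u32 spi)
{
	struct irq_fwspec fwspec = {
		.fwnode      = domain->parent->fwnode,
		.param_count = 3,
		/* cell 0: 0 = SPI, cell 1: SPI number, cell 2: trigger type */
		.param       = { 0, spi, IRQ_TYPE_EDGE_RISING },
	};

	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &fwspec);
}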
struct mip_priv *mip; + unsigned int hwirq; + + if (!irqd) + return; + + mip = irq_data_get_irq_chip_data(irqd); + hwirq = irqd_to_hwirq(irqd); + irq_domain_free_irqs_parent(domain, virq, nr_irqs); + mip_free_hwirq(mip, hwirq - mip->msi_offset, nr_irqs); +} + +static const struct irq_domain_ops mip_middle_domain_ops = { + .select = msi_lib_irq_domain_select, + .alloc = mip_middle_domain_alloc, + .free = mip_middle_domain_free, +}; + +#define MIP_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS | \ + MSI_FLAG_PCI_MSI_MASK_PARENT) + +#define MIP_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \ + MSI_FLAG_MULTI_PCI_MSI | \ + MSI_FLAG_PCI_MSIX) + +static const struct msi_parent_ops mip_msi_parent_ops = { + .supported_flags = MIP_MSI_FLAGS_SUPPORTED, + .required_flags = MIP_MSI_FLAGS_REQUIRED, + .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK, + .bus_select_token = DOMAIN_BUS_GENERIC_MSI, + .bus_select_mask = MATCH_PCI_MSI, + .prefix = "MIP-MSI-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, +}; + +static int mip_init_domains(struct mip_priv *mip, struct device_node *np) +{ + struct irq_domain_info info = { + .fwnode = of_fwnode_handle(np), + .ops = &mip_middle_domain_ops, + .host_data = mip, + .size = mip->num_msis, + .parent = mip->parent, + .dev = mip->dev, + }; + + if (!msi_create_parent_irq_domain(&info, &mip_msi_parent_ops)) + return -ENOMEM; + + /* + * All MSI-X unmasked for the host, masked for the VPU, and edge-triggered. + */ + writel(0, mip->base + MIP_INT_MASKL_HOST); + writel(0, mip->base + MIP_INT_MASKH_HOST); + writel(~0, mip->base + MIP_INT_MASKL_VPU); + writel(~0, mip->base + MIP_INT_MASKH_VPU); + writel(~0, mip->base + MIP_INT_CFGL_HOST); + writel(~0, mip->base + MIP_INT_CFGH_HOST); + + return 0; +} + +static int mip_parse_dt(struct mip_priv *mip, struct device_node *np) +{ + struct of_phandle_args args; + u64 size; + int ret; + + ret = of_property_read_u32(np, "brcm,msi-offset", &mip->msi_offset); + if (ret) + mip->msi_offset = 0; + + ret = of_parse_phandle_with_args(np, "msi-ranges", "#interrupt-cells", + 0, &args); + if (ret) + return ret; + + ret = of_property_read_u32_index(np, "msi-ranges", args.args_count + 1, + &mip->num_msis); + if (ret) + goto err_put; + + ret = of_property_read_reg(np, 1, &mip->msg_addr, &size); + if (ret) + goto err_put; + + mip->msi_base = args.args[1]; + + mip->parent = irq_find_host(args.np); + if (!mip->parent) + ret = -EINVAL; + +err_put: + of_node_put(args.np); + return ret; +} + +static int mip_msi_probe(struct platform_device *pdev, struct device_node *parent) +{ + struct device_node *node = pdev->dev.of_node; + struct mip_priv *mip; + int ret; + + mip = kzalloc(sizeof(*mip), GFP_KERNEL); + if (!mip) + return -ENOMEM; + + spin_lock_init(&mip->lock); + mip->dev = &pdev->dev; + + ret = mip_parse_dt(mip, node); + if (ret) + goto err_priv; + + mip->base = of_iomap(node, 0); + if (!mip->base) { + ret = -ENXIO; + goto err_priv; + } + + mip->bitmap = bitmap_zalloc(mip->num_msis, GFP_KERNEL); + if (!mip->bitmap) { + ret = -ENOMEM; + goto err_base; + } + + ret = mip_init_domains(mip, node); + if (ret) + goto err_map; + + dev_dbg(&pdev->dev, "MIP: MSI-X count: %u, base: %u, offset: %u, msg_addr: %llx\n", + mip->num_msis, mip->msi_base, mip->msi_offset, mip->msg_addr); + + return 0; + +err_map: + bitmap_free(mip->bitmap); +err_base: + iounmap(mip->base); +err_priv: + kfree(mip); + return ret; +} + +IRQCHIP_PLATFORM_DRIVER_BEGIN(mip_msi) +IRQCHIP_MATCH("brcm,bcm2712-mip", mip_msi_probe) 
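/*
 * Note: the IRQCHIP_PLATFORM_DRIVER_BEGIN/IRQCHIP_MATCH/
 * IRQCHIP_PLATFORM_DRIVER_END helpers from <linux/irqchip.h> expand into an
 * of_device_id match table plus a platform driver wrapping mip_msi_probe(),
 * so unlike IRQCHIP_DECLARE the controller can probe late and be built as a
 * module, matching the MODULE_* metadata just below.
 */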
+IRQCHIP_PLATFORM_DRIVER_END(mip_msi) +MODULE_DESCRIPTION("Broadcom BCM2712 MSI-X interrupt controller"); +MODULE_AUTHOR("Phil Elwell <phil@raspberrypi.com>"); +MODULE_AUTHOR("Stanimir Varbanov <svarbanov@suse.de>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c index 418245d31921..1e384c870350 100644 --- a/drivers/irqchip/irq-bcm2835.c +++ b/drivers/irqchip/irq-bcm2835.c @@ -61,6 +61,7 @@ | SHORTCUT1_MASK | SHORTCUT2_MASK) #define REG_FIQ_CONTROL 0x0c +#define FIQ_CONTROL_ENABLE BIT(7) #define NR_BANKS 3 #define IRQS_PER_BANK 32 @@ -101,7 +102,9 @@ static void armctrl_unmask_irq(struct irq_data *d) static struct irq_chip armctrl_chip = { .name = "ARMCTRL-level", .irq_mask = armctrl_mask_irq, - .irq_unmask = armctrl_unmask_irq + .irq_unmask = armctrl_unmask_irq, + .flags = IRQCHIP_MASK_ON_SUSPEND | + IRQCHIP_SKIP_SET_WAKE, }; static int armctrl_xlate(struct irq_domain *d, struct device_node *ctrlr, @@ -135,12 +138,13 @@ static int __init armctrl_of_init(struct device_node *node, { void __iomem *base; int irq, b, i; + u32 reg; base = of_iomap(node, 0); if (!base) panic("%pOF: unable to map IC registers\n", node); - intc.domain = irq_domain_add_linear(node, MAKE_HWIRQ(NR_BANKS, 0), + intc.domain = irq_domain_create_linear(of_fwnode_handle(node), MAKE_HWIRQ(NR_BANKS, 0), &armctrl_ops, NULL); if (!intc.domain) panic("%pOF: unable to create IRQ domain\n", node); @@ -157,6 +161,19 @@ static int __init armctrl_of_init(struct device_node *node, handle_level_irq); irq_set_probe(irq); } + + reg = readl_relaxed(intc.enable[b]); + if (reg) { + writel_relaxed(reg, intc.disable[b]); + pr_err(FW_BUG "Bootloader left irq enabled: " + "bank %d irq %*pbl\n", b, IRQS_PER_BANK, &reg); + } + } + + reg = readl_relaxed(base + REG_FIQ_CONTROL); + if (reg & FIQ_CONTROL_ENABLE) { + writel_relaxed(0, base + REG_FIQ_CONTROL); + pr_err(FW_BUG "Bootloader left fiq enabled\n"); } if (is_2836) { @@ -231,7 +248,7 @@ static void __exception_irq_entry bcm2835_handle_irq( u32 hwirq; while ((hwirq = get_next_armctrl_hwirq()) != ~0) - handle_domain_irq(intc.domain, hwirq, regs); + generic_handle_domain_irq(intc.domain, hwirq); } static void bcm2836_chained_handle_irq(struct irq_desc *desc) @@ -239,7 +256,7 @@ static void bcm2836_chained_handle_irq(struct irq_desc *desc) u32 hwirq; while ((hwirq = get_next_armctrl_hwirq()) != ~0) - generic_handle_irq(irq_linear_revmap(intc.domain, hwirq)); + generic_handle_domain_irq(intc.domain, hwirq); } IRQCHIP_DECLARE(bcm2835_armctrl_ic, "brcm,bcm2835-armctrl-ic", diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c index 2038693f074c..fafd1f71348e 100644 --- a/drivers/irqchip/irq-bcm2836.c +++ b/drivers/irqchip/irq-bcm2836.c @@ -10,6 +10,7 @@ #include <linux/of_irq.h> #include <linux/irqchip.h> #include <linux/irqdomain.h> +#include <linux/irqchip/chained_irq.h> #include <linux/irqchip/irq-bcm2836.h> #include <asm/exception.h> @@ -57,6 +58,7 @@ static struct irq_chip bcm2836_arm_irqchip_timer = { .name = "bcm2836-timer", .irq_mask = bcm2836_arm_irqchip_mask_timer_irq, .irq_unmask = bcm2836_arm_irqchip_unmask_timer_irq, + .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE, }; static void bcm2836_arm_irqchip_mask_pmu_irq(struct irq_data *d) @@ -73,6 +75,7 @@ static struct irq_chip bcm2836_arm_irqchip_pmu = { .name = "bcm2836-pmu", .irq_mask = bcm2836_arm_irqchip_mask_pmu_irq, .irq_unmask = bcm2836_arm_irqchip_unmask_pmu_irq, + .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE, }; static void
bcm2836_arm_irqchip_mask_gpu_irq(struct irq_data *d) @@ -87,6 +90,16 @@ static struct irq_chip bcm2836_arm_irqchip_gpu = { .name = "bcm2836-gpu", .irq_mask = bcm2836_arm_irqchip_mask_gpu_irq, .irq_unmask = bcm2836_arm_irqchip_unmask_gpu_irq, + .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE, +}; + +static void bcm2836_arm_irqchip_dummy_op(struct irq_data *d) +{ +} + +static struct irq_chip bcm2836_arm_irqchip_dummy = { + .name = "bcm2836-dummy", + .irq_eoi = bcm2836_arm_irqchip_dummy_op, }; static int bcm2836_map(struct irq_domain *d, unsigned int irq, @@ -95,6 +108,9 @@ static int bcm2836_map(struct irq_domain *d, unsigned int irq, struct irq_chip *chip; switch (hw) { + case LOCAL_IRQ_MAILBOX0: + chip = &bcm2836_arm_irqchip_dummy; + break; case LOCAL_IRQ_CNTPSIRQ: case LOCAL_IRQ_CNTPNSIRQ: case LOCAL_IRQ_CNTHPIRQ: @@ -127,26 +143,43 @@ __exception_irq_entry bcm2836_arm_irqchip_handle_irq(struct pt_regs *regs) u32 stat; stat = readl_relaxed(intc.base + LOCAL_IRQ_PENDING0 + 4 * cpu); - if (stat & BIT(LOCAL_IRQ_MAILBOX0)) { -#ifdef CONFIG_SMP - void __iomem *mailbox0 = (intc.base + - LOCAL_MAILBOX0_CLR0 + 16 * cpu); - u32 mbox_val = readl(mailbox0); - u32 ipi = ffs(mbox_val) - 1; - - writel(1 << ipi, mailbox0); - handle_IPI(ipi, regs); -#endif - } else if (stat) { + if (stat) { u32 hwirq = ffs(stat) - 1; - handle_domain_irq(intc.domain, hwirq, regs); + generic_handle_domain_irq(intc.domain, hwirq); } } #ifdef CONFIG_SMP -static void bcm2836_arm_irqchip_send_ipi(const struct cpumask *mask, - unsigned int ipi) +static struct irq_domain *ipi_domain; + +static void bcm2836_arm_irqchip_handle_ipi(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + int cpu = smp_processor_id(); + u32 mbox_val; + + chained_irq_enter(chip, desc); + + mbox_val = readl_relaxed(intc.base + LOCAL_MAILBOX0_CLR0 + 16 * cpu); + if (mbox_val) { + int hwirq = ffs(mbox_val) - 1; + generic_handle_domain_irq(ipi_domain, hwirq); + } + + chained_irq_exit(chip, desc); +} + +static void bcm2836_arm_irqchip_ipi_ack(struct irq_data *d) +{ + int cpu = smp_processor_id(); + + writel_relaxed(BIT(d->hwirq), + intc.base + LOCAL_MAILBOX0_CLR0 + 16 * cpu); +} + +static void bcm2836_arm_irqchip_ipi_send_mask(struct irq_data *d, + const struct cpumask *mask) { int cpu; void __iomem *mailbox0_base = intc.base + LOCAL_MAILBOX0_SET0; @@ -157,11 +190,47 @@ static void bcm2836_arm_irqchip_send_ipi(const struct cpumask *mask, */ smp_wmb(); - for_each_cpu(cpu, mask) { - writel(1 << ipi, mailbox0_base + 16 * cpu); + for_each_cpu(cpu, mask) + writel_relaxed(BIT(d->hwirq), mailbox0_base + 16 * cpu); +} + +static struct irq_chip bcm2836_arm_irqchip_ipi = { + .name = "IPI", + .irq_mask = bcm2836_arm_irqchip_dummy_op, + .irq_unmask = bcm2836_arm_irqchip_dummy_op, + .irq_ack = bcm2836_arm_irqchip_ipi_ack, + .ipi_send_mask = bcm2836_arm_irqchip_ipi_send_mask, +}; + +static int bcm2836_arm_irqchip_ipi_alloc(struct irq_domain *d, + unsigned int virq, + unsigned int nr_irqs, void *args) +{ + int i; + + for (i = 0; i < nr_irqs; i++) { + irq_set_percpu_devid(virq + i); + irq_domain_set_info(d, virq + i, i, &bcm2836_arm_irqchip_ipi, + d->host_data, + handle_percpu_devid_irq, + NULL, NULL); } + + return 0; +} + +static void bcm2836_arm_irqchip_ipi_free(struct irq_domain *d, + unsigned int virq, + unsigned int nr_irqs) +{ + /* Not freeing IPIs */ } +static const struct irq_domain_ops ipi_domain_ops = { + .alloc = bcm2836_arm_irqchip_ipi_alloc, + .free = bcm2836_arm_irqchip_ipi_free, +}; + static int bcm2836_cpu_starting(unsigned 
int cpu) { bcm2836_arm_irqchip_unmask_per_cpu_irq(LOCAL_MAILBOX_INT_CONTROL0, 0, @@ -175,25 +244,55 @@ static int bcm2836_cpu_dying(unsigned int cpu) cpu); return 0; } -#endif -static const struct irq_domain_ops bcm2836_arm_irqchip_intc_ops = { - .xlate = irq_domain_xlate_onetwocell, - .map = bcm2836_map, -}; +#define BITS_PER_MBOX 32 -static void -bcm2836_arm_irqchip_smp_init(void) +static void __init bcm2836_arm_irqchip_smp_init(void) { -#ifdef CONFIG_SMP + struct irq_fwspec ipi_fwspec = { + .fwnode = intc.domain->fwnode, + .param_count = 1, + .param = { + [0] = LOCAL_IRQ_MAILBOX0, + }, + }; + int base_ipi, mux_irq; + + mux_irq = irq_create_fwspec_mapping(&ipi_fwspec); + if (WARN_ON(mux_irq <= 0)) + return; + + ipi_domain = irq_domain_create_linear(intc.domain->fwnode, + BITS_PER_MBOX, &ipi_domain_ops, + NULL); + if (WARN_ON(!ipi_domain)) + return; + + ipi_domain->flags |= IRQ_DOMAIN_FLAG_IPI_SINGLE; + irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI); + + base_ipi = irq_domain_alloc_irqs(ipi_domain, BITS_PER_MBOX, NUMA_NO_NODE, NULL); + if (WARN_ON(!base_ipi)) + return; + + set_smp_ipi_range(base_ipi, BITS_PER_MBOX); + + irq_set_chained_handler_and_data(mux_irq, + bcm2836_arm_irqchip_handle_ipi, NULL); + /* Unmask IPIs to the boot CPU. */ cpuhp_setup_state(CPUHP_AP_IRQ_BCM2836_STARTING, "irqchip/bcm2836:starting", bcm2836_cpu_starting, bcm2836_cpu_dying); - - set_smp_cross_call(bcm2836_arm_irqchip_send_ipi); -#endif } +#else +#define bcm2836_arm_irqchip_smp_init() do { } while(0) +#endif + +static const struct irq_domain_ops bcm2836_arm_irqchip_intc_ops = { + .xlate = irq_domain_xlate_onetwocell, + .map = bcm2836_map, +}; /* * The LOCAL_IRQ_CNT* timer firings are based off of the external @@ -226,12 +325,14 @@ static int __init bcm2836_arm_irqchip_l1_intc_of_init(struct device_node *node, bcm2835_init_local_timer_frequency(); - intc.domain = irq_domain_add_linear(node, LAST_IRQ + 1, + intc.domain = irq_domain_create_linear(of_fwnode_handle(node), LAST_IRQ + 1, &bcm2836_arm_irqchip_intc_ops, NULL); if (!intc.domain) panic("%pOF: unable to create IRQ domain\n", node); + irq_domain_update_bus_token(intc.domain, DOMAIN_BUS_WIRED); + bcm2836_arm_irqchip_smp_init(); set_handle_irq(bcm2836_arm_irqchip_handle_irq); diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c index 43f8abe40878..ca4e141c5bc2 100644 --- a/drivers/irqchip/irq-bcm6345-l1.c +++ b/drivers/irqchip/irq-bcm6345-l1.c @@ -1,13 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Broadcom BCM6345 style Level 1 interrupt controller driver * * Copyright (C) 2014 Broadcom Corporation * Copyright 2015 Simon Arlott * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * * This is based on the BCM7038 (which supports SMP) but with a single * enable register instead of separate mask/set/clear registers. 
* @@ -63,7 +60,6 @@ #include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_address.h> -#include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/smp.h> @@ -85,6 +81,7 @@ struct bcm6345_l1_chip { }; struct bcm6345_l1_cpu { + struct bcm6345_l1_chip *intc; void __iomem *map_base; unsigned int parent_irq; u32 enable_cache[]; @@ -118,33 +115,23 @@ static inline unsigned int cpu_for_irq(struct bcm6345_l1_chip *intc, static void bcm6345_l1_irq_handle(struct irq_desc *desc) { - struct bcm6345_l1_chip *intc = irq_desc_get_handler_data(desc); - struct bcm6345_l1_cpu *cpu; + struct bcm6345_l1_cpu *cpu = irq_desc_get_handler_data(desc); + struct bcm6345_l1_chip *intc = cpu->intc; struct irq_chip *chip = irq_desc_get_chip(desc); unsigned int idx; -#ifdef CONFIG_SMP - cpu = intc->cpus[cpu_logical_map(smp_processor_id())]; -#else - cpu = intc->cpus[0]; -#endif - chained_irq_enter(chip, desc); for (idx = 0; idx < intc->n_words; idx++) { int base = idx * IRQS_PER_WORD; unsigned long pending; irq_hw_number_t hwirq; - unsigned int irq; pending = __raw_readl(cpu->map_base + reg_status(intc, idx)); pending &= __raw_readl(cpu->map_base + reg_enable(intc, idx)); for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) { - irq = irq_linear_revmap(intc->domain, base + hwirq); - if (irq) - do_IRQ(irq); - else + if (generic_handle_domain_irq(intc->domain, base + hwirq)) spurious_interrupt(); } } @@ -205,14 +192,10 @@ static int bcm6345_l1_set_affinity(struct irq_data *d, u32 mask = BIT(d->hwirq % IRQS_PER_WORD); unsigned int old_cpu = cpu_for_irq(intc, d); unsigned int new_cpu; - struct cpumask valid; unsigned long flags; bool enabled; - if (!cpumask_and(&valid, &intc->cpumask, dest)) - return -EINVAL; - - new_cpu = cpumask_any_and(&valid, cpu_online_mask); + new_cpu = cpumask_first_and_and(&intc->cpumask, dest, cpu_online_mask); if (new_cpu >= nr_cpu_ids) return -EINVAL; @@ -223,11 +206,11 @@ static int bcm6345_l1_set_affinity(struct irq_data *d, enabled = intc->cpus[old_cpu]->enable_cache[word] & mask; if (enabled) __bcm6345_l1_mask(d); - cpumask_copy(irq_data_get_affinity_mask(d), dest); + irq_data_update_affinity(d, dest); if (enabled) __bcm6345_l1_unmask(d); } else { - cpumask_copy(irq_data_get_affinity_mask(d), dest); + irq_data_update_affinity(d, dest); } raw_spin_unlock_irqrestore(&intc->lock, flags); @@ -255,15 +238,19 @@ static int __init bcm6345_l1_init_one(struct device_node *dn, else if (intc->n_words != n_words) return -EINVAL; - cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32), + cpu = intc->cpus[idx] = kzalloc(struct_size(cpu, enable_cache, n_words), GFP_KERNEL); if (!cpu) return -ENOMEM; + cpu->intc = intc; cpu->map_base = ioremap(res.start, sz); if (!cpu->map_base) return -ENOMEM; + if (!request_mem_region(res.start, sz, res.name)) + pr_err("failed to request intc memory"); + for (i = 0; i < n_words; i++) { cpu->enable_cache[i] = 0; __raw_writel(0, cpu->map_base + reg_enable(intc, i)); @@ -275,7 +262,7 @@ static int __init bcm6345_l1_init_one(struct device_node *dn, return -EINVAL; } irq_set_chained_handler_and_data(cpu->parent_irq, - bcm6345_l1_irq_handle, intc); + bcm6345_l1_irq_handle, cpu); return 0; } @@ -322,14 +309,14 @@ static int __init bcm6345_l1_of_init(struct device_node *dn, cpumask_set_cpu(idx, &intc->cpumask); } - if (!cpumask_weight(&intc->cpumask)) { + if (cpumask_empty(&intc->cpumask)) { ret = -ENODEV; goto out_free; } raw_spin_lock_init(&intc->lock); - intc->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * 
intc->n_words, + intc->domain = irq_domain_create_linear(of_fwnode_handle(dn), IRQS_PER_WORD * intc->n_words, &bcm6345_l1_domain_ops, intc); if (!intc->domain) { @@ -342,8 +329,7 @@ static int __init bcm6345_l1_of_init(struct device_node *dn, for_each_cpu(idx, &intc->cpumask) { struct bcm6345_l1_cpu *cpu = intc->cpus[idx]; - pr_info(" CPU%u at MMIO 0x%p (irq = %d)\n", idx, - cpu->map_base, cpu->parent_irq); + pr_info(" CPU%u (irq = %d)\n", idx, cpu->parent_irq); } return 0; diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c index 0f6e30e9009d..45c4824be92f 100644 --- a/drivers/irqchip/irq-bcm7038-l1.c +++ b/drivers/irqchip/irq-bcm7038-l1.c @@ -1,12 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Broadcom BCM7038 style Level 1 interrupt controller driver * * Copyright (C) 2014 Broadcom Corporation * Author: Kevin Cernekee - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -23,13 +20,13 @@ #include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_address.h> -#include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/smp.h> #include <linux/types.h> #include <linux/irqchip.h> #include <linux/irqchip/chained_irq.h> +#include <linux/syscore_ops.h> #define IRQS_PER_WORD 32 #define REG_BYTES_PER_IRQ_WORD (sizeof(u32) * 4) @@ -42,12 +39,17 @@ struct bcm7038_l1_chip { unsigned int n_words; struct irq_domain *domain; struct bcm7038_l1_cpu *cpus[NR_CPUS]; +#ifdef CONFIG_PM_SLEEP + struct list_head list; + u32 wake_mask[MAX_WORDS]; +#endif + u32 irq_fwd_mask[MAX_WORDS]; u8 affinity[MAX_WORDS * IRQS_PER_WORD]; }; struct bcm7038_l1_cpu { void __iomem *map_base; - u32 mask_cache[0]; + u32 mask_cache[]; }; /* @@ -80,12 +82,6 @@ static inline unsigned int reg_status(struct bcm7038_l1_chip *intc, return (0 * intc->n_words + word) * sizeof(u32); } -static inline unsigned int reg_mask_status(struct bcm7038_l1_chip *intc, - unsigned int word) -{ - return (1 * intc->n_words + word) * sizeof(u32); -} - static inline unsigned int reg_mask_set(struct bcm7038_l1_chip *intc, unsigned int word) { @@ -121,7 +117,7 @@ static void bcm7038_l1_irq_handle(struct irq_desc *desc) struct irq_chip *chip = irq_desc_get_chip(desc); unsigned int idx; -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) && defined(CONFIG_MIPS) cpu = intc->cpus[cpu_logical_map(smp_processor_id())]; #else cpu = intc->cpus[0]; @@ -139,10 +135,8 @@ static void bcm7038_l1_irq_handle(struct irq_desc *desc) ~cpu->mask_cache[idx]; raw_spin_unlock_irqrestore(&intc->lock, flags); - for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) { - generic_handle_irq(irq_find_mapping(intc->domain, - base + hwirq)); - } + for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) + generic_handle_domain_irq(intc->domain, base + hwirq); } chained_irq_exit(chip, desc); @@ -190,6 +184,7 @@ static void bcm7038_l1_mask(struct irq_data *d) raw_spin_unlock_irqrestore(&intc->lock, flags); } +#if defined(CONFIG_MIPS) && defined(CONFIG_SMP) static int bcm7038_l1_set_affinity(struct irq_data *d, const struct cpumask *dest, bool force) @@ -216,42 +211,16 @@ static int bcm7038_l1_set_affinity(struct irq_data *d, return 0; } - -#ifdef CONFIG_SMP -static void bcm7038_l1_cpu_offline(struct irq_data *d) -{ - struct cpumask *mask = irq_data_get_affinity_mask(d); - int cpu = smp_processor_id(); - cpumask_t new_affinity; - - /* 
This CPU was not on the affinity mask */ - if (!cpumask_test_cpu(cpu, mask)) - return; - - if (cpumask_weight(mask) > 1) { - /* - * Multiple CPU affinity, remove this CPU from the affinity - * mask - */ - cpumask_copy(&new_affinity, mask); - cpumask_clear_cpu(cpu, &new_affinity); - } else { - /* Only CPU, put on the lowest online CPU */ - cpumask_clear(&new_affinity); - cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity); - } - irq_set_affinity_locked(d, &new_affinity, false); -} #endif -static int __init bcm7038_l1_init_one(struct device_node *dn, - unsigned int idx, - struct bcm7038_l1_chip *intc) +static int bcm7038_l1_init_one(struct device_node *dn, unsigned int idx, + struct bcm7038_l1_chip *intc) { struct resource res; resource_size_t sz; struct bcm7038_l1_cpu *cpu; unsigned int i, n_words, parent_irq; + int ret; if (of_address_to_resource(dn, idx, &res)) return -EINVAL; @@ -265,7 +234,15 @@ static int __init bcm7038_l1_init_one(struct device_node *dn, else if (intc->n_words != n_words) return -EINVAL; - cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32), + ret = of_property_read_u32_array(dn , "brcm,int-fwd-mask", + intc->irq_fwd_mask, n_words); + if (ret != 0 && ret != -EINVAL) { + /* property exists but has the wrong number of words */ + pr_err("invalid brcm,int-fwd-mask property\n"); + return -EINVAL; + } + + cpu = intc->cpus[idx] = kzalloc(struct_size(cpu, mask_cache, n_words), GFP_KERNEL); if (!cpu) return -ENOMEM; @@ -275,8 +252,11 @@ static int __init bcm7038_l1_init_one(struct device_node *dn, return -ENOMEM; for (i = 0; i < n_words; i++) { - l1_writel(0xffffffff, cpu->map_base + reg_mask_set(intc, i)); - cpu->mask_cache[i] = 0xffffffff; + l1_writel(~intc->irq_fwd_mask[i], + cpu->map_base + reg_mask_set(intc, i)); + l1_writel(intc->irq_fwd_mask[i], + cpu->map_base + reg_mask_clr(intc, i)); + cpu->mask_cache[i] = ~intc->irq_fwd_mask[i]; } parent_irq = irq_of_parse_and_map(dn, idx); @@ -284,28 +264,126 @@ static int __init bcm7038_l1_init_one(struct device_node *dn, pr_err("failed to map parent interrupt %d\n", parent_irq); return -EINVAL; } + + if (of_property_read_bool(dn, "brcm,irq-can-wake")) + enable_irq_wake(parent_irq); + irq_set_chained_handler_and_data(parent_irq, bcm7038_l1_irq_handle, intc); return 0; } +#ifdef CONFIG_PM_SLEEP +/* + * We keep a list of bcm7038_l1_chip used for suspend/resume. This hack is + * used because the struct chip_type suspend/resume hooks are not called + * unless chip_type is hooked onto a generic_chip. Since this driver does + * not use generic_chip, we need to manually hook our resume/suspend to + * syscore_ops. 
+ */ +static LIST_HEAD(bcm7038_l1_intcs_list); +static DEFINE_RAW_SPINLOCK(bcm7038_l1_intcs_lock); + +static int bcm7038_l1_suspend(void) +{ + struct bcm7038_l1_chip *intc; + int boot_cpu, word; + u32 val; + + /* Wakeup interrupt should only come from the boot cpu */ +#if defined(CONFIG_SMP) && defined(CONFIG_MIPS) + boot_cpu = cpu_logical_map(0); +#else + boot_cpu = 0; +#endif + + list_for_each_entry(intc, &bcm7038_l1_intcs_list, list) { + for (word = 0; word < intc->n_words; word++) { + val = intc->wake_mask[word] | intc->irq_fwd_mask[word]; + l1_writel(~val, + intc->cpus[boot_cpu]->map_base + reg_mask_set(intc, word)); + l1_writel(val, + intc->cpus[boot_cpu]->map_base + reg_mask_clr(intc, word)); + } + } + + return 0; +} + +static void bcm7038_l1_resume(void) +{ + struct bcm7038_l1_chip *intc; + int boot_cpu, word; + +#if defined(CONFIG_SMP) && defined(CONFIG_MIPS) + boot_cpu = cpu_logical_map(0); +#else + boot_cpu = 0; +#endif + + list_for_each_entry(intc, &bcm7038_l1_intcs_list, list) { + for (word = 0; word < intc->n_words; word++) { + l1_writel(intc->cpus[boot_cpu]->mask_cache[word], + intc->cpus[boot_cpu]->map_base + reg_mask_set(intc, word)); + l1_writel(~intc->cpus[boot_cpu]->mask_cache[word], + intc->cpus[boot_cpu]->map_base + reg_mask_clr(intc, word)); + } + } +} + +static struct syscore_ops bcm7038_l1_syscore_ops = { + .suspend = bcm7038_l1_suspend, + .resume = bcm7038_l1_resume, +}; + +static int bcm7038_l1_set_wake(struct irq_data *d, unsigned int on) +{ + struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d); + unsigned long flags; + u32 word = d->hwirq / IRQS_PER_WORD; + u32 mask = BIT(d->hwirq % IRQS_PER_WORD); + + raw_spin_lock_irqsave(&intc->lock, flags); + if (on) + intc->wake_mask[word] |= mask; + else + intc->wake_mask[word] &= ~mask; + raw_spin_unlock_irqrestore(&intc->lock, flags); + + return 0; +} +#endif + static struct irq_chip bcm7038_l1_irq_chip = { .name = "bcm7038-l1", .irq_mask = bcm7038_l1_mask, .irq_unmask = bcm7038_l1_unmask, +#if defined(CONFIG_SMP) && defined(CONFIG_MIPS) .irq_set_affinity = bcm7038_l1_set_affinity, -#ifdef CONFIG_SMP - .irq_cpu_offline = bcm7038_l1_cpu_offline, +#endif +#ifdef CONFIG_PM_SLEEP + .irq_set_wake = bcm7038_l1_set_wake, #endif }; static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw_irq) { + struct bcm7038_l1_chip *intc = d->host_data; + u32 mask = BIT(hw_irq % IRQS_PER_WORD); + u32 word = hw_irq / IRQS_PER_WORD; + + if (intc->irq_fwd_mask[word] & mask) + return -EPERM; + irq_set_chip_and_handler(virq, &bcm7038_l1_irq_chip, handle_level_irq); irq_set_chip_data(virq, d->host_data); - irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq))); + irqd_set_single_target(irq_get_irq_data(virq)); return 0; } @@ -314,9 +392,9 @@ static const struct irq_domain_ops bcm7038_l1_domain_ops = { .map = bcm7038_l1_map, }; -int __init bcm7038_l1_of_init(struct device_node *dn, - struct device_node *parent) +static int bcm7038_l1_probe(struct platform_device *pdev, struct device_node *parent) { + struct device_node *dn = pdev->dev.of_node; struct bcm7038_l1_chip *intc; int idx, ret; @@ -335,7 +413,7 @@ int __init bcm7038_l1_of_init(struct device_node *dn, } } - intc->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * intc->n_words, + intc->domain = irq_domain_create_linear(of_fwnode_handle(dn), IRQS_PER_WORD * intc->n_words, &bcm7038_l1_domain_ops, intc); if (!intc->domain) { ret = -ENOMEM; @@ -343,6 +421,19 @@ int __init bcm7038_l1_of_init(struct device_node *dn, goto out_unmap; } +#ifdef CONFIG_PM_SLEEP + /* Add bcm7038_l1_chip into a list */ + raw_spin_lock(&bcm7038_l1_intcs_lock); + list_add_tail(&intc->list, &bcm7038_l1_intcs_list); + raw_spin_unlock(&bcm7038_l1_intcs_lock); + + if (list_is_singular(&bcm7038_l1_intcs_list)) + register_syscore_ops(&bcm7038_l1_syscore_ops); +#endif
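Because the chip_type suspend/resume callbacks are only invoked for generic chips, the driver registers plain syscore operations instead, exactly as the comment above explains. The shape of that pattern, reduced to a minimal sketch (all names here are illustrative; note that syscore callbacks take no arguments and run on a single CPU with interrupts disabled, which is why the driver above walks a global list of controller instances):

#include <linux/init.h>
#include <linux/syscore_ops.h>

static int demo_syscore_suspend(void)
{
	/* program the wake masks into the hardware here */
	return 0;
}

static void demo_syscore_resume(void)
{
	/* write the cached runtime masks back here */
}

/* must not be const: register_syscore_ops() links it into a list */
static struct syscore_ops demo_syscore_ops = {
	.suspend = demo_syscore_suspend,
	.resume	 = demo_syscore_resume,
};

/* typically done once, when the first controller instance probes */
static void __init demo_pm_init(void)
{
	register_syscore_ops(&demo_syscore_ops);
}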
+ + pr_info("registered BCM7038 L1 intc (%pOF, IRQs: %d)\n", + dn, IRQS_PER_WORD * intc->n_words); + return 0; out_unmap: @@ -360,4 +451,8 @@ out_free: return ret; } -IRQCHIP_DECLARE(bcm7038_l1, "brcm,bcm7038-l1-intc", bcm7038_l1_of_init); +IRQCHIP_PLATFORM_DRIVER_BEGIN(bcm7038_l1) +IRQCHIP_MATCH("brcm,bcm7038-l1-intc", bcm7038_l1_probe) +IRQCHIP_PLATFORM_DRIVER_END(bcm7038_l1) +MODULE_DESCRIPTION("Broadcom STB 7038-style L1/L2 interrupt controller"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c index 8968e5e93fcb..518c9d4366a5 100644 --- a/drivers/irqchip/irq-bcm7120-l2.c +++ b/drivers/irqchip/irq-bcm7120-l2.c @@ -1,11 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Broadcom BCM7120 style Level 2 interrupt controller driver * * Copyright (C) 2014 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -66,21 +63,18 @@ static void bcm7120_l2_intc_irq_handle(struct irq_desc *desc) for (idx = 0; idx < b->n_words; idx++) { int base = idx * IRQS_PER_WORD; - struct irq_chip_generic *gc = - irq_get_domain_generic_chip(b->domain, base); + struct irq_chip_generic *gc; unsigned long pending; int hwirq; - irq_gc_lock(gc); - pending = irq_reg_readl(gc, b->stat_offset[idx]) & - gc->mask_cache & - data->irq_map_mask[idx]; - irq_gc_unlock(gc); - - for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) { - generic_handle_irq(irq_find_mapping(b->domain, - base + hwirq)); + gc = irq_get_domain_generic_chip(b->domain, base); + scoped_guard (raw_spinlock, &gc->lock) { + pending = irq_reg_readl(gc, b->stat_offset[idx]) & gc->mask_cache & + data->irq_map_mask[idx]; } + + for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) + generic_handle_domain_irq(b->domain, base + hwirq); } chained_irq_exit(chip, desc); @@ -91,11 +85,9 @@ static void bcm7120_l2_intc_suspend(struct irq_chip_generic *gc) struct bcm7120_l2_intc_data *b = gc->private; struct irq_chip_type *ct = gc->chip_types; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); if (b->can_wake) - irq_reg_writel(gc, gc->mask_cache | gc->wake_active, - ct->regs.mask); - irq_gc_unlock(gc); + irq_reg_writel(gc, gc->mask_cache | gc->wake_active, ct->regs.mask); } static void bcm7120_l2_intc_resume(struct irq_chip_generic *gc) @@ -103,9 +95,8 @@ static void bcm7120_l2_intc_resume(struct irq_chip_generic *gc) { struct irq_chip_type *ct = gc->chip_types; /* Restore the saved mask */ - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); irq_reg_writel(gc, gc->mask_cache, ct->regs.mask); - irq_gc_unlock(gc); } static int bcm7120_l2_intc_init_one(struct device_node *dn, @@ -146,11 +137,13 @@ static int bcm7120_l2_intc_init_one(struct device_node *dn, irq_set_chained_handler_and_data(parent_irq, bcm7120_l2_intc_irq_handle, l1_data); + if (data->can_wake) + enable_irq_wake(parent_irq); + return 0; } -static int __init bcm7120_l2_intc_iomap_7120(struct device_node *dn, - struct bcm7120_l2_intc_data *data) +static int 
bcm7120_l2_intc_iomap_7120(struct device_node *dn, struct bcm7120_l2_intc_data *data) { int ret; @@ -183,8 +176,7 @@ static int __init bcm7120_l2_intc_iomap_7120(struct device_node *dn, return 0; } -static int __init bcm7120_l2_intc_iomap_3380(struct device_node *dn, - struct bcm7120_l2_intc_data *data) +static int bcm7120_l2_intc_iomap_3380(struct device_node *dn, struct bcm7120_l2_intc_data *data) { unsigned int gc_idx; @@ -214,13 +206,13 @@ static int __init bcm7120_l2_intc_iomap_3380(struct device_node *dn, return 0; } -static int __init bcm7120_l2_intc_probe(struct device_node *dn, - struct device_node *parent, +static int bcm7120_l2_intc_probe(struct platform_device *pdev, struct device_node *parent, int (*iomap_regs_fn)(struct device_node *, - struct bcm7120_l2_intc_data *), + struct bcm7120_l2_intc_data *), const char *intc_name) { unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; + struct device_node *dn = pdev->dev.of_node; struct bcm7120_l2_intc_data *data; struct irq_chip_generic *gc; struct irq_chip_type *ct; @@ -232,7 +224,7 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn, if (!data) return -ENOMEM; - data->num_parent_irqs = of_irq_count(dn); + data->num_parent_irqs = platform_irq_count(pdev); if (data->num_parent_irqs <= 0) { pr_err("invalid number of parent interrupts\n"); ret = -ENOMEM; @@ -250,13 +242,15 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn, if (ret < 0) goto out_free_l1_data; + data->can_wake = of_property_read_bool(dn, "brcm,irq-can-wake"); + for (irq = 0; irq < data->num_parent_irqs; irq++) { ret = bcm7120_l2_intc_init_one(dn, data, irq, valid_mask); if (ret) goto out_free_l1_data; } - data->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * data->n_words, + data->domain = irq_domain_create_linear(of_fwnode_handle(dn), IRQS_PER_WORD * data->n_words, &irq_generic_chip_ops, NULL); if (!data->domain) { ret = -ENOMEM; @@ -271,15 +265,13 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn, flags |= IRQ_GC_BE_IO; ret = irq_alloc_domain_generic_chips(data->domain, IRQS_PER_WORD, 1, - dn->full_name, handle_level_irq, clr, 0, flags); + dn->full_name, handle_level_irq, clr, + IRQ_LEVEL, flags); if (ret) { pr_err("failed to allocate generic irq chip\n"); goto out_free_domain; } - if (of_property_read_bool(dn, "brcm,irq-can-wake")) - data->can_wake = true; - for (idx = 0; idx < data->n_words; idx++) { irq = idx * IRQS_PER_WORD; gc = irq_get_domain_generic_chip(data->domain, irq); @@ -310,7 +302,7 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn, if (data->can_wake) { /* This IRQ chip can wake the system, set all - * relevant child interupts in wake_enabled mask + * relevant child interrupts in wake_enabled mask */ gc->wake_enabled = 0xffffffff; gc->wake_enabled &= ~gc->unused; @@ -318,6 +310,9 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn, } } + pr_info("registered %s intc (%pOF, parent IRQ(s): %d)\n", + intc_name, dn, data->num_parent_irqs); + return 0; out_free_domain: @@ -333,22 +328,21 @@ out_unmap: return ret; } -static int __init bcm7120_l2_intc_probe_7120(struct device_node *dn, - struct device_node *parent) +static int bcm7120_l2_intc_probe_7120(struct platform_device *pdev, struct device_node *parent) { - return bcm7120_l2_intc_probe(dn, parent, bcm7120_l2_intc_iomap_7120, + return bcm7120_l2_intc_probe(pdev, parent, bcm7120_l2_intc_iomap_7120, "BCM7120 L2"); } -static int __init bcm7120_l2_intc_probe_3380(struct device_node *dn, - struct device_node *parent) 
+static int bcm7120_l2_intc_probe_3380(struct platform_device *pdev, struct device_node *parent) { - return bcm7120_l2_intc_probe(dn, parent, bcm7120_l2_intc_iomap_3380, + return bcm7120_l2_intc_probe(pdev, parent, bcm7120_l2_intc_iomap_3380, "BCM3380 L2"); } -IRQCHIP_DECLARE(bcm7120_l2_intc, "brcm,bcm7120-l2-intc", - bcm7120_l2_intc_probe_7120); - -IRQCHIP_DECLARE(bcm3380_l2_intc, "brcm,bcm3380-l2-intc", - bcm7120_l2_intc_probe_3380); +IRQCHIP_PLATFORM_DRIVER_BEGIN(bcm7120_l2) +IRQCHIP_MATCH("brcm,bcm7120-l2-intc", bcm7120_l2_intc_probe_7120) +IRQCHIP_MATCH("brcm,bcm3380-l2-intc", bcm7120_l2_intc_probe_3380) +IRQCHIP_PLATFORM_DRIVER_END(bcm7120_l2) +MODULE_DESCRIPTION("Broadcom STB 7120-style L2 interrupt controller driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c index 0e65f609352e..bb7078d6524f 100644 --- a/drivers/irqchip/irq-brcmstb-l2.c +++ b/drivers/irqchip/irq-brcmstb-l2.c @@ -1,16 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Generic Broadcom Set Top Box Level 2 Interrupt controller driver * - * Copyright (C) 2014-2017 Broadcom - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. + * Copyright (C) 2014-2024 Broadcom */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -23,7 +15,6 @@ #include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_address.h> -#include <linux/of_platform.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/io.h> @@ -70,32 +61,6 @@ struct brcmstb_l2_intc_data { u32 saved_mask; /* for suspend/resume */ }; -/** - * brcmstb_l2_mask_and_ack - Mask and ack pending interrupt - * @d: irq_data - * - * Chip has separate enable/disable registers instead of a single mask - * register and pending interrupt is acknowledged by setting a bit. - * - * Note: This function is generic and could easily be added to the - * generic irqchip implementation if there ever becomes a will to do so. - * Perhaps with a name like irq_gc_mask_disable_and_ack_set(). 
- * - * e.g.: https://patchwork.kernel.org/patch/9831047/ - */ -static void brcmstb_l2_mask_and_ack(struct irq_data *d) -{ - struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); - struct irq_chip_type *ct = irq_data_get_chip_type(d); - u32 mask = d->mask; - - irq_gc_lock(gc); - irq_reg_writel(gc, mask, ct->regs.disable); - *ct->mask_cache &= ~mask; - irq_reg_writel(gc, mask, ct->regs.ack); - irq_gc_unlock(gc); -} - static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc) { struct brcmstb_l2_intc_data *b = irq_desc_get_handler_data(desc); @@ -118,28 +83,41 @@ static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc) do { irq = ffs(status) - 1; status &= ~(1 << irq); - generic_handle_irq(irq_linear_revmap(b->domain, irq)); + generic_handle_domain_irq(b->domain, irq); } while (status); out: + /* Don't ack parent before all device writes are done */ + wmb(); + chained_irq_exit(chip, desc); } -static void brcmstb_l2_intc_suspend(struct irq_data *d) +static void __brcmstb_l2_intc_suspend(struct irq_data *d, bool save) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); struct irq_chip_type *ct = irq_data_get_chip_type(d); struct brcmstb_l2_intc_data *b = gc->private; - irq_gc_lock(gc); + guard(raw_spinlock_irqsave)(&gc->lock); /* Save the current mask */ - b->saved_mask = irq_reg_readl(gc, ct->regs.mask); + if (save) + b->saved_mask = irq_reg_readl(gc, ct->regs.mask); if (b->can_wake) { /* Program the wakeup mask */ irq_reg_writel(gc, ~gc->wake_active, ct->regs.disable); irq_reg_writel(gc, gc->wake_active, ct->regs.enable); } - irq_gc_unlock(gc); +} + +static void brcmstb_l2_intc_shutdown(struct irq_data *d) +{ + __brcmstb_l2_intc_suspend(d, false); +} + +static void brcmstb_l2_intc_suspend(struct irq_data *d) +{ + __brcmstb_l2_intc_suspend(d, true); } static void brcmstb_l2_intc_resume(struct irq_data *d) @@ -148,7 +126,7 @@ static void brcmstb_l2_intc_resume(struct irq_data *d) struct irq_chip_type *ct = irq_data_get_chip_type(d); struct brcmstb_l2_intc_data *b = gc->private; - irq_gc_lock(gc); + guard(raw_spinlock_irqsave)(&gc->lock); if (ct->chip.irq_ack) { /* Clear unmasked non-wakeup interrupts */ irq_reg_writel(gc, ~b->saved_mask & ~gc->wake_active, @@ -158,15 +136,14 @@ static void brcmstb_l2_intc_resume(struct irq_data *d) /* Restore the saved mask */ irq_reg_writel(gc, b->saved_mask, ct->regs.disable); irq_reg_writel(gc, ~b->saved_mask, ct->regs.enable); - irq_gc_unlock(gc); } -static int __init brcmstb_l2_intc_of_init(struct device_node *np, - struct device_node *parent, - const struct brcmstb_intc_init_params - *init_params) +static int brcmstb_l2_intc_probe(struct platform_device *pdev, struct device_node *parent, + const struct brcmstb_intc_init_params *init_params) { unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; + unsigned int set = 0; + struct device_node *np = pdev->dev.of_node; struct brcmstb_l2_intc_data *data; struct irq_chip_type *ct; int ret; @@ -200,7 +177,7 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np, goto out_unmap; } - data->domain = irq_domain_add_linear(np, 32, + data->domain = irq_domain_create_linear(of_fwnode_handle(np), 32, &irq_generic_chip_ops, NULL); if (!data->domain) { ret = -ENOMEM; @@ -214,9 +191,12 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np, if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) flags |= IRQ_GC_BE_IO; + if (init_params->handler == handle_level_irq) + set |= IRQ_LEVEL; + /* Allocate a single Generic IRQ chip for this node */ ret = 
irq_alloc_domain_generic_chips(data->domain, 32, 1, - np->full_name, init_params->handler, clr, 0, flags); + np->full_name, init_params->handler, clr, set, flags); if (ret) { pr_err("failed to allocate generic irq chip\n"); goto out_free_domain; @@ -237,7 +217,7 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np, if (init_params->cpu_clear >= 0) { ct->regs.ack = init_params->cpu_clear; ct->chip.irq_ack = irq_gc_ack_set_bit; - ct->chip.irq_mask_ack = brcmstb_l2_mask_and_ack; + ct->chip.irq_mask_ack = irq_gc_mask_disable_and_ack_set; } else { /* No Ack - but still slightly more efficient to define this */ ct->chip.irq_mask_ack = irq_gc_mask_disable_reg; @@ -252,7 +232,7 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np, ct->chip.irq_suspend = brcmstb_l2_intc_suspend; ct->chip.irq_resume = brcmstb_l2_intc_resume; - ct->chip.irq_pm_shutdown = brcmstb_l2_intc_suspend; + ct->chip.irq_pm_shutdown = brcmstb_l2_intc_shutdown; if (data->can_wake) { /* This IRQ chip can wake the system, set all child interrupts @@ -260,8 +240,11 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np, */ data->gc->wake_enabled = 0xffffffff; ct->chip.irq_set_wake = irq_gc_set_wake; + enable_irq_wake(parent_irq); } + pr_info("registered L2 intc (%pOF, parent irq: %d)\n", np, parent_irq); + return 0; out_free_domain: @@ -273,17 +256,21 @@ out_free: return ret; } -int __init brcmstb_l2_edge_intc_of_init(struct device_node *np, - struct device_node *parent) +static int brcmstb_l2_edge_intc_probe(struct platform_device *pdev, struct device_node *parent) { - return brcmstb_l2_intc_of_init(np, parent, &l2_edge_intc_init); + return brcmstb_l2_intc_probe(pdev, parent, &l2_edge_intc_init); } -IRQCHIP_DECLARE(brcmstb_l2_intc, "brcm,l2-intc", brcmstb_l2_edge_intc_of_init); -int __init brcmstb_l2_lvl_intc_of_init(struct device_node *np, - struct device_node *parent) +static int brcmstb_l2_lvl_intc_probe(struct platform_device *pdev, struct device_node *parent) { - return brcmstb_l2_intc_of_init(np, parent, &l2_lvl_intc_init); + return brcmstb_l2_intc_probe(pdev, parent, &l2_lvl_intc_init); } -IRQCHIP_DECLARE(bcm7271_l2_intc, "brcm,bcm7271-l2-intc", - brcmstb_l2_lvl_intc_of_init); + +IRQCHIP_PLATFORM_DRIVER_BEGIN(brcmstb_l2) +IRQCHIP_MATCH("brcm,l2-intc", brcmstb_l2_edge_intc_probe) +IRQCHIP_MATCH("brcm,hif-spi-l2-intc", brcmstb_l2_edge_intc_probe) +IRQCHIP_MATCH("brcm,upg-aux-aon-l2-intc", brcmstb_l2_edge_intc_probe) +IRQCHIP_MATCH("brcm,bcm7271-l2-intc", brcmstb_l2_lvl_intc_probe) +IRQCHIP_PLATFORM_DRIVER_END(brcmstb_l2) +MODULE_DESCRIPTION("Broadcom STB generic L2 interrupt controller"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/irqchip/irq-clps711x.c b/drivers/irqchip/irq-clps711x.c index f913f4db7ae1..c4b73ba2323b 100644 --- a/drivers/irqchip/irq-clps711x.c +++ b/drivers/irqchip/irq-clps711x.c @@ -1,12 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * CLPS711X IRQ driver * * Copyright (C) 2013 Alexander Shiyan <shc_work@mail.ru> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
*/ #include <linux/io.h> @@ -73,7 +69,7 @@ static struct { struct irq_domain_ops ops; } *clps711x_intc; -static asmlinkage void __exception_irq_entry clps711x_irqh(struct pt_regs *regs) +static void __exception_irq_entry clps711x_irqh(struct pt_regs *regs) { u32 irqstat; @@ -81,14 +77,14 @@ static asmlinkage void __exception_irq_entry clps711x_irqh(struct pt_regs *regs) irqstat = readw_relaxed(clps711x_intc->intmr[0]) & readw_relaxed(clps711x_intc->intsr[0]); if (irqstat) - handle_domain_irq(clps711x_intc->domain, - fls(irqstat) - 1, regs); + generic_handle_domain_irq(clps711x_intc->domain, + fls(irqstat) - 1); irqstat = readw_relaxed(clps711x_intc->intmr[1]) & readw_relaxed(clps711x_intc->intsr[1]); if (irqstat) - handle_domain_irq(clps711x_intc->domain, - fls(irqstat) - 1 + 16, regs); + generic_handle_domain_irq(clps711x_intc->domain, + fls(irqstat) - 1 + 16); } while (irqstat); } @@ -188,14 +184,14 @@ static int __init _clps711x_intc_init(struct device_node *np, clps711x_intc->ops.map = clps711x_intc_irq_map; clps711x_intc->ops.xlate = irq_domain_xlate_onecell; clps711x_intc->domain = - irq_domain_add_legacy(np, ARRAY_SIZE(clps711x_irqs), - 0, 0, &clps711x_intc->ops, NULL); + irq_domain_create_legacy(of_fwnode_handle(np), ARRAY_SIZE(clps711x_irqs), 0, 0, + &clps711x_intc->ops, NULL); if (!clps711x_intc->domain) { err = -ENOMEM; goto out_irqfree; } - irq_set_default_host(clps711x_intc->domain); + irq_set_default_domain(clps711x_intc->domain); set_handle_irq(clps711x_irqh); #ifdef CONFIG_FIQ @@ -216,12 +212,6 @@ out_kfree: return err; } -void __init clps711x_intc_init(phys_addr_t base, resource_size_t size) -{ - BUG_ON(_clps711x_intc_init(NULL, base, size)); -} - -#ifdef CONFIG_IRQCHIP static int __init clps711x_intc_init_dt(struct device_node *np, struct device_node *parent) { @@ -235,4 +225,3 @@ static int __init clps711x_intc_init_dt(struct device_node *np, return _clps711x_intc_init(np, res.start, resource_size(&res)); } IRQCHIP_DECLARE(clps711x, "cirrus,ep7209-intc", clps711x_intc_init_dt); -#endif diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c index 99d97d7e3fd7..66bb39e24a52 100644 --- a/drivers/irqchip/irq-crossbar.c +++ b/drivers/irqchip/irq-crossbar.c @@ -1,13 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * drivers/irqchip/irq-crossbar.c * * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com * Author: Sricharan R <r.sricharan@ti.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * */ #include <linux/err.h> #include <linux/io.h> @@ -355,10 +351,8 @@ static int __init irqcrossbar_init(struct device_node *node, if (err) return err; - domain = irq_domain_add_hierarchy(parent_domain, 0, - cb->max_crossbar_sources, - node, &crossbar_domain_ops, - NULL); + domain = irq_domain_create_hierarchy(parent_domain, 0, cb->max_crossbar_sources, + of_fwnode_handle(node), &crossbar_domain_ops, NULL); if (!domain) { pr_err("%pOF: failed to allocated domain\n", node); return -ENOMEM; diff --git a/drivers/irqchip/irq-csky-apb-intc.c b/drivers/irqchip/irq-csky-apb-intc.c index 5a2ec43b7ddd..5b7150705d29 100644 --- a/drivers/irqchip/irq-csky-apb-intc.c +++ b/drivers/irqchip/irq-csky-apb-intc.c @@ -50,11 +50,10 @@ static void irq_ck_mask_set_bit(struct irq_data *d) unsigned long ifr = ct->regs.mask - 8; u32 mask = d->mask; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); *ct->mask_cache |= mask; irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask); irq_reg_writel(gc, irq_reg_readl(gc, ifr) & ~mask, ifr); - irq_gc_unlock(gc); } static void __init ck_set_gc(struct device_node *node, void __iomem *reg_base, @@ -68,7 +67,7 @@ static void __init ck_set_gc(struct device_node *node, void __iomem *reg_base, gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit; gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit; - if (of_find_property(node, "csky,support-pulse-signal", NULL)) + if (of_property_read_bool(node, "csky,support-pulse-signal")) gc->chip_types[0].chip.irq_unmask = irq_ck_mask_set_bit; } @@ -114,7 +113,7 @@ ck_intc_init_comm(struct device_node *node, struct device_node *parent) return -EINVAL; } - root_domain = irq_domain_add_linear(node, nr_irq, + root_domain = irq_domain_create_linear(of_fwnode_handle(node), nr_irq, &irq_generic_chip_ops, NULL); if (!root_domain) { pr_err("C-SKY Intc irq_domain_add failed.\n"); @@ -136,11 +135,11 @@ static inline bool handle_irq_perbit(struct pt_regs *regs, u32 hwirq, u32 irq_base) { if (hwirq == 0) - return 0; + return false; - handle_domain_irq(root_domain, irq_base + __fls(hwirq), regs); + generic_handle_domain_irq(root_domain, irq_base + __fls(hwirq)); - return 1; + return true; } /* gx6605s 64 irqs interrupt controller */ @@ -176,7 +175,7 @@ gx_intc_init(struct device_node *node, struct device_node *parent) writel(0x0, reg_base + GX_INTC_NEN63_32); /* - * Initial mask reg with all unmasked, because we only use enalbe reg + * Initial mask reg with all unmasked, because we only use enable reg */ writel(0x0, reg_base + GX_INTC_NMASK31_00); writel(0x0, reg_base + GX_INTC_NMASK63_32); diff --git a/drivers/irqchip/irq-csky-mpintc.c b/drivers/irqchip/irq-csky-mpintc.c index c67c961ab6cc..1d1f5091f26f 100644 --- a/drivers/irqchip/irq-csky-mpintc.c +++ b/drivers/irqchip/irq-csky-mpintc.c @@ -32,8 +32,8 @@ static void __iomem *INTCL_base; #define INTCG_CIDSTR 0x1000 #define INTCL_PICTLR 0x0 +#define INTCL_CFGR 0x14 #define INTCL_SIGR 0x60 -#define INTCL_HPPIR 0x68 #define INTCL_RDYIR 0x6c #define INTCL_SENR 0xa0 #define INTCL_CENR 0xa4 @@ -41,25 +41,53 @@ static void __iomem *INTCL_base; static DEFINE_PER_CPU(void __iomem *, intcl_reg); +static unsigned long *__trigger; + +#define IRQ_OFFSET(irq) ((irq < COMM_IRQ_BASE) ? 
irq : (irq - COMM_IRQ_BASE)) + +#define TRIG_BYTE_OFFSET(i) ((((i) * 2) / 32) * 4) +#define TRIG_BIT_OFFSET(i) (((i) * 2) % 32) + +#define TRIG_VAL(trigger, irq) (trigger << TRIG_BIT_OFFSET(IRQ_OFFSET(irq))) +#define TRIG_VAL_MSK(irq) (~(3 << TRIG_BIT_OFFSET(IRQ_OFFSET(irq)))) + +#define TRIG_BASE(irq) \ + (TRIG_BYTE_OFFSET(IRQ_OFFSET(irq)) + ((irq < COMM_IRQ_BASE) ? \ + (this_cpu_read(intcl_reg) + INTCL_CFGR) : (INTCG_base + INTCG_CICFGR))) + +static DEFINE_SPINLOCK(setup_lock); +static void setup_trigger(unsigned long irq, unsigned long trigger) +{ + unsigned int tmp; + + spin_lock(&setup_lock); + + /* setup trigger */ + tmp = readl_relaxed(TRIG_BASE(irq)) & TRIG_VAL_MSK(irq); + + writel_relaxed(tmp | TRIG_VAL(trigger, irq), TRIG_BASE(irq)); + + spin_unlock(&setup_lock); +} + static void csky_mpintc_handler(struct pt_regs *regs) { void __iomem *reg_base = this_cpu_read(intcl_reg); - do { - handle_domain_irq(root_domain, - readl_relaxed(reg_base + INTCL_RDYIR), - regs); - } while (readl_relaxed(reg_base + INTCL_HPPIR) & BIT(31)); + generic_handle_domain_irq(root_domain, + readl_relaxed(reg_base + INTCL_RDYIR)); } -static void csky_mpintc_enable(struct irq_data *d) +static void csky_mpintc_unmask(struct irq_data *d) { void __iomem *reg_base = this_cpu_read(intcl_reg); + setup_trigger(d->hwirq, __trigger[d->hwirq]); + writel_relaxed(d->hwirq, reg_base + INTCL_SENR); } -static void csky_mpintc_disable(struct irq_data *d) +static void csky_mpintc_mask(struct irq_data *d) { void __iomem *reg_base = this_cpu_read(intcl_reg); @@ -73,6 +101,28 @@ static void csky_mpintc_eoi(struct irq_data *d) writel_relaxed(d->hwirq, reg_base + INTCL_CACR); } +static int csky_mpintc_set_type(struct irq_data *d, unsigned int type) +{ + switch (type & IRQ_TYPE_SENSE_MASK) { + case IRQ_TYPE_LEVEL_HIGH: + __trigger[d->hwirq] = 0; + break; + case IRQ_TYPE_LEVEL_LOW: + __trigger[d->hwirq] = 1; + break; + case IRQ_TYPE_EDGE_RISING: + __trigger[d->hwirq] = 2; + break; + case IRQ_TYPE_EDGE_FALLING: + __trigger[d->hwirq] = 3; + break; + default: + return -EINVAL; + } + + return 0; +} + #ifdef CONFIG_SMP static int csky_irq_set_affinity(struct irq_data *d, const struct cpumask *mask_val, @@ -89,8 +139,19 @@ static int csky_irq_set_affinity(struct irq_data *d, if (cpu >= nr_cpu_ids) return -EINVAL; - /* Enable interrupt destination */ - cpu |= BIT(31); + /* + * The csky,mpintc could support auto irq deliver, but it only + * could deliver external irq to one cpu or all cpus. So it + * doesn't support deliver external irq to a group of cpus + * with cpu_mask. + * SO we only use auto deliver mode when affinity mask_val is + * equal to cpu_present_mask. 
+ * + */ + if (cpumask_equal(mask_val, cpu_present_mask)) + cpu = 0; + else + cpu |= BIT(31); writel_relaxed(cpu, INTCG_base + INTCG_CIDSTR + offset); @@ -103,8 +164,9 @@ static int csky_irq_set_affinity(struct irq_data *d, static struct irq_chip csky_irq_chip = { .name = "C-SKY SMP Intc", .irq_eoi = csky_mpintc_eoi, - .irq_enable = csky_mpintc_enable, - .irq_disable = csky_mpintc_disable, + .irq_unmask = csky_mpintc_unmask, + .irq_mask = csky_mpintc_mask, + .irq_set_type = csky_mpintc_set_type, #ifdef CONFIG_SMP .irq_set_affinity = csky_irq_set_affinity, #endif @@ -125,9 +187,26 @@ static int csky_irqdomain_map(struct irq_domain *d, unsigned int irq, return 0; } +static int csky_irq_domain_xlate_cells(struct irq_domain *d, + struct device_node *ctrlr, const u32 *intspec, + unsigned int intsize, unsigned long *out_hwirq, + unsigned int *out_type) +{ + if (WARN_ON(intsize < 1)) + return -EINVAL; + + *out_hwirq = intspec[0]; + if (intsize > 1) + *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK; + else + *out_type = IRQ_TYPE_LEVEL_HIGH; + + return 0; +} + static const struct irq_domain_ops csky_irqdomain_ops = { .map = csky_irqdomain_map, - .xlate = irq_domain_xlate_onecell, + .xlate = csky_irq_domain_xlate_cells, }; #ifdef CONFIG_SMP @@ -161,6 +240,10 @@ csky_mpintc_init(struct device_node *node, struct device_node *parent) if (ret < 0) nr_irq = INTC_IRQS; + __trigger = kcalloc(nr_irq, sizeof(unsigned long), GFP_KERNEL); + if (__trigger == NULL) + return -ENXIO; + if (INTCG_base == NULL) { INTCG_base = ioremap(mfcr("cr<31, 14>"), INTCL_SIZE*nr_cpu_ids + INTCG_SIZE); @@ -172,7 +255,7 @@ csky_mpintc_init(struct device_node *node, struct device_node *parent) writel_relaxed(BIT(0), INTCG_base + INTCG_ICTLR); } - root_domain = irq_domain_add_linear(node, nr_irq, &csky_irqdomain_ops, + root_domain = irq_domain_create_linear(of_fwnode_handle(node), nr_irq, &csky_irqdomain_ops, NULL); if (!root_domain) return -ENXIO; diff --git a/drivers/irqchip/irq-davinci-cp-intc.c b/drivers/irqchip/irq-davinci-cp-intc.c new file mode 100644 index 000000000000..00cdcc90f614 --- /dev/null +++ b/drivers/irqchip/irq-davinci-cp-intc.c @@ -0,0 +1,246 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Author: Steve Chen <schen@mvista.com> +// Copyright (C) 2008-2009, MontaVista Software, Inc. 
<source@mvista.com> +// Author: Bartosz Golaszewski <bgolaszewski@baylibre.com> +// Copyright (C) 2019, Texas Instruments +// +// TI Common Platform Interrupt Controller (cp_intc) driver + +#include <linux/export.h> +#include <linux/init.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqdomain.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> + +#include <asm/exception.h> + +#define DAVINCI_CP_INTC_CTRL 0x04 +#define DAVINCI_CP_INTC_HOST_CTRL 0x0c +#define DAVINCI_CP_INTC_GLOBAL_ENABLE 0x10 +#define DAVINCI_CP_INTC_SYS_STAT_IDX_CLR 0x24 +#define DAVINCI_CP_INTC_SYS_ENABLE_IDX_SET 0x28 +#define DAVINCI_CP_INTC_SYS_ENABLE_IDX_CLR 0x2c +#define DAVINCI_CP_INTC_HOST_ENABLE_IDX_SET 0x34 +#define DAVINCI_CP_INTC_HOST_ENABLE_IDX_CLR 0x38 +#define DAVINCI_CP_INTC_PRIO_IDX 0x80 +#define DAVINCI_CP_INTC_SYS_STAT_CLR(n) (0x0280 + (n << 2)) +#define DAVINCI_CP_INTC_SYS_ENABLE_CLR(n) (0x0380 + (n << 2)) +#define DAVINCI_CP_INTC_CHAN_MAP(n) (0x0400 + (n << 2)) +#define DAVINCI_CP_INTC_SYS_POLARITY(n) (0x0d00 + (n << 2)) +#define DAVINCI_CP_INTC_SYS_TYPE(n) (0x0d80 + (n << 2)) +#define DAVINCI_CP_INTC_HOST_ENABLE(n) (0x1500 + (n << 2)) +#define DAVINCI_CP_INTC_PRI_INDX_MASK GENMASK(9, 0) +#define DAVINCI_CP_INTC_GPIR_NONE BIT(31) + +static void __iomem *davinci_cp_intc_base; +static struct irq_domain *davinci_cp_intc_irq_domain; + +static inline unsigned int davinci_cp_intc_read(unsigned int offset) +{ + return readl_relaxed(davinci_cp_intc_base + offset); +} + +static inline void davinci_cp_intc_write(unsigned long value, + unsigned int offset) +{ + writel_relaxed(value, davinci_cp_intc_base + offset); +} + +static void davinci_cp_intc_ack_irq(struct irq_data *d) +{ + davinci_cp_intc_write(d->hwirq, DAVINCI_CP_INTC_SYS_STAT_IDX_CLR); +} + +static void davinci_cp_intc_mask_irq(struct irq_data *d) +{ + /* XXX don't know why we need to disable nIRQ here... 
*/ + davinci_cp_intc_write(1, DAVINCI_CP_INTC_HOST_ENABLE_IDX_CLR); + davinci_cp_intc_write(d->hwirq, DAVINCI_CP_INTC_SYS_ENABLE_IDX_CLR); + davinci_cp_intc_write(1, DAVINCI_CP_INTC_HOST_ENABLE_IDX_SET); +} + +static void davinci_cp_intc_unmask_irq(struct irq_data *d) +{ + davinci_cp_intc_write(d->hwirq, DAVINCI_CP_INTC_SYS_ENABLE_IDX_SET); +} + +static int davinci_cp_intc_set_irq_type(struct irq_data *d, + unsigned int flow_type) +{ + unsigned int reg, mask, polarity, type; + + reg = BIT_WORD(d->hwirq); + mask = BIT_MASK(d->hwirq); + polarity = davinci_cp_intc_read(DAVINCI_CP_INTC_SYS_POLARITY(reg)); + type = davinci_cp_intc_read(DAVINCI_CP_INTC_SYS_TYPE(reg)); + + switch (flow_type) { + case IRQ_TYPE_EDGE_RISING: + polarity |= mask; + type |= mask; + break; + case IRQ_TYPE_EDGE_FALLING: + polarity &= ~mask; + type |= mask; + break; + case IRQ_TYPE_LEVEL_HIGH: + polarity |= mask; + type &= ~mask; + break; + case IRQ_TYPE_LEVEL_LOW: + polarity &= ~mask; + type &= ~mask; + break; + default: + return -EINVAL; + } + + davinci_cp_intc_write(polarity, DAVINCI_CP_INTC_SYS_POLARITY(reg)); + davinci_cp_intc_write(type, DAVINCI_CP_INTC_SYS_TYPE(reg)); + + return 0; +} + +static struct irq_chip davinci_cp_intc_irq_chip = { + .name = "cp_intc", + .irq_ack = davinci_cp_intc_ack_irq, + .irq_mask = davinci_cp_intc_mask_irq, + .irq_unmask = davinci_cp_intc_unmask_irq, + .irq_set_type = davinci_cp_intc_set_irq_type, + .flags = IRQCHIP_SKIP_SET_WAKE, +}; + +static void __exception_irq_entry davinci_cp_intc_handle_irq(struct pt_regs *regs) +{ + int gpir, irqnr, none; + + /* + * The interrupt number is in first ten bits. The NONE field set to 1 + * indicates a spurious irq. + */ + + gpir = davinci_cp_intc_read(DAVINCI_CP_INTC_PRIO_IDX); + irqnr = gpir & DAVINCI_CP_INTC_PRI_INDX_MASK; + none = gpir & DAVINCI_CP_INTC_GPIR_NONE; + + if (unlikely(none)) { + pr_err_once("%s: spurious irq!\n", __func__); + return; + } + + generic_handle_domain_irq(davinci_cp_intc_irq_domain, irqnr); +} + +static int davinci_cp_intc_host_map(struct irq_domain *h, unsigned int virq, + irq_hw_number_t hw) +{ + pr_debug("cp_intc_host_map(%d, 0x%lx)\n", virq, hw); + + irq_set_chip(virq, &davinci_cp_intc_irq_chip); + irq_set_probe(virq); + irq_set_handler(virq, handle_edge_irq); + + return 0; +} + +static const struct irq_domain_ops davinci_cp_intc_irq_domain_ops = { + .map = davinci_cp_intc_host_map, + .xlate = irq_domain_xlate_onetwocell, +}; + +static int __init davinci_cp_intc_do_init(struct resource *res, unsigned int num_irqs, + struct device_node *node) +{ + unsigned int num_regs = BITS_TO_LONGS(num_irqs); + int offset, irq_base; + void __iomem *req; + + req = request_mem_region(res->start, resource_size(res), "davinci-cp-intc"); + if (!req) { + pr_err("%s: register range busy\n", __func__); + return -EBUSY; + } + + davinci_cp_intc_base = ioremap(res->start, resource_size(res)); + if (!davinci_cp_intc_base) { + pr_err("%s: unable to ioremap register range\n", __func__); + return -EINVAL; + } + + davinci_cp_intc_write(0, DAVINCI_CP_INTC_GLOBAL_ENABLE); + + /* Disable all host interrupts */ + davinci_cp_intc_write(0, DAVINCI_CP_INTC_HOST_ENABLE(0)); + + /* Disable system interrupts */ + for (offset = 0; offset < num_regs; offset++) + davinci_cp_intc_write(~0, DAVINCI_CP_INTC_SYS_ENABLE_CLR(offset)); + + /* Set to normal mode, no nesting, no priority hold */ + davinci_cp_intc_write(0, DAVINCI_CP_INTC_CTRL); + davinci_cp_intc_write(0, DAVINCI_CP_INTC_HOST_CTRL); + + /* Clear system interrupt status */ + for (offset = 0; offset < 
num_regs; offset++) + davinci_cp_intc_write(~0, DAVINCI_CP_INTC_SYS_STAT_CLR(offset)); + + /* Enable nIRQ (what about nFIQ?) */ + davinci_cp_intc_write(1, DAVINCI_CP_INTC_HOST_ENABLE_IDX_SET); + + /* 4 channels per register */ + num_regs = (num_irqs + 3) >> 2; + /* Default all priorities to channel 7. */ + for (offset = 0; offset < num_regs; offset++) + davinci_cp_intc_write(0x07070707, DAVINCI_CP_INTC_CHAN_MAP(offset)); + + irq_base = irq_alloc_descs(-1, 0, num_irqs, 0); + if (irq_base < 0) { + pr_err("%s: unable to allocate interrupt descriptors: %d\n", __func__, irq_base); + return irq_base; + } + + davinci_cp_intc_irq_domain = irq_domain_create_legacy(of_fwnode_handle(node), num_irqs, + irq_base, 0, + &davinci_cp_intc_irq_domain_ops, + NULL); + + if (!davinci_cp_intc_irq_domain) { + pr_err("%s: unable to create an interrupt domain\n", __func__); + return -EINVAL; + } + + set_handle_irq(davinci_cp_intc_handle_irq); + + /* Enable global interrupt */ + davinci_cp_intc_write(1, DAVINCI_CP_INTC_GLOBAL_ENABLE); + + return 0; +} + +static int __init davinci_cp_intc_of_init(struct device_node *node, + struct device_node *parent) +{ + unsigned int num_irqs; + struct resource res; + int ret; + + ret = of_address_to_resource(node, 0, &res); + if (ret) { + pr_err("%s: unable to get the register range from device-tree\n", __func__); + return ret; + } + + ret = of_property_read_u32(node, "ti,intc-size", &num_irqs); + if (ret) { + pr_err("%s: unable to read the 'ti,intc-size' property\n", __func__); + return ret; + } + + return davinci_cp_intc_do_init(&res, num_irqs, node); +} +IRQCHIP_DECLARE(cp_intc, "ti,cp-intc", davinci_cp_intc_of_init); diff --git a/drivers/irqchip/irq-digicolor.c b/drivers/irqchip/irq-digicolor.c index fc38d2da11b9..eb5a8de82751 100644 --- a/drivers/irqchip/irq-digicolor.c +++ b/drivers/irqchip/irq-digicolor.c @@ -50,7 +50,7 @@ static void __exception_irq_entry digicolor_handle_irq(struct pt_regs *regs) return; } - handle_domain_irq(digicolor_irq_domain, hwirq, regs); + generic_handle_domain_irq(digicolor_irq_domain, hwirq); } while (1); } @@ -95,7 +95,7 @@ static int __init digicolor_of_init(struct device_node *node, regmap_write(ucregs, UC_IRQ_CONTROL, 1); digicolor_irq_domain = - irq_domain_add_linear(node, 64, &irq_generic_chip_ops, NULL); + irq_domain_create_linear(of_fwnode_handle(node), 64, &irq_generic_chip_ops, NULL); if (!digicolor_irq_domain) { pr_err("%pOF: unable to create IRQ domain\n", node); return -ENOMEM; diff --git a/drivers/irqchip/irq-dw-apb-ictl.c b/drivers/irqchip/irq-dw-apb-ictl.c index e4550e9c810b..4240a0dbf627 100644 --- a/drivers/irqchip/irq-dw-apb-ictl.c +++ b/drivers/irqchip/irq-dw-apb-ictl.c @@ -17,6 +17,7 @@ #include <linux/irqchip/chained_irq.h> #include <linux/of_address.h> #include <linux/of_irq.h> +#include <linux/interrupt.h> #define APB_INT_ENABLE_L 0x00 #define APB_INT_ENABLE_H 0x04 @@ -26,7 +27,28 @@ #define APB_INT_FINALSTATUS_H 0x34 #define APB_INT_BASE_OFFSET 0x04 -static void dw_apb_ictl_handler(struct irq_desc *desc) +/* irq domain of the primary interrupt controller. 
*/ +static struct irq_domain *dw_apb_ictl_irq_domain; + +static void __irq_entry dw_apb_ictl_handle_irq(struct pt_regs *regs) +{ + struct irq_domain *d = dw_apb_ictl_irq_domain; + int n; + + for (n = 0; n < d->revmap_size; n += 32) { + struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, n); + u32 stat = readl_relaxed(gc->reg_base + APB_INT_FINALSTATUS_L); + + while (stat) { + u32 hwirq = ffs(stat) - 1; + + generic_handle_domain_irq(d, gc->irq_base + hwirq); + stat &= ~BIT(hwirq); + } + } +} + +static void dw_apb_ictl_handle_irq_cascaded(struct irq_desc *desc) { struct irq_domain *d = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); @@ -40,26 +62,48 @@ static void dw_apb_ictl_handler(struct irq_desc *desc) while (stat) { u32 hwirq = ffs(stat) - 1; - u32 virq = irq_find_mapping(d, gc->irq_base + hwirq); + generic_handle_domain_irq(d, gc->irq_base + hwirq); - generic_handle_irq(virq); - stat &= ~(1 << hwirq); + stat &= ~BIT(hwirq); } } chained_irq_exit(chip, desc); } +static int dw_apb_ictl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + int i, ret; + irq_hw_number_t hwirq; + unsigned int type = IRQ_TYPE_NONE; + struct irq_fwspec *fwspec = arg; + + ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type); + if (ret) + return ret; + + for (i = 0; i < nr_irqs; i++) + irq_map_generic_chip(domain, virq + i, hwirq + i); + + return 0; +} + +static const struct irq_domain_ops dw_apb_ictl_irq_domain_ops = { + .translate = irq_domain_translate_onecell, + .alloc = dw_apb_ictl_irq_domain_alloc, + .free = irq_domain_free_irqs_top, +}; + #ifdef CONFIG_PM static void dw_apb_ictl_resume(struct irq_data *d) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); struct irq_chip_type *ct = irq_data_get_chip_type(d); - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); writel_relaxed(~0, gc->reg_base + ct->regs.enable); writel_relaxed(*ct->mask_cache, gc->reg_base + ct->regs.mask); - irq_gc_unlock(gc); } #else #define dw_apb_ictl_resume NULL @@ -68,19 +112,27 @@ static void dw_apb_ictl_resume(struct irq_data *d) static int __init dw_apb_ictl_init(struct device_node *np, struct device_node *parent) { + const struct irq_domain_ops *domain_ops; unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; struct resource r; struct irq_domain *domain; struct irq_chip_generic *gc; void __iomem *iobase; - int ret, nrirqs, irq, i; + int ret, nrirqs, parent_irq, i; u32 reg; - /* Map the parent interrupt for the chained handler */ - irq = irq_of_parse_and_map(np, 0); - if (irq <= 0) { - pr_err("%pOF: unable to parse irq\n", np); - return -EINVAL; + if (!parent) { + /* Used as the primary interrupt controller */ + parent_irq = 0; + domain_ops = &dw_apb_ictl_irq_domain_ops; + } else { + /* Map the parent interrupt for the chained handler */ + parent_irq = irq_of_parse_and_map(np, 0); + if (parent_irq <= 0) { + pr_err("%pOF: unable to parse irq\n", np); + return -EINVAL; + } + domain_ops = &irq_generic_chip_ops; } ret = of_address_to_resource(np, 0, &r); @@ -120,8 +172,7 @@ static int __init dw_apb_ictl_init(struct device_node *np, else nrirqs = fls(readl_relaxed(iobase + APB_INT_ENABLE_L)); - domain = irq_domain_add_linear(np, nrirqs, - &irq_generic_chip_ops, NULL); + domain = irq_domain_create_linear(of_fwnode_handle(np), nrirqs, domain_ops, NULL); if (!domain) { pr_err("%pOF: unable to add irq domain\n", np); ret = -ENOMEM;
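One subtlety in the primary-mode handler above: each generic chip covers a 32-interrupt bank, and the FINALSTATUS bits are relative to that bank, so the decoded bit must be rebased (gc->irq_base + hwirq) before the domain lookup, just as the cascaded handler already does. A self-contained sketch of that arithmetic, assuming a hypothetical multi-bank controller; the names and the dispatch callback are illustrative only:

#include <linux/bitops.h>
#include <linux/types.h>

#define DEMO_IRQS_PER_BANK 32

/* pending[] mimics one latched status word per 32-interrupt bank */
static void demo_demux(const u32 *pending, unsigned int nr_banks,
		       void (*dispatch)(unsigned int hwirq))
{
	unsigned int bank;

	for (bank = 0; bank < nr_banks; bank++) {
		u32 stat = pending[bank];

		while (stat) {
			u32 bit = ffs(stat) - 1;

			/* rebase: bit 0 of bank 1 is hwirq 32, not hwirq 0 */
			dispatch(bank * DEMO_IRQS_PER_BANK + bit);
			stat &= ~BIT(bit);
		}
	}
}

@@ -146,7 +197,13 @@ static int __init dw_apb_ictl_init(struct device_node *np, 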
gc->chip_types[0].chip.irq_resume = dw_apb_ictl_resume; } - irq_set_chained_handler_and_data(irq, dw_apb_ictl_handler, domain); + if (parent_irq) { + irq_set_chained_handler_and_data(parent_irq, + dw_apb_ictl_handle_irq_cascaded, domain); + } else { + dw_apb_ictl_irq_domain = domain; + set_handle_irq(dw_apb_ictl_handle_irq); + } return 0; diff --git a/drivers/irqchip/irq-econet-en751221.c b/drivers/irqchip/irq-econet-en751221.c new file mode 100644 index 000000000000..d83d5eb12795 --- /dev/null +++ b/drivers/irqchip/irq-econet-en751221.c @@ -0,0 +1,310 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * EN751221 Interrupt Controller Driver. + * + * The EcoNet EN751221 Interrupt Controller is a simple interrupt controller + * designed for the MIPS 34Kc MT SMP processor with 2 VPEs. Each interrupt can + * be routed to either VPE but not both, so to support per-CPU interrupts, a + * secondary IRQ number is allocated to control masking/unmasking on VPE#1. In + * this driver, these are called "shadow interrupts". The assignment of shadow + * interrupts is defined by the SoC integrator when wiring the interrupt lines, + * so they are configurable in the device tree. + * + * If an interrupt (say 30) needs per-CPU capability, the SoC integrator + * allocates another IRQ number (say 29) to be its shadow. The device tree + * reflects this by adding the pair <30 29> to the "econet,shadow-interrupts" + * property. + * + * When VPE#1 requests IRQ 30, the driver manipulates the mask bit for IRQ 29, + * telling the hardware to mask VPE#1's view of IRQ 30. + * + * Copyright (C) 2025 Caleb James DeLisle <cjd@cjdns.fr> + */ + +#include <linux/cleanup.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/irqdomain.h> +#include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> + +#define IRQ_COUNT 40 + +#define NOT_PERCPU 0xff +#define IS_SHADOW 0xfe + +#define REG_MASK0 0x04 +#define REG_MASK1 0x50 +#define REG_PENDING0 0x08 +#define REG_PENDING1 0x54 + +/** + * @membase: Base address of the interrupt controller registers + * @interrupt_shadows: Array of all interrupts, for each value, + * - NOT_PERCPU: This interrupt is not per-cpu, so it has no shadow + * - IS_SHADOW: This interrupt is a shadow of another per-cpu interrupt + * - else: This is a per-cpu interrupt whose shadow is the value + */ +static struct { + void __iomem *membase; + u8 interrupt_shadows[IRQ_COUNT]; +} econet_intc __ro_after_init; + +static DEFINE_RAW_SPINLOCK(irq_lock); + +/* IRQs must be disabled */ +static void econet_wreg(u32 reg, u32 val, u32 mask) +{ + u32 v; + + guard(raw_spinlock)(&irq_lock); + + v = ioread32(econet_intc.membase + reg); + v &= ~mask; + v |= val & mask; + iowrite32(v, econet_intc.membase + reg); +} + +/* IRQs must be disabled */ +static void econet_chmask(u32 hwirq, bool unmask) +{ + u32 reg, mask; + u8 shadow; + + /* + * If the IRQ is a shadow, it should never be manipulated directly. + * It should only be masked/unmasked as a result of the "real" per-cpu + * irq being manipulated by a thread running on VPE#1. + * If it is per-cpu (has a shadow), and we're on VPE#1, the shadow is what we mask. + * This is single processor only, so smp_processor_id() never exceeds 1. 
+ */ + shadow = econet_intc.interrupt_shadows[hwirq]; + if (WARN_ON_ONCE(shadow == IS_SHADOW)) + return; + else if (shadow != NOT_PERCPU && smp_processor_id() == 1) + hwirq = shadow; + + if (hwirq >= 32) { + reg = REG_MASK1; + mask = BIT(hwirq - 32); + } else { + reg = REG_MASK0; + mask = BIT(hwirq); + } + + econet_wreg(reg, unmask ? mask : 0, mask); +} + +/* IRQs must be disabled */ +static void econet_intc_mask(struct irq_data *d) +{ + econet_chmask(d->hwirq, false); +} + +/* IRQs must be disabled */ +static void econet_intc_unmask(struct irq_data *d) +{ + econet_chmask(d->hwirq, true); +} + +static void econet_mask_all(void) +{ + /* IRQs are generally disabled during init, but guarding here makes it non-obligatory. */ + guard(irqsave)(); + econet_wreg(REG_MASK0, 0, ~0); + econet_wreg(REG_MASK1, 0, ~0); +} + +static void econet_intc_handle_pending(struct irq_domain *d, u32 pending, u32 offset) +{ + int hwirq; + + while (pending) { + hwirq = fls(pending) - 1; + generic_handle_domain_irq(d, hwirq + offset); + pending &= ~BIT(hwirq); + } +} + +static void econet_intc_from_parent(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct irq_domain *domain; + u32 pending0, pending1; + + chained_irq_enter(chip, desc); + + pending0 = ioread32(econet_intc.membase + REG_PENDING0); + pending1 = ioread32(econet_intc.membase + REG_PENDING1); + + if (unlikely(!(pending0 | pending1))) { + spurious_interrupt(); + } else { + domain = irq_desc_get_handler_data(desc); + econet_intc_handle_pending(domain, pending0, 0); + econet_intc_handle_pending(domain, pending1, 32); + } + + chained_irq_exit(chip, desc); +} + +static const struct irq_chip econet_irq_chip; + +static int econet_intc_map(struct irq_domain *d, u32 irq, irq_hw_number_t hwirq) +{ + int ret; + + if (hwirq >= IRQ_COUNT) { + pr_err("%s: hwirq %lu out of range\n", __func__, hwirq); + return -EINVAL; + } else if (econet_intc.interrupt_shadows[hwirq] == IS_SHADOW) { + pr_err("%s: can't map hwirq %lu, it is a shadow interrupt\n", __func__, hwirq); + return -EINVAL; + } + + if (econet_intc.interrupt_shadows[hwirq] == NOT_PERCPU) { + irq_set_chip_and_handler(irq, &econet_irq_chip, handle_level_irq); + } else { + irq_set_chip_and_handler(irq, &econet_irq_chip, handle_percpu_devid_irq); + ret = irq_set_percpu_devid(irq); + if (ret) + pr_warn("%s: Failed irq_set_percpu_devid for %u: %d\n", d->name, irq, ret); + } + + irq_set_chip_data(irq, NULL); + return 0; +} + +static const struct irq_chip econet_irq_chip = { + .name = "en751221-intc", + .irq_unmask = econet_intc_unmask, + .irq_mask = econet_intc_mask, + .irq_mask_ack = econet_intc_mask, +}; + +static const struct irq_domain_ops econet_domain_ops = { + .xlate = irq_domain_xlate_onecell, + .map = econet_intc_map +}; + +static int __init get_shadow_interrupts(struct device_node *node) +{ + const char *field = "econet,shadow-interrupts"; + int num_shadows; + + num_shadows = of_property_count_u32_elems(node, field); + + memset(econet_intc.interrupt_shadows, NOT_PERCPU, + sizeof(econet_intc.interrupt_shadows)); + + if (num_shadows <= 0) { + return 0; + } else if (num_shadows % 2) { + pr_err("%pOF: %s count is odd, ignoring\n", node, field); + return 0; + } + + u32 *shadows __free(kfree) = kmalloc_array(num_shadows, sizeof(u32), GFP_KERNEL); + if (!shadows) + return -ENOMEM; + + if (of_property_read_u32_array(node, field, shadows, num_shadows)) { + pr_err("%pOF: Failed to read %s\n", node, field); + return -EINVAL; + } + + for (int i = 0; i < num_shadows; i += 2) { + u32 shadow = 
shadows[i + 1]; + u32 target = shadows[i]; + + if (shadow > IRQ_COUNT) { + pr_err("%pOF: %s[%d] shadow(%d) out of range\n", + node, field, i + 1, shadow); + continue; + } + + if (target >= IRQ_COUNT) { + pr_err("%pOF: %s[%d] target(%d) out of range\n", node, field, i, target); + continue; + } + + if (econet_intc.interrupt_shadows[target] != NOT_PERCPU) { + pr_err("%pOF: %s[%d] target(%d) already has a shadow\n", + node, field, i, target); + continue; + } + + if (econet_intc.interrupt_shadows[shadow] != NOT_PERCPU) { + pr_err("%pOF: %s[%d] shadow(%d) already has a target\n", + node, field, i + 1, shadow); + continue; + } + + econet_intc.interrupt_shadows[target] = shadow; + econet_intc.interrupt_shadows[shadow] = IS_SHADOW; + } + + return 0; +} + +static int __init econet_intc_of_init(struct device_node *node, struct device_node *parent) +{ + struct irq_domain *domain; + struct resource res; + int ret, irq; + + ret = get_shadow_interrupts(node); + if (ret) + return ret; + + irq = irq_of_parse_and_map(node, 0); + if (!irq) { + pr_err("%pOF: DT: Failed to get IRQ from 'interrupts'\n", node); + return -EINVAL; + } + + if (of_address_to_resource(node, 0, &res)) { + pr_err("%pOF: DT: Failed to get 'reg'\n", node); + ret = -EINVAL; + goto err_dispose_mapping; + } + + if (!request_mem_region(res.start, resource_size(&res), res.name)) { + pr_err("%pOF: Failed to request memory\n", node); + ret = -EBUSY; + goto err_dispose_mapping; + } + + econet_intc.membase = ioremap(res.start, resource_size(&res)); + if (!econet_intc.membase) { + pr_err("%pOF: Failed to remap membase\n", node); + ret = -ENOMEM; + goto err_release; + } + + econet_mask_all(); + + domain = irq_domain_create_linear(of_fwnode_handle(node), IRQ_COUNT, + &econet_domain_ops, NULL); + if (!domain) { + pr_err("%pOF: Failed to add irqdomain\n", node); + ret = -ENOMEM; + goto err_unmap; + } + + irq_set_chained_handler_and_data(irq, econet_intc_from_parent, domain); + + return 0; + +err_unmap: + iounmap(econet_intc.membase); +err_release: + release_mem_region(res.start, resource_size(&res)); +err_dispose_mapping: + irq_dispose_mapping(irq); + return ret; +} + +IRQCHIP_DECLARE(econet_en751221_intc, "econet,en751221-intc", econet_intc_of_init); diff --git a/drivers/irqchip/irq-eznps.c b/drivers/irqchip/irq-eznps.c deleted file mode 100644 index 2a7a38830a8d..000000000000 --- a/drivers/irqchip/irq-eznps.c +++ /dev/null @@ -1,165 +0,0 @@ -/* - * Copyright (c) 2016, Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include <linux/interrupt.h> -#include <linux/module.h> -#include <linux/of.h> -#include <linux/irq.h> -#include <linux/irqdomain.h> -#include <linux/irqchip.h> -#include <soc/nps/common.h> - -#define NPS_NR_CPU_IRQS 8 /* number of interrupt lines of NPS400 CPU */ -#define NPS_TIMER0_IRQ 3 - -/* - * NPS400 core includes an Interrupt Controller (IC) support. - * All cores can deactivate level irqs at first level control - * at cores mesh layer called MTM. - * For devices out side chip e.g. uart, network there is another - * level called Global Interrupt Manager (GIM). - * This second level can control level and edge interrupt. - * - * NOTE: AUX_IENABLE and CTOP_AUX_IACK are auxiliary registers - * with private HW copy per CPU. - */ - -static void nps400_irq_mask(struct irq_data *irqd) -{ - unsigned int ienb; - unsigned int irq = irqd_to_hwirq(irqd); - - ienb = read_aux_reg(AUX_IENABLE); - ienb &= ~(1 << irq); - write_aux_reg(AUX_IENABLE, ienb); -} - -static void nps400_irq_unmask(struct irq_data *irqd) -{ - unsigned int ienb; - unsigned int irq = irqd_to_hwirq(irqd); - - ienb = read_aux_reg(AUX_IENABLE); - ienb |= (1 << irq); - write_aux_reg(AUX_IENABLE, ienb); -} - -static void nps400_irq_eoi_global(struct irq_data *irqd) -{ - unsigned int __maybe_unused irq = irqd_to_hwirq(irqd); - - write_aux_reg(CTOP_AUX_IACK, 1 << irq); - - /* Don't ack GIC before all device access attempts are done */ - mb(); - - nps_ack_gic(); -} - -static void nps400_irq_ack(struct irq_data *irqd) -{ - unsigned int __maybe_unused irq = irqd_to_hwirq(irqd); - - write_aux_reg(CTOP_AUX_IACK, 1 << irq); -} - -static struct irq_chip nps400_irq_chip_fasteoi = { - .name = "NPS400 IC Global", - .irq_mask = nps400_irq_mask, - .irq_unmask = nps400_irq_unmask, - .irq_eoi = nps400_irq_eoi_global, -}; - -static struct irq_chip nps400_irq_chip_percpu = { - .name = "NPS400 IC", - .irq_mask = nps400_irq_mask, - .irq_unmask = nps400_irq_unmask, - .irq_ack = nps400_irq_ack, -}; - -static int nps400_irq_map(struct irq_domain *d, unsigned int virq, - irq_hw_number_t hw) -{ - switch (hw) { - case NPS_TIMER0_IRQ: -#ifdef CONFIG_SMP - case NPS_IPI_IRQ: -#endif - irq_set_percpu_devid(virq); - irq_set_chip_and_handler(virq, &nps400_irq_chip_percpu, - handle_percpu_devid_irq); - break; - default: - irq_set_chip_and_handler(virq, &nps400_irq_chip_fasteoi, - handle_fasteoi_irq); - break; - } - - return 0; -} - -static const struct irq_domain_ops nps400_irq_ops = { - .xlate = irq_domain_xlate_onecell, - .map = nps400_irq_map, -}; - -static int __init nps400_of_init(struct device_node *node, - struct device_node *parent) -{ - struct irq_domain *nps400_root_domain; - - if (parent) { - pr_err("DeviceTree incore ic not a root irq controller\n"); - return -EINVAL; - } - - nps400_root_domain = irq_domain_add_linear(node, NPS_NR_CPU_IRQS, - &nps400_irq_ops, NULL); - - if (!nps400_root_domain) { - pr_err("nps400 root irq domain not avail\n"); - return -ENOMEM; - } - - /* - * Needed for primary domain lookup to succeed - * This is a primary irqchip, and can never have a parent - */ - irq_set_default_host(nps400_root_domain); - -#ifdef CONFIG_SMP - irq_create_mapping(nps400_root_domain, NPS_IPI_IRQ); -#endif - - return 0; -} -IRQCHIP_DECLARE(ezchip_nps400_ic, 
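/*
 * IRQCHIP_DECLARE(name, compatible, fn) places an entry in the
 * __irqchip_of_table linker section; irqchip_init() later feeds that
 * table to of_irq_init(), which probes matching DT nodes (parents
 * before children) and invokes the init function. The whole eznps
 * driver is removed here along with the ARC NPS platform it served.
 */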
"ezchip,nps400-ic", nps400_of_init); diff --git a/drivers/irqchip/irq-ftintc010.c b/drivers/irqchip/irq-ftintc010.c index 0bf98425dca5..a59a66d79da6 100644 --- a/drivers/irqchip/irq-ftintc010.c +++ b/drivers/irqchip/irq-ftintc010.c @@ -11,7 +11,6 @@ #include <linux/irq.h> #include <linux/io.h> #include <linux/irqchip.h> -#include <linux/irqchip/versatile-fpga.h> #include <linux/irqdomain.h> #include <linux/module.h> #include <linux/of.h> @@ -126,7 +125,7 @@ static struct irq_chip ft010_irq_chip = { /* Local static for the IRQ entry call */ static struct ft010_irq_data firq; -asmlinkage void __exception_irq_entry ft010_irqchip_handle_irq(struct pt_regs *regs) +static void __exception_irq_entry ft010_irqchip_handle_irq(struct pt_regs *regs) { struct ft010_irq_data *f = &firq; int irq; @@ -134,7 +133,7 @@ asmlinkage void __exception_irq_entry ft010_irqchip_handle_irq(struct pt_regs *r while ((status = readl(FT010_IRQ_STATUS(f->base)))) { irq = ffs(status) - 1; - handle_domain_irq(f->domain, irq, regs); + generic_handle_domain_irq(f->domain, irq); } } @@ -163,7 +162,7 @@ static const struct irq_domain_ops ft010_irqdomain_ops = { .xlate = irq_domain_xlate_onetwocell, }; -int __init ft010_of_init_irq(struct device_node *node, +static int __init ft010_of_init_irq(struct device_node *node, struct device_node *parent) { struct ft010_irq_data *f = &firq; @@ -181,8 +180,9 @@ int __init ft010_of_init_irq(struct device_node *node, writel(0, FT010_IRQ_MASK(f->base)); writel(0, FT010_FIQ_MASK(f->base)); - f->domain = irq_domain_add_simple(node, FT010_NUM_IRQS, 0, - &ft010_irqdomain_ops, f); + f->domain = irq_domain_create_simple(of_fwnode_handle(node), + FT010_NUM_IRQS, 0, + &ft010_irqdomain_ops, f); set_handle_irq(ft010_irqchip_handle_irq); return 0; diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c index 3c93c6f4d1f1..c776f9142610 100644 --- a/drivers/irqchip/irq-gic-common.c +++ b/drivers/irqchip/irq-gic-common.c @@ -1,46 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2002 ARM Limited, All Rights Reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. 
*/ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/irqchip/arm-gic.h> +#include <linux/kernel.h> #include "irq-gic-common.h" static DEFINE_RAW_SPINLOCK(irq_controller_lock); -static const struct gic_kvm_info *gic_kvm_info; - -const struct gic_kvm_info *gic_get_kvm_info(void) -{ - return gic_kvm_info; -} - -void gic_set_kvm_info(const struct gic_kvm_info *info) -{ - BUG_ON(gic_kvm_info != NULL); - gic_kvm_info = info; -} - void gic_enable_of_quirks(const struct device_node *np, const struct gic_quirk *quirks, void *data) { for (; quirks->desc; quirks++) { - if (!of_device_is_compatible(np, quirks->compatible)) + if (!quirks->compatible && !quirks->property) + continue; + if (quirks->compatible && + !of_device_is_compatible(np, quirks->compatible)) + continue; + if (quirks->property && + !of_property_read_bool(np, quirks->property)) continue; if (quirks->init(data)) pr_info("GIC: enabling workaround for %s\n", @@ -52,6 +35,8 @@ void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks, void *data) { for (; quirks->desc; quirks++) { + if (quirks->compatible || quirks->property) + continue; if (quirks->iidr != (quirks->mask & iidr)) continue; if (quirks->init(data)) @@ -61,7 +46,7 @@ void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks, } int gic_configure_irq(unsigned int irq, unsigned int type, - void __iomem *base, void (*sync_access)(void)) + void __iomem *base) { u32 confmask = 0x2 << ((irq % 16) * 2); u32 confoff = (irq / 16) * 4; @@ -74,7 +59,7 @@ int gic_configure_irq(unsigned int irq, unsigned int type, * for "irq", depending on "type". */ raw_spin_lock_irqsave(&irq_controller_lock, flags); - val = oldval = readl_relaxed(base + GIC_DIST_CONFIG + confoff); + val = oldval = readl_relaxed(base + confoff); if (type & IRQ_TYPE_LEVEL_MASK) val &= ~confmask; else if (type & IRQ_TYPE_EDGE_BOTH) @@ -94,24 +79,16 @@ int gic_configure_irq(unsigned int irq, unsigned int type, * does not allow us to set the configuration or we are in a * non-secure mode, and hence it may not be catastrophic. */ - writel_relaxed(val, base + GIC_DIST_CONFIG + confoff); - if (readl_relaxed(base + GIC_DIST_CONFIG + confoff) != val) { - if (WARN_ON(irq >= 32)) - ret = -EINVAL; - else - pr_warn("GIC: PPI%d is secure or misconfigured\n", - irq - 16); - } - raw_spin_unlock_irqrestore(&irq_controller_lock, flags); + writel_relaxed(val, base + confoff); + if (readl_relaxed(base + confoff) != val) + ret = -EINVAL; - if (sync_access) - sync_access(); + raw_spin_unlock_irqrestore(&irq_controller_lock, flags); return ret; } -void gic_dist_config(void __iomem *base, int gic_irqs, - void (*sync_access)(void)) +void gic_dist_config(void __iomem *base, int gic_irqs, u8 priority) { unsigned int i; @@ -126,7 +103,8 @@ void gic_dist_config(void __iomem *base, int gic_irqs, * Set priority on all global interrupts. */ for (i = 32; i < gic_irqs; i += 4) - writel_relaxed(GICD_INT_DEF_PRI_X4, base + GIC_DIST_PRI + i); + writel_relaxed(REPEAT_BYTE_U32(priority), + base + GIC_DIST_PRI + i); /* * Deactivate and disable all SPIs. 
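	 * (Each GICD_IPRIORITYR register packs four 8-bit priority fields,
	 * which is why the loop above steps four interrupts per 32-bit
	 * write and why REPEAT_BYTE_U32() is used: for example
	 * REPEAT_BYTE_U32(0xa0) == 0xa0a0a0a0, one priority byte per
	 * interrupt.)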
Leave the PPI and SGIs @@ -138,31 +116,27 @@ void gic_dist_config(void __iomem *base, int gic_irqs, writel_relaxed(GICD_INT_EN_CLR_X32, base + GIC_DIST_ENABLE_CLEAR + i / 8); } - - if (sync_access) - sync_access(); } -void gic_cpu_config(void __iomem *base, void (*sync_access)(void)) +void gic_cpu_config(void __iomem *base, int nr, u8 priority) { int i; /* * Deal with the banked PPI and SGI interrupts - disable all - * PPI interrupts, ensure all SGI interrupts are enabled. - * Make sure everything is deactivated. + * private interrupts. Make sure everything is deactivated. */ - writel_relaxed(GICD_INT_EN_CLR_X32, base + GIC_DIST_ACTIVE_CLEAR); - writel_relaxed(GICD_INT_EN_CLR_PPI, base + GIC_DIST_ENABLE_CLEAR); - writel_relaxed(GICD_INT_EN_SET_SGI, base + GIC_DIST_ENABLE_SET); + for (i = 0; i < nr; i += 32) { + writel_relaxed(GICD_INT_EN_CLR_X32, + base + GIC_DIST_ACTIVE_CLEAR + i / 8); + writel_relaxed(GICD_INT_EN_CLR_X32, + base + GIC_DIST_ENABLE_CLEAR + i / 8); + } /* * Set priority on PPI and SGI interrupts */ - for (i = 0; i < 32; i += 4) - writel_relaxed(GICD_INT_DEF_PRI_X4, + for (i = 0; i < nr; i += 4) + writel_relaxed(REPEAT_BYTE_U32(priority), base + GIC_DIST_PRI + i * 4 / 4); - - if (sync_access) - sync_access(); } diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h index 97e58fb6c232..710cab61d919 100644 --- a/drivers/irqchip/irq-gic-common.h +++ b/drivers/irqchip/irq-gic-common.h @@ -1,17 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2002 ARM Limited, All Rights Reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. 
*/ #ifndef _IRQ_GIC_COMMON_H @@ -19,26 +8,29 @@ #include <linux/of.h> #include <linux/irqdomain.h> +#include <linux/msi.h> #include <linux/irqchip/arm-gic-common.h> struct gic_quirk { const char *desc; const char *compatible; + const char *property; bool (*init)(void *data); u32 iidr; u32 mask; }; int gic_configure_irq(unsigned int irq, unsigned int type, - void __iomem *base, void (*sync_access)(void)); -void gic_dist_config(void __iomem *base, int gic_irqs, - void (*sync_access)(void)); -void gic_cpu_config(void __iomem *base, void (*sync_access)(void)); + void __iomem *base); +void gic_dist_config(void __iomem *base, int gic_irqs, u8 priority); +void gic_cpu_config(void __iomem *base, int nr, u8 priority); void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks, void *data); void gic_enable_of_quirks(const struct device_node *np, const struct gic_quirk *quirks, void *data); -void gic_set_kvm_info(const struct gic_kvm_info *info); +#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) +#define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1) +#define RDIST_FLAGS_FORCE_NON_SHAREABLE (1 << 2) #endif /* _IRQ_GIC_COMMON_H */ diff --git a/drivers/irqchip/irq-gic-its-msi-parent.c b/drivers/irqchip/irq-gic-its-msi-parent.c new file mode 100644 index 000000000000..12f45228c867 --- /dev/null +++ b/drivers/irqchip/irq-gic-its-msi-parent.c @@ -0,0 +1,331 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright (C) 2013-2015 ARM Limited, All Rights Reserved. +// Author: Marc Zyngier <marc.zyngier@arm.com> +// Copyright (C) 2022 Linutronix GmbH +// Copyright (C) 2022 Intel + +#include <linux/acpi_iort.h> +#include <linux/of_address.h> +#include <linux/pci.h> + +#include "irq-gic-its-msi-parent.h" +#include <linux/irqchip/irq-msi-lib.h> + +#define ITS_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS | \ + MSI_FLAG_PCI_MSI_MASK_PARENT) + +#define ITS_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \ + MSI_FLAG_PCI_MSIX | \ + MSI_FLAG_MULTI_PCI_MSI) + +static int its_translate_frame_address(struct device_node *msi_node, phys_addr_t *pa) +{ + struct resource res; + int ret; + + ret = of_property_match_string(msi_node, "reg-names", "ns-translate"); + if (ret < 0) + return ret; + + ret = of_address_to_resource(msi_node, ret, &res); + if (ret) + return ret; + + *pa = res.start; + return 0; +} + +#ifdef CONFIG_PCI_MSI +static int its_pci_msi_vec_count(struct pci_dev *pdev, void *data) +{ + int msi, msix, *count = data; + + msi = max(pci_msi_vec_count(pdev), 0); + msix = max(pci_msix_vec_count(pdev), 0); + *count += max(msi, msix); + + return 0; +} + +static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data) +{ + struct pci_dev **alias_dev = data; + + *alias_dev = pdev; + + return 0; +} + +static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *info) +{ + struct pci_dev *pdev, *alias_dev; + struct msi_domain_info *msi_info; + int alias_count = 0, minnvec = 1; + + if (!dev_is_pci(dev)) + return -EINVAL; + + pdev = to_pci_dev(dev); + /* + * If pdev is downstream of any aliasing bridges, take an upper + * bound of how many other vectors could map to the same DevID. + * Also tell the ITS that the signalling will come from a proxy + * device, and that special allocation rules apply. 
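	 *
	 * For example (topology hypothetical): every function behind a
	 * PCIe-to-PCI bridge is DMA-aliased to the bridge's requester ID,
	 * so they all share one ITS DevID; summing max(MSI, MSI-X) vector
	 * counts across the subordinate bus gives a safe upper bound for
	 * that shared DevID.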
+ */ + pci_for_each_dma_alias(pdev, its_get_pci_alias, &alias_dev); + if (alias_dev != pdev) { + if (alias_dev->subordinate) + pci_walk_bus(alias_dev->subordinate, + its_pci_msi_vec_count, &alias_count); + info->flags |= MSI_ALLOC_FLAGS_PROXY_DEVICE; + } + + /* ITS specific DeviceID, as the core ITS ignores dev. */ + info->scratchpad[0].ul = pci_msi_domain_get_msi_rid(domain->parent, pdev); + + /* + * Always allocate a power of 2, and special case device 0 for + * broken systems where the DevID is not wired (and all devices + * appear as DevID 0). For that reason, we generously allocate a + * minimum of 32 MSIs for DevID 0. If you want more because all + * your devices are aliasing to DevID 0, consider fixing your HW. + */ + nvec = max(nvec, alias_count); + if (!info->scratchpad[0].ul) + minnvec = 32; + nvec = max_t(int, minnvec, roundup_pow_of_two(nvec)); + + msi_info = msi_get_domain_info(domain->parent); + return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info); +} + +static int its_v5_pci_msi_prepare(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *info) +{ + struct device_node *msi_node = NULL; + struct msi_domain_info *msi_info; + struct pci_dev *pdev; + phys_addr_t pa; + u32 rid; + int ret; + + if (!dev_is_pci(dev)) + return -EINVAL; + + pdev = to_pci_dev(dev); + + rid = pci_msi_map_rid_ctlr_node(pdev, &msi_node); + if (!msi_node) + return -ENODEV; + + ret = its_translate_frame_address(msi_node, &pa); + if (ret) + return -ENODEV; + + of_node_put(msi_node); + + /* ITS specific DeviceID */ + info->scratchpad[0].ul = rid; + /* ITS translate frame physical address */ + info->scratchpad[1].ul = pa; + + /* Always allocate power of two vectors */ + nvec = roundup_pow_of_two(nvec); + + msi_info = msi_get_domain_info(domain->parent); + return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info); +} +#else /* CONFIG_PCI_MSI */ +#define its_pci_msi_prepare NULL +#define its_v5_pci_msi_prepare NULL +#endif /* !CONFIG_PCI_MSI */ + +static int of_pmsi_get_msi_info(struct irq_domain *domain, struct device *dev, u32 *dev_id, + phys_addr_t *pa) +{ + struct of_phandle_iterator it; + int ret; + + /* Suck the DeviceID out of the msi-parent property */ + of_for_each_phandle(&it, ret, dev->of_node, "msi-parent", "#msi-cells", -1) { + /* GICv5 ITS domain matches the MSI controller node parent */ + struct device_node *np __free(device_node) = pa ? of_get_parent(it.node) + : of_node_get(it.node); + + if (np == irq_domain_get_of_node(domain)) { + u32 args; + + if (WARN_ON(of_phandle_iterator_args(&it, &args, 1) != 1)) + ret = -EINVAL; + + if (!ret && pa) + ret = its_translate_frame_address(it.node, pa); + + if (!ret) + *dev_id = args; + + of_node_put(it.node); + return ret; + } + } + + struct device_node *msi_ctrl __free(device_node) = NULL; + + return of_map_id(dev->of_node, dev->id, "msi-map", "msi-map-mask", &msi_ctrl, dev_id); +} + +int __weak iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id) +{ + return -1; +} + +static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *info) +{ + struct msi_domain_info *msi_info; + u32 dev_id; + int ret; + + if (dev->of_node) + ret = of_pmsi_get_msi_info(domain->parent, dev, &dev_id, NULL); + else + ret = iort_pmsi_get_dev_id(dev, &dev_id); + if (ret) + return ret; + + /* ITS specific DeviceID, as the core ITS ignores dev. 
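	 * The msi_alloc_info_t scratchpad is the only channel from this
	 * prepare step to the parent ITS domain's allocation path, which
	 * reads scratchpad[0].ul back as the DeviceID indexing the ITS
	 * device table.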
*/ + info->scratchpad[0].ul = dev_id; + + /* Allocate at least 32 MSIs, and always as a power of 2 */ + nvec = max_t(int, 32, roundup_pow_of_two(nvec)); + + msi_info = msi_get_domain_info(domain->parent); + return msi_info->ops->msi_prepare(domain->parent, + dev, nvec, info); +} + +static int its_v5_pmsi_prepare(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *info) +{ + struct msi_domain_info *msi_info; + phys_addr_t pa; + u32 dev_id; + int ret; + + if (!dev->of_node) + return -ENODEV; + + ret = of_pmsi_get_msi_info(domain->parent, dev, &dev_id, &pa); + if (ret) + return ret; + + /* ITS specific DeviceID */ + info->scratchpad[0].ul = dev_id; + /* ITS translate frame physical address */ + info->scratchpad[1].ul = pa; + + /* Allocate always as a power of 2 */ + nvec = roundup_pow_of_two(nvec); + + msi_info = msi_get_domain_info(domain->parent); + return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info); +} + +static void its_msi_teardown(struct irq_domain *domain, msi_alloc_info_t *info) +{ + struct msi_domain_info *msi_info; + + msi_info = msi_get_domain_info(domain->parent); + msi_info->ops->msi_teardown(domain->parent, info); +} + +static bool its_init_dev_msi_info(struct device *dev, struct irq_domain *domain, + struct irq_domain *real_parent, struct msi_domain_info *info) +{ + if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info)) + return false; + + switch(info->bus_token) { + case DOMAIN_BUS_PCI_DEVICE_MSI: + case DOMAIN_BUS_PCI_DEVICE_MSIX: + /* + * FIXME: This probably should be done after a (not yet + * existing) post domain creation callback once to make + * support for dynamic post-enable MSI-X allocations + * work without having to reevaluate the domain size + * over and over. It is known already at allocation + * time via info->hwsize. + * + * That should work perfectly fine for MSI/MSI-X but needs + * some thoughts for purely software managed MSI domains + * where the index space is only limited artificially via + * %MSI_MAX_INDEX. + */ + info->ops->msi_prepare = its_pci_msi_prepare; + info->ops->msi_teardown = its_msi_teardown; + break; + case DOMAIN_BUS_DEVICE_MSI: + case DOMAIN_BUS_WIRED_TO_MSI: + /* + * FIXME: See the above PCI prepare comment. The domain + * size is also known at domain creation time. + */ + info->ops->msi_prepare = its_pmsi_prepare; + info->ops->msi_teardown = its_msi_teardown; + break; + default: + /* Confused. How did the lib return true? */ + WARN_ON_ONCE(1); + return false; + } + + return true; +} + +static bool its_v5_init_dev_msi_info(struct device *dev, struct irq_domain *domain, + struct irq_domain *real_parent, struct msi_domain_info *info) +{ + if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info)) + return false; + + switch (info->bus_token) { + case DOMAIN_BUS_PCI_DEVICE_MSI: + case DOMAIN_BUS_PCI_DEVICE_MSIX: + info->ops->msi_prepare = its_v5_pci_msi_prepare; + info->ops->msi_teardown = its_msi_teardown; + break; + case DOMAIN_BUS_DEVICE_MSI: + case DOMAIN_BUS_WIRED_TO_MSI: + info->ops->msi_prepare = its_v5_pmsi_prepare; + info->ops->msi_teardown = its_msi_teardown; + break; + default: + /* Confused. How did the lib return true? 
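		 * msi_lib_init_dev_msi_info() should only accept bus tokens
		 * covered by the parent's msi_parent_ops bus_select_mask, so
		 * any other token reaching this switch points at a bug in the
		 * msi-lib matching, not at a legitimate configuration.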
*/ + WARN_ON_ONCE(1); + return false; + } + + return true; +} + +const struct msi_parent_ops gic_v3_its_msi_parent_ops = { + .supported_flags = ITS_MSI_FLAGS_SUPPORTED, + .required_flags = ITS_MSI_FLAGS_REQUIRED, + .chip_flags = MSI_CHIP_FLAG_SET_EOI, + .bus_select_token = DOMAIN_BUS_NEXUS, + .bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI, + .prefix = "ITS-", + .init_dev_msi_info = its_init_dev_msi_info, +}; + +const struct msi_parent_ops gic_v5_its_msi_parent_ops = { + .supported_flags = ITS_MSI_FLAGS_SUPPORTED, + .required_flags = ITS_MSI_FLAGS_REQUIRED, + .chip_flags = MSI_CHIP_FLAG_SET_EOI, + .bus_select_token = DOMAIN_BUS_NEXUS, + .bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI, + .prefix = "ITS-v5-", + .init_dev_msi_info = its_v5_init_dev_msi_info, +}; diff --git a/drivers/irqchip/irq-gic-its-msi-parent.h b/drivers/irqchip/irq-gic-its-msi-parent.h new file mode 100644 index 000000000000..df016f347337 --- /dev/null +++ b/drivers/irqchip/irq-gic-its-msi-parent.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2024 ARM Limited, All Rights Reserved. + */ + +#ifndef _IRQ_GIC_ITS_MSI_PARENT_H +#define _IRQ_GIC_ITS_MSI_PARENT_H + +extern const struct msi_parent_ops gic_v3_its_msi_parent_ops; +extern const struct msi_parent_ops gic_v5_its_msi_parent_ops; + +#endif /* _IRQ_GIC_ITS_MSI_PARENT_H */ diff --git a/drivers/irqchip/irq-gic-pm.c b/drivers/irqchip/irq-gic-pm.c index ecafd295c31c..a275a8071a25 100644 --- a/drivers/irqchip/irq-gic-pm.c +++ b/drivers/irqchip/irq-gic-pm.c @@ -1,25 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2016 NVIDIA CORPORATION, All Rights Reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/module.h> #include <linux/clk.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/of_irq.h> #include <linux/irqchip/arm-gic.h> #include <linux/platform_device.h> -#include <linux/pm_clock.h> #include <linux/pm_runtime.h> #include <linux/slab.h> @@ -28,17 +16,25 @@ struct gic_clk_data { const char *const *clocks; }; +struct gic_chip_pm { + struct gic_chip_data *chip_data; + const struct gic_clk_data *clk_data; + struct clk_bulk_data *clks; +}; + static int gic_runtime_resume(struct device *dev) { - struct gic_chip_data *gic = dev_get_drvdata(dev); + struct gic_chip_pm *chip_pm = dev_get_drvdata(dev); + struct gic_chip_data *gic = chip_pm->chip_data; + const struct gic_clk_data *data = chip_pm->clk_data; int ret; - ret = pm_clk_resume(dev); + ret = clk_bulk_prepare_enable(data->num_clocks, chip_pm->clks); if (ret) return ret; /* - * On the very first resume, the pointer to the driver data + * On the very first resume, the pointer to chip_pm->chip_data * will be NULL and this is intentional, because we do not * want to restore the GIC on the very first resume. So if * the pointer is not valid just return. 
@@ -54,35 +50,14 @@ static int gic_runtime_resume(struct device *dev) static int gic_runtime_suspend(struct device *dev) { - struct gic_chip_data *gic = dev_get_drvdata(dev); + struct gic_chip_pm *chip_pm = dev_get_drvdata(dev); + struct gic_chip_data *gic = chip_pm->chip_data; + const struct gic_clk_data *data = chip_pm->clk_data; gic_dist_save(gic); gic_cpu_save(gic); - return pm_clk_suspend(dev); -} - -static int gic_get_clocks(struct device *dev, const struct gic_clk_data *data) -{ - unsigned int i; - int ret; - - if (!dev || !data) - return -EINVAL; - - ret = pm_clk_create(dev); - if (ret) - return ret; - - for (i = 0; i < data->num_clocks; i++) { - ret = of_pm_clk_add_clk(dev, data->clocks[i]); - if (ret) { - dev_err(dev, "failed to add clock %s\n", - data->clocks[i]); - pm_clk_destroy(dev); - return ret; - } - } + clk_bulk_disable_unprepare(data->num_clocks, chip_pm->clks); return 0; } @@ -91,8 +66,8 @@ static int gic_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; const struct gic_clk_data *data; - struct gic_chip_data *gic; - int ret, irq; + struct gic_chip_pm *chip_pm; + int ret, irq, i; data = of_device_get_match_data(&pdev->dev); if (!data) { @@ -100,28 +75,41 @@ static int gic_probe(struct platform_device *pdev) return -ENODEV; } + chip_pm = devm_kzalloc(dev, sizeof(*chip_pm), GFP_KERNEL); + if (!chip_pm) + return -ENOMEM; + irq = irq_of_parse_and_map(dev->of_node, 0); if (!irq) { dev_err(dev, "no parent interrupt found!\n"); return -EINVAL; } - ret = gic_get_clocks(dev, data); + chip_pm->clks = devm_kcalloc(dev, data->num_clocks, + sizeof(*chip_pm->clks), GFP_KERNEL); + if (!chip_pm->clks) + return -ENOMEM; + + for (i = 0; i < data->num_clocks; i++) + chip_pm->clks[i].id = data->clocks[i]; + + ret = devm_clk_bulk_get(dev, data->num_clocks, chip_pm->clks); if (ret) goto irq_dispose; + chip_pm->clk_data = data; + dev_set_drvdata(dev, chip_pm); + pm_runtime_enable(dev); - ret = pm_runtime_get_sync(dev); + ret = pm_runtime_resume_and_get(dev); if (ret < 0) goto rpm_disable; - ret = gic_of_init_child(dev, &gic, irq); + ret = gic_of_init_child(dev, &chip_pm->chip_data, irq); if (ret) goto rpm_put; - platform_set_drvdata(pdev, gic); - pm_runtime_put(dev); dev_info(dev, "GIC IRQ controller registered\n"); @@ -132,7 +120,6 @@ rpm_put: pm_runtime_put_sync(dev); rpm_disable: pm_runtime_disable(dev); - pm_clk_destroy(dev); irq_dispose: irq_dispose_mapping(irq); @@ -142,6 +129,8 @@ irq_dispose: static const struct dev_pm_ops gic_pm_ops = { SET_RUNTIME_PM_OPS(gic_runtime_suspend, gic_runtime_resume, NULL) + SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) }; static const char * const gic400_clocks[] = { diff --git a/drivers/irqchip/irq-gic-realview.c b/drivers/irqchip/irq-gic-realview.c index b4c1924f0255..38fab02ffe9d 100644 --- a/drivers/irqchip/irq-gic-realview.c +++ b/drivers/irqchip/irq-gic-realview.c @@ -57,6 +57,7 @@ realview_gic_of_init(struct device_node *node, struct device_node *parent) /* The PB11MPCore GIC needs to be configured in the syscon */ map = syscon_node_to_regmap(np); + of_node_put(np); if (!IS_ERR(map)) { /* new irq mode with no DCC */ regmap_write(map, REALVIEW_SYS_LOCK_OFFSET, diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c index f5fe0100f9ff..8a3410c2b7b5 100644 --- a/drivers/irqchip/irq-gic-v2m.c +++ b/drivers/irqchip/irq-gic-v2m.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * ARM GIC v2m MSI(-X) support * Support for Message Signaled Interrupts for systems that 
@@ -7,25 +8,25 @@ * Authors: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> * Harish Kasiviswanathan <harish.kasiviswanathan@amd.com> * Brandon Anderson <brandon.anderson@amd.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. */ #define pr_fmt(fmt) "GICv2m: " fmt #include <linux/acpi.h> -#include <linux/dma-iommu.h> +#include <linux/iommu.h> #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/kernel.h> +#include <linux/pci.h> #include <linux/msi.h> #include <linux/of_address.h> #include <linux/of_pci.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/irqchip/arm-gic.h> +#include <linux/irqchip/arm-gic-common.h> + +#include <linux/irqchip/irq-msi-lib.h> /* * MSI_TYPER: @@ -56,6 +57,7 @@ /* List of flags for specific v2m implementation */ #define GICV2M_NEEDS_SPI_OFFSET 0x00000001 +#define GICV2M_GRAVITON_ADDRESS_ONLY 0x00000002 static LIST_HEAD(v2m_nodes); static DEFINE_SPINLOCK(v2m_lock); @@ -72,45 +74,27 @@ struct v2m_data { u32 flags; /* v2m flags for specific implementation */ }; -static void gicv2m_mask_msi_irq(struct irq_data *d) +static phys_addr_t gicv2m_get_msi_addr(struct v2m_data *v2m, int hwirq) { - pci_msi_mask_irq(d); - irq_chip_mask_parent(d); + if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY) + return v2m->res.start | ((hwirq - 32) << 3); + else + return v2m->res.start + V2M_MSI_SETSPI_NS; } -static void gicv2m_unmask_msi_irq(struct irq_data *d) -{ - pci_msi_unmask_irq(d); - irq_chip_unmask_parent(d); -} - -static struct irq_chip gicv2m_msi_irq_chip = { - .name = "MSI", - .irq_mask = gicv2m_mask_msi_irq, - .irq_unmask = gicv2m_unmask_msi_irq, - .irq_eoi = irq_chip_eoi_parent, - .irq_write_msi_msg = pci_msi_domain_write_msg, -}; - -static struct msi_domain_info gicv2m_msi_domain_info = { - .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI), - .chip = &gicv2m_msi_irq_chip, -}; - static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct v2m_data *v2m = irq_data_get_irq_chip_data(data); - phys_addr_t addr = v2m->res.start + V2M_MSI_SETSPI_NS; - - msg->address_hi = upper_32_bits(addr); - msg->address_lo = lower_32_bits(addr); - msg->data = data->hwirq; + phys_addr_t addr = gicv2m_get_msi_addr(v2m, data->hwirq); + if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY) + msg->data = 0; + else + msg->data = data->hwirq; if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET) msg->data -= v2m->spi_offset; - iommu_dma_map_msi_msg(data->irq, msg); + msi_msg_set_addr(irq_data_get_msi_desc(data), msg, addr); } static struct irq_chip gicv2m_irq_chip = { @@ -167,15 +151,21 @@ static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq, static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *args) { + msi_alloc_info_t *info = args; struct v2m_data *v2m = NULL, *tmp; - int hwirq, offset, i, err = 0; + int hwirq, i, err = 0; + unsigned long offset; + unsigned long align_mask = nr_irqs - 1; spin_lock(&v2m_lock); list_for_each_entry(tmp, &v2m_nodes, entry) { - offset = bitmap_find_free_region(tmp->bm, tmp->nr_spis, - get_count_order(nr_irqs)); - if (offset >= 0) { + unsigned long align_off = tmp->spi_start - (tmp->spi_start & ~align_mask); + + offset = bitmap_find_next_zero_area_off(tmp->bm, tmp->nr_spis, 0, + nr_irqs, align_mask, align_off); + if (offset < tmp->nr_spis) { v2m = 
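			/*
			 * The offset search keeps the resulting hardware SPI
			 * number aligned to nr_irqs, since PCI multi-MSI
			 * leaves the low log2(nvec) bits of the vector for
			 * the device to modify. Worked example (numbers
			 * illustrative): spi_start = 100 and nr_irqs = 8 give
			 * align_mask = 7 and align_off = 4, so the first
			 * candidate offset is 4 and hwirq = 100 + 4 = 104,
			 * a multiple of 8.
			 */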
tmp; + bitmap_set(v2m->bm, offset, nr_irqs); break; } } @@ -186,6 +176,11 @@ static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, hwirq = v2m->spi_start + offset; + err = iommu_dma_prepare_msi(info->desc, + gicv2m_get_msi_addr(v2m, hwirq)); + if (err) + return err; + for (i = 0; i < nr_irqs; i++) { err = gicv2m_irq_gic_domain_alloc(domain, virq + i, hwirq + i); if (err) @@ -214,6 +209,7 @@ static void gicv2m_irq_domain_free(struct irq_domain *domain, } static const struct irq_domain_ops gicv2m_domain_ops = { + .select = msi_lib_irq_domain_select, .alloc = gicv2m_irq_domain_alloc, .free = gicv2m_irq_domain_free, }; @@ -234,26 +230,13 @@ static bool is_msi_spi_valid(u32 base, u32 num) return true; } -static struct irq_chip gicv2m_pmsi_irq_chip = { - .name = "pMSI", -}; - -static struct msi_domain_ops gicv2m_pmsi_ops = { -}; - -static struct msi_domain_info gicv2m_pmsi_domain_info = { - .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS), - .ops = &gicv2m_pmsi_ops, - .chip = &gicv2m_pmsi_irq_chip, -}; - -static void gicv2m_teardown(void) +static void __init gicv2m_teardown(void) { struct v2m_data *v2m, *tmp; list_for_each_entry_safe(v2m, tmp, &v2m_nodes, entry) { list_del(&v2m->entry); - kfree(v2m->bm); + bitmap_free(v2m->bm); iounmap(v2m->base); of_node_put(to_of_node(v2m->fwnode)); if (is_fwnode_irqchip(v2m->fwnode)) @@ -262,58 +245,61 @@ static void gicv2m_teardown(void) } } -static int gicv2m_allocate_domains(struct irq_domain *parent) + +#define GICV2M_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS | \ + MSI_FLAG_PCI_MSI_MASK_PARENT) + +#define GICV2M_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \ + MSI_FLAG_PCI_MSIX | \ + MSI_FLAG_MULTI_PCI_MSI) + +static struct msi_parent_ops gicv2m_msi_parent_ops = { + .supported_flags = GICV2M_MSI_FLAGS_SUPPORTED, + .required_flags = GICV2M_MSI_FLAGS_REQUIRED, + .chip_flags = MSI_CHIP_FLAG_SET_EOI, + .bus_select_token = DOMAIN_BUS_NEXUS, + .bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI, + .prefix = "GICv2m-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, +}; + +static __init int gicv2m_allocate_domains(struct irq_domain *parent) { - struct irq_domain *inner_domain, *pci_domain, *plat_domain; + struct irq_domain_info info = { + .ops = &gicv2m_domain_ops, + .parent = parent, + }; struct v2m_data *v2m; v2m = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry); if (!v2m) return 0; - inner_domain = irq_domain_create_tree(v2m->fwnode, - &gicv2m_domain_ops, v2m); - if (!inner_domain) { - pr_err("Failed to create GICv2m domain\n"); - return -ENOMEM; - } + info.host_data = v2m; + info.fwnode = v2m->fwnode; - irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS); - inner_domain->parent = parent; - pci_domain = pci_msi_create_irq_domain(v2m->fwnode, - &gicv2m_msi_domain_info, - inner_domain); - plat_domain = platform_msi_create_irq_domain(v2m->fwnode, - &gicv2m_pmsi_domain_info, - inner_domain); - if (!pci_domain || !plat_domain) { - pr_err("Failed to create MSI domains\n"); - if (plat_domain) - irq_domain_remove(plat_domain); - if (pci_domain) - irq_domain_remove(pci_domain); - irq_domain_remove(inner_domain); + if (!msi_create_parent_irq_domain(&info, &gicv2m_msi_parent_ops)) { + pr_err("Failed to create GICv2m domain\n"); return -ENOMEM; } - return 0; } static int __init gicv2m_init_one(struct fwnode_handle *fwnode, u32 spi_start, u32 nr_spis, - struct resource *res) + struct resource *res, u32 flags) { int ret; struct v2m_data *v2m; v2m = kzalloc(sizeof(struct 
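	/*
	 * A v2m frame is a small MMIO region (typically 4K, 8K on
	 * Graviton): an MSI is terminated simply by writing the SPI
	 * number to the MSI_SETSPI_NS register inside the frame. The
	 * rest of this function only discovers the frame's SPI range
	 * and tracks allocations in a bitmap.
	 */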
v2m_data), GFP_KERNEL); - if (!v2m) { - pr_err("Failed to allocate struct v2m_data.\n"); + if (!v2m) return -ENOMEM; - } INIT_LIST_HEAD(&v2m->entry); v2m->fwnode = fwnode; + v2m->flags = flags; memcpy(&v2m->res, res, sizeof(struct resource)); @@ -328,7 +314,14 @@ static int __init gicv2m_init_one(struct fwnode_handle *fwnode, v2m->spi_start = spi_start; v2m->nr_spis = nr_spis; } else { - u32 typer = readl_relaxed(v2m->base + V2M_MSI_TYPER); + u32 typer; + + /* Graviton should always have explicit spi_start/nr_spis */ + if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY) { + ret = -EINVAL; + goto err_iounmap; + } + typer = readl_relaxed(v2m->base + V2M_MSI_TYPER); v2m->spi_start = V2M_MSI_TYPER_BASE_SPI(typer); v2m->nr_spis = V2M_MSI_TYPER_NUM_SPI(typer); @@ -347,22 +340,24 @@ static int __init gicv2m_init_one(struct fwnode_handle *fwnode, * the MSI data is the absolute value within the range from * spi_start to (spi_start + num_spis). * - * Broadom NS2 GICv2m implementation has an erratum where the MSI data + * Broadcom NS2 GICv2m implementation has an erratum where the MSI data * is 'spi_number - 32' + * + * Reading that register fails on the Graviton implementation */ - switch (readl_relaxed(v2m->base + V2M_MSI_IIDR)) { - case XGENE_GICV2M_MSI_IIDR: - v2m->flags |= GICV2M_NEEDS_SPI_OFFSET; - v2m->spi_offset = v2m->spi_start; - break; - case BCM_NS2_GICV2M_MSI_IIDR: - v2m->flags |= GICV2M_NEEDS_SPI_OFFSET; - v2m->spi_offset = 32; - break; + if (!(v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY)) { + switch (readl_relaxed(v2m->base + V2M_MSI_IIDR)) { + case XGENE_GICV2M_MSI_IIDR: + v2m->flags |= GICV2M_NEEDS_SPI_OFFSET; + v2m->spi_offset = v2m->spi_start; + break; + case BCM_NS2_GICV2M_MSI_IIDR: + v2m->flags |= GICV2M_NEEDS_SPI_OFFSET; + v2m->spi_offset = 32; + break; + } } - - v2m->bm = kcalloc(BITS_TO_LONGS(v2m->nr_spis), sizeof(long), - GFP_KERNEL); + v2m->bm = bitmap_zalloc(v2m->nr_spis, GFP_KERNEL); if (!v2m->bm) { ret = -ENOMEM; goto err_iounmap; @@ -381,7 +376,7 @@ err_free_v2m: return ret; } -static struct of_device_id gicv2m_device_id[] = { +static __initconst struct of_device_id gicv2m_device_id[] = { { .compatible = "arm,gic-v2m-frame", }, {}, }; @@ -398,7 +393,7 @@ static int __init gicv2m_of_init(struct fwnode_handle *parent_handle, u32 spi_start = 0, nr_spis = 0; struct resource res; - if (!of_find_property(child, "msi-controller", NULL)) + if (!of_property_read_bool(child, "msi-controller")) continue; ret = of_address_to_resource(child, 0, &res); @@ -413,13 +408,14 @@ static int __init gicv2m_of_init(struct fwnode_handle *parent_handle, pr_info("DT overriding V2M MSI_TYPER (base:%u, num:%u)\n", spi_start, nr_spis); - ret = gicv2m_init_one(&child->fwnode, spi_start, nr_spis, &res); - if (ret) { - of_node_put(child); + ret = gicv2m_init_one(&child->fwnode, spi_start, nr_spis, + &res, 0); + if (ret) break; - } } + if (ret && child) + of_node_put(child); if (!ret) ret = gicv2m_allocate_domains(parent); if (ret) @@ -445,8 +441,27 @@ static struct fwnode_handle *gicv2m_get_fwnode(struct device *dev) return data->fwnode; } +static __init bool acpi_check_amazon_graviton_quirks(void) +{ + static struct acpi_table_madt *madt; + acpi_status status; + bool rc = false; + +#define ACPI_AMZN_OEM_ID "AMAZON" + + status = acpi_get_table(ACPI_SIG_MADT, 0, + (struct acpi_table_header **)&madt); + + if (ACPI_FAILURE(status) || !madt) + return rc; + rc = !memcmp(madt->header.oem_id, ACPI_AMZN_OEM_ID, ACPI_OEM_ID_SIZE); + acpi_put_table((struct acpi_table_header *)madt); + + return rc; +} + static int 
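/*
 * In the Graviton "address-only" mode set up above, the SPI is encoded
 * in the doorbell address instead of the MSI data: gicv2m_get_msi_addr()
 * returns res.start | ((hwirq - 32) << 3), so e.g. hwirq 42 targets
 * res.start + 0x50 while msg->data stays 0. Because each interrupt has
 * its own address, MSI_FLAG_MULTI_PCI_MSI (one address, consecutive
 * data values) cannot work and is masked out of the parent ops.
 */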
__init -acpi_parse_madt_msi(struct acpi_subtable_header *header, +acpi_parse_madt_msi(union acpi_subtable_headers *header, const unsigned long end) { int ret; @@ -454,6 +469,7 @@ acpi_parse_madt_msi(struct acpi_subtable_header *header, u32 spi_start = 0, nr_spis = 0; struct acpi_madt_generic_msi_frame *m; struct fwnode_handle *fwnode; + u32 flags = 0; m = (struct acpi_madt_generic_msi_frame *)header; if (BAD_MADT_ENTRY(m, end)) @@ -463,6 +479,13 @@ acpi_parse_madt_msi(struct acpi_subtable_header *header, res.end = m->base_address + SZ_4K - 1; res.flags = IORESOURCE_MEM; + if (acpi_check_amazon_graviton_quirks()) { + pr_info("applying Amazon Graviton quirk\n"); + res.end = res.start + SZ_8K - 1; + flags |= GICV2M_GRAVITON_ADDRESS_ONLY; + gicv2m_msi_parent_ops.supported_flags &= ~MSI_FLAG_MULTI_PCI_MSI; + } + if (m->flags & ACPI_MADT_OVERRIDE_SPI_VALUES) { spi_start = m->spi_base; nr_spis = m->spi_count; @@ -471,13 +494,13 @@ acpi_parse_madt_msi(struct acpi_subtable_header *header, spi_start, nr_spis); } - fwnode = irq_domain_alloc_fwnode((void *)m->base_address); + fwnode = irq_domain_alloc_fwnode(&res.start); if (!fwnode) { pr_err("Unable to allocate GICv2m domain token\n"); return -EINVAL; } - ret = gicv2m_init_one(fwnode, spi_start, nr_spis, &res); + ret = gicv2m_init_one(fwnode, spi_start, nr_spis, &res, flags); if (ret) irq_domain_free_fwnode(fwnode); diff --git a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c index 606efa64adff..b5785472765a 100644 --- a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c +++ b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c @@ -7,8 +7,8 @@ * */ -#include <linux/of_device.h> -#include <linux/of_address.h> +#include <linux/acpi.h> +#include <linux/acpi_iort.h> #include <linux/irq.h> #include <linux/msi.h> #include <linux/of.h> @@ -23,6 +23,19 @@ static struct irq_chip its_msi_irq_chip = { .irq_set_affinity = msi_domain_set_affinity }; +static u32 fsl_mc_msi_domain_get_msi_id(struct irq_domain *domain, + struct fsl_mc_device *mc_dev) +{ + struct device_node *of_node; + u32 out_id; + + of_node = irq_domain_get_of_node(domain); + out_id = of_node ? of_msi_xlate(&mc_dev->dev, &of_node, mc_dev->icid) : + iort_msi_map_id(&mc_dev->dev, mc_dev->icid); + + return out_id; +} + static int its_fsl_mc_msi_prepare(struct irq_domain *msi_domain, struct device *dev, int nvec, msi_alloc_info_t *info) @@ -43,7 +56,8 @@ static int its_fsl_mc_msi_prepare(struct irq_domain *msi_domain, * NOTE: This device id corresponds to the IOMMU stream ID * associated with the DPRC object (ICID). 
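	 * The ICID is no longer used raw: fsl_mc_msi_domain_get_msi_id()
	 * first translates it through the DT "msi-map" (of_msi_xlate())
	 * or ACPI IORT (iort_msi_map_id()), the same indirection PCI uses
	 * to turn a requester ID into an ITS DeviceID.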
*/ - info->scratchpad[0].ul = mc_bus_dev->icid; + info->scratchpad[0].ul = fsl_mc_msi_domain_get_msi_id(msi_domain, + mc_bus_dev); msi_info = msi_get_domain_info(msi_domain->parent); /* Allocate at least 32 MSIs, and always as a power of 2 */ @@ -66,12 +80,71 @@ static const struct of_device_id its_device_id[] = { {}, }; -static int __init its_fsl_mc_msi_init(void) +static void __init its_fsl_mc_msi_init_one(struct fwnode_handle *handle, + const char *name) { - struct device_node *np; struct irq_domain *parent; struct irq_domain *mc_msi_domain; + parent = irq_find_matching_fwnode(handle, DOMAIN_BUS_NEXUS); + if (!parent || !msi_get_domain_info(parent)) { + pr_err("%s: unable to locate ITS domain\n", name); + return; + } + + mc_msi_domain = fsl_mc_msi_create_irq_domain(handle, + &its_fsl_mc_msi_domain_info, + parent); + if (!mc_msi_domain) { + pr_err("%s: unable to create fsl-mc domain\n", name); + return; + } + + pr_info("fsl-mc MSI: %s domain created\n", name); +} + +#ifdef CONFIG_ACPI +static int __init +its_fsl_mc_msi_parse_madt(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_translator *its_entry; + struct fwnode_handle *dom_handle; + const char *node_name; + int err = 0; + + its_entry = (struct acpi_madt_generic_translator *)header; + node_name = kasprintf(GFP_KERNEL, "ITS@0x%lx", + (long)its_entry->base_address); + + dom_handle = iort_find_domain_token(its_entry->translation_id); + if (!dom_handle) { + pr_err("%s: Unable to locate ITS domain handle\n", node_name); + err = -ENXIO; + goto out; + } + + its_fsl_mc_msi_init_one(dom_handle, node_name); + +out: + kfree(node_name); + return err; +} + + +static void __init its_fsl_mc_acpi_msi_init(void) +{ + acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, + its_fsl_mc_msi_parse_madt, 0); +} +#else +static inline void its_fsl_mc_acpi_msi_init(void) { } +#endif + +static void __init its_fsl_mc_of_msi_init(void) +{ + struct device_node *np; + for (np = of_find_matching_node(NULL, its_device_id); np; np = of_find_matching_node(np, its_device_id)) { if (!of_device_is_available(np)) @@ -79,23 +152,15 @@ static int __init its_fsl_mc_msi_init(void) if (!of_property_read_bool(np, "msi-controller")) continue; - parent = irq_find_matching_host(np, DOMAIN_BUS_NEXUS); - if (!parent || !msi_get_domain_info(parent)) { - pr_err("%pOF: unable to locate ITS domain\n", np); - continue; - } - - mc_msi_domain = fsl_mc_msi_create_irq_domain( - of_node_to_fwnode(np), - &its_fsl_mc_msi_domain_info, - parent); - if (!mc_msi_domain) { - pr_err("%pOF: unable to create fsl-mc domain\n", np); - continue; - } - - pr_info("fsl-mc MSI: %pOF domain created\n", np); + its_fsl_mc_msi_init_one(of_fwnode_handle(np), + np->full_name); } +} + +static int __init its_fsl_mc_msi_init(void) +{ + its_fsl_mc_of_msi_init(); + its_fsl_mc_acpi_msi_init(); return 0; } diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c deleted file mode 100644 index 8d6d009d1d58..000000000000 --- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c +++ /dev/null @@ -1,208 +0,0 @@ -/* - * Copyright (C) 2013-2015 ARM Limited, All Rights Reserved. - * Author: Marc Zyngier <marc.zyngier@arm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - */ - -#include <linux/acpi_iort.h> -#include <linux/msi.h> -#include <linux/of.h> -#include <linux/of_irq.h> -#include <linux/of_pci.h> - -static void its_mask_msi_irq(struct irq_data *d) -{ - pci_msi_mask_irq(d); - irq_chip_mask_parent(d); -} - -static void its_unmask_msi_irq(struct irq_data *d) -{ - pci_msi_unmask_irq(d); - irq_chip_unmask_parent(d); -} - -static struct irq_chip its_msi_irq_chip = { - .name = "ITS-MSI", - .irq_unmask = its_unmask_msi_irq, - .irq_mask = its_mask_msi_irq, - .irq_eoi = irq_chip_eoi_parent, - .irq_write_msi_msg = pci_msi_domain_write_msg, -}; - -static int its_pci_msi_vec_count(struct pci_dev *pdev, void *data) -{ - int msi, msix, *count = data; - - msi = max(pci_msi_vec_count(pdev), 0); - msix = max(pci_msix_vec_count(pdev), 0); - *count += max(msi, msix); - - return 0; -} - -static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data) -{ - struct pci_dev **alias_dev = data; - - *alias_dev = pdev; - - return 0; -} - -static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev, - int nvec, msi_alloc_info_t *info) -{ - struct pci_dev *pdev, *alias_dev; - struct msi_domain_info *msi_info; - int alias_count = 0, minnvec = 1; - - if (!dev_is_pci(dev)) - return -EINVAL; - - msi_info = msi_get_domain_info(domain->parent); - - pdev = to_pci_dev(dev); - /* - * If pdev is downstream of any aliasing bridges, take an upper - * bound of how many other vectors could map to the same DevID. - */ - pci_for_each_dma_alias(pdev, its_get_pci_alias, &alias_dev); - if (alias_dev != pdev && alias_dev->subordinate) - pci_walk_bus(alias_dev->subordinate, its_pci_msi_vec_count, - &alias_count); - - /* ITS specific DeviceID, as the core ITS ignores dev. */ - info->scratchpad[0].ul = pci_msi_domain_get_msi_rid(domain, pdev); - - /* - * Always allocate a power of 2, and special case device 0 for - * broken systems where the DevID is not wired (and all devices - * appear as DevID 0). For that reason, we generously allocate a - * minimum of 32 MSIs for DevID 0. If you want more because all - * your devices are aliasing to DevID 0, consider fixing your HW. 
- */ - nvec = max(nvec, alias_count); - if (!info->scratchpad[0].ul) - minnvec = 32; - nvec = max_t(int, minnvec, roundup_pow_of_two(nvec)); - return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info); -} - -static struct msi_domain_ops its_pci_msi_ops = { - .msi_prepare = its_pci_msi_prepare, -}; - -static struct msi_domain_info its_pci_msi_domain_info = { - .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX), - .ops = &its_pci_msi_ops, - .chip = &its_msi_irq_chip, -}; - -static struct of_device_id its_device_id[] = { - { .compatible = "arm,gic-v3-its", }, - {}, -}; - -static int __init its_pci_msi_init_one(struct fwnode_handle *handle, - const char *name) -{ - struct irq_domain *parent; - - parent = irq_find_matching_fwnode(handle, DOMAIN_BUS_NEXUS); - if (!parent || !msi_get_domain_info(parent)) { - pr_err("%s: Unable to locate ITS domain\n", name); - return -ENXIO; - } - - if (!pci_msi_create_irq_domain(handle, &its_pci_msi_domain_info, - parent)) { - pr_err("%s: Unable to create PCI domain\n", name); - return -ENOMEM; - } - - return 0; -} - -static int __init its_pci_of_msi_init(void) -{ - struct device_node *np; - - for (np = of_find_matching_node(NULL, its_device_id); np; - np = of_find_matching_node(np, its_device_id)) { - if (!of_device_is_available(np)) - continue; - if (!of_property_read_bool(np, "msi-controller")) - continue; - - if (its_pci_msi_init_one(of_node_to_fwnode(np), np->full_name)) - continue; - - pr_info("PCI/MSI: %pOF domain created\n", np); - } - - return 0; -} - -#ifdef CONFIG_ACPI - -static int __init -its_pci_msi_parse_madt(struct acpi_subtable_header *header, - const unsigned long end) -{ - struct acpi_madt_generic_translator *its_entry; - struct fwnode_handle *dom_handle; - const char *node_name; - int err = -ENXIO; - - its_entry = (struct acpi_madt_generic_translator *)header; - node_name = kasprintf(GFP_KERNEL, "ITS@0x%lx", - (long)its_entry->base_address); - dom_handle = iort_find_domain_token(its_entry->translation_id); - if (!dom_handle) { - pr_err("%s: Unable to locate ITS domain handle\n", node_name); - goto out; - } - - err = its_pci_msi_init_one(dom_handle, node_name); - if (!err) - pr_info("PCI/MSI: %s domain created\n", node_name); - -out: - kfree(node_name); - return err; -} - -static int __init its_pci_acpi_msi_init(void) -{ - acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, - its_pci_msi_parse_madt, 0); - return 0; -} -#else -static int __init its_pci_acpi_msi_init(void) -{ - return 0; -} -#endif - -static int __init its_pci_msi_init(void) -{ - its_pci_of_msi_init(); - its_pci_acpi_msi_init(); - - return 0; -} -early_initcall(its_pci_msi_init); diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c deleted file mode 100644 index 7b8e87b493fe..000000000000 --- a/drivers/irqchip/irq-gic-v3-its-platform-msi.c +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Copyright (C) 2013-2015 ARM Limited, All Rights Reserved. - * Author: Marc Zyngier <marc.zyngier@arm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - */ - -#include <linux/acpi_iort.h> -#include <linux/device.h> -#include <linux/msi.h> -#include <linux/of.h> -#include <linux/of_irq.h> - -static struct irq_chip its_pmsi_irq_chip = { - .name = "ITS-pMSI", -}; - -static int of_pmsi_get_dev_id(struct irq_domain *domain, struct device *dev, - u32 *dev_id) -{ - int ret, index = 0; - - /* Suck the DeviceID out of the msi-parent property */ - do { - struct of_phandle_args args; - - ret = of_parse_phandle_with_args(dev->of_node, - "msi-parent", "#msi-cells", - index, &args); - if (args.np == irq_domain_get_of_node(domain)) { - if (WARN_ON(args.args_count != 1)) - return -EINVAL; - *dev_id = args.args[0]; - break; - } - index++; - } while (!ret); - - return ret; -} - -int __weak iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id) -{ - return -1; -} - -static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev, - int nvec, msi_alloc_info_t *info) -{ - struct msi_domain_info *msi_info; - u32 dev_id; - int ret; - - msi_info = msi_get_domain_info(domain->parent); - - if (dev->of_node) - ret = of_pmsi_get_dev_id(domain, dev, &dev_id); - else - ret = iort_pmsi_get_dev_id(dev, &dev_id); - if (ret) - return ret; - - /* ITS specific DeviceID, as the core ITS ignores dev. */ - info->scratchpad[0].ul = dev_id; - - /* Allocate at least 32 MSIs, and always as a power of 2 */ - nvec = max_t(int, 32, roundup_pow_of_two(nvec)); - return msi_info->ops->msi_prepare(domain->parent, - dev, nvec, info); -} - -static struct msi_domain_ops its_pmsi_ops = { - .msi_prepare = its_pmsi_prepare, -}; - -static struct msi_domain_info its_pmsi_domain_info = { - .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS), - .ops = &its_pmsi_ops, - .chip = &its_pmsi_irq_chip, -}; - -static const struct of_device_id its_device_id[] = { - { .compatible = "arm,gic-v3-its", }, - {}, -}; - -static int __init its_pmsi_init_one(struct fwnode_handle *fwnode, - const char *name) -{ - struct irq_domain *parent; - - parent = irq_find_matching_fwnode(fwnode, DOMAIN_BUS_NEXUS); - if (!parent || !msi_get_domain_info(parent)) { - pr_err("%s: unable to locate ITS domain\n", name); - return -ENXIO; - } - - if (!platform_msi_create_irq_domain(fwnode, &its_pmsi_domain_info, - parent)) { - pr_err("%s: unable to create platform domain\n", name); - return -ENXIO; - } - - pr_info("Platform MSI: %s domain created\n", name); - return 0; -} - -#ifdef CONFIG_ACPI -static int __init -its_pmsi_parse_madt(struct acpi_subtable_header *header, - const unsigned long end) -{ - struct acpi_madt_generic_translator *its_entry; - struct fwnode_handle *domain_handle; - const char *node_name; - int err = -ENXIO; - - its_entry = (struct acpi_madt_generic_translator *)header; - node_name = kasprintf(GFP_KERNEL, "ITS@0x%lx", - (long)its_entry->base_address); - domain_handle = iort_find_domain_token(its_entry->translation_id); - if (!domain_handle) { - pr_err("%s: Unable to locate ITS domain handle\n", node_name); - goto out; - } - - err = its_pmsi_init_one(domain_handle, node_name); - -out: - kfree(node_name); - return err; -} - -static void __init its_pmsi_acpi_init(void) -{ - acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, - its_pmsi_parse_madt, 0); -} -#else -static inline void its_pmsi_acpi_init(void) { } -#endif - -static void __init its_pmsi_of_init(void) -{ - struct device_node *np; - - for (np = of_find_matching_node(NULL, 
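/*
 * Both this platform-MSI shim and the PCI/MSI one above are deleted
 * because their only real content, the msi_prepare hooks, now lives in
 * irq-gic-its-msi-parent.c, while the per-bus MSI domains are created
 * generically through the ITS msi_parent_ops instead of by hand here.
 */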
its_device_id); np; - np = of_find_matching_node(np, its_device_id)) { - if (!of_device_is_available(np)) - continue; - if (!of_property_read_bool(np, "msi-controller")) - continue; - - its_pmsi_init_one(of_node_to_fwnode(np), np->full_name); - } -} - -static int __init its_pmsi_init(void) -{ - its_pmsi_of_init(); - its_pmsi_acpi_init(); - return 0; -} -early_initcall(its_pmsi_init); diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index db20e992a40f..ada585bfa451 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -1,33 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved. * Author: Marc Zyngier <marc.zyngier@arm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/acpi.h> #include <linux/acpi_iort.h> +#include <linux/bitfield.h> #include <linux/bitmap.h> #include <linux/cpu.h> #include <linux/crash_dump.h> #include <linux/delay.h> -#include <linux/dma-iommu.h> #include <linux/efi.h> +#include <linux/genalloc.h> #include <linux/interrupt.h> +#include <linux/iommu.h> +#include <linux/iopoll.h> #include <linux/irqdomain.h> #include <linux/list.h> -#include <linux/list_sort.h> #include <linux/log2.h> +#include <linux/mem_encrypt.h> #include <linux/memblock.h> #include <linux/mm.h> #include <linux/msi.h> @@ -37,6 +29,7 @@ #include <linux/of_pci.h> #include <linux/of_platform.h> #include <linux/percpu.h> +#include <linux/set_memory.h> #include <linux/slab.h> #include <linux/syscore_ops.h> @@ -48,14 +41,18 @@ #include <asm/exception.h> #include "irq-gic-common.h" +#include "irq-gic-its-msi-parent.h" +#include <linux/irqchip/irq-msi-lib.h> #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0) #define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1) #define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2) -#define ITS_FLAGS_SAVE_SUSPEND_STATE (1ULL << 3) +#define ITS_FLAGS_FORCE_NON_SHAREABLE (1ULL << 3) +#define ITS_FLAGS_WORKAROUND_HISILICON_162100801 (1ULL << 4) -#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) -#define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1) +#define RD_LOCAL_LPI_ENABLED BIT(0) +#define RD_LOCAL_PENDTABLE_PREALLOCATED BIT(1) +#define RD_LOCAL_MEMRESERVE_DONE BIT(2) static u32 lpi_id_bits; @@ -68,7 +65,8 @@ static u32 lpi_id_bits; #define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K) #define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K) -#define LPI_PROP_DEFAULT_PRIO GICD_INT_DEF_PRI +static u8 __ro_after_init lpi_prop_prio; +static struct its_node *find_4_1_its(void); /* * Collection structure - just an ID, and a redistributor address to @@ -97,11 +95,17 @@ struct its_device; * The ITS structure - contains most of the infrastructure, with the * top-level MSI domain, the command queue, the collections, and the * list of devices writing to it. + * + * dev_alloc_lock has to be taken for device allocations, while the + * spinlock must be taken to parse data structures such as the device + * list. 
*/ struct its_node { raw_spinlock_t lock; + struct mutex dev_alloc_lock; struct list_head entry; void __iomem *base; + void __iomem *sgir_base; phys_addr_t phys_base; struct its_cmd_block *cmd_base; struct its_cmd_block *cmd_write; @@ -109,24 +113,38 @@ struct its_node { struct its_collection *collections; struct fwnode_handle *fwnode_handle; u64 (*get_msi_base)(struct its_device *its_dev); + u64 typer; u64 cbaser_save; u32 ctlr_save; + u32 mpidr; struct list_head its_device_list; u64 flags; unsigned long list_nr; - u32 ite_size; - u32 device_ids; int numa_node; unsigned int msi_domain_flags; u32 pre_its_base; /* for Socionext Synquacer */ - bool is_v4; int vlpi_redist_offset; }; +static DEFINE_PER_CPU(struct its_node *, local_4_1_its); + +#define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS)) +#define is_v4_1(its) (!!((its)->typer & GITS_TYPER_VMAPP)) +#define device_ids(its) (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1) + #define ITS_ITT_ALIGN SZ_256 /* The maximum number of VPEID bits supported by VLPI commands */ -#define ITS_MAX_VPEID_BITS (16) +#define ITS_MAX_VPEID_BITS \ + ({ \ + int nvpeid = 16; \ + if (gic_rdists->has_rvpeid && \ + gic_rdists->gicd_typer2 & GICD_TYPER2_VIL) \ + nvpeid = 1 + (gic_rdists->gicd_typer2 & \ + GICD_TYPER2_VID); \ + \ + nvpeid; \ + }) #define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS)) /* Convert page order to size in bytes */ @@ -137,7 +155,7 @@ struct event_lpi_map { u16 *col_map; irq_hw_number_t lpi_base; int nr_lpis; - struct mutex vlpi_lock; + raw_spinlock_t vlpi_lock; struct its_vm *vm; struct its_vlpi_map *vlpi_maps; int nr_vlpis; @@ -154,8 +172,10 @@ struct its_device { struct its_node *its; struct event_lpi_map event_map; void *itt; + u32 itt_sz; u32 nr_ites; u32 device_id; + bool shared; }; static struct { @@ -165,6 +185,13 @@ static struct { int next_victim; } vpe_proxy; +struct cpu_lpi_count { + atomic_t managed; + atomic_t unmanaged; +}; + +static DEFINE_PER_CPU(struct cpu_lpi_count, cpu_lpi_count); + static LIST_HEAD(its_nodes); static DEFINE_RAW_SPINLOCK(its_lock); static struct rdists *gic_rdists; @@ -181,6 +208,125 @@ static DEFINE_IDA(its_vpeid_ida); #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) #define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K) +static gfp_t gfp_flags_quirk; + +static struct page *its_alloc_pages_node(int node, gfp_t gfp, + unsigned int order) +{ + struct page *page; + int ret = 0; + + page = alloc_pages_node(node, gfp | gfp_flags_quirk, order); + + if (!page) + return NULL; + + ret = set_memory_decrypted((unsigned long)page_address(page), + 1 << order); + /* + * If set_memory_decrypted() fails then we don't know what state the + * page is in, so we can't free it. Instead we leak it. + * set_memory_decrypted() will already have WARNed. + */ + if (ret) + return NULL; + + return page; +} + +static struct page *its_alloc_pages(gfp_t gfp, unsigned int order) +{ + return its_alloc_pages_node(NUMA_NO_NODE, gfp, order); +} + +static void its_free_pages(void *addr, unsigned int order) +{ + /* + * If the memory cannot be encrypted again then we must leak the pages. + * set_memory_encrypted() will already have WARNed. 
+ */ + if (set_memory_encrypted((unsigned long)addr, 1 << order)) + return; + free_pages((unsigned long)addr, order); +} + +static struct gen_pool *itt_pool; + +static void *itt_alloc_pool(int node, int size) +{ + unsigned long addr; + struct page *page; + + if (size >= PAGE_SIZE) { + page = its_alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, get_order(size)); + + return page ? page_address(page) : NULL; + } + + do { + addr = gen_pool_alloc(itt_pool, size); + if (addr) + break; + + page = its_alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0); + if (!page) + break; + + gen_pool_add(itt_pool, (unsigned long)page_address(page), PAGE_SIZE, node); + } while (!addr); + + return (void *)addr; +} + +static void itt_free_pool(void *addr, int size) +{ + if (!addr) + return; + + if (size >= PAGE_SIZE) { + its_free_pages(addr, get_order(size)); + return; + } + + gen_pool_free(itt_pool, (unsigned long)addr, size); +} + +/* + * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we + * always have vSGIs mapped. + */ +static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its) +{ + return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]); +} + +static bool rdists_support_shareable(void) +{ + return !(gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE); +} + +static u16 get_its_list(struct its_vm *vm) +{ + struct its_node *its; + unsigned long its_list = 0; + + list_for_each_entry(its, &its_nodes, entry) { + if (!is_v4(its)) + continue; + + if (require_its_list_vmovp(vm, its)) + __set_bit(its->list_nr, &its_list); + } + + return (u16)its_list; +} + +static inline u32 its_get_event_id(struct irq_data *d) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + return d->hwirq - its_dev->event_map.lpi_base; +} + static struct its_collection *dev_event_to_col(struct its_device *its_dev, u32 event) { @@ -189,9 +335,85 @@ static struct its_collection *dev_event_to_col(struct its_device *its_dev, return its->collections + its_dev->event_map.col_map[event]; } +static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev, + u32 event) +{ + if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis)) + return NULL; + + return &its_dev->event_map.vlpi_maps[event]; +} + +static struct its_vlpi_map *get_vlpi_map(struct irq_data *d) +{ + if (irqd_is_forwarded_to_vcpu(d)) { + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + + return dev_event_to_vlpi_map(its_dev, event); + } + + return NULL; +} + +static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags) +{ + raw_spin_lock_irqsave(&vpe->vpe_lock, *flags); + return vpe->col_idx; +} + +static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags) +{ + raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags); +} + +static struct irq_chip its_vpe_irq_chip; + +static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags) +{ + struct its_vpe *vpe = NULL; + int cpu; + + if (d->chip == &its_vpe_irq_chip) { + vpe = irq_data_get_irq_chip_data(d); + } else { + struct its_vlpi_map *map = get_vlpi_map(d); + if (map) + vpe = map->vpe; + } + + if (vpe) { + cpu = vpe_to_cpuid_lock(vpe, flags); + } else { + /* Physical LPIs are already locked via the irq_desc lock */ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + cpu = its_dev->event_map.col_map[its_get_event_id(d)]; + /* Keep GCC quiet... 
*/ + *flags = 0; + } + + return cpu; +} + +static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags) +{ + struct its_vpe *vpe = NULL; + + if (d->chip == &its_vpe_irq_chip) { + vpe = irq_data_get_irq_chip_data(d); + } else { + struct its_vlpi_map *map = get_vlpi_map(d); + if (map) + vpe = map->vpe; + } + + if (vpe) + vpe_to_cpuid_unlock(vpe, flags); +} + static struct its_collection *valid_col(struct its_collection *col) { - if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(0, 15))) + if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0))) return NULL; return col; @@ -288,6 +510,19 @@ struct its_cmd_desc { u16 seq_num; u16 its_list; } its_vmovp_cmd; + + struct { + struct its_vpe *vpe; + } its_invdb_cmd; + + struct { + struct its_vpe *vpe; + u8 sgi; + u8 priority; + bool enable; + bool group; + bool clear; + } its_vsgi_cmd; }; }; @@ -295,7 +530,10 @@ struct its_cmd_desc { * The ITS command block, which is what the ITS actually parses. */ struct its_cmd_block { - u64 raw_cmd[4]; + union { + u64 raw_cmd[4]; + __le64 raw_cmd_le[4]; + }; }; #define ITS_CMD_QUEUE_SZ SZ_64K @@ -401,13 +639,70 @@ static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size) its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0); } +static void its_encode_vconf_addr(struct its_cmd_block *cmd, u64 vconf_pa) +{ + its_mask_encode(&cmd->raw_cmd[0], vconf_pa >> 16, 51, 16); +} + +static void its_encode_alloc(struct its_cmd_block *cmd, bool alloc) +{ + its_mask_encode(&cmd->raw_cmd[0], alloc, 8, 8); +} + +static void its_encode_ptz(struct its_cmd_block *cmd, bool ptz) +{ + its_mask_encode(&cmd->raw_cmd[0], ptz, 9, 9); +} + +static void its_encode_vmapp_default_db(struct its_cmd_block *cmd, + u32 vpe_db_lpi) +{ + its_mask_encode(&cmd->raw_cmd[1], vpe_db_lpi, 31, 0); +} + +static void its_encode_vmovp_default_db(struct its_cmd_block *cmd, + u32 vpe_db_lpi) +{ + its_mask_encode(&cmd->raw_cmd[3], vpe_db_lpi, 31, 0); +} + +static void its_encode_db(struct its_cmd_block *cmd, bool db) +{ + its_mask_encode(&cmd->raw_cmd[2], db, 63, 63); +} + +static void its_encode_sgi_intid(struct its_cmd_block *cmd, u8 sgi) +{ + its_mask_encode(&cmd->raw_cmd[0], sgi, 35, 32); +} + +static void its_encode_sgi_priority(struct its_cmd_block *cmd, u8 prio) +{ + its_mask_encode(&cmd->raw_cmd[0], prio >> 4, 23, 20); +} + +static void its_encode_sgi_group(struct its_cmd_block *cmd, bool grp) +{ + its_mask_encode(&cmd->raw_cmd[0], grp, 10, 10); +} + +static void its_encode_sgi_clear(struct its_cmd_block *cmd, bool clr) +{ + its_mask_encode(&cmd->raw_cmd[0], clr, 9, 9); +} + +static void its_encode_sgi_enable(struct its_cmd_block *cmd, bool en) +{ + its_mask_encode(&cmd->raw_cmd[0], en, 8, 8); +} + static inline void its_fixup_cmd(struct its_cmd_block *cmd) { /* Let's fixup BE commands */ - cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]); - cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]); - cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]); - cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]); + cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]); + cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]); + cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]); + cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]); } static struct its_collection *its_build_mapd_cmd(struct its_node *its, @@ -418,7 +713,6 @@ static struct its_collection *its_build_mapd_cmd(struct its_node *its, u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites); itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt); - itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN); its_encode_cmd(cmd, 
GITS_CMD_MAPD); its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id); @@ -561,11 +855,11 @@ static struct its_collection *its_build_invall_cmd(struct its_node *its, struct its_cmd_desc *desc) { its_encode_cmd(cmd, GITS_CMD_INVALL); - its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); + its_encode_collection(cmd, desc->its_invall_cmd.col->col_id); its_fixup_cmd(cmd); - return NULL; + return desc->its_invall_cmd.col; } static struct its_vpe *its_build_vinvall_cmd(struct its_node *its, @@ -584,22 +878,59 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its, struct its_cmd_block *cmd, struct its_cmd_desc *desc) { - unsigned long vpt_addr; + struct its_vpe *vpe = valid_vpe(its, desc->its_vmapp_cmd.vpe); + unsigned long vpt_addr, vconf_addr; u64 target; - - vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page)); - target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset; + bool alloc; its_encode_cmd(cmd, GITS_CMD_VMAPP); its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id); its_encode_valid(cmd, desc->its_vmapp_cmd.valid); + + if (!desc->its_vmapp_cmd.valid) { + alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count); + if (is_v4_1(its)) { + its_encode_alloc(cmd, alloc); + /* + * Unmapping a VPE is self-synchronizing on GICv4.1, + * no need to issue a VSYNC. + */ + vpe = NULL; + } + + goto out; + } + + vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page)); + target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset; + its_encode_target(cmd, target); its_encode_vpt_addr(cmd, vpt_addr); its_encode_vpt_size(cmd, LPI_NRBITS - 1); + alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count); + + if (!is_v4_1(its)) + goto out; + + vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page)); + + its_encode_alloc(cmd, alloc); + + /* + * GICv4.1 provides a way to get the VLPI state, which needs the vPE + * to be unmapped first, and in this case, we may remap the vPE + * back while the VPT is not empty. So we can't assume that the + * VPT is empty on map. This is why we never advertise PTZ. 
+ */ + its_encode_ptz(cmd, false); + its_encode_vconf_addr(cmd, vconf_addr); + its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi); + +out: its_fixup_cmd(cmd); - return valid_vpe(its, desc->its_vmapp_cmd.vpe); + return vpe; } static struct its_vpe *its_build_vmapti_cmd(struct its_node *its, @@ -608,7 +939,7 @@ static struct its_vpe *its_build_vmapti_cmd(struct its_node *its, { u32 db; - if (desc->its_vmapti_cmd.db_enabled) + if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled) db = desc->its_vmapti_cmd.vpe->vpe_db_lpi; else db = 1023; @@ -631,7 +962,7 @@ static struct its_vpe *its_build_vmovi_cmd(struct its_node *its, { u32 db; - if (desc->its_vmovi_cmd.db_enabled) + if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled) db = desc->its_vmovi_cmd.vpe->vpe_db_lpi; else db = 1023; @@ -661,11 +992,105 @@ static struct its_vpe *its_build_vmovp_cmd(struct its_node *its, its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id); its_encode_target(cmd, target); + if (is_v4_1(its)) { + its_encode_db(cmd, true); + its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi); + } + its_fixup_cmd(cmd); return valid_vpe(its, desc->its_vmovp_cmd.vpe); } +static struct its_vpe *its_build_vinv_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_vlpi_map *map; + + map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev, + desc->its_inv_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_INV); + its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_inv_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, map->vpe); +} + +static struct its_vpe *its_build_vint_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_vlpi_map *map; + + map = dev_event_to_vlpi_map(desc->its_int_cmd.dev, + desc->its_int_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_INT); + its_encode_devid(cmd, desc->its_int_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_int_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, map->vpe); +} + +static struct its_vpe *its_build_vclear_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_vlpi_map *map; + + map = dev_event_to_vlpi_map(desc->its_clear_cmd.dev, + desc->its_clear_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_CLEAR); + its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_clear_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, map->vpe); +} + +static struct its_vpe *its_build_invdb_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + if (WARN_ON(!is_v4_1(its))) + return NULL; + + its_encode_cmd(cmd, GITS_CMD_INVDB); + its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_invdb_cmd.vpe); +} + +static struct its_vpe *its_build_vsgi_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + if (WARN_ON(!is_v4_1(its))) + return NULL; + + its_encode_cmd(cmd, GITS_CMD_VSGI); + its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id); + its_encode_sgi_intid(cmd, desc->its_vsgi_cmd.sgi); + its_encode_sgi_priority(cmd, desc->its_vsgi_cmd.priority); + its_encode_sgi_group(cmd, desc->its_vsgi_cmd.group); + its_encode_sgi_clear(cmd, desc->its_vsgi_cmd.clear); + its_encode_sgi_enable(cmd, desc->its_vsgi_cmd.enable); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vsgi_cmd.vpe); +} + 
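/*
 * Aside (editorial sketch, not part of the commit above or below): the
 * its_encode_*() helpers in this hunk all follow one field-packing
 * convention - an ITS command is four 64-bit words, and each field is
 * written into one word at bits [h:l] via a masked read-modify-write
 * through its_mask_encode(), e.g. its_encode_sgi_intid() packs the SGI
 * INTID into bits [35:32] of word 0. The stand-alone C program below
 * mirrors that pattern for the VSGI fields whose bit positions appear
 * above (enable=8, clear=9, group=10, priority=23:20, INTID=35:32).
 * The mask_encode() helper, the GENMASK_ULL macro and the VSGI_OPCODE
 * value are illustrative assumptions for the sketch, not verbatim
 * kernel definitions.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l)	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

#define VSGI_OPCODE	0x2fULL	/* hypothetical opcode, illustration only */

/* Masked read-modify-write, mirroring the its_mask_encode() pattern */
static void mask_encode(uint64_t *raw, uint64_t val, int h, int l)
{
	uint64_t mask = GENMASK_ULL(h, l);

	*raw &= ~mask;			/* clear the target field  */
	*raw |= (val << l) & mask;	/* insert the new value    */
}

int main(void)
{
	uint64_t cmd0 = 0;

	mask_encode(&cmd0, VSGI_OPCODE, 7, 0);	/* opcode slot (assumed bits [7:0])       */
	mask_encode(&cmd0, 1, 8, 8);		/* enable, cf. its_encode_sgi_enable()    */
	mask_encode(&cmd0, 0, 9, 9);		/* clear,  cf. its_encode_sgi_clear()     */
	mask_encode(&cmd0, 1, 10, 10);		/* group,  cf. its_encode_sgi_group()     */
	mask_encode(&cmd0, 0xa0 >> 4, 23, 20);	/* priority[7:4], cf. its_encode_sgi_priority() */
	mask_encode(&cmd0, 5, 35, 32);		/* SGI INTID 5, cf. its_encode_sgi_intid()      */

	printf("VSGI command word 0 = %#018" PRIx64 "\n", cmd0);
	return 0;
}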
static u64 its_cmd_ptr_to_offset(struct its_node *its, struct its_cmd_block *ptr) { @@ -739,32 +1164,43 @@ static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd) } static int its_wait_for_range_completion(struct its_node *its, - struct its_cmd_block *from, + u64 prev_idx, struct its_cmd_block *to) { - u64 rd_idx, from_idx, to_idx; + u64 rd_idx, to_idx, linear_idx; u32 count = 1000000; /* 1s! */ - from_idx = its_cmd_ptr_to_offset(its, from); + /* Linearize to_idx if the command set has wrapped around */ to_idx = its_cmd_ptr_to_offset(its, to); + if (to_idx < prev_idx) + to_idx += ITS_CMD_QUEUE_SZ; + + linear_idx = prev_idx; while (1) { + s64 delta; + rd_idx = readl_relaxed(its->base + GITS_CREADR); - /* Direct case */ - if (from_idx < to_idx && rd_idx >= to_idx) - break; + /* + * Compute the read pointer progress, taking the + * potential wrap-around into account. + */ + delta = rd_idx - prev_idx; + if (rd_idx < prev_idx) + delta += ITS_CMD_QUEUE_SZ; - /* Wrapped case */ - if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx) + linear_idx += delta; + if (linear_idx >= to_idx) break; count--; if (!count) { - pr_err_ratelimited("ITS queue timeout (%llu %llu %llu)\n", - from_idx, to_idx, rd_idx); + pr_err_ratelimited("ITS queue timeout (%llu %llu)\n", + to_idx, linear_idx); return -1; } + prev_idx = rd_idx; cpu_relax(); udelay(1); } @@ -781,6 +1217,7 @@ void name(struct its_node *its, \ struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \ synctype *sync_obj; \ unsigned long flags; \ + u64 rd_idx; \ \ raw_spin_lock_irqsave(&its->lock, flags); \ \ @@ -802,10 +1239,11 @@ void name(struct its_node *its, \ } \ \ post: \ + rd_idx = readl_relaxed(its->base + GITS_CREADR); \ next_cmd = its_post_commands(its); \ raw_spin_unlock_irqrestore(&its->lock, flags); \ \ - if (its_wait_for_range_completion(its, cmd, next_cmd)) \ + if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \ pr_err_ratelimited("ITS cmd %ps failed\n", builder); \ } @@ -930,7 +1368,7 @@ static void its_send_invall(struct its_node *its, struct its_collection *col) static void its_send_vmapti(struct its_device *dev, u32 id) { - struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id]; + struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id); struct its_cmd_desc desc; desc.its_vmapti_cmd.vpe = map->vpe; @@ -944,7 +1382,7 @@ static void its_send_vmapti(struct its_device *dev, u32 id) static void its_send_vmovi(struct its_device *dev, u32 id) { - struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id]; + struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id); struct its_cmd_desc desc; desc.its_vmovi_cmd.vpe = map->vpe; @@ -969,17 +1407,14 @@ static void its_send_vmapp(struct its_node *its, static void its_send_vmovp(struct its_vpe *vpe) { - struct its_cmd_desc desc; + struct its_cmd_desc desc = {}; struct its_node *its; - unsigned long flags; int col_id = vpe->col_idx; desc.its_vmovp_cmd.vpe = vpe; - desc.its_vmovp_cmd.its_list = (u16)its_list_map; if (!its_list_map) { its = list_first_entry(&its_nodes, struct its_node, entry); - desc.its_vmovp_cmd.seq_num = 0; desc.its_vmovp_cmd.col = &its->collections[col_id]; its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); return; @@ -993,23 +1428,21 @@ static void its_send_vmovp(struct its_vpe *vpe) * * Wall <-- Head. 
*/ - raw_spin_lock_irqsave(&vmovp_lock, flags); - + guard(raw_spinlock)(&vmovp_lock); desc.its_vmovp_cmd.seq_num = vmovp_seq_num++; + desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm); /* Emit VMOVPs */ list_for_each_entry(its, &its_nodes, entry) { - if (!its->is_v4) + if (!is_v4(its)) continue; - if (!vpe->its_vm->vlpi_count[its->list_nr]) + if (!require_its_list_vmovp(vpe->its_vm, its)) continue; desc.its_vmovp_cmd.col = &its->collections[col_id]; its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); } - - raw_spin_unlock_irqrestore(&vmovp_lock, flags); } static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe) @@ -1020,29 +1453,68 @@ static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe) its_send_single_vcommand(its, its_build_vinvall_cmd, &desc); } -/* - * irqchip functions - assumes MSI, mostly. - */ +static void its_send_vinv(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; -static inline u32 its_get_event_id(struct irq_data *d) + /* + * There is no real VINV command. This is just a normal INV, + * with a VSYNC instead of a SYNC. + */ + desc.its_inv_cmd.dev = dev; + desc.its_inv_cmd.event_id = event_id; + + its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc); +} + +static void its_send_vint(struct its_device *dev, u32 event_id) { - struct its_device *its_dev = irq_data_get_irq_chip_data(d); - return d->hwirq - its_dev->event_map.lpi_base; + struct its_cmd_desc desc; + + /* + * There is no real VINT command. This is just a normal INT, + * with a VSYNC instead of a SYNC. + */ + desc.its_int_cmd.dev = dev; + desc.its_int_cmd.event_id = event_id; + + its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc); } +static void its_send_vclear(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + /* + * There is no real VCLEAR command. This is just a normal CLEAR, + * with a VSYNC instead of a SYNC. + */ + desc.its_clear_cmd.dev = dev; + desc.its_clear_cmd.event_id = event_id; + + its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc); +} + +static void its_send_invdb(struct its_node *its, struct its_vpe *vpe) +{ + struct its_cmd_desc desc; + + desc.its_invdb_cmd.vpe = vpe; + its_send_single_vcommand(its, its_build_invdb_cmd, &desc); +} + +/* + * irqchip functions - assumes MSI, mostly. 
+ */ static void lpi_write_config(struct irq_data *d, u8 clr, u8 set) { + struct its_vlpi_map *map = get_vlpi_map(d); irq_hw_number_t hwirq; void *va; u8 *cfg; - if (irqd_is_forwarded_to_vcpu(d)) { - struct its_device *its_dev = irq_data_get_irq_chip_data(d); - u32 event = its_get_event_id(d); - struct its_vlpi_map *map; - - va = page_address(its_dev->event_map.vm->vprop_page); - map = &its_dev->event_map.vlpi_maps[event]; + if (map) { + va = page_address(map->vm->vprop_page); hwirq = map->vintid; /* Remember the updated property */ @@ -1068,30 +1540,90 @@ static void lpi_write_config(struct irq_data *d, u8 clr, u8 set) dsb(ishst); } +static void wait_for_syncr(void __iomem *rdbase) +{ + while (readl_relaxed(rdbase + GICR_SYNCR) & 1) + cpu_relax(); +} + +static void __direct_lpi_inv(struct irq_data *d, u64 val) +{ + void __iomem *rdbase; + unsigned long flags; + int cpu; + + /* Target the redistributor this LPI is currently routed to */ + cpu = irq_to_cpuid_lock(d, &flags); + raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock); + + rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base; + gic_write_lpir(val, rdbase + GICR_INVLPIR); + wait_for_syncr(rdbase); + + raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock); + irq_to_cpuid_unlock(d, flags); +} + +static void direct_lpi_inv(struct irq_data *d) +{ + struct its_vlpi_map *map = get_vlpi_map(d); + u64 val; + + if (map) { + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + + WARN_ON(!is_v4_1(its_dev->its)); + + val = GICR_INVLPIR_V; + val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id); + val |= FIELD_PREP(GICR_INVLPIR_INTID, map->vintid); + } else { + val = d->hwirq; + } + + __direct_lpi_inv(d, val); +} + static void lpi_update_config(struct irq_data *d, u8 clr, u8 set) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); lpi_write_config(d, clr, set); - its_send_inv(its_dev, its_get_event_id(d)); + if (gic_rdists->has_direct_lpi && + (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d))) + direct_lpi_inv(d); + else if (!irqd_is_forwarded_to_vcpu(d)) + its_send_inv(its_dev, its_get_event_id(d)); + else + its_send_vinv(its_dev, its_get_event_id(d)); } static void its_vlpi_set_doorbell(struct irq_data *d, bool enable) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); u32 event = its_get_event_id(d); + struct its_vlpi_map *map; + + /* + * GICv4.1 does away with the per-LPI nonsense, nothing to do + * here. + */ + if (is_v4_1(its_dev->its)) + return; - if (its_dev->event_map.vlpi_maps[event].db_enabled == enable) + map = dev_event_to_vlpi_map(its_dev, event); + + if (map->db_enabled == enable) return; - its_dev->event_map.vlpi_maps[event].db_enabled = enable; + map->db_enabled = enable; /* * More fun with the architecture: * * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI * value or to 1023, depending on the enable bit. But that - * would be issueing a mapping for an /existing/ DevID+EventID + * would be issuing a mapping for an /existing/ DevID+EventID * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI * to the /same/ vPE, using this opportunity to adjust the * doorbell. Mouahahahaha. We loves it, Precious. 
@@ -1115,42 +1647,161 @@ static void its_unmask_irq(struct irq_data *d) lpi_update_config(d, 0, LPI_PROP_ENABLED); } +static __maybe_unused u32 its_read_lpi_count(struct irq_data *d, int cpu) +{ + if (irqd_affinity_is_managed(d)) + return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed); + + return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged); +} + +static void its_inc_lpi_count(struct irq_data *d, int cpu) +{ + if (irqd_affinity_is_managed(d)) + atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed); + else + atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged); +} + +static void its_dec_lpi_count(struct irq_data *d, int cpu) +{ + if (irqd_affinity_is_managed(d)) + atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed); + else + atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged); +} + +static unsigned int cpumask_pick_least_loaded(struct irq_data *d, + const struct cpumask *cpu_mask) +{ + unsigned int cpu = nr_cpu_ids, tmp; + int count = S32_MAX; + + for_each_cpu(tmp, cpu_mask) { + int this_count = its_read_lpi_count(d, tmp); + if (this_count < count) { + cpu = tmp; + count = this_count; + } + } + + return cpu; +} + +/* + * As suggested by Thomas Gleixner in: + * https://lore.kernel.org/r/87h80q2aoc.fsf@nanos.tec.linutronix.de + */ +static int its_select_cpu(struct irq_data *d, + const struct cpumask *aff_mask) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + static DEFINE_RAW_SPINLOCK(tmpmask_lock); + static struct cpumask __tmpmask; + struct cpumask *tmpmask; + unsigned long flags; + int cpu, node; + node = its_dev->its->numa_node; + tmpmask = &__tmpmask; + + raw_spin_lock_irqsave(&tmpmask_lock, flags); + + if (!irqd_affinity_is_managed(d)) { + /* First try the NUMA node */ + if (node != NUMA_NO_NODE) { + /* + * Try the intersection of the affinity mask and the + * node mask (and the online mask, just to be safe). + */ + cpumask_and(tmpmask, cpumask_of_node(node), aff_mask); + cpumask_and(tmpmask, tmpmask, cpu_online_mask); + + /* + * Ideally, we would check if the mask is empty, and + * try again on the full node here. + * + * But it turns out that the way ACPI describes the + * affinity for ITSs only deals with memory, and + * not target CPUs, so it cannot describe a single + * ITS placed next to two NUMA nodes. + * + * Instead, just fall back on the online mask. This + * diverges from Thomas' suggestion above. 
+ */ + cpu = cpumask_pick_least_loaded(d, tmpmask); + if (cpu < nr_cpu_ids) + goto out; + + /* If we can't cross sockets, give up */ + if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)) + goto out; + + /* If the above failed, expand the search */ + } + + /* Try the intersection of the affinity and online masks */ + cpumask_and(tmpmask, aff_mask, cpu_online_mask); + + /* If that doesn't fly, the online mask is the last resort */ + if (cpumask_empty(tmpmask)) + cpumask_copy(tmpmask, cpu_online_mask); + + cpu = cpumask_pick_least_loaded(d, tmpmask); + } else { + cpumask_copy(tmpmask, aff_mask); + + /* If we cannot cross sockets, limit the search to that node */ + if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) && + node != NUMA_NO_NODE) + cpumask_and(tmpmask, tmpmask, cpumask_of_node(node)); + + cpu = cpumask_pick_least_loaded(d, tmpmask); + } +out: + raw_spin_unlock_irqrestore(&tmpmask_lock, flags); + + pr_debug("IRQ%d -> %*pbl CPU%d\n", d->irq, cpumask_pr_args(aff_mask), cpu); + return cpu; +} + static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force) { - unsigned int cpu; - const struct cpumask *cpu_mask = cpu_online_mask; struct its_device *its_dev = irq_data_get_irq_chip_data(d); struct its_collection *target_col; u32 id = its_get_event_id(d); + int cpu, prev_cpu; /* A forwarded interrupt should use irq_set_vcpu_affinity */ if (irqd_is_forwarded_to_vcpu(d)) return -EINVAL; - /* lpi cannot be routed to a redistributor that is on a foreign node */ - if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { - if (its_dev->its->numa_node >= 0) { - cpu_mask = cpumask_of_node(its_dev->its->numa_node); - if (!cpumask_intersects(mask_val, cpu_mask)) - return -EINVAL; - } - } + prev_cpu = its_dev->event_map.col_map[id]; + its_dec_lpi_count(d, prev_cpu); - cpu = cpumask_any_and(mask_val, cpu_mask); + if (!force) + cpu = its_select_cpu(d, mask_val); + else + cpu = cpumask_pick_least_loaded(d, mask_val); - if (cpu >= nr_cpu_ids) - return -EINVAL; + if (cpu < 0 || cpu >= nr_cpu_ids) + goto err; /* don't set the affinity when the target cpu is same as current one */ - if (cpu != its_dev->event_map.col_map[id]) { + if (cpu != prev_cpu) { target_col = &its_dev->its->collections[cpu]; its_send_movi(its_dev, target_col, id); its_dev->event_map.col_map[id] = cpu; irq_data_update_effective_affinity(d, cpumask_of(cpu)); } + its_inc_lpi_count(d, cpu); + return IRQ_SET_MASK_OK_DONE; + +err: + its_inc_lpi_count(d, prev_cpu); + return -EINVAL; } static u64 its_irq_get_msi_base(struct its_device *its_dev) @@ -1163,17 +1814,10 @@ static u64 its_irq_get_msi_base(struct its_device *its_dev) static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); - struct its_node *its; - u64 addr; - - its = its_dev->its; - addr = its->get_msi_base(its_dev); - - msg->address_lo = lower_32_bits(addr); - msg->address_hi = upper_32_bits(addr); - msg->data = its_get_event_id(d); - iommu_dma_map_msi_msg(d->irq, msg); + msg->data = its_get_event_id(d); + msi_msg_set_addr(irq_data_get_msi_desc(d), msg, + its_dev->its->get_msi_base(its_dev)); } static int its_irq_set_irqchip_state(struct irq_data *d, @@ -1186,23 +1830,52 @@ static int its_irq_set_irqchip_state(struct irq_data *d, if (which != IRQCHIP_STATE_PENDING) return -EINVAL; - if (state) - its_send_int(its_dev, event); - else - its_send_clear(its_dev, event); + if (irqd_is_forwarded_to_vcpu(d)) { + if (state) + its_send_vint(its_dev, 
event); + else + its_send_vclear(its_dev, event); + } else { + if (state) + its_send_int(its_dev, event); + else + its_send_clear(its_dev, event); + } return 0; } -static void its_map_vm(struct its_node *its, struct its_vm *vm) +static int its_irq_retrigger(struct irq_data *d) { - unsigned long flags; + return !its_irq_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true); +} - /* Not using the ITS list? Everything is always mapped. */ - if (!its_list_map) +/* + * Two favourable cases: + * + * (a) Either we have a GICv4.1, and all vPEs have to be mapped at all times + * for vSGI delivery + * + * (b) Or the ITSs do not use a list map, meaning that VMOVP is cheap enough + * and we're better off mapping all VPEs always + * + * If neither (a) nor (b) is true, then we map vPEs on demand. + * + */ +static bool gic_requires_eager_mapping(void) +{ + if (!its_list_map || gic_rdists->has_rvpeid) + return true; + + return false; +} + +static void its_map_vm(struct its_node *its, struct its_vm *vm) +{ + if (gic_requires_eager_mapping()) return; - raw_spin_lock_irqsave(&vmovp_lock, flags); + guard(raw_spinlock_irqsave)(&vm->vmapp_lock); /* * If the VM wasn't mapped yet, iterate over the vpes and get @@ -1215,65 +1888,53 @@ static void its_map_vm(struct its_node *its, struct its_vm *vm) for (i = 0; i < vm->nr_vpes; i++) { struct its_vpe *vpe = vm->vpes[i]; - struct irq_data *d = irq_get_irq_data(vpe->irq); - /* Map the VPE to the first possible CPU */ - vpe->col_idx = cpumask_first(cpu_online_mask); - its_send_vmapp(its, vpe, true); + scoped_guard(raw_spinlock, &vpe->vpe_lock) + its_send_vmapp(its, vpe, true); + its_send_vinvall(its, vpe); - irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); } } - - raw_spin_unlock_irqrestore(&vmovp_lock, flags); } static void its_unmap_vm(struct its_node *its, struct its_vm *vm) { - unsigned long flags; - /* Not using the ITS list? Everything is always mapped. 
*/ - if (!its_list_map) + if (gic_requires_eager_mapping()) return; - raw_spin_lock_irqsave(&vmovp_lock, flags); + guard(raw_spinlock_irqsave)(&vm->vmapp_lock); if (!--vm->vlpi_count[its->list_nr]) { int i; - for (i = 0; i < vm->nr_vpes; i++) + for (i = 0; i < vm->nr_vpes; i++) { + guard(raw_spinlock)(&vm->vpes[i]->vpe_lock); its_send_vmapp(its, vm->vpes[i], false); + } } - - raw_spin_unlock_irqrestore(&vmovp_lock, flags); } static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); u32 event = its_get_event_id(d); - int ret = 0; if (!info->map) return -EINVAL; - mutex_lock(&its_dev->event_map.vlpi_lock); - if (!its_dev->event_map.vm) { struct its_vlpi_map *maps; maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps), - GFP_KERNEL); - if (!maps) { - ret = -ENOMEM; - goto out; - } + GFP_ATOMIC); + if (!maps) + return -ENOMEM; its_dev->event_map.vm = info->map->vm; its_dev->event_map.vlpi_maps = maps; } else if (its_dev->event_map.vm != info->map->vm) { - ret = -EINVAL; - goto out; + return -EINVAL; } /* Get our private copy of the mapping information */ @@ -1305,45 +1966,32 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) its_dev->event_map.nr_vlpis++; } -out: - mutex_unlock(&its_dev->event_map.vlpi_lock); - return ret; + return 0; } static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); - u32 event = its_get_event_id(d); - int ret = 0; + struct its_vlpi_map *map; - mutex_lock(&its_dev->event_map.vlpi_lock); + map = get_vlpi_map(d); - if (!its_dev->event_map.vm || - !its_dev->event_map.vlpi_maps[event].vm) { - ret = -EINVAL; - goto out; - } + if (!its_dev->event_map.vm || !map) + return -EINVAL; /* Copy our mapping information to the incoming request */ - *info->map = its_dev->event_map.vlpi_maps[event]; + *info->map = *map; -out: - mutex_unlock(&its_dev->event_map.vlpi_lock); - return ret; + return 0; } static int its_vlpi_unmap(struct irq_data *d) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); u32 event = its_get_event_id(d); - int ret = 0; - - mutex_lock(&its_dev->event_map.vlpi_lock); - if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) { - ret = -EINVAL; - goto out; - } + if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) + return -EINVAL; /* Drop the virtual mapping */ its_send_discard(its_dev, event); @@ -1351,7 +1999,7 @@ static int its_vlpi_unmap(struct irq_data *d) /* and restore the physical one */ irqd_clr_forwarded_to_vcpu(d); its_send_mapti(its_dev, d->hwirq, event); - lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO | + lpi_update_config(d, 0xff, (lpi_prop_prio | LPI_PROP_ENABLED | LPI_PROP_GROUP1)); @@ -1367,9 +2015,7 @@ static int its_vlpi_unmap(struct irq_data *d) kfree(its_dev->event_map.vlpi_maps); } -out: - mutex_unlock(&its_dev->event_map.vlpi_lock); - return ret; + return 0; } static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info) @@ -1394,9 +2040,11 @@ static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) struct its_cmd_info *info = vcpu_info; /* Need a v4 ITS */ - if (!its_dev->its->is_v4) + if (!is_v4(its_dev->its)) return -EINVAL; + guard(raw_spinlock)(&its_dev->event_map.vlpi_lock); + /* Unmap request? 
*/ if (!info) return its_vlpi_unmap(d); @@ -1425,6 +2073,7 @@ static struct irq_chip its_irq_chip = { .irq_set_affinity = its_set_affinity, .irq_compose_msi_msg = its_irq_compose_msi_msg, .irq_set_irqchip_state = its_irq_set_irqchip_state, + .irq_retrigger = its_irq_retrigger, .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity, }; @@ -1459,9 +2108,8 @@ static struct lpi_range *mk_lpi_range(u32 base, u32 span) { struct lpi_range *range; - range = kzalloc(sizeof(*range), GFP_KERNEL); + range = kmalloc(sizeof(*range), GFP_KERNEL); if (range) { - INIT_LIST_HEAD(&range->entry); range->base_id = base; range->span = span; } @@ -1469,31 +2117,6 @@ static struct lpi_range *mk_lpi_range(u32 base, u32 span) return range; } -static int lpi_range_cmp(void *priv, struct list_head *a, struct list_head *b) -{ - struct lpi_range *ra, *rb; - - ra = container_of(a, struct lpi_range, entry); - rb = container_of(b, struct lpi_range, entry); - - return rb->base_id - ra->base_id; -} - -static void merge_lpi_ranges(void) -{ - struct lpi_range *range, *tmp; - - list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) { - if (!list_is_last(&range->entry, &lpi_range_list) && - (tmp->base_id == (range->base_id + range->span))) { - tmp->base_id = range->base_id; - tmp->span += range->span; - list_del(&range->entry); - kfree(range); - } - } -} - static int alloc_lpi_range(u32 nr_lpis, u32 *base) { struct lpi_range *range, *tmp; @@ -1523,25 +2146,49 @@ static int alloc_lpi_range(u32 nr_lpis, u32 *base) return err; } +static void merge_lpi_ranges(struct lpi_range *a, struct lpi_range *b) +{ + if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list) + return; + if (a->base_id + a->span != b->base_id) + return; + b->base_id = a->base_id; + b->span += a->span; + list_del(&a->entry); + kfree(a); +} + static int free_lpi_range(u32 base, u32 nr_lpis) { - struct lpi_range *new; - int err = 0; + struct lpi_range *new, *old; + + new = mk_lpi_range(base, nr_lpis); + if (!new) + return -ENOMEM; mutex_lock(&lpi_range_lock); - new = mk_lpi_range(base, nr_lpis); - if (!new) { - err = -ENOMEM; - goto out; + list_for_each_entry_reverse(old, &lpi_range_list, entry) { + if (old->base_id < base) + break; } + /* + * old is the last element with ->base_id smaller than base, + * so new goes right after it. If there are no elements with + * ->base_id smaller than base, &old->entry ends up pointing + * at the head of the list, and inserting new at the start of + * the list is the right thing to do in that case as well. + */ + list_add(&new->entry, &old->entry); + /* + * Now check if we can merge with the preceding and/or + * following ranges. 
+ */ + merge_lpi_ranges(old, new); + merge_lpi_ranges(new, list_next_entry(new, entry)); - list_add(&new->entry, &lpi_range_list); - list_sort(NULL, &lpi_range_list, lpi_range_cmp); - merge_lpi_ranges(); -out: mutex_unlock(&lpi_range_lock); - return err; + return 0; } static int __init its_lpi_init(u32 id_bits) @@ -1580,10 +2227,13 @@ static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids) nr_irqs /= 2; } while (nr_irqs > 0); + if (!nr_irqs) + err = -ENOSPC; + if (err) goto out; - bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof (long), GFP_ATOMIC); + bitmap = bitmap_zalloc(nr_irqs, GFP_ATOMIC); if (!bitmap) goto out; @@ -1599,13 +2249,13 @@ out: static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids) { WARN_ON(free_lpi_range(base, nr_ids)); - kfree(bitmap); + bitmap_free(bitmap); } static void gic_reset_prop_table(void *va) { - /* Priority 0xa0, Group-1, disabled */ - memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ); + /* Regular IRQ priority, Group-1, disabled */ + memset(va, lpi_prop_prio | LPI_PROP_GROUP1, LPI_PROPBASE_SZ); /* Make sure the GIC will observe the written configuration */ gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ); @@ -1615,7 +2265,8 @@ static struct page *its_allocate_prop_table(gfp_t gfp_flags) { struct page *prop_page; - prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ)); + prop_page = its_alloc_pages(gfp_flags, + get_order(LPI_PROPBASE_SZ)); if (!prop_page) return NULL; @@ -1626,8 +2277,7 @@ static struct page *its_allocate_prop_table(gfp_t gfp_flags) static void its_free_prop_table(struct page *prop_page) { - free_pages((unsigned long)page_address(prop_page), - get_order(LPI_PROPBASE_SZ)); + its_free_pages(page_address(prop_page), get_order(LPI_PROPBASE_SZ)); } static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size) @@ -1645,7 +2295,7 @@ static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size) addr_end = addr + size - 1; - for_each_reserved_mem_region(i, &start, &end) { + for_each_reserved_mem_range(i, &start, &end) { if (addr >= start && addr_end <= end) return true; } @@ -1729,17 +2379,17 @@ static void its_write_baser(struct its_node *its, struct its_baser *baser, } static int its_setup_baser(struct its_node *its, struct its_baser *baser, - u64 cache, u64 shr, u32 psz, u32 order, - bool indirect) + u64 cache, u64 shr, u32 order, bool indirect) { u64 val = its_read_baser(its, baser); u64 esz = GITS_BASER_ENTRY_SIZE(val); u64 type = GITS_BASER_TYPE(val); u64 baser_phys, tmp; - u32 alloc_pages; + u32 alloc_pages, psz; + struct page *page; void *base; -retry_alloc_baser: + psz = baser->psz; alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz); if (alloc_pages > GITS_BASER_PAGES_MAX) { pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n", @@ -1749,10 +2399,11 @@ retry_alloc_baser: order = get_order(GITS_BASER_PAGES_MAX * psz); } - base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); - if (!base) + page = its_alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order); + if (!page) return -ENOMEM; + base = (void *)page_address(page); baser_phys = virt_to_phys(base); /* Check if the physical address of the memory is above 48bits */ @@ -1761,7 +2412,7 @@ retry_alloc_baser: /* 52bit PA is supported only when PageSize=64K */ if (psz != SZ_64K) { pr_err("ITS: no 52bit PA support when psz=%d\n", psz); - free_pages((unsigned long)base, order); + its_free_pages(base, order); return -ENXIO; } @@ -1792,6 +2443,9 @@ retry_baser: break; } + if (!shr) + 
gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order)); + its_write_baser(its, baser, val); tmp = baser->val; @@ -1804,37 +2458,17 @@ retry_baser: * non-cacheable as well. */ shr = tmp & GITS_BASER_SHAREABILITY_MASK; - if (!shr) { + if (!shr) cache = GITS_BASER_nC; - gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order)); - } - goto retry_baser; - } - - if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) { - /* - * Page size didn't stick. Let's try a smaller - * size and retry. If we reach 4K, then - * something is horribly wrong... - */ - free_pages((unsigned long)base, order); - baser->base = NULL; - switch (psz) { - case SZ_16K: - psz = SZ_4K; - goto retry_alloc_baser; - case SZ_64K: - psz = SZ_16K; - goto retry_alloc_baser; - } + goto retry_baser; } if (val != tmp) { pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n", &its->phys_base, its_base_type_string[type], val, tmp); - free_pages((unsigned long)base, order); + its_free_pages(base, order); return -ENXIO; } @@ -1855,13 +2489,14 @@ retry_baser: static bool its_parse_indirect_baser(struct its_node *its, struct its_baser *baser, - u32 psz, u32 *order, u32 ids) + u32 *order, u32 ids) { u64 tmp = its_read_baser(its, baser); u64 type = GITS_BASER_TYPE(tmp); u64 esz = GITS_BASER_ENTRY_SIZE(tmp); u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb; u32 new_order = *order; + u32 psz = baser->psz; bool indirect = false; /* No need to enable Indirection if memory requirement < (psz*2)bytes */ @@ -1894,12 +2529,12 @@ static bool its_parse_indirect_baser(struct its_node *its, * feature is not supported by hardware. */ new_order = max_t(u32, get_order(esz << ids), new_order); - if (new_order >= MAX_ORDER) { - new_order = MAX_ORDER - 1; + if (new_order > MAX_PAGE_ORDER) { + new_order = MAX_PAGE_ORDER; ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz); - pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n", + pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n", &its->phys_base, its_base_type_string[type], - its->device_ids, ids); + device_ids(its), ids); } *order = new_order; @@ -1907,60 +2542,187 @@ static bool its_parse_indirect_baser(struct its_node *its, return indirect; } +static u32 compute_common_aff(u64 val) +{ + u32 aff, clpiaff; + + aff = FIELD_GET(GICR_TYPER_AFFINITY, val); + clpiaff = FIELD_GET(GICR_TYPER_COMMON_LPI_AFF, val); + + return aff & ~(GENMASK(31, 0) >> (clpiaff * 8)); +} + +static u32 compute_its_aff(struct its_node *its) +{ + u64 val; + u32 svpet; + + /* + * Reencode the ITS SVPET and MPIDR as a GICR_TYPER, and compute + * the resulting affinity. We then use that to see if this matches + * our own affinity. 
+ */ + svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer); + val = FIELD_PREP(GICR_TYPER_COMMON_LPI_AFF, svpet); + val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr); + return compute_common_aff(val); +} + +static struct its_node *find_sibling_its(struct its_node *cur_its) +{ + struct its_node *its; + u32 aff; + + if (!FIELD_GET(GITS_TYPER_SVPET, cur_its->typer)) + return NULL; + + aff = compute_its_aff(cur_its); + + list_for_each_entry(its, &its_nodes, entry) { + u64 baser; + + if (!is_v4_1(its) || its == cur_its) + continue; + + if (!FIELD_GET(GITS_TYPER_SVPET, its->typer)) + continue; + + if (aff != compute_its_aff(its)) + continue; + + /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */ + baser = its->tables[2].val; + if (!(baser & GITS_BASER_VALID)) + continue; + + return its; + } + + return NULL; +} + static void its_free_tables(struct its_node *its) { int i; for (i = 0; i < GITS_BASER_NR_REGS; i++) { if (its->tables[i].base) { - free_pages((unsigned long)its->tables[i].base, - its->tables[i].order); + its_free_pages(its->tables[i].base, its->tables[i].order); its->tables[i].base = NULL; } } } +static int its_probe_baser_psz(struct its_node *its, struct its_baser *baser) +{ + u64 psz = SZ_64K; + + while (psz) { + u64 val, gpsz; + + val = its_read_baser(its, baser); + val &= ~GITS_BASER_PAGE_SIZE_MASK; + + switch (psz) { + case SZ_64K: + gpsz = GITS_BASER_PAGE_SIZE_64K; + break; + case SZ_16K: + gpsz = GITS_BASER_PAGE_SIZE_16K; + break; + case SZ_4K: + default: + gpsz = GITS_BASER_PAGE_SIZE_4K; + break; + } + + gpsz >>= GITS_BASER_PAGE_SIZE_SHIFT; + + val |= FIELD_PREP(GITS_BASER_PAGE_SIZE_MASK, gpsz); + its_write_baser(its, baser, val); + + if (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser->val) == gpsz) + break; + + switch (psz) { + case SZ_64K: + psz = SZ_16K; + break; + case SZ_16K: + psz = SZ_4K; + break; + case SZ_4K: + default: + return -1; + } + } + + baser->psz = psz; + return 0; +} + static int its_alloc_tables(struct its_node *its) { u64 shr = GITS_BASER_InnerShareable; u64 cache = GITS_BASER_RaWaWb; - u32 psz = SZ_64K; int err, i; if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) /* erratum 24313: ignore memory access type */ cache = GITS_BASER_nCnB; + if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE) { + cache = GITS_BASER_nC; + shr = 0; + } + for (i = 0; i < GITS_BASER_NR_REGS; i++) { struct its_baser *baser = its->tables + i; u64 val = its_read_baser(its, baser); u64 type = GITS_BASER_TYPE(val); - u32 order = get_order(psz); bool indirect = false; + u32 order; - switch (type) { - case GITS_BASER_TYPE_NONE: + if (type == GITS_BASER_TYPE_NONE) continue; + if (its_probe_baser_psz(its, baser)) { + its_free_tables(its); + return -ENXIO; + } + + order = get_order(baser->psz); + + switch (type) { case GITS_BASER_TYPE_DEVICE: - indirect = its_parse_indirect_baser(its, baser, - psz, &order, - its->device_ids); + indirect = its_parse_indirect_baser(its, baser, &order, + device_ids(its)); + break; + case GITS_BASER_TYPE_VCPU: - indirect = its_parse_indirect_baser(its, baser, - psz, &order, + if (is_v4_1(its)) { + struct its_node *sibling; + + WARN_ON(i != 2); + if ((sibling = find_sibling_its(its))) { + *baser = sibling->tables[2]; + its_write_baser(its, baser, baser->val); + continue; + } + } + + indirect = its_parse_indirect_baser(its, baser, &order, ITS_MAX_VPEID_BITS); break; } - err = its_setup_baser(its, baser, cache, shr, psz, order, indirect); + err = its_setup_baser(its, baser, cache, shr, order, indirect); if (err < 0) { its_free_tables(its); return err; } /* Update 
settings which will be used for next BASERn */ - psz = baser->psz; cache = baser->val & GITS_BASER_CACHEABILITY_MASK; shr = baser->val & GITS_BASER_SHAREABILITY_MASK; } @@ -1968,6 +2730,297 @@ static int its_alloc_tables(struct its_node *its) return 0; } +static u64 inherit_vpe_l1_table_from_its(void) +{ + struct its_node *its; + u64 val; + u32 aff; + + val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); + aff = compute_common_aff(val); + + list_for_each_entry(its, &its_nodes, entry) { + u64 baser, addr; + + if (!is_v4_1(its)) + continue; + + if (!FIELD_GET(GITS_TYPER_SVPET, its->typer)) + continue; + + if (aff != compute_its_aff(its)) + continue; + + /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */ + baser = its->tables[2].val; + if (!(baser & GITS_BASER_VALID)) + continue; + + /* We have a winner! */ + gic_data_rdist()->vpe_l1_base = its->tables[2].base; + + val = GICR_VPROPBASER_4_1_VALID; + if (baser & GITS_BASER_INDIRECT) + val |= GICR_VPROPBASER_4_1_INDIRECT; + val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, + FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)); + switch (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)) { + case GIC_PAGE_SIZE_64K: + addr = GITS_BASER_ADDR_48_to_52(baser); + break; + default: + addr = baser & GENMASK_ULL(47, 12); + break; + } + val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12); + if (rdists_support_shareable()) { + val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK, + FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser)); + val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK, + FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser)); + } + val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1); + + *this_cpu_ptr(&local_4_1_its) = its; + return val; + } + + return 0; +} + +static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask) +{ + u32 aff; + u64 val; + int cpu; + + val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); + aff = compute_common_aff(val); + + for_each_possible_cpu(cpu) { + void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base; + + if (!base || cpu == smp_processor_id()) + continue; + + val = gic_read_typer(base + GICR_TYPER); + if (aff != compute_common_aff(val)) + continue; + + /* + * At this point, we have a victim. This particular CPU + * has already booted, and has an affinity that matches + * ours wrt CommonLPIAff. Let's use its own VPROPBASER. + * Make sure we don't write the Z bit in that case. 
+ */ + val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER); + val &= ~GICR_VPROPBASER_4_1_Z; + + gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base; + *mask = gic_data_rdist_cpu(cpu)->vpe_table_mask; + + *this_cpu_ptr(&local_4_1_its) = *per_cpu_ptr(&local_4_1_its, cpu); + return val; + } + + return 0; +} + +static bool allocate_vpe_l2_table(int cpu, u32 id) +{ + void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base; + unsigned int psz, esz, idx, npg, gpsz; + u64 val; + struct page *page; + __le64 *table; + + if (!gic_rdists->has_rvpeid) + return true; + + /* Skip non-present CPUs */ + if (!base) + return true; + + val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER); + + esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1; + gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val); + npg = FIELD_GET(GICR_VPROPBASER_4_1_SIZE, val) + 1; + + switch (gpsz) { + default: + WARN_ON(1); + fallthrough; + case GIC_PAGE_SIZE_4K: + psz = SZ_4K; + break; + case GIC_PAGE_SIZE_16K: + psz = SZ_16K; + break; + case GIC_PAGE_SIZE_64K: + psz = SZ_64K; + break; + } + + /* Don't allow vpe_id that exceeds single, flat table limit */ + if (!(val & GICR_VPROPBASER_4_1_INDIRECT)) + return (id < (npg * psz / (esz * SZ_8))); + + /* Compute 1st level table index & check if that exceeds table limit */ + idx = id >> ilog2(psz / (esz * SZ_8)); + if (idx >= (npg * psz / GITS_LVL1_ENTRY_SIZE)) + return false; + + table = gic_data_rdist_cpu(cpu)->vpe_l1_base; + + /* Allocate memory for 2nd level table */ + if (!table[idx]) { + page = its_alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz)); + if (!page) + return false; + + /* Flush Lvl2 table to PoC if hw doesn't support coherency */ + if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK)) + gic_flush_dcache_to_poc(page_address(page), psz); + + table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID); + + /* Flush Lvl1 entry to PoC if hw doesn't support coherency */ + if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK)) + gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE); + + /* Ensure updated table contents are visible to RD hardware */ + dsb(sy); + } + + return true; +} + +static int allocate_vpe_l1_table(void) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val, gpsz, npg, pa; + unsigned int psz = SZ_64K; + unsigned int np, epp, esz; + struct page *page; + + if (!gic_rdists->has_rvpeid) + return 0; + + /* + * if VPENDBASER.Valid is set, disable any previously programmed + * VPE by setting PendingLast while clearing Valid. This has the + * effect of making sure no doorbell will be generated and we can + * then safely clear VPROPBASER.Valid. + */ + if (gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid) + gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast, + vlpi_base + GICR_VPENDBASER); + + /* + * If we can inherit the configuration from another RD, let's do + * so. Otherwise, we have to go through the allocation process. We + * assume that all RDs have the exact same requirements, as + * nothing will work otherwise. 
+ */ + val = inherit_vpe_l1_table_from_rd(&gic_data_rdist()->vpe_table_mask); + if (val & GICR_VPROPBASER_4_1_VALID) + goto out; + + gic_data_rdist()->vpe_table_mask = kzalloc(sizeof(cpumask_t), GFP_ATOMIC); + if (!gic_data_rdist()->vpe_table_mask) + return -ENOMEM; + + val = inherit_vpe_l1_table_from_its(); + if (val & GICR_VPROPBASER_4_1_VALID) + goto out; + + /* First probe the page size */ + val = FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, GIC_PAGE_SIZE_64K); + gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + val = gicr_read_vpropbaser(vlpi_base + GICR_VPROPBASER); + gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val); + esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val); + + switch (gpsz) { + default: + gpsz = GIC_PAGE_SIZE_4K; + fallthrough; + case GIC_PAGE_SIZE_4K: + psz = SZ_4K; + break; + case GIC_PAGE_SIZE_16K: + psz = SZ_16K; + break; + case GIC_PAGE_SIZE_64K: + psz = SZ_64K; + break; + } + + /* + * Start populating the register from scratch, including RO fields + * (which we want to print in debug cases...) + */ + val = 0; + val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, gpsz); + val |= FIELD_PREP(GICR_VPROPBASER_4_1_ENTRY_SIZE, esz); + + /* How many entries per GIC page? */ + esz++; + epp = psz / (esz * SZ_8); + + /* + * If we need more than just a single L1 page, flag the table + * as indirect and compute the number of required L1 pages. + */ + if (epp < ITS_MAX_VPEID) { + int nl2; + + val |= GICR_VPROPBASER_4_1_INDIRECT; + + /* Number of L2 pages required to cover the VPEID space */ + nl2 = DIV_ROUND_UP(ITS_MAX_VPEID, epp); + + /* Number of L1 pages to point to the L2 pages */ + npg = DIV_ROUND_UP(nl2 * SZ_8, psz); + } else { + npg = 1; + } + + val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg - 1); + + /* Right, that's the number of CPU pages we need for L1 */ + np = DIV_ROUND_UP(npg * psz, PAGE_SIZE); + + pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n", + np, npg, psz, epp, esz); + page = its_alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE)); + if (!page) + return -ENOMEM; + + gic_data_rdist()->vpe_l1_base = page_address(page); + pa = virt_to_phys(page_address(page)); + WARN_ON(!IS_ALIGNED(pa, psz)); + + val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12); + if (rdists_support_shareable()) { + val |= GICR_VPROPBASER_RaWb; + val |= GICR_VPROPBASER_InnerShareable; + } + val |= GICR_VPROPBASER_4_1_Z; + val |= GICR_VPROPBASER_4_1_VALID; + +out: + gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask); + + pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n", + smp_processor_id(), val, + cpumask_pr_args(gic_data_rdist()->vpe_table_mask)); + + return 0; +} + static int its_alloc_collections(struct its_node *its) { int i; @@ -1987,8 +3040,7 @@ static struct page *its_allocate_pending_table(gfp_t gfp_flags) { struct page *pend_page; - pend_page = alloc_pages(gfp_flags | __GFP_ZERO, - get_order(LPI_PENDBASE_SZ)); + pend_page = its_alloc_pages(gfp_flags | __GFP_ZERO, get_order(LPI_PENDBASE_SZ)); if (!pend_page) return NULL; @@ -2000,7 +3052,7 @@ static struct page *its_allocate_pending_table(gfp_t gfp_flags) static void its_free_pending_table(struct page *pt) { - free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ)); + its_free_pages(page_address(pt), get_order(LPI_PENDBASE_SZ)); } /* @@ -2059,6 +3111,46 @@ static int __init allocate_lpi_tables(void) return 0; } +static u64 read_vpend_dirty_clear(void __iomem *vlpi_base) +{ + u32 count = 1000000; /* 1s! 
*/ + bool clean; + u64 val; + + do { + val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER); + clean = !(val & GICR_VPENDBASER_Dirty); + if (!clean) { + count--; + cpu_relax(); + udelay(1); + } + } while (!clean && count); + + if (unlikely(!clean)) + pr_err_ratelimited("ITS virtual pending table not cleaning\n"); + + return val; +} + +static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set) +{ + u64 val; + + /* Make sure we wait until the RD is done with the initial scan */ + val = read_vpend_dirty_clear(vlpi_base); + val &= ~GICR_VPENDBASER_Valid; + val &= ~clr; + val |= set; + gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); + + val = read_vpend_dirty_clear(vlpi_base); + if (unlikely(val & GICR_VPENDBASER_Dirty)) + val |= GICR_VPENDBASER_PendingLast; + + return val; +} + static void its_cpu_init_lpis(void) { void __iomem *rbase = gic_data_rdist_rd_base(); @@ -2066,7 +3158,7 @@ static void its_cpu_init_lpis(void) phys_addr_t paddr; u64 val, tmp; - if (gic_data_rdist()->lpi_enabled) + if (gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED) return; val = readl_relaxed(rbase + GICR_CTLR); @@ -2085,15 +3177,13 @@ static void its_cpu_init_lpis(void) paddr &= GENMASK_ULL(51, 16); WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ)); - its_free_pending_table(gic_data_rdist()->pend_page); - gic_data_rdist()->pend_page = NULL; + gic_data_rdist()->flags |= RD_LOCAL_PENDTABLE_PREALLOCATED; goto out; } pend_page = gic_data_rdist()->pend_page; paddr = page_to_phys(pend_page); - WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ)); /* set PROPBASE */ val = (gic_rdists->prop_table_pa | @@ -2104,6 +3194,9 @@ static void its_cpu_init_lpis(void) gicr_write_propbaser(val, rbase + GICR_PROPBASER); tmp = gicr_read_propbaser(rbase + GICR_PROPBASER); + if (!rdists_support_shareable()) + tmp &= ~GICR_PROPBASER_SHAREABILITY_MASK; + if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) { if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) { /* @@ -2128,6 +3221,9 @@ static void its_cpu_init_lpis(void) gicr_write_pendbaser(val, rbase + GICR_PENDBASER); tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER); + if (!rdists_support_shareable()) + tmp &= ~GICR_PENDBASER_SHAREABILITY_MASK; + if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) { /* * The HW reports non-shareable, we must remove the @@ -2144,13 +3240,47 @@ static void its_cpu_init_lpis(void) val |= GICR_CTLR_ENABLE_LPIS; writel_relaxed(val, rbase + GICR_CTLR); +out: + if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) { + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + + /* + * It's possible for a CPU to receive VLPIs before it is + * scheduled as a vPE, especially for the first CPU, and the + * VLPI with INTID larger than 2^(IDbits+1) will be considered + * as out of range and dropped by GIC. + * So we initialize IDbits to a known value to avoid VLPI drop. + */ + val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; + pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n", + smp_processor_id(), val); + gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + + /* + * Also clear the Valid bit of GICR_VPENDBASER, in case some + * ancient programming gets left in and has the possibility of + * corrupting memory. + */ + val = its_clear_vpend_valid(vlpi_base, 0, 0); + } + + if (allocate_vpe_l1_table()) { + /* + * If the allocation has failed, we're in massive trouble. + * Disable direct injection, and pray that no VM was + * already running...
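To put numbers on the IDbits initialisation above: assuming 16 LPI ID bits (LPI_NRBITS is derived at probe time, so this is only an example), the programmed field value is 15 and, with LPIs starting at INTID 8192 per the architecture, 57344 VLPIs remain addressable:

	#include <stdio.h>

	int main(void)
	{
		/* Assumption: 16 LPI ID bits; the driver derives the real value */
		unsigned int idbits = 16;
		unsigned int field = idbits - 1;		/* programmed IDbits field */
		unsigned long max_intid = 1UL << idbits;	/* first INTID dropped */

		/* LPIs occupy INTID 8192 and up, per the GICv3 architecture */
		printf("IDbits field = %u, addressable (V)LPIs = %lu\n",
		       field, max_intid - 8192);
		return 0;
	}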
+ */ + gic_rdists->has_rvpeid = false; + gic_rdists->has_vlpis = false; + } + /* Make sure the GIC has seen the above */ dsb(sy); -out: - gic_data_rdist()->lpi_enabled = true; + gic_data_rdist()->flags |= RD_LOCAL_LPI_ENABLED; pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n", smp_processor_id(), - gic_data_rdist()->pend_page ? "allocated" : "reserved", + gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED ? + "reserved" : "allocated", &paddr); } @@ -2236,7 +3366,8 @@ static struct its_baser *its_get_baser(struct its_node *its, u32 type) return NULL; } -static bool its_alloc_table_entry(struct its_baser *baser, u32 id) +static bool its_alloc_table_entry(struct its_node *its, + struct its_baser *baser, u32 id) { struct page *page; u32 esz, idx; @@ -2256,7 +3387,8 @@ static bool its_alloc_table_entry(struct its_baser *baser, u32 id) /* Allocate memory for 2nd level table */ if (!table[idx]) { - page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(baser->psz)); + page = its_alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, + get_order(baser->psz)); if (!page) return false; @@ -2285,14 +3417,15 @@ static bool its_alloc_device_table(struct its_node *its, u32 dev_id) /* Don't allow device id that exceeds ITS hardware limit */ if (!baser) - return (ilog2(dev_id) < its->device_ids); + return (ilog2(dev_id) < device_ids(its)); - return its_alloc_table_entry(baser, dev_id); + return its_alloc_table_entry(its, baser, dev_id); } static bool its_alloc_vpe_table(u32 vpe_id) { struct its_node *its; + int cpu; /* * Make sure the L2 tables are allocated on *all* v4 ITSs. We @@ -2304,14 +3437,27 @@ static bool its_alloc_vpe_table(u32 vpe_id) list_for_each_entry(its, &its_nodes, entry) { struct its_baser *baser; - if (!its->is_v4) + if (!is_v4(its)) continue; baser = its_get_baser(its, GITS_BASER_TYPE_VCPU); if (!baser) return false; - if (!its_alloc_table_entry(baser, vpe_id)) + if (!its_alloc_table_entry(its, baser, vpe_id)) + return false; + } + + /* Non v4.1? No need to iterate RDs and go back early. */ + if (!gic_rdists->has_rvpeid) + return true; + + /* + * Make sure the L2 tables are allocated for all copies of + * the L1 table on *all* v4.1 RDs. + */ + for_each_possible_cpu(cpu) { + if (!allocate_vpe_l2_table(cpu, vpe_id)) return false; } @@ -2337,15 +3483,18 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, if (WARN_ON(!is_power_of_2(nvecs))) nvecs = roundup_pow_of_two(nvecs); - dev = kzalloc(sizeof(*dev), GFP_KERNEL); /* * Even if the device wants a single LPI, the ITT must be * sized as a power of two (and you need at least one bit...). 
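The ITT sizing that follows this comment is easy to check by hand: even a single-vector device gets two ITEs, and the result is padded up to ITS_ITT_ALIGN. A standalone sketch, assuming 8-byte ITEs (GITS_TYPER.ITT_entry_size is the authoritative value):

	#include <stdio.h>

	#define ITS_ITT_ALIGN	256	/* SZ_256 in the driver */

	static unsigned int itt_size(unsigned int nvecs, unsigned int ite_size)
	{
		unsigned int nr_ites = nvecs > 2 ? nvecs : 2;	/* max(2, nvecs) */
		unsigned int sz = nr_ites * ite_size;

		return sz > ITS_ITT_ALIGN ? sz : ITS_ITT_ALIGN;	/* max(sz, align) */
	}

	int main(void)
	{
		/* 1 vector still pads to 256 bytes; 64 vectors need 512 */
		printf("1 vec -> %u bytes, 64 vecs -> %u bytes\n",
		       itt_size(1, 8), itt_size(64, 8));
		return 0;
	}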
*/ nr_ites = max(2, nvecs); - sz = nr_ites * its->ite_size; - sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; - itt = kzalloc(sz, GFP_KERNEL); + sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1); + sz = max(sz, ITS_ITT_ALIGN); + + itt = itt_alloc_pool(its->numa_node, sz); + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (alloc_lpis) { lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis); if (lpi_map) @@ -2357,10 +3506,10 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, lpi_base = 0; } - if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) { + if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) { kfree(dev); - kfree(itt); - kfree(lpi_map); + itt_free_pool(itt, sz); + bitmap_free(lpi_map); kfree(col_map); return NULL; } @@ -2369,12 +3518,13 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, dev->its = its; dev->itt = itt; + dev->itt_sz = sz; dev->nr_ites = nr_ites; dev->event_map.lpi_map = lpi_map; dev->event_map.col_map = col_map; dev->event_map.lpi_base = lpi_base; dev->event_map.nr_lpis = nr_lpis; - mutex_init(&dev->event_map.vlpi_lock); + raw_spin_lock_init(&dev->event_map.vlpi_lock); dev->device_id = dev_id; INIT_LIST_HEAD(&dev->entry); @@ -2395,21 +3545,23 @@ static void its_free_device(struct its_device *its_dev) raw_spin_lock_irqsave(&its_dev->its->lock, flags); list_del(&its_dev->entry); raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); - kfree(its_dev->itt); + kfree(its_dev->event_map.col_map); + itt_free_pool(its_dev->itt, its_dev->itt_sz); kfree(its_dev); } -static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq) +static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq) { int idx; - idx = find_first_zero_bit(dev->event_map.lpi_map, - dev->event_map.nr_lpis); - if (idx == dev->event_map.nr_lpis) + /* Find a free LPI region in lpi_map and allocate them. */ + idx = bitmap_find_free_region(dev->event_map.lpi_map, + dev->event_map.nr_lpis, + get_count_order(nvecs)); + if (idx < 0) return -ENOSPC; *hwirq = dev->event_map.lpi_base + idx; - set_bit(idx, dev->event_map.lpi_map); return 0; } @@ -2421,9 +3573,10 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev, struct its_device *its_dev; struct msi_domain_info *msi_info; u32 dev_id; + int err = 0; /* - * We ignore "dev" entierely, and rely on the dev_id that has + * We ignore "dev" entirely, and rely on the dev_id that has * been passed via the scratchpad. This limits this domain's * usefulness to upper layers that definitely know that they * are built on top of the ITS. @@ -2443,6 +3596,7 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev, return -EINVAL; } + mutex_lock(&its->dev_alloc_lock); its_dev = its_find_device(its, dev_id); if (its_dev) { /* @@ -2450,22 +3604,54 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev, * another alias (PCI bridge of some sort). No need to * create the device. 
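One consequence of the reworked its_alloc_device_irq() above: events are now handed out as naturally aligned power-of-two regions, which is what multi-MSI requires. A single-word stand-in for bitmap_find_free_region() showing the same contract (the kernel helper works on arbitrarily long bitmaps and returns a negative errno on failure):

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Find a free, naturally aligned block of (1 << order) bits, claim
	 * it, and return its base index, or -1 if nothing fits.
	 */
	static int find_free_region64(uint64_t *map, unsigned int bits, unsigned int order)
	{
		unsigned int step = 1u << order;

		for (unsigned int base = 0; base + step <= bits; base += step) {
			uint64_t mask = (step == 64 ? ~0ULL : ((1ULL << step) - 1)) << base;

			if (!(*map & mask)) {
				*map |= mask;
				return base;
			}
		}
		return -1;
	}

	int main(void)
	{
		uint64_t lpi_map = 0;

		/* Two 4-vector MSI blocks land at naturally aligned offsets */
		printf("first: %d\n", find_free_region64(&lpi_map, 32, 2));
		printf("second: %d\n", find_free_region64(&lpi_map, 32, 2));
		return 0;
	}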
*/ + its_dev->shared = true; pr_debug("Reusing ITT for devID %x\n", dev_id); goto out; } its_dev = its_create_device(its, dev_id, nvec, true); - if (!its_dev) - return -ENOMEM; + if (!its_dev) { + err = -ENOMEM; + goto out; + } + + if (info->flags & MSI_ALLOC_FLAGS_PROXY_DEVICE) + its_dev->shared = true; pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec)); out: + mutex_unlock(&its->dev_alloc_lock); info->scratchpad[0].ptr = its_dev; - return 0; + return err; +} + +static void its_msi_teardown(struct irq_domain *domain, msi_alloc_info_t *info) +{ + struct its_device *its_dev = info->scratchpad[0].ptr; + + guard(mutex)(&its_dev->its->dev_alloc_lock); + + /* If the device is shared, keep everything around */ + if (its_dev->shared) + return; + + /* LPIs should have been already unmapped at this stage */ + if (WARN_ON_ONCE(!bitmap_empty(its_dev->event_map.lpi_map, + its_dev->event_map.nr_lpis))) + return; + + its_lpi_free(its_dev->event_map.lpi_map, + its_dev->event_map.lpi_base, + its_dev->event_map.nr_lpis); + + /* Unmap device/itt, and get rid of the tracking */ + its_send_mapd(its_dev, 0); + its_free_device(its_dev); } static struct msi_domain_ops its_msi_domain_ops = { .msi_prepare = its_msi_prepare, + .msi_teardown = its_msi_teardown, }; static int its_irq_gic_domain_alloc(struct irq_domain *domain, @@ -2497,25 +3683,34 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, { msi_alloc_info_t *info = args; struct its_device *its_dev = info->scratchpad[0].ptr; + struct its_node *its = its_dev->its; + struct irq_data *irqd; irq_hw_number_t hwirq; int err; int i; - for (i = 0; i < nr_irqs; i++) { - err = its_alloc_device_irq(its_dev, &hwirq); - if (err) - return err; + err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq); + if (err) + return err; + + err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev)); + if (err) + return err; - err = its_irq_gic_domain_alloc(domain, virq + i, hwirq); + for (i = 0; i < nr_irqs; i++) { + err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i); if (err) return err; irq_domain_set_hwirq_and_chip(domain, virq + i, - hwirq, &its_irq_chip, its_dev); - irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i))); + hwirq + i, &its_irq_chip, its_dev); + irqd = irq_get_irq_data(virq + i); + irqd_set_single_target(irqd); + irqd_set_affinity_on_activate(irqd); + irqd_set_resend_when_in_progress(irqd); pr_debug("ID:%d pID:%d vID:%d\n", - (int)(hwirq - its_dev->event_map.lpi_base), - (int) hwirq, virq + i); + (int)(hwirq + i - its_dev->event_map.lpi_base), + (int)(hwirq + i), virq + i); } return 0; @@ -2526,22 +3721,13 @@ static int its_irq_domain_activate(struct irq_domain *domain, { struct its_device *its_dev = irq_data_get_irq_chip_data(d); u32 event = its_get_event_id(d); - const struct cpumask *cpu_mask = cpu_online_mask; int cpu; - /* get the cpu_mask of local node */ - if (its_dev->its->numa_node >= 0) - cpu_mask = cpumask_of_node(its_dev->its->numa_node); - - /* Bind the LPI to the first possible CPU */ - cpu = cpumask_first_and(cpu_mask, cpu_online_mask); - if (cpu >= nr_cpu_ids) { - if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) - return -EINVAL; - - cpu = cpumask_first(cpu_online_mask); - } + cpu = its_select_cpu(d, cpu_online_mask); + if (cpu < 0 || cpu >= nr_cpu_ids) + return -EINVAL; + its_inc_lpi_count(d, cpu); its_dev->event_map.col_map[event] = cpu; irq_data_update_effective_affinity(d, cpumask_of(cpu)); @@ -2556,6 +3742,7 @@ static void its_irq_domain_deactivate(struct irq_domain 
*domain, struct its_device *its_dev = irq_data_get_irq_chip_data(d); u32 event = its_get_event_id(d); + its_dec_lpi_count(d, its_dev->event_map.col_map[event]); /* Stop the delivery of interrupts */ its_send_discard(its_dev, event); } @@ -2567,35 +3754,22 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, struct its_device *its_dev = irq_data_get_irq_chip_data(d); int i; + bitmap_release_region(its_dev->event_map.lpi_map, + its_get_event_id(irq_domain_get_irq_data(domain, virq)), + get_count_order(nr_irqs)); + for (i = 0; i < nr_irqs; i++) { struct irq_data *data = irq_domain_get_irq_data(domain, virq + i); - u32 event = its_get_event_id(data); - - /* Mark interrupt index as unused */ - clear_bit(event, its_dev->event_map.lpi_map); - /* Nuke the entry in the domain */ irq_domain_reset_irq_data(data); } - /* If all interrupts have been freed, start mopping the floor */ - if (bitmap_empty(its_dev->event_map.lpi_map, - its_dev->event_map.nr_lpis)) { - its_lpi_free(its_dev->event_map.lpi_map, - its_dev->event_map.lpi_base, - its_dev->event_map.nr_lpis); - kfree(its_dev->event_map.col_map); - - /* Unmap device/itt */ - its_send_mapd(its_dev, 0); - its_free_device(its_dev); - } - irq_domain_free_irqs_parent(domain, virq, nr_irqs); } static const struct irq_domain_ops its_domain_ops = { + .select = msi_lib_irq_domain_select, .alloc = its_irq_domain_alloc, .free = its_irq_domain_free, .activate = its_irq_domain_activate, @@ -2605,7 +3779,7 @@ static const struct irq_domain_ops its_domain_ops = { /* * This is insane. * - * If a GICv4 doesn't implement Direct LPIs (which is extremely + * If a GICv4.0 doesn't implement Direct LPIs (which is extremely * likely), the only way to perform an invalidate is to use a fake * device to issue an INV command, implying that the LPI has first * been mapped to some event on that device. Since this is not exactly @@ -2613,9 +3787,20 @@ static const struct irq_domain_ops its_domain_ops = { * only issue an UNMAP if we're short on available slots. * * Broken by design(tm). + * + * GICv4.1, on the other hand, mandates that we're able to invalidate + * by writing to a MMIO register. It doesn't implement the whole of + * DirectLPI, but that's good enough. And most of the time, we don't + * even have to invalidate anything, as the redistributor can be told + * whether to generate a doorbell or not (we thus leave it enabled, + * always). */ static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe) { + /* GICv4.1 doesn't use a proxy, so nothing to do here */ + if (gic_rdists->has_rvpeid) + return; + /* Already unmapped? */ if (vpe->vpe_proxy_event == -1) return; @@ -2638,6 +3823,10 @@ static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe) static void its_vpe_db_proxy_unmap(struct its_vpe *vpe) { + /* GICv4.1 doesn't use a proxy, so nothing to do here */ + if (gic_rdists->has_rvpeid) + return; + if (!gic_rdists->has_direct_lpi) { unsigned long flags; @@ -2649,6 +3838,10 @@ static void its_vpe_db_proxy_unmap(struct its_vpe *vpe) static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe) { + /* GICv4.1 doesn't use a proxy, so nothing to do here */ + if (gic_rdists->has_rvpeid) + return; + /* Already mapped? 
*/ if (vpe->vpe_proxy_event != -1) return; @@ -2671,13 +3864,16 @@ static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to) unsigned long flags; struct its_collection *target_col; + /* GICv4.1 doesn't use a proxy, so nothing to do here */ + if (gic_rdists->has_rvpeid) + return; + if (gic_rdists->has_direct_lpi) { void __iomem *rdbase; rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base; gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); - while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) - cpu_relax(); + wait_for_syncr(rdbase); return; } @@ -2693,32 +3889,121 @@ static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to) raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); } +static void its_vpe_4_1_invall_locked(int cpu, struct its_vpe *vpe) +{ + void __iomem *rdbase; + u64 val; + + val = GICR_INVALLR_V; + val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id); + + guard(raw_spinlock)(&gic_data_rdist_cpu(cpu)->rd_lock); + rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base; + gic_write_lpir(val, rdbase + GICR_INVALLR); + wait_for_syncr(rdbase); +} + static int its_vpe_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force) { struct its_vpe *vpe = irq_data_get_irq_chip_data(d); - int cpu = cpumask_first(mask_val); + unsigned int from, cpu = nr_cpu_ids; + struct cpumask *table_mask; + struct its_node *its; + unsigned long flags; + + /* + * Check if we're racing against a VPE being destroyed, for + * which we don't want to allow a VMOVP. + */ + if (!atomic_read(&vpe->vmapp_count)) { + if (gic_requires_eager_mapping()) + return -EINVAL; + + /* + * If we lazily map the VPEs, this isn't an error and + * we can exit cleanly. + */ + cpu = cpumask_first(mask_val); + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + return IRQ_SET_MASK_OK_DONE; + } /* * Changing affinity is mega expensive, so let's be as lazy as * we can and only do it if we really have to. Also, if mapped * into the proxy device, we need to move the doorbell * interrupt to its new location. + * + * Another thing is that changing the affinity of a vPE affects + * *other interrupts* such as all the vLPIs that are routed to + * this vPE. This means that the irq_desc lock is not enough to + * protect us, and that we must ensure nobody samples vpe->col_idx + * during the update, hence the lock below which must also be + * taken on any vLPI handling path that evaluates vpe->col_idx. + * + * Finally, we must protect ourselves against concurrent updates of + * the mapping state on this VM should the ITS list be in use (see + * the shortcut in its_send_vmovp() otherwise). */ - if (vpe->col_idx != cpu) { - int from = vpe->col_idx; + if (its_list_map) + raw_spin_lock(&vpe->its_vm->vmapp_lock); + + from = vpe_to_cpuid_lock(vpe, &flags); + table_mask = gic_data_rdist_cpu(from)->vpe_table_mask; - vpe->col_idx = cpu; - its_send_vmovp(vpe); - its_vpe_db_proxy_move(vpe, from, cpu); + /* + * If we are offered another CPU in the same GICv4.1 ITS + * affinity, pick this one. Otherwise, any CPU will do.
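The selection implemented right after this comment can be restated compactly: stay on the current CPU if it is both requested and in the same vPE-table affinity group, otherwise prefer any requested CPU in that group, otherwise take the first requested CPU. A standalone restatement with 64-bit words standing in for cpumasks (the driver also copes with a missing group mask, elided here):

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative policy only; "requested" is assumed non-empty */
	static int pick_cpu(uint64_t requested, uint64_t same_group, int from)
	{
		uint64_t both = requested & same_group;

		if (both & (1ULL << from))
			return from;			/* stay put: cheapest option */
		if (both)
			return __builtin_ctzll(both);	/* move within the group */
		return __builtin_ctzll(requested);	/* last resort: any CPU */
	}

	int main(void)
	{
		/* CPUs 0-3 share the vPE table group; caller asks for {2,5} */
		printf("target = %d\n", pick_cpu(0x24, 0x0f, 2));
		return 0;
	}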
+ */ + if (table_mask) + cpu = cpumask_any_and(mask_val, table_mask); + if (cpu < nr_cpu_ids) { + if (cpumask_test_cpu(from, mask_val) && + cpumask_test_cpu(from, table_mask)) + cpu = from; + } else { + cpu = cpumask_first(mask_val); } + if (from == cpu) + goto out; + + vpe->col_idx = cpu; + + its_send_vmovp(vpe); + + its = find_4_1_its(); + if (its && its->flags & ITS_FLAGS_WORKAROUND_HISILICON_162100801) + its_vpe_4_1_invall_locked(cpu, vpe); + + its_vpe_db_proxy_move(vpe, from, cpu); + +out: irq_data_update_effective_affinity(d, cpumask_of(cpu)); + vpe_to_cpuid_unlock(vpe, flags); + + if (its_list_map) + raw_spin_unlock(&vpe->its_vm->vmapp_lock); return IRQ_SET_MASK_OK_DONE; } +static void its_wait_vpt_parse_complete(void) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val; + + if (!gic_rdists->has_vpend_valid_dirty) + return; + + WARN_ON_ONCE(readq_relaxed_poll_timeout_atomic(vlpi_base + GICR_VPENDBASER, + val, + !(val & GICR_VPENDBASER_Dirty), + 1, 500)); +} + static void its_vpe_schedule(struct its_vpe *vpe) { void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); @@ -2728,14 +4013,18 @@ static void its_vpe_schedule(struct its_vpe *vpe) val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) & GENMASK_ULL(51, 12); val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; - val |= GICR_VPROPBASER_RaWb; - val |= GICR_VPROPBASER_InnerShareable; - gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + if (rdists_support_shareable()) { + val |= GICR_VPROPBASER_RaWb; + val |= GICR_VPROPBASER_InnerShareable; + } + gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); val = virt_to_phys(page_address(vpe->vpt_page)) & GENMASK_ULL(51, 16); - val |= GICR_VPENDBASER_RaWaWb; - val |= GICR_VPENDBASER_NonShareable; + if (rdists_support_shareable()) { + val |= GICR_VPENDBASER_RaWaWb; + val |= GICR_VPENDBASER_InnerShareable; + } /* * There is no good way of finding out if the pending table is * empty as we can race against the doorbell interrupt very @@ -2748,47 +4037,28 @@ static void its_vpe_schedule(struct its_vpe *vpe) val |= GICR_VPENDBASER_PendingLast; val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0; val |= GICR_VPENDBASER_Valid; - gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); + gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); } static void its_vpe_deschedule(struct its_vpe *vpe) { void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); - u32 count = 1000000; /* 1s! 
*/ - bool clean; u64 val; - /* We're being scheduled out */ - val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); - val &= ~GICR_VPENDBASER_Valid; - gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); + val = its_clear_vpend_valid(vlpi_base, 0, 0); - do { - val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); - clean = !(val & GICR_VPENDBASER_Dirty); - if (!clean) { - count--; - cpu_relax(); - udelay(1); - } - } while (!clean && count); - - if (unlikely(!clean && !count)) { - pr_err_ratelimited("ITS virtual pending table not cleaning\n"); - vpe->idai = false; - vpe->pending_last = true; - } else { - vpe->idai = !!(val & GICR_VPENDBASER_IDAI); - vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); - } + vpe->idai = !!(val & GICR_VPENDBASER_IDAI); + vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); } static void its_vpe_invall(struct its_vpe *vpe) { struct its_node *its; + guard(raw_spinlock_irqsave)(&vpe->its_vm->vmapp_lock); + list_for_each_entry(its, &its_nodes, entry) { - if (!its->is_v4) + if (!is_v4(its)) continue; if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr]) @@ -2817,6 +4087,10 @@ static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) its_vpe_deschedule(vpe); return 0; + case COMMIT_VPE: + its_wait_vpt_parse_complete(); + return 0; + case INVALL_VPE: its_vpe_invall(vpe); return 0; @@ -2843,16 +4117,10 @@ static void its_vpe_send_inv(struct irq_data *d) { struct its_vpe *vpe = irq_data_get_irq_chip_data(d); - if (gic_rdists->has_direct_lpi) { - void __iomem *rdbase; - - rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; - gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR); - while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) - cpu_relax(); - } else { + if (gic_rdists->has_direct_lpi) + __direct_lpi_inv(d, d->parent_data->hwirq); + else its_vpe_send_cmd(vpe, its_send_inv); - } } static void its_vpe_mask_irq(struct irq_data *d) @@ -2891,8 +4159,7 @@ static int its_vpe_set_irqchip_state(struct irq_data *d, gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR); } else { gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); - while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) - cpu_relax(); + wait_for_syncr(rdbase); } } else { if (state) @@ -2904,24 +4171,389 @@ static int its_vpe_set_irqchip_state(struct irq_data *d, return 0; } +static int its_vpe_retrigger(struct irq_data *d) +{ + return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true); +} + static struct irq_chip its_vpe_irq_chip = { .name = "GICv4-vpe", .irq_mask = its_vpe_mask_irq, .irq_unmask = its_vpe_unmask_irq, .irq_eoi = irq_chip_eoi_parent, .irq_set_affinity = its_vpe_set_affinity, + .irq_retrigger = its_vpe_retrigger, .irq_set_irqchip_state = its_vpe_set_irqchip_state, .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity, }; +static struct its_node *find_4_1_its(void) +{ + struct its_node *its = *this_cpu_ptr(&local_4_1_its); + + if (!its) { + list_for_each_entry(its, &its_nodes, entry) { + if (is_v4_1(its)) + return its; + } + + /* Oops? */ + its = NULL; + } + + return its; +} + +static void its_vpe_4_1_send_inv(struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_node *its; + + /* + * GICv4.1 wants doorbells to be invalidated using the + * INVDB command in order to be broadcast to all RDs. Send + * it to the first valid ITS, and let the HW do its magic. 
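A small convention worth noting in its_vpe_retrigger() above: irq_retrigger callbacks return nonzero when the retrigger was performed in hardware, while set_irqchip_state follows the usual zero-on-success rule, hence the logical negation. In sketch form:

	#include <stdio.h>

	/* set_irqchip_state convention: 0 on success, negative errno otherwise */
	static int set_pending_stub(void)
	{
		return 0;	/* pretend the pending bit was set */
	}

	/* irq_retrigger convention: nonzero means the retrigger was handled */
	static int retrigger_stub(void)
	{
		return !set_pending_stub();
	}

	int main(void)
	{
		printf("retriggered: %d\n", retrigger_stub());
		return 0;
	}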
+ */ + its = find_4_1_its(); + if (its) + its_send_invdb(its, vpe); +} + +static void its_vpe_4_1_mask_irq(struct irq_data *d) +{ + lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0); + its_vpe_4_1_send_inv(d); +} + +static void its_vpe_4_1_unmask_irq(struct irq_data *d) +{ + lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED); + its_vpe_4_1_send_inv(d); +} + +static void its_vpe_4_1_schedule(struct its_vpe *vpe, + struct its_cmd_info *info) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val = 0; + + /* Schedule the VPE */ + val |= GICR_VPENDBASER_Valid; + val |= info->g0en ? GICR_VPENDBASER_4_1_VGRP0EN : 0; + val |= info->g1en ? GICR_VPENDBASER_4_1_VGRP1EN : 0; + val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id); + + gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); +} + +static void its_vpe_4_1_deschedule(struct its_vpe *vpe, + struct its_cmd_info *info) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val; + + if (info->req_db) { + unsigned long flags; + + /* + * vPE is going to block: make the vPE non-resident with + * PendingLast clear and DB set. The GIC guarantees that if + * we read-back PendingLast clear, then a doorbell will be + * delivered when an interrupt comes. + * + * Note the locking to deal with the concurrent update of + * pending_last from the doorbell interrupt handler that can + * run concurrently. + */ + raw_spin_lock_irqsave(&vpe->vpe_lock, flags); + val = its_clear_vpend_valid(vlpi_base, + GICR_VPENDBASER_PendingLast, + GICR_VPENDBASER_4_1_DB); + vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); + raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags); + } else { + /* + * We're not blocking, so just make the vPE non-resident + * with PendingLast set, indicating that we'll be back. + */ + val = its_clear_vpend_valid(vlpi_base, + 0, + GICR_VPENDBASER_PendingLast); + vpe->pending_last = true; + } +} + +static void its_vpe_4_1_invall(struct its_vpe *vpe) +{ + unsigned long flags; + int cpu; + + /* Target the redistributor this vPE is currently known on */ + cpu = vpe_to_cpuid_lock(vpe, &flags); + its_vpe_4_1_invall_locked(cpu, vpe); + vpe_to_cpuid_unlock(vpe, flags); +} + +static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_cmd_info *info = vcpu_info; + + switch (info->cmd_type) { + case SCHEDULE_VPE: + its_vpe_4_1_schedule(vpe, info); + return 0; + + case DESCHEDULE_VPE: + its_vpe_4_1_deschedule(vpe, info); + return 0; + + case COMMIT_VPE: + its_wait_vpt_parse_complete(); + return 0; + + case INVALL_VPE: + its_vpe_4_1_invall(vpe); + return 0; + + default: + return -EINVAL; + } +} + +static struct irq_chip its_vpe_4_1_irq_chip = { + .name = "GICv4.1-vpe", + .irq_mask = its_vpe_4_1_mask_irq, + .irq_unmask = its_vpe_4_1_unmask_irq, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_affinity = its_vpe_set_affinity, + .irq_set_vcpu_affinity = its_vpe_4_1_set_vcpu_affinity, +}; + +static void its_configure_sgi(struct irq_data *d, bool clear) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_cmd_desc desc; + + desc.its_vsgi_cmd.vpe = vpe; + desc.its_vsgi_cmd.sgi = d->hwirq; + desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority; + desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled; + desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group; + desc.its_vsgi_cmd.clear = clear; + + /* + * GICv4.1 allows us to send VSGI commands to any ITS as long as the + * destination VPE is mapped there. 
Since we map them eagerly at + * activation time, we're pretty sure the first GICv4.1 ITS will do. + */ + its_send_single_vcommand(find_4_1_its(), its_build_vsgi_cmd, &desc); +} + +static void its_sgi_mask_irq(struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + vpe->sgi_config[d->hwirq].enabled = false; + its_configure_sgi(d, false); +} + +static void its_sgi_unmask_irq(struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + vpe->sgi_config[d->hwirq].enabled = true; + its_configure_sgi(d, false); +} + +static int its_sgi_set_affinity(struct irq_data *d, + const struct cpumask *mask_val, + bool force) +{ + /* + * There is no notion of affinity for virtual SGIs, at least + * not on the host (since they can only be targeting a vPE). + * Tell the kernel we've done whatever it asked for. + */ + irq_data_update_effective_affinity(d, mask_val); + return IRQ_SET_MASK_OK; +} + +static int its_sgi_set_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool state) +{ + if (which != IRQCHIP_STATE_PENDING) + return -EINVAL; + + if (state) { + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_node *its = find_4_1_its(); + u64 val; + + val = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id); + val |= FIELD_PREP(GITS_SGIR_VINTID, d->hwirq); + writeq_relaxed(val, its->sgir_base + GITS_SGIR - SZ_128K); + } else { + its_configure_sgi(d, true); + } + + return 0; +} + +static int its_sgi_get_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, bool *val) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + void __iomem *base; + unsigned long flags; + u32 count = 1000000; /* 1s! */ + u32 status; + int cpu; + + if (which != IRQCHIP_STATE_PENDING) + return -EINVAL; + + /* + * Locking galore! We can race against two different events: + * + * - Concurrent vPE affinity change: we must make sure it cannot + * happen, or we'll talk to the wrong redistributor. This is + * identical to what happens with vLPIs. + * + * - Concurrent VSGIPENDR access: As it involves accessing two + * MMIO registers, this must be made atomic one way or another. 
+ */ + cpu = vpe_to_cpuid_lock(vpe, &flags); + raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock); + base = gic_data_rdist_cpu(cpu)->rd_base + SZ_128K; + writel_relaxed(vpe->vpe_id, base + GICR_VSGIR); + do { + status = readl_relaxed(base + GICR_VSGIPENDR); + if (!(status & GICR_VSGIPENDR_BUSY)) + goto out; + + count--; + if (!count) { + pr_err_ratelimited("Unable to get SGI status\n"); + goto out; + } + cpu_relax(); + udelay(1); + } while (count); + +out: + raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock); + vpe_to_cpuid_unlock(vpe, flags); + + if (!count) + return -ENXIO; + + *val = !!(status & (1 << d->hwirq)); + + return 0; +} + +static int its_sgi_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_cmd_info *info = vcpu_info; + + switch (info->cmd_type) { + case PROP_UPDATE_VSGI: + vpe->sgi_config[d->hwirq].priority = info->priority; + vpe->sgi_config[d->hwirq].group = info->group; + its_configure_sgi(d, false); + return 0; + + default: + return -EINVAL; + } +} + +static struct irq_chip its_sgi_irq_chip = { + .name = "GICv4.1-sgi", + .irq_mask = its_sgi_mask_irq, + .irq_unmask = its_sgi_unmask_irq, + .irq_set_affinity = its_sgi_set_affinity, + .irq_set_irqchip_state = its_sgi_set_irqchip_state, + .irq_get_irqchip_state = its_sgi_get_irqchip_state, + .irq_set_vcpu_affinity = its_sgi_set_vcpu_affinity, +}; + +static int its_sgi_irq_domain_alloc(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs, + void *args) +{ + struct its_vpe *vpe = args; + int i; + + /* Yes, we do want 16 SGIs */ + WARN_ON(nr_irqs != 16); + + for (i = 0; i < 16; i++) { + vpe->sgi_config[i].priority = 0; + vpe->sgi_config[i].enabled = false; + vpe->sgi_config[i].group = false; + + irq_domain_set_hwirq_and_chip(domain, virq + i, i, + &its_sgi_irq_chip, vpe); + irq_set_status_flags(virq + i, IRQ_DISABLE_UNLAZY); + } + + return 0; +} + +static void its_sgi_irq_domain_free(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs) +{ + /* Nothing to do */ +} + +static int its_sgi_irq_domain_activate(struct irq_domain *domain, + struct irq_data *d, bool reserve) +{ + /* Write out the initial SGI configuration */ + its_configure_sgi(d, false); + return 0; +} + +static void its_sgi_irq_domain_deactivate(struct irq_domain *domain, + struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + /* + * The VSGI command is awkward: + * + * - To change the configuration, CLEAR must be set to false, + * leaving the pending bit unchanged. + * - To clear the pending bit, CLEAR must be set to true, leaving + * the configuration unchanged. + * + * You just can't do both at once, hence the two commands below. 
+ */ + vpe->sgi_config[d->hwirq].enabled = false; + its_configure_sgi(d, false); + its_configure_sgi(d, true); +} + +static const struct irq_domain_ops its_sgi_domain_ops = { + .alloc = its_sgi_irq_domain_alloc, + .free = its_sgi_irq_domain_free, + .activate = its_sgi_irq_domain_activate, + .deactivate = its_sgi_irq_domain_deactivate, +}; + static int its_vpe_id_alloc(void) { - return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL); + return ida_alloc_max(&its_vpeid_ida, ITS_MAX_VPEID - 1, GFP_KERNEL); } static void its_vpe_id_free(u16 id) { - ida_simple_remove(&its_vpeid_ida, id); + ida_free(&its_vpeid_ida, id); } static int its_vpe_init(struct its_vpe *vpe) @@ -2943,13 +4575,16 @@ static int its_vpe_init(struct its_vpe *vpe) if (!its_alloc_vpe_table(vpe_id)) { its_vpe_id_free(vpe_id); - its_free_pending_table(vpe->vpt_page); + its_free_pending_table(vpt_page); return -ENOMEM; } + raw_spin_lock_init(&vpe->vpe_lock); vpe->vpe_id = vpe_id; vpe->vpt_page = vpt_page; - vpe->vpe_proxy_event = -1; + atomic_set(&vpe->vmapp_count, 0); + if (!gic_rdists->has_rvpeid) + vpe->vpe_proxy_event = -1; return 0; } @@ -2991,13 +4626,12 @@ static void its_vpe_irq_domain_free(struct irq_domain *domain, static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *args) { + struct irq_chip *irqchip = &its_vpe_irq_chip; struct its_vm *vm = args; unsigned long *bitmap; struct page *vprop_page; int base, nr_ids, i, err = 0; - BUG_ON(!vm); - bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids); if (!bitmap) return -ENOMEM; @@ -3017,6 +4651,10 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq vm->db_lpi_base = base; vm->nr_db_lpis = nr_ids; vm->vprop_page = vprop_page; + raw_spin_lock_init(&vm->vmapp_lock); + + if (gic_rdists->has_rvpeid) + irqchip = &its_vpe_4_1_irq_chip; for (i = 0; i < nr_irqs; i++) { vm->vpes[i]->vpe_db_lpi = base + i; @@ -3028,17 +4666,13 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq if (err) break; irq_domain_set_hwirq_and_chip(domain, virq + i, i, - &its_vpe_irq_chip, vm->vpes[i]); + irqchip, vm->vpes[i]); set_bit(i, bitmap); + irqd_set_resend_when_in_progress(irq_get_irq_data(virq + i)); } - if (err) { - if (i > 0) - its_vpe_irq_domain_free(domain, virq, i - 1); - - its_lpi_free(bitmap, base, nr_ids); - its_free_prop_table(vprop_page); - } + if (err) + its_vpe_irq_domain_free(domain, virq, i); return err; } @@ -3049,23 +4683,26 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain, struct its_vpe *vpe = irq_data_get_irq_chip_data(d); struct its_node *its; - /* If we use the list map, we issue VMAPP on demand... */ - if (its_list_map) - return 0; - /* Map the VPE to the first possible CPU */ vpe->col_idx = cpumask_first(cpu_online_mask); + irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); + + /* + * If we use the list map, we issue VMAPP on demand... Unless + * we're on a GICv4.1 and we eagerly map the VPE on all ITSs + * so that VSGIs can work. 
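The predicate behind this choice, gic_requires_eager_mapping(), is used here and in the affinity path but is not part of this hunk; it plausibly reduces to "eager unless this is a GICv4.0 system that can defer through the ITS list". A standalone sketch under that assumption:

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-ins for driver state, which the real driver keeps globally */
	static unsigned long its_list_map;	/* nonzero iff the ITS list is used */
	static bool has_rvpeid;			/* GICv4.1 redistributors present */

	/*
	 * Eager VMAPP is required when there is no ITS list to defer through,
	 * or on GICv4.1, where vSGIs need the vPE mapped up front.
	 */
	static bool requires_eager_mapping(void)
	{
		return !its_list_map || has_rvpeid;
	}

	int main(void)
	{
		its_list_map = 1;	/* GICv4.0 with an ITS list: lazy is fine */
		printf("eager: %d\n", requires_eager_mapping());

		has_rvpeid = true;	/* GICv4.1: must map eagerly for vSGIs */
		printf("eager: %d\n", requires_eager_mapping());
		return 0;
	}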
+ */ + if (!gic_requires_eager_mapping()) + return 0; list_for_each_entry(its, &its_nodes, entry) { - if (!its->is_v4) + if (!is_v4(its)) continue; its_send_vmapp(its, vpe, true); its_send_vinvall(its, vpe); } - irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); - return 0; } @@ -3076,18 +4713,27 @@ static void its_vpe_irq_domain_deactivate(struct irq_domain *domain, struct its_node *its; /* - * If we use the list map, we unmap the VPE once no VLPIs are - * associated with the VM. + * If we use the list map on GICv4.0, we unmap the VPE once no + * VLPIs are associated with the VM. */ - if (its_list_map) + if (!gic_requires_eager_mapping()) return; list_for_each_entry(its, &its_nodes, entry) { - if (!its->is_v4) + if (!is_v4(its)) continue; its_send_vmapp(its, vpe, false); } + + /* + * There may be a direct read to the VPT after unmapping the + * vPE. To guarantee the validity of this, we make the VPT + * memory coherent with the CPU caches here. + */ + if (find_4_1_its() && !atomic_read(&vpe->vmapp_count)) + gic_flush_dcache_to_poc(page_address(vpe->vpt_page), + LPI_PENDBASE_SZ); } static const struct irq_domain_ops its_vpe_domain_ops = { @@ -3134,8 +4780,9 @@ static bool __maybe_unused its_enable_quirk_cavium_22375(void *data) { struct its_node *its = data; - /* erratum 22375: only alloc 8MB table size */ - its->device_ids = 0x14; /* 20 bits, 8MB */ + /* erratum 22375: only alloc 8MB table size (20 bits) */ + its->typer &= ~GITS_TYPER_DEVBITS; + its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1); its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; return true; } @@ -3155,7 +4802,8 @@ static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data) { struct its_node *its = data; /* On QDF2400, the size of the ITE is 16Bytes */ - its->ite_size = 16; + its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE; + its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1); return true; } @@ -3189,11 +4837,13 @@ static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data) its->get_msi_base = its_irq_get_msi_base_pre_its; ids = ilog2(pre_its_window[1]) - 2; - if (its->device_ids > ids) - its->device_ids = ids; + if (device_ids(its) > ids) { + its->typer &= ~GITS_TYPER_DEVBITS; + its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1); + } /* the pre-ITS breaks isolation, so disable MSI remapping */ - its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP; + its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_ISOLATED_MSI; return true; } return false; @@ -3211,6 +4861,47 @@ static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data) return true; } +static bool __maybe_unused its_enable_rk3588001(void *data) +{ + struct its_node *its = data; + + if (!of_machine_is_compatible("rockchip,rk3588") && + !of_machine_is_compatible("rockchip,rk3588s")) + return false; + + its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE; + gic_rdists->flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE; + + return true; +} + +static bool its_set_non_coherent(void *data) +{ + struct its_node *its = data; + + its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE; + return true; +} + +static bool __maybe_unused its_enable_quirk_hip09_162100801(void *data) +{ + struct its_node *its = data; + + its->flags |= ITS_FLAGS_WORKAROUND_HISILICON_162100801; + return true; +} + +static bool __maybe_unused its_enable_rk3568002(void *data) +{ + if (!of_machine_is_compatible("rockchip,rk3566") && + !of_machine_is_compatible("rockchip,rk3568")) + return false; + + gfp_flags_quirk |= GFP_DMA32; + + return true; +} + static const struct gic_quirk
its_quirks[] = { #ifdef CONFIG_CAVIUM_ERRATUM_22375 { @@ -3257,6 +4948,35 @@ static const struct gic_quirk its_quirks[] = { .init = its_enable_quirk_hip07_161600802, }, #endif +#ifdef CONFIG_HISILICON_ERRATUM_162100801 + { + .desc = "ITS: Hip09 erratum 162100801", + .iidr = 0x00051736, + .mask = 0xffffffff, + .init = its_enable_quirk_hip09_162100801, + }, +#endif +#ifdef CONFIG_ROCKCHIP_ERRATUM_3588001 + { + .desc = "ITS: Rockchip erratum RK3588001", + .iidr = 0x0201743b, + .mask = 0xffffffff, + .init = its_enable_rk3588001, + }, +#endif + { + .desc = "ITS: non-coherent attribute", + .property = "dma-noncoherent", + .init = its_set_non_coherent, + }, +#ifdef CONFIG_ROCKCHIP_ERRATUM_3568002 + { + .desc = "ITS: Rockchip erratum RK3568002", + .iidr = 0x0201743b, + .mask = 0xffffffff, + .init = its_enable_rk3568002, + }, +#endif { } }; @@ -3266,9 +4986,13 @@ static void its_enable_quirks(struct its_node *its) u32 iidr = readl_relaxed(its->base + GITS_IIDR); gic_enable_quirks(iidr, its_quirks, its); + + if (is_of_node(its->fwnode_handle)) + gic_enable_of_quirks(to_of_node(its->fwnode_handle), + its_quirks, its); } -static int its_save_disable(void) +static int its_save_disable(void *data) { struct its_node *its; int err = 0; @@ -3277,9 +5001,6 @@ static int its_save_disable(void) list_for_each_entry(its, &its_nodes, entry) { void __iomem *base; - if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE)) - continue; - base = its->base; its->ctlr_save = readl_relaxed(base + GITS_CTLR); err = its_force_quiescent(base); @@ -3298,9 +5019,6 @@ err: list_for_each_entry_continue_reverse(its, &its_nodes, entry) { void __iomem *base; - if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE)) - continue; - base = its->base; writel_relaxed(its->ctlr_save, base + GITS_CTLR); } @@ -3310,7 +5028,7 @@ err: return err; } -static void its_restore_enable(void) +static void its_restore_enable(void *data) { struct its_node *its; int ret; @@ -3320,9 +5038,6 @@ static void its_restore_enable(void) void __iomem *base; int i; - if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE)) - continue; - base = its->base; /* @@ -3330,7 +5045,10 @@ static void its_restore_enable(void) * don't restore it since writing to CBASER or BASER<n> * registers is undefined according to the GIC v3 ITS * Specification. + * + * Firmware resuming with the ITS enabled is terminally broken. 
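For reference, the its_quirks[] table above is consumed by gic_enable_quirks(), which does a masked IIDR compare per entry (the dma-noncoherent entry matches on a firmware property instead). A simplified standalone model of the IIDR path:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct quirk {
		const char *desc;
		uint32_t iidr, mask;
		bool (*init)(void *data);
	};

	static bool apply_stub(void *data)
	{
		(void)data;
		return true;	/* the real init hooks may decline the match */
	}

	/* Masked compare, mirroring how the driver's quirk walker matches IIDRs */
	static void enable_quirks(uint32_t iidr, const struct quirk *q, void *data)
	{
		for (; q->desc; q++)
			if ((iidr & q->mask) == q->iidr && q->init(data))
				printf("enabled: %s\n", q->desc);
	}

	int main(void)
	{
		const struct quirk quirks[] = {
			{ "example erratum", 0x0201743b, 0xffffffff, apply_stub },
			{ }
		};

		enable_quirks(0x0201743b, quirks, NULL);
		return 0;
	}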
*/ + WARN_ON(readl_relaxed(base + GITS_CTLR) & GITS_CTLR_ENABLE); ret = its_force_quiescent(base); if (ret) { pr_err("ITS@%pa: failed to quiesce on resume: %d\n", @@ -3370,33 +5088,69 @@ static void its_restore_enable(void) raw_spin_unlock(&its_lock); } -static struct syscore_ops its_syscore_ops = { +static const struct syscore_ops its_syscore_ops = { .suspend = its_save_disable, .resume = its_restore_enable, }; -static int its_init_domain(struct fwnode_handle *handle, struct its_node *its) +static struct syscore its_syscore = { + .ops = &its_syscore_ops, +}; + +static void __init __iomem *its_map_one(struct resource *res, int *err) +{ + void __iomem *its_base; + u32 val; + + its_base = ioremap(res->start, SZ_64K); + if (!its_base) { + pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start); + *err = -ENOMEM; + return NULL; + } + + val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK; + if (val != 0x30 && val != 0x40) { + pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start); + *err = -ENODEV; + goto out_unmap; + } + + *err = its_force_quiescent(its_base); + if (*err) { + pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start); + goto out_unmap; + } + + return its_base; + +out_unmap: + iounmap(its_base); + return NULL; +} + +static int its_init_domain(struct its_node *its) { - struct irq_domain *inner_domain; + struct irq_domain_info dom_info = { + .fwnode = its->fwnode_handle, + .ops = &its_domain_ops, + .domain_flags = its->msi_domain_flags, + .parent = its_parent, + }; struct msi_domain_info *info; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; - inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its); - if (!inner_domain) { - kfree(info); - return -ENOMEM; - } - - inner_domain->parent = its_parent; - irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS); - inner_domain->flags |= its->msi_domain_flags; info->ops = &its_msi_domain_ops; info->data = its; - inner_domain->host_data = info; + dom_info.host_data = info; + if (!msi_create_parent_irq_domain(&dom_info, &gic_v3_its_msi_parent_ops)) { + kfree(info); + return -ENOMEM; + } return 0; } @@ -3417,13 +5171,11 @@ static int its_init_vpe_domain(void) entries = roundup_pow_of_two(nr_cpu_ids); vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes), GFP_KERNEL); - if (!vpe_proxy.vpes) { - pr_err("ITS: Can't allocate GICv4 proxy device array\n"); + if (!vpe_proxy.vpes) return -ENOMEM; - } /* Use the last possible DevID */ - devid = GENMASK(its->device_ids - 1, 0); + devid = GENMASK(device_ids(its) - 1, 0); vpe_proxy.dev = its_create_device(its, devid, entries, false); if (!vpe_proxy.dev) { kfree(vpe_proxy.vpes); @@ -3441,8 +5193,7 @@ static int its_init_vpe_domain(void) return 0; } -static int __init its_compute_its_list_map(struct resource *res, - void __iomem *its_base) +static int __init its_compute_its_list_map(struct its_node *its) { int its_number; u32 ctlr; @@ -3456,15 +5207,15 @@ static int __init its_compute_its_list_map(struct resource *res, its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX); if (its_number >= GICv4_ITS_LIST_MAX) { pr_err("ITS@%pa: No ITSList entry available!\n", - &res->start); + &its->phys_base); return -EINVAL; } - ctlr = readl_relaxed(its_base + GITS_CTLR); + ctlr = readl_relaxed(its->base + GITS_CTLR); ctlr &= ~GITS_CTLR_ITS_NUMBER; ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT; - writel_relaxed(ctlr, its_base + GITS_CTLR); - ctlr = readl_relaxed(its_base + GITS_CTLR); + writel_relaxed(ctlr, its->base + 
GITS_CTLR); + ctlr = readl_relaxed(its->base + GITS_CTLR); if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) { its_number = ctlr & GITS_CTLR_ITS_NUMBER; its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT; @@ -3472,87 +5223,61 @@ static int __init its_compute_its_list_map(struct resource *res, if (test_and_set_bit(its_number, &its_list_map)) { pr_err("ITS@%pa: Duplicate ITSList entry %d\n", - &res->start, its_number); + &its->phys_base, its_number); return -EINVAL; } return its_number; } -static int __init its_probe_one(struct resource *res, - struct fwnode_handle *handle, int numa_node) +static int __init its_probe_one(struct its_node *its) { - struct its_node *its; - void __iomem *its_base; - u32 val, ctlr; - u64 baser, tmp, typer; + u64 baser, tmp; + struct page *page; + u32 ctlr; int err; - its_base = ioremap(res->start, resource_size(res)); - if (!its_base) { - pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start); - return -ENOMEM; - } - - val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK; - if (val != 0x30 && val != 0x40) { - pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start); - err = -ENODEV; - goto out_unmap; - } - - err = its_force_quiescent(its_base); - if (err) { - pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start); - goto out_unmap; - } - - pr_info("ITS %pR\n", res); - - its = kzalloc(sizeof(*its), GFP_KERNEL); - if (!its) { - err = -ENOMEM; - goto out_unmap; - } + its_enable_quirks(its); - raw_spin_lock_init(&its->lock); - INIT_LIST_HEAD(&its->entry); - INIT_LIST_HEAD(&its->its_device_list); - typer = gic_read_typer(its_base + GITS_TYPER); - its->base = its_base; - its->phys_base = res->start; - its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer); - its->device_ids = GITS_TYPER_DEVBITS(typer); - its->is_v4 = !!(typer & GITS_TYPER_VLPIS); - if (its->is_v4) { - if (!(typer & GITS_TYPER_VMOVP)) { - err = its_compute_its_list_map(res, its_base); + if (is_v4(its)) { + if (!(its->typer & GITS_TYPER_VMOVP)) { + err = its_compute_its_list_map(its); if (err < 0) - goto out_free_its; + goto out; its->list_nr = err; pr_info("ITS@%pa: Using ITS number %d\n", - &res->start, err); + &its->phys_base, err); } else { - pr_info("ITS@%pa: Single VMOVP capable\n", &res->start); + pr_info("ITS@%pa: Single VMOVP capable\n", &its->phys_base); } - } - its->numa_node = numa_node; + if (is_v4_1(its)) { + u32 svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer); + + its->sgir_base = ioremap(its->phys_base + SZ_128K, SZ_64K); + if (!its->sgir_base) { + err = -ENOMEM; + goto out; + } - its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, - get_order(ITS_CMD_QUEUE_SZ)); - if (!its->cmd_base) { + its->mpidr = readl_relaxed(its->base + GITS_MPIDR); + + pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n", + &its->phys_base, its->mpidr, svpet); + } + } + + page = its_alloc_pages_node(its->numa_node, + GFP_KERNEL | __GFP_ZERO, + get_order(ITS_CMD_QUEUE_SZ)); + if (!page) { err = -ENOMEM; - goto out_free_its; + goto out_unmap_sgir; } + its->cmd_base = (void *)page_address(page); its->cmd_write = its->cmd_base; - its->fwnode_handle = handle; - its->get_msi_base = its_irq_get_msi_base; - its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP; - - its_enable_quirks(its); err = its_alloc_tables(its); if (err) @@ -3571,6 +5296,9 @@ static int __init its_probe_one(struct resource *res, gits_write_cbaser(baser, its->base + GITS_CBASER); tmp = gits_read_cbaser(its->base + GITS_CBASER); + if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE) + tmp &= 
~GITS_CBASER_SHAREABILITY_MASK; + if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) { if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) { /* @@ -3590,14 +5318,11 @@ static int __init its_probe_one(struct resource *res, gits_write_cwriter(0, its->base + GITS_CWRITER); ctlr = readl_relaxed(its->base + GITS_CTLR); ctlr |= GITS_CTLR_ENABLE; - if (its->is_v4) + if (is_v4(its)) ctlr |= GITS_CTLR_ImDe; writel_relaxed(ctlr, its->base + GITS_CTLR); - if (GITS_TYPER_HCC(typer)) - its->flags |= ITS_FLAGS_SAVE_SUSPEND_STATE; - - err = its_init_domain(handle, its); + err = its_init_domain(its); if (err) goto out_free_tables; @@ -3610,12 +5335,12 @@ static int __init its_probe_one(struct resource *res, out_free_tables: its_free_tables(its); out_free_cmd: - free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ)); -out_free_its: - kfree(its); -out_unmap: - iounmap(its_base); - pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err); + its_free_pages(its->cmd_base, get_order(ITS_CMD_QUEUE_SZ)); +out_unmap_sgir: + if (its->sgir_base) + iounmap(its->sgir_base); +out: + pr_err("ITS@%pa: failed probing (%d)\n", &its->phys_base, err); return err; } @@ -3646,7 +5371,7 @@ static int redist_disable_lpis(void) * * If running with preallocated tables, there is nothing to do. */ - if (gic_data_rdist()->lpi_enabled || + if ((gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED) || (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED)) return 0; @@ -3708,18 +5433,146 @@ int its_cpu_init(void) return 0; } +static void rdist_memreserve_cpuhp_cleanup_workfn(struct work_struct *work) +{ + cpuhp_remove_state_nocalls(gic_rdists->cpuhp_memreserve_state); + gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID; +} + +static DECLARE_WORK(rdist_memreserve_cpuhp_cleanup_work, + rdist_memreserve_cpuhp_cleanup_workfn); + +static int its_cpu_memreserve_lpi(unsigned int cpu) +{ + struct page *pend_page; + int ret = 0; + + /* This gets to run exactly once per CPU */ + if (gic_data_rdist()->flags & RD_LOCAL_MEMRESERVE_DONE) + return 0; + + pend_page = gic_data_rdist()->pend_page; + if (WARN_ON(!pend_page)) { + ret = -ENOMEM; + goto out; + } + /* + * If the pending table was pre-programmed, free the memory we + * preemptively allocated. Otherwise, reserve that memory for + * later kexecs. 
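For context, its_cpu_memreserve_lpi() above runs as a dynamic CPU-hotplug callback; the registration lives elsewhere in the driver and looks roughly like the sketch below (hypothetical helper name, and the state string is an assumption; the cpuhp_setup_state() signature is the real one):

	#include <linux/cpuhotplug.h>

	static int its_memreserve_cpuhp_init(void)
	{
		int state;

		state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
					  "irqchip/arm/gicv3/memreserve:online",
					  its_cpu_memreserve_lpi, NULL);
		if (state < 0)
			return state;

		gic_rdists->cpuhp_memreserve_state = state;
		return 0;
	}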
+ */ + if (gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED) { + its_free_pending_table(pend_page); + gic_data_rdist()->pend_page = NULL; + } else { + phys_addr_t paddr = page_to_phys(pend_page); + WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ)); + } + +out: + /* Last CPU being brought up gets to issue the cleanup */ + if (!IS_ENABLED(CONFIG_SMP) || + cpumask_equal(&cpus_booted_once_mask, cpu_possible_mask)) + schedule_work(&rdist_memreserve_cpuhp_cleanup_work); + + gic_data_rdist()->flags |= RD_LOCAL_MEMRESERVE_DONE; + return ret; +} + +/* Mark all the BASER registers as invalid before they get reprogrammed */ +static int __init its_reset_one(struct resource *res) +{ + void __iomem *its_base; + int err, i; + + its_base = its_map_one(res, &err); + if (!its_base) + return err; + + for (i = 0; i < GITS_BASER_NR_REGS; i++) + gits_write_baser(0, its_base + GITS_BASER + (i << 3)); + + iounmap(its_base); + return 0; +} + static const struct of_device_id its_device_id[] = { { .compatible = "arm,gic-v3-its", }, {}, }; +static struct its_node __init *its_node_init(struct resource *res, + struct fwnode_handle *handle, int numa_node) +{ + void __iomem *its_base; + struct its_node *its; + int err; + + its_base = its_map_one(res, &err); + if (!its_base) + return NULL; + + pr_info("ITS %pR\n", res); + + its = kzalloc(sizeof(*its), GFP_KERNEL); + if (!its) + goto out_unmap; + + raw_spin_lock_init(&its->lock); + mutex_init(&its->dev_alloc_lock); + INIT_LIST_HEAD(&its->entry); + INIT_LIST_HEAD(&its->its_device_list); + + its->typer = gic_read_typer(its_base + GITS_TYPER); + its->base = its_base; + its->phys_base = res->start; + its->get_msi_base = its_irq_get_msi_base; + its->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI | IRQ_DOMAIN_FLAG_MSI_IMMUTABLE; + + its->numa_node = numa_node; + its->fwnode_handle = handle; + + return its; + +out_unmap: + iounmap(its_base); + return NULL; +} + +static void its_node_destroy(struct its_node *its) +{ + iounmap(its->base); + kfree(its); +} + static int __init its_of_probe(struct device_node *node) { struct device_node *np; struct resource res; + int err; + /* + * Make sure *all* the ITS are reset before we probe any, as + * they may be sharing memory. If any of the ITS fails to + * reset, don't even try to go any further, as this could + * result in something even worse. 
+ */ for (np = of_find_matching_node(node, its_device_id); np; np = of_find_matching_node(np, its_device_id)) { + if (!of_device_is_available(np) || + !of_property_read_bool(np, "msi-controller") || + of_address_to_resource(np, 0, &res)) + continue; + + err = its_reset_one(&res); + if (err) + return err; + } + + for (np = of_find_matching_node(node, its_device_id); np; + np = of_find_matching_node(np, its_device_id)) { + struct its_node *its; + if (!of_device_is_available(np)) continue; if (!of_property_read_bool(np, "msi-controller")) { @@ -3733,7 +5586,16 @@ static int __init its_of_probe(struct device_node *node) continue; } - its_probe_one(&res, &np->fwnode, of_node_to_nid(np)); + + its = its_node_init(&res, &np->fwnode, of_node_to_nid(np)); + if (!its) + return -ENOMEM; + + err = its_probe_one(its); + if (err) { + its_node_destroy(its); + return err; + } } return 0; } @@ -3764,13 +5626,13 @@ static int __init acpi_get_its_numa_node(u32 its_id) return NUMA_NO_NODE; } -static int __init gic_acpi_match_srat_its(struct acpi_subtable_header *header, +static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header, const unsigned long end) { return 0; } -static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header, +static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header, const unsigned long end) { int node; @@ -3786,7 +5648,12 @@ static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header, return -EINVAL; } - node = acpi_map_pxm_to_node(its_affinity->proximity_domain); + /* + * Note that in theory a new proximity node could be created by this + * entry as it is an SRAT resource allocation structure. + * We do not currently support doing so. + */ + node = pxm_to_node(its_affinity->proximity_domain); if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) { pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node); @@ -3815,10 +5682,8 @@ static void __init acpi_table_parse_srat_its(void) its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map), GFP_KERNEL); - if (!its_srat_maps) { - pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n"); + if (!its_srat_maps) return; - } acpi_table_parse_entries(ACPI_SIG_SRAT, sizeof(struct acpi_table_srat), @@ -3837,11 +5702,12 @@ static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; } static void __init acpi_its_srat_maps_free(void) { } #endif -static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header, +static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_generic_translator *its_entry; struct fwnode_handle *dom_handle; + struct its_node *its; struct resource res; int err; @@ -3851,7 +5717,7 @@ static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header, res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1; res.flags = IORESOURCE_MEM; - dom_handle = irq_domain_alloc_fwnode((void *)its_entry->base_address); + dom_handle = irq_domain_alloc_fwnode(&res.start); if (!dom_handle) { pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n", &res.start); @@ -3866,36 +5732,102 @@ static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header, goto dom_err; } - err = its_probe_one(&res, dom_handle, - acpi_get_its_numa_node(its_entry->translation_id)); + its = its_node_init(&res, dom_handle, + acpi_get_its_numa_node(its_entry->translation_id)); + if (!its) { + err = -ENOMEM; + goto node_err; + } + + if (acpi_get_madt_revision() 
>= 7 && + (its_entry->flags & ACPI_MADT_ITS_NON_COHERENT)) + its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE; + + err = its_probe_one(its); if (!err) return 0; +node_err: iort_deregister_domain_token(its_entry->translation_id); dom_err: irq_domain_free_fwnode(dom_handle); return err; } +static int __init its_acpi_reset(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_translator *its_entry; + struct resource res; + + its_entry = (struct acpi_madt_generic_translator *)header; + res = (struct resource) { + .start = its_entry->base_address, + .end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1, + .flags = IORESOURCE_MEM, + }; + + return its_reset_one(&res); +} + static void __init its_acpi_probe(void) { acpi_table_parse_srat_its(); - acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, - gic_acpi_parse_madt_its, 0); + /* + * Make sure *all* the ITS are reset before we probe any, as + * they may be sharing memory. If any of the ITS fails to + * reset, don't even try to go any further, as this could + * result in something even worse. + */ + if (acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, + its_acpi_reset, 0) > 0) + acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, + gic_acpi_parse_madt_its, 0); acpi_its_srat_maps_free(); } #else static void __init its_acpi_probe(void) { } #endif +int __init its_lpi_memreserve_init(void) +{ + int state; + + if (!efi_enabled(EFI_CONFIG_TABLES)) + return 0; + + if (list_empty(&its_nodes)) + return 0; + + gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID; + state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "irqchip/arm/gicv3/memreserve:online", + its_cpu_memreserve_lpi, + NULL); + if (state < 0) + return state; + + gic_rdists->cpuhp_memreserve_state = state; + + return 0; +} + int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, - struct irq_domain *parent_domain) + struct irq_domain *parent_domain, u8 irq_prio) { struct device_node *of_node; struct its_node *its; bool has_v4 = false; + bool has_v4_1 = false; int err; + itt_pool = gen_pool_create(get_order(ITS_ITT_ALIGN), -1); + if (!itt_pool) + return -ENOMEM; + + gic_rdists = rdists; + + lpi_prop_prio = irq_prio; its_parent = parent_domain; of_node = to_of_node(handle); if (of_node) @@ -3908,24 +5840,35 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, return -ENXIO; } - gic_rdists = rdists; - err = allocate_lpi_tables(); if (err) return err; - list_for_each_entry(its, &its_nodes, entry) - has_v4 |= its->is_v4; + list_for_each_entry(its, &its_nodes, entry) { + has_v4 |= is_v4(its); + has_v4_1 |= is_v4_1(its); + } + + /* Don't bother with inconsistent systems */ + if (WARN_ON(!has_v4_1 && rdists->has_rvpeid)) + rdists->has_rvpeid = false; if (has_v4 & rdists->has_vlpis) { + const struct irq_domain_ops *sgi_ops; + + if (has_v4_1) + sgi_ops = &its_sgi_domain_ops; + else + sgi_ops = NULL; + if (its_init_vpe_domain() || - its_init_v4(parent_domain, &its_vpe_domain_ops)) { + its_init_v4(parent_domain, &its_vpe_domain_ops, sgi_ops)) { rdists->has_vlpis = false; pr_err("ITS: Disabling GICv4 support\n"); } } - register_syscore_ops(&its_syscore_ops); + register_syscore(&its_syscore); return 0; } diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c index ad70e7c416e3..aa11bbe8026a 100644 --- a/drivers/irqchip/irq-gic-v3-mbi.c +++ b/drivers/irqchip/irq-gic-v3-mbi.c @@ -6,7 +6,7 @@ #define pr_fmt(fmt) "GICv3: " fmt -#include <linux/dma-iommu.h> +#include <linux/iommu.h> #include 
<linux/irq.h> #include <linux/irqdomain.h> #include <linux/kernel.h> @@ -18,13 +18,15 @@ #include <linux/irqchip/arm-gic-v3.h> +#include <linux/irqchip/irq-msi-lib.h> + struct mbi_range { u32 spi_start; u32 nr_spis; unsigned long *bm; }; -static struct mutex mbi_lock; +static DEFINE_MUTEX(mbi_lock); static phys_addr_t mbi_phys_base; static struct mbi_range *mbi_ranges; static unsigned int mbi_range_nr; @@ -84,6 +86,7 @@ static void mbi_free_msi(struct mbi_range *mbi, unsigned int hwirq, static int mbi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *args) { + msi_alloc_info_t *info = args; struct mbi_range *mbi = NULL; int hwirq, offset, i, err = 0; @@ -104,6 +107,11 @@ static int mbi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, hwirq = mbi->spi_start + offset; + err = iommu_dma_prepare_msi(info->desc, + mbi_phys_base + GICD_SETSPI_NSR); + if (err) + return err; + for (i = 0; i < nr_irqs; i++) { err = mbi_irq_gic_domain_alloc(domain, virq + i, hwirq + i); if (err) @@ -132,126 +140,79 @@ static void mbi_irq_domain_free(struct irq_domain *domain, } static const struct irq_domain_ops mbi_domain_ops = { + .select = msi_lib_irq_domain_select, .alloc = mbi_irq_domain_alloc, .free = mbi_irq_domain_free, }; static void mbi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { - msg[0].address_hi = upper_32_bits(mbi_phys_base + GICD_SETSPI_NSR); - msg[0].address_lo = lower_32_bits(mbi_phys_base + GICD_SETSPI_NSR); msg[0].data = data->parent_data->hwirq; - - iommu_dma_map_msi_msg(data->irq, msg); -} - -#ifdef CONFIG_PCI_MSI -/* PCI-specific irqchip */ -static void mbi_mask_msi_irq(struct irq_data *d) -{ - pci_msi_mask_irq(d); - irq_chip_mask_parent(d); -} - -static void mbi_unmask_msi_irq(struct irq_data *d) -{ - pci_msi_unmask_irq(d); - irq_chip_unmask_parent(d); + msi_msg_set_addr(irq_data_get_msi_desc(data), &msg[0], + mbi_phys_base + GICD_SETSPI_NSR); } -static struct irq_chip mbi_msi_irq_chip = { - .name = "MSI", - .irq_mask = mbi_mask_msi_irq, - .irq_unmask = mbi_unmask_msi_irq, - .irq_eoi = irq_chip_eoi_parent, - .irq_compose_msi_msg = mbi_compose_msi_msg, - .irq_write_msi_msg = pci_msi_domain_write_msg, -}; - -static struct msi_domain_info mbi_msi_domain_info = { - .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI), - .chip = &mbi_msi_irq_chip, -}; - -static int mbi_allocate_pci_domain(struct irq_domain *nexus_domain, - struct irq_domain **pci_domain) -{ - *pci_domain = pci_msi_create_irq_domain(nexus_domain->parent->fwnode, - &mbi_msi_domain_info, - nexus_domain); - if (!*pci_domain) - return -ENOMEM; - - return 0; -} -#else -static int mbi_allocate_pci_domain(struct irq_domain *nexus_domain, - struct irq_domain **pci_domain) -{ - *pci_domain = NULL; - return 0; -} -#endif - static void mbi_compose_mbi_msg(struct irq_data *data, struct msi_msg *msg) { mbi_compose_msi_msg(data, msg); - msg[1].address_hi = upper_32_bits(mbi_phys_base + GICD_CLRSPI_NSR); - msg[1].address_lo = lower_32_bits(mbi_phys_base + GICD_CLRSPI_NSR); msg[1].data = data->parent_data->hwirq; - - iommu_dma_map_msi_msg(data->irq, &msg[1]); + msi_msg_set_addr(irq_data_get_msi_desc(data), &msg[1], + mbi_phys_base + GICD_CLRSPI_NSR); } -/* Platform-MSI specific irqchip */ -static struct irq_chip mbi_pmsi_irq_chip = { - .name = "pMSI", - .irq_set_type = irq_chip_set_type_parent, - .irq_compose_msi_msg = mbi_compose_mbi_msg, - .flags = IRQCHIP_SUPPORTS_LEVEL_MSI, -}; - -static struct msi_domain_ops 
mbi_pmsi_ops = { -}; +static bool mbi_init_dev_msi_info(struct device *dev, struct irq_domain *domain, + struct irq_domain *real_parent, struct msi_domain_info *info) +{ + if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info)) + return false; + + switch (info->bus_token) { + case DOMAIN_BUS_PCI_DEVICE_MSI: + case DOMAIN_BUS_PCI_DEVICE_MSIX: + info->chip->irq_compose_msi_msg = mbi_compose_msi_msg; + return true; + + case DOMAIN_BUS_DEVICE_MSI: + info->chip->irq_compose_msi_msg = mbi_compose_mbi_msg; + info->chip->irq_set_type = irq_chip_set_type_parent; + info->chip->flags |= IRQCHIP_SUPPORTS_LEVEL_MSI; + info->flags |= MSI_FLAG_LEVEL_CAPABLE; + return true; + + default: + WARN_ON_ONCE(1); + return false; + } +} -static struct msi_domain_info mbi_pmsi_domain_info = { - .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_LEVEL_CAPABLE), - .ops = &mbi_pmsi_ops, - .chip = &mbi_pmsi_irq_chip, +#define MBI_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS | \ + MSI_FLAG_PCI_MSI_MASK_PARENT) + +#define MBI_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \ + MSI_FLAG_PCI_MSIX | \ + MSI_FLAG_MULTI_PCI_MSI) + +static const struct msi_parent_ops gic_v3_mbi_msi_parent_ops = { + .supported_flags = MBI_MSI_FLAGS_SUPPORTED, + .required_flags = MBI_MSI_FLAGS_REQUIRED, + .chip_flags = MSI_CHIP_FLAG_SET_EOI, + .bus_select_token = DOMAIN_BUS_NEXUS, + .bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI, + .prefix = "MBI-", + .init_dev_msi_info = mbi_init_dev_msi_info, }; -static int mbi_allocate_domains(struct irq_domain *parent) +static int mbi_allocate_domain(struct irq_domain *parent) { - struct irq_domain *nexus_domain, *pci_domain, *plat_domain; - int err; - - nexus_domain = irq_domain_create_tree(parent->fwnode, - &mbi_domain_ops, NULL); - if (!nexus_domain) - return -ENOMEM; - - irq_domain_update_bus_token(nexus_domain, DOMAIN_BUS_NEXUS); - nexus_domain->parent = parent; - - err = mbi_allocate_pci_domain(nexus_domain, &pci_domain); + struct irq_domain_info info = { + .fwnode = parent->fwnode, + .ops = &mbi_domain_ops, + .parent = parent, + }; - plat_domain = platform_msi_create_irq_domain(parent->fwnode, - &mbi_pmsi_domain_info, - nexus_domain); - - if (err || !plat_domain) { - if (plat_domain) - irq_domain_remove(plat_domain); - if (pci_domain) - irq_domain_remove(pci_domain); - irq_domain_remove(nexus_domain); - return -ENOMEM; - } - - return 0; + return msi_create_parent_irq_domain(&info, &gic_v3_mbi_msi_parent_ops) ? 
0 : -ENOMEM; } int __init mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent) @@ -284,8 +245,7 @@ int __init mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent) if (ret) goto err_free_mbi; - mbi_ranges[n].bm = kcalloc(BITS_TO_LONGS(mbi_ranges[n].nr_spis), - sizeof(long), GFP_KERNEL); + mbi_ranges[n].bm = bitmap_zalloc(mbi_ranges[n].nr_spis, GFP_KERNEL); if (!mbi_ranges[n].bm) { ret = -ENOMEM; goto err_free_mbi; @@ -297,7 +257,7 @@ int __init mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent) reg = of_get_property(np, "mbi-alias", NULL); if (reg) { mbi_phys_base = of_translate_address(np, reg); - if (mbi_phys_base == OF_BAD_ADDR) { + if (mbi_phys_base == (phys_addr_t)OF_BAD_ADDR) { ret = -ENXIO; goto err_free_mbi; } @@ -314,7 +274,7 @@ int __init mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent) pr_info("Using MBI frame %pa\n", &mbi_phys_base); - ret = mbi_allocate_domains(parent); + ret = mbi_allocate_domain(parent); if (ret) goto err_free_mbi; @@ -323,7 +283,7 @@ int __init mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent) err_free_mbi: if (mbi_ranges) { for (n = 0; n < mbi_range_nr; n++) - kfree(mbi_ranges[n].bm); + bitmap_free(mbi_ranges[n].bm); kfree(mbi_ranges); } diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 0868a9d81c3c..6607ab58f72e 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -1,18 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved. * Author: Marc Zyngier <marc.zyngier@arm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. 
*/ #define pr_fmt(fmt) "GICv3: " fmt @@ -23,16 +12,23 @@ #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/irqdomain.h> +#include <linux/kernel.h> +#include <linux/kstrtox.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/percpu.h> +#include <linux/refcount.h> #include <linux/slab.h> +#include <linux/iopoll.h> #include <linux/irqchip.h> #include <linux/irqchip/arm-gic-common.h> #include <linux/irqchip/arm-gic-v3.h> -#include <linux/irqchip/irq-partition-percpu.h> +#include <linux/irqchip/arm-gic-v3-prio.h> +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/arm-smccc.h> #include <asm/cputype.h> #include <asm/exception.h> @@ -41,7 +37,15 @@ #include "irq-gic-common.h" +static u8 dist_prio_irq __ro_after_init = GICV3_PRIO_IRQ; +static u8 dist_prio_nmi __ro_after_init = GICV3_PRIO_NMI; + #define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0) +#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1) +#define FLAGS_WORKAROUND_ASR_ERRATUM_8601001 (1ULL << 2) +#define FLAGS_WORKAROUND_INSECURE (1ULL << 3) + +static struct cpumask broken_rdists __read_mostly __maybe_unused; struct redist_region { void __iomem *redist_base; @@ -51,6 +55,7 @@ struct redist_region { struct gic_chip_data { struct fwnode_handle *fwnode; + phys_addr_t dist_phys_base; void __iomem *dist_base; struct redist_region *redist_regions; struct rdists rdists; @@ -59,14 +64,174 @@ struct gic_chip_data { u32 nr_redist_regions; u64 flags; bool has_rss; - unsigned int irq_nr; - struct partition_desc *ppi_descs[16]; + unsigned int ppi_nr; + struct partition_affinity *parts; + unsigned int nr_parts; +}; + +struct partition_affinity { + cpumask_t mask; + struct fwnode_handle *partition_id; }; +#define T241_CHIPS_MAX 4 +static void __iomem *t241_dist_base_alias[T241_CHIPS_MAX] __read_mostly; +static DEFINE_STATIC_KEY_FALSE(gic_nvidia_t241_erratum); + +static DEFINE_STATIC_KEY_FALSE(gic_arm64_2941627_erratum); + static struct gic_chip_data gic_data __read_mostly; static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key); -static struct gic_kvm_info gic_v3_kvm_info; +#define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer)) +#define GIC_LINE_NR min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U) +#define GIC_ESPI_NR GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer) + +static bool nmi_support_forbidden; + +/* + * There are 16 SGIs, though we only actually use 8 in Linux. The other 8 SGIs + * are potentially stolen by the secure side. Some code, especially code dealing + * with hwirq IDs, is simplified by accounting for all 16. + */ +#define SGI_NR 16 + +/* + * The behaviours of RPR and PMR registers differ depending on the value of + * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the + * distributor and redistributors depends on whether security is enabled in the + * GIC. + * + * When security is enabled, non-secure priority values from the (re)distributor + * are presented to the GIC CPUIF as follows: + * (GIC_(R)DIST_PRI[irq] >> 1) | 0x80; + * + * If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at non-secure + * EL1 are subject to a similar operation thus matching the priorities presented + * from the (re)distributor when security is enabled. When SCR_EL3.FIQ == 0, + * these values are unchanged by the GIC. + * + * see GICv3/GICv4 Architecture Specification (IHI0069D): + * - section 4.8.1 Non-secure accesses to register fields for Secure interrupt + * priorities.
+ * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1 + * interrupt. + */ +static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis); + +static u32 gic_get_pribits(void) +{ + u32 pribits; + + pribits = gic_read_ctlr(); + pribits &= ICC_CTLR_EL1_PRI_BITS_MASK; + pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT; + pribits++; + + return pribits; +} + +static bool gic_has_group0(void) +{ + u32 val; + u32 old_pmr; + + old_pmr = gic_read_pmr(); + + /* + * Let's find out if Group0 is under control of EL3 or not by + * setting the highest possible, non-zero priority in PMR. + * + * If SCR_EL3.FIQ is set, the priority gets shifted down in + * order for the CPU interface to set bit 7, and keep the + * actual priority in the non-secure range. In the process, it + * loses the least significant bit and the actual priority + * becomes 0x80. Reading it back returns 0, indicating that + * we don't have access to Group0. + */ + gic_write_pmr(BIT(8 - gic_get_pribits())); + val = gic_read_pmr(); + + gic_write_pmr(old_pmr); + + return val != 0; +} + +static inline bool gic_dist_security_disabled(void) +{ + return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS; +} + +static bool cpus_have_security_disabled __ro_after_init; +static bool cpus_have_group0 __ro_after_init; + +static void __init gic_prio_init(void) +{ + bool ds; + + cpus_have_group0 = gic_has_group0(); + + ds = gic_dist_security_disabled(); + if ((gic_data.flags & FLAGS_WORKAROUND_INSECURE) && !ds) { + if (cpus_have_group0) { + u32 val; + + val = readl_relaxed(gic_data.dist_base + GICD_CTLR); + val |= GICD_CTLR_DS; + writel_relaxed(val, gic_data.dist_base + GICD_CTLR); + + ds = gic_dist_security_disabled(); + if (ds) + pr_warn("Broken GIC integration, security disabled\n"); + } else { + pr_warn("Broken GIC integration, pNMI forbidden\n"); + nmi_support_forbidden = true; + } + } + + cpus_have_security_disabled = ds; + + /* + * How priority values are used by the GIC depends on two things: + * the security state of the GIC (controlled by the GICD_CTLR.DS bit) + * and if Group 0 interrupts can be delivered to Linux in the non-secure + * world as FIQs (controlled by the SCR_EL3.FIQ bit). These affect the + * way priorities are presented in ICC_PMR_EL1 and in the distributor: + * + * GICD_CTLR.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Distributor + * ------------------------------------------------------- + * 1 | - | unchanged | unchanged + * ------------------------------------------------------- + * 0 | 1 | non-secure | non-secure + * ------------------------------------------------------- + * 0 | 0 | unchanged | non-secure + * + * In the non-secure view reads and writes are modified: + * + * - A value written is right-shifted by one and the MSB is set, + * forcing the priority into the non-secure range. + * + * - A value read is left-shifted by one. + * + * In the first two cases, where ICC_PMR_EL1 and the interrupt priority + * are both either modified or unchanged, we can use the same set of + * priorities. + * + * In the last case, where only the interrupt priorities are modified to + * be in the non-secure range, we program the non-secure values into + * the distributor to match the PMR values we want.
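The transform described by the table above can be sanity-checked with a few lines of arithmetic. This sketch mirrors what __gicv3_prio_to_ns(), used just below, is expected to do; the macro body is an assumption derived from the rules quoted above:

#define example_prio_to_ns(p)	(0xff & ((p) << 1))	/* assumed shape of __gicv3_prio_to_ns() */

static void example_prio_round_trip(void)
{
	u8 want = 0xc0;				/* desired distributor-view priority */
	u8 ns   = example_prio_to_ns(want);	/* 0x80: the value Linux writes      */
	u8 dist = (ns >> 1) | 0x80;		/* what the secure side then stores  */

	WARN_ON(dist != want);			/* the round trip recovers 0xc0      */
}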
+ */ + if (cpus_have_group0 && !cpus_have_security_disabled) { + dist_prio_irq = __gicv3_prio_to_ns(dist_prio_irq); + dist_prio_nmi = __gicv3_prio_to_ns(dist_prio_nmi); + } + + pr_info("GICD_CTLR.DS=%d, SCR_EL3.FIQ=%d\n", + cpus_have_security_disabled, + !cpus_have_group0); +} + +static struct gic_kvm_info gic_v3_kvm_info __initdata; static DEFINE_PER_CPU(bool, has_rss); #define MPIDR_RS(mpidr) (((mpidr) & 0xF0UL) >> 4) @@ -77,70 +242,133 @@ static DEFINE_PER_CPU(bool, has_rss); /* Our default, arbitrary priority value. Linux only uses one anyway. */ #define DEFAULT_PMR_VALUE 0xf0 -static inline unsigned int gic_irq(struct irq_data *d) +enum gic_intid_range { + SGI_RANGE, + PPI_RANGE, + SPI_RANGE, + EPPI_RANGE, + ESPI_RANGE, + LPI_RANGE, + __INVALID_RANGE__ +}; + +static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq) +{ + switch (hwirq) { + case 0 ... 15: + return SGI_RANGE; + case 16 ... 31: + return PPI_RANGE; + case 32 ... 1019: + return SPI_RANGE; + case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63): + return EPPI_RANGE; + case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023): + return ESPI_RANGE; + case 8192 ... GENMASK(23, 0): + return LPI_RANGE; + default: + return __INVALID_RANGE__; + } +} + +static enum gic_intid_range get_intid_range(struct irq_data *d) { - return d->hwirq; + return __get_intid_range(d->hwirq); } -static inline int gic_irq_in_rdist(struct irq_data *d) +static inline bool gic_irq_in_rdist(struct irq_data *d) { - return gic_irq(d) < 32; + switch (get_intid_range(d)) { + case SGI_RANGE: + case PPI_RANGE: + case EPPI_RANGE: + return true; + default: + return false; + } +} + +static inline void __iomem *gic_dist_base_alias(struct irq_data *d) +{ + if (static_branch_unlikely(&gic_nvidia_t241_erratum)) { + irq_hw_number_t hwirq = irqd_to_hwirq(d); + u32 chip; + + /* + * For the erratum T241-FABRIC-4, read accesses to GICD_In{E} + * registers are directed to the chip that owns the SPI. The + * alias region can also be used for writes to the + * GICD_In{E} except GICD_ICENABLERn. Each chip has support + * for 320 {E}SPIs. Mappings for all 4 chips: + * Chip0 = 32-351 + * Chip1 = 352-671 + * Chip2 = 672-991 + * Chip3 = 4096-4415 + */ + switch (__get_intid_range(hwirq)) { + case SPI_RANGE: + chip = (hwirq - 32) / 320; + break; + case ESPI_RANGE: + chip = 3; + break; + default: + unreachable(); + } + return t241_dist_base_alias[chip]; + } + + return gic_data.dist_base; } static inline void __iomem *gic_dist_base(struct irq_data *d) { - if (gic_irq_in_rdist(d)) /* SGI+PPI -> SGI_base for this CPU */ + switch (get_intid_range(d)) { + case SGI_RANGE: + case PPI_RANGE: + case EPPI_RANGE: + /* SGI+PPI -> SGI_base for this CPU */ return gic_data_rdist_sgi_base(); - if (d->hwirq <= 1023) /* SPI -> dist_base */ + case SPI_RANGE: + case ESPI_RANGE: + /* SPI -> dist_base */ return gic_data.dist_base; - return NULL; + default: + return NULL; + } } -static void gic_do_wait_for_rwp(void __iomem *base) +static void gic_do_wait_for_rwp(void __iomem *base, u32 bit) { - u32 count = 1000000; /* 1s!
*/ + u32 val; + int ret; - while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) { - count--; - if (!count) { - pr_err_ratelimited("RWP timeout, gone fishing\n"); - return; - } - cpu_relax(); - udelay(1); - }; + ret = readl_relaxed_poll_timeout_atomic(base + GICD_CTLR, val, !(val & bit), + 1, USEC_PER_SEC); + if (ret == -ETIMEDOUT) + pr_err_ratelimited("RWP timeout, gone fishing\n"); } /* Wait for completion of a distributor change */ static void gic_dist_wait_for_rwp(void) { - gic_do_wait_for_rwp(gic_data.dist_base); + gic_do_wait_for_rwp(gic_data.dist_base, GICD_CTLR_RWP); } /* Wait for completion of a redistributor change */ static void gic_redist_wait_for_rwp(void) { - gic_do_wait_for_rwp(gic_data_rdist_rd_base()); + gic_do_wait_for_rwp(gic_data_rdist_rd_base(), GICR_CTLR_RWP); } -#ifdef CONFIG_ARM64 - -static u64 __maybe_unused gic_read_iar(void) -{ - if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154)) - return gic_read_iar_cavium_thunderx(); - else - return gic_read_iar_common(); -} -#endif - static void gic_enable_redist(bool enable) { void __iomem *rbase; - u32 count = 1000000; /* 1s! */ u32 val; + int ret; if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996) return; @@ -161,55 +389,107 @@ static void gic_enable_redist(bool enable) return; /* No PM support in this redistributor */ } - while (--count) { - val = readl_relaxed(rbase + GICR_WAKER); - if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep)) - break; - cpu_relax(); - udelay(1); - }; - if (!count) + ret = readl_relaxed_poll_timeout_atomic(rbase + GICR_WAKER, val, + enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep), + 1, USEC_PER_SEC); + if (ret == -ETIMEDOUT) { pr_err_ratelimited("redistributor failed to %s...\n", enable ? "wakeup" : "sleep"); + } } /* * Routines to disable, enable, EOI and route interrupts */ +static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index) +{ + switch (get_intid_range(d)) { + case SGI_RANGE: + case PPI_RANGE: + case SPI_RANGE: + *index = d->hwirq; + return offset; + case EPPI_RANGE: + /* + * Contrary to the ESPI range, the EPPI range is contiguous + * to the PPI range in the registers, so let's adjust the + * displacement accordingly. Consistency is overrated. 
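The rewritten wait loops above trade the hand-rolled 1000000-iteration udelay() counters for the iopoll helpers. The same pattern in isolation, with hypothetical register and bit names:

#include <linux/iopoll.h>

/* Poll @reg every 1us until @busy clears; give up after one second */
static int example_wait_idle(void __iomem *reg, u32 busy)
{
	u32 val;

	return readl_relaxed_poll_timeout_atomic(reg, val, !(val & busy),
						 1, USEC_PER_SEC);
}

The helper returns 0 on success and -ETIMEDOUT otherwise, which is precisely what the callers above test for.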
+ */ + *index = d->hwirq - EPPI_BASE_INTID + 32; + return offset; + case ESPI_RANGE: + *index = d->hwirq - ESPI_BASE_INTID; + switch (offset) { + case GICD_ISENABLER: + return GICD_ISENABLERnE; + case GICD_ICENABLER: + return GICD_ICENABLERnE; + case GICD_ISPENDR: + return GICD_ISPENDRnE; + case GICD_ICPENDR: + return GICD_ICPENDRnE; + case GICD_ISACTIVER: + return GICD_ISACTIVERnE; + case GICD_ICACTIVER: + return GICD_ICACTIVERnE; + case GICD_IPRIORITYR: + return GICD_IPRIORITYRnE; + case GICD_ICFGR: + return GICD_ICFGRnE; + case GICD_IROUTER: + return GICD_IROUTERnE; + default: + break; + } + break; + default: + break; + } + + WARN_ON(1); + *index = d->hwirq; + return offset; +} + static int gic_peek_irq(struct irq_data *d, u32 offset) { - u32 mask = 1 << (gic_irq(d) % 32); void __iomem *base; + u32 index, mask; + + offset = convert_offset_index(d, offset, &index); + mask = 1 << (index % 32); if (gic_irq_in_rdist(d)) base = gic_data_rdist_sgi_base(); else - base = gic_data.dist_base; + base = gic_dist_base_alias(d); - return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask); + return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask); } static void gic_poke_irq(struct irq_data *d, u32 offset) { - u32 mask = 1 << (gic_irq(d) % 32); - void (*rwp_wait)(void); void __iomem *base; + u32 index, mask; - if (gic_irq_in_rdist(d)) { + offset = convert_offset_index(d, offset, &index); + mask = 1 << (index % 32); + + if (gic_irq_in_rdist(d)) base = gic_data_rdist_sgi_base(); - rwp_wait = gic_redist_wait_for_rwp; - } else { + else base = gic_data.dist_base; - rwp_wait = gic_dist_wait_for_rwp; - } - writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4); - rwp_wait(); + writel_relaxed(mask, base + offset + (index / 32) * 4); } static void gic_mask_irq(struct irq_data *d) { gic_poke_irq(d, GICD_ICENABLER); + if (gic_irq_in_rdist(d)) + gic_redist_wait_for_rwp(); + else + gic_dist_wait_for_rwp(); } static void gic_eoimode1_mask_irq(struct irq_data *d) @@ -232,12 +512,18 @@ static void gic_unmask_irq(struct irq_data *d) gic_poke_irq(d, GICD_ISENABLER); } +static inline bool gic_supports_nmi(void) +{ + return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && + static_branch_likely(&supports_pseudo_nmis); +} + static int gic_irq_set_irqchip_state(struct irq_data *d, enum irqchip_irq_state which, bool val) { u32 reg; - if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */ + if (d->hwirq >= 8192) /* SGI/PPI/SPI only */ return -EINVAL; switch (which) { @@ -250,7 +536,11 @@ static int gic_irq_set_irqchip_state(struct irq_data *d, break; case IRQCHIP_STATE_MASKED: - reg = val ? GICD_ICENABLER : GICD_ISENABLER; + if (val) { + gic_mask_irq(d); + return 0; + } + reg = GICD_ISENABLER; break; default: @@ -258,13 +548,20 @@ static int gic_irq_set_irqchip_state(struct irq_data *d, } gic_poke_irq(d, reg); + + /* + * Force read-back to guarantee that the active state has taken + * effect, and won't race with a guest-driven deactivation. 
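convert_offset_index() is the pivot of the extended-range support: it turns a legacy register offset plus hwirq into the {E}SPI/{E}PPI equivalents. A worked example, with the values following from EPPI_BASE_INTID (1056) and ESPI_BASE_INTID (4096) as used above:

static void example_extended_range_mapping(struct irq_data *d)
{
	u32 offset, index;

	/* For ESPI 4096, the first extended SPI: */
	offset = convert_offset_index(d, GICD_ISENABLER, &index);
	/* offset == GICD_ISENABLERnE and index == 0: bit 0 of word 0 */

	/*
	 * For EPPI 1056, the first extended PPI, index comes back as
	 * 1056 - EPPI_BASE_INTID + 32 == 32, i.e. directly after PPI 31
	 * in the redistributor's register file.
	 */
}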
+ */ + if (reg == GICD_ISACTIVER) + gic_peek_irq(d, reg); return 0; } static int gic_irq_get_irqchip_state(struct irq_data *d, enum irqchip_irq_state which, bool *val) { - if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */ + if (d->hwirq >= 8192) /* PPI/SPI only */ return -EINVAL; switch (which) { @@ -287,9 +584,103 @@ static int gic_irq_get_irqchip_state(struct irq_data *d, return 0; } +static void gic_irq_set_prio(struct irq_data *d, u8 prio) +{ + void __iomem *base = gic_dist_base(d); + u32 offset, index; + + offset = convert_offset_index(d, GICD_IPRIORITYR, &index); + + writeb_relaxed(prio, base + offset + index); +} + +static int gic_irq_nmi_setup(struct irq_data *d) +{ + struct irq_desc *desc = irq_to_desc(d->irq); + + if (!gic_supports_nmi()) + return -EINVAL; + + if (gic_peek_irq(d, GICD_ISENABLER)) { + pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq); + return -EINVAL; + } + + /* + * A secondary irq_chip should be in charge of LPI requests; + * it should not be possible to get there + */ + if (WARN_ON(irqd_to_hwirq(d) >= 8192)) + return -EINVAL; + + /* desc lock should already be held */ + if (!gic_irq_in_rdist(d)) + desc->handle_irq = handle_fasteoi_nmi; + + gic_irq_set_prio(d, dist_prio_nmi); + + return 0; +} + +static void gic_irq_nmi_teardown(struct irq_data *d) +{ + struct irq_desc *desc = irq_to_desc(d->irq); + + if (WARN_ON(!gic_supports_nmi())) + return; + + if (gic_peek_irq(d, GICD_ISENABLER)) { + pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq); + return; + } + + /* + * A secondary irq_chip should be in charge of LPI requests; + * it should not be possible to get there + */ + if (WARN_ON(irqd_to_hwirq(d) >= 8192)) + return; + + /* desc lock should already be held */ + if (!gic_irq_in_rdist(d)) + desc->handle_irq = handle_fasteoi_irq; + + gic_irq_set_prio(d, dist_prio_irq); +} + +static bool gic_arm64_erratum_2941627_needed(struct irq_data *d) +{ + enum gic_intid_range range; + + if (!static_branch_unlikely(&gic_arm64_2941627_erratum)) + return false; + + range = get_intid_range(d); + + /* + * The workaround is needed if the IRQ is an SPI and + * the target cpu is different from the one we are + * executing on. + */ + return (range == SPI_RANGE || range == ESPI_RANGE) && + !cpumask_test_cpu(raw_smp_processor_id(), + irq_data_get_effective_affinity_mask(d)); } + static void gic_eoi_irq(struct irq_data *d) { - gic_write_eoir(gic_irq(d)); + write_gicreg(irqd_to_hwirq(d), ICC_EOIR1_EL1); + isb(); + + if (gic_arm64_erratum_2941627_needed(d)) { + /* + * Make sure the GIC stream deactivate packet + * issued by ICC_EOIR1_EL1 has completed before + * deactivating through GICD_ICACTIVER. + */ + dsb(sy); + gic_poke_irq(d, GICD_ICACTIVER); + } } static void gic_eoimode1_eoi_irq(struct irq_data *d) @@ -298,39 +689,56 @@ static void gic_eoimode1_eoi_irq(struct irq_data *d) * No need to deactivate an LPI, or an interrupt that * is getting forwarded to a vcpu.
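The .irq_nmi_setup()/.irq_nmi_teardown() callbacks above are what the core invokes when a driver claims a line as a pseudo-NMI. From the consumer side the flow is roughly the following (hypothetical names; the fallback policy is illustrative):

#include <linux/interrupt.h>

static irqreturn_t demo_handler(int irq, void *dev_id)
{
	/* NMI context: no sleeping, no regular spinlocks */
	return IRQ_HANDLED;
}

static int demo_request(unsigned int irq, void *dev)
{
	int ret;

	/* Fails if the line is enabled or pseudo-NMIs are unsupported */
	ret = request_nmi(irq, demo_handler, 0, "demo", dev);
	if (ret)	/* fall back to an ordinary interrupt */
		ret = request_irq(irq, demo_handler, 0, "demo", dev);
	return ret;
}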
*/ - if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d)) + if (irqd_to_hwirq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d)) return; - gic_write_dir(gic_irq(d)); + + if (!gic_arm64_erratum_2941627_needed(d)) + gic_write_dir(irqd_to_hwirq(d)); + else + gic_poke_irq(d, GICD_ICACTIVER); } static int gic_set_type(struct irq_data *d, unsigned int type) { - unsigned int irq = gic_irq(d); - void (*rwp_wait)(void); + irq_hw_number_t irq = irqd_to_hwirq(d); + enum gic_intid_range range; void __iomem *base; + u32 offset, index; + int ret; + + range = get_intid_range(d); /* Interrupt configuration for SGIs can't be changed */ - if (irq < 16) - return -EINVAL; + if (range == SGI_RANGE) + return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0; /* SPIs have restrictions on the supported types */ - if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH && - type != IRQ_TYPE_EDGE_RISING) + if ((range == SPI_RANGE || range == ESPI_RANGE) && + type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING) return -EINVAL; - if (gic_irq_in_rdist(d)) { + if (gic_irq_in_rdist(d)) base = gic_data_rdist_sgi_base(); - rwp_wait = gic_redist_wait_for_rwp; - } else { - base = gic_data.dist_base; - rwp_wait = gic_dist_wait_for_rwp; + else + base = gic_dist_base_alias(d); + + offset = convert_offset_index(d, GICD_ICFGR, &index); + + ret = gic_configure_irq(index, type, base + offset); + if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) { + /* Misconfigured PPIs are usually not fatal */ + pr_warn("GIC: PPI INTID%ld is secure or misconfigured\n", irq); + ret = 0; } - return gic_configure_irq(irq, type, base, rwp_wait); + return ret; } static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) { + if (get_intid_range(d) == SGI_RANGE) + return -EINVAL; + if (vcpu) irqd_set_forwarded_to_vcpu(d); else @@ -338,10 +746,16 @@ static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) return 0; } -static u64 gic_mpidr_to_affinity(unsigned long mpidr) +static u64 gic_cpu_to_affinity(int cpu) { + u64 mpidr = cpu_logical_map(cpu); u64 aff; + /* ASR8601 needs to have its affinities shifted down... */ + if (unlikely(gic_data.flags & FLAGS_WORKAROUND_ASR_ERRATUM_8601001)) + mpidr = (MPIDR_AFFINITY_LEVEL(mpidr, 1) | + (MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8)); + aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 | MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | @@ -350,49 +764,160 @@ static u64 gic_mpidr_to_affinity(unsigned long mpidr) return aff; } -static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) +static void gic_deactivate_unhandled(u32 irqnr) { - u32 irqnr; + if (static_branch_likely(&supports_deactivate_key)) { + if (irqnr < 8192) + gic_write_dir(irqnr); + } else { + write_gicreg(irqnr, ICC_EOIR1_EL1); + isb(); + } +} - irqnr = gic_read_iar(); +/* + * Follow a read of the IAR with any HW maintenance that needs to happen prior + * to invoking the relevant IRQ handler. We must do two things: + * + * (1) Ensure instruction ordering between a read of IAR and subsequent + * instructions in the IRQ handler using an ISB. + * + * It is possible for the IAR to report an IRQ which was signalled *after* + * the CPU took an IRQ exception as multiple interrupts can race to be + * recognized by the GIC, earlier interrupts could be withdrawn, and/or + * later interrupts could be prioritized by the GIC. 
+ * + * For devices which are tightly coupled to the CPU, such as PMUs, a + * context synchronization event is necessary to ensure that system + * register state is not stale, as these may have been indirectly written + * *after* exception entry. + * + * (2) Execute an interrupt priority drop when EOI mode 1 is in use. + */ +static inline void gic_complete_ack(u32 irqnr) +{ + if (static_branch_likely(&supports_deactivate_key)) + write_gicreg(irqnr, ICC_EOIR1_EL1); + + isb(); +} - if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) { - int err; +static bool gic_rpr_is_nmi_prio(void) +{ + if (!gic_supports_nmi()) + return false; - if (static_branch_likely(&supports_deactivate_key)) - gic_write_eoir(irqnr); - else - isb(); + return unlikely(gic_read_rpr() == GICV3_PRIO_NMI); +} - err = handle_domain_irq(gic_data.domain, irqnr, regs); - if (err) { - WARN_ONCE(true, "Unexpected interrupt received!\n"); - if (static_branch_likely(&supports_deactivate_key)) { - if (irqnr < 8192) - gic_write_dir(irqnr); - } else { - gic_write_eoir(irqnr); - } - } +static bool gic_irqnr_is_special(u32 irqnr) +{ + return irqnr >= 1020 && irqnr <= 1023; +} + +static void __gic_handle_irq(u32 irqnr, struct pt_regs *regs) +{ + if (gic_irqnr_is_special(irqnr)) return; + + gic_complete_ack(irqnr); + + if (generic_handle_domain_irq(gic_data.domain, irqnr)) { + WARN_ONCE(true, "Unexpected interrupt (irqnr %u)\n", irqnr); + gic_deactivate_unhandled(irqnr); } - if (irqnr < 16) { - gic_write_eoir(irqnr); - if (static_branch_likely(&supports_deactivate_key)) - gic_write_dir(irqnr); -#ifdef CONFIG_SMP - /* - * Unlike GICv2, we don't need an smp_rmb() here. - * The control dependency from gic_read_iar to - * the ISB in gic_write_eoir is enough to ensure - * that any shared data read by handle_IPI will - * be read after the ACK. - */ - handle_IPI(irqnr, regs); -#else - WARN_ONCE(true, "Unexpected SGI received!\n"); -#endif +} + +static void __gic_handle_nmi(u32 irqnr, struct pt_regs *regs) +{ + if (gic_irqnr_is_special(irqnr)) + return; + + gic_complete_ack(irqnr); + + if (generic_handle_domain_nmi(gic_data.domain, irqnr)) { + WARN_ONCE(true, "Unexpected pseudo-NMI (irqnr %u)\n", irqnr); + gic_deactivate_unhandled(irqnr); + } +} + +/* + * An exception has been taken from a context with IRQs enabled, and this could + * be an IRQ or an NMI. + * + * The entry code called us with DAIF.IF set to keep NMIs masked. We must clear + * DAIF.IF (and update ICC_PMR_EL1 to mask regular IRQs) prior to returning, + * after handling any NMI but before handling any IRQ. + * + * The entry code has performed IRQ entry, and if an NMI is detected we must + * perform NMI entry/exit around invoking the handler. + */ +static void __gic_handle_irq_from_irqson(struct pt_regs *regs) +{ + bool is_nmi; + u32 irqnr; + + irqnr = gic_read_iar(); + + is_nmi = gic_rpr_is_nmi_prio(); + + if (is_nmi) { + nmi_enter(); + __gic_handle_nmi(irqnr, regs); + nmi_exit(); + } + + if (gic_prio_masking_enabled()) { + gic_pmr_mask_irqs(); + gic_arch_enable_irqs(); } + + if (!is_nmi) + __gic_handle_irq(irqnr, regs); +} + +/* + * An exception has been taken from a context with IRQs disabled, which can only + * be an NMI. + * + * The entry code called us with DAIF.IF set to keep NMIs masked. We must leave + * DAIF.IF (and ICC_PMR_EL1) unchanged. + * + * The entry code has performed NMI entry. + */ +static void __gic_handle_irq_from_irqsoff(struct pt_regs *regs) +{ + u64 pmr; + u32 irqnr; + + /* + * We were in a context with IRQs disabled. 
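Condensed, the two requirements above give the following ordering for the EOImode==1 case (i.e. supports_deactivate_key is true); a sketch of the flow, not the driver's literal code:

static void example_ack_path(struct irq_domain *domain)
{
	u32 irqnr = gic_read_iar();		  /* ack: GIC delivers the winner  */

	write_gicreg(irqnr, ICC_EOIR1_EL1);	  /* (2) priority drop only        */
	isb();					  /* (1) order IAR vs. the handler */

	generic_handle_domain_irq(domain, irqnr); /* run the handler               */
	gic_write_dir(irqnr);			  /* deactivate once handling ends */
}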
However, the + * entry code has set PMR to a value that allows any + * interrupt to be acknowledged, and not just NMIs. This can + * lead to surprising effects if the NMI has been retired in + * the meantime and an IRQ is pending. The IRQ + * would then be taken in NMI context, something that nobody + * wants to debug twice. + * + * Until we sort this, drop PMR again to a level that will + * actually only allow NMIs before reading IAR, and then + * restore it to what it was. + */ + pmr = gic_read_pmr(); + gic_pmr_mask_irqs(); + isb(); + irqnr = gic_read_iar(); + gic_write_pmr(pmr); + + __gic_handle_nmi(irqnr, regs); +} + +static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) +{ + if (unlikely(gic_supports_nmi() && !interrupts_enabled(regs))) + __gic_handle_irq_from_irqsoff(regs); + else + __gic_handle_irq_from_irqson(regs); } static void __init gic_dist_init(void) @@ -400,6 +925,7 @@ static void __init gic_dist_init(void) unsigned int i; u64 affinity; void __iomem *base = gic_data.dist_base; + u32 val; /* Disable the distributor */ writel_relaxed(0, base + GICD_CTLR); @@ -411,22 +937,48 @@ static void __init gic_dist_init(void) * do the right thing if the kernel is running in secure mode, * but that's not the intended use case anyway. */ - for (i = 32; i < gic_data.irq_nr; i += 32) + for (i = 32; i < GIC_LINE_NR; i += 32) writel_relaxed(~0, base + GICD_IGROUPR + i / 8); - gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp); + /* Extended SPI range, not handled by the GICv2/GICv3 common code */ + for (i = 0; i < GIC_ESPI_NR; i += 32) { + writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8); + writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8); + } + + for (i = 0; i < GIC_ESPI_NR; i += 32) + writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8); + + for (i = 0; i < GIC_ESPI_NR; i += 16) + writel_relaxed(0, base + GICD_ICFGRnE + i / 4); - /* Enable distributor with ARE, Group1 */ - writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1, - base + GICD_CTLR); + for (i = 0; i < GIC_ESPI_NR; i += 4) + writel_relaxed(REPEAT_BYTE_U32(dist_prio_irq), + base + GICD_IPRIORITYRnE + i); + + /* Now do the common stuff */ + gic_dist_config(base, GIC_LINE_NR, dist_prio_irq); + + val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1; + if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) { + pr_info("Enabling SGIs without active state\n"); + val |= GICD_CTLR_nASSGIreq; + } + + /* Enable distributor with ARE, Group1, and wait for it to drain */ + writel_relaxed(val, base + GICD_CTLR); + gic_dist_wait_for_rwp(); /* * Set all global interrupts to the boot CPU only. ARE must be * enabled.
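The GICD_IPRIORITYRnE loop above programs four byte-wide priority fields with a single 32-bit store; REPEAT_BYTE_U32() just replicates one byte across the word. Reduced to its arithmetic (the demo macro mirrors the expected helper):

#define example_repeat_byte_u32(x)	((u32)(u8)(x) * 0x01010101U)

/* example_repeat_byte_u32(0xc0) == 0xc0c0c0c0: one store, 4 INTIDs */
static void example_prio_fill(void __iomem *base, u32 nr, u8 prio)
{
	u32 i;

	for (i = 0; i < nr; i += 4)
		writel_relaxed(example_repeat_byte_u32(prio),
			       base + GICD_IPRIORITYR + i);
}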
*/ - affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id())); - for (i = 32; i < gic_data.irq_nr; i++) + affinity = gic_cpu_to_affinity(smp_processor_id()); + for (i = 32; i < GIC_LINE_NR; i++) gic_write_irouter(affinity, base + GICD_IROUTER + i * 8); + + for (i = 0; i < GIC_ESPI_NR; i++) + gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8); } static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *)) @@ -470,7 +1022,7 @@ static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *)) static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr) { - unsigned long mpidr = cpu_logical_map(smp_processor_id()); + unsigned long mpidr; u64 typer; u32 aff; @@ -478,6 +1030,8 @@ static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr) * Convert affinity to a 32bit value that can be matched to * GICR_TYPER bits [63:32]. */ + mpidr = gic_cpu_to_affinity(smp_processor_id()); + aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 | MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | @@ -486,6 +1040,7 @@ static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr) typer = gic_read_typer(ptr + GICR_TYPER); if ((typer >> 32) == aff) { u64 offset = ptr - region->redist_base; + raw_spin_lock_init(&gic_data_rdist()->rd_lock); gic_data_rdist_rd_base() = ptr; gic_data_rdist()->phys_base = region->phys_base + offset; @@ -512,32 +1067,77 @@ static int gic_populate_rdist(void) return -ENODEV; } -static int __gic_update_vlpi_properties(struct redist_region *region, - void __iomem *ptr) +static int __gic_update_rdist_properties(struct redist_region *region, + void __iomem *ptr) { u64 typer = gic_read_typer(ptr + GICR_TYPER); + u32 ctlr = readl_relaxed(ptr + GICR_CTLR); + + /* Boot-time cleanup */ + if ((typer & GICR_TYPER_VLPIS) && (typer & GICR_TYPER_RVPEID)) { + u64 val; + + /* Deactivate any present vPE */ + val = gicr_read_vpendbaser(ptr + SZ_128K + GICR_VPENDBASER); + if (val & GICR_VPENDBASER_Valid) + gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast, + ptr + SZ_128K + GICR_VPENDBASER); + + /* Mark the VPE table as invalid */ + val = gicr_read_vpropbaser(ptr + SZ_128K + GICR_VPROPBASER); + val &= ~GICR_VPROPBASER_4_1_VALID; + gicr_write_vpropbaser(val, ptr + SZ_128K + GICR_VPROPBASER); + } + gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS); - gic_data.rdists.has_direct_lpi &= !!(typer & GICR_TYPER_DirectLPIS); + + /* + * TYPER.RVPEID implies some form of DirectLPI, no matter what the + * doc says... :-/ And CTLR.IR implies another subset of DirectLPI + * that the ITS driver can make use of for LPIs (and not VLPIs). + * + * These are 3 different ways to express the same thing, depending + * on the revision of the architecture and its relaxations over + * time. Just group them under the 'direct_lpi' banner. 
+ */ + gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID); + gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) | + !!(ctlr & GICR_CTLR_IR) | + gic_data.rdists.has_rvpeid); + gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY); + + /* Detect non-sensical configurations */ + if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) { + gic_data.rdists.has_direct_lpi = false; + gic_data.rdists.has_vlpis = false; + gic_data.rdists.has_rvpeid = false; + } + + gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr); return 1; } -static void gic_update_vlpi_properties(void) +static void gic_update_rdist_properties(void) { - gic_iterate_rdists(__gic_update_vlpi_properties); - pr_info("%sVLPI support, %sdirect LPI support\n", - !gic_data.rdists.has_vlpis ? "no " : "", - !gic_data.rdists.has_direct_lpi ? "no " : ""); + gic_data.ppi_nr = UINT_MAX; + gic_iterate_rdists(__gic_update_rdist_properties); + if (WARN_ON(gic_data.ppi_nr == UINT_MAX)) + gic_data.ppi_nr = 0; + pr_info("GICv3 features: %d PPIs%s%s\n", + gic_data.ppi_nr, + gic_data.has_rss ? ", RSS" : "", + gic_data.rdists.has_direct_lpi ? ", DirectLPI" : ""); + + if (gic_data.rdists.has_vlpis) + pr_info("GICv4 features: %s%s%s\n", + gic_data.rdists.has_direct_lpi ? "DirectLPI " : "", + gic_data.rdists.has_rvpeid ? "RVPEID " : "", + gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : ""); } -static void gic_cpu_sys_reg_init(void) +static void gic_cpu_sys_reg_enable(void) { - int i, cpu = smp_processor_id(); - u64 mpidr = cpu_logical_map(cpu); - u64 need_rss = MPIDR_RS(mpidr); - bool group0; - u32 val, pribits; - /* * Need to check that the SRE bit has actually been set. If * not, it means that SRE is disabled at EL2. We're going to @@ -548,28 +1148,34 @@ static void gic_cpu_sys_reg_init(void) if (!gic_enable_sre()) pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n"); - pribits = gic_read_ctlr(); - pribits &= ICC_CTLR_EL1_PRI_BITS_MASK; - pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT; - pribits++; +} - /* - * Let's find out if Group0 is under control of EL3 or not by - * setting the highest possible, non-zero priority in PMR. - * - * If SCR_EL3.FIQ is set, the priority gets shifted down in - * order for the CPU interface to set bit 7, and keep the - * actual priority in the non-secure range. In the process, it - * looses the least significant bit and the actual priority - * becomes 0x80. Reading it back returns 0, indicating that - * we're don't have access to Group0. - */ - write_gicreg(BIT(8 - pribits), ICC_PMR_EL1); - val = read_gicreg(ICC_PMR_EL1); - group0 = val != 0; +static void gic_cpu_sys_reg_init(void) +{ + int i, cpu = smp_processor_id(); + u64 mpidr = gic_cpu_to_affinity(cpu); + u64 need_rss = MPIDR_RS(mpidr); + bool group0; + u32 pribits; + + pribits = gic_get_pribits(); + + group0 = gic_has_group0(); /* Set priority mask register */ - write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1); + if (!gic_prio_masking_enabled()) { + write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1); + } else if (gic_supports_nmi()) { + /* + * Check that all CPUs use the same priority space. + * + * If there's a mismatch with the boot CPU, the system is + * likely to die as interrupt masking will not work properly on + * all CPUs. 
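The Group0 probe that gic_cpu_sys_reg_init() used to open-code (removed above) now lives in gic_has_group0(); its logic is easiest to see with numbers. This sketch models the architectural rules for pribits == 5 and SCR_EL3.FIQ == 1, illustrative arithmetic only:

static void example_group0_probe_math(void)
{
	u8 wr  = BIT(8 - 5);			/* 0x08: lowest implemented bit    */
	u8 sec = ((wr >> 1) | 0x80) & 0xf8;	/* 0x80: bits [2:0] don't exist    */
	u8 rd  = (u8)(sec << 1);		/* 0x00 comes back on the read     */

	WARN_ON(rd != 0);			/* zero read-back: EL3 owns Group0 */
}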
+ */ + WARN_ON(group0 != cpus_have_group0); + WARN_ON(gic_dist_security_disabled() != cpus_have_security_disabled); + } /* * Some firmwares hand over to the kernel with the BPR changed from @@ -594,8 +1200,10 @@ static void gic_cpu_sys_reg_init(void) case 7: write_gicreg(0, ICC_AP0R3_EL1); write_gicreg(0, ICC_AP0R2_EL1); + fallthrough; case 6: write_gicreg(0, ICC_AP0R1_EL1); + fallthrough; case 5: case 4: write_gicreg(0, ICC_AP0R0_EL1); @@ -609,8 +1217,10 @@ static void gic_cpu_sys_reg_init(void) case 7: write_gicreg(0, ICC_AP1R3_EL1); write_gicreg(0, ICC_AP1R2_EL1); + fallthrough; case 6: write_gicreg(0, ICC_AP1R1_EL1); + fallthrough; case 5: case 4: write_gicreg(0, ICC_AP1R0_EL1); @@ -628,11 +1238,11 @@ static void gic_cpu_sys_reg_init(void) for_each_online_cpu(i) { bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu); - need_rss |= MPIDR_RS(cpu_logical_map(i)); + need_rss |= MPIDR_RS(gic_cpu_to_affinity(i)); if (need_rss && (!have_rss)) pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n", cpu, (unsigned long)mpidr, - i, (unsigned long)cpu_logical_map(i)); + i, (unsigned long)gic_cpu_to_affinity(i)); } /** @@ -650,7 +1260,7 @@ static bool gicv3_nolpi; static int __init gicv3_nolpi_cfg(char *buf) { - return strtobool(buf, &gicv3_nolpi); + return kstrtobool(buf, &gicv3_nolpi); } early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg); @@ -664,6 +1274,7 @@ static int gic_dist_supports_lpis(void) static void gic_cpu_init(void) { void __iomem *rbase; + int i; /* Register ourselves with the rest of the world */ if (gic_populate_rdist()) @@ -671,12 +1282,19 @@ static void gic_cpu_init(void) gic_enable_redist(true); + WARN((gic_data.ppi_nr > 16 || GIC_ESPI_NR != 0) && + !(gic_read_ctlr() & ICC_CTLR_EL1_ExtRange), + "Distributor has extended ranges, but CPU%d doesn't\n", + smp_processor_id()); + rbase = gic_data_rdist_sgi_base(); /* Configure SGIs/PPIs as non-secure Group-1 */ - writel_relaxed(~0, rbase + GICR_IGROUPR0); + for (i = 0; i < gic_data.ppi_nr + SGI_NR; i += 32) + writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8); - gic_cpu_config(rbase, gic_redist_wait_for_rwp); + gic_cpu_config(rbase, gic_data.ppi_nr + SGI_NR, dist_prio_irq); + gic_redist_wait_for_rwp(); /* initialise system registers */ gic_cpu_sys_reg_init(); @@ -687,8 +1305,21 @@ static void gic_cpu_init(void) #define MPIDR_TO_SGI_RS(mpidr) (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT) #define MPIDR_TO_SGI_CLUSTER_ID(mpidr) ((mpidr) & ~0xFUL) +/* + * gic_starting_cpu() is called after the last point where cpuhp is allowed + * to fail. So pre-check for problems earlier.
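This split is visible in gic_smp_init() further down: a CPUHP_BP_PREPARE_DYN callback that may still veto the CPU, and the CPUHP_AP_IRQ_GIC_STARTING callback that must not fail. The registration pattern in isolation (hypothetical names):

#include <linux/cpuhotplug.h>

static struct cpumask demo_broken_mask;

static int demo_precheck(unsigned int cpu)
{
	/* Runs early, on a control CPU: an error here aborts bring-up */
	return cpumask_test_cpu(cpu, &demo_broken_mask) ? -EINVAL : 0;
}

static int demo_starting(unsigned int cpu)
{
	/* Runs on the incoming CPU, past the point of no return */
	return 0;
}

static void demo_register(void)
{
	cpuhp_setup_state_nocalls(CPUHP_BP_PREPARE_DYN, "demo:precheck",
				  demo_precheck, NULL);
	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING, "demo:starting",
				  demo_starting, NULL);
}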
+ */ +static int gic_check_rdist(unsigned int cpu) +{ + if (cpumask_test_cpu(cpu, &broken_rdists)) + return -EINVAL; + + return 0; +} + static int gic_starting_cpu(unsigned int cpu) { + gic_cpu_sys_reg_enable(); gic_cpu_init(); if (gic_dist_supports_lpis()) @@ -701,9 +1332,11 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, unsigned long cluster_id) { int next_cpu, cpu = *base_cpu; - unsigned long mpidr = cpu_logical_map(cpu); + unsigned long mpidr; u16 tlist = 0; + mpidr = gic_cpu_to_affinity(cpu); + while (cpu < nr_cpu_ids) { tlist |= 1 << (mpidr & 0xf); @@ -712,7 +1345,7 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, goto out; cpu = next_cpu; - mpidr = cpu_logical_map(cpu); + mpidr = gic_cpu_to_affinity(cpu); if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) { cpu--; @@ -743,43 +1376,60 @@ static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq) gic_write_sgi1r(val); } -static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) +static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask) { int cpu; - if (WARN_ON(irq >= 16)) + if (WARN_ON(d->hwirq >= 16)) return; /* * Ensure that stores to Normal memory are visible to the * other CPUs before issuing the IPI. */ - wmb(); + dsb(ishst); for_each_cpu(cpu, mask) { - u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu)); + u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(gic_cpu_to_affinity(cpu)); u16 tlist; tlist = gic_compute_target_list(&cpu, mask, cluster_id); - gic_send_sgi(cluster_id, tlist, irq); + gic_send_sgi(cluster_id, tlist, d->hwirq); } /* Force the above writes to ICC_SGI1R_EL1 to be executed */ isb(); } -static void gic_smp_init(void) +static void __init gic_smp_init(void) { - set_smp_cross_call(gic_raise_softirq); + struct irq_fwspec sgi_fwspec = { + .fwnode = gic_data.fwnode, + .param_count = 1, + }; + int base_sgi; + + cpuhp_setup_state_nocalls(CPUHP_BP_PREPARE_DYN, + "irqchip/arm/gicv3:checkrdist", + gic_check_rdist, NULL); + cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING, "irqchip/arm/gicv3:starting", gic_starting_cpu, NULL); + + /* Register all 8 non-secure SGIs */ + base_sgi = irq_domain_alloc_irqs(gic_data.domain, 8, NUMA_NO_NODE, &sgi_fwspec); + if (WARN_ON(base_sgi <= 0)) + return; + + set_smp_ipi_range(base_sgi, 8); } static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force) { unsigned int cpu; + u32 offset, index; void __iomem *reg; int enabled; u64 val; @@ -800,8 +1450,9 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, if (enabled) gic_mask_irq(d); - reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8); - val = gic_mpidr_to_affinity(cpu_logical_map(cpu)); + offset = convert_offset_index(d, GICD_IROUTER, &index); + reg = gic_dist_base(d) + offset + (index * 8); + val = gic_cpu_to_affinity(cpu); gic_write_irouter(val, reg); @@ -811,8 +1462,6 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, */ if (enabled) gic_unmask_irq(d); - else - gic_dist_wait_for_rwp(); irq_data_update_effective_affinity(d, cpumask_of(cpu)); @@ -820,22 +1469,23 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, } #else #define gic_set_affinity NULL +#define gic_ipi_send_mask NULL #define gic_smp_init() do { } while(0) #endif -#ifdef CONFIG_CPU_PM -/* Check whether it's single security state view */ -static bool gic_dist_security_disabled(void) +static int gic_retrigger(struct irq_data *data) { 
- return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS; + return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true); } +#ifdef CONFIG_CPU_PM static int gic_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd, void *v) { - if (cmd == CPU_PM_EXIT) { + if (cmd == CPU_PM_EXIT || cmd == CPU_PM_ENTER_FAILED) { if (gic_dist_security_disabled()) gic_enable_redist(true); + gic_cpu_sys_reg_enable(); gic_cpu_sys_reg_init(); } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) { gic_write_grpen1(0); @@ -864,8 +1514,12 @@ static struct irq_chip gic_chip = { .irq_eoi = gic_eoi_irq, .irq_set_type = gic_set_type, .irq_set_affinity = gic_set_affinity, + .irq_retrigger = gic_retrigger, .irq_get_irqchip_state = gic_irq_get_irqchip_state, .irq_set_irqchip_state = gic_irq_set_irqchip_state, + .irq_nmi_setup = gic_irq_nmi_setup, + .irq_nmi_teardown = gic_irq_nmi_teardown, + .ipi_send_mask = gic_ipi_send_mask, .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND, @@ -878,66 +1532,71 @@ static struct irq_chip gic_eoimode1_chip = { .irq_eoi = gic_eoimode1_eoi_irq, .irq_set_type = gic_set_type, .irq_set_affinity = gic_set_affinity, + .irq_retrigger = gic_retrigger, .irq_get_irqchip_state = gic_irq_get_irqchip_state, .irq_set_irqchip_state = gic_irq_set_irqchip_state, .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity, + .irq_nmi_setup = gic_irq_nmi_setup, + .irq_nmi_teardown = gic_irq_nmi_teardown, + .ipi_send_mask = gic_ipi_send_mask, .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND, }; -#define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer)) - static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { struct irq_chip *chip = &gic_chip; + struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq)); if (static_branch_likely(&supports_deactivate_key)) chip = &gic_eoimode1_chip; - /* SGIs are private to the core kernel */ - if (hw < 16) - return -EPERM; - /* Nothing here */ - if (hw >= gic_data.irq_nr && hw < 8192) - return -EPERM; - /* Off limits */ - if (hw >= GIC_ID_NR) - return -EPERM; - - /* PPIs */ - if (hw < 32) { + switch (__get_intid_range(hw)) { + case SGI_RANGE: + case PPI_RANGE: + case EPPI_RANGE: irq_set_percpu_devid(irq); irq_domain_set_info(d, irq, hw, chip, d->host_data, handle_percpu_devid_irq, NULL, NULL); - irq_set_status_flags(irq, IRQ_NOAUTOEN); - } - /* SPIs */ - if (hw >= 32 && hw < gic_data.irq_nr) { + break; + + case SPI_RANGE: + case ESPI_RANGE: irq_domain_set_info(d, irq, hw, chip, d->host_data, handle_fasteoi_irq, NULL, NULL); irq_set_probe(irq); - irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq))); - } - /* LPIs */ - if (hw >= 8192 && hw < GIC_ID_NR) { + irqd_set_single_target(irqd); + break; + + case LPI_RANGE: if (!gic_dist_supports_lpis()) return -EPERM; irq_domain_set_info(d, irq, hw, chip, d->host_data, handle_fasteoi_irq, NULL, NULL); + break; + + default: + return -EPERM; } + /* Prevents SW retriggers which mess up the ACK/EOI ordering */ + irqd_set_handle_enforce_irqctx(irqd); return 0; } -#define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1) - static int gic_irq_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec, unsigned long *hwirq, unsigned int *type) { + if (fwspec->param_count == 1 && fwspec->param[0] < 16) { + *hwirq = fwspec->param[0]; + *type = IRQ_TYPE_EDGE_RISING; + return 0; + } + if (is_of_node(fwspec->fwnode)) { if (fwspec->param_count < 3) return -EINVAL; @@ -947,9 +1606,14 @@ 
static int gic_irq_domain_translate(struct irq_domain *d, *hwirq = fwspec->param[1] + 32; break; case 1: /* PPI */ - case GIC_IRQ_TYPE_PARTITION: *hwirq = fwspec->param[1] + 16; break; + case 2: /* ESPI */ + *hwirq = fwspec->param[1] + ESPI_BASE_INTID; + break; + case 3: /* EPPI */ + *hwirq = fwspec->param[1] + EPPI_BASE_INTID; + break; case GIC_IRQ_TYPE_LPI: /* LPI */ *hwirq = fwspec->param[1]; break; @@ -961,10 +1625,8 @@ static int gic_irq_domain_translate(struct irq_domain *d, /* * Make it clear that broken DTs are... broken. - * Partitionned PPIs are an unfortunate exception. */ - WARN_ON(*type == IRQ_TYPE_NONE && - fwspec->param[0] != GIC_IRQ_TYPE_PARTITION); + WARN_ON(*type == IRQ_TYPE_NONE); return 0; } @@ -972,6 +1634,12 @@ static int gic_irq_domain_translate(struct irq_domain *d, if(fwspec->param_count != 2) return -EINVAL; + if (fwspec->param[0] < 16) { + pr_err(FW_BUG "Illegal GSI%d translation request\n", + fwspec->param[0]); + return -EINVAL; + } + *hwirq = fwspec->param[0]; *type = fwspec->param[1]; @@ -1019,77 +1687,293 @@ static int gic_irq_domain_select(struct irq_domain *d, struct irq_fwspec *fwspec, enum irq_domain_bus_token bus_token) { + irq_hw_number_t hwirq; + unsigned int type; + int ret; + /* Not for us */ - if (fwspec->fwnode != d->fwnode) + if (fwspec->fwnode != d->fwnode) return 0; + /* Handle pure domain searches */ + if (!fwspec->param_count) + return d->bus_token == bus_token; + /* If this is not DT, then we have a single domain */ if (!is_of_node(fwspec->fwnode)) return 1; - /* - * If this is a PPI and we have a 4th (non-null) parameter, - * then we need to match the partition domain. - */ - if (fwspec->param_count >= 4 && - fwspec->param[0] == 1 && fwspec->param[3] != 0) - return d == partition_get_domain(gic_data.ppi_descs[fwspec->param[1]]); + ret = gic_irq_domain_translate(d, fwspec, &hwirq, &type); + if (WARN_ON_ONCE(ret)) + return 0; return d == gic_data.domain; } +static int gic_irq_get_fwspec_info(struct irq_fwspec *fwspec, struct irq_fwspec_info *info) +{ + const struct cpumask *mask = NULL; + + info->flags = 0; + info->affinity = NULL; + + /* ACPI is not capable of describing PPI affinity -- yet */ + if (!is_of_node(fwspec->fwnode)) + return 0; + + /* If the specifier provides an affinity, use it */ + if (fwspec->param_count == 4 && fwspec->param[3]) { + struct fwnode_handle *fw; + + switch (fwspec->param[0]) { + case 1: /* PPI */ + case 3: /* EPPI */ + break; + default: + return 0; + } + + fw = of_fwnode_handle(of_find_node_by_phandle(fwspec->param[3])); + if (!fw) + return -ENOENT; + + for (int i = 0; i < gic_data.nr_parts; i++) { + if (gic_data.parts[i].partition_id == fw) { + mask = &gic_data.parts[i].mask; + break; + } + } + + if (!mask) + return -ENOENT; + } else { + mask = cpu_possible_mask; + } + + info->affinity = mask; + info->flags = IRQ_FWSPEC_INFO_AFFINITY_VALID; + + return 0; +} + static const struct irq_domain_ops gic_irq_domain_ops = { .translate = gic_irq_domain_translate, .alloc = gic_irq_domain_alloc, .free = gic_irq_domain_free, .select = gic_irq_domain_select, + .get_fwspec_info = gic_irq_get_fwspec_info, }; -static int partition_domain_translate(struct irq_domain *d, - struct irq_fwspec *fwspec, - unsigned long *hwirq, - unsigned int *type) +static bool gic_enable_quirk_msm8996(void *data) { - struct device_node *np; - int ret; + struct gic_chip_data *d = data; - np = of_find_node_by_phandle(fwspec->param[3]); - if (WARN_ON(!np)) - return -EINVAL; + d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996; - ret = 
partition_translate_id(gic_data.ppi_descs[fwspec->param[1]], - of_node_to_fwnode(np)); - if (ret < 0) - return ret; + return true; +} - *hwirq = ret; - *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; +static bool gic_enable_quirk_cavium_38539(void *data) +{ + struct gic_chip_data *d = data; - return 0; + d->flags |= FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539; + + return true; } -static const struct irq_domain_ops partition_domain_ops = { - .translate = partition_domain_translate, - .select = gic_irq_domain_select, -}; +static bool gic_enable_quirk_hip06_07(void *data) +{ + struct gic_chip_data *d = data; -static bool gic_enable_quirk_msm8996(void *data) + /* + * HIP06 GICD_IIDR clashes with GIC-600 product number (despite + * not being an actual ARM implementation). The saving grace is + * that GIC-600 doesn't have ESPI, so nothing to do in that case. + * HIP07 doesn't even have a proper IIDR, and still pretends to + * have ESPI. In both cases, put them right. + */ + if (d->rdists.gicd_typer & GICD_TYPER_ESPI) { + /* Zero both ESPI and the RES0 field next to it... */ + d->rdists.gicd_typer &= ~GENMASK(9, 8); + return true; + } + + return false; +} + +#define T241_CHIPN_MASK GENMASK_ULL(45, 44) +#define T241_CHIP_GICDA_OFFSET 0x1580000 +#define SMCCC_SOC_ID_T241 0x036b0241 + +static bool gic_enable_quirk_nvidia_t241(void *data) +{ + s32 soc_id = arm_smccc_get_soc_id_version(); + unsigned long chip_bmask = 0; + phys_addr_t phys; + u32 i; + + /* Check JEP106 code for NVIDIA T241 chip (036b:0241) */ + if ((soc_id < 0) || (soc_id != SMCCC_SOC_ID_T241)) + return false; + + /* Find the chips based on GICR regions PHYS addr */ + for (i = 0; i < gic_data.nr_redist_regions; i++) { + chip_bmask |= BIT(FIELD_GET(T241_CHIPN_MASK, + (u64)gic_data.redist_regions[i].phys_base)); + } + + if (hweight32(chip_bmask) < 3) + return false; + + /* Setup GICD alias regions */ + for (i = 0; i < ARRAY_SIZE(t241_dist_base_alias); i++) { + if (chip_bmask & BIT(i)) { + phys = gic_data.dist_phys_base + T241_CHIP_GICDA_OFFSET; + phys |= FIELD_PREP(T241_CHIPN_MASK, i); + t241_dist_base_alias[i] = ioremap(phys, SZ_64K); + WARN_ON_ONCE(!t241_dist_base_alias[i]); + } + } + static_branch_enable(&gic_nvidia_t241_erratum); + return true; +} + +static bool gic_enable_quirk_asr8601(void *data) { struct gic_chip_data *d = data; - d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996; + d->flags |= FLAGS_WORKAROUND_ASR_ERRATUM_8601001; return true; } -static int __init gic_init_bases(void __iomem *dist_base, +static bool gic_enable_quirk_arm64_2941627(void *data) +{ + static_branch_enable(&gic_arm64_2941627_erratum); + return true; +} + +static bool gic_enable_quirk_rk3399(void *data) +{ + struct gic_chip_data *d = data; + + if (of_machine_is_compatible("rockchip,rk3399")) { + d->flags |= FLAGS_WORKAROUND_INSECURE; + return true; + } + + return false; +} + +static bool rd_set_non_coherent(void *data) +{ + struct gic_chip_data *d = data; + + d->rdists.flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE; + return true; +} + +static const struct gic_quirk gic_quirks[] = { + { + .desc = "GICv3: Qualcomm MSM8996 broken firmware", + .compatible = "qcom,msm8996-gic-v3", + .init = gic_enable_quirk_msm8996, + }, + { + .desc = "GICv3: ASR erratum 8601001", + .compatible = "asr,asr8601-gic-v3", + .init = gic_enable_quirk_asr8601, + }, + { + .desc = "GICv3: HIP06 erratum 161010803", + .iidr = 0x0204043b, + .mask = 0xffffffff, + .init = gic_enable_quirk_hip06_07, + }, + { + .desc = "GICv3: HIP07 erratum 161010803", + .iidr = 0x00000000, + .mask = 0xffffffff, + .init 
= gic_enable_quirk_hip06_07, + }, + { + /* + * Reserved register accesses generate a Synchronous + * External Abort. This erratum applies to: + * - ThunderX: CN88xx + * - OCTEON TX: CN83xx, CN81xx + * - OCTEON TX2: CN93xx, CN96xx, CN98xx, CNF95xx* + */ + .desc = "GICv3: Cavium erratum 38539", + .iidr = 0xa000034c, + .mask = 0xe8f00fff, + .init = gic_enable_quirk_cavium_38539, + }, + { + .desc = "GICv3: NVIDIA erratum T241-FABRIC-4", + .iidr = 0x0402043b, + .mask = 0xffffffff, + .init = gic_enable_quirk_nvidia_t241, + }, + { + /* + * GIC-700: 2941627 workaround - IP variant [0,1] + * + */ + .desc = "GICv3: ARM64 erratum 2941627", + .iidr = 0x0400043b, + .mask = 0xff0e0fff, + .init = gic_enable_quirk_arm64_2941627, + }, + { + /* + * GIC-700: 2941627 workaround - IP variant [2] + */ + .desc = "GICv3: ARM64 erratum 2941627", + .iidr = 0x0402043b, + .mask = 0xff0f0fff, + .init = gic_enable_quirk_arm64_2941627, + }, + { + .desc = "GICv3: non-coherent attribute", + .property = "dma-noncoherent", + .init = rd_set_non_coherent, + }, + { + .desc = "GICv3: Insecure RK3399 integration", + .iidr = 0x0000043b, + .mask = 0xff000fff, + .init = gic_enable_quirk_rk3399, + }, + { + } +}; + +static void gic_enable_nmi_support(void) +{ + if (!gic_prio_masking_enabled() || nmi_support_forbidden) + return; + + pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n", + gic_has_relaxed_pmr_sync() ? "relaxed" : "forced"); + + static_branch_enable(&supports_pseudo_nmis); + + if (static_branch_likely(&supports_deactivate_key)) + gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI; + else + gic_chip.flags |= IRQCHIP_SUPPORTS_NMI; +} + +static int __init gic_init_bases(phys_addr_t dist_phys_base, + void __iomem *dist_base, struct redist_region *rdist_regs, u32 nr_redist_regions, u64 redist_stride, struct fwnode_handle *handle) { u32 typer; - int gic_irqs; int err; if (!is_hyp_mode_available()) @@ -1099,6 +1983,7 @@ static int __init gic_init_bases(void __iomem *dist_base, pr_info("GIC: Using split EOI/Deactivate mode\n"); gic_data.fwnode = handle; + gic_data.dist_phys_base = dist_phys_base; gic_data.dist_base = dist_base; gic_data.redist_regions = rdist_regs; gic_data.nr_redist_regions = nr_redist_regions; @@ -1106,30 +1991,42 @@ static int __init gic_init_bases(void __iomem *dist_base, /* * Find out how many interrupts are supported. - * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI) */ typer = readl_relaxed(gic_data.dist_base + GICD_TYPER); gic_data.rdists.gicd_typer = typer; - gic_irqs = GICD_TYPER_IRQS(typer); - if (gic_irqs > 1020) - gic_irqs = 1020; - gic_data.irq_nr = gic_irqs; + + gic_enable_quirks(readl_relaxed(gic_data.dist_base + GICD_IIDR), + gic_quirks, &gic_data); + + pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32); + pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR); + + /* + * ThunderX1 explodes on reading GICD_TYPER2, in violation of the + * architecture spec (which says that reserved registers are RES0). 
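The iidr/mask pairs in the quirk table above are matched against the distributor's GICD_IIDR before the corresponding init() hook runs. Below is a minimal sketch of the presumed matching logic, assuming gic_enable_quirks() applies an entry when (GICD_IIDR & mask) == iidr; example_apply_quirks() is a hypothetical name, the real helper lives in irq-gic-common.c:

/*
 * Hypothetical sketch of IIDR-based quirk matching; the masked
 * comparison is an assumption inferred from the table layout above.
 */
static void example_apply_quirks(u32 iidr, const struct gic_quirk *quirks,
				 void *data)
{
	for (; quirks->desc; quirks++) {
		/* Entries keyed on a DT compatible/property are matched elsewhere */
		if (!quirks->mask)
			continue;
		if ((iidr & quirks->mask) != quirks->iidr)
			continue;
		if (quirks->init(data))
			pr_info("enabled workaround: %s\n", quirks->desc);
	}
}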
+ */ + if (!(gic_data.flags & FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539)) + gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2); gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops, &gic_data); - irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED); gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist)); - gic_data.rdists.has_vlpis = true; - gic_data.rdists.has_direct_lpi = true; + if (!static_branch_unlikely(&gic_nvidia_t241_erratum)) { + /* Disable GICv4.x features for the erratum T241-FABRIC-4 */ + gic_data.rdists.has_rvpeid = true; + gic_data.rdists.has_vlpis = true; + gic_data.rdists.has_direct_lpi = true; + gic_data.rdists.has_vpend_valid_dirty = true; + } if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) { err = -ENOMEM; goto out_free; } + irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED); + gic_data.has_rss = !!(typer & GICD_TYPER_RSS); - pr_info("Distributor has %sRange Selector support\n", - gic_data.has_rss ? "" : "no "); if (typer & GICD_TYPER_MBIS) { err = mbi_init(handle, gic_data.domain); @@ -1139,16 +2036,23 @@ static int __init gic_init_bases(void __iomem *dist_base, set_handle_irq(gic_handle_irq); - gic_update_vlpi_properties(); + gic_update_rdist_properties(); - gic_smp_init(); + gic_cpu_sys_reg_enable(); + gic_prio_init(); gic_dist_init(); gic_cpu_init(); + gic_enable_nmi_support(); + gic_smp_init(); gic_cpu_pm_init(); if (gic_dist_supports_lpis()) { - its_init(handle, &gic_data.rdists, gic_data.domain); + its_init(handle, &gic_data.rdists, gic_data.domain, dist_prio_irq); its_cpu_init(); + its_lpi_memreserve_init(); + } else { + if (IS_ENABLED(CONFIG_ARM_GIC_V2M)) + gicv2m_init(handle, gic_data.domain); } return 0; @@ -1183,7 +2087,6 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node) return; nr_parts = of_get_child_count(parts_node); - if (!nr_parts) goto out_put_node; @@ -1197,7 +2100,7 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node) part = &parts[part_idx]; - part->partition_id = of_node_to_fwnode(child_part); + part->partition_id = of_fwnode_handle(child_part); pr_info("GIC: PPI partition %pOFn[%d] { ", child_part, part_idx); @@ -1221,51 +2124,32 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node) continue; cpu = of_cpu_node_to_id(cpu_node); - if (WARN_ON(cpu < 0)) + if (WARN_ON(cpu < 0)) { + of_node_put(cpu_node); continue; + } pr_cont("%pOF[%d] ", cpu_node, cpu); cpumask_set_cpu(cpu, &part->mask); + of_node_put(cpu_node); } pr_cont("}\n"); part_idx++; } - for (i = 0; i < 16; i++) { - unsigned int irq; - struct partition_desc *desc; - struct irq_fwspec ppi_fwspec = { - .fwnode = gic_data.fwnode, - .param_count = 3, - .param = { - [0] = GIC_IRQ_TYPE_PARTITION, - [1] = i, - [2] = IRQ_TYPE_NONE, - }, - }; - - irq = irq_create_fwspec_mapping(&ppi_fwspec); - if (WARN_ON(!irq)) - continue; - desc = partition_create_desc(gic_data.fwnode, parts, nr_parts, - irq, &partition_domain_ops); - if (WARN_ON(!desc)) - continue; - - gic_data.ppi_descs[i] = desc; - } + gic_data.parts = parts; + gic_data.nr_parts = nr_parts; out_put_node: of_node_put(parts_node); } -static void __init gic_of_setup_kvm_info(struct device_node *node) +static void __init gic_of_setup_kvm_info(struct device_node *node, u32 nr_redist_regions) { int ret; struct resource r; - u32 gicv_idx; gic_v3_kvm_info.type = GIC_V3; @@ -1273,43 +2157,58 @@ static void __init gic_of_setup_kvm_info(struct device_node *node) if 
(!gic_v3_kvm_info.maint_irq) return; - if (of_property_read_u32(node, "#redistributor-regions", - &gicv_idx)) - gicv_idx = 1; - - gicv_idx += 3; /* Also skip GICD, GICC, GICH */ - ret = of_address_to_resource(node, gicv_idx, &r); + /* Also skip GICD, GICC, GICH */ + ret = of_address_to_resource(node, nr_redist_regions + 3, &r); if (!ret) gic_v3_kvm_info.vcpu = r; gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis; - gic_set_kvm_info(&gic_v3_kvm_info); + gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid; + vgic_set_kvm_info(&gic_v3_kvm_info); } -static const struct gic_quirk gic_quirks[] = { - { - .desc = "GICv3: Qualcomm MSM8996 broken firmware", - .compatible = "qcom,msm8996-gic-v3", - .init = gic_enable_quirk_msm8996, - }, - { - } -}; +static void gic_request_region(resource_size_t base, resource_size_t size, + const char *name) +{ + if (!request_mem_region(base, size, name)) + pr_warn_once(FW_BUG "%s region %pa has overlapping address\n", + name, &base); +} + +static void __iomem *gic_of_iomap(struct device_node *node, int idx, + const char *name, struct resource *res) +{ + void __iomem *base; + int ret; + + ret = of_address_to_resource(node, idx, res); + if (ret) + return IOMEM_ERR_PTR(ret); + + gic_request_region(res->start, resource_size(res), name); + base = of_iomap(node, idx); + + return base ?: IOMEM_ERR_PTR(-ENOMEM); +} static int __init gic_of_init(struct device_node *node, struct device_node *parent) { + phys_addr_t dist_phys_base; void __iomem *dist_base; struct redist_region *rdist_regs; + struct resource res; u64 redist_stride; u32 nr_redist_regions; int err, i; - dist_base = of_iomap(node, 0); - if (!dist_base) { + dist_base = gic_of_iomap(node, 0, "GICD", &res); + if (IS_ERR(dist_base)) { pr_err("%pOF: unable to map gic dist registers\n", node); - return -ENXIO; + return PTR_ERR(dist_base); } + dist_phys_base = res.start; + err = gic_validate_dist_version(dist_base); if (err) { pr_err("%pOF: no distributor detected, giving up\n", node); @@ -1327,12 +2226,8 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare } for (i = 0; i < nr_redist_regions; i++) { - struct resource res; - int ret; - - ret = of_address_to_resource(node, 1 + i, &res); - rdist_regs[i].redist_base = of_iomap(node, 1 + i); - if (ret || !rdist_regs[i].redist_base) { + rdist_regs[i].redist_base = gic_of_iomap(node, 1 + i, "GICR", &res); + if (IS_ERR(rdist_regs[i].redist_base)) { pr_err("%pOF: couldn't map region %d\n", node, i); err = -ENODEV; goto out_unmap_rdist; @@ -1345,20 +2240,20 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare gic_enable_of_quirks(node, gic_quirks, &gic_data); - err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions, - redist_stride, &node->fwnode); + err = gic_init_bases(dist_phys_base, dist_base, rdist_regs, + nr_redist_regions, redist_stride, &node->fwnode); if (err) goto out_unmap_rdist; gic_populate_ppi_partitions(node); if (static_branch_likely(&supports_deactivate_key)) - gic_of_setup_kvm_info(node); + gic_of_setup_kvm_info(node, nr_redist_regions); return 0; out_unmap_rdist: for (i = 0; i < nr_redist_regions; i++) - if (rdist_regs[i].redist_base) + if (rdist_regs[i].redist_base && !IS_ERR(rdist_regs[i].redist_base)) iounmap(rdist_regs[i].redist_base); kfree(rdist_regs); out_unmap_dist: @@ -1375,6 +2270,7 @@ static struct struct redist_region *redist_regs; u32 nr_redist_regions; bool single_redist; + int enabled_rdists; u32 maint_irq; int maint_irq_mode; phys_addr_t vcpu_base; @@ -1392,7 +2288,7 @@ 
gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base) } static int __init -gic_acpi_parse_madt_redist(struct acpi_subtable_header *header, +gic_acpi_parse_madt_redist(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_generic_redistributor *redist = @@ -1405,12 +2301,18 @@ gic_acpi_parse_madt_redist(struct acpi_subtable_header *header, return -ENOMEM; } + if (acpi_get_madt_revision() >= 7 && + (redist->flags & ACPI_MADT_GICR_NON_COHERENT)) + gic_data.rdists.flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE; + + gic_request_region(redist->base_address, redist->length, "GICR"); + gic_acpi_register_redist(redist->base_address, redist_base); return 0; } static int __init -gic_acpi_parse_madt_gicc(struct acpi_subtable_header *header, +gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_generic_interrupt *gicc = @@ -1419,13 +2321,33 @@ gic_acpi_parse_madt_gicc(struct acpi_subtable_header *header, u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2; void __iomem *redist_base; - /* GICC entry which has !ACPI_MADT_ENABLED is not unusable so skip */ - if (!(gicc->flags & ACPI_MADT_ENABLED)) + /* Neither enabled nor online capable means it doesn't exist, skip it */ + if (!(gicc->flags & (ACPI_MADT_ENABLED | ACPI_MADT_GICC_ONLINE_CAPABLE))) + return 0; + + /* + * Capable but disabled CPUs can be brought online later. What about + * the redistributor? ACPI doesn't want to say! + * Virtual hotplug systems can use the MADT's "always-on" GICR entries. + * Otherwise, prevent such CPUs from being brought online. + */ + if (!(gicc->flags & ACPI_MADT_ENABLED)) { + int cpu = get_cpu_for_acpi_id(gicc->uid); + + pr_warn("CPU %u's redistributor is inaccessible: this CPU can't be brought online\n", cpu); + if (cpu >= 0) + cpumask_set_cpu(cpu, &broken_rdists); return 0; + } redist_base = ioremap(gicc->gicr_base_address, size); if (!redist_base) return -ENOMEM; + gic_request_region(gicc->gicr_base_address, size, "GICR"); + + if (acpi_get_madt_revision() >= 7 && + (gicc->flags & ACPI_MADT_GICC_NON_COHERENT)) + gic_data.rdists.flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE; gic_acpi_register_redist(gicc->gicr_base_address, redist_base); return 0; @@ -1452,14 +2374,14 @@ static int __init gic_acpi_collect_gicr_base(void) return -ENODEV; } -static int __init gic_acpi_match_gicr(struct acpi_subtable_header *header, +static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header, const unsigned long end) { /* Subtable presence means that redist exists, that's it */ return 0; } -static int __init gic_acpi_match_gicc(struct acpi_subtable_header *header, +static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_generic_interrupt *gicc = @@ -1467,19 +2389,15 @@ static int __init gic_acpi_match_gicc(struct acpi_subtable_header *header, /* * If GICC is enabled and has valid gicr base address, then it means - * GICR base is presented via GICC - */ - if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) - return 0; - - /* - * It's perfectly valid firmware can pass disabled GICC entry, driver - * should not treat as errors, skip the entry instead of probe fail.
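The two flag checks above amount to a small decision table over a MADT GICC entry; summarised here as a reading of the code, not wording from the ACPI spec:

/*
 * ENABLED  ONLINE_CAPABLE   resulting action
 *    1           x          map and register the redistributor
 *    0           1          CPU may be hotplugged later, but its GICR is
 *                           unreachable: mark it in broken_rdists so it
 *                           is never brought online
 *    0           0          entry describes no usable CPU: skip it
 */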
*/ - if (!(gicc->flags & ACPI_MADT_ENABLED)) - return 0; + if (gicc->flags & ACPI_MADT_ENABLED && gicc->gicr_base_address) + acpi_data.enabled_rdists++; - return -ENODEV; + return 0; } static int __init gic_acpi_count_gicr_regions(void) @@ -1500,8 +2418,10 @@ static int __init gic_acpi_count_gicr_regions(void) count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, gic_acpi_match_gicc, 0); - if (count > 0) + if (count > 0) { acpi_data.single_redist = true; + count = acpi_data.enabled_rdists; + } return count; } @@ -1525,7 +2445,7 @@ static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header, return true; } -static int __init gic_acpi_parse_virt_madt_gicc(struct acpi_subtable_header *header, +static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_generic_interrupt *gicc = @@ -1533,8 +2453,8 @@ static int __init gic_acpi_parse_virt_madt_gicc(struct acpi_subtable_header *hea int maint_irq_mode; static int first_madt = true; - /* Skip unusable CPUs */ - if (!(gicc->flags & ACPI_MADT_ENABLED)) + if (!(gicc->flags & + (ACPI_MADT_ENABLED | ACPI_MADT_GICC_ONLINE_CAPABLE))) return 0; maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ? @@ -1603,14 +2523,21 @@ static void __init gic_acpi_setup_kvm_info(void) } gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis; - gic_set_kvm_info(&gic_v3_kvm_info); + gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid; + vgic_set_kvm_info(&gic_v3_kvm_info); +} + +static struct fwnode_handle *gsi_domain_handle; + +static struct fwnode_handle *gic_v3_get_gsi_domain_id(u32 gsi) +{ + return gsi_domain_handle; } static int __init -gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end) +gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_generic_distributor *dist; - struct fwnode_handle *domain_handle; size_t size; int i, err; @@ -1622,6 +2549,7 @@ gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end) pr_err("Unable to map GICD registers\n"); return -ENOMEM; } + gic_request_region(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE, "GICD"); err = gic_validate_dist_version(acpi_data.dist_base); if (err) { @@ -1641,18 +2569,19 @@ gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end) if (err) goto out_redist_unmap; - domain_handle = irq_domain_alloc_fwnode(acpi_data.dist_base); - if (!domain_handle) { + gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address); + if (!gsi_domain_handle) { err = -ENOMEM; goto out_redist_unmap; } - err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs, - acpi_data.nr_redist_regions, 0, domain_handle); + err = gic_init_bases(dist->base_address, acpi_data.dist_base, + acpi_data.redist_regs, acpi_data.nr_redist_regions, + 0, gsi_domain_handle); if (err) goto out_fwhandle_free; - acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle); + acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, gic_v3_get_gsi_domain_id); if (static_branch_likely(&supports_deactivate_key)) gic_acpi_setup_kvm_info(); @@ -1660,7 +2589,7 @@ gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end) return 0; out_fwhandle_free: - irq_domain_free_fwnode(domain_handle); + irq_domain_free_fwnode(gsi_domain_handle); out_redist_unmap: for (i = 0; i < acpi_data.nr_redist_regions; i++) if (acpi_data.redist_regs[i].redist_base) diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c index dba9d67cb9c1..8455b4a5fbb0 100644 --- 
a/drivers/irqchip/irq-gic-v4.c +++ b/drivers/irqchip/irq-gic-v4.c @@ -1,24 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2016,2017 ARM Limited, All Rights Reserved. * Author: Marc Zyngier <marc.zyngier@arm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/msi.h> +#include <linux/pid.h> #include <linux/sched.h> #include <linux/irqchip/arm-gic-v4.h> @@ -96,6 +86,74 @@ static struct irq_domain *gic_domain; static const struct irq_domain_ops *vpe_domain_ops; +static const struct irq_domain_ops *sgi_domain_ops; + +#ifdef CONFIG_ARM64 +#include <asm/cpufeature.h> + +bool gic_cpuif_has_vsgi(void) +{ + unsigned long fld, reg = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); + + fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64PFR0_EL1_GIC_SHIFT); + + return fld >= ID_AA64PFR0_EL1_GIC_V4P1; +} +#else +bool gic_cpuif_has_vsgi(void) +{ + return false; +} +#endif + +static bool has_v4_1(void) +{ + return !!sgi_domain_ops; +} + +static bool has_v4_1_sgi(void) +{ + return has_v4_1() && gic_cpuif_has_vsgi(); +} + +static int its_alloc_vcpu_sgis(struct its_vpe *vpe, int idx) +{ + char *name; + int sgi_base; + + if (!has_v4_1_sgi()) + return 0; + + name = kasprintf(GFP_KERNEL, "GICv4-sgi-%d", task_pid_nr(current)); + if (!name) + goto err; + + vpe->fwnode = irq_domain_alloc_named_id_fwnode(name, idx); + if (!vpe->fwnode) + goto err; + + kfree(name); + name = NULL; + + vpe->sgi_domain = irq_domain_create_linear(vpe->fwnode, 16, + sgi_domain_ops, vpe); + if (!vpe->sgi_domain) + goto err; + + sgi_base = irq_domain_alloc_irqs(vpe->sgi_domain, 16, NUMA_NO_NODE, vpe); + if (sgi_base <= 0) + goto err; + + return 0; + +err: + if (vpe->sgi_domain) + irq_domain_remove(vpe->sgi_domain); + if (vpe->fwnode) + irq_domain_free_fwnode(vpe->fwnode); + kfree(name); + return -ENOMEM; +} int its_alloc_vcpu_irqs(struct its_vm *vm) { @@ -117,14 +175,18 @@ int its_alloc_vcpu_irqs(struct its_vm *vm) vm->vpes[i]->idai = true; } - vpe_base_irq = __irq_domain_alloc_irqs(vm->domain, -1, vm->nr_vpes, - NUMA_NO_NODE, vm, - false, NULL); + vpe_base_irq = irq_domain_alloc_irqs(vm->domain, vm->nr_vpes, + NUMA_NO_NODE, vm); if (vpe_base_irq <= 0) goto err; - for (i = 0; i < vm->nr_vpes; i++) + for (i = 0; i < vm->nr_vpes; i++) { + int ret; vm->vpes[i]->irq = vpe_base_irq + i; + ret = its_alloc_vcpu_sgis(vm->vpes[i], i); + if (ret) + goto err; + } return 0; @@ -137,8 +199,28 @@ err: return -ENOMEM; } +static void its_free_sgi_irqs(struct its_vm *vm) +{ + int i; + + if (!has_v4_1_sgi()) + return; + + for (i = 0; i < vm->nr_vpes; i++) { + unsigned int irq = irq_find_mapping(vm->vpes[i]->sgi_domain, 0); + + if (WARN_ON(!irq)) + continue; + + irq_domain_free_irqs(irq, 16); + irq_domain_remove(vm->vpes[i]->sgi_domain); + irq_domain_free_fwnode(vm->vpes[i]->fwnode); + } +} + void its_free_vcpu_irqs(struct its_vm *vm) { + its_free_sgi_irqs(vm); irq_domain_free_irqs(vm->vpes[0]->irq, 
vm->nr_vpes); irq_domain_remove(vm->domain); irq_domain_free_fwnode(vm->fwnode); @@ -149,17 +231,73 @@ static int its_send_vpe_cmd(struct its_vpe *vpe, struct its_cmd_info *info) return irq_set_vcpu_affinity(vpe->irq, info); } -int its_schedule_vpe(struct its_vpe *vpe, bool on) +int its_make_vpe_non_resident(struct its_vpe *vpe, bool db) { - struct its_cmd_info info; + struct irq_desc *desc = irq_to_desc(vpe->irq); + struct its_cmd_info info = { }; + int ret; WARN_ON(preemptible()); - info.cmd_type = on ? SCHEDULE_VPE : DESCHEDULE_VPE; + info.cmd_type = DESCHEDULE_VPE; + if (has_v4_1()) { + /* GICv4.1 can directly deal with doorbells */ + info.req_db = db; + } else { + /* Undo the nested disable_irq() calls... */ + while (db && irqd_irq_disabled(&desc->irq_data)) + enable_irq(vpe->irq); + } - return its_send_vpe_cmd(vpe, &info); + ret = its_send_vpe_cmd(vpe, &info); + if (!ret) + vpe->resident = false; + + vpe->ready = false; + + return ret; +} + +int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en) +{ + struct its_cmd_info info = { }; + int ret; + + WARN_ON(preemptible()); + + info.cmd_type = SCHEDULE_VPE; + if (has_v4_1()) { + info.g0en = g0en; + info.g1en = g1en; + } else { + /* Disable the doorbell, as we're about to enter the guest */ + disable_irq_nosync(vpe->irq); + } + + ret = its_send_vpe_cmd(vpe, &info); + if (!ret) + vpe->resident = true; + + return ret; } +int its_commit_vpe(struct its_vpe *vpe) +{ + struct its_cmd_info info = { + .cmd_type = COMMIT_VPE, + }; + int ret; + + WARN_ON(preemptible()); + + ret = its_send_vpe_cmd(vpe, &info); + if (!ret) + vpe->ready = true; + + return ret; +} + + int its_invall_vpe(struct its_vpe *vpe) { struct its_cmd_info info = { @@ -204,10 +342,10 @@ int its_get_vlpi(int irq, struct its_vlpi_map *map) return irq_set_vcpu_affinity(irq, &info); } -int its_unmap_vlpi(int irq) +void its_unmap_vlpi(int irq) { irq_clear_status_flags(irq, IRQ_DISABLE_UNLAZY); - return irq_set_vcpu_affinity(irq, NULL); + WARN_ON_ONCE(irq_set_vcpu_affinity(irq, NULL)); } int its_prop_update_vlpi(int irq, u8 config, bool inv) @@ -222,12 +360,28 @@ int its_prop_update_vlpi(int irq, u8 config, bool inv) return irq_set_vcpu_affinity(irq, &info); } -int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops) +int its_prop_update_vsgi(int irq, u8 priority, bool group) +{ + struct its_cmd_info info = { + .cmd_type = PROP_UPDATE_VSGI, + { + .priority = priority, + .group = group, + }, + }; + + return irq_set_vcpu_affinity(irq, &info); +} + +int its_init_v4(struct irq_domain *domain, + const struct irq_domain_ops *vpe_ops, + const struct irq_domain_ops *sgi_ops) { if (domain) { pr_info("ITS: Enabling GICv4 support\n"); gic_domain = domain; - vpe_domain_ops = ops; + vpe_domain_ops = vpe_ops; + sgi_domain_ops = sgi_ops; return 0; } diff --git a/drivers/irqchip/irq-gic-v5-irs.c b/drivers/irqchip/irq-gic-v5-irs.c new file mode 100644 index 000000000000..ce2732d649a3 --- /dev/null +++ b/drivers/irqchip/irq-gic-v5-irs.c @@ -0,0 +1,827 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2024-2025 ARM Limited, All Rights Reserved. + */ + +#define pr_fmt(fmt) "GICv5 IRS: " fmt + +#include <linux/kmemleak.h> +#include <linux/log2.h> +#include <linux/of.h> +#include <linux/of_address.h> + +#include <linux/irqchip.h> +#include <linux/irqchip/arm-gic-v5.h> + +/* + * Hardcoded ID_BITS limit for systems supporting only a 1-level IST + * table. Such systems aren't expected + * to require more than 2^12 LPIs.
Tweak as required. + */ +#define LPI_ID_BITS_LINEAR 12 + +#define IRS_FLAGS_NON_COHERENT BIT(0) + +static DEFINE_PER_CPU_READ_MOSTLY(struct gicv5_irs_chip_data *, per_cpu_irs_data); +static LIST_HEAD(irs_nodes); + +static u32 irs_readl_relaxed(struct gicv5_irs_chip_data *irs_data, + const u32 reg_offset) +{ + return readl_relaxed(irs_data->irs_base + reg_offset); +} + +static void irs_writel_relaxed(struct gicv5_irs_chip_data *irs_data, + const u32 val, const u32 reg_offset) +{ + writel_relaxed(val, irs_data->irs_base + reg_offset); +} + +static u64 irs_readq_relaxed(struct gicv5_irs_chip_data *irs_data, + const u32 reg_offset) +{ + return readq_relaxed(irs_data->irs_base + reg_offset); +} + +static void irs_writeq_relaxed(struct gicv5_irs_chip_data *irs_data, + const u64 val, const u32 reg_offset) +{ + writeq_relaxed(val, irs_data->irs_base + reg_offset); +} + +/* + * The polling wait (in gicv5_wait_for_op_atomic()) on a GIC register + * provides the memory barriers (through MMIO accessors) + * required to synchronize CPU and GIC access to IST memory. + */ +static int gicv5_irs_ist_synchronise(struct gicv5_irs_chip_data *irs_data) +{ + return gicv5_wait_for_op_atomic(irs_data->irs_base, GICV5_IRS_IST_STATUSR, + GICV5_IRS_IST_STATUSR_IDLE, NULL); +} + +static int __init gicv5_irs_init_ist_linear(struct gicv5_irs_chip_data *irs_data, + unsigned int lpi_id_bits, + unsigned int istsz) +{ + size_t l2istsz; + u32 n, cfgr; + void *ist; + u64 baser; + int ret; + + /* Taken from GICv5 specifications 10.2.1.13 IRS_IST_BASER */ + n = max(5, lpi_id_bits + 1 + istsz); + + l2istsz = BIT(n + 1); + /* + * Check memory requirements. For a linear IST we cap the + * number of ID bits to a value that should never exceed + * kmalloc interface memory allocation limits, so this + * check is really belt and braces.
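A worked instance of the linear sizing above, assuming the GICV5_IRS_IST_CFGR_ISTSZ_4 field (4-byte entries) encodes to 0:

/*
 * lpi_id_bits = 12 (LPI_ID_BITS_LINEAR), istsz = 0:
 *   n       = max(5, 12 + 1 + 0) = 13
 *   l2istsz = BIT(13 + 1)        = 16 KiB
 * comfortably below KMALLOC_MAX_SIZE, so the capping branch that
 * follows really is belt and braces for sane configurations.
 */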
+ */ + if (l2istsz > KMALLOC_MAX_SIZE) { + u8 lpi_id_cap = ilog2(KMALLOC_MAX_SIZE) - 2 + istsz; + + pr_warn("Limiting LPI ID bits from %u to %u\n", + lpi_id_bits, lpi_id_cap); + lpi_id_bits = lpi_id_cap; + l2istsz = KMALLOC_MAX_SIZE; + } + + ist = kzalloc(l2istsz, GFP_KERNEL); + if (!ist) + return -ENOMEM; + + if (irs_data->flags & IRS_FLAGS_NON_COHERENT) + dcache_clean_inval_poc((unsigned long)ist, + (unsigned long)ist + l2istsz); + else + dsb(ishst); + + cfgr = FIELD_PREP(GICV5_IRS_IST_CFGR_STRUCTURE, + GICV5_IRS_IST_CFGR_STRUCTURE_LINEAR) | + FIELD_PREP(GICV5_IRS_IST_CFGR_ISTSZ, istsz) | + FIELD_PREP(GICV5_IRS_IST_CFGR_L2SZ, + GICV5_IRS_IST_CFGR_L2SZ_4K) | + FIELD_PREP(GICV5_IRS_IST_CFGR_LPI_ID_BITS, lpi_id_bits); + irs_writel_relaxed(irs_data, cfgr, GICV5_IRS_IST_CFGR); + + gicv5_global_data.ist.l2 = false; + + baser = (virt_to_phys(ist) & GICV5_IRS_IST_BASER_ADDR_MASK) | + FIELD_PREP(GICV5_IRS_IST_BASER_VALID, 0x1); + irs_writeq_relaxed(irs_data, baser, GICV5_IRS_IST_BASER); + + ret = gicv5_irs_ist_synchronise(irs_data); + if (ret) { + kfree(ist); + return ret; + } + kmemleak_ignore(ist); + + return 0; +} + +static int __init gicv5_irs_init_ist_two_level(struct gicv5_irs_chip_data *irs_data, + unsigned int lpi_id_bits, + unsigned int istsz, + unsigned int l2sz) +{ + __le64 *l1ist; + u32 cfgr, n; + size_t l1sz; + u64 baser; + int ret; + + /* Taken from GICv5 specifications 10.2.1.13 IRS_IST_BASER */ + n = max(5, lpi_id_bits - ((10 - istsz) + (2 * l2sz)) + 2); + + l1sz = BIT(n + 1); + + l1ist = kzalloc(l1sz, GFP_KERNEL); + if (!l1ist) + return -ENOMEM; + + if (irs_data->flags & IRS_FLAGS_NON_COHERENT) + dcache_clean_inval_poc((unsigned long)l1ist, + (unsigned long)l1ist + l1sz); + else + dsb(ishst); + + cfgr = FIELD_PREP(GICV5_IRS_IST_CFGR_STRUCTURE, + GICV5_IRS_IST_CFGR_STRUCTURE_TWO_LEVEL) | + FIELD_PREP(GICV5_IRS_IST_CFGR_ISTSZ, istsz) | + FIELD_PREP(GICV5_IRS_IST_CFGR_L2SZ, l2sz) | + FIELD_PREP(GICV5_IRS_IST_CFGR_LPI_ID_BITS, lpi_id_bits); + irs_writel_relaxed(irs_data, cfgr, GICV5_IRS_IST_CFGR); + + /* + * The L2SZ determines the bits required at the L2 level. The number of + * bytes required by metadata is reported through istsz - the number of + * bits covered by L2 entries scales accordingly. + */ + gicv5_global_data.ist.l2_size = BIT(11 + (2 * l2sz) + 1); + gicv5_global_data.ist.l2_bits = (10 - istsz) + (2 * l2sz); + gicv5_global_data.ist.l1ist_addr = l1ist; + gicv5_global_data.ist.l2 = true; + + baser = (virt_to_phys(l1ist) & GICV5_IRS_IST_BASER_ADDR_MASK) | + FIELD_PREP(GICV5_IRS_IST_BASER_VALID, 0x1); + irs_writeq_relaxed(irs_data, baser, GICV5_IRS_IST_BASER); + + ret = gicv5_irs_ist_synchronise(irs_data); + if (ret) { + kfree(l1ist); + return ret; + } + + return 0; +} + +/* + * Alloc L2 IST entries on demand. + * + * Locking/serialization is guaranteed by irqdomain core code by + * taking the hierarchical domain struct irq_domain.root->mutex.
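Plugging numbers into the two-level layout above, under the same assumption that the 4-byte ISTSZ and 4K L2SZ fields both encode to 0:

/*
 * lpi_id_bits = 16, istsz = 0, l2sz = 0:
 *   l2_bits = (10 - 0) + (2 * 0)    = 10 -> 1024 LPIs per L2 table
 *   l2_size = BIT(11 + (2 * 0) + 1) = 4K -> 1024 entries * 4 bytes
 *   L1: n = max(5, 16 - 10 + 2) = 8 -> l1sz = BIT(9) = 512 bytes,
 *       i.e. 64 L1 entries of 8 bytes each
 */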
+ */ +int gicv5_irs_iste_alloc(const u32 lpi) +{ + struct gicv5_irs_chip_data *irs_data; + unsigned int index; + u32 l2istr, l2bits; + __le64 *l1ist; + size_t l2size; + void *l2ist; + int ret; + + if (!gicv5_global_data.ist.l2) + return 0; + + irs_data = per_cpu(per_cpu_irs_data, smp_processor_id()); + if (!irs_data) + return -ENOENT; + + l2size = gicv5_global_data.ist.l2_size; + l2bits = gicv5_global_data.ist.l2_bits; + l1ist = gicv5_global_data.ist.l1ist_addr; + index = lpi >> l2bits; + + if (FIELD_GET(GICV5_ISTL1E_VALID, le64_to_cpu(l1ist[index]))) + return 0; + + l2ist = kzalloc(l2size, GFP_KERNEL); + if (!l2ist) + return -ENOMEM; + + l1ist[index] = cpu_to_le64(virt_to_phys(l2ist) & GICV5_ISTL1E_L2_ADDR_MASK); + + if (irs_data->flags & IRS_FLAGS_NON_COHERENT) { + dcache_clean_inval_poc((unsigned long)l2ist, + (unsigned long)l2ist + l2size); + dcache_clean_poc((unsigned long)(l1ist + index), + (unsigned long)(l1ist + index) + sizeof(*l1ist)); + } else { + dsb(ishst); + } + + l2istr = FIELD_PREP(GICV5_IRS_MAP_L2_ISTR_ID, lpi); + irs_writel_relaxed(irs_data, l2istr, GICV5_IRS_MAP_L2_ISTR); + + ret = gicv5_irs_ist_synchronise(irs_data); + if (ret) { + l1ist[index] = 0; + kfree(l2ist); + return ret; + } + kmemleak_ignore(l2ist); + + /* + * Make sure we invalidate the cache line pulled before the IRS + * had a chance to update the L1 entry and mark it valid. + */ + if (irs_data->flags & IRS_FLAGS_NON_COHERENT) { + /* + * gicv5_irs_ist_synchronise() includes memory + * barriers (MMIO accessors) required to guarantee that the + * following dcache invalidation is not executed before the + * IST mapping operation has completed. + */ + dcache_inval_poc((unsigned long)(l1ist + index), + (unsigned long)(l1ist + index) + sizeof(*l1ist)); + } + + return 0; +} + +/* + * Try to match the L2 IST size to the pagesize, and if this is not possible + * pick the smallest supported L2 size in order to minimise the requirement for + * physically contiguous blocks of memory as page-sized allocations are + * guaranteed to be physically contiguous, and are by definition the easiest to + * find. + * + * Falling back to the smallest supported size (in the event that the pagesize + * itself is not supported) again serves to make it easier to find physically + * contiguous blocks of memory.
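For instance, on a 16K-page kernel whose IRS advertises only 4K and 64K L2 sizes, the SZ_16K arm of the switch below fails its check and breaks out, and the trailing fallback settles on 4K, the smallest granule that still fits wholly within a page.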
+ */ +static unsigned int gicv5_irs_l2_sz(u32 idr2) +{ + switch (PAGE_SIZE) { + case SZ_64K: + if (GICV5_IRS_IST_L2SZ_SUPPORT_64KB(idr2)) + return GICV5_IRS_IST_CFGR_L2SZ_64K; + fallthrough; + case SZ_4K: + if (GICV5_IRS_IST_L2SZ_SUPPORT_4KB(idr2)) + return GICV5_IRS_IST_CFGR_L2SZ_4K; + fallthrough; + case SZ_16K: + if (GICV5_IRS_IST_L2SZ_SUPPORT_16KB(idr2)) + return GICV5_IRS_IST_CFGR_L2SZ_16K; + break; + } + + if (GICV5_IRS_IST_L2SZ_SUPPORT_4KB(idr2)) + return GICV5_IRS_IST_CFGR_L2SZ_4K; + + return GICV5_IRS_IST_CFGR_L2SZ_64K; +} + +static int __init gicv5_irs_init_ist(struct gicv5_irs_chip_data *irs_data) +{ + u32 lpi_id_bits, idr2_id_bits, idr2_min_lpi_id_bits, l2_iste_sz, l2sz; + u32 l2_iste_sz_split, idr2; + bool two_levels, istmd; + u64 baser; + int ret; + + baser = irs_readq_relaxed(irs_data, GICV5_IRS_IST_BASER); + if (FIELD_GET(GICV5_IRS_IST_BASER_VALID, baser)) { + pr_err("IST is marked as valid already; cannot allocate\n"); + return -EPERM; + } + + idr2 = irs_readl_relaxed(irs_data, GICV5_IRS_IDR2); + two_levels = !!FIELD_GET(GICV5_IRS_IDR2_IST_LEVELS, idr2); + + idr2_id_bits = FIELD_GET(GICV5_IRS_IDR2_ID_BITS, idr2); + idr2_min_lpi_id_bits = FIELD_GET(GICV5_IRS_IDR2_MIN_LPI_ID_BITS, idr2); + + /* + * For two level tables we are always supporting the maximum allowed + * number of IDs. + * + * For 1-level tables, we should support a number of bits that + * is >= min_lpi_id_bits but cap it to LPI_ID_BITS_LINEAR lest + * the level 1-table gets too large and its memory allocation + * may fail. + */ + if (two_levels) { + lpi_id_bits = idr2_id_bits; + } else { + lpi_id_bits = max(LPI_ID_BITS_LINEAR, idr2_min_lpi_id_bits); + lpi_id_bits = min(lpi_id_bits, idr2_id_bits); + } + + /* + * Cap the ID bits according to the CPUIF supported ID bits + */ + lpi_id_bits = min(lpi_id_bits, gicv5_global_data.cpuif_id_bits); + + if (two_levels) + l2sz = gicv5_irs_l2_sz(idr2); + + istmd = !!FIELD_GET(GICV5_IRS_IDR2_ISTMD, idr2); + + l2_iste_sz = GICV5_IRS_IST_CFGR_ISTSZ_4; + + if (istmd) { + l2_iste_sz_split = FIELD_GET(GICV5_IRS_IDR2_ISTMD_SZ, idr2); + + if (lpi_id_bits < l2_iste_sz_split) + l2_iste_sz = GICV5_IRS_IST_CFGR_ISTSZ_8; + else + l2_iste_sz = GICV5_IRS_IST_CFGR_ISTSZ_16; + } + + /* + * Follow GICv5 specification recommendation to opt in for two + * level tables (ref: 10.2.1.14 IRS_IST_CFGR). 
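With the field encodings assumed in the sizing examples above, the opt-in condition below reduces to lpi_id_bits > 10 for 4-byte entries and 4K L2 tables: a 12-bit LPI space already selects the two-level layout, while an ID space that fits in 10 bits stays linear.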
+ */ + if (two_levels && (lpi_id_bits > ((10 - l2_iste_sz) + (2 * l2sz)))) { + ret = gicv5_irs_init_ist_two_level(irs_data, lpi_id_bits, + l2_iste_sz, l2sz); + } else { + ret = gicv5_irs_init_ist_linear(irs_data, lpi_id_bits, + l2_iste_sz); + } + if (ret) + return ret; + + gicv5_init_lpis(BIT(lpi_id_bits)); + + return 0; +} + +struct iaffid_entry { + u16 iaffid; + bool valid; +}; + +static DEFINE_PER_CPU(struct iaffid_entry, cpu_iaffid); + +int gicv5_irs_cpu_to_iaffid(int cpuid, u16 *iaffid) +{ + if (!per_cpu(cpu_iaffid, cpuid).valid) { + pr_err("IAFFID for CPU %d has not been initialised\n", cpuid); + return -ENODEV; + } + + *iaffid = per_cpu(cpu_iaffid, cpuid).iaffid; + + return 0; +} + +struct gicv5_irs_chip_data *gicv5_irs_lookup_by_spi_id(u32 spi_id) +{ + struct gicv5_irs_chip_data *irs_data; + u32 min, max; + + list_for_each_entry(irs_data, &irs_nodes, entry) { + if (!irs_data->spi_range) + continue; + + min = irs_data->spi_min; + max = irs_data->spi_min + irs_data->spi_range - 1; + if (spi_id >= min && spi_id <= max) + return irs_data; + } + + return NULL; +} + +static int gicv5_irs_wait_for_spi_op(struct gicv5_irs_chip_data *irs_data) +{ + u32 statusr; + int ret; + + ret = gicv5_wait_for_op_atomic(irs_data->irs_base, GICV5_IRS_SPI_STATUSR, + GICV5_IRS_SPI_STATUSR_IDLE, &statusr); + if (ret) + return ret; + + return !!FIELD_GET(GICV5_IRS_SPI_STATUSR_V, statusr) ? 0 : -EIO; +} + +static int gicv5_irs_wait_for_irs_pe(struct gicv5_irs_chip_data *irs_data, + bool selr) +{ + bool valid = true; + u32 statusr; + int ret; + + ret = gicv5_wait_for_op_atomic(irs_data->irs_base, GICV5_IRS_PE_STATUSR, + GICV5_IRS_PE_STATUSR_IDLE, &statusr); + if (ret) + return ret; + + if (selr) + valid = !!FIELD_GET(GICV5_IRS_PE_STATUSR_V, statusr); + + return valid ? 0 : -EIO; +} + +static int gicv5_irs_wait_for_pe_selr(struct gicv5_irs_chip_data *irs_data) +{ + return gicv5_irs_wait_for_irs_pe(irs_data, true); +} + +static int gicv5_irs_wait_for_pe_cr0(struct gicv5_irs_chip_data *irs_data) +{ + return gicv5_irs_wait_for_irs_pe(irs_data, false); +} + +int gicv5_spi_irq_set_type(struct irq_data *d, unsigned int type) +{ + struct gicv5_irs_chip_data *irs_data = d->chip_data; + u32 selr, cfgr; + bool level; + int ret; + + /* + * There is no distinction between HIGH/LOW for level IRQs + * and RISING/FALLING for edge IRQs in the architecture, + * hence consider them equivalent. 
+ */ + switch (type) { + case IRQ_TYPE_EDGE_RISING: + case IRQ_TYPE_EDGE_FALLING: + level = false; + break; + case IRQ_TYPE_LEVEL_HIGH: + case IRQ_TYPE_LEVEL_LOW: + level = true; + break; + default: + return -EINVAL; + } + + guard(raw_spinlock)(&irs_data->spi_config_lock); + + selr = FIELD_PREP(GICV5_IRS_SPI_SELR_ID, d->hwirq); + irs_writel_relaxed(irs_data, selr, GICV5_IRS_SPI_SELR); + ret = gicv5_irs_wait_for_spi_op(irs_data); + if (ret) + return ret; + + cfgr = FIELD_PREP(GICV5_IRS_SPI_CFGR_TM, level); + irs_writel_relaxed(irs_data, cfgr, GICV5_IRS_SPI_CFGR); + + return gicv5_irs_wait_for_spi_op(irs_data); +} + +static int gicv5_irs_wait_for_idle(struct gicv5_irs_chip_data *irs_data) +{ + return gicv5_wait_for_op_atomic(irs_data->irs_base, GICV5_IRS_CR0, + GICV5_IRS_CR0_IDLE, NULL); +} + +void gicv5_irs_syncr(void) +{ + struct gicv5_irs_chip_data *irs_data; + u32 syncr; + + irs_data = list_first_entry_or_null(&irs_nodes, struct gicv5_irs_chip_data, entry); + if (WARN_ON_ONCE(!irs_data)) + return; + + syncr = FIELD_PREP(GICV5_IRS_SYNCR_SYNC, 1); + irs_writel_relaxed(irs_data, syncr, GICV5_IRS_SYNCR); + + gicv5_wait_for_op(irs_data->irs_base, GICV5_IRS_SYNC_STATUSR, + GICV5_IRS_SYNC_STATUSR_IDLE); +} + +int gicv5_irs_register_cpu(int cpuid) +{ + struct gicv5_irs_chip_data *irs_data; + u32 selr, cr0; + u16 iaffid; + int ret; + + ret = gicv5_irs_cpu_to_iaffid(cpuid, &iaffid); + if (ret) { + pr_err("IAFFID for CPU %d has not been initialised\n", cpuid); + return ret; + } + + irs_data = per_cpu(per_cpu_irs_data, cpuid); + if (!irs_data) { + pr_err("No IRS associated with CPU %u\n", cpuid); + return -ENXIO; + } + + selr = FIELD_PREP(GICV5_IRS_PE_SELR_IAFFID, iaffid); + irs_writel_relaxed(irs_data, selr, GICV5_IRS_PE_SELR); + + ret = gicv5_irs_wait_for_pe_selr(irs_data); + if (ret) { + pr_err("IAFFID 0x%x used in IRS_PE_SELR is invalid\n", iaffid); + return -ENXIO; + } + + cr0 = FIELD_PREP(GICV5_IRS_PE_CR0_DPS, 0x1); + irs_writel_relaxed(irs_data, cr0, GICV5_IRS_PE_CR0); + + ret = gicv5_irs_wait_for_pe_cr0(irs_data); + if (ret) + return ret; + + pr_debug("CPU %d enabled PE IAFFID 0x%x\n", cpuid, iaffid); + + return 0; +} + +static void __init gicv5_irs_init_bases(struct gicv5_irs_chip_data *irs_data, + void __iomem *irs_base, + struct fwnode_handle *handle) +{ + struct device_node *np = to_of_node(handle); + u32 cr0, cr1; + + irs_data->fwnode = handle; + irs_data->irs_base = irs_base; + + if (of_property_read_bool(np, "dma-noncoherent")) { + /* + * A non-coherent IRS implies that some cache levels cannot be + * used coherently by the cores and GIC. Our only option is to mark + * memory attributes for the GIC as non-cacheable; by default, + * non-cacheable memory attributes imply outer-shareable + * shareability, so the value written into IRS_CR1_SH is ignored.
+ */ + cr1 = FIELD_PREP(GICV5_IRS_CR1_VPED_WA, GICV5_NO_WRITE_ALLOC) | + FIELD_PREP(GICV5_IRS_CR1_VPED_RA, GICV5_NO_READ_ALLOC) | + FIELD_PREP(GICV5_IRS_CR1_VMD_WA, GICV5_NO_WRITE_ALLOC) | + FIELD_PREP(GICV5_IRS_CR1_VMD_RA, GICV5_NO_READ_ALLOC) | + FIELD_PREP(GICV5_IRS_CR1_VPET_RA, GICV5_NO_READ_ALLOC) | + FIELD_PREP(GICV5_IRS_CR1_VMT_RA, GICV5_NO_READ_ALLOC) | + FIELD_PREP(GICV5_IRS_CR1_IST_WA, GICV5_NO_WRITE_ALLOC) | + FIELD_PREP(GICV5_IRS_CR1_IST_RA, GICV5_NO_READ_ALLOC) | + FIELD_PREP(GICV5_IRS_CR1_IC, GICV5_NON_CACHE) | + FIELD_PREP(GICV5_IRS_CR1_OC, GICV5_NON_CACHE); + irs_data->flags |= IRS_FLAGS_NON_COHERENT; + } else { + cr1 = FIELD_PREP(GICV5_IRS_CR1_VPED_WA, GICV5_WRITE_ALLOC) | + FIELD_PREP(GICV5_IRS_CR1_VPED_RA, GICV5_READ_ALLOC) | + FIELD_PREP(GICV5_IRS_CR1_VMD_WA, GICV5_WRITE_ALLOC) | + FIELD_PREP(GICV5_IRS_CR1_VMD_RA, GICV5_READ_ALLOC) | + FIELD_PREP(GICV5_IRS_CR1_VPET_RA, GICV5_READ_ALLOC) | + FIELD_PREP(GICV5_IRS_CR1_VMT_RA, GICV5_READ_ALLOC) | + FIELD_PREP(GICV5_IRS_CR1_IST_WA, GICV5_WRITE_ALLOC) | + FIELD_PREP(GICV5_IRS_CR1_IST_RA, GICV5_READ_ALLOC) | + FIELD_PREP(GICV5_IRS_CR1_IC, GICV5_WB_CACHE) | + FIELD_PREP(GICV5_IRS_CR1_OC, GICV5_WB_CACHE) | + FIELD_PREP(GICV5_IRS_CR1_SH, GICV5_INNER_SHARE); + } + + irs_writel_relaxed(irs_data, cr1, GICV5_IRS_CR1); + + cr0 = FIELD_PREP(GICV5_IRS_CR0_IRSEN, 0x1); + irs_writel_relaxed(irs_data, cr0, GICV5_IRS_CR0); + gicv5_irs_wait_for_idle(irs_data); +} + +static int __init gicv5_irs_of_init_affinity(struct device_node *node, + struct gicv5_irs_chip_data *irs_data, + u8 iaffid_bits) +{ + /* + * Detect IAFFID<->CPU mappings from the device tree and + * record IRS<->CPU topology information. + */ + u16 iaffid_mask = GENMASK(iaffid_bits - 1, 0); + int ret, i, ncpus, niaffids; + + ncpus = of_count_phandle_with_args(node, "cpus", NULL); + if (ncpus < 0) + return -EINVAL; + + niaffids = of_property_count_elems_of_size(node, "arm,iaffids", + sizeof(u16)); + if (niaffids != ncpus) + return -EINVAL; + + u16 *iaffids __free(kfree) = kcalloc(niaffids, sizeof(*iaffids), GFP_KERNEL); + if (!iaffids) + return -ENOMEM; + + ret = of_property_read_u16_array(node, "arm,iaffids", iaffids, niaffids); + if (ret) + return ret; + + for (i = 0; i < ncpus; i++) { + struct device_node *cpu_node; + int cpu; + + cpu_node = of_parse_phandle(node, "cpus", i); + if (!cpu_node) { + pr_warn(FW_BUG "Erroneous CPU node phandle\n"); + continue; + } + + cpu = of_cpu_node_to_id(cpu_node); + of_node_put(cpu_node); + if (cpu < 0) + continue; + + if (iaffids[i] & ~iaffid_mask) { + pr_warn("CPU %d iaffid 0x%x exceeds IRS iaffid bits\n", + cpu, iaffids[i]); + continue; + } + + per_cpu(cpu_iaffid, cpu).iaffid = iaffids[i]; + per_cpu(cpu_iaffid, cpu).valid = true; + + /* We also know that the CPU is connected to this IRS */ + per_cpu(per_cpu_irs_data, cpu) = irs_data; + } + + return ret; +} + +static void irs_setup_pri_bits(u32 idr1) +{ + switch (FIELD_GET(GICV5_IRS_IDR1_PRIORITY_BITS, idr1)) { + case GICV5_IRS_IDR1_PRIORITY_BITS_1BITS: + gicv5_global_data.irs_pri_bits = 1; + break; + case GICV5_IRS_IDR1_PRIORITY_BITS_2BITS: + gicv5_global_data.irs_pri_bits = 2; + break; + case GICV5_IRS_IDR1_PRIORITY_BITS_3BITS: + gicv5_global_data.irs_pri_bits = 3; + break; + case GICV5_IRS_IDR1_PRIORITY_BITS_4BITS: + gicv5_global_data.irs_pri_bits = 4; + break; + case GICV5_IRS_IDR1_PRIORITY_BITS_5BITS: + gicv5_global_data.irs_pri_bits = 5; + break; + default: + pr_warn("Detected wrong IDR priority bits value 0x%lx\n", + FIELD_GET(GICV5_IRS_IDR1_PRIORITY_BITS, idr1)); + 
gicv5_global_data.irs_pri_bits = 1; + break; + } +} + +static int __init gicv5_irs_init(struct device_node *node) +{ + struct gicv5_irs_chip_data *irs_data; + void __iomem *irs_base; + u32 idr, spi_count; + u8 iaffid_bits; + int ret; + + irs_data = kzalloc(sizeof(*irs_data), GFP_KERNEL); + if (!irs_data) + return -ENOMEM; + + raw_spin_lock_init(&irs_data->spi_config_lock); + + ret = of_property_match_string(node, "reg-names", "ns-config"); + if (ret < 0) { + pr_err("%pOF: ns-config reg-name not present\n", node); + goto out_err; + } + + irs_base = of_io_request_and_map(node, ret, of_node_full_name(node)); + if (IS_ERR(irs_base)) { + pr_err("%pOF: unable to map GICv5 IRS registers\n", node); + ret = PTR_ERR(irs_base); + goto out_err; + } + + gicv5_irs_init_bases(irs_data, irs_base, &node->fwnode); + + idr = irs_readl_relaxed(irs_data, GICV5_IRS_IDR1); + iaffid_bits = FIELD_GET(GICV5_IRS_IDR1_IAFFID_BITS, idr) + 1; + + ret = gicv5_irs_of_init_affinity(node, irs_data, iaffid_bits); + if (ret) { + pr_err("Failed to parse CPU IAFFIDs from the device tree!\n"); + goto out_iomem; + } + + idr = irs_readl_relaxed(irs_data, GICV5_IRS_IDR2); + if (WARN(!FIELD_GET(GICV5_IRS_IDR2_LPI, idr), + "LPI support not available - no IPIs, can't proceed\n")) { + ret = -ENODEV; + goto out_iomem; + } + + idr = irs_readl_relaxed(irs_data, GICV5_IRS_IDR7); + irs_data->spi_min = FIELD_GET(GICV5_IRS_IDR7_SPI_BASE, idr); + + idr = irs_readl_relaxed(irs_data, GICV5_IRS_IDR6); + irs_data->spi_range = FIELD_GET(GICV5_IRS_IDR6_SPI_IRS_RANGE, idr); + + if (irs_data->spi_range) { + pr_info("%s detected SPI range [%u-%u]\n", + of_node_full_name(node), + irs_data->spi_min, + irs_data->spi_min + + irs_data->spi_range - 1); + } + + /* + * Do the global setting only on the first IRS. + * Global properties (iaffid_bits, global spi count) are guaranteed to + * be consistent across IRSes by the architecture. 
+ */ + if (list_empty(&irs_nodes)) { + + idr = irs_readl_relaxed(irs_data, GICV5_IRS_IDR1); + irs_setup_pri_bits(idr); + + idr = irs_readl_relaxed(irs_data, GICV5_IRS_IDR5); + + spi_count = FIELD_GET(GICV5_IRS_IDR5_SPI_RANGE, idr); + gicv5_global_data.global_spi_count = spi_count; + + gicv5_init_lpi_domain(); + + pr_debug("Detected %u SPIs globally\n", spi_count); + } + + list_add_tail(&irs_data->entry, &irs_nodes); + + return 0; + +out_iomem: + iounmap(irs_base); +out_err: + kfree(irs_data); + return ret; +} + +void __init gicv5_irs_remove(void) +{ + struct gicv5_irs_chip_data *irs_data, *tmp_data; + + gicv5_free_lpi_domain(); + gicv5_deinit_lpis(); + + list_for_each_entry_safe(irs_data, tmp_data, &irs_nodes, entry) { + iounmap(irs_data->irs_base); + list_del(&irs_data->entry); + kfree(irs_data); + } +} + +int __init gicv5_irs_enable(void) +{ + struct gicv5_irs_chip_data *irs_data; + int ret; + + irs_data = list_first_entry_or_null(&irs_nodes, + struct gicv5_irs_chip_data, entry); + if (!irs_data) + return -ENODEV; + + ret = gicv5_irs_init_ist(irs_data); + if (ret) { + pr_err("Failed to init IST\n"); + return ret; + } + + return 0; +} + +void __init gicv5_irs_its_probe(void) +{ + struct gicv5_irs_chip_data *irs_data; + + list_for_each_entry(irs_data, &irs_nodes, entry) + gicv5_its_of_probe(to_of_node(irs_data->fwnode)); +} + +int __init gicv5_irs_of_probe(struct device_node *parent) +{ + struct device_node *np; + int ret; + + for_each_available_child_of_node(parent, np) { + if (!of_device_is_compatible(np, "arm,gic-v5-irs")) + continue; + + ret = gicv5_irs_init(np); + if (ret) + pr_err("Failed to init IRS %s\n", np->full_name); + } + + return list_empty(&irs_nodes) ? -ENODEV : 0; +} diff --git a/drivers/irqchip/irq-gic-v5-its.c b/drivers/irqchip/irq-gic-v5-its.c new file mode 100644 index 000000000000..554485f0be1f --- /dev/null +++ b/drivers/irqchip/irq-gic-v5-its.c @@ -0,0 +1,1233 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2024-2025 ARM Limited, All Rights Reserved. 
+ */ + +#define pr_fmt(fmt) "GICv5 ITS: " fmt + +#include <linux/bitmap.h> +#include <linux/iommu.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/msi.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/slab.h> + +#include <linux/irqchip.h> +#include <linux/irqchip/arm-gic-v5.h> +#include <linux/irqchip/irq-msi-lib.h> + +#include "irq-gic-its-msi-parent.h" + +#define ITS_FLAGS_NON_COHERENT BIT(0) + +struct gicv5_its_chip_data { + struct xarray its_devices; + struct mutex dev_alloc_lock; + struct fwnode_handle *fwnode; + struct gicv5_its_devtab_cfg devtab_cfgr; + void __iomem *its_base; + u32 flags; + unsigned int msi_domain_flags; +}; + +struct gicv5_its_dev { + struct gicv5_its_chip_data *its_node; + struct gicv5_its_itt_cfg itt_cfg; + unsigned long *event_map; + u32 device_id; + u32 num_events; + phys_addr_t its_trans_phys_base; +}; + +static u32 its_readl_relaxed(struct gicv5_its_chip_data *its_node, const u32 reg_offset) +{ + return readl_relaxed(its_node->its_base + reg_offset); +} + +static void its_writel_relaxed(struct gicv5_its_chip_data *its_node, const u32 val, + const u32 reg_offset) +{ + writel_relaxed(val, its_node->its_base + reg_offset); +} + +static void its_writeq_relaxed(struct gicv5_its_chip_data *its_node, const u64 val, + const u32 reg_offset) +{ + writeq_relaxed(val, its_node->its_base + reg_offset); +} + +static void gicv5_its_dcache_clean(struct gicv5_its_chip_data *its, void *start, + size_t sz) +{ + void *end = start + sz; + + if (its->flags & ITS_FLAGS_NON_COHERENT) + dcache_clean_inval_poc((unsigned long)start, (unsigned long)end); + else + dsb(ishst); +} + +static void its_write_table_entry(struct gicv5_its_chip_data *its, __le64 *entry, + u64 val) +{ + WRITE_ONCE(*entry, cpu_to_le64(val)); + gicv5_its_dcache_clean(its, entry, sizeof(*entry)); +} + +#define devtab_cfgr_field(its, f) \ + FIELD_GET(GICV5_ITS_DT_CFGR_##f, (its)->devtab_cfgr.cfgr) + +static int gicv5_its_cache_sync(struct gicv5_its_chip_data *its) +{ + return gicv5_wait_for_op_atomic(its->its_base, GICV5_ITS_STATUSR, + GICV5_ITS_STATUSR_IDLE, NULL); +} + +static void gicv5_its_syncr(struct gicv5_its_chip_data *its, + struct gicv5_its_dev *its_dev) +{ + u64 syncr; + + syncr = FIELD_PREP(GICV5_ITS_SYNCR_SYNC, 1) | + FIELD_PREP(GICV5_ITS_SYNCR_DEVICEID, its_dev->device_id); + + its_writeq_relaxed(its, syncr, GICV5_ITS_SYNCR); + + gicv5_wait_for_op(its->its_base, GICV5_ITS_SYNC_STATUSR, GICV5_ITS_SYNC_STATUSR_IDLE); +} + +/* Number of bits required for each L2 {device/interrupt translation} table size */ +#define ITS_L2SZ_64K_L2_BITS 13 +#define ITS_L2SZ_16K_L2_BITS 11 +#define ITS_L2SZ_4K_L2_BITS 9 + +static unsigned int gicv5_its_l2sz_to_l2_bits(unsigned int sz) +{ + switch (sz) { + case GICV5_ITS_DT_ITT_CFGR_L2SZ_64k: + return ITS_L2SZ_64K_L2_BITS; + case GICV5_ITS_DT_ITT_CFGR_L2SZ_16k: + return ITS_L2SZ_16K_L2_BITS; + case GICV5_ITS_DT_ITT_CFGR_L2SZ_4k: + default: + return ITS_L2SZ_4K_L2_BITS; + } +} + +static int gicv5_its_itt_cache_inv(struct gicv5_its_chip_data *its, u32 device_id, + u16 event_id) +{ + u32 eventr, eidr; + u64 didr; + + didr = FIELD_PREP(GICV5_ITS_DIDR_DEVICEID, device_id); + eidr = FIELD_PREP(GICV5_ITS_EIDR_EVENTID, event_id); + eventr = FIELD_PREP(GICV5_ITS_INV_EVENTR_I, 0x1); + + its_writeq_relaxed(its, didr, GICV5_ITS_DIDR); + its_writel_relaxed(its, eidr, GICV5_ITS_EIDR); + its_writel_relaxed(its, eventr, GICV5_ITS_INV_EVENTR); + + return gicv5_its_cache_sync(its); +} + +static void 
gicv5_its_free_itt_linear(struct gicv5_its_dev *its_dev) +{ + kfree(its_dev->itt_cfg.linear.itt); +} + +static void gicv5_its_free_itt_two_level(struct gicv5_its_dev *its_dev) +{ + unsigned int i, num_ents = its_dev->itt_cfg.l2.num_l1_ents; + + for (i = 0; i < num_ents; i++) + kfree(its_dev->itt_cfg.l2.l2ptrs[i]); + + kfree(its_dev->itt_cfg.l2.l2ptrs); + kfree(its_dev->itt_cfg.l2.l1itt); +} + +static void gicv5_its_free_itt(struct gicv5_its_dev *its_dev) +{ + if (!its_dev->itt_cfg.l2itt) + gicv5_its_free_itt_linear(its_dev); + else + gicv5_its_free_itt_two_level(its_dev); +} + +static int gicv5_its_create_itt_linear(struct gicv5_its_chip_data *its, + struct gicv5_its_dev *its_dev, + unsigned int event_id_bits) +{ + unsigned int num_ents = BIT(event_id_bits); + __le64 *itt; + + itt = kcalloc(num_ents, sizeof(*itt), GFP_KERNEL); + if (!itt) + return -ENOMEM; + + its_dev->itt_cfg.linear.itt = itt; + its_dev->itt_cfg.linear.num_ents = num_ents; + its_dev->itt_cfg.l2itt = false; + its_dev->itt_cfg.event_id_bits = event_id_bits; + + gicv5_its_dcache_clean(its, itt, num_ents * sizeof(*itt)); + + return 0; +} + +/* + * Allocate a two-level ITT. All ITT entries are allocated in one go, unlike + * with the device table. Span may be used to limit the second level table + * size, where possible. + */ +static int gicv5_its_create_itt_two_level(struct gicv5_its_chip_data *its, + struct gicv5_its_dev *its_dev, + unsigned int event_id_bits, + unsigned int itt_l2sz, + unsigned int num_events) +{ + unsigned int l1_bits, l2_bits, span, events_per_l2_table; + unsigned int complete_tables, final_span, num_ents; + __le64 *itt_l1, *itt_l2, **l2ptrs; + int i, ret; + u64 val; + + ret = gicv5_its_l2sz_to_l2_bits(itt_l2sz); + if (ret >= event_id_bits) { + pr_debug("Incorrect l2sz (0x%x) for %u EventID bits. Cannot allocate ITT\n", + itt_l2sz, event_id_bits); + return -EINVAL; + } + + l2_bits = ret; + + l1_bits = event_id_bits - l2_bits; + + num_ents = BIT(l1_bits); + + itt_l1 = kcalloc(num_ents, sizeof(*itt_l1), GFP_KERNEL); + if (!itt_l1) + return -ENOMEM; + + l2ptrs = kcalloc(num_ents, sizeof(*l2ptrs), GFP_KERNEL); + if (!l2ptrs) { + kfree(itt_l1); + return -ENOMEM; + } + + its_dev->itt_cfg.l2.l2ptrs = l2ptrs; + + its_dev->itt_cfg.l2.l2sz = itt_l2sz; + its_dev->itt_cfg.l2.l1itt = itt_l1; + its_dev->itt_cfg.l2.num_l1_ents = num_ents; + its_dev->itt_cfg.l2itt = true; + its_dev->itt_cfg.event_id_bits = event_id_bits; + + /* + * Need to determine how many entries there are per L2 - this is based + * on the number of bits in the table. + */ + events_per_l2_table = BIT(l2_bits); + complete_tables = num_events / events_per_l2_table; + final_span = order_base_2(num_events % events_per_l2_table); + + for (i = 0; i < num_ents; i++) { + size_t l2sz; + + span = i == complete_tables ? 
final_span : l2_bits;
+
+		itt_l2 = kcalloc(BIT(span), sizeof(*itt_l2), GFP_KERNEL);
+		if (!itt_l2) {
+			ret = -ENOMEM;
+			goto out_free;
+		}
+
+		its_dev->itt_cfg.l2.l2ptrs[i] = itt_l2;
+
+		l2sz = BIT(span) * sizeof(*itt_l2);
+
+		gicv5_its_dcache_clean(its, itt_l2, l2sz);
+
+		val = (virt_to_phys(itt_l2) & GICV5_ITTL1E_L2_ADDR_MASK) |
+		      FIELD_PREP(GICV5_ITTL1E_SPAN, span) |
+		      FIELD_PREP(GICV5_ITTL1E_VALID, 0x1);
+
+		WRITE_ONCE(itt_l1[i], cpu_to_le64(val));
+	}
+
+	gicv5_its_dcache_clean(its, itt_l1, num_ents * sizeof(*itt_l1));
+
+	return 0;
+
+out_free:
+	for (i = i - 1; i >= 0; i--)
+		kfree(its_dev->itt_cfg.l2.l2ptrs[i]);
+
+	kfree(its_dev->itt_cfg.l2.l2ptrs);
+	kfree(itt_l1);
+	return ret;
+}
+
+/*
+ * Check whether the device table or the ITT supports a two-level
+ * layout and, based on the number of id_bits requested, determine
+ * whether a two-level table is actually required.
+ *
+ * Return true if a two-level table is deemed necessary, passing the
+ * selected level-2 size value back through @sz.
+ */
+static bool gicv5_its_l2sz_two_level(bool devtab, u32 its_idr1, u8 id_bits, u8 *sz)
+{
+	unsigned int l2_bits, l2_sz;
+
+	if (devtab && !FIELD_GET(GICV5_ITS_IDR1_DT_LEVELS, its_idr1))
+		return false;
+
+	if (!devtab && !FIELD_GET(GICV5_ITS_IDR1_ITT_LEVELS, its_idr1))
+		return false;
+
+	/*
+	 * Pick an L2 size that matches the pagesize; if a match
+	 * is not found, go for the smallest supported l2 size granule.
+	 *
+	 * This ensures that we will always be able to allocate
+	 * contiguous memory at L2.
+	 */
+	switch (PAGE_SIZE) {
+	case SZ_64K:
+		if (GICV5_ITS_IDR1_L2SZ_SUPPORT_64KB(its_idr1)) {
+			l2_sz = GICV5_ITS_DT_ITT_CFGR_L2SZ_64k;
+			break;
+		}
+		fallthrough;
+	case SZ_4K:
+		if (GICV5_ITS_IDR1_L2SZ_SUPPORT_4KB(its_idr1)) {
+			l2_sz = GICV5_ITS_DT_ITT_CFGR_L2SZ_4k;
+			break;
+		}
+		fallthrough;
+	case SZ_16K:
+		if (GICV5_ITS_IDR1_L2SZ_SUPPORT_16KB(its_idr1)) {
+			l2_sz = GICV5_ITS_DT_ITT_CFGR_L2SZ_16k;
+			break;
+		}
+		if (GICV5_ITS_IDR1_L2SZ_SUPPORT_4KB(its_idr1)) {
+			l2_sz = GICV5_ITS_DT_ITT_CFGR_L2SZ_4k;
+			break;
+		}
+		if (GICV5_ITS_IDR1_L2SZ_SUPPORT_64KB(its_idr1)) {
+			l2_sz = GICV5_ITS_DT_ITT_CFGR_L2SZ_64k;
+			break;
+		}
+
+		l2_sz = GICV5_ITS_DT_ITT_CFGR_L2SZ_4k;
+		break;
+	}
+
+	l2_bits = gicv5_its_l2sz_to_l2_bits(l2_sz);
+
+	if (l2_bits > id_bits)
+		return false;
+
+	*sz = l2_sz;
+
+	return true;
+}
+
+static __le64 *gicv5_its_device_get_itte_ref(struct gicv5_its_dev *its_dev,
+					     u16 event_id)
+{
+	unsigned int l1_idx, l2_idx, l2_bits;
+	__le64 *l2_itt;
+
+	if (!its_dev->itt_cfg.l2itt) {
+		__le64 *itt = its_dev->itt_cfg.linear.itt;
+
+		return &itt[event_id];
+	}
+
+	l2_bits = gicv5_its_l2sz_to_l2_bits(its_dev->itt_cfg.l2.l2sz);
+	l1_idx = event_id >> l2_bits;
+	l2_idx = event_id & GENMASK(l2_bits - 1, 0);
+	l2_itt = its_dev->itt_cfg.l2.l2ptrs[l1_idx];
+
+	return &l2_itt[l2_idx];
+}
+
+static int gicv5_its_device_cache_inv(struct gicv5_its_chip_data *its,
+				      struct gicv5_its_dev *its_dev)
+{
+	u32 devicer;
+	u64 didr;
+
+	didr = FIELD_PREP(GICV5_ITS_DIDR_DEVICEID, its_dev->device_id);
+	devicer = FIELD_PREP(GICV5_ITS_INV_DEVICER_I, 0x1) |
+		  FIELD_PREP(GICV5_ITS_INV_DEVICER_EVENTID_BITS,
+			     its_dev->itt_cfg.event_id_bits) |
+		  FIELD_PREP(GICV5_ITS_INV_DEVICER_L1, 0x0);
+	its_writeq_relaxed(its, didr, GICV5_ITS_DIDR);
+	its_writel_relaxed(its, devicer, GICV5_ITS_INV_DEVICER);
+
+	return gicv5_its_cache_sync(its);
+}
+
+/*
+ * Allocate a level 2 device table entry, update L1 parent to reference it.
+ * Only used for 2-level device tables, and it is called on demand.
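+ *
+ * Illustrative example (assumed numbers, not mandated by the GICv5
+ * spec): with a 4KB level-2 granule (9 bits), DeviceID 0x1234 selects
+ * L1 index 0x1234 >> 9 = 0x9 and L2 index 0x1234 & GENMASK(8, 0) =
+ * 0x34, which is the same lookup performed by
+ * gicv5_its_devtab_get_dte_ref().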
+ */ +static int gicv5_its_alloc_l2_devtab(struct gicv5_its_chip_data *its, + unsigned int l1_index) +{ + __le64 *l2devtab, *l1devtab = its->devtab_cfgr.l2.l1devtab; + u8 span, l2sz, l2_bits; + u64 l1dte; + + if (FIELD_GET(GICV5_DTL1E_VALID, le64_to_cpu(l1devtab[l1_index]))) + return 0; + + span = FIELD_GET(GICV5_DTL1E_SPAN, le64_to_cpu(l1devtab[l1_index])); + l2sz = devtab_cfgr_field(its, L2SZ); + + l2_bits = gicv5_its_l2sz_to_l2_bits(l2sz); + + /* + * Span allows us to create a smaller L2 device table. + * If it is too large, use the number of allowed L2 bits. + */ + if (span > l2_bits) + span = l2_bits; + + l2devtab = kcalloc(BIT(span), sizeof(*l2devtab), GFP_KERNEL); + if (!l2devtab) + return -ENOMEM; + + its->devtab_cfgr.l2.l2ptrs[l1_index] = l2devtab; + + l1dte = FIELD_PREP(GICV5_DTL1E_SPAN, span) | + (virt_to_phys(l2devtab) & GICV5_DTL1E_L2_ADDR_MASK) | + FIELD_PREP(GICV5_DTL1E_VALID, 0x1); + its_write_table_entry(its, &l1devtab[l1_index], l1dte); + + return 0; +} + +static __le64 *gicv5_its_devtab_get_dte_ref(struct gicv5_its_chip_data *its, + u32 device_id, bool alloc) +{ + u8 str = devtab_cfgr_field(its, STRUCTURE); + unsigned int l2sz, l2_bits, l1_idx, l2_idx; + __le64 *l2devtab; + int ret; + + if (str == GICV5_ITS_DT_ITT_CFGR_STRUCTURE_LINEAR) { + l2devtab = its->devtab_cfgr.linear.devtab; + return &l2devtab[device_id]; + } + + l2sz = devtab_cfgr_field(its, L2SZ); + l2_bits = gicv5_its_l2sz_to_l2_bits(l2sz); + l1_idx = device_id >> l2_bits; + l2_idx = device_id & GENMASK(l2_bits - 1, 0); + + if (alloc) { + /* + * Allocate a new L2 device table here before + * continuing. We make the assumption that the span in + * the L1 table has been set correctly, and blindly use + * that value. + */ + ret = gicv5_its_alloc_l2_devtab(its, l1_idx); + if (ret) + return NULL; + } + + l2devtab = its->devtab_cfgr.l2.l2ptrs[l1_idx]; + return &l2devtab[l2_idx]; +} + +/* + * Register a new device in the device table. Allocate an ITT and + * program the L2DTE entry according to the ITT structure that + * was chosen. + */ +static int gicv5_its_device_register(struct gicv5_its_chip_data *its, + struct gicv5_its_dev *its_dev) +{ + u8 event_id_bits, device_id_bits, itt_struct, itt_l2sz; + phys_addr_t itt_phys_base; + bool two_level_itt; + u32 idr1, idr2; + __le64 *dte; + u64 val; + int ret; + + device_id_bits = devtab_cfgr_field(its, DEVICEID_BITS); + + if (its_dev->device_id >= BIT(device_id_bits)) { + pr_err("Supplied DeviceID (%u) outside of Device Table range (%u)!", + its_dev->device_id, (u32)GENMASK(device_id_bits - 1, 0)); + return -EINVAL; + } + + dte = gicv5_its_devtab_get_dte_ref(its, its_dev->device_id, true); + if (!dte) + return -ENOMEM; + + if (FIELD_GET(GICV5_DTL2E_VALID, le64_to_cpu(*dte))) + return -EBUSY; + + /* + * Determine how many bits we need, validate those against the max. + * Based on these, determine if we should go for a 1- or 2-level ITT. + */ + event_id_bits = order_base_2(its_dev->num_events); + + idr2 = its_readl_relaxed(its, GICV5_ITS_IDR2); + + if (event_id_bits > FIELD_GET(GICV5_ITS_IDR2_EVENTID_BITS, idr2)) { + pr_err("Required EventID bits (%u) larger than supported bits (%u)!", + event_id_bits, + (u8)FIELD_GET(GICV5_ITS_IDR2_EVENTID_BITS, idr2)); + return -EINVAL; + } + + idr1 = its_readl_relaxed(its, GICV5_ITS_IDR1); + + /* + * L2 ITT size is programmed into the L2DTE regardless of + * whether a two-level or linear ITT is built, init it. 
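+	 * For a linear ITT the field is simply left as zero; the value is
+	 * only meaningful when GICV5_DTL2E_ITT_STRUCTURE selects the
+	 * two-level layout.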
+ */ + itt_l2sz = 0; + + two_level_itt = gicv5_its_l2sz_two_level(false, idr1, event_id_bits, + &itt_l2sz); + if (two_level_itt) + ret = gicv5_its_create_itt_two_level(its, its_dev, event_id_bits, + itt_l2sz, + its_dev->num_events); + else + ret = gicv5_its_create_itt_linear(its, its_dev, event_id_bits); + if (ret) + return ret; + + itt_phys_base = two_level_itt ? virt_to_phys(its_dev->itt_cfg.l2.l1itt) : + virt_to_phys(its_dev->itt_cfg.linear.itt); + + itt_struct = two_level_itt ? GICV5_ITS_DT_ITT_CFGR_STRUCTURE_TWO_LEVEL : + GICV5_ITS_DT_ITT_CFGR_STRUCTURE_LINEAR; + + val = FIELD_PREP(GICV5_DTL2E_EVENT_ID_BITS, event_id_bits) | + FIELD_PREP(GICV5_DTL2E_ITT_STRUCTURE, itt_struct) | + (itt_phys_base & GICV5_DTL2E_ITT_ADDR_MASK) | + FIELD_PREP(GICV5_DTL2E_ITT_L2SZ, itt_l2sz) | + FIELD_PREP(GICV5_DTL2E_VALID, 0x1); + + its_write_table_entry(its, dte, val); + + ret = gicv5_its_device_cache_inv(its, its_dev); + if (ret) { + its_write_table_entry(its, dte, 0); + gicv5_its_free_itt(its_dev); + return ret; + } + + return 0; +} + +/* + * Unregister a device in the device table. Lookup the device by ID, free the + * corresponding ITT, mark the device as invalid in the device table. + */ +static int gicv5_its_device_unregister(struct gicv5_its_chip_data *its, + struct gicv5_its_dev *its_dev) +{ + __le64 *dte; + + dte = gicv5_its_devtab_get_dte_ref(its, its_dev->device_id, false); + + if (!FIELD_GET(GICV5_DTL2E_VALID, le64_to_cpu(*dte))) { + pr_debug("Device table entry for DeviceID 0x%x is not valid. Nothing to clean up!", + its_dev->device_id); + return -EINVAL; + } + + /* Zero everything - make it clear that this is an invalid entry */ + its_write_table_entry(its, dte, 0); + + gicv5_its_free_itt(its_dev); + + return gicv5_its_device_cache_inv(its, its_dev); +} + +/* + * Allocate a 1-level device table. All entries are allocated, but marked + * invalid. + */ +static int gicv5_its_alloc_devtab_linear(struct gicv5_its_chip_data *its, + u8 device_id_bits) +{ + __le64 *devtab; + size_t sz; + u64 baser; + u32 cfgr; + + /* + * We expect a GICv5 implementation requiring a large number of + * deviceID bits to support a 2-level device table. If that's not + * the case, cap the number of deviceIDs supported according to the + * kmalloc limits so that the system can chug along with a linear + * device table. + */ + sz = BIT_ULL(device_id_bits) * sizeof(*devtab); + if (sz > KMALLOC_MAX_SIZE) { + u8 device_id_cap = ilog2(KMALLOC_MAX_SIZE/sizeof(*devtab)); + + pr_warn("Limiting device ID bits from %u to %u\n", + device_id_bits, device_id_cap); + device_id_bits = device_id_cap; + } + + devtab = kcalloc(BIT(device_id_bits), sizeof(*devtab), GFP_KERNEL); + if (!devtab) + return -ENOMEM; + + gicv5_its_dcache_clean(its, devtab, sz); + + cfgr = FIELD_PREP(GICV5_ITS_DT_CFGR_STRUCTURE, + GICV5_ITS_DT_ITT_CFGR_STRUCTURE_LINEAR) | + FIELD_PREP(GICV5_ITS_DT_CFGR_L2SZ, 0) | + FIELD_PREP(GICV5_ITS_DT_CFGR_DEVICEID_BITS, device_id_bits); + its_writel_relaxed(its, cfgr, GICV5_ITS_DT_CFGR); + + baser = virt_to_phys(devtab) & GICV5_ITS_DT_BASER_ADDR_MASK; + its_writeq_relaxed(its, baser, GICV5_ITS_DT_BASER); + + its->devtab_cfgr.cfgr = cfgr; + its->devtab_cfgr.linear.devtab = devtab; + + return 0; +} + +/* + * Allocate a 2-level device table. L2 entries are not allocated, + * they are allocated on-demand. 
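+ *
+ * As a rough illustration (assumed numbers, not mandated by the
+ * hardware): with 16 DeviceID bits and a 4KB L2 granule (9 bits),
+ * the L1 table needs BIT(16 - 9) = 128 eight-byte entries (1KB),
+ * while each on-demand L2 table spans at most BIT(9) entries (4KB).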
+ */ +static int gicv5_its_alloc_devtab_two_level(struct gicv5_its_chip_data *its, + u8 device_id_bits, + u8 devtab_l2sz) +{ + unsigned int l1_bits, l2_bits, i; + __le64 *l1devtab, **l2ptrs; + size_t l1_sz; + u64 baser; + u32 cfgr; + + l2_bits = gicv5_its_l2sz_to_l2_bits(devtab_l2sz); + + l1_bits = device_id_bits - l2_bits; + l1_sz = BIT(l1_bits) * sizeof(*l1devtab); + /* + * With 2-level device table support it is highly unlikely + * that we are not able to allocate the required amount of + * device table memory to cover deviceID space; cap the + * deviceID space if we encounter such set-up. + * If this ever becomes a problem we could revisit the policy + * behind level 2 size selection to reduce level-1 deviceID bits. + */ + if (l1_sz > KMALLOC_MAX_SIZE) { + l1_bits = ilog2(KMALLOC_MAX_SIZE/sizeof(*l1devtab)); + + pr_warn("Limiting device ID bits from %u to %u\n", + device_id_bits, l1_bits + l2_bits); + device_id_bits = l1_bits + l2_bits; + l1_sz = KMALLOC_MAX_SIZE; + } + + l1devtab = kcalloc(BIT(l1_bits), sizeof(*l1devtab), GFP_KERNEL); + if (!l1devtab) + return -ENOMEM; + + l2ptrs = kcalloc(BIT(l1_bits), sizeof(*l2ptrs), GFP_KERNEL); + if (!l2ptrs) { + kfree(l1devtab); + return -ENOMEM; + } + + for (i = 0; i < BIT(l1_bits); i++) + l1devtab[i] = cpu_to_le64(FIELD_PREP(GICV5_DTL1E_SPAN, l2_bits)); + + gicv5_its_dcache_clean(its, l1devtab, l1_sz); + + cfgr = FIELD_PREP(GICV5_ITS_DT_CFGR_STRUCTURE, + GICV5_ITS_DT_ITT_CFGR_STRUCTURE_TWO_LEVEL) | + FIELD_PREP(GICV5_ITS_DT_CFGR_L2SZ, devtab_l2sz) | + FIELD_PREP(GICV5_ITS_DT_CFGR_DEVICEID_BITS, device_id_bits); + its_writel_relaxed(its, cfgr, GICV5_ITS_DT_CFGR); + + baser = virt_to_phys(l1devtab) & GICV5_ITS_DT_BASER_ADDR_MASK; + its_writeq_relaxed(its, baser, GICV5_ITS_DT_BASER); + + its->devtab_cfgr.cfgr = cfgr; + its->devtab_cfgr.l2.l1devtab = l1devtab; + its->devtab_cfgr.l2.l2ptrs = l2ptrs; + + return 0; +} + +/* + * Initialise the device table as either 1- or 2-level depending on what is + * supported by the hardware. 
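+ * The choice is driven by ITS_IDR1: a two-level table is only built
+ * if ITS_IDR1.DT_LEVELS advertises support for it and the selected
+ * L2 granule is small enough for the DeviceID space, as checked in
+ * gicv5_its_l2sz_two_level().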
+ */ +static int gicv5_its_init_devtab(struct gicv5_its_chip_data *its) +{ + u8 device_id_bits, devtab_l2sz; + bool two_level_devtab; + u32 idr1; + + idr1 = its_readl_relaxed(its, GICV5_ITS_IDR1); + + device_id_bits = FIELD_GET(GICV5_ITS_IDR1_DEVICEID_BITS, idr1); + two_level_devtab = gicv5_its_l2sz_two_level(true, idr1, device_id_bits, + &devtab_l2sz); + if (two_level_devtab) + return gicv5_its_alloc_devtab_two_level(its, device_id_bits, + devtab_l2sz); + else + return gicv5_its_alloc_devtab_linear(its, device_id_bits); +} + +static void gicv5_its_deinit_devtab(struct gicv5_its_chip_data *its) +{ + u8 str = devtab_cfgr_field(its, STRUCTURE); + + if (str == GICV5_ITS_DT_ITT_CFGR_STRUCTURE_LINEAR) { + kfree(its->devtab_cfgr.linear.devtab); + } else { + kfree(its->devtab_cfgr.l2.l1devtab); + kfree(its->devtab_cfgr.l2.l2ptrs); + } +} + +static void gicv5_its_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) +{ + struct gicv5_its_dev *its_dev = irq_data_get_irq_chip_data(d); + u64 addr = its_dev->its_trans_phys_base; + + msg->data = FIELD_GET(GICV5_ITS_HWIRQ_EVENT_ID, d->hwirq); + msi_msg_set_addr(irq_data_get_msi_desc(d), msg, addr); +} + +static const struct irq_chip gicv5_its_irq_chip = { + .name = "GICv5-ITS-MSI", + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_affinity = irq_chip_set_affinity_parent, + .irq_get_irqchip_state = irq_chip_get_parent_state, + .irq_set_irqchip_state = irq_chip_set_parent_state, + .irq_compose_msi_msg = gicv5_its_compose_msi_msg, +}; + +static struct gicv5_its_dev *gicv5_its_find_device(struct gicv5_its_chip_data *its, + u32 device_id) +{ + struct gicv5_its_dev *dev = xa_load(&its->its_devices, device_id); + + return dev ? dev : ERR_PTR(-ENODEV); +} + +static struct gicv5_its_dev *gicv5_its_alloc_device(struct gicv5_its_chip_data *its, int nvec, + u32 dev_id) +{ + struct gicv5_its_dev *its_dev; + void *entry; + int ret; + + its_dev = gicv5_its_find_device(its, dev_id); + if (!IS_ERR(its_dev)) { + pr_err("A device with this DeviceID (0x%x) has already been registered.\n", + dev_id); + + return ERR_PTR(-EBUSY); + } + + its_dev = kzalloc(sizeof(*its_dev), GFP_KERNEL); + if (!its_dev) + return ERR_PTR(-ENOMEM); + + its_dev->device_id = dev_id; + its_dev->num_events = nvec; + + ret = gicv5_its_device_register(its, its_dev); + if (ret) { + pr_err("Failed to register the device\n"); + goto out_dev_free; + } + + its_dev->its_node = its; + + its_dev->event_map = (unsigned long *)bitmap_zalloc(its_dev->num_events, GFP_KERNEL); + if (!its_dev->event_map) { + ret = -ENOMEM; + goto out_unregister; + } + + entry = xa_store(&its->its_devices, dev_id, its_dev, GFP_KERNEL); + if (xa_is_err(entry)) { + ret = xa_err(entry); + goto out_bitmap_free; + } + + return its_dev; + +out_bitmap_free: + bitmap_free(its_dev->event_map); +out_unregister: + gicv5_its_device_unregister(its, its_dev); +out_dev_free: + kfree(its_dev); + return ERR_PTR(ret); +} + +static int gicv5_its_msi_prepare(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *info) +{ + u32 dev_id = info->scratchpad[0].ul; + struct msi_domain_info *msi_info; + struct gicv5_its_chip_data *its; + struct gicv5_its_dev *its_dev; + + msi_info = msi_get_domain_info(domain); + its = msi_info->data; + + guard(mutex)(&its->dev_alloc_lock); + + its_dev = gicv5_its_alloc_device(its, nvec, dev_id); + if (IS_ERR(its_dev)) + return PTR_ERR(its_dev); + + its_dev->its_trans_phys_base = info->scratchpad[1].ul; + info->scratchpad[0].ptr = 
its_dev; + + return 0; +} + +static void gicv5_its_msi_teardown(struct irq_domain *domain, msi_alloc_info_t *info) +{ + struct gicv5_its_dev *its_dev = info->scratchpad[0].ptr; + struct msi_domain_info *msi_info; + struct gicv5_its_chip_data *its; + + msi_info = msi_get_domain_info(domain); + its = msi_info->data; + + guard(mutex)(&its->dev_alloc_lock); + + if (WARN_ON_ONCE(!bitmap_empty(its_dev->event_map, its_dev->num_events))) + return; + + xa_erase(&its->its_devices, its_dev->device_id); + bitmap_free(its_dev->event_map); + gicv5_its_device_unregister(its, its_dev); + kfree(its_dev); +} + +static struct msi_domain_ops gicv5_its_msi_domain_ops = { + .msi_prepare = gicv5_its_msi_prepare, + .msi_teardown = gicv5_its_msi_teardown, +}; + +static int gicv5_its_map_event(struct gicv5_its_dev *its_dev, u16 event_id, u32 lpi) +{ + struct gicv5_its_chip_data *its = its_dev->its_node; + u64 itt_entry; + __le64 *itte; + + itte = gicv5_its_device_get_itte_ref(its_dev, event_id); + + if (FIELD_GET(GICV5_ITTL2E_VALID, *itte)) + return -EEXIST; + + itt_entry = FIELD_PREP(GICV5_ITTL2E_LPI_ID, lpi) | + FIELD_PREP(GICV5_ITTL2E_VALID, 0x1); + + its_write_table_entry(its, itte, itt_entry); + + gicv5_its_itt_cache_inv(its, its_dev->device_id, event_id); + + return 0; +} + +static void gicv5_its_unmap_event(struct gicv5_its_dev *its_dev, u16 event_id) +{ + struct gicv5_its_chip_data *its = its_dev->its_node; + u64 itte_val; + __le64 *itte; + + itte = gicv5_its_device_get_itte_ref(its_dev, event_id); + + itte_val = le64_to_cpu(*itte); + itte_val &= ~GICV5_ITTL2E_VALID; + + its_write_table_entry(its, itte, itte_val); + + gicv5_its_itt_cache_inv(its, its_dev->device_id, event_id); +} + +static int gicv5_its_alloc_eventid(struct gicv5_its_dev *its_dev, msi_alloc_info_t *info, + unsigned int nr_irqs, u32 *eventid) +{ + int event_id_base; + + if (!(info->flags & MSI_ALLOC_FLAGS_FIXED_MSG_DATA)) { + event_id_base = bitmap_find_free_region(its_dev->event_map, + its_dev->num_events, + get_count_order(nr_irqs)); + if (event_id_base < 0) + return event_id_base; + } else { + /* + * We want to have a fixed EventID mapped for hardcoded + * message data allocations. 
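+		 * This is the path taken by wired-to-MSI bridges such as the
+		 * IWB, which pass the wire number as a fixed cookie
+		 * (MSI_ALLOC_FLAGS_FIXED_MSG_DATA) through info->hwirq.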
+		 */
+		if (WARN_ON_ONCE(nr_irqs != 1))
+			return -EINVAL;
+
+		event_id_base = info->hwirq;
+
+		if (event_id_base >= its_dev->num_events) {
+			pr_err("EventID outside of ITT range; cannot allocate an ITT entry!\n");
+			return -EINVAL;
+		}
+
+		if (test_and_set_bit(event_id_base, its_dev->event_map)) {
+			pr_warn("Can't reserve event_id in the event map\n");
+			return -EINVAL;
+		}
+	}
+
+	*eventid = event_id_base;
+
+	return 0;
+}
+
+static void gicv5_its_free_eventid(struct gicv5_its_dev *its_dev, u32 event_id_base,
+				   unsigned int nr_irqs)
+{
+	bitmap_release_region(its_dev->event_map, event_id_base,
+			      get_count_order(nr_irqs));
+}
+
+static int gicv5_its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+				      unsigned int nr_irqs, void *arg)
+{
+	u32 device_id, event_id_base, lpi;
+	struct gicv5_its_dev *its_dev;
+	msi_alloc_info_t *info = arg;
+	irq_hw_number_t hwirq;
+	struct irq_data *irqd;
+	int ret, i;
+
+	its_dev = info->scratchpad[0].ptr;
+
+	ret = gicv5_its_alloc_eventid(its_dev, info, nr_irqs, &event_id_base);
+	if (ret)
+		return ret;
+
+	ret = iommu_dma_prepare_msi(info->desc, its_dev->its_trans_phys_base);
+	if (ret)
+		goto out_eventid;
+
+	device_id = its_dev->device_id;
+
+	for (i = 0; i < nr_irqs; i++) {
+		ret = gicv5_alloc_lpi();
+		if (ret < 0) {
+			pr_debug("Failed to find free LPI!\n");
+			goto out_free_irqs;
+		}
+		lpi = ret;
+
+		ret = irq_domain_alloc_irqs_parent(domain, virq + i, 1, &lpi);
+		if (ret) {
+			gicv5_free_lpi(lpi);
+			goto out_free_irqs;
+		}
+
+		/*
+		 * Store eventid and deviceid into the hwirq for later use.
+		 *
+		 *	hwirq = event_id << 32 | device_id
+		 */
+		hwirq = FIELD_PREP(GICV5_ITS_HWIRQ_DEVICE_ID, device_id) |
+			FIELD_PREP(GICV5_ITS_HWIRQ_EVENT_ID, (u64)event_id_base + i);
+		irq_domain_set_info(domain, virq + i, hwirq,
+				    &gicv5_its_irq_chip, its_dev,
+				    handle_fasteoi_irq, NULL, NULL);
+
+		irqd = irq_get_irq_data(virq + i);
+		irqd_set_single_target(irqd);
+		irqd_set_affinity_on_activate(irqd);
+	}
+
+	return 0;
+
+out_free_irqs:
+	while (--i >= 0) {
+		irqd = irq_domain_get_irq_data(domain, virq + i);
+		gicv5_free_lpi(irqd->parent_data->hwirq);
+		irq_domain_reset_irq_data(irqd);
+		irq_domain_free_irqs_parent(domain, virq + i, 1);
+	}
+out_eventid:
+	gicv5_its_free_eventid(its_dev, event_id_base, nr_irqs);
+	return ret;
+}
+
+static void gicv5_its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+				      unsigned int nr_irqs)
+{
+	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+	struct gicv5_its_chip_data *its;
+	struct gicv5_its_dev *its_dev;
+	u16 event_id_base;
+	unsigned int i;
+
+	its_dev = irq_data_get_irq_chip_data(d);
+	its = its_dev->its_node;
+
+	event_id_base = FIELD_GET(GICV5_ITS_HWIRQ_EVENT_ID, d->hwirq);
+
+	bitmap_release_region(its_dev->event_map, event_id_base,
+			      get_count_order(nr_irqs));
+
+	/* Hierarchically free irq data */
+	for (i = 0; i < nr_irqs; i++) {
+		d = irq_domain_get_irq_data(domain, virq + i);
+
+		gicv5_free_lpi(d->parent_data->hwirq);
+		irq_domain_reset_irq_data(d);
+		irq_domain_free_irqs_parent(domain, virq + i, 1);
+	}
+
+	gicv5_its_syncr(its, its_dev);
+	gicv5_irs_syncr();
+}
+
+static int gicv5_its_irq_domain_activate(struct irq_domain *domain, struct irq_data *d,
+					 bool reserve)
+{
+	struct gicv5_its_dev *its_dev = irq_data_get_irq_chip_data(d);
+	u16 event_id;
+	u32 lpi;
+
+	event_id = FIELD_GET(GICV5_ITS_HWIRQ_EVENT_ID, d->hwirq);
+	lpi = d->parent_data->hwirq;
+
+	return gicv5_its_map_event(its_dev, event_id, lpi);
+}
+
+static void gicv5_its_irq_domain_deactivate(struct irq_domain *domain,
+					    struct 
irq_data *d) +{ + struct gicv5_its_dev *its_dev = irq_data_get_irq_chip_data(d); + u16 event_id; + + event_id = FIELD_GET(GICV5_ITS_HWIRQ_EVENT_ID, d->hwirq); + + gicv5_its_unmap_event(its_dev, event_id); +} + +static const struct irq_domain_ops gicv5_its_irq_domain_ops = { + .alloc = gicv5_its_irq_domain_alloc, + .free = gicv5_its_irq_domain_free, + .activate = gicv5_its_irq_domain_activate, + .deactivate = gicv5_its_irq_domain_deactivate, + .select = msi_lib_irq_domain_select, +}; + +static int gicv5_its_write_cr0(struct gicv5_its_chip_data *its, bool enable) +{ + u32 cr0 = FIELD_PREP(GICV5_ITS_CR0_ITSEN, enable); + + its_writel_relaxed(its, cr0, GICV5_ITS_CR0); + return gicv5_wait_for_op_atomic(its->its_base, GICV5_ITS_CR0, + GICV5_ITS_CR0_IDLE, NULL); +} + +static int gicv5_its_enable(struct gicv5_its_chip_data *its) +{ + return gicv5_its_write_cr0(its, true); +} + +static int gicv5_its_disable(struct gicv5_its_chip_data *its) +{ + return gicv5_its_write_cr0(its, false); +} + +static void gicv5_its_print_info(struct gicv5_its_chip_data *its_node) +{ + bool devtab_linear; + u8 device_id_bits; + u8 str; + + device_id_bits = devtab_cfgr_field(its_node, DEVICEID_BITS); + + str = devtab_cfgr_field(its_node, STRUCTURE); + devtab_linear = (str == GICV5_ITS_DT_ITT_CFGR_STRUCTURE_LINEAR); + + pr_info("ITS %s enabled using %s device table device_id_bits %u\n", + fwnode_get_name(its_node->fwnode), + devtab_linear ? "linear" : "2-level", + device_id_bits); +} + +static int gicv5_its_init_domain(struct gicv5_its_chip_data *its, struct irq_domain *parent) +{ + struct irq_domain_info dom_info = { + .fwnode = its->fwnode, + .ops = &gicv5_its_irq_domain_ops, + .domain_flags = its->msi_domain_flags, + .parent = parent, + }; + struct msi_domain_info *info; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->ops = &gicv5_its_msi_domain_ops; + info->data = its; + dom_info.host_data = info; + + if (!msi_create_parent_irq_domain(&dom_info, &gic_v5_its_msi_parent_ops)) { + kfree(info); + return -ENOMEM; + } + + return 0; +} + +static int __init gicv5_its_init_bases(void __iomem *its_base, struct fwnode_handle *handle, + struct irq_domain *parent_domain) +{ + struct device_node *np = to_of_node(handle); + struct gicv5_its_chip_data *its_node; + u32 cr0, cr1; + bool enabled; + int ret; + + its_node = kzalloc(sizeof(*its_node), GFP_KERNEL); + if (!its_node) + return -ENOMEM; + + mutex_init(&its_node->dev_alloc_lock); + xa_init(&its_node->its_devices); + its_node->fwnode = handle; + its_node->its_base = its_base; + its_node->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI | + IRQ_DOMAIN_FLAG_FWNODE_PARENT; + + cr0 = its_readl_relaxed(its_node, GICV5_ITS_CR0); + enabled = FIELD_GET(GICV5_ITS_CR0_ITSEN, cr0); + if (WARN(enabled, "ITS %s enabled, disabling it before proceeding\n", np->full_name)) { + ret = gicv5_its_disable(its_node); + if (ret) + goto out_free_node; + } + + if (of_property_read_bool(np, "dma-noncoherent")) { + /* + * A non-coherent ITS implies that some cache levels cannot be + * used coherently by the cores and GIC. Our only option is to mark + * memory attributes for the GIC as non-cacheable; by default, + * non-cacheable memory attributes imply outer-shareable + * shareability, the value written into ITS_CR1_SH is ignored. 
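+		 * It also means that every table update must be cleaned to
+		 * the PoC (see gicv5_its_dcache_clean()) before the ITS can
+		 * be expected to observe it.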
+ */ + cr1 = FIELD_PREP(GICV5_ITS_CR1_ITT_RA, GICV5_NO_READ_ALLOC) | + FIELD_PREP(GICV5_ITS_CR1_DT_RA, GICV5_NO_READ_ALLOC) | + FIELD_PREP(GICV5_ITS_CR1_IC, GICV5_NON_CACHE) | + FIELD_PREP(GICV5_ITS_CR1_OC, GICV5_NON_CACHE); + its_node->flags |= ITS_FLAGS_NON_COHERENT; + } else { + cr1 = FIELD_PREP(GICV5_ITS_CR1_ITT_RA, GICV5_READ_ALLOC) | + FIELD_PREP(GICV5_ITS_CR1_DT_RA, GICV5_READ_ALLOC) | + FIELD_PREP(GICV5_ITS_CR1_IC, GICV5_WB_CACHE) | + FIELD_PREP(GICV5_ITS_CR1_OC, GICV5_WB_CACHE) | + FIELD_PREP(GICV5_ITS_CR1_SH, GICV5_INNER_SHARE); + } + + its_writel_relaxed(its_node, cr1, GICV5_ITS_CR1); + + ret = gicv5_its_init_devtab(its_node); + if (ret) + goto out_free_node; + + ret = gicv5_its_enable(its_node); + if (ret) + goto out_free_devtab; + + ret = gicv5_its_init_domain(its_node, parent_domain); + if (ret) + goto out_disable_its; + + gicv5_its_print_info(its_node); + + return 0; + +out_disable_its: + gicv5_its_disable(its_node); +out_free_devtab: + gicv5_its_deinit_devtab(its_node); +out_free_node: + kfree(its_node); + return ret; +} + +static int __init gicv5_its_init(struct device_node *node) +{ + void __iomem *its_base; + int ret, idx; + + idx = of_property_match_string(node, "reg-names", "ns-config"); + if (idx < 0) { + pr_err("%pOF: ns-config reg-name not present\n", node); + return -ENODEV; + } + + its_base = of_io_request_and_map(node, idx, of_node_full_name(node)); + if (IS_ERR(its_base)) { + pr_err("%pOF: unable to map GICv5 ITS_CONFIG_FRAME\n", node); + return PTR_ERR(its_base); + } + + ret = gicv5_its_init_bases(its_base, of_fwnode_handle(node), + gicv5_global_data.lpi_domain); + if (ret) + goto out_unmap; + + return 0; + +out_unmap: + iounmap(its_base); + return ret; +} + +void __init gicv5_its_of_probe(struct device_node *parent) +{ + struct device_node *np; + + for_each_available_child_of_node(parent, np) { + if (!of_device_is_compatible(np, "arm,gic-v5-its")) + continue; + + if (gicv5_its_init(np)) + pr_err("Failed to init ITS %s\n", np->full_name); + } +} diff --git a/drivers/irqchip/irq-gic-v5-iwb.c b/drivers/irqchip/irq-gic-v5-iwb.c new file mode 100644 index 000000000000..ad9fdc14d1c6 --- /dev/null +++ b/drivers/irqchip/irq-gic-v5-iwb.c @@ -0,0 +1,277 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2024-2025 ARM Limited, All Rights Reserved. 
+ */
+#define pr_fmt(fmt) "GICv5 IWB: " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic-v5.h>
+
+struct gicv5_iwb_chip_data {
+	void __iomem *iwb_base;
+	u16 nr_regs;
+};
+
+static u32 iwb_readl_relaxed(struct gicv5_iwb_chip_data *iwb_node, const u32 reg_offset)
+{
+	return readl_relaxed(iwb_node->iwb_base + reg_offset);
+}
+
+static void iwb_writel_relaxed(struct gicv5_iwb_chip_data *iwb_node, const u32 val,
+			       const u32 reg_offset)
+{
+	writel_relaxed(val, iwb_node->iwb_base + reg_offset);
+}
+
+static int gicv5_iwb_wait_for_wenabler(struct gicv5_iwb_chip_data *iwb_node)
+{
+	return gicv5_wait_for_op_atomic(iwb_node->iwb_base, GICV5_IWB_WENABLE_STATUSR,
+					GICV5_IWB_WENABLE_STATUSR_IDLE, NULL);
+}
+
+static int __gicv5_iwb_set_wire_enable(struct gicv5_iwb_chip_data *iwb_node,
+				       u32 iwb_wire, bool enable)
+{
+	u32 n = iwb_wire / 32;
+	u8 i = iwb_wire % 32;
+	u32 val;
+
+	if (n >= iwb_node->nr_regs) {
+		pr_err("IWB_WENABLER<n> is invalid for n=%u\n", n);
+		return -EINVAL;
+	}
+
+	/*
+	 * Enable IWB wire/pin at this point
+	 * Note: This is not the same as enabling the interrupt
+	 */
+	val = iwb_readl_relaxed(iwb_node, GICV5_IWB_WENABLER + (4 * n));
+	if (enable)
+		val |= BIT(i);
+	else
+		val &= ~BIT(i);
+	iwb_writel_relaxed(iwb_node, val, GICV5_IWB_WENABLER + (4 * n));
+
+	return gicv5_iwb_wait_for_wenabler(iwb_node);
+}
+
+static int gicv5_iwb_enable_wire(struct gicv5_iwb_chip_data *iwb_node,
+				 u32 iwb_wire)
+{
+	return __gicv5_iwb_set_wire_enable(iwb_node, iwb_wire, true);
+}
+
+static int gicv5_iwb_disable_wire(struct gicv5_iwb_chip_data *iwb_node,
+				  u32 iwb_wire)
+{
+	return __gicv5_iwb_set_wire_enable(iwb_node, iwb_wire, false);
+}
+
+static void gicv5_iwb_irq_disable(struct irq_data *d)
+{
+	struct gicv5_iwb_chip_data *iwb_node = irq_data_get_irq_chip_data(d);
+
+	gicv5_iwb_disable_wire(iwb_node, d->hwirq);
+	irq_chip_disable_parent(d);
+}
+
+static void gicv5_iwb_irq_enable(struct irq_data *d)
+{
+	struct gicv5_iwb_chip_data *iwb_node = irq_data_get_irq_chip_data(d);
+
+	gicv5_iwb_enable_wire(iwb_node, d->hwirq);
+	irq_chip_enable_parent(d);
+}
+
+static int gicv5_iwb_set_type(struct irq_data *d, unsigned int type)
+{
+	struct gicv5_iwb_chip_data *iwb_node = irq_data_get_irq_chip_data(d);
+	u32 iwb_wire, n, wtmr;
+	u8 i;
+
+	iwb_wire = d->hwirq;
+	i = iwb_wire % 32;
+	n = iwb_wire / 32;
+
+	if (n >= iwb_node->nr_regs) {
+		pr_err_once("reg %u out of range\n", n);
+		return -EINVAL;
+	}
+
+	wtmr = iwb_readl_relaxed(iwb_node, GICV5_IWB_WTMR + (4 * n));
+
+	switch (type) {
+	case IRQ_TYPE_LEVEL_HIGH:
+	case IRQ_TYPE_LEVEL_LOW:
+		wtmr |= BIT(i);
+		break;
+	case IRQ_TYPE_EDGE_RISING:
+	case IRQ_TYPE_EDGE_FALLING:
+		wtmr &= ~BIT(i);
+		break;
+	default:
+		pr_debug("unexpected wire trigger mode\n");
+		return -EINVAL;
+	}
+
+	iwb_writel_relaxed(iwb_node, wtmr, GICV5_IWB_WTMR + (4 * n));
+
+	return 0;
+}
+
+static void gicv5_iwb_domain_set_desc(msi_alloc_info_t *alloc_info, struct msi_desc *desc)
+{
+	alloc_info->desc = desc;
+	alloc_info->hwirq = (u32)desc->data.icookie.value;
+}
+
+static int gicv5_iwb_irq_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
+					  irq_hw_number_t *hwirq,
+					  unsigned int *type)
+{
+	if (!is_of_node(fwspec->fwnode))
+		return -EINVAL;
+
+	if (fwspec->param_count < 2)
+		return -EINVAL;
+
+	/*
+	 * param[0] is the wire
+	 * param[1] is the interrupt type
+	 */
+	*hwirq = 
fwspec->param[0]; + *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; + + return 0; +} + +static void gicv5_iwb_write_msi_msg(struct irq_data *d, struct msi_msg *msg) {} + +static const struct msi_domain_template iwb_msi_template = { + .chip = { + .name = "GICv5-IWB", + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_enable = gicv5_iwb_irq_enable, + .irq_disable = gicv5_iwb_irq_disable, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_type = gicv5_iwb_set_type, + .irq_write_msi_msg = gicv5_iwb_write_msi_msg, + .irq_set_affinity = irq_chip_set_affinity_parent, + .irq_get_irqchip_state = irq_chip_get_parent_state, + .irq_set_irqchip_state = irq_chip_set_parent_state, + .flags = IRQCHIP_SET_TYPE_MASKED | + IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_MASK_ON_SUSPEND, + }, + + .ops = { + .set_desc = gicv5_iwb_domain_set_desc, + .msi_translate = gicv5_iwb_irq_domain_translate, + }, + + .info = { + .bus_token = DOMAIN_BUS_WIRED_TO_MSI, + .flags = MSI_FLAG_USE_DEV_FWNODE, + }, + + .alloc_info = { + .flags = MSI_ALLOC_FLAGS_FIXED_MSG_DATA, + }, +}; + +static bool gicv5_iwb_create_device_domain(struct device *dev, unsigned int size, + struct gicv5_iwb_chip_data *iwb_node) +{ + if (WARN_ON_ONCE(!dev->msi.domain)) + return false; + + return msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN, + &iwb_msi_template, size, + NULL, iwb_node); +} + +static struct gicv5_iwb_chip_data * +gicv5_iwb_init_bases(void __iomem *iwb_base, struct platform_device *pdev) +{ + u32 nr_wires, idr0, cr0; + unsigned int n; + int ret; + + struct gicv5_iwb_chip_data *iwb_node __free(kfree) = kzalloc(sizeof(*iwb_node), + GFP_KERNEL); + if (!iwb_node) + return ERR_PTR(-ENOMEM); + + iwb_node->iwb_base = iwb_base; + + idr0 = iwb_readl_relaxed(iwb_node, GICV5_IWB_IDR0); + nr_wires = (FIELD_GET(GICV5_IWB_IDR0_IW_RANGE, idr0) + 1) * 32; + + cr0 = iwb_readl_relaxed(iwb_node, GICV5_IWB_CR0); + if (!FIELD_GET(GICV5_IWB_CR0_IWBEN, cr0)) { + dev_err(&pdev->dev, "IWB must be enabled in firmware\n"); + return ERR_PTR(-EINVAL); + } + + iwb_node->nr_regs = FIELD_GET(GICV5_IWB_IDR0_IW_RANGE, idr0) + 1; + + for (n = 0; n < iwb_node->nr_regs; n++) + iwb_writel_relaxed(iwb_node, 0, GICV5_IWB_WENABLER + (sizeof(u32) * n)); + + ret = gicv5_iwb_wait_for_wenabler(iwb_node); + if (ret) + return ERR_PTR(ret); + + if (!gicv5_iwb_create_device_domain(&pdev->dev, nr_wires, iwb_node)) + return ERR_PTR(-ENOMEM); + + return_ptr(iwb_node); +} + +static int gicv5_iwb_device_probe(struct platform_device *pdev) +{ + struct gicv5_iwb_chip_data *iwb_node; + void __iomem *iwb_base; + struct resource *res; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; + + iwb_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); + if (!iwb_base) { + dev_err(&pdev->dev, "failed to ioremap %pR\n", res); + return -ENOMEM; + } + + iwb_node = gicv5_iwb_init_bases(iwb_base, pdev); + if (IS_ERR(iwb_node)) + return PTR_ERR(iwb_node); + + return 0; +} + +static const struct of_device_id gicv5_iwb_of_match[] = { + { .compatible = "arm,gic-v5-iwb" }, + { /* END */ } +}; +MODULE_DEVICE_TABLE(of, gicv5_iwb_of_match); + +static struct platform_driver gicv5_iwb_platform_driver = { + .driver = { + .name = "GICv5 IWB", + .of_match_table = gicv5_iwb_of_match, + .suppress_bind_attrs = true, + }, + .probe = gicv5_iwb_device_probe, +}; + +module_platform_driver(gicv5_iwb_platform_driver); diff --git a/drivers/irqchip/irq-gic-v5.c b/drivers/irqchip/irq-gic-v5.c new file mode 100644 index 000000000000..41ef286c4d78 --- /dev/null +++ 
b/drivers/irqchip/irq-gic-v5.c
@@ -0,0 +1,1130 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024-2025 ARM Limited, All Rights Reserved.
+ */
+
+#define pr_fmt(fmt) "GICv5: " fmt
+
+#include <linux/cpuhotplug.h>
+#include <linux/idr.h>
+#include <linux/irqdomain.h>
+#include <linux/slab.h>
+#include <linux/wordpart.h>
+
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic-v5.h>
+#include <linux/irqchip/arm-vgic-info.h>
+
+#include <asm/cpufeature.h>
+#include <asm/exception.h>
+
+static u8 pri_bits __ro_after_init = 5;
+
+#define GICV5_IRQ_PRI_MASK	0x1f
+#define GICV5_IRQ_PRI_MI	(GICV5_IRQ_PRI_MASK & GENMASK(4, 5 - pri_bits))
+
+#define PPI_NR	128
+
+static bool gicv5_cpuif_has_gcie(void)
+{
+	return this_cpu_has_cap(ARM64_HAS_GICV5_CPUIF);
+}
+
+struct gicv5_chip_data gicv5_global_data __read_mostly;
+
+static DEFINE_IDA(lpi_ida);
+static u32 num_lpis __ro_after_init;
+
+void __init gicv5_init_lpis(u32 lpis)
+{
+	num_lpis = lpis;
+}
+
+void __init gicv5_deinit_lpis(void)
+{
+	num_lpis = 0;
+}
+
+static int alloc_lpi(void)
+{
+	if (!num_lpis)
+		return -ENOSPC;
+
+	return ida_alloc_max(&lpi_ida, num_lpis - 1, GFP_KERNEL);
+}
+
+static void release_lpi(u32 lpi)
+{
+	ida_free(&lpi_ida, lpi);
+}
+
+int gicv5_alloc_lpi(void)
+{
+	return alloc_lpi();
+}
+
+void gicv5_free_lpi(u32 lpi)
+{
+	release_lpi(lpi);
+}
+
+static void gicv5_ppi_priority_init(void)
+{
+	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR0_EL1);
+	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR1_EL1);
+	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR2_EL1);
+	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR3_EL1);
+	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR4_EL1);
+	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR5_EL1);
+	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR6_EL1);
+	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR7_EL1);
+	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR8_EL1);
+	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR9_EL1);
+	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR10_EL1);
+	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR11_EL1);
+	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR12_EL1);
+	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR13_EL1);
+	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR14_EL1);
+	write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR15_EL1);
+
+	/*
+	 * A context synchronization event is required to make sure that the
+	 * effects of the system register writes above are synchronised.
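+	 * Without it, a subsequent interrupt could in principle still be
+	 * prioritised using stale PPI priority values.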
+	 */
+	isb();
+}
+
+static void gicv5_hwirq_init(irq_hw_number_t hwirq, u8 priority, u8 hwirq_type)
+{
+	u64 cdpri, cdaff;
+	u16 iaffid;
+	int ret;
+
+	if (hwirq_type == GICV5_HWIRQ_TYPE_LPI || hwirq_type == GICV5_HWIRQ_TYPE_SPI) {
+		cdpri = FIELD_PREP(GICV5_GIC_CDPRI_PRIORITY_MASK, priority) |
+			FIELD_PREP(GICV5_GIC_CDPRI_TYPE_MASK, hwirq_type) |
+			FIELD_PREP(GICV5_GIC_CDPRI_ID_MASK, hwirq);
+		gic_insn(cdpri, CDPRI);
+
+		ret = gicv5_irs_cpu_to_iaffid(smp_processor_id(), &iaffid);
+
+		if (WARN_ON_ONCE(ret))
+			return;
+
+		cdaff = FIELD_PREP(GICV5_GIC_CDAFF_IAFFID_MASK, iaffid) |
+			FIELD_PREP(GICV5_GIC_CDAFF_TYPE_MASK, hwirq_type) |
+			FIELD_PREP(GICV5_GIC_CDAFF_ID_MASK, hwirq);
+		gic_insn(cdaff, CDAFF);
+	}
+}
+
+static void gicv5_ppi_irq_mask(struct irq_data *d)
+{
+	u64 hwirq_id_bit = BIT_ULL(d->hwirq % 64);
+
+	if (d->hwirq < 64)
+		sysreg_clear_set_s(SYS_ICC_PPI_ENABLER0_EL1, hwirq_id_bit, 0);
+	else
+		sysreg_clear_set_s(SYS_ICC_PPI_ENABLER1_EL1, hwirq_id_bit, 0);
+
+	/*
+	 * We must ensure that the disable takes effect immediately to
+	 * guarantee that the lazy-disabled IRQ mechanism works.
+	 * A context synchronization event is required to guarantee it.
+	 * Reference: I_ZLTKB/R_YRGMH GICv5 specification - section 2.9.1.
+	 */
+	isb();
+}
+
+static void gicv5_iri_irq_mask(struct irq_data *d, u8 hwirq_type)
+{
+	u64 cddis;
+
+	cddis = FIELD_PREP(GICV5_GIC_CDDIS_ID_MASK, d->hwirq) |
+		FIELD_PREP(GICV5_GIC_CDDIS_TYPE_MASK, hwirq_type);
+
+	gic_insn(cddis, CDDIS);
+	/*
+	 * We must make sure that the GIC CDDIS write effects are propagated
+	 * immediately so that the disable takes effect at once, which in
+	 * turn guarantees that the lazy-disabled IRQ mechanism works.
+	 * Rule R_XCLJC states that the effects of a GIC system instruction
+	 * complete in finite time.
+	 * The GSB ensures completion of the GIC instruction and prevents
+	 * loads, stores and GIC instructions from executing part of their
+	 * functionality before the GSB SYS.
+	 */
+	gsb_sys();
+}
+
+static void gicv5_spi_irq_mask(struct irq_data *d)
+{
+	gicv5_iri_irq_mask(d, GICV5_HWIRQ_TYPE_SPI);
+}
+
+static void gicv5_lpi_irq_mask(struct irq_data *d)
+{
+	gicv5_iri_irq_mask(d, GICV5_HWIRQ_TYPE_LPI);
+}
+
+static void gicv5_ppi_irq_unmask(struct irq_data *d)
+{
+	u64 hwirq_id_bit = BIT_ULL(d->hwirq % 64);
+
+	if (d->hwirq < 64)
+		sysreg_clear_set_s(SYS_ICC_PPI_ENABLER0_EL1, 0, hwirq_id_bit);
+	else
+		sysreg_clear_set_s(SYS_ICC_PPI_ENABLER1_EL1, 0, hwirq_id_bit);
+	/*
+	 * We must ensure that the enable takes effect in finite time - a
+	 * context synchronization event is required to guarantee it; we
+	 * cannot take it for granted that this would happen otherwise
+	 * (e.g. a core going straight into idle after enabling a PPI).
+	 * Reference: I_ZLTKB/R_YRGMH GICv5 specification - section 2.9.1.
+	 */
+	isb();
+}
+
+static void gicv5_iri_irq_unmask(struct irq_data *d, u8 hwirq_type)
+{
+	u64 cden;
+
+	cden = FIELD_PREP(GICV5_GIC_CDEN_ID_MASK, d->hwirq) |
+	       FIELD_PREP(GICV5_GIC_CDEN_TYPE_MASK, hwirq_type);
+	/*
+	 * Rule R_XCLJC states that the effects of a GIC system instruction
+	 * complete in finite time and that's the only requirement when
+	 * unmasking an SPI/LPI IRQ.
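+	 * That is also why, unlike the disable path in gicv5_iri_irq_mask(),
+	 * no GSB SYS barrier is required here.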
+ */ + gic_insn(cden, CDEN); +} + +static void gicv5_spi_irq_unmask(struct irq_data *d) +{ + gicv5_iri_irq_unmask(d, GICV5_HWIRQ_TYPE_SPI); +} + +static void gicv5_lpi_irq_unmask(struct irq_data *d) +{ + gicv5_iri_irq_unmask(d, GICV5_HWIRQ_TYPE_LPI); +} + +static void gicv5_hwirq_eoi(u32 hwirq_id, u8 hwirq_type) +{ + u64 cddi; + + cddi = FIELD_PREP(GICV5_GIC_CDDI_ID_MASK, hwirq_id) | + FIELD_PREP(GICV5_GIC_CDDI_TYPE_MASK, hwirq_type); + + gic_insn(cddi, CDDI); + + gic_insn(0, CDEOI); +} + +static void gicv5_ppi_irq_eoi(struct irq_data *d) +{ + /* Skip deactivate for forwarded PPI interrupts */ + if (irqd_is_forwarded_to_vcpu(d)) { + gic_insn(0, CDEOI); + return; + } + + gicv5_hwirq_eoi(d->hwirq, GICV5_HWIRQ_TYPE_PPI); +} + +static void gicv5_spi_irq_eoi(struct irq_data *d) +{ + gicv5_hwirq_eoi(d->hwirq, GICV5_HWIRQ_TYPE_SPI); +} + +static void gicv5_lpi_irq_eoi(struct irq_data *d) +{ + gicv5_hwirq_eoi(d->hwirq, GICV5_HWIRQ_TYPE_LPI); +} + +static int gicv5_iri_irq_set_affinity(struct irq_data *d, + const struct cpumask *mask_val, + bool force, u8 hwirq_type) +{ + int ret, cpuid; + u16 iaffid; + u64 cdaff; + + if (force) + cpuid = cpumask_first(mask_val); + else + cpuid = cpumask_any_and(mask_val, cpu_online_mask); + + ret = gicv5_irs_cpu_to_iaffid(cpuid, &iaffid); + if (ret) + return ret; + + cdaff = FIELD_PREP(GICV5_GIC_CDAFF_IAFFID_MASK, iaffid) | + FIELD_PREP(GICV5_GIC_CDAFF_TYPE_MASK, hwirq_type) | + FIELD_PREP(GICV5_GIC_CDAFF_ID_MASK, d->hwirq); + gic_insn(cdaff, CDAFF); + + irq_data_update_effective_affinity(d, cpumask_of(cpuid)); + + return IRQ_SET_MASK_OK_DONE; +} + +static int gicv5_spi_irq_set_affinity(struct irq_data *d, + const struct cpumask *mask_val, + bool force) +{ + return gicv5_iri_irq_set_affinity(d, mask_val, force, + GICV5_HWIRQ_TYPE_SPI); +} + +static int gicv5_lpi_irq_set_affinity(struct irq_data *d, + const struct cpumask *mask_val, + bool force) +{ + return gicv5_iri_irq_set_affinity(d, mask_val, force, + GICV5_HWIRQ_TYPE_LPI); +} + +enum ppi_reg { + PPI_PENDING, + PPI_ACTIVE, + PPI_HM +}; + +static __always_inline u64 read_ppi_sysreg_s(unsigned int irq, + const enum ppi_reg which) +{ + switch (which) { + case PPI_PENDING: + return irq < 64 ? read_sysreg_s(SYS_ICC_PPI_SPENDR0_EL1) : + read_sysreg_s(SYS_ICC_PPI_SPENDR1_EL1); + case PPI_ACTIVE: + return irq < 64 ? read_sysreg_s(SYS_ICC_PPI_SACTIVER0_EL1) : + read_sysreg_s(SYS_ICC_PPI_SACTIVER1_EL1); + case PPI_HM: + return irq < 64 ? 
read_sysreg_s(SYS_ICC_PPI_HMR0_EL1) : + read_sysreg_s(SYS_ICC_PPI_HMR1_EL1); + default: + BUILD_BUG_ON(1); + } +} + +static __always_inline void write_ppi_sysreg_s(unsigned int irq, bool set, + const enum ppi_reg which) +{ + u64 bit = BIT_ULL(irq % 64); + + switch (which) { + case PPI_PENDING: + if (set) { + if (irq < 64) + write_sysreg_s(bit, SYS_ICC_PPI_SPENDR0_EL1); + else + write_sysreg_s(bit, SYS_ICC_PPI_SPENDR1_EL1); + } else { + if (irq < 64) + write_sysreg_s(bit, SYS_ICC_PPI_CPENDR0_EL1); + else + write_sysreg_s(bit, SYS_ICC_PPI_CPENDR1_EL1); + } + return; + case PPI_ACTIVE: + if (set) { + if (irq < 64) + write_sysreg_s(bit, SYS_ICC_PPI_SACTIVER0_EL1); + else + write_sysreg_s(bit, SYS_ICC_PPI_SACTIVER1_EL1); + } else { + if (irq < 64) + write_sysreg_s(bit, SYS_ICC_PPI_CACTIVER0_EL1); + else + write_sysreg_s(bit, SYS_ICC_PPI_CACTIVER1_EL1); + } + return; + default: + BUILD_BUG_ON(1); + } +} + +static int gicv5_ppi_irq_get_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool *state) +{ + u64 hwirq_id_bit = BIT_ULL(d->hwirq % 64); + + switch (which) { + case IRQCHIP_STATE_PENDING: + *state = !!(read_ppi_sysreg_s(d->hwirq, PPI_PENDING) & hwirq_id_bit); + return 0; + case IRQCHIP_STATE_ACTIVE: + *state = !!(read_ppi_sysreg_s(d->hwirq, PPI_ACTIVE) & hwirq_id_bit); + return 0; + default: + pr_debug("Unexpected PPI irqchip state\n"); + return -EINVAL; + } +} + +static int gicv5_iri_irq_get_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool *state, u8 hwirq_type) +{ + u64 icsr, cdrcfg; + + cdrcfg = d->hwirq | FIELD_PREP(GICV5_GIC_CDRCFG_TYPE_MASK, hwirq_type); + + gic_insn(cdrcfg, CDRCFG); + isb(); + icsr = read_sysreg_s(SYS_ICC_ICSR_EL1); + + if (FIELD_GET(ICC_ICSR_EL1_F, icsr)) { + pr_err("ICSR_EL1 is invalid\n"); + return -EINVAL; + } + + switch (which) { + case IRQCHIP_STATE_PENDING: + *state = !!(FIELD_GET(ICC_ICSR_EL1_Pending, icsr)); + return 0; + + case IRQCHIP_STATE_ACTIVE: + *state = !!(FIELD_GET(ICC_ICSR_EL1_Active, icsr)); + return 0; + + default: + pr_debug("Unexpected irqchip_irq_state\n"); + return -EINVAL; + } +} + +static int gicv5_spi_irq_get_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool *state) +{ + return gicv5_iri_irq_get_irqchip_state(d, which, state, + GICV5_HWIRQ_TYPE_SPI); +} + +static int gicv5_lpi_irq_get_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool *state) +{ + return gicv5_iri_irq_get_irqchip_state(d, which, state, + GICV5_HWIRQ_TYPE_LPI); +} + +static int gicv5_ppi_irq_set_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool state) +{ + switch (which) { + case IRQCHIP_STATE_PENDING: + write_ppi_sysreg_s(d->hwirq, state, PPI_PENDING); + return 0; + case IRQCHIP_STATE_ACTIVE: + write_ppi_sysreg_s(d->hwirq, state, PPI_ACTIVE); + return 0; + default: + pr_debug("Unexpected PPI irqchip state\n"); + return -EINVAL; + } +} + +static void gicv5_iri_irq_write_pending_state(struct irq_data *d, bool state, + u8 hwirq_type) +{ + u64 cdpend; + + cdpend = FIELD_PREP(GICV5_GIC_CDPEND_TYPE_MASK, hwirq_type) | + FIELD_PREP(GICV5_GIC_CDPEND_ID_MASK, d->hwirq) | + FIELD_PREP(GICV5_GIC_CDPEND_PENDING_MASK, state); + + gic_insn(cdpend, CDPEND); +} + +static void gicv5_spi_irq_write_pending_state(struct irq_data *d, bool state) +{ + gicv5_iri_irq_write_pending_state(d, state, GICV5_HWIRQ_TYPE_SPI); +} + +static void gicv5_lpi_irq_write_pending_state(struct irq_data *d, bool state) +{ + gicv5_iri_irq_write_pending_state(d, state, GICV5_HWIRQ_TYPE_LPI); +} + +static int 
gicv5_spi_irq_set_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool state) +{ + switch (which) { + case IRQCHIP_STATE_PENDING: + gicv5_spi_irq_write_pending_state(d, state); + break; + default: + pr_debug("Unexpected irqchip_irq_state\n"); + return -EINVAL; + } + + return 0; +} + +static int gicv5_lpi_irq_set_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool state) +{ + switch (which) { + case IRQCHIP_STATE_PENDING: + gicv5_lpi_irq_write_pending_state(d, state); + break; + + default: + pr_debug("Unexpected irqchip_irq_state\n"); + return -EINVAL; + } + + return 0; +} + +static int gicv5_spi_irq_retrigger(struct irq_data *data) +{ + return !gicv5_spi_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, + true); +} + +static int gicv5_lpi_irq_retrigger(struct irq_data *data) +{ + return !gicv5_lpi_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, + true); +} + +static void gicv5_ipi_send_single(struct irq_data *d, unsigned int cpu) +{ + /* Mark the LPI pending */ + irq_chip_retrigger_hierarchy(d); +} + +static bool gicv5_ppi_irq_is_level(irq_hw_number_t hwirq) +{ + u64 bit = BIT_ULL(hwirq % 64); + + return !!(read_ppi_sysreg_s(hwirq, PPI_HM) & bit); +} + +static int gicv5_ppi_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) +{ + if (vcpu) + irqd_set_forwarded_to_vcpu(d); + else + irqd_clr_forwarded_to_vcpu(d); + + return 0; +} + +static const struct irq_chip gicv5_ppi_irq_chip = { + .name = "GICv5-PPI", + .irq_mask = gicv5_ppi_irq_mask, + .irq_unmask = gicv5_ppi_irq_unmask, + .irq_eoi = gicv5_ppi_irq_eoi, + .irq_get_irqchip_state = gicv5_ppi_irq_get_irqchip_state, + .irq_set_irqchip_state = gicv5_ppi_irq_set_irqchip_state, + .irq_set_vcpu_affinity = gicv5_ppi_irq_set_vcpu_affinity, + .flags = IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_MASK_ON_SUSPEND, +}; + +static const struct irq_chip gicv5_spi_irq_chip = { + .name = "GICv5-SPI", + .irq_mask = gicv5_spi_irq_mask, + .irq_unmask = gicv5_spi_irq_unmask, + .irq_eoi = gicv5_spi_irq_eoi, + .irq_set_type = gicv5_spi_irq_set_type, + .irq_set_affinity = gicv5_spi_irq_set_affinity, + .irq_retrigger = gicv5_spi_irq_retrigger, + .irq_get_irqchip_state = gicv5_spi_irq_get_irqchip_state, + .irq_set_irqchip_state = gicv5_spi_irq_set_irqchip_state, + .flags = IRQCHIP_SET_TYPE_MASKED | + IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_MASK_ON_SUSPEND, +}; + +static const struct irq_chip gicv5_lpi_irq_chip = { + .name = "GICv5-LPI", + .irq_mask = gicv5_lpi_irq_mask, + .irq_unmask = gicv5_lpi_irq_unmask, + .irq_eoi = gicv5_lpi_irq_eoi, + .irq_set_affinity = gicv5_lpi_irq_set_affinity, + .irq_retrigger = gicv5_lpi_irq_retrigger, + .irq_get_irqchip_state = gicv5_lpi_irq_get_irqchip_state, + .irq_set_irqchip_state = gicv5_lpi_irq_set_irqchip_state, + .flags = IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_MASK_ON_SUSPEND, +}; + +static const struct irq_chip gicv5_ipi_irq_chip = { + .name = "GICv5-IPI", + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_affinity = irq_chip_set_affinity_parent, + .irq_get_irqchip_state = irq_chip_get_parent_state, + .irq_set_irqchip_state = irq_chip_set_parent_state, + .ipi_send_single = gicv5_ipi_send_single, + .flags = IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_MASK_ON_SUSPEND, +}; + +static __always_inline int gicv5_irq_domain_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + irq_hw_number_t *hwirq, + unsigned int *type, + const u8 hwirq_type) +{ + if (!is_of_node(fwspec->fwnode)) + return -EINVAL; + + if (fwspec->param_count < 3) + return 
-EINVAL; + + if (fwspec->param[0] != hwirq_type) + return -EINVAL; + + *hwirq = fwspec->param[1]; + + switch (hwirq_type) { + case GICV5_HWIRQ_TYPE_PPI: + /* + * Handling mode is hardcoded for PPIs, set the type using + * HW reported value. + */ + *type = gicv5_ppi_irq_is_level(*hwirq) ? IRQ_TYPE_LEVEL_LOW : + IRQ_TYPE_EDGE_RISING; + break; + case GICV5_HWIRQ_TYPE_SPI: + *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; + break; + default: + BUILD_BUG_ON(1); + } + + return 0; +} + +static int gicv5_irq_ppi_domain_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + irq_hw_number_t *hwirq, + unsigned int *type) +{ + return gicv5_irq_domain_translate(d, fwspec, hwirq, type, + GICV5_HWIRQ_TYPE_PPI); +} + +static int gicv5_irq_ppi_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + unsigned int type = IRQ_TYPE_NONE; + struct irq_fwspec *fwspec = arg; + irq_hw_number_t hwirq; + int ret; + + if (WARN_ON_ONCE(nr_irqs != 1)) + return -EINVAL; + + ret = gicv5_irq_ppi_domain_translate(domain, fwspec, &hwirq, &type); + if (ret) + return ret; + + if (type & IRQ_TYPE_LEVEL_MASK) + irq_set_status_flags(virq, IRQ_LEVEL); + + irq_set_percpu_devid(virq); + irq_domain_set_info(domain, virq, hwirq, &gicv5_ppi_irq_chip, NULL, + handle_percpu_devid_irq, NULL, NULL); + + return 0; +} + +static void gicv5_irq_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + struct irq_data *d; + + if (WARN_ON_ONCE(nr_irqs != 1)) + return; + + d = irq_domain_get_irq_data(domain, virq); + + irq_set_handler(virq, NULL); + irq_domain_reset_irq_data(d); +} + +static int gicv5_irq_ppi_domain_select(struct irq_domain *d, struct irq_fwspec *fwspec, + enum irq_domain_bus_token bus_token) +{ + if (fwspec->fwnode != d->fwnode) + return 0; + + if (fwspec->param[0] != GICV5_HWIRQ_TYPE_PPI) + return 0; + + return (d == gicv5_global_data.ppi_domain); +} + +static const struct irq_domain_ops gicv5_irq_ppi_domain_ops = { + .translate = gicv5_irq_ppi_domain_translate, + .alloc = gicv5_irq_ppi_domain_alloc, + .free = gicv5_irq_domain_free, + .select = gicv5_irq_ppi_domain_select +}; + +static int gicv5_irq_spi_domain_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + irq_hw_number_t *hwirq, + unsigned int *type) +{ + return gicv5_irq_domain_translate(d, fwspec, hwirq, type, + GICV5_HWIRQ_TYPE_SPI); +} + +static int gicv5_irq_spi_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + struct gicv5_irs_chip_data *chip_data; + unsigned int type = IRQ_TYPE_NONE; + struct irq_fwspec *fwspec = arg; + struct irq_data *irqd; + irq_hw_number_t hwirq; + int ret; + + if (WARN_ON_ONCE(nr_irqs != 1)) + return -EINVAL; + + ret = gicv5_irq_spi_domain_translate(domain, fwspec, &hwirq, &type); + if (ret) + return ret; + + irqd = irq_desc_get_irq_data(irq_to_desc(virq)); + chip_data = gicv5_irs_lookup_by_spi_id(hwirq); + + irq_domain_set_info(domain, virq, hwirq, &gicv5_spi_irq_chip, chip_data, + handle_fasteoi_irq, NULL, NULL); + irq_set_probe(virq); + irqd_set_single_target(irqd); + + gicv5_hwirq_init(hwirq, GICV5_IRQ_PRI_MI, GICV5_HWIRQ_TYPE_SPI); + + return 0; +} + +static int gicv5_irq_spi_domain_select(struct irq_domain *d, struct irq_fwspec *fwspec, + enum irq_domain_bus_token bus_token) +{ + if (fwspec->fwnode != d->fwnode) + return 0; + + if (fwspec->param[0] != GICV5_HWIRQ_TYPE_SPI) + return 0; + + return (d == gicv5_global_data.spi_domain); +} + +static const struct irq_domain_ops gicv5_irq_spi_domain_ops = { 
+ .translate = gicv5_irq_spi_domain_translate, + .alloc = gicv5_irq_spi_domain_alloc, + .free = gicv5_irq_domain_free, + .select = gicv5_irq_spi_domain_select +}; + +static void gicv5_lpi_config_reset(struct irq_data *d) +{ + u64 cdhm; + + /* + * Reset LPIs handling mode to edge by default and clear pending + * state to make sure we start the LPI with a clean state from + * previous incarnations. + */ + cdhm = FIELD_PREP(GICV5_GIC_CDHM_HM_MASK, 0) | + FIELD_PREP(GICV5_GIC_CDHM_TYPE_MASK, GICV5_HWIRQ_TYPE_LPI) | + FIELD_PREP(GICV5_GIC_CDHM_ID_MASK, d->hwirq); + gic_insn(cdhm, CDHM); + + gicv5_lpi_irq_write_pending_state(d, false); +} + +static int gicv5_irq_lpi_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + irq_hw_number_t hwirq; + struct irq_data *irqd; + u32 *lpi = arg; + int ret; + + if (WARN_ON_ONCE(nr_irqs != 1)) + return -EINVAL; + + hwirq = *lpi; + + irqd = irq_domain_get_irq_data(domain, virq); + + irq_domain_set_info(domain, virq, hwirq, &gicv5_lpi_irq_chip, NULL, + handle_fasteoi_irq, NULL, NULL); + irqd_set_single_target(irqd); + + ret = gicv5_irs_iste_alloc(hwirq); + if (ret < 0) + return ret; + + gicv5_hwirq_init(hwirq, GICV5_IRQ_PRI_MI, GICV5_HWIRQ_TYPE_LPI); + gicv5_lpi_config_reset(irqd); + + return 0; +} + +static const struct irq_domain_ops gicv5_irq_lpi_domain_ops = { + .alloc = gicv5_irq_lpi_domain_alloc, + .free = gicv5_irq_domain_free, +}; + +void __init gicv5_init_lpi_domain(void) +{ + struct irq_domain *d; + + d = irq_domain_create_tree(NULL, &gicv5_irq_lpi_domain_ops, NULL); + gicv5_global_data.lpi_domain = d; +} + +void __init gicv5_free_lpi_domain(void) +{ + irq_domain_remove(gicv5_global_data.lpi_domain); + gicv5_global_data.lpi_domain = NULL; +} + +static int gicv5_irq_ipi_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + struct irq_data *irqd; + int ret, i; + u32 lpi; + + for (i = 0; i < nr_irqs; i++) { + ret = gicv5_alloc_lpi(); + if (ret < 0) + return ret; + + lpi = ret; + + ret = irq_domain_alloc_irqs_parent(domain, virq + i, 1, &lpi); + if (ret) { + gicv5_free_lpi(lpi); + return ret; + } + + irqd = irq_domain_get_irq_data(domain, virq + i); + + irq_domain_set_hwirq_and_chip(domain, virq + i, i, + &gicv5_ipi_irq_chip, NULL); + + irqd_set_single_target(irqd); + + irq_set_handler(virq + i, handle_percpu_irq); + } + + return 0; +} + +static void gicv5_irq_ipi_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + struct irq_data *d; + unsigned int i; + + for (i = 0; i < nr_irqs; i++) { + d = irq_domain_get_irq_data(domain, virq + i); + + if (!d) + return; + + gicv5_free_lpi(d->parent_data->hwirq); + + irq_set_handler(virq + i, NULL); + irq_domain_reset_irq_data(d); + irq_domain_free_irqs_parent(domain, virq + i, 1); + } +} + +static const struct irq_domain_ops gicv5_irq_ipi_domain_ops = { + .alloc = gicv5_irq_ipi_domain_alloc, + .free = gicv5_irq_ipi_domain_free, +}; + +static void handle_irq_per_domain(u32 hwirq) +{ + u8 hwirq_type = FIELD_GET(GICV5_HWIRQ_TYPE, hwirq); + u32 hwirq_id = FIELD_GET(GICV5_HWIRQ_ID, hwirq); + struct irq_domain *domain; + + switch (hwirq_type) { + case GICV5_HWIRQ_TYPE_PPI: + domain = gicv5_global_data.ppi_domain; + break; + case GICV5_HWIRQ_TYPE_SPI: + domain = gicv5_global_data.spi_domain; + break; + case GICV5_HWIRQ_TYPE_LPI: + domain = gicv5_global_data.lpi_domain; + break; + default: + pr_err_once("Unknown IRQ type, bail out\n"); + return; + } + + if (generic_handle_domain_irq(domain, hwirq_id)) { 
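+		/*
+		 * No handler claimed the interrupt: deactivate it anyway
+		 * (CDDI + CDEOI below) so that a spurious or misconfigured
+		 * INTID cannot be left active forever.
+		 */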
+ pr_err_once("Could not handle, hwirq = 0x%x", hwirq_id); + gicv5_hwirq_eoi(hwirq_id, hwirq_type); + } +} + +static void __exception_irq_entry gicv5_handle_irq(struct pt_regs *regs) +{ + bool valid; + u32 hwirq; + u64 ia; + + ia = gicr_insn(CDIA); + valid = GICV5_GICR_CDIA_VALID(ia); + + if (!valid) + return; + + /* + * Ensure that the CDIA instruction effects (ie IRQ activation) are + * completed before handling the interrupt. + */ + gsb_ack(); + + /* + * Ensure instruction ordering between an acknowledgment and subsequent + * instructions in the IRQ handler using an ISB. + */ + isb(); + + hwirq = FIELD_GET(GICV5_HWIRQ_INTID, ia); + + handle_irq_per_domain(hwirq); +} + +static void gicv5_cpu_disable_interrupts(void) +{ + u64 cr0; + + cr0 = FIELD_PREP(ICC_CR0_EL1_EN, 0); + write_sysreg_s(cr0, SYS_ICC_CR0_EL1); +} + +static void gicv5_cpu_enable_interrupts(void) +{ + u64 cr0, pcr; + + write_sysreg_s(0, SYS_ICC_PPI_ENABLER0_EL1); + write_sysreg_s(0, SYS_ICC_PPI_ENABLER1_EL1); + + gicv5_ppi_priority_init(); + + pcr = FIELD_PREP(ICC_PCR_EL1_PRIORITY, GICV5_IRQ_PRI_MI); + write_sysreg_s(pcr, SYS_ICC_PCR_EL1); + + cr0 = FIELD_PREP(ICC_CR0_EL1_EN, 1); + write_sysreg_s(cr0, SYS_ICC_CR0_EL1); +} + +static int base_ipi_virq; + +static int gicv5_starting_cpu(unsigned int cpu) +{ + if (WARN(!gicv5_cpuif_has_gcie(), + "GICv5 system components present but CPU does not have FEAT_GCIE")) + return -ENODEV; + + gicv5_cpu_enable_interrupts(); + + return gicv5_irs_register_cpu(cpu); +} + +static void __init gicv5_smp_init(void) +{ + unsigned int num_ipis = GICV5_IPIS_PER_CPU * nr_cpu_ids; + + cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING, + "irqchip/arm/gicv5:starting", + gicv5_starting_cpu, NULL); + + base_ipi_virq = irq_domain_alloc_irqs(gicv5_global_data.ipi_domain, + num_ipis, NUMA_NO_NODE, NULL); + if (WARN(base_ipi_virq <= 0, "IPI IRQ allocation was not successful")) + return; + + set_smp_ipi_range_percpu(base_ipi_virq, GICV5_IPIS_PER_CPU, nr_cpu_ids); +} + +static void __init gicv5_free_domains(void) +{ + if (gicv5_global_data.ppi_domain) + irq_domain_remove(gicv5_global_data.ppi_domain); + if (gicv5_global_data.spi_domain) + irq_domain_remove(gicv5_global_data.spi_domain); + if (gicv5_global_data.ipi_domain) + irq_domain_remove(gicv5_global_data.ipi_domain); + + gicv5_global_data.ppi_domain = NULL; + gicv5_global_data.spi_domain = NULL; + gicv5_global_data.ipi_domain = NULL; +} + +static int __init gicv5_init_domains(struct fwnode_handle *handle) +{ + u32 spi_count = gicv5_global_data.global_spi_count; + struct irq_domain *d; + + d = irq_domain_create_linear(handle, PPI_NR, &gicv5_irq_ppi_domain_ops, NULL); + if (!d) + return -ENOMEM; + + irq_domain_update_bus_token(d, DOMAIN_BUS_WIRED); + gicv5_global_data.ppi_domain = d; + + if (spi_count) { + d = irq_domain_create_linear(handle, spi_count, + &gicv5_irq_spi_domain_ops, NULL); + + if (!d) { + gicv5_free_domains(); + return -ENOMEM; + } + + gicv5_global_data.spi_domain = d; + irq_domain_update_bus_token(d, DOMAIN_BUS_WIRED); + } + + if (!WARN(!gicv5_global_data.lpi_domain, + "LPI domain uninitialized, can't set up IPIs")) { + d = irq_domain_create_hierarchy(gicv5_global_data.lpi_domain, + 0, GICV5_IPIS_PER_CPU * nr_cpu_ids, + NULL, &gicv5_irq_ipi_domain_ops, + NULL); + + if (!d) { + gicv5_free_domains(); + return -ENOMEM; + } + gicv5_global_data.ipi_domain = d; + } + gicv5_global_data.fwnode = handle; + + return 0; +} + +static void gicv5_set_cpuif_pribits(void) +{ + u64 icc_idr0 = read_sysreg_s(SYS_ICC_IDR0_EL1); + + switch 
(FIELD_GET(ICC_IDR0_EL1_PRI_BITS, icc_idr0)) { + case ICC_IDR0_EL1_PRI_BITS_4BITS: + gicv5_global_data.cpuif_pri_bits = 4; + break; + case ICC_IDR0_EL1_PRI_BITS_5BITS: + gicv5_global_data.cpuif_pri_bits = 5; + break; + default: + pr_err("Unexpected ICC_IDR0_EL1_PRI_BITS value, default to 4"); + gicv5_global_data.cpuif_pri_bits = 4; + break; + } +} + +static void gicv5_set_cpuif_idbits(void) +{ + u32 icc_idr0 = read_sysreg_s(SYS_ICC_IDR0_EL1); + + switch (FIELD_GET(ICC_IDR0_EL1_ID_BITS, icc_idr0)) { + case ICC_IDR0_EL1_ID_BITS_16BITS: + gicv5_global_data.cpuif_id_bits = 16; + break; + case ICC_IDR0_EL1_ID_BITS_24BITS: + gicv5_global_data.cpuif_id_bits = 24; + break; + default: + pr_err("Unexpected ICC_IDR0_EL1_ID_BITS value, default to 16"); + gicv5_global_data.cpuif_id_bits = 16; + break; + } +} + +#ifdef CONFIG_KVM +static struct gic_kvm_info gic_v5_kvm_info __initdata; + +static void __init gic_of_setup_kvm_info(struct device_node *node) +{ + gic_v5_kvm_info.type = GIC_V5; + + /* GIC Virtual CPU interface maintenance interrupt */ + gic_v5_kvm_info.no_maint_irq_mask = false; + gic_v5_kvm_info.maint_irq = irq_of_parse_and_map(node, 0); + if (!gic_v5_kvm_info.maint_irq) { + pr_warn("cannot find GICv5 virtual CPU interface maintenance interrupt\n"); + return; + } + + vgic_set_kvm_info(&gic_v5_kvm_info); +} +#else +static inline void __init gic_of_setup_kvm_info(struct device_node *node) +{ +} +#endif // CONFIG_KVM + +static int __init gicv5_of_init(struct device_node *node, struct device_node *parent) +{ + int ret = gicv5_irs_of_probe(node); + if (ret) + return ret; + + ret = gicv5_init_domains(of_fwnode_handle(node)); + if (ret) + goto out_irs; + + gicv5_set_cpuif_pribits(); + gicv5_set_cpuif_idbits(); + + pri_bits = min_not_zero(gicv5_global_data.cpuif_pri_bits, + gicv5_global_data.irs_pri_bits); + + ret = gicv5_starting_cpu(smp_processor_id()); + if (ret) + goto out_dom; + + ret = set_handle_irq(gicv5_handle_irq); + if (ret) + goto out_int; + + ret = gicv5_irs_enable(); + if (ret) + goto out_int; + + gicv5_smp_init(); + + gicv5_irs_its_probe(); + + gic_of_setup_kvm_info(node); + + return 0; + +out_int: + gicv5_cpu_disable_interrupts(); +out_dom: + gicv5_free_domains(); +out_irs: + gicv5_irs_remove(); + + return ret; +} +IRQCHIP_DECLARE(gic_v5, "arm,gic-v5", gicv5_of_init); diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index ba2a37a27a54..ec70c84e9f91 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -1,10 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2002 ARM Limited, All Rights Reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * * Interrupt architecture for the GIC: * * o There is one Interrupt Distributor, which receives interrupts @@ -22,6 +19,7 @@ */ #include <linux/init.h> #include <linux/kernel.h> +#include <linux/kstrtox.h> #include <linux/err.h> #include <linux/module.h> #include <linux/list.h> @@ -37,6 +35,7 @@ #include <linux/irqdomain.h> #include <linux/interrupt.h> #include <linux/percpu.h> +#include <linux/seq_file.h> #include <linux/slab.h> #include <linux/irqchip.h> #include <linux/irqchip/chained_irq.h> @@ -55,7 +54,7 @@ static void gic_check_cpu_features(void) { - WARN_TAINT_ONCE(this_cpu_has_cap(ARM64_HAS_SYSREG_GIC_CPUIF), + WARN_TAINT_ONCE(this_cpu_has_cap(ARM64_HAS_GICV3_CPUIF), TAINT_CPU_OUT_OF_SPEC, "GICv3 system registers enabled, broken firmware!\n"); } @@ -65,11 +64,10 @@ static void gic_check_cpu_features(void) union gic_base { void __iomem *common_base; - void __percpu * __iomem *percpu_base; + void __iomem * __percpu *percpu_base; }; struct gic_chip_data { - struct irq_chip chip; union gic_base dist_base; union gic_base cpu_base; void __iomem *raw_dist_base; @@ -86,9 +84,6 @@ struct gic_chip_data { #endif struct irq_domain *domain; unsigned int gic_irqs; -#ifdef CONFIG_GIC_NON_BANKED - void __iomem *(*get_base)(union gic_base *); -#endif }; #ifdef CONFIG_BL_SWITCHER @@ -113,6 +108,8 @@ static DEFINE_RAW_SPINLOCK(cpu_map_lock); #endif +static DEFINE_STATIC_KEY_FALSE(needs_rmw_access); + /* * The GIC mapping of CPU interfaces does not necessarily match * the logical CPU numbering. Let's use a mapping as returned @@ -125,38 +122,32 @@ static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key); static struct gic_chip_data gic_data[CONFIG_ARM_GIC_MAX_NR] __read_mostly; -static struct gic_kvm_info gic_v2_kvm_info; +static struct gic_kvm_info gic_v2_kvm_info __initdata; + +static DEFINE_PER_CPU(u32, sgi_intid); #ifdef CONFIG_GIC_NON_BANKED -static void __iomem *gic_get_percpu_base(union gic_base *base) -{ - return raw_cpu_read(*base->percpu_base); -} +static DEFINE_STATIC_KEY_FALSE(frankengic_key); -static void __iomem *gic_get_common_base(union gic_base *base) +static void enable_frankengic(void) { - return base->common_base; + static_branch_enable(&frankengic_key); } -static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data) +static inline void __iomem *__get_base(union gic_base *base) { - return data->get_base(&data->dist_base); -} + if (static_branch_unlikely(&frankengic_key)) + return raw_cpu_read(*base->percpu_base); -static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data) -{ - return data->get_base(&data->cpu_base); + return base->common_base; } -static inline void gic_set_base_accessor(struct gic_chip_data *data, - void __iomem *(*f)(union gic_base *)) -{ - data->get_base = f; -} +#define gic_data_dist_base(d) __get_base(&(d)->dist_base) +#define gic_data_cpu_base(d) __get_base(&(d)->cpu_base) #else #define gic_data_dist_base(d) ((d)->dist_base.common_base) #define gic_data_cpu_base(d) ((d)->cpu_base.common_base) -#define gic_set_base_accessor(d, f) +#define enable_frankengic() do { } while(0) #endif static inline void __iomem *gic_dist_base(struct irq_data *d) @@ -171,11 +162,6 @@ static inline void __iomem *gic_cpu_base(struct irq_data *d) return gic_data_cpu_base(gic_data); } -static inline unsigned int gic_irq(struct irq_data *d) -{ - return d->hwirq; -} - static inline bool cascading_gic_irq(struct irq_data *d) { void *data = irq_data_get_irq_handler_data(d); @@ -192,14 +178,16 @@ static inline bool cascading_gic_irq(struct irq_data *d) */ static 
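/*
 * The one-bit-per-interrupt GICD register banks pack 32 interrupts
 * per 32-bit word: hwirq / 32 selects the word, hwirq % 32 the bit
 * within it.
 */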
void gic_poke_irq(struct irq_data *d, u32 offset) { - u32 mask = 1 << (gic_irq(d) % 32); - writel_relaxed(mask, gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4); + u32 mask = 1 << (irqd_to_hwirq(d) % 32); + + writel_relaxed(mask, gic_dist_base(d) + offset + (irqd_to_hwirq(d) / 32) * 4); } static int gic_peek_irq(struct irq_data *d, u32 offset) { - u32 mask = 1 << (gic_irq(d) % 32); - return !!(readl_relaxed(gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4) & mask); + u32 mask = 1 << (irqd_to_hwirq(d) % 32); + + return !!(readl_relaxed(gic_dist_base(d) + offset + (irqd_to_hwirq(d) / 32) * 4) & mask); } static void gic_mask_irq(struct irq_data *d) @@ -229,16 +217,26 @@ static void gic_unmask_irq(struct irq_data *d) static void gic_eoi_irq(struct irq_data *d) { - writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI); + irq_hw_number_t hwirq = irqd_to_hwirq(d); + + if (hwirq < 16) + hwirq = this_cpu_read(sgi_intid); + + writel_relaxed(hwirq, gic_cpu_base(d) + GIC_CPU_EOI); } static void gic_eoimode1_eoi_irq(struct irq_data *d) { + irq_hw_number_t hwirq = irqd_to_hwirq(d); + /* Do not deactivate an IRQ forwarded to a vcpu. */ if (irqd_is_forwarded_to_vcpu(d)) return; - writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_DEACTIVATE); + if (hwirq < 16) + hwirq = this_cpu_read(sgi_intid); + + writel_relaxed(hwirq, gic_cpu_base(d) + GIC_CPU_DEACTIVATE); } static int gic_irq_set_irqchip_state(struct irq_data *d, @@ -292,25 +290,33 @@ static int gic_irq_get_irqchip_state(struct irq_data *d, static int gic_set_type(struct irq_data *d, unsigned int type) { + irq_hw_number_t gicirq = irqd_to_hwirq(d); void __iomem *base = gic_dist_base(d); - unsigned int gicirq = gic_irq(d); + int ret; /* Interrupt configuration for SGIs can't be changed */ if (gicirq < 16) - return -EINVAL; + return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0; /* SPIs have restrictions on the supported types */ if (gicirq >= 32 && type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING) return -EINVAL; - return gic_configure_irq(gicirq, type, base, NULL); + ret = gic_configure_irq(gicirq, type, base + GIC_DIST_CONFIG); + if (ret && gicirq < 32) { + /* Misconfigured PPIs are usually not fatal */ + pr_warn("GIC: PPI%ld is secure or misconfigured\n", gicirq - 16); + ret = 0; + } + + return ret; } static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) { /* Only interrupts on the primary GIC can be forwarded to a vcpu. 
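+ * SGIs (hwirq < 16) can never be forwarded either; the kernel
+ * drives them as IPIs.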
*/ - if (cascading_gic_irq(d)) + if (cascading_gic_irq(d) || irqd_to_hwirq(d) < 16) return -EINVAL; if (vcpu) @@ -320,35 +326,10 @@ static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) return 0; } -#ifdef CONFIG_SMP -static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, - bool force) +static int gic_retrigger(struct irq_data *data) { - void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); - unsigned int cpu, shift = (gic_irq(d) % 4) * 8; - u32 val, mask, bit; - unsigned long flags; - - if (!force) - cpu = cpumask_any_and(mask_val, cpu_online_mask); - else - cpu = cpumask_first(mask_val); - - if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) - return -EINVAL; - - gic_lock_irqsave(flags); - mask = 0xff << shift; - bit = gic_cpu_map[cpu] << shift; - val = readl_relaxed(reg) & ~mask; - writel_relaxed(val | bit, reg); - gic_unlock_irqrestore(flags); - - irq_data_update_effective_affinity(d, cpumask_of(cpu)); - - return IRQ_SET_MASK_OK_DONE; + return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true); } -#endif static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) { @@ -360,31 +341,33 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK); irqnr = irqstat & GICC_IAR_INT_ID_MASK; - if (likely(irqnr > 15 && irqnr < 1020)) { - if (static_branch_likely(&supports_deactivate_key)) - writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI); - isb(); - handle_domain_irq(gic->domain, irqnr, regs); - continue; - } - if (irqnr < 16) { + if (unlikely(irqnr >= 1020)) + break; + + if (static_branch_likely(&supports_deactivate_key)) writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI); - if (static_branch_likely(&supports_deactivate_key)) - writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE); -#ifdef CONFIG_SMP + isb(); + + /* + * Ensure any shared data written by the CPU sending the IPI + * is read after we've read the ACK register on the GIC. + * + * Pairs with the write barrier in gic_ipi_send_mask + */ + if (irqnr <= 15) { + smp_rmb(); + /* - * Ensure any shared data written by the CPU sending - * the IPI is read after we've read the ACK register - * on the GIC. - * - * Pairs with the write barrier in gic_raise_softirq + * The GIC encodes the source CPU in GICC_IAR, + * leading to the deactivation to fail if not + * written back as is to GICC_EOI. Stash the INTID + * away for gic_eoi_irq() to write back. This only + * works because we don't nest SGIs... 
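+ * (a nested acknowledge would clobber the per-CPU sgi_intid stash).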
*/ - smp_rmb(); - handle_IPI(irqnr, regs); -#endif - continue; + this_cpu_write(sgi_intid, irqstat); } - break; + + generic_handle_domain_irq(gic->domain, irqnr); } while (1); } @@ -392,8 +375,9 @@ static void gic_handle_cascade_irq(struct irq_desc *desc) { struct gic_chip_data *chip_data = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); - unsigned int cascade_irq, gic_irq; + unsigned int gic_irq; unsigned long status; + int ret; chained_irq_enter(chip, desc); @@ -403,29 +387,23 @@ static void gic_handle_cascade_irq(struct irq_desc *desc) if (gic_irq == GICC_INT_SPURIOUS) goto out; - cascade_irq = irq_find_mapping(chip_data->domain, gic_irq); - if (unlikely(gic_irq < 32 || gic_irq > 1020)) { + isb(); + ret = generic_handle_domain_irq(chip_data->domain, gic_irq); + if (unlikely(ret)) handle_bad_irq(desc); - } else { - isb(); - generic_handle_irq(cascade_irq); - } - out: chained_irq_exit(chip, desc); } -static const struct irq_chip gic_chip = { - .irq_mask = gic_mask_irq, - .irq_unmask = gic_unmask_irq, - .irq_eoi = gic_eoi_irq, - .irq_set_type = gic_set_type, - .irq_get_irqchip_state = gic_irq_get_irqchip_state, - .irq_set_irqchip_state = gic_irq_set_irqchip_state, - .flags = IRQCHIP_SET_TYPE_MASKED | - IRQCHIP_SKIP_SET_WAKE | - IRQCHIP_MASK_ON_SUSPEND, -}; +static void gic_irq_print_chip(struct irq_data *d, struct seq_file *p) +{ + struct gic_chip_data *gic = irq_data_get_irq_chip_data(d); + + if (gic->domain->pm_dev) + seq_puts(p, gic->domain->pm_dev->of_node->name); + else + seq_printf(p, "GIC-%d", (int)(gic - &gic_data[0])); +} void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq) { @@ -501,7 +479,7 @@ static void gic_dist_init(struct gic_chip_data *gic) for (i = 32; i < gic_irqs; i += 4) writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4); - gic_dist_config(base, gic_irqs, NULL); + gic_dist_config(base, gic_irqs, GICD_INT_DEF_PRI); writel_relaxed(GICD_ENABLE, base + GIC_DIST_CTRL); } @@ -538,7 +516,7 @@ static int gic_cpu_init(struct gic_chip_data *gic) gic_cpu_map[i] &= ~cpu_mask; } - gic_cpu_config(dist_base, NULL); + gic_cpu_config(dist_base, 32, GICD_INT_DEF_PRI); writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK); gic_cpu_if_up(gic); @@ -630,7 +608,7 @@ void gic_dist_restore(struct gic_chip_data *gic) dist_base + GIC_DIST_CONFIG + i * 4); for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++) - writel_relaxed(GICD_INT_DEF_PRI_X4, + writel_relaxed(REPEAT_BYTE_U32(GICD_INT_DEF_PRI), dist_base + GIC_DIST_PRI + i * 4); for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++) @@ -719,7 +697,7 @@ void gic_cpu_restore(struct gic_chip_data *gic) writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4); for (i = 0; i < DIV_ROUND_UP(32, 4); i++) - writel_relaxed(GICD_INT_DEF_PRI_X4, + writel_relaxed(REPEAT_BYTE_U32(GICD_INT_DEF_PRI), dist_base + GIC_DIST_PRI + i * 4); writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK); @@ -731,11 +709,6 @@ static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v) int i; for (i = 0; i < CONFIG_ARM_GIC_MAX_NR; i++) { -#ifdef CONFIG_GIC_NON_BANKED - /* Skip over unused GICs */ - if (!gic_data[i].get_base) - continue; -#endif switch (cmd) { case CPU_PM_ENTER: gic_cpu_save(&gic_data[i]); @@ -798,14 +771,60 @@ static int gic_pm_init(struct gic_chip_data *gic) #endif #ifdef CONFIG_SMP -static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) +static void rmw_writeb(u8 bval, void __iomem *addr) +{ + static DEFINE_RAW_SPINLOCK(rmw_lock); + unsigned long 
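/*
 * Emulate the byte write with a locked read-modify-write of the
 * containing aligned 32-bit word, for interconnects that cannot
 * cope with sub-word accesses (see the "broken byte access" quirk).
 */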
offset = (unsigned long)addr & 3UL; + unsigned long shift = offset * 8; + unsigned long flags; + u32 val; + + raw_spin_lock_irqsave(&rmw_lock, flags); + + addr -= offset; + val = readl_relaxed(addr); + val &= ~GENMASK(shift + 7, shift); + val |= bval << shift; + writel_relaxed(val, addr); + + raw_spin_unlock_irqrestore(&rmw_lock, flags); +} + +static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, + bool force) +{ + void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + irqd_to_hwirq(d); + struct gic_chip_data *gic = irq_data_get_irq_chip_data(d); + unsigned int cpu; + + if (unlikely(gic != &gic_data[0])) + return -EINVAL; + + if (!force) + cpu = cpumask_any_and(mask_val, cpu_online_mask); + else + cpu = cpumask_first(mask_val); + + if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) + return -EINVAL; + + if (static_branch_unlikely(&needs_rmw_access)) + rmw_writeb(gic_cpu_map[cpu], reg); + else + writeb_relaxed(gic_cpu_map[cpu], reg); + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + + return IRQ_SET_MASK_OK_DONE; +} + +static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask) { int cpu; unsigned long flags, map = 0; if (unlikely(nr_cpu_ids == 1)) { /* Only one CPU? let's do a self-IPI... */ - writel_relaxed(2 << 24 | irq, + writel_relaxed(2 << 24 | d->hwirq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT); return; } @@ -823,12 +842,74 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) dmb(ishst); /* this always happens on GIC0 */ - writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT); + writel_relaxed(map << 16 | d->hwirq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT); gic_unlock_irqrestore(flags); } + +static int gic_starting_cpu(unsigned int cpu) +{ + gic_cpu_init(&gic_data[0]); + return 0; +} + +static __init void gic_smp_init(void) +{ + struct irq_fwspec sgi_fwspec = { + .fwnode = gic_data[0].domain->fwnode, + .param_count = 1, + }; + int base_sgi; + + cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING, + "irqchip/arm/gic:starting", + gic_starting_cpu, NULL); + + base_sgi = irq_domain_alloc_irqs(gic_data[0].domain, 8, NUMA_NO_NODE, &sgi_fwspec); + if (WARN_ON(base_sgi <= 0)) + return; + + set_smp_ipi_range(base_sgi, 8); +} +#else +#define gic_smp_init() do { } while(0) +#define gic_set_affinity NULL +#define gic_ipi_send_mask NULL #endif +static const struct irq_chip gic_chip = { + .irq_mask = gic_mask_irq, + .irq_unmask = gic_unmask_irq, + .irq_eoi = gic_eoi_irq, + .irq_set_type = gic_set_type, + .irq_retrigger = gic_retrigger, + .irq_set_affinity = gic_set_affinity, + .ipi_send_mask = gic_ipi_send_mask, + .irq_get_irqchip_state = gic_irq_get_irqchip_state, + .irq_set_irqchip_state = gic_irq_set_irqchip_state, + .irq_print_chip = gic_irq_print_chip, + .flags = IRQCHIP_SET_TYPE_MASKED | + IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_MASK_ON_SUSPEND, +}; + +static const struct irq_chip gic_chip_mode1 = { + .name = "GICv2", + .irq_mask = gic_eoimode1_mask_irq, + .irq_unmask = gic_unmask_irq, + .irq_eoi = gic_eoimode1_eoi_irq, + .irq_set_type = gic_set_type, + .irq_retrigger = gic_retrigger, + .irq_set_affinity = gic_set_affinity, + .ipi_send_mask = gic_ipi_send_mask, + .irq_get_irqchip_state = gic_irq_get_irqchip_state, + .irq_set_irqchip_state = gic_irq_set_irqchip_state, + .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity, + .flags = IRQCHIP_SET_TYPE_MASKED | + IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_MASK_ON_SUSPEND, +}; + #ifdef CONFIG_BL_SWITCHER /* * gic_send_sgi - send a 
SGI directly to given CPU interface number @@ -943,7 +1024,7 @@ void gic_migrate_target(unsigned int new_cpu_id) /* * gic_get_sgir_physaddr - get the physical address for the SGI register * - * REturn the physical address of the SGI register to be used + * Return the physical address of the SGI register to be used * by some early assembly code when the kernel is not yet available. */ static unsigned long gic_dist_physaddr; @@ -972,23 +1053,29 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { struct gic_chip_data *gic = d->host_data; + struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq)); + const struct irq_chip *chip; - if (hw < 32) { + chip = (static_branch_likely(&supports_deactivate_key) && + gic == &gic_data[0]) ? &gic_chip_mode1 : &gic_chip; + + switch (hw) { + case 0 ... 31: irq_set_percpu_devid(irq); - irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data, + irq_domain_set_info(d, irq, hw, chip, d->host_data, handle_percpu_devid_irq, NULL, NULL); - irq_set_status_flags(irq, IRQ_NOAUTOEN); - } else { - irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data, + break; + default: + irq_domain_set_info(d, irq, hw, chip, d->host_data, handle_fasteoi_irq, NULL, NULL); irq_set_probe(irq); - irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq))); + irqd_set_single_target(irqd); + break; } - return 0; -} -static void gic_irq_domain_unmap(struct irq_domain *d, unsigned int irq) -{ + /* Prevents SW retriggers which mess up the ACK/EOI ordering */ + irqd_set_handle_enforce_irqctx(irqd); + return 0; } static int gic_irq_domain_translate(struct irq_domain *d, @@ -996,24 +1083,32 @@ static int gic_irq_domain_translate(struct irq_domain *d, unsigned long *hwirq, unsigned int *type) { + if (fwspec->param_count == 1 && fwspec->param[0] < 16) { + *hwirq = fwspec->param[0]; + *type = IRQ_TYPE_EDGE_RISING; + return 0; + } + if (is_of_node(fwspec->fwnode)) { if (fwspec->param_count < 3) return -EINVAL; - /* Get the interrupt number and add 16 to skip over SGIs */ - *hwirq = fwspec->param[1] + 16; - - /* - * For SPIs, we need to add 16 more to get the GIC irq - * ID number - */ - if (!fwspec->param[0]) - *hwirq += 16; + switch (fwspec->param[0]) { + case 0: /* SPI */ + *hwirq = fwspec->param[1] + 32; + break; + case 1: /* PPI */ + *hwirq = fwspec->param[1] + 16; + break; + default: + return -EINVAL; + } *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; /* Make it clear that broken DTs are... 
broken */ - WARN_ON(*type == IRQ_TYPE_NONE); + WARN(*type == IRQ_TYPE_NONE, + "HW irq %ld has invalid type\n", *hwirq); return 0; } @@ -1021,22 +1116,23 @@ static int gic_irq_domain_translate(struct irq_domain *d, if(fwspec->param_count != 2) return -EINVAL; + if (fwspec->param[0] < 16) { + pr_err(FW_BUG "Illegal GSI%d translation request\n", + fwspec->param[0]); + return -EINVAL; + } + *hwirq = fwspec->param[0]; *type = fwspec->param[1]; - WARN_ON(*type == IRQ_TYPE_NONE); + WARN(*type == IRQ_TYPE_NONE, + "HW irq %ld has invalid type\n", *hwirq); return 0; } return -EINVAL; } -static int gic_starting_cpu(unsigned int cpu) -{ - gic_cpu_init(&gic_data[0]); - return 0; -} - static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *arg) { @@ -1064,36 +1160,10 @@ static const struct irq_domain_ops gic_irq_domain_hierarchy_ops = { .free = irq_domain_free_irqs_top, }; -static const struct irq_domain_ops gic_irq_domain_ops = { - .map = gic_irq_domain_map, - .unmap = gic_irq_domain_unmap, -}; - -static void gic_init_chip(struct gic_chip_data *gic, struct device *dev, - const char *name, bool use_eoimode1) -{ - /* Initialize irq_chip */ - gic->chip = gic_chip; - gic->chip.name = name; - gic->chip.parent_device = dev; - - if (use_eoimode1) { - gic->chip.irq_mask = gic_eoimode1_mask_irq; - gic->chip.irq_eoi = gic_eoimode1_eoi_irq; - gic->chip.irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity; - } - -#ifdef CONFIG_SMP - if (gic == &gic_data[0]) - gic->chip.irq_set_affinity = gic_set_affinity; -#endif -} - -static int gic_init_bases(struct gic_chip_data *gic, int irq_start, +static int gic_init_bases(struct gic_chip_data *gic, struct fwnode_handle *handle) { - irq_hw_number_t hwirq_base; - int gic_irqs, irq_base, ret; + int gic_irqs, ret; if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) { /* Frankein-GIC without banked registers... */ @@ -1117,7 +1187,7 @@ static int gic_init_bases(struct gic_chip_data *gic, int irq_start, gic->raw_cpu_base + offset; } - gic_set_base_accessor(gic, gic_get_percpu_base); + enable_frankengic(); } else { /* Normal, sane GIC... */ WARN(gic->percpu_offset, @@ -1125,7 +1195,6 @@ static int gic_init_bases(struct gic_chip_data *gic, int irq_start, gic->percpu_offset); gic->dist_base.common_base = gic->raw_dist_base; gic->cpu_base.common_base = gic->raw_cpu_base; - gic_set_base_accessor(gic, gic_get_common_base); } /* @@ -1138,37 +1207,9 @@ static int gic_init_bases(struct gic_chip_data *gic, int irq_start, gic_irqs = 1020; gic->gic_irqs = gic_irqs; - if (handle) { /* DT/ACPI */ - gic->domain = irq_domain_create_linear(handle, gic_irqs, - &gic_irq_domain_hierarchy_ops, - gic); - } else { /* Legacy support */ - /* - * For primary GICs, skip over SGIs. - * For secondary GICs, skip over PPIs, too. 
- */ - if (gic == &gic_data[0] && (irq_start & 31) > 0) { - hwirq_base = 16; - if (irq_start != -1) - irq_start = (irq_start & ~31) + 16; - } else { - hwirq_base = 32; - } - - gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */ - - irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, - numa_node_id()); - if (irq_base < 0) { - WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n", - irq_start); - irq_base = irq_start; - } - - gic->domain = irq_domain_add_legacy(NULL, gic_irqs, irq_base, - hwirq_base, &gic_irq_domain_ops, gic); - } - + gic->domain = irq_domain_create_linear(handle, gic_irqs, + &gic_irq_domain_hierarchy_ops, + gic); if (WARN_ON(!gic->domain)) { ret = -ENODEV; goto error; @@ -1195,10 +1236,8 @@ error: } static int __init __gic_init_bases(struct gic_chip_data *gic, - int irq_start, struct fwnode_handle *handle) { - char *name; int i, ret; if (WARN_ON(!gic || gic->domain)) @@ -1212,53 +1251,19 @@ static int __init __gic_init_bases(struct gic_chip_data *gic, */ for (i = 0; i < NR_GIC_CPU_IF; i++) gic_cpu_map[i] = 0xff; -#ifdef CONFIG_SMP - set_smp_cross_call(gic_raise_softirq); -#endif - cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING, - "irqchip/arm/gic:starting", - gic_starting_cpu, NULL); + set_handle_irq(gic_handle_irq); if (static_branch_likely(&supports_deactivate_key)) pr_info("GIC: Using split EOI/Deactivate mode\n"); } - if (static_branch_likely(&supports_deactivate_key) && gic == &gic_data[0]) { - name = kasprintf(GFP_KERNEL, "GICv2"); - gic_init_chip(gic, NULL, name, true); - } else { - name = kasprintf(GFP_KERNEL, "GIC-%d", (int)(gic-&gic_data[0])); - gic_init_chip(gic, NULL, name, false); - } - - ret = gic_init_bases(gic, irq_start, handle); - if (ret) - kfree(name); + ret = gic_init_bases(gic, handle); + if (gic == &gic_data[0]) + gic_smp_init(); return ret; } -void __init gic_init(unsigned int gic_nr, int irq_start, - void __iomem *dist_base, void __iomem *cpu_base) -{ - struct gic_chip_data *gic; - - if (WARN_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR)) - return; - - /* - * Non-DT/ACPI systems won't run a hypervisor, so let's not - * bother with these... - */ - static_branch_disable(&supports_deactivate_key); - - gic = &gic_data[gic_nr]; - gic->raw_dist_base = dist_base; - gic->raw_cpu_base = cpu_base; - - __gic_init_bases(gic, irq_start, NULL); -} - static void gic_teardown(struct gic_chip_data *gic) { if (WARN_ON(!gic)) @@ -1270,13 +1275,12 @@ static void gic_teardown(struct gic_chip_data *gic) iounmap(gic->raw_cpu_base); } -#ifdef CONFIG_OF static int gic_cnt __initdata; static bool gicv2_force_probe; static int __init gicv2_force_probe_cfg(char *buf) { - return strtobool(buf, &gicv2_force_probe); + return kstrtobool(buf, &gicv2_force_probe); } early_param("irqchip.gicv2_force_probe", gicv2_force_probe_cfg); @@ -1358,6 +1362,30 @@ static bool gic_check_eoimode(struct device_node *node, void __iomem **base) return true; } +static bool gic_enable_rmw_access(void *data) +{ + /* + * The EMEV2 class of machines has a broken interconnect, and + * locks up on accesses that are less than 32bit. So far, only + * the affinity setting requires it. 
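+ * (the byte-wide GICD_ITARGETSRn write in gic_set_affinity()).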
+ */ + if (of_machine_is_compatible("renesas,emev2")) { + static_branch_enable(&needs_rmw_access); + return true; + } + + return false; +} + +static const struct gic_quirk gic_quirks[] = { + { + .desc = "broken byte access", + .compatible = "arm,pl390", + .init = gic_enable_rmw_access, + }, + { }, +}; + static int gic_of_setup(struct gic_chip_data *gic, struct device_node *node) { if (!gic || !node) @@ -1374,6 +1402,8 @@ static int gic_of_setup(struct gic_chip_data *gic, struct device_node *node) if (of_property_read_u32(node, "cpu-offset", &gic->percpu_offset)) gic->percpu_offset = 0; + gic_enable_of_quirks(node, gic_quirks, gic); + return 0; error: @@ -1393,18 +1423,17 @@ int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq) if (!*gic) return -ENOMEM; - gic_init_chip(*gic, dev, dev->of_node->name, false); - ret = gic_of_setup(*gic, dev->of_node); if (ret) return ret; - ret = gic_init_bases(*gic, -1, &dev->of_node->fwnode); + ret = gic_init_bases(*gic, &dev->of_node->fwnode); if (ret) { gic_teardown(*gic); return ret; } + irq_domain_set_pm_device((*gic)->domain, dev); irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq, *gic); return 0; @@ -1430,8 +1459,10 @@ static void __init gic_of_setup_kvm_info(struct device_node *node) if (ret) return; + gic_v2_kvm_info.gicc_base = gic_data[0].cpu_base.common_base; + if (static_branch_likely(&supports_deactivate_key)) - gic_set_kvm_info(&gic_v2_kvm_info); + vgic_set_kvm_info(&gic_v2_kvm_info); } int __init @@ -1459,7 +1490,7 @@ gic_of_init(struct device_node *node, struct device_node *parent) if (gic_cnt == 0 && !gic_check_eoimode(node, &gic->raw_cpu_base)) static_branch_disable(&supports_deactivate_key); - ret = __gic_init_bases(gic, -1, &node->fwnode); + ret = __gic_init_bases(gic, &node->fwnode); if (ret) { gic_teardown(gic); return ret; @@ -1490,12 +1521,6 @@ IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init); IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init); IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init); IRQCHIP_DECLARE(pl390, "arm,pl390", gic_of_init); -#else -int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq) -{ - return -ENOTSUPP; -} -#endif #ifdef CONFIG_ACPI static struct @@ -1508,7 +1533,7 @@ static struct } acpi_data __initdata; static int __init -gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header, +gic_acpi_parse_madt_cpu(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_generic_interrupt *processor; @@ -1540,7 +1565,7 @@ gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header, } /* The things you have to do to just *count* something... 
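+ * (the MADT carries no entry count, so walk the table once with a
+ * do-nothing callback purely to count the matching entries).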
*/ -static int __init acpi_dummy_func(struct acpi_subtable_header *header, +static int __init acpi_dummy_func(union acpi_subtable_headers *header, const unsigned long end) { return 0; @@ -1597,15 +1622,22 @@ static void __init gic_acpi_setup_kvm_info(void) return; gic_v2_kvm_info.maint_irq = irq; + gic_v2_kvm_info.gicc_base = gic_data[0].cpu_base.common_base; - gic_set_kvm_info(&gic_v2_kvm_info); + vgic_set_kvm_info(&gic_v2_kvm_info); +} + +static struct fwnode_handle *gsi_domain_handle; + +static struct fwnode_handle *gic_v2_get_gsi_domain_id(u32 gsi) +{ + return gsi_domain_handle; } -static int __init gic_v2_acpi_init(struct acpi_subtable_header *header, +static int __init gic_v2_acpi_init(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_generic_distributor *dist; - struct fwnode_handle *domain_handle; struct gic_chip_data *gic = &gic_data[0]; int count, ret; @@ -1643,22 +1675,22 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header, /* * Initialize GIC instance zero (no multi-GIC support). */ - domain_handle = irq_domain_alloc_fwnode(gic->raw_dist_base); - if (!domain_handle) { + gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address); + if (!gsi_domain_handle) { pr_err("Unable to allocate domain handle\n"); gic_teardown(gic); return -ENOMEM; } - ret = __gic_init_bases(gic, -1, domain_handle); + ret = __gic_init_bases(gic, gsi_domain_handle); if (ret) { pr_err("Failed to initialise GIC\n"); - irq_domain_free_fwnode(domain_handle); + irq_domain_free_fwnode(gsi_domain_handle); gic_teardown(gic); return ret; } - acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle); + acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, gic_v2_get_gsi_domain_id); if (IS_ENABLED(CONFIG_ARM_GIC_V2M)) gicv2m_init(NULL, gic_data[0].domain); diff --git a/drivers/irqchip/irq-goldfish-pic.c b/drivers/irqchip/irq-goldfish-pic.c index 2a92f03c73e4..a8b23b507ecd 100644 --- a/drivers/irqchip/irq-goldfish-pic.c +++ b/drivers/irqchip/irq-goldfish-pic.c @@ -1,12 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Driver for MIPS Goldfish Programmable Interrupt Controller. * * Author: Miodrag Dinic <miodrag.dinic@mips.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -38,15 +34,14 @@ static void goldfish_pic_cascade(struct irq_desc *desc) { struct goldfish_pic_data *gfpic = irq_desc_get_handler_data(desc); struct irq_chip *host_chip = irq_desc_get_chip(desc); - u32 pending, hwirq, virq; + u32 pending, hwirq; chained_irq_enter(host_chip, desc); pending = readl(gfpic->base + GFPIC_REG_IRQ_PENDING); while (pending) { hwirq = __fls(pending); - virq = irq_linear_revmap(gfpic->irq_domain, hwirq); - generic_handle_irq(virq); + generic_handle_domain_irq(gfpic->irq_domain, hwirq); pending &= ~(1 << hwirq); } @@ -106,10 +101,9 @@ static int __init goldfish_pic_of_init(struct device_node *of_node, irq_setup_generic_chip(gc, IRQ_MSK(GFPIC_NR_IRQS), 0, IRQ_NOPROBE | IRQ_LEVEL, 0); - gfpic->irq_domain = irq_domain_add_legacy(of_node, GFPIC_NR_IRQS, - GFPIC_IRQ_BASE, 0, - &goldfish_irq_domain_ops, - NULL); + gfpic->irq_domain = irq_domain_create_legacy(of_fwnode_handle(of_node), GFPIC_NR_IRQS, + GFPIC_IRQ_BASE, 0, &goldfish_irq_domain_ops, + NULL); if (!gfpic->irq_domain) { pr_err("Failed to add irqdomain!\n"); ret = -ENOMEM; diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c index 5b4fd2f4e5f8..b7958c5a1221 100644 --- a/drivers/irqchip/irq-hip04.c +++ b/drivers/irqchip/irq-hip04.c @@ -1,14 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0-only /* - * Hisilicon HiP04 INTC + * HiSilicon HiP04 INTC * * Copyright (C) 2002-2014 ARM Limited. - * Copyright (c) 2013-2014 Hisilicon Ltd. + * Copyright (c) 2013-2014 HiSilicon Ltd. * Copyright (c) 2013-2014 Linaro Ltd. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * * Interrupt architecture for the HIP04 INTC: * * o There is one Interrupt Distributor, which receives interrupts @@ -133,7 +130,12 @@ static int hip04_irq_set_type(struct irq_data *d, unsigned int type) raw_spin_lock(&irq_controller_lock); - ret = gic_configure_irq(irq, type, base, NULL); + ret = gic_configure_irq(irq, type, base + GIC_DIST_CONFIG); + if (ret && irq < 32) { + /* Misconfigured PPIs are usually not fatal */ + pr_warn("GIC: PPI%d is secure or misconfigured\n", irq - 16); + ret = 0; + } raw_spin_unlock(&irq_controller_lock); @@ -169,6 +171,29 @@ static int hip04_irq_set_affinity(struct irq_data *d, return IRQ_SET_MASK_OK; } + +static void hip04_ipi_send_mask(struct irq_data *d, const struct cpumask *mask) +{ + int cpu; + unsigned long flags, map = 0; + + raw_spin_lock_irqsave(&irq_controller_lock, flags); + + /* Convert our logical CPU mask into a physical one. */ + for_each_cpu(cpu, mask) + map |= hip04_cpu_map[cpu]; + + /* + * Ensure that stores to Normal memory are visible to the + * other CPUs before they observe us issuing the IPI. 
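+ * The dmb(ishst) below provides that ordering ahead of the
+ * GIC_DIST_SOFTINT write.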
+ */ + dmb(ishst); + + /* this always happens on GIC0 */ + writel_relaxed(map << 8 | d->hwirq, hip04_data.dist_base + GIC_DIST_SOFTINT); + + raw_spin_unlock_irqrestore(&irq_controller_lock, flags); +} #endif static void __exception_irq_entry hip04_handle_irq(struct pt_regs *regs) @@ -180,19 +205,9 @@ static void __exception_irq_entry hip04_handle_irq(struct pt_regs *regs) irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK); irqnr = irqstat & GICC_IAR_INT_ID_MASK; - if (likely(irqnr > 15 && irqnr <= HIP04_MAX_IRQS)) { - handle_domain_irq(hip04_data.domain, irqnr, regs); - continue; - } - if (irqnr < 16) { - writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI); -#ifdef CONFIG_SMP - handle_IPI(irqnr, regs); -#endif - continue; - } - break; - } while (1); + if (irqnr <= HIP04_MAX_IRQS) + generic_handle_domain_irq(hip04_data.domain, irqnr); + } while (irqnr > HIP04_MAX_IRQS); } static struct irq_chip hip04_irq_chip = { @@ -203,6 +218,7 @@ static struct irq_chip hip04_irq_chip = { .irq_set_type = hip04_irq_set_type, #ifdef CONFIG_SMP .irq_set_affinity = hip04_irq_set_affinity, + .ipi_send_mask = hip04_ipi_send_mask, #endif .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE | @@ -244,7 +260,7 @@ static void __init hip04_irq_dist_init(struct hip04_irq_data *intc) for (i = 32; i < nr_irqs; i += 2) writel_relaxed(cpumask, base + GIC_DIST_TARGET + ((i * 2) & ~3)); - gic_dist_config(base, nr_irqs, NULL); + gic_dist_config(base, nr_irqs, GICD_INT_DEF_PRI); writel_relaxed(1, base + GIC_DIST_CTRL); } @@ -271,37 +287,12 @@ static void hip04_irq_cpu_init(struct hip04_irq_data *intc) if (i != cpu) hip04_cpu_map[i] &= ~cpu_mask; - gic_cpu_config(dist_base, NULL); + gic_cpu_config(dist_base, 32, GICD_INT_DEF_PRI); writel_relaxed(0xf0, base + GIC_CPU_PRIMASK); writel_relaxed(1, base + GIC_CPU_CTRL); } -#ifdef CONFIG_SMP -static void hip04_raise_softirq(const struct cpumask *mask, unsigned int irq) -{ - int cpu; - unsigned long flags, map = 0; - - raw_spin_lock_irqsave(&irq_controller_lock, flags); - - /* Convert our logical CPU mask into a physical one. */ - for_each_cpu(cpu, mask) - map |= hip04_cpu_map[cpu]; - - /* - * Ensure that stores to Normal memory are visible to the - * other CPUs before they observe us issuing the IPI. 
- */ - dmb(ishst); - - /* this always happens on GIC0 */ - writel_relaxed(map << 8 | irq, hip04_data.dist_base + GIC_DIST_SOFTINT); - - raw_spin_unlock_irqrestore(&irq_controller_lock, flags); -} -#endif - static int hip04_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { @@ -309,7 +300,6 @@ static int hip04_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_set_percpu_devid(irq); irq_set_chip_and_handler(irq, &hip04_irq_chip, handle_percpu_devid_irq); - irq_set_status_flags(irq, IRQ_NOAUTOEN); } else { irq_set_chip_and_handler(irq, &hip04_irq_chip, handle_fasteoi_irq); @@ -326,10 +316,13 @@ static int hip04_irq_domain_xlate(struct irq_domain *d, unsigned long *out_hwirq, unsigned int *out_type) { - unsigned long ret = 0; - if (irq_domain_get_of_node(d) != controller) return -EINVAL; + if (intsize == 1 && intspec[0] < 16) { + *out_hwirq = intspec[0]; + *out_type = IRQ_TYPE_EDGE_RISING; + return 0; + } if (intsize < 3) return -EINVAL; @@ -342,7 +335,7 @@ static int hip04_irq_domain_xlate(struct irq_domain *d, *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK; - return ret; + return 0; } static int hip04_irq_starting_cpu(unsigned int cpu) @@ -359,7 +352,6 @@ static const struct irq_domain_ops hip04_irq_domain_ops = { static int __init hip04_of_init(struct device_node *node, struct device_node *parent) { - irq_hw_number_t hwirq_base = 16; int nr_irqs, irq_base, i; if (WARN_ON(!node)) @@ -388,24 +380,19 @@ hip04_of_init(struct device_node *node, struct device_node *parent) nr_irqs = HIP04_MAX_IRQS; hip04_data.nr_irqs = nr_irqs; - nr_irqs -= hwirq_base; /* calculate # of irqs to allocate */ - - irq_base = irq_alloc_descs(-1, hwirq_base, nr_irqs, numa_node_id()); + irq_base = irq_alloc_descs(-1, 0, nr_irqs, numa_node_id()); if (irq_base < 0) { pr_err("failed to allocate IRQ numbers\n"); return -EINVAL; } - hip04_data.domain = irq_domain_add_legacy(node, nr_irqs, irq_base, - hwirq_base, - &hip04_irq_domain_ops, - &hip04_data); - + hip04_data.domain = irq_domain_create_legacy(of_fwnode_handle(node), nr_irqs, irq_base, 0, + &hip04_irq_domain_ops, &hip04_data); if (WARN_ON(!hip04_data.domain)) return -EINVAL; #ifdef CONFIG_SMP - set_smp_cross_call(hip04_raise_softirq); + set_smp_ipi_range(irq_base, 16); #endif set_handle_irq(hip04_handle_irq); diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c index b0d4aab1a58c..cca77f9948a3 100644 --- a/drivers/irqchip/irq-i8259.c +++ b/drivers/irqchip/irq-i8259.c @@ -202,13 +202,13 @@ spurious_8259A_irq: } } -static void i8259A_resume(void) +static void i8259A_resume(void *data) { if (i8259A_auto_eoi >= 0) init_8259A(i8259A_auto_eoi); } -static void i8259A_shutdown(void) +static void i8259A_shutdown(void *data) { /* Put the i8259A into a quiescent state that * the kernel initialization code can get it @@ -220,18 +220,14 @@ static void i8259A_shutdown(void) } } -static struct syscore_ops i8259_syscore_ops = { +static const struct syscore_ops i8259_syscore_ops = { .resume = i8259A_resume, .shutdown = i8259A_shutdown, }; -static int __init i8259A_init_sysfs(void) -{ - register_syscore_ops(&i8259_syscore_ops); - return 0; -} - -device_initcall(i8259A_init_sysfs); +static struct syscore i8259_syscore = { + .ops = &i8259_syscore_ops, +}; static void init_8259A(int auto_eoi) { @@ -276,15 +272,6 @@ static void init_8259A(int auto_eoi) raw_spin_unlock_irqrestore(&i8259A_lock, flags); } -/* - * IRQ2 is cascade interrupt to second interrupt controller - */ -static struct irqaction irq2 = { - .handler = no_action, - .name = 
"cascade", - .flags = IRQF_NO_THREAD, -}; - static struct resource pic1_io_resource = { .name = "pic1", .start = PIC_MASTER_CMD, @@ -319,6 +306,10 @@ static const struct irq_domain_ops i8259A_ops = { */ struct irq_domain * __init __init_i8259_irqs(struct device_node *node) { + /* + * PIC_CASCADE_IR is cascade interrupt to second interrupt controller + */ + int irq = I8259A_IRQ_BASE + PIC_CASCADE_IR; struct irq_domain *domain; insert_resource(&ioport_resource, &pic1_io_resource); @@ -326,12 +317,14 @@ struct irq_domain * __init __init_i8259_irqs(struct device_node *node) init_8259A(0); - domain = irq_domain_add_legacy(node, 16, I8259A_IRQ_BASE, 0, - &i8259A_ops, NULL); + domain = irq_domain_create_legacy(of_fwnode_handle(node), 16, I8259A_IRQ_BASE, 0, + &i8259A_ops, NULL); if (!domain) panic("Failed to add i8259 IRQ domain"); - setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2); + if (request_irq(irq, no_action, IRQF_NO_THREAD, "cascade", NULL)) + pr_err("Failed to register cascade interrupt\n"); + register_syscore(&i8259_syscore); return domain; } @@ -344,16 +337,14 @@ static void i8259_irq_dispatch(struct irq_desc *desc) { struct irq_domain *domain = irq_desc_get_handler_data(desc); int hwirq = i8259_poll(); - unsigned int irq; if (hwirq < 0) return; - irq = irq_linear_revmap(domain, hwirq); - generic_handle_irq(irq); + generic_handle_domain_irq(domain, hwirq); } -int __init i8259_of_init(struct device_node *node, struct device_node *parent) +static int __init i8259_of_init(struct device_node *node, struct device_node *parent) { struct irq_domain *domain; unsigned int parent_irq; diff --git a/drivers/irqchip/irq-idt3243x.c b/drivers/irqchip/irq-idt3243x.c new file mode 100644 index 000000000000..f8324fb1fe8f --- /dev/null +++ b/drivers/irqchip/irq-idt3243x.c @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for IDT/Renesas 79RC3243x Interrupt Controller. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> + +#define IDT_PIC_NR_IRQS 32 + +#define IDT_PIC_IRQ_PEND 0x00 +#define IDT_PIC_IRQ_MASK 0x08 + +struct idt_pic_data { + void __iomem *base; + struct irq_domain *irq_domain; + struct irq_chip_generic *gc; +}; + +static void idt_irq_dispatch(struct irq_desc *desc) +{ + struct idt_pic_data *idtpic = irq_desc_get_handler_data(desc); + struct irq_chip *host_chip = irq_desc_get_chip(desc); + u32 pending, hwirq; + + chained_irq_enter(host_chip, desc); + + pending = irq_reg_readl(idtpic->gc, IDT_PIC_IRQ_PEND); + pending &= ~idtpic->gc->mask_cache; + while (pending) { + hwirq = __fls(pending); + generic_handle_domain_irq(idtpic->irq_domain, hwirq); + pending &= ~(1 << hwirq); + } + + chained_irq_exit(host_chip, desc); +} + +static int idt_pic_init(struct device_node *of_node, struct device_node *parent) +{ + struct irq_domain *domain; + struct idt_pic_data *idtpic; + struct irq_chip_generic *gc; + struct irq_chip_type *ct; + unsigned int parent_irq; + int ret = 0; + + idtpic = kzalloc(sizeof(*idtpic), GFP_KERNEL); + if (!idtpic) { + ret = -ENOMEM; + goto out_err; + } + + parent_irq = irq_of_parse_and_map(of_node, 0); + if (!parent_irq) { + pr_err("Failed to map parent IRQ!\n"); + ret = -EINVAL; + goto out_free; + } + + idtpic->base = of_iomap(of_node, 0); + if (!idtpic->base) { + pr_err("Failed to map base address!\n"); + ret = -ENOMEM; + goto out_unmap_irq; + } + + domain = irq_domain_create_linear(of_fwnode_handle(of_node), IDT_PIC_NR_IRQS, + &irq_generic_chip_ops, NULL); + if (!domain) { + pr_err("Failed to add irqdomain!\n"); + ret = -ENOMEM; + goto out_iounmap; + } + idtpic->irq_domain = domain; + + ret = irq_alloc_domain_generic_chips(domain, 32, 1, "IDTPIC", + handle_level_irq, 0, + IRQ_NOPROBE | IRQ_LEVEL, 0); + if (ret) + goto out_domain_remove; + + gc = irq_get_domain_generic_chip(domain, 0); + gc->reg_base = idtpic->base; + gc->private = idtpic; + + ct = gc->chip_types; + ct->regs.mask = IDT_PIC_IRQ_MASK; + ct->chip.irq_mask = irq_gc_mask_set_bit; + ct->chip.irq_unmask = irq_gc_mask_clr_bit; + idtpic->gc = gc; + + /* Mask interrupts. */ + writel(0xffffffff, idtpic->base + IDT_PIC_IRQ_MASK); + gc->mask_cache = 0xffffffff; + + irq_set_chained_handler_and_data(parent_irq, + idt_irq_dispatch, idtpic); + + return 0; + +out_domain_remove: + irq_domain_remove(domain); +out_iounmap: + iounmap(idtpic->base); +out_unmap_irq: + irq_dispose_mapping(parent_irq); +out_free: + kfree(idtpic); +out_err: + pr_err("Failed to initialize! 
(errno = %d)\n", ret); + return ret; +} + +IRQCHIP_DECLARE(idt_pic, "idt,32434-pic", idt_pic_init); diff --git a/drivers/irqchip/irq-imgpdc.c b/drivers/irqchip/irq-imgpdc.c index d00489a4b54f..e9ef2f5a7207 100644 --- a/drivers/irqchip/irq-imgpdc.c +++ b/drivers/irqchip/irq-imgpdc.c @@ -223,7 +223,7 @@ static void pdc_intc_perip_isr(struct irq_desc *desc) { unsigned int irq = irq_desc_get_irq(desc); struct pdc_intc_priv *priv; - unsigned int i, irq_no; + unsigned int i; priv = (struct pdc_intc_priv *)irq_desc_get_handler_data(desc); @@ -237,14 +237,13 @@ static void pdc_intc_perip_isr(struct irq_desc *desc) found: /* pass on the interrupt */ - irq_no = irq_linear_revmap(priv->domain, i); - generic_handle_irq(irq_no); + generic_handle_domain_irq(priv->domain, i); } static void pdc_intc_syswake_isr(struct irq_desc *desc) { struct pdc_intc_priv *priv; - unsigned int syswake, irq_no; + unsigned int syswake; unsigned int status; priv = (struct pdc_intc_priv *)irq_desc_get_handler_data(desc); @@ -258,9 +257,7 @@ static void pdc_intc_syswake_isr(struct irq_desc *desc) if (!(status & 1)) continue; - irq_no = irq_linear_revmap(priv->domain, - syswake_to_hwirq(syswake)); - generic_handle_irq(irq_no); + generic_handle_domain_irq(priv->domain, syswake_to_hwirq(syswake)); } } @@ -316,10 +313,8 @@ static int pdc_intc_probe(struct platform_device *pdev) /* Allocate driver data */ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); - if (!priv) { - dev_err(&pdev->dev, "cannot allocate device data\n"); + if (!priv) return -ENOMEM; - } raw_spin_lock_init(&priv->lock); platform_set_drvdata(pdev, priv); @@ -356,16 +351,12 @@ static int pdc_intc_probe(struct platform_device *pdev) /* Get peripheral IRQ numbers */ priv->perip_irqs = devm_kcalloc(&pdev->dev, 4, priv->nr_perips, GFP_KERNEL); - if (!priv->perip_irqs) { - dev_err(&pdev->dev, "cannot allocate perip IRQ list\n"); + if (!priv->perip_irqs) return -ENOMEM; - } for (i = 0; i < priv->nr_perips; ++i) { irq = platform_get_irq(pdev, 1 + i); - if (irq < 0) { - dev_err(&pdev->dev, "cannot find perip IRQ #%u\n", i); + if (irq < 0) return irq; - } priv->perip_irqs[i] = irq; } /* check if too many were provided */ @@ -376,15 +367,13 @@ static int pdc_intc_probe(struct platform_device *pdev) /* Get syswake IRQ number */ irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(&pdev->dev, "cannot find syswake IRQ\n"); + if (irq < 0) return irq; - } priv->syswake_irq = irq; /* Set up an IRQ domain */ - priv->domain = irq_domain_add_linear(node, 16, &irq_generic_chip_ops, - priv); + priv->domain = irq_domain_create_linear(dev_fwnode(&pdev->dev), 16, &irq_generic_chip_ops, + priv); if (unlikely(!priv->domain)) { dev_err(&pdev->dev, "cannot add IRQ domain\n"); return -ENOMEM; @@ -472,12 +461,11 @@ err_generic: return ret; } -static int pdc_intc_remove(struct platform_device *pdev) +static void pdc_intc_remove(struct platform_device *pdev) { struct pdc_intc_priv *priv = platform_get_drvdata(pdev); irq_domain_remove(priv->domain); - return 0; } static const struct of_device_id pdc_intc_match[] = { @@ -490,8 +478,8 @@ static struct platform_driver pdc_intc_driver = { .name = "pdc-intc", .of_match_table = pdc_intc_match, }, - .probe = pdc_intc_probe, - .remove = pdc_intc_remove, + .probe = pdc_intc_probe, + .remove = pdc_intc_remove, }; static int __init pdc_intc_init(void) diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c index 66501ea4fd75..04f7ba0657be 100644 --- a/drivers/irqchip/irq-imx-gpcv2.c +++ 
b/drivers/irqchip/irq-imx-gpcv2.c @@ -1,9 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2015 Freescale Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include <linux/of_address.h> @@ -29,14 +26,14 @@ struct gpcv2_irqchip_data { u32 cpu2wakeup; }; -static struct gpcv2_irqchip_data *imx_gpcv2_instance; +static struct gpcv2_irqchip_data *imx_gpcv2_instance __ro_after_init; static void __iomem *gpcv2_idx_to_reg(struct gpcv2_irqchip_data *cd, int i) { return cd->gpc_base + cd->cpu2wakeup + i * 4; } -static int gpcv2_wakeup_source_save(void) +static int gpcv2_wakeup_source_save(void *data) { struct gpcv2_irqchip_data *cd; void __iomem *reg; @@ -55,7 +52,7 @@ static int gpcv2_wakeup_source_save(void) return 0; } -static void gpcv2_wakeup_source_restore(void) +static void gpcv2_wakeup_source_restore(void *data) { struct gpcv2_irqchip_data *cd; int i; @@ -68,9 +65,13 @@ static void gpcv2_wakeup_source_restore(void) writel_relaxed(cd->saved_irq_mask[i], gpcv2_idx_to_reg(cd, i)); } -static struct syscore_ops imx_gpcv2_syscore_ops = { - .suspend = gpcv2_wakeup_source_save, - .resume = gpcv2_wakeup_source_restore, +static const struct syscore_ops gpcv2_syscore_ops = { + .suspend = gpcv2_wakeup_source_save, + .resume = gpcv2_wakeup_source_restore, +}; + +static struct syscore gpcv2_syscore = { + .ops = &gpcv2_syscore_ops, }; static int imx_gpcv2_irq_set_wake(struct irq_data *d, unsigned int on) @@ -134,6 +135,7 @@ static struct irq_chip gpcv2_irqchip_data_chip = { .irq_unmask = imx_gpcv2_irq_unmask, .irq_set_wake = imx_gpcv2_irq_set_wake, .irq_retrigger = irq_chip_retrigger_hierarchy, + .irq_set_type = irq_chip_set_type_parent, #ifdef CONFIG_SMP .irq_set_affinity = irq_chip_set_affinity_parent, #endif @@ -230,10 +232,8 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node, } cd = kzalloc(sizeof(struct gpcv2_irqchip_data), GFP_KERNEL); - if (!cd) { - pr_err("%pOF: kzalloc failed!\n", node); + if (!cd) return -ENOMEM; - } raw_spin_lock_init(&cd->rlock); @@ -244,14 +244,14 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node, return -ENOMEM; } - domain = irq_domain_add_hierarchy(parent_domain, 0, GPC_MAX_IRQS, - node, &gpcv2_irqchip_data_domain_ops, cd); + domain = irq_domain_create_hierarchy(parent_domain, 0, GPC_MAX_IRQS, + of_fwnode_handle(node), &gpcv2_irqchip_data_domain_ops, cd); if (!domain) { iounmap(cd->gpc_base); kfree(cd); return -ENOMEM; } - irq_set_default_host(domain); + irq_set_default_domain(domain); /* Initially mask all interrupts */ for (i = 0; i < IMR_NUM; i++) { @@ -261,7 +261,7 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node, case 4: writel_relaxed(~0, reg + GPC_IMR1_CORE2); writel_relaxed(~0, reg + GPC_IMR1_CORE3); - /* fall through */ + fallthrough; case 2: writel_relaxed(~0, reg + GPC_IMR1_CORE0); writel_relaxed(~0, reg + GPC_IMR1_CORE1); @@ -280,13 +280,14 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node, writel_relaxed(~0x1, cd->gpc_base + cd->cpu2wakeup); imx_gpcv2_instance = cd; - register_syscore_ops(&imx_gpcv2_syscore_ops); + register_syscore(&gpcv2_syscore); /* * Clear the OF_POPULATED flag set in of_irq_init so that * later the GPC power domain driver will not be skipped. 
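+ * (of_platform_populate() skips nodes with OF_POPULATED set, so
+ * clearing it lets a platform device be created for this node).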
*/ of_node_clear_flag(node, OF_POPULATED); + fwnode_dev_initialized(domain->fwnode, false); return 0; } diff --git a/drivers/irqchip/irq-imx-intmux.c b/drivers/irqchip/irq-imx-intmux.c new file mode 100644 index 000000000000..5f9b204d350b --- /dev/null +++ b/drivers/irqchip/irq-imx-intmux.c @@ -0,0 +1,366 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright 2017 NXP + +/* INTMUX Block Diagram + * + * ________________ + * interrupt source # 0 +---->| | + * | | | + * interrupt source # 1 +++-->| | + * ... | | | channel # 0 |--------->interrupt out # 0 + * ... | | | | + * ... | | | | + * interrupt source # X-1 +++-->|________________| + * | | | + * | | | + * | | | ________________ + * +---->| | + * | | | | | + * | +-->| | + * | | | | channel # 1 |--------->interrupt out # 1 + * | | +>| | + * | | | | | + * | | | |________________| + * | | | + * | | | + * | | | ... + * | | | ... + * | | | + * | | | ________________ + * +---->| | + * | | | | + * +-->| | + * | | channel # N |--------->interrupt out # N + * +>| | + * | | + * |________________| + * + * + * N: Interrupt Channel Instance Number (N=7) + * X: Interrupt Source Number for each channel (X=32) + * + * The INTMUX interrupt multiplexer has 8 channels, each channel receives 32 + * interrupt sources and generates 1 interrupt output. + * + */ + +#include <linux/clk.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/kernel.h> +#include <linux/mod_devicetable.h> +#include <linux/of_irq.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> +#include <linux/pm_runtime.h> + +#define CHANIER(n) (0x10 + (0x40 * n)) +#define CHANIPR(n) (0x20 + (0x40 * n)) + +#define CHAN_MAX_NUM 0x8 + +struct intmux_irqchip_data { + u32 saved_reg; + int chanidx; + int irq; + struct irq_domain *domain; +}; + +struct intmux_data { + raw_spinlock_t lock; + void __iomem *regs; + struct clk *ipg_clk; + int channum; + struct intmux_irqchip_data irqchip_data[] __counted_by(channum); +}; + +static void imx_intmux_irq_mask(struct irq_data *d) +{ + struct intmux_irqchip_data *irqchip_data = d->chip_data; + int idx = irqchip_data->chanidx; + struct intmux_data *data = container_of(irqchip_data, struct intmux_data, + irqchip_data[idx]); + unsigned long flags; + void __iomem *reg; + u32 val; + + raw_spin_lock_irqsave(&data->lock, flags); + reg = data->regs + CHANIER(idx); + val = readl_relaxed(reg); + /* disable the interrupt source of this channel */ + val &= ~BIT(d->hwirq); + writel_relaxed(val, reg); + raw_spin_unlock_irqrestore(&data->lock, flags); +} + +static void imx_intmux_irq_unmask(struct irq_data *d) +{ + struct intmux_irqchip_data *irqchip_data = d->chip_data; + int idx = irqchip_data->chanidx; + struct intmux_data *data = container_of(irqchip_data, struct intmux_data, + irqchip_data[idx]); + unsigned long flags; + void __iomem *reg; + u32 val; + + raw_spin_lock_irqsave(&data->lock, flags); + reg = data->regs + CHANIER(idx); + val = readl_relaxed(reg); + /* enable the interrupt source of this channel */ + val |= BIT(d->hwirq); + writel_relaxed(val, reg); + raw_spin_unlock_irqrestore(&data->lock, flags); +} + +static struct irq_chip imx_intmux_irq_chip __ro_after_init = { + .name = "intmux", + .irq_mask = imx_intmux_irq_mask, + .irq_unmask = imx_intmux_irq_unmask, +}; + +static int imx_intmux_irq_map(struct irq_domain *h, unsigned int irq, + irq_hw_number_t hwirq) +{ + struct intmux_irqchip_data *data = h->host_data; + + irq_set_chip_data(irq, data); + 
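/* mask/unmask toggle this source's bit in the channel's CHANIER */
+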
irq_set_chip_and_handler(irq, &imx_intmux_irq_chip, handle_level_irq); + + return 0; +} + +static int imx_intmux_irq_xlate(struct irq_domain *d, struct device_node *node, + const u32 *intspec, unsigned int intsize, + unsigned long *out_hwirq, unsigned int *out_type) +{ + struct intmux_irqchip_data *irqchip_data = d->host_data; + int idx = irqchip_data->chanidx; + struct intmux_data *data = container_of(irqchip_data, struct intmux_data, + irqchip_data[idx]); + + /* + * two cells needed in interrupt specifier: + * the 1st cell: hw interrupt number + * the 2nd cell: channel index + */ + if (WARN_ON(intsize != 2)) + return -EINVAL; + + if (WARN_ON(intspec[1] >= data->channum)) + return -EINVAL; + + *out_hwirq = intspec[0]; + *out_type = IRQ_TYPE_LEVEL_HIGH; + + return 0; +} + +static int imx_intmux_irq_select(struct irq_domain *d, struct irq_fwspec *fwspec, + enum irq_domain_bus_token bus_token) +{ + struct intmux_irqchip_data *irqchip_data = d->host_data; + + /* Not for us */ + if (fwspec->fwnode != d->fwnode) + return false; + + /* Handle pure domain searches */ + if (!fwspec->param_count) + return d->bus_token == bus_token; + + return irqchip_data->chanidx == fwspec->param[1]; +} + +static const struct irq_domain_ops imx_intmux_domain_ops = { + .map = imx_intmux_irq_map, + .xlate = imx_intmux_irq_xlate, + .select = imx_intmux_irq_select, +}; + +static void imx_intmux_irq_handler(struct irq_desc *desc) +{ + struct intmux_irqchip_data *irqchip_data = irq_desc_get_handler_data(desc); + int idx = irqchip_data->chanidx; + struct intmux_data *data = container_of(irqchip_data, struct intmux_data, + irqchip_data[idx]); + unsigned long irqstat; + int pos; + + chained_irq_enter(irq_desc_get_chip(desc), desc); + + /* read the interrupt source pending status of this channel */ + irqstat = readl_relaxed(data->regs + CHANIPR(idx)); + + for_each_set_bit(pos, &irqstat, 32) + generic_handle_domain_irq(irqchip_data->domain, pos); + + chained_irq_exit(irq_desc_get_chip(desc), desc); +} + +static int imx_intmux_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct irq_domain *domain; + struct intmux_data *data; + int channum; + int i, ret; + + channum = platform_irq_count(pdev); + if (channum == -EPROBE_DEFER) { + return -EPROBE_DEFER; + } else if (channum > CHAN_MAX_NUM) { + dev_err(&pdev->dev, "supports up to %d multiplex channels\n", + CHAN_MAX_NUM); + return -EINVAL; + } + + data = devm_kzalloc(&pdev->dev, struct_size(data, irqchip_data, channum), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(data->regs)) { + dev_err(&pdev->dev, "failed to initialize reg\n"); + return PTR_ERR(data->regs); + } + + data->ipg_clk = devm_clk_get(&pdev->dev, "ipg"); + if (IS_ERR(data->ipg_clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(data->ipg_clk), + "failed to get ipg clk\n"); + + data->channum = channum; + raw_spin_lock_init(&data->lock); + + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + + ret = clk_prepare_enable(data->ipg_clk); + if (ret) { + dev_err(&pdev->dev, "failed to enable ipg clk: %d\n", ret); + return ret; + } + + for (i = 0; i < channum; i++) { + data->irqchip_data[i].chanidx = i; + + data->irqchip_data[i].irq = irq_of_parse_and_map(np, i); + if (data->irqchip_data[i].irq <= 0) { + ret = -EINVAL; + dev_err(&pdev->dev, "failed to get irq\n"); + goto out; + } + + domain = irq_domain_create_linear(of_fwnode_handle(np), 32, 
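/* one 32-source linear domain per channel */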
&imx_intmux_domain_ops, + &data->irqchip_data[i]); + if (!domain) { + ret = -ENOMEM; + dev_err(&pdev->dev, "failed to create IRQ domain\n"); + goto out; + } + data->irqchip_data[i].domain = domain; + irq_domain_set_pm_device(domain, &pdev->dev); + + /* disable all interrupt sources of this channel firstly */ + writel_relaxed(0, data->regs + CHANIER(i)); + + irq_set_chained_handler_and_data(data->irqchip_data[i].irq, + imx_intmux_irq_handler, + &data->irqchip_data[i]); + } + + platform_set_drvdata(pdev, data); + + /* + * Let pm_runtime_put() disable clock. + * If CONFIG_PM is not enabled, the clock will stay powered. + */ + pm_runtime_put(&pdev->dev); + + return 0; +out: + clk_disable_unprepare(data->ipg_clk); + return ret; +} + +static void imx_intmux_remove(struct platform_device *pdev) +{ + struct intmux_data *data = platform_get_drvdata(pdev); + int i; + + for (i = 0; i < data->channum; i++) { + /* disable all interrupt sources of this channel */ + writel_relaxed(0, data->regs + CHANIER(i)); + + irq_set_chained_handler_and_data(data->irqchip_data[i].irq, + NULL, NULL); + + irq_domain_remove(data->irqchip_data[i].domain); + } + + pm_runtime_disable(&pdev->dev); +} + +#ifdef CONFIG_PM +static int imx_intmux_runtime_suspend(struct device *dev) +{ + struct intmux_data *data = dev_get_drvdata(dev); + struct intmux_irqchip_data *irqchip_data; + int i; + + for (i = 0; i < data->channum; i++) { + irqchip_data = &data->irqchip_data[i]; + irqchip_data->saved_reg = readl_relaxed(data->regs + CHANIER(i)); + } + + clk_disable_unprepare(data->ipg_clk); + + return 0; +} + +static int imx_intmux_runtime_resume(struct device *dev) +{ + struct intmux_data *data = dev_get_drvdata(dev); + struct intmux_irqchip_data *irqchip_data; + int ret, i; + + ret = clk_prepare_enable(data->ipg_clk); + if (ret) { + dev_err(dev, "failed to enable ipg clk: %d\n", ret); + return ret; + } + + for (i = 0; i < data->channum; i++) { + irqchip_data = &data->irqchip_data[i]; + writel_relaxed(irqchip_data->saved_reg, data->regs + CHANIER(i)); + } + + return 0; +} +#endif + +static const struct dev_pm_ops imx_intmux_pm_ops = { + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(imx_intmux_runtime_suspend, + imx_intmux_runtime_resume, NULL) +}; + +static const struct of_device_id imx_intmux_id[] = { + { .compatible = "fsl,imx-intmux", }, + { /* sentinel */ }, +}; + +static struct platform_driver imx_intmux_driver = { + .driver = { + .name = "imx-intmux", + .of_match_table = imx_intmux_id, + .pm = &imx_intmux_pm_ops, + }, + .probe = imx_intmux_probe, + .remove = imx_intmux_remove, +}; +builtin_platform_driver(imx_intmux_driver); diff --git a/drivers/irqchip/irq-imx-irqsteer.c b/drivers/irqchip/irq-imx-irqsteer.c index 5b3f1d735685..4682ce5bf8d3 100644 --- a/drivers/irqchip/irq-imx-irqsteer.c +++ b/drivers/irqchip/irq-imx-irqsteer.c @@ -10,10 +10,13 @@ #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/kernel.h> -#include <linux/of_platform.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/spinlock.h> -#define CTRL_STRIDE_OFF(_t, _r) (_t * 8 * _r) +#define CTRL_STRIDE_OFF(_t, _r) (_t * 4 * _r) #define CHANCTRL 0x0 #define CHANMASK(n, t) (CTRL_STRIDE_OFF(t, 0) + 0x4 * (n) + 0x4) #define CHANSET(n, t) (CTRL_STRIDE_OFF(t, 1) + 0x4 * (n) + 0x4) @@ -21,21 +24,25 @@ #define CHAN_MINTDIS(t) (CTRL_STRIDE_OFF(t, 3) + 0x4) #define CHAN_MASTRSTAT(t) (CTRL_STRIDE_OFF(t, 3) + 
0x8) +#define CHAN_MAX_OUTPUT_INT 0xF + struct irqsteer_data { void __iomem *regs; struct clk *ipg_clk; - int irq; + int irq[CHAN_MAX_OUTPUT_INT]; + int irq_count; raw_spinlock_t lock; - int irq_groups; + int reg_num; int channel; struct irq_domain *domain; u32 *saved_reg; + struct device *dev; }; static int imx_irqsteer_get_reg_index(struct irqsteer_data *data, unsigned long irqnum) { - return (data->irq_groups * 2 - irqnum / 32 - 1); + return (data->reg_num - irqnum / 32 - 1); } static void imx_irqsteer_irq_unmask(struct irq_data *d) @@ -46,9 +53,9 @@ static void imx_irqsteer_irq_unmask(struct irq_data *d) u32 val; raw_spin_lock_irqsave(&data->lock, flags); - val = readl_relaxed(data->regs + CHANMASK(idx, data->irq_groups)); + val = readl_relaxed(data->regs + CHANMASK(idx, data->reg_num)); val |= BIT(d->hwirq % 32); - writel_relaxed(val, data->regs + CHANMASK(idx, data->irq_groups)); + writel_relaxed(val, data->regs + CHANMASK(idx, data->reg_num)); raw_spin_unlock_irqrestore(&data->lock, flags); } @@ -60,16 +67,32 @@ static void imx_irqsteer_irq_mask(struct irq_data *d) u32 val; raw_spin_lock_irqsave(&data->lock, flags); - val = readl_relaxed(data->regs + CHANMASK(idx, data->irq_groups)); + val = readl_relaxed(data->regs + CHANMASK(idx, data->reg_num)); val &= ~BIT(d->hwirq % 32); - writel_relaxed(val, data->regs + CHANMASK(idx, data->irq_groups)); + writel_relaxed(val, data->regs + CHANMASK(idx, data->reg_num)); raw_spin_unlock_irqrestore(&data->lock, flags); } -static struct irq_chip imx_irqsteer_irq_chip = { - .name = "irqsteer", - .irq_mask = imx_irqsteer_irq_mask, - .irq_unmask = imx_irqsteer_irq_unmask, +static void imx_irqsteer_irq_bus_lock(struct irq_data *d) +{ + struct irqsteer_data *data = d->chip_data; + + pm_runtime_get_sync(data->dev); +} + +static void imx_irqsteer_irq_bus_sync_unlock(struct irq_data *d) +{ + struct irqsteer_data *data = d->chip_data; + + pm_runtime_put_autosuspend(data->dev); +} + +static const struct irq_chip imx_irqsteer_irq_chip = { + .name = "irqsteer", + .irq_mask = imx_irqsteer_irq_mask, + .irq_unmask = imx_irqsteer_irq_unmask, + .irq_bus_lock = imx_irqsteer_irq_bus_lock, + .irq_bus_sync_unlock = imx_irqsteer_irq_bus_sync_unlock, }; static int imx_irqsteer_irq_map(struct irq_domain *h, unsigned int irq, @@ -87,26 +110,47 @@ static const struct irq_domain_ops imx_irqsteer_domain_ops = { .xlate = irq_domain_xlate_onecell, }; +static int imx_irqsteer_get_hwirq_base(struct irqsteer_data *data, u32 irq) +{ + int i; + + for (i = 0; i < data->irq_count; i++) { + if (data->irq[i] == irq) + return i * 64; + } + + return -EINVAL; +} + static void imx_irqsteer_irq_handler(struct irq_desc *desc) { struct irqsteer_data *data = irq_desc_get_handler_data(desc); - int i; + int hwirq; + int irq, i; chained_irq_enter(irq_desc_get_chip(desc), desc); - for (i = 0; i < data->irq_groups * 64; i += 32) { - int idx = imx_irqsteer_get_reg_index(data, i); + irq = irq_desc_get_irq(desc); + hwirq = imx_irqsteer_get_hwirq_base(data, irq); + if (hwirq < 0) { + pr_warn("%s: unable to get hwirq base for irq %d\n", + __func__, irq); + return; + } + + for (i = 0; i < 2; i++, hwirq += 32) { + int idx = imx_irqsteer_get_reg_index(data, hwirq); unsigned long irqmap; - int pos, virq; + int pos; + + if (hwirq >= data->reg_num * 32) + break; irqmap = readl_relaxed(data->regs + - CHANSTATUS(idx, data->irq_groups)); + CHANSTATUS(idx, data->reg_num)); - for_each_set_bit(pos, &irqmap, 32) { - virq = irq_find_mapping(data->domain, pos + i); - if (virq) - generic_handle_irq(virq); - } + 
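+		/*
+		 * generic_handle_domain_irq() folds the former
+		 * irq_find_mapping()/generic_handle_irq() pair into one
+		 * domain-relative dispatch per pending status bit.
+		 */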
for_each_set_bit(pos, &irqmap, 32) + generic_handle_domain_irq(data->domain, pos + hwirq); } chained_irq_exit(irq_desc_get_chip(desc), desc); @@ -116,42 +160,44 @@ static int imx_irqsteer_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct irqsteer_data *data; - struct resource *res; - int ret; + u32 irqs_num; + int i, ret; data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - data->regs = devm_ioremap_resource(&pdev->dev, res); + data->dev = &pdev->dev; + data->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(data->regs)) { dev_err(&pdev->dev, "failed to initialize reg\n"); return PTR_ERR(data->regs); } - data->irq = platform_get_irq(pdev, 0); - if (data->irq <= 0) { - dev_err(&pdev->dev, "failed to get irq\n"); - return -ENODEV; - } - data->ipg_clk = devm_clk_get(&pdev->dev, "ipg"); - if (IS_ERR(data->ipg_clk)) { - ret = PTR_ERR(data->ipg_clk); - if (ret != -EPROBE_DEFER) - dev_err(&pdev->dev, "failed to get ipg clk: %d\n", ret); - return ret; - } + if (IS_ERR(data->ipg_clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(data->ipg_clk), + "failed to get ipg clk\n"); raw_spin_lock_init(&data->lock); - of_property_read_u32(np, "fsl,irq-groups", &data->irq_groups); - of_property_read_u32(np, "fsl,channel", &data->channel); + ret = of_property_read_u32(np, "fsl,num-irqs", &irqs_num); + if (ret) + return ret; + ret = of_property_read_u32(np, "fsl,channel", &data->channel); + if (ret) + return ret; + + /* + * There is one output irq for each group of 64 inputs. + * One register bit map can represent 32 input interrupts. + */ + data->irq_count = DIV_ROUND_UP(irqs_num, 64); + data->reg_num = irqs_num / 32; - if (IS_ENABLED(CONFIG_PM_SLEEP)) { + if (IS_ENABLED(CONFIG_PM)) { data->saved_reg = devm_kzalloc(&pdev->dev, - sizeof(u32) * data->irq_groups * 2, + sizeof(u32) * data->reg_num, GFP_KERNEL); if (!data->saved_reg) return -ENOMEM; @@ -166,42 +212,67 @@ static int imx_irqsteer_probe(struct platform_device *pdev) /* steer all IRQs into configured channel */ writel_relaxed(BIT(data->channel), data->regs + CHANCTRL); - data->domain = irq_domain_add_linear(np, data->irq_groups * 64, - &imx_irqsteer_domain_ops, data); + data->domain = irq_domain_create_linear(dev_fwnode(&pdev->dev), data->reg_num * 32, + &imx_irqsteer_domain_ops, data); if (!data->domain) { dev_err(&pdev->dev, "failed to create IRQ domain\n"); - clk_disable_unprepare(data->ipg_clk); - return -ENOMEM; + ret = -ENOMEM; + goto out; } + irq_domain_set_pm_device(data->domain, &pdev->dev); - irq_set_chained_handler_and_data(data->irq, imx_irqsteer_irq_handler, - data); + if (!data->irq_count || data->irq_count > CHAN_MAX_OUTPUT_INT) { + ret = -EINVAL; + goto out; + } + + for (i = 0; i < data->irq_count; i++) { + data->irq[i] = irq_of_parse_and_map(np, i); + if (!data->irq[i]) + break; + + irq_set_chained_handler_and_data(data->irq[i], + imx_irqsteer_irq_handler, + data); + } platform_set_drvdata(pdev, data); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + return 0; +out: + clk_disable_unprepare(data->ipg_clk); + return ret; } -static int imx_irqsteer_remove(struct platform_device *pdev) +static void imx_irqsteer_remove(struct platform_device *pdev) { struct irqsteer_data *irqsteer_data = platform_get_drvdata(pdev); + int i; + + for (i = 0; i < irqsteer_data->irq_count; i++) { + if (!irqsteer_data->irq[i]) + break; + + irq_set_chained_handler_and_data(irqsteer_data->irq[i], + NULL, 
NULL); + } - irq_set_chained_handler_and_data(irqsteer_data->irq, NULL, NULL); irq_domain_remove(irqsteer_data->domain); clk_disable_unprepare(irqsteer_data->ipg_clk); - - return 0; } -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_PM static void imx_irqsteer_save_regs(struct irqsteer_data *data) { int i; - for (i = 0; i < data->irq_groups * 2; i++) + for (i = 0; i < data->reg_num; i++) data->saved_reg[i] = readl_relaxed(data->regs + - CHANMASK(i, data->irq_groups)); + CHANMASK(i, data->reg_num)); } static void imx_irqsteer_restore_regs(struct irqsteer_data *data) @@ -209,9 +280,9 @@ static void imx_irqsteer_restore_regs(struct irqsteer_data *data) int i; writel_relaxed(BIT(data->channel), data->regs + CHANCTRL); - for (i = 0; i < data->irq_groups * 2; i++) + for (i = 0; i < data->reg_num; i++) writel_relaxed(data->saved_reg[i], - data->regs + CHANMASK(i, data->irq_groups)); + data->regs + CHANMASK(i, data->reg_num)); } static int imx_irqsteer_suspend(struct device *dev) @@ -241,7 +312,10 @@ static int imx_irqsteer_resume(struct device *dev) #endif static const struct dev_pm_ops imx_irqsteer_pm_ops = { - SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_irqsteer_suspend, imx_irqsteer_resume) + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(imx_irqsteer_suspend, + imx_irqsteer_resume, NULL) }; static const struct of_device_id imx_irqsteer_dt_ids[] = { @@ -251,11 +325,11 @@ static const struct of_device_id imx_irqsteer_dt_ids[] = { static struct platform_driver imx_irqsteer_driver = { .driver = { - .name = "imx-irqsteer", - .of_match_table = imx_irqsteer_dt_ids, - .pm = &imx_irqsteer_pm_ops, + .name = "imx-irqsteer", + .of_match_table = imx_irqsteer_dt_ids, + .pm = &imx_irqsteer_pm_ops, }, - .probe = imx_irqsteer_probe, - .remove = imx_irqsteer_remove, + .probe = imx_irqsteer_probe, + .remove = imx_irqsteer_remove, }; builtin_platform_driver(imx_irqsteer_driver); diff --git a/drivers/irqchip/irq-imx-mu-msi.c b/drivers/irqchip/irq-imx-mu-msi.c new file mode 100644 index 000000000000..c598f2f52fc6 --- /dev/null +++ b/drivers/irqchip/irq-imx-mu-msi.c @@ -0,0 +1,440 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Freescale MU used as MSI controller + * + * Copyright (c) 2018 Pengutronix, Oleksij Rempel <o.rempel@pengutronix.de> + * Copyright 2022 NXP + * Frank Li <Frank.Li@nxp.com> + * Peng Fan <peng.fan@nxp.com> + * + * Based on drivers/mailbox/imx-mailbox.c + */ + +#include <linux/clk.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/msi.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/pm_runtime.h> +#include <linux/pm_domain.h> +#include <linux/spinlock.h> + +#include <linux/irqchip/irq-msi-lib.h> + +#define IMX_MU_CHANS 4 + +enum imx_mu_xcr { + IMX_MU_GIER, + IMX_MU_GCR, + IMX_MU_TCR, + IMX_MU_RCR, + IMX_MU_xCR_MAX, +}; + +enum imx_mu_xsr { + IMX_MU_SR, + IMX_MU_GSR, + IMX_MU_TSR, + IMX_MU_RSR, + IMX_MU_xSR_MAX +}; + +enum imx_mu_type { + IMX_MU_V2 = BIT(1), +}; + +/* Receive Interrupt Enable */ +#define IMX_MU_xCR_RIEn(data, x) ((data->cfg->type) & IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x)))) +#define IMX_MU_xSR_RFn(data, x) ((data->cfg->type) & IMX_MU_V2 ? 
BIT(x) : BIT(24 + (3 - (x)))) + +struct imx_mu_dcfg { + enum imx_mu_type type; + u32 xTR; /* Transmit Register0 */ + u32 xRR; /* Receive Register0 */ + u32 xSR[IMX_MU_xSR_MAX]; /* Status Registers */ + u32 xCR[IMX_MU_xCR_MAX]; /* Control Registers */ +}; + +struct imx_mu_msi { + raw_spinlock_t lock; + struct irq_domain *msi_domain; + void __iomem *regs; + phys_addr_t msiir_addr; + const struct imx_mu_dcfg *cfg; + unsigned long used; + struct clk *clk; +}; + +static void imx_mu_write(struct imx_mu_msi *msi_data, u32 val, u32 offs) +{ + iowrite32(val, msi_data->regs + offs); +} + +static u32 imx_mu_read(struct imx_mu_msi *msi_data, u32 offs) +{ + return ioread32(msi_data->regs + offs); +} + +static u32 imx_mu_xcr_rmw(struct imx_mu_msi *msi_data, enum imx_mu_xcr type, u32 set, u32 clr) +{ + unsigned long flags; + u32 val; + + raw_spin_lock_irqsave(&msi_data->lock, flags); + val = imx_mu_read(msi_data, msi_data->cfg->xCR[type]); + val &= ~clr; + val |= set; + imx_mu_write(msi_data, val, msi_data->cfg->xCR[type]); + raw_spin_unlock_irqrestore(&msi_data->lock, flags); + + return val; +} + +static void imx_mu_msi_parent_mask_irq(struct irq_data *data) +{ + struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(data); + + imx_mu_xcr_rmw(msi_data, IMX_MU_RCR, 0, IMX_MU_xCR_RIEn(msi_data, data->hwirq)); +} + +static void imx_mu_msi_parent_unmask_irq(struct irq_data *data) +{ + struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(data); + + imx_mu_xcr_rmw(msi_data, IMX_MU_RCR, IMX_MU_xCR_RIEn(msi_data, data->hwirq), 0); +} + +static void imx_mu_msi_parent_ack_irq(struct irq_data *data) +{ + struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(data); + + imx_mu_read(msi_data, msi_data->cfg->xRR + data->hwirq * 4); +} + +static void imx_mu_msi_parent_compose_msg(struct irq_data *data, + struct msi_msg *msg) +{ + struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(data); + u64 addr = msi_data->msiir_addr + 4 * data->hwirq; + + msg->address_hi = upper_32_bits(addr); + msg->address_lo = lower_32_bits(addr); + msg->data = data->hwirq; +} + +static int imx_mu_msi_parent_set_affinity(struct irq_data *irq_data, + const struct cpumask *mask, bool force) +{ + return -EINVAL; +} + +static struct irq_chip imx_mu_msi_parent_chip = { + .name = "MU", + .irq_mask = imx_mu_msi_parent_mask_irq, + .irq_unmask = imx_mu_msi_parent_unmask_irq, + .irq_ack = imx_mu_msi_parent_ack_irq, + .irq_compose_msi_msg = imx_mu_msi_parent_compose_msg, + .irq_set_affinity = imx_mu_msi_parent_set_affinity, +}; + +static int imx_mu_msi_domain_irq_alloc(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs, + void *args) +{ + struct imx_mu_msi *msi_data = domain->host_data; + unsigned long flags; + int pos, err = 0; + + WARN_ON(nr_irqs != 1); + + raw_spin_lock_irqsave(&msi_data->lock, flags); + pos = find_first_zero_bit(&msi_data->used, IMX_MU_CHANS); + if (pos < IMX_MU_CHANS) + __set_bit(pos, &msi_data->used); + else + err = -ENOSPC; + raw_spin_unlock_irqrestore(&msi_data->lock, flags); + + if (err) + return err; + + irq_domain_set_info(domain, virq, pos, + &imx_mu_msi_parent_chip, msi_data, + handle_edge_irq, NULL, NULL); + return 0; +} + +static void imx_mu_msi_domain_irq_free(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) +{ + struct irq_data *d = irq_domain_get_irq_data(domain, virq); + struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(d); + unsigned long flags; + + raw_spin_lock_irqsave(&msi_data->lock, flags); + __clear_bit(d->hwirq, &msi_data->used); + 
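+	/*
+	 * Clearing the hwirq bit returns the MU channel to the pool: the
+	 * alloc path handed the bitmap position straight to
+	 * irq_domain_set_info() as the hwirq, so the two are interchangeable.
+	 */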
raw_spin_unlock_irqrestore(&msi_data->lock, flags); +} + +static const struct irq_domain_ops imx_mu_msi_domain_ops = { + .select = msi_lib_irq_domain_select, + .alloc = imx_mu_msi_domain_irq_alloc, + .free = imx_mu_msi_domain_irq_free, +}; + +static void imx_mu_msi_irq_handler(struct irq_desc *desc) +{ + struct imx_mu_msi *msi_data = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + u32 status; + int i; + + status = imx_mu_read(msi_data, msi_data->cfg->xSR[IMX_MU_RSR]); + + chained_irq_enter(chip, desc); + for (i = 0; i < IMX_MU_CHANS; i++) { + if (status & IMX_MU_xSR_RFn(msi_data, i)) + generic_handle_domain_irq(msi_data->msi_domain, i); + } + chained_irq_exit(chip, desc); +} + +#define IMX_MU_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS | \ + MSI_FLAG_PARENT_PM_DEV) + +#define IMX_MU_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK) + +static const struct msi_parent_ops imx_mu_msi_parent_ops = { + .supported_flags = IMX_MU_MSI_FLAGS_SUPPORTED, + .required_flags = IMX_MU_MSI_FLAGS_REQUIRED, + .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK, + .bus_select_token = DOMAIN_BUS_NEXUS, + .bus_select_mask = MATCH_PLATFORM_MSI, + .prefix = "MU-MSI-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, +}; + +static int imx_mu_msi_domains_init(struct imx_mu_msi *msi_data, struct device *dev) +{ + struct irq_domain_info info = { + .ops = &imx_mu_msi_domain_ops, + .fwnode = dev_fwnode(dev), + .size = IMX_MU_CHANS, + .host_data = msi_data, + }; + struct irq_domain *parent; + + /* Initialize MSI domain parent */ + parent = msi_create_parent_irq_domain(&info, &imx_mu_msi_parent_ops); + if (!parent) { + dev_err(dev, "failed to create IRQ domain\n"); + return -ENOMEM; + } + parent->dev = parent->pm_dev = dev; + return 0; +} + +/* Register offset of different version MU IP */ +static const struct imx_mu_dcfg imx_mu_cfg_imx6sx = { + .type = 0, + .xTR = 0x0, + .xRR = 0x10, + .xSR = { + [IMX_MU_SR] = 0x20, + [IMX_MU_GSR] = 0x20, + [IMX_MU_TSR] = 0x20, + [IMX_MU_RSR] = 0x20, + }, + .xCR = { + [IMX_MU_GIER] = 0x24, + [IMX_MU_GCR] = 0x24, + [IMX_MU_TCR] = 0x24, + [IMX_MU_RCR] = 0x24, + }, +}; + +static const struct imx_mu_dcfg imx_mu_cfg_imx7ulp = { + .type = 0, + .xTR = 0x20, + .xRR = 0x40, + .xSR = { + [IMX_MU_SR] = 0x60, + [IMX_MU_GSR] = 0x60, + [IMX_MU_TSR] = 0x60, + [IMX_MU_RSR] = 0x60, + }, + .xCR = { + [IMX_MU_GIER] = 0x64, + [IMX_MU_GCR] = 0x64, + [IMX_MU_TCR] = 0x64, + [IMX_MU_RCR] = 0x64, + }, +}; + +static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp = { + .type = IMX_MU_V2, + .xTR = 0x200, + .xRR = 0x280, + .xSR = { + [IMX_MU_SR] = 0xC, + [IMX_MU_GSR] = 0x118, + [IMX_MU_TSR] = 0x124, + [IMX_MU_RSR] = 0x12C, + }, + .xCR = { + [IMX_MU_GIER] = 0x110, + [IMX_MU_GCR] = 0x114, + [IMX_MU_TCR] = 0x120, + [IMX_MU_RCR] = 0x128 + }, +}; + +static int imx_mu_probe(struct platform_device *pdev, struct device_node *parent, + const struct imx_mu_dcfg *cfg) +{ + struct device_link *pd_link_a; + struct device_link *pd_link_b; + struct imx_mu_msi *msi_data; + struct resource *res; + struct device *pd_a; + struct device *pd_b; + struct device *dev; + int ret; + int irq; + + dev = &pdev->dev; + + msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL); + if (!msi_data) + return -ENOMEM; + + msi_data->cfg = cfg; + + msi_data->regs = devm_platform_ioremap_resource_byname(pdev, "processor-a-side"); + if (IS_ERR(msi_data->regs)) { + dev_err(&pdev->dev, "failed to initialize 'regs'\n"); + return PTR_ERR(msi_data->regs); + } + + res = 
platform_get_resource_byname(pdev, IORESOURCE_MEM, "processor-b-side"); + if (!res) + return -EIO; + + msi_data->msiir_addr = res->start + msi_data->cfg->xTR; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + platform_set_drvdata(pdev, msi_data); + + msi_data->clk = devm_clk_get(dev, NULL); + if (IS_ERR(msi_data->clk)) + return PTR_ERR(msi_data->clk); + + pd_a = dev_pm_domain_attach_by_name(dev, "processor-a-side"); + if (IS_ERR(pd_a)) + return PTR_ERR(pd_a); + + pd_b = dev_pm_domain_attach_by_name(dev, "processor-b-side"); + if (IS_ERR(pd_b)) + return PTR_ERR(pd_b); + + pd_link_a = device_link_add(dev, pd_a, + DL_FLAG_STATELESS | + DL_FLAG_PM_RUNTIME | + DL_FLAG_RPM_ACTIVE); + + if (!pd_link_a) { + dev_err(dev, "Failed to add device_link to mu a.\n"); + goto err_pd_a; + } + + pd_link_b = device_link_add(dev, pd_b, + DL_FLAG_STATELESS | + DL_FLAG_PM_RUNTIME | + DL_FLAG_RPM_ACTIVE); + + + if (!pd_link_b) { + dev_err(dev, "Failed to add device_link to mu a.\n"); + goto err_pd_b; + } + + ret = imx_mu_msi_domains_init(msi_data, dev); + if (ret) + goto err_dm_init; + + pm_runtime_enable(dev); + + irq_set_chained_handler_and_data(irq, + imx_mu_msi_irq_handler, + msi_data); + + return 0; + +err_dm_init: + device_link_remove(dev, pd_b); +err_pd_b: + device_link_remove(dev, pd_a); +err_pd_a: + return -EINVAL; +} + +static int __maybe_unused imx_mu_runtime_suspend(struct device *dev) +{ + struct imx_mu_msi *priv = dev_get_drvdata(dev); + + clk_disable_unprepare(priv->clk); + + return 0; +} + +static int __maybe_unused imx_mu_runtime_resume(struct device *dev) +{ + struct imx_mu_msi *priv = dev_get_drvdata(dev); + int ret; + + ret = clk_prepare_enable(priv->clk); + if (ret) + dev_err(dev, "failed to enable clock\n"); + + return ret; +} + +static const struct dev_pm_ops imx_mu_pm_ops = { + SET_RUNTIME_PM_OPS(imx_mu_runtime_suspend, + imx_mu_runtime_resume, NULL) +}; + +static int imx_mu_imx7ulp_probe(struct platform_device *pdev, struct device_node *parent) +{ + return imx_mu_probe(pdev, parent, &imx_mu_cfg_imx7ulp); +} + +static int imx_mu_imx6sx_probe(struct platform_device *pdev, struct device_node *parent) +{ + return imx_mu_probe(pdev, parent, &imx_mu_cfg_imx6sx); +} + +static int imx_mu_imx8ulp_probe(struct platform_device *pdev, struct device_node *parent) +{ + return imx_mu_probe(pdev, parent, &imx_mu_cfg_imx8ulp); +} + +IRQCHIP_PLATFORM_DRIVER_BEGIN(imx_mu_msi) +IRQCHIP_MATCH("fsl,imx7ulp-mu-msi", imx_mu_imx7ulp_probe) +IRQCHIP_MATCH("fsl,imx6sx-mu-msi", imx_mu_imx6sx_probe) +IRQCHIP_MATCH("fsl,imx8ulp-mu-msi", imx_mu_imx8ulp_probe) +IRQCHIP_PLATFORM_DRIVER_END(imx_mu_msi, .pm = &imx_mu_pm_ops) + +MODULE_AUTHOR("Frank Li <Frank.Li@nxp.com>"); +MODULE_DESCRIPTION("Freescale MU MSI controller driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/irqchip/irq-ingenic-tcu.c b/drivers/irqchip/irq-ingenic-tcu.c new file mode 100644 index 000000000000..794ecba717c9 --- /dev/null +++ b/drivers/irqchip/irq-ingenic-tcu.c @@ -0,0 +1,183 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * JZ47xx SoCs TCU IRQ driver + * Copyright (C) 2019 Paul Cercueil <paul@crapouillou.net> + */ + +#include <linux/clk.h> +#include <linux/interrupt.h> +#include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/mfd/ingenic-tcu.h> +#include <linux/mfd/syscon.h> +#include <linux/of_irq.h> +#include <linux/regmap.h> + +struct ingenic_tcu { + struct regmap *map; + struct clk *clk; + struct irq_domain *domain; + unsigned int nb_parent_irqs; + u32 parent_irqs[3]; +}; + +static void 
ingenic_tcu_intc_cascade(struct irq_desc *desc) +{ + struct irq_chip *irq_chip = irq_data_get_irq_chip(&desc->irq_data); + struct irq_domain *domain = irq_desc_get_handler_data(desc); + struct irq_chip_generic *gc = irq_get_domain_generic_chip(domain, 0); + struct regmap *map = gc->private; + uint32_t irq_reg, irq_mask; + unsigned long bits; + unsigned int i; + + regmap_read(map, TCU_REG_TFR, &irq_reg); + regmap_read(map, TCU_REG_TMR, &irq_mask); + + chained_irq_enter(irq_chip, desc); + + irq_reg &= ~irq_mask; + bits = irq_reg; + + for_each_set_bit(i, &bits, 32) + generic_handle_domain_irq(domain, i); + + chained_irq_exit(irq_chip, desc); +} + +static void ingenic_tcu_gc_unmask_enable_reg(struct irq_data *d) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + struct irq_chip_type *ct = irq_data_get_chip_type(d); + struct regmap *map = gc->private; + u32 mask = d->mask; + + guard(raw_spinlock)(&gc->lock); + regmap_write(map, ct->regs.ack, mask); + regmap_write(map, ct->regs.enable, mask); + *ct->mask_cache |= mask; +} + +static void ingenic_tcu_gc_mask_disable_reg(struct irq_data *d) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + struct irq_chip_type *ct = irq_data_get_chip_type(d); + struct regmap *map = gc->private; + u32 mask = d->mask; + + guard(raw_spinlock)(&gc->lock); + regmap_write(map, ct->regs.disable, mask); + *ct->mask_cache &= ~mask; +} + +static void ingenic_tcu_gc_mask_disable_reg_and_ack(struct irq_data *d) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + struct irq_chip_type *ct = irq_data_get_chip_type(d); + struct regmap *map = gc->private; + u32 mask = d->mask; + + guard(raw_spinlock)(&gc->lock); + regmap_write(map, ct->regs.ack, mask); + regmap_write(map, ct->regs.disable, mask); +} + +static int __init ingenic_tcu_irq_init(struct device_node *np, + struct device_node *parent) +{ + struct irq_chip_generic *gc; + struct irq_chip_type *ct; + struct ingenic_tcu *tcu; + struct regmap *map; + unsigned int i; + int ret, irqs; + + map = device_node_to_regmap(np); + if (IS_ERR(map)) + return PTR_ERR(map); + + tcu = kzalloc(sizeof(*tcu), GFP_KERNEL); + if (!tcu) + return -ENOMEM; + + tcu->map = map; + + irqs = of_property_count_elems_of_size(np, "interrupts", sizeof(u32)); + if (irqs < 0 || irqs > ARRAY_SIZE(tcu->parent_irqs)) { + pr_crit("%s: Invalid 'interrupts' property\n", __func__); + ret = -EINVAL; + goto err_free_tcu; + } + + tcu->nb_parent_irqs = irqs; + + tcu->domain = irq_domain_create_linear(of_fwnode_handle(np), 32, &irq_generic_chip_ops, + NULL); + if (!tcu->domain) { + ret = -ENOMEM; + goto err_free_tcu; + } + + ret = irq_alloc_domain_generic_chips(tcu->domain, 32, 1, "TCU", + handle_level_irq, 0, + IRQ_NOPROBE | IRQ_LEVEL, 0); + if (ret) { + pr_crit("%s: Invalid 'interrupts' property\n", __func__); + goto out_domain_remove; + } + + gc = irq_get_domain_generic_chip(tcu->domain, 0); + ct = gc->chip_types; + + gc->wake_enabled = IRQ_MSK(32); + gc->private = tcu->map; + + ct->regs.disable = TCU_REG_TMSR; + ct->regs.enable = TCU_REG_TMCR; + ct->regs.ack = TCU_REG_TFCR; + ct->chip.irq_unmask = ingenic_tcu_gc_unmask_enable_reg; + ct->chip.irq_mask = ingenic_tcu_gc_mask_disable_reg; + ct->chip.irq_mask_ack = ingenic_tcu_gc_mask_disable_reg_and_ack; + ct->chip.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE; + + /* Mask all IRQs by default */ + regmap_write(tcu->map, TCU_REG_TMSR, IRQ_MSK(32)); + + /* + * On JZ4740, timer 0 and timer 1 have their own interrupt line; + * timers 2-7 share one interrupt. 
+ * On SoCs >= JZ4770, timer 5 has its own interrupt line; + * timers 0-4 and 6-7 share one single interrupt. + * + * To keep things simple, we just register the same handler to + * all parent interrupts. The handler will properly detect which + * channel fired the interrupt. + */ + for (i = 0; i < irqs; i++) { + tcu->parent_irqs[i] = irq_of_parse_and_map(np, i); + if (!tcu->parent_irqs[i]) { + ret = -EINVAL; + goto out_unmap_irqs; + } + + irq_set_chained_handler_and_data(tcu->parent_irqs[i], + ingenic_tcu_intc_cascade, + tcu->domain); + } + + return 0; + +out_unmap_irqs: + for (; i > 0; i--) + irq_dispose_mapping(tcu->parent_irqs[i - 1]); +out_domain_remove: + irq_domain_remove(tcu->domain); +err_free_tcu: + kfree(tcu); + return ret; +} +IRQCHIP_DECLARE(jz4740_tcu_irq, "ingenic,jz4740-tcu", ingenic_tcu_irq_init); +IRQCHIP_DECLARE(jz4725b_tcu_irq, "ingenic,jz4725b-tcu", ingenic_tcu_irq_init); +IRQCHIP_DECLARE(jz4760_tcu_irq, "ingenic,jz4760-tcu", ingenic_tcu_irq_init); +IRQCHIP_DECLARE(jz4770_tcu_irq, "ingenic,jz4770-tcu", ingenic_tcu_irq_init); +IRQCHIP_DECLARE(x1000_tcu_irq, "ingenic,x1000-tcu", ingenic_tcu_irq_init); diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c index 2ff08986b536..52393724f213 100644 --- a/drivers/irqchip/irq-ingenic.c +++ b/drivers/irqchip/irq-ingenic.c @@ -1,16 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de> - * JZ4740 platform IRQ support - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 675 Mass Ave, Cambridge, MA 02139, USA. 
- * + * Ingenic XBurst platform IRQ support */ #include <linux/errno.h> @@ -19,7 +10,6 @@ #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/irqchip.h> -#include <linux/irqchip/ingenic.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/timex.h> @@ -27,10 +17,10 @@ #include <linux/delay.h> #include <asm/io.h> -#include <asm/mach-jz4740/irq.h> struct ingenic_intc_data { void __iomem *base; + struct irq_domain *domain; unsigned num_chips; }; @@ -44,46 +34,29 @@ struct ingenic_intc_data { static irqreturn_t intc_cascade(int irq, void *data) { struct ingenic_intc_data *intc = irq_get_handler_data(irq); - uint32_t irq_reg; + struct irq_domain *domain = intc->domain; + struct irq_chip_generic *gc; + uint32_t pending; unsigned i; for (i = 0; i < intc->num_chips; i++) { - irq_reg = readl(intc->base + (i * CHIP_SIZE) + - JZ_REG_INTC_PENDING); - if (!irq_reg) + gc = irq_get_domain_generic_chip(domain, i * 32); + + pending = irq_reg_readl(gc, JZ_REG_INTC_PENDING); + if (!pending) continue; - generic_handle_irq(__fls(irq_reg) + (i * 32) + JZ4740_IRQ_BASE); + while (pending) { + int bit = __fls(pending); + + generic_handle_domain_irq(domain, bit + (i * 32)); + pending &= ~BIT(bit); + } } return IRQ_HANDLED; } -static void intc_irq_set_mask(struct irq_chip_generic *gc, uint32_t mask) -{ - struct irq_chip_regs *regs = &gc->chip_types->regs; - - writel(mask, gc->reg_base + regs->enable); - writel(~mask, gc->reg_base + regs->disable); -} - -void ingenic_intc_irq_suspend(struct irq_data *data) -{ - struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); - intc_irq_set_mask(gc, gc->wake_active); -} - -void ingenic_intc_irq_resume(struct irq_data *data) -{ - struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); - intc_irq_set_mask(gc, gc->mask_cache); -} - -static struct irqaction intc_cascade_action = { - .handler = intc_cascade, - .name = "SoC intc cascade interrupt", -}; - static int __init ingenic_intc_of_init(struct device_node *node, unsigned num_chips) { @@ -117,17 +90,26 @@ static int __init ingenic_intc_of_init(struct device_node *node, goto out_unmap_irq; } - for (i = 0; i < num_chips; i++) { - /* Mask all irqs */ - writel(0xffffffff, intc->base + (i * CHIP_SIZE) + - JZ_REG_INTC_SET_MASK); + domain = irq_domain_create_linear(of_fwnode_handle(node), num_chips * 32, + &irq_generic_chip_ops, NULL); + if (!domain) { + err = -ENOMEM; + goto out_unmap_base; + } - gc = irq_alloc_generic_chip("INTC", 1, - JZ4740_IRQ_BASE + (i * 32), - intc->base + (i * CHIP_SIZE), - handle_level_irq); + intc->domain = domain; + + err = irq_alloc_domain_generic_chips(domain, 32, 1, "INTC", + handle_level_irq, 0, + IRQ_NOPROBE | IRQ_LEVEL, 0); + if (err) + goto out_domain_remove; + + for (i = 0; i < num_chips; i++) { + gc = irq_get_domain_generic_chip(domain, i * 32); gc->wake_enabled = IRQ_MSK(32); + gc->reg_base = intc->base + (i * CHIP_SIZE); ct = gc->chip_types; ct->regs.enable = JZ_REG_INTC_CLEAR_MASK; @@ -136,21 +118,21 @@ static int __init ingenic_intc_of_init(struct device_node *node, ct->chip.irq_mask = irq_gc_mask_disable_reg; ct->chip.irq_mask_ack = irq_gc_mask_disable_reg; ct->chip.irq_set_wake = irq_gc_set_wake; - ct->chip.irq_suspend = ingenic_intc_irq_suspend; - ct->chip.irq_resume = ingenic_intc_irq_resume; + ct->chip.flags = IRQCHIP_MASK_ON_SUSPEND; - irq_setup_generic_chip(gc, IRQ_MSK(32), 0, 0, - IRQ_NOPROBE | IRQ_LEVEL); + /* Mask all irqs */ + irq_reg_writel(gc, IRQ_MSK(32), JZ_REG_INTC_SET_MASK); } - domain = irq_domain_add_legacy(node, 
num_chips * 32, JZ4740_IRQ_BASE, 0, - &irq_domain_simple_ops, NULL); - if (!domain) - pr_warn("unable to register IRQ domain\n"); - - setup_irq(parent_irq, &intc_cascade_action); + if (request_irq(parent_irq, intc_cascade, IRQF_NO_SUSPEND, + "SoC intc cascade interrupt", NULL)) + pr_err("Failed to register SoC intc cascade interrupt\n"); return 0; +out_domain_remove: + irq_domain_remove(domain); +out_unmap_base: + iounmap(intc->base); out_unmap_irq: irq_dispose_mapping(parent_irq); out_free: @@ -172,6 +154,7 @@ static int __init intc_2chip_of_init(struct device_node *node, { return ingenic_intc_of_init(node, 2); } +IRQCHIP_DECLARE(jz4760_intc, "ingenic,jz4760-intc", intc_2chip_of_init); IRQCHIP_DECLARE(jz4770_intc, "ingenic,jz4770-intc", intc_2chip_of_init); IRQCHIP_DECLARE(jz4775_intc, "ingenic,jz4775-intc", intc_2chip_of_init); IRQCHIP_DECLARE(jz4780_intc, "ingenic,jz4780-intc", intc_2chip_of_init); diff --git a/drivers/irqchip/irq-ixp4xx.c b/drivers/irqchip/irq-ixp4xx.c new file mode 100644 index 000000000000..a9a5a52b818a --- /dev/null +++ b/drivers/irqchip/irq-ixp4xx.c @@ -0,0 +1,284 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * irqchip for the IXP4xx interrupt controller + * Copyright (C) 2019 Linus Walleij <linus.walleij@linaro.org> + * + * Based on arch/arm/mach-ixp4xx/common.c + * Copyright 2002 (C) Intel Corporation + * Copyright 2003-2004 (C) MontaVista, Software, Inc. + * Copyright (C) Deepak Saxena <dsaxena@plexity.net> + */ +#include <linux/bitops.h> +#include <linux/gpio/driver.h> +#include <linux/irq.h> +#include <linux/io.h> +#include <linux/irqchip.h> +#include <linux/irqdomain.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/platform_device.h> +#include <linux/cpu.h> + +#include <asm/exception.h> +#include <asm/mach/irq.h> + +#define IXP4XX_ICPR 0x00 /* Interrupt Status */ +#define IXP4XX_ICMR 0x04 /* Interrupt Enable */ +#define IXP4XX_ICLR 0x08 /* Interrupt IRQ/FIQ Select */ +#define IXP4XX_ICIP 0x0C /* IRQ Status */ +#define IXP4XX_ICFP 0x10 /* FIQ Status */ +#define IXP4XX_ICHR 0x14 /* Interrupt Priority */ +#define IXP4XX_ICIH 0x18 /* IRQ Highest Pri Int */ +#define IXP4XX_ICFH 0x1C /* FIQ Highest Pri Int */ + +/* IXP43x and IXP46x-only */ +#define IXP4XX_ICPR2 0x20 /* Interrupt Status 2 */ +#define IXP4XX_ICMR2 0x24 /* Interrupt Enable 2 */ +#define IXP4XX_ICLR2 0x28 /* Interrupt IRQ/FIQ Select 2 */ +#define IXP4XX_ICIP2 0x2C /* IRQ Status */ +#define IXP4XX_ICFP2 0x30 /* FIQ Status */ +#define IXP4XX_ICEEN 0x34 /* Error High Pri Enable */ + +/** + * struct ixp4xx_irq - state container for the Faraday IRQ controller + * @irqbase: IRQ controller memory base in virtual memory + * @is_356: if this is an IXP43x, IXP45x or IX46x SoC (with 64 IRQs) + * @irqchip: irqchip for this instance + * @domain: IRQ domain for this instance + */ +struct ixp4xx_irq { + void __iomem *irqbase; + bool is_356; + struct irq_chip irqchip; + struct irq_domain *domain; +}; + +/* Local static state container */ +static struct ixp4xx_irq ixirq; + +/* GPIO Clocks */ +#define IXP4XX_GPIO_CLK_0 14 +#define IXP4XX_GPIO_CLK_1 15 + +static int ixp4xx_set_irq_type(struct irq_data *d, unsigned int type) +{ + /* All are level active high (asserted) here */ + if (type != IRQ_TYPE_LEVEL_HIGH) + return -EINVAL; + return 0; +} + +static void ixp4xx_irq_mask(struct irq_data *d) +{ + struct ixp4xx_irq *ixi = irq_data_get_irq_chip_data(d); + u32 val; + + if (ixi->is_356 && d->hwirq >= 32) { + val = __raw_readl(ixi->irqbase + IXP4XX_ICMR2); + val &= 
~BIT(d->hwirq - 32); + __raw_writel(val, ixi->irqbase + IXP4XX_ICMR2); + } else { + val = __raw_readl(ixi->irqbase + IXP4XX_ICMR); + val &= ~BIT(d->hwirq); + __raw_writel(val, ixi->irqbase + IXP4XX_ICMR); + } +} + +/* + * Level triggered interrupts on GPIO lines can only be cleared when the + * interrupt condition disappears. + */ +static void ixp4xx_irq_unmask(struct irq_data *d) +{ + struct ixp4xx_irq *ixi = irq_data_get_irq_chip_data(d); + u32 val; + + if (ixi->is_356 && d->hwirq >= 32) { + val = __raw_readl(ixi->irqbase + IXP4XX_ICMR2); + val |= BIT(d->hwirq - 32); + __raw_writel(val, ixi->irqbase + IXP4XX_ICMR2); + } else { + val = __raw_readl(ixi->irqbase + IXP4XX_ICMR); + val |= BIT(d->hwirq); + __raw_writel(val, ixi->irqbase + IXP4XX_ICMR); + } +} + +static void __exception_irq_entry ixp4xx_handle_irq(struct pt_regs *regs) +{ + struct ixp4xx_irq *ixi = &ixirq; + unsigned long status; + int i; + + status = __raw_readl(ixi->irqbase + IXP4XX_ICIP); + for_each_set_bit(i, &status, 32) + generic_handle_domain_irq(ixi->domain, i); + + /* + * IXP465/IXP435 has an upper IRQ status register + */ + if (ixi->is_356) { + status = __raw_readl(ixi->irqbase + IXP4XX_ICIP2); + for_each_set_bit(i, &status, 32) + generic_handle_domain_irq(ixi->domain, i + 32); + } +} + +static int ixp4xx_irq_domain_translate(struct irq_domain *domain, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int *type) +{ + /* We support standard DT translation */ + if (is_of_node(fwspec->fwnode) && fwspec->param_count == 2) { + *hwirq = fwspec->param[0]; + *type = fwspec->param[1]; + return 0; + } + + if (is_fwnode_irqchip(fwspec->fwnode)) { + if (fwspec->param_count != 2) + return -EINVAL; + *hwirq = fwspec->param[0]; + *type = fwspec->param[1]; + WARN_ON(*type == IRQ_TYPE_NONE); + return 0; + } + + return -EINVAL; +} + +static int ixp4xx_irq_domain_alloc(struct irq_domain *d, + unsigned int irq, unsigned int nr_irqs, + void *data) +{ + struct ixp4xx_irq *ixi = d->host_data; + irq_hw_number_t hwirq; + unsigned int type = IRQ_TYPE_NONE; + struct irq_fwspec *fwspec = data; + int ret; + int i; + + ret = ixp4xx_irq_domain_translate(d, fwspec, &hwirq, &type); + if (ret) + return ret; + + for (i = 0; i < nr_irqs; i++) { + /* + * TODO: after converting IXP4xx to only device tree, set + * handle_bad_irq as default handler and assume all consumers + * call .set_type() as this is provided in the second cell in + * the device tree phandle. 
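+		 *
+		 * Consumers reference this controller with the usual two-cell
+		 * specifier (illustrative values; only level-high is accepted
+		 * by ixp4xx_set_irq_type()):
+		 *
+		 *   interrupt-parent = <&intcon>;
+		 *   interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;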
+ */ + irq_domain_set_info(d, + irq + i, + hwirq + i, + &ixi->irqchip, + ixi, + handle_level_irq, + NULL, NULL); + irq_set_probe(irq + i); + } + + return 0; +} + +/* + * This needs to be a hierarchical irqdomain to work well with the + * GPIO irqchip (which is lower in the hierarchy) + */ +static const struct irq_domain_ops ixp4xx_irqdomain_ops = { + .translate = ixp4xx_irq_domain_translate, + .alloc = ixp4xx_irq_domain_alloc, + .free = irq_domain_free_irqs_common, +}; + +/** + * ixp4x_irq_setup() - Common setup code for the IXP4xx interrupt controller + * @ixi: State container + * @irqbase: Virtual memory base for the interrupt controller + * @fwnode: Corresponding fwnode abstraction for this controller + * @is_356: if this is an IXP43x, IXP45x or IXP46x SoC variant + */ +static int __init ixp4xx_irq_setup(struct ixp4xx_irq *ixi, + void __iomem *irqbase, + struct fwnode_handle *fwnode, + bool is_356) +{ + int nr_irqs; + + ixi->irqbase = irqbase; + ixi->is_356 = is_356; + + /* Route all sources to IRQ instead of FIQ */ + __raw_writel(0x0, ixi->irqbase + IXP4XX_ICLR); + + /* Disable all interrupts */ + __raw_writel(0x0, ixi->irqbase + IXP4XX_ICMR); + + if (is_356) { + /* Route upper 32 sources to IRQ instead of FIQ */ + __raw_writel(0x0, ixi->irqbase + IXP4XX_ICLR2); + + /* Disable upper 32 interrupts */ + __raw_writel(0x0, ixi->irqbase + IXP4XX_ICMR2); + + nr_irqs = 64; + } else { + nr_irqs = 32; + } + + ixi->irqchip.name = "IXP4xx"; + ixi->irqchip.irq_mask = ixp4xx_irq_mask; + ixi->irqchip.irq_unmask = ixp4xx_irq_unmask; + ixi->irqchip.irq_set_type = ixp4xx_set_irq_type; + + ixi->domain = irq_domain_create_linear(fwnode, nr_irqs, + &ixp4xx_irqdomain_ops, + ixi); + if (!ixi->domain) { + pr_crit("IXP4XX: can not add primary irqdomain\n"); + return -ENODEV; + } + + set_handle_irq(ixp4xx_handle_irq); + + return 0; +} + +static int __init ixp4xx_of_init_irq(struct device_node *np, + struct device_node *parent) +{ + struct ixp4xx_irq *ixi = &ixirq; + void __iomem *base; + struct fwnode_handle *fwnode; + bool is_356; + int ret; + + base = of_iomap(np, 0); + if (!base) { + pr_crit("IXP4XX: could not ioremap interrupt controller\n"); + return -ENODEV; + } + fwnode = of_fwnode_handle(np); + + /* These chip variants have 64 interrupts */ + is_356 = of_device_is_compatible(np, "intel,ixp43x-interrupt") || + of_device_is_compatible(np, "intel,ixp45x-interrupt") || + of_device_is_compatible(np, "intel,ixp46x-interrupt"); + + ret = ixp4xx_irq_setup(ixi, base, fwnode, is_356); + if (ret) + pr_crit("IXP4XX: failed to set up irqchip\n"); + + return ret; +} +IRQCHIP_DECLARE(ixp42x, "intel,ixp42x-interrupt", + ixp4xx_of_init_irq); +IRQCHIP_DECLARE(ixp43x, "intel,ixp43x-interrupt", + ixp4xx_of_init_irq); +IRQCHIP_DECLARE(ixp45x, "intel,ixp45x-interrupt", + ixp4xx_of_init_irq); +IRQCHIP_DECLARE(ixp46x, "intel,ixp46x-interrupt", + ixp4xx_of_init_irq); diff --git a/drivers/irqchip/irq-jcore-aic.c b/drivers/irqchip/irq-jcore-aic.c index 033bccb41455..94c05cf974be 100644 --- a/drivers/irqchip/irq-jcore-aic.c +++ b/drivers/irqchip/irq-jcore-aic.c @@ -38,7 +38,7 @@ static struct irq_chip jcore_aic; static void handle_jcore_irq(struct irq_desc *desc) { if (irqd_is_per_cpu(irq_desc_get_irq_data(desc))) - handle_percpu_irq(desc); + handle_percpu_devid_irq(desc); else handle_simple_irq(desc); } @@ -68,6 +68,7 @@ static int __init aic_irq_of_init(struct device_node *node, unsigned min_irq = JCORE_AIC2_MIN_HWIRQ; unsigned dom_sz = JCORE_AIC_MAX_HWIRQ+1; struct irq_domain *domain; + int ret; pr_info("Initializing J-Core 
AIC\n"); @@ -100,11 +101,16 @@ static int __init aic_irq_of_init(struct device_node *node, jcore_aic.irq_unmask = noop; jcore_aic.name = "AIC"; - domain = irq_domain_add_linear(node, dom_sz, &jcore_aic_irqdomain_ops, - &jcore_aic); + ret = irq_alloc_descs(-1, min_irq, dom_sz - min_irq, + of_node_to_nid(node)); + + if (ret < 0) + return ret; + + domain = irq_domain_create_legacy(of_fwnode_handle(node), dom_sz - min_irq, min_irq, + min_irq, &jcore_aic_irqdomain_ops, &jcore_aic); if (!domain) return -ENOMEM; - irq_create_strict_mappings(domain, min_irq, min_irq, dom_sz - min_irq); return 0; } diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c index efbcf8435185..922fff09354f 100644 --- a/drivers/irqchip/irq-keystone.c +++ b/drivers/irqchip/irq-keystone.c @@ -1,18 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Texas Instruments Keystone IRQ controller IP driver * * Copyright (C) 2014 Texas Instruments, Inc. * Author: Sajesh Kumar Saran <sajesh@ti.com> * Grygorii Strashko <grygorii.strashko@ti.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include <linux/irq.h> @@ -23,7 +15,7 @@ #include <linux/irqdomain.h> #include <linux/irqchip.h> #include <linux/of.h> -#include <linux/of_platform.h> +#include <linux/platform_device.h> #include <linux/mfd/syscon.h> #include <linux/regmap.h> @@ -89,7 +81,7 @@ static irqreturn_t keystone_irq_handler(int irq, void *keystone_irq) struct keystone_irq_device *kirq = keystone_irq; unsigned long wa_lock_flags; unsigned long pending; - int src, virq; + int src, err; dev_dbg(kirq->dev, "start irq %d\n", irq); @@ -104,16 +96,14 @@ static irqreturn_t keystone_irq_handler(int irq, void *keystone_irq) for (src = 0; src < KEYSTONE_N_IRQ; src++) { if (BIT(src) & pending) { - virq = irq_find_mapping(kirq->irqd, src); - dev_dbg(kirq->dev, "dispatch bit %d, virq %d\n", - src, virq); - if (!virq) - dev_warn(kirq->dev, "spurious irq detected hwirq %d, virq %d\n", - src, virq); raw_spin_lock_irqsave(&kirq->wa_lock, wa_lock_flags); - generic_handle_irq(virq); + err = generic_handle_domain_irq(kirq->irqd, src); raw_spin_unlock_irqrestore(&kirq->wa_lock, wa_lock_flags); + + if (err) + dev_warn_ratelimited(kirq->dev, "spurious irq detected hwirq %d\n", + src); } } @@ -151,23 +141,14 @@ static int keystone_irq_probe(struct platform_device *pdev) if (!kirq) return -ENOMEM; - kirq->devctrl_regs = - syscon_regmap_lookup_by_phandle(np, "ti,syscon-dev"); + kirq->devctrl_regs = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-dev", + 1, &kirq->devctrl_offset); if (IS_ERR(kirq->devctrl_regs)) return PTR_ERR(kirq->devctrl_regs); - ret = of_property_read_u32_index(np, "ti,syscon-dev", 1, - &kirq->devctrl_offset); - if (ret) { - dev_err(dev, "couldn't read the devctrl_offset offset!\n"); - return ret; - } - kirq->irq = platform_get_irq(pdev, 0); - if (kirq->irq < 0) { - dev_err(dev, "no irq resource %d\n", kirq->irq); + if (kirq->irq < 0) return kirq->irq; - } kirq->dev = dev; kirq->mask = ~0x0; @@ -176,8 +157,8 @@ static int keystone_irq_probe(struct platform_device *pdev) kirq->chip.irq_mask = keystone_irq_setmask; kirq->chip.irq_unmask = 
keystone_irq_unmask; - kirq->irqd = irq_domain_add_linear(np, KEYSTONE_N_IRQ, - &keystone_irq_ops, kirq); + kirq->irqd = irq_domain_create_linear(dev_fwnode(dev), KEYSTONE_N_IRQ, &keystone_irq_ops, + kirq); if (!kirq->irqd) { dev_err(dev, "IRQ domain registration failed\n"); return -ENODEV; @@ -202,7 +183,7 @@ static int keystone_irq_probe(struct platform_device *pdev) return 0; } -static int keystone_irq_remove(struct platform_device *pdev) +static void keystone_irq_remove(struct platform_device *pdev) { struct keystone_irq_device *kirq = platform_get_drvdata(pdev); int hwirq; @@ -213,7 +194,6 @@ static int keystone_irq_remove(struct platform_device *pdev) irq_dispose_mapping(irq_find_mapping(kirq->irqd, hwirq)); irq_domain_remove(kirq->irqd); - return 0; } static const struct of_device_id keystone_irq_dt_ids[] = { diff --git a/drivers/irqchip/irq-lan966x-oic.c b/drivers/irqchip/irq-lan966x-oic.c new file mode 100644 index 000000000000..11d3a0ffa261 --- /dev/null +++ b/drivers/irqchip/irq-lan966x-oic.c @@ -0,0 +1,274 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for the Microchip LAN966x outbound interrupt controller + * + * Copyright (c) 2024 Technology Inc. and its subsidiaries. + * + * Authors: + * Horatiu Vultur <horatiu.vultur@microchip.com> + * Clément Léger <clement.leger@bootlin.com> + * Herve Codina <herve.codina@bootlin.com> + */ + +#include <linux/interrupt.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqchip.h> +#include <linux/irq.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +struct lan966x_oic_chip_regs { + int reg_off_ena_set; + int reg_off_ena_clr; + int reg_off_sticky; + int reg_off_ident; + int reg_off_map; +}; + +struct lan966x_oic_data { + void __iomem *regs; + int irq; +}; + +#define LAN966X_OIC_NR_IRQ 86 + +/* Interrupt sticky status */ +#define LAN966X_OIC_INTR_STICKY 0x30 +#define LAN966X_OIC_INTR_STICKY1 0x34 +#define LAN966X_OIC_INTR_STICKY2 0x38 + +/* Interrupt enable */ +#define LAN966X_OIC_INTR_ENA 0x48 +#define LAN966X_OIC_INTR_ENA1 0x4c +#define LAN966X_OIC_INTR_ENA2 0x50 + +/* Atomic clear of interrupt enable */ +#define LAN966X_OIC_INTR_ENA_CLR 0x54 +#define LAN966X_OIC_INTR_ENA_CLR1 0x58 +#define LAN966X_OIC_INTR_ENA_CLR2 0x5c + +/* Atomic set of interrupt */ +#define LAN966X_OIC_INTR_ENA_SET 0x60 +#define LAN966X_OIC_INTR_ENA_SET1 0x64 +#define LAN966X_OIC_INTR_ENA_SET2 0x68 + +/* Mapping of source to destination interrupts (_n = 0..8) */ +#define LAN966X_OIC_DST_INTR_MAP(_n) (0x78 + (_n) * 4) +#define LAN966X_OIC_DST_INTR_MAP1(_n) (0x9c + (_n) * 4) +#define LAN966X_OIC_DST_INTR_MAP2(_n) (0xc0 + (_n) * 4) + +/* Currently active interrupt sources per destination (_n = 0..8) */ +#define LAN966X_OIC_DST_INTR_IDENT(_n) (0xe4 + (_n) * 4) +#define LAN966X_OIC_DST_INTR_IDENT1(_n) (0x108 + (_n) * 4) +#define LAN966X_OIC_DST_INTR_IDENT2(_n) (0x12c + (_n) * 4) + +static unsigned int lan966x_oic_irq_startup(struct irq_data *data) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); + struct irq_chip_type *ct = irq_data_get_chip_type(data); + struct lan966x_oic_chip_regs *chip_regs = gc->private; + u32 map; + + scoped_guard (raw_spinlock, &gc->lock) { + /* Map the source interrupt to the destination */ + map = irq_reg_readl(gc, chip_regs->reg_off_map); + map |= data->mask; + irq_reg_writel(gc, map, chip_regs->reg_off_map); + } + + ct->chip.irq_ack(data); + ct->chip.irq_unmask(data); + + return 0; +} + +static void 
lan966x_oic_irq_shutdown(struct irq_data *data) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); + struct irq_chip_type *ct = irq_data_get_chip_type(data); + struct lan966x_oic_chip_regs *chip_regs = gc->private; + u32 map; + + ct->chip.irq_mask(data); + + guard(raw_spinlock)(&gc->lock); + + /* Unmap the interrupt */ + map = irq_reg_readl(gc, chip_regs->reg_off_map); + map &= ~data->mask; + irq_reg_writel(gc, map, chip_regs->reg_off_map); +} + +static int lan966x_oic_irq_set_type(struct irq_data *data, + unsigned int flow_type) +{ + if (flow_type != IRQ_TYPE_LEVEL_HIGH) { + pr_err("lan966x oic doesn't support flow type %d\n", flow_type); + return -EINVAL; + } + + return 0; +} + +static void lan966x_oic_irq_handler_domain(struct irq_domain *d, u32 first_irq) +{ + struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, first_irq); + struct lan966x_oic_chip_regs *chip_regs = gc->private; + unsigned long ident; + unsigned int hwirq; + + ident = irq_reg_readl(gc, chip_regs->reg_off_ident); + if (!ident) + return; + + for_each_set_bit(hwirq, &ident, 32) + generic_handle_domain_irq(d, hwirq + first_irq); +} + +static void lan966x_oic_irq_handler(struct irq_desc *desc) +{ + struct irq_domain *d = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + + chained_irq_enter(chip, desc); + lan966x_oic_irq_handler_domain(d, 0); + lan966x_oic_irq_handler_domain(d, 32); + lan966x_oic_irq_handler_domain(d, 64); + chained_irq_exit(chip, desc); +} + +static struct lan966x_oic_chip_regs lan966x_oic_chip_regs[3] = { + { + .reg_off_ena_set = LAN966X_OIC_INTR_ENA_SET, + .reg_off_ena_clr = LAN966X_OIC_INTR_ENA_CLR, + .reg_off_sticky = LAN966X_OIC_INTR_STICKY, + .reg_off_ident = LAN966X_OIC_DST_INTR_IDENT(0), + .reg_off_map = LAN966X_OIC_DST_INTR_MAP(0), + }, { + .reg_off_ena_set = LAN966X_OIC_INTR_ENA_SET1, + .reg_off_ena_clr = LAN966X_OIC_INTR_ENA_CLR1, + .reg_off_sticky = LAN966X_OIC_INTR_STICKY1, + .reg_off_ident = LAN966X_OIC_DST_INTR_IDENT1(0), + .reg_off_map = LAN966X_OIC_DST_INTR_MAP1(0), + }, { + .reg_off_ena_set = LAN966X_OIC_INTR_ENA_SET2, + .reg_off_ena_clr = LAN966X_OIC_INTR_ENA_CLR2, + .reg_off_sticky = LAN966X_OIC_INTR_STICKY2, + .reg_off_ident = LAN966X_OIC_DST_INTR_IDENT2(0), + .reg_off_map = LAN966X_OIC_DST_INTR_MAP2(0), + } +}; + +static int lan966x_oic_chip_init(struct irq_chip_generic *gc) +{ + struct lan966x_oic_data *lan966x_oic = gc->domain->host_data; + struct lan966x_oic_chip_regs *chip_regs; + + chip_regs = &lan966x_oic_chip_regs[gc->irq_base / 32]; + + gc->reg_base = lan966x_oic->regs; + gc->chip_types[0].regs.enable = chip_regs->reg_off_ena_set; + gc->chip_types[0].regs.disable = chip_regs->reg_off_ena_clr; + gc->chip_types[0].regs.ack = chip_regs->reg_off_sticky; + gc->chip_types[0].chip.irq_startup = lan966x_oic_irq_startup; + gc->chip_types[0].chip.irq_shutdown = lan966x_oic_irq_shutdown; + gc->chip_types[0].chip.irq_set_type = lan966x_oic_irq_set_type; + gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg; + gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg; + gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit; + gc->private = chip_regs; + + /* Disable all interrupts handled by this chip */ + irq_reg_writel(gc, ~0U, chip_regs->reg_off_ena_clr); + + return 0; +} + +static void lan966x_oic_chip_exit(struct irq_chip_generic *gc) +{ + /* Disable and ack all interrupts handled by this chip */ + irq_reg_writel(gc, ~0U, gc->chip_types[0].regs.disable); + irq_reg_writel(gc, ~0U, gc->chip_types[0].regs.ack); +} + 
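+/*
+ * The 86 OIC sources span three 32-bit register banks. Bank selection is
+ * per generic-chip instance (gc->irq_base / 32 indexes
+ * lan966x_oic_chip_regs[] in lan966x_oic_chip_init() above), and the
+ * chained handler walks the three IDENT banks at hwirq offsets 0, 32
+ * and 64.
+ */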
+static int lan966x_oic_domain_init(struct irq_domain *d) +{ + struct lan966x_oic_data *lan966x_oic = d->host_data; + + irq_set_chained_handler_and_data(lan966x_oic->irq, lan966x_oic_irq_handler, d); + + return 0; +} + +static void lan966x_oic_domain_exit(struct irq_domain *d) +{ + struct lan966x_oic_data *lan966x_oic = d->host_data; + + irq_set_chained_handler_and_data(lan966x_oic->irq, NULL, NULL); +} + +static int lan966x_oic_probe(struct platform_device *pdev) +{ + struct irq_domain_chip_generic_info dgc_info = { + .name = "lan966x-oic", + .handler = handle_level_irq, + .irqs_per_chip = 32, + .num_ct = 1, + .init = lan966x_oic_chip_init, + .exit = lan966x_oic_chip_exit, + }; + struct irq_domain_info d_info = { + .fwnode = of_fwnode_handle(pdev->dev.of_node), + .domain_flags = IRQ_DOMAIN_FLAG_DESTROY_GC, + .size = LAN966X_OIC_NR_IRQ, + .hwirq_max = LAN966X_OIC_NR_IRQ, + .ops = &irq_generic_chip_ops, + .dgc_info = &dgc_info, + .init = lan966x_oic_domain_init, + .exit = lan966x_oic_domain_exit, + }; + struct lan966x_oic_data *lan966x_oic; + struct device *dev = &pdev->dev; + struct irq_domain *domain; + + lan966x_oic = devm_kmalloc(dev, sizeof(*lan966x_oic), GFP_KERNEL); + if (!lan966x_oic) + return -ENOMEM; + + lan966x_oic->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(lan966x_oic->regs)) + return dev_err_probe(dev, PTR_ERR(lan966x_oic->regs), + "failed to map resource\n"); + + lan966x_oic->irq = platform_get_irq(pdev, 0); + if (lan966x_oic->irq < 0) + return dev_err_probe(dev, lan966x_oic->irq, "failed to get the IRQ\n"); + + d_info.host_data = lan966x_oic; + domain = devm_irq_domain_instantiate(dev, &d_info); + if (IS_ERR(domain)) + return dev_err_probe(dev, PTR_ERR(domain), + "failed to instantiate the IRQ domain\n"); + return 0; +} + +static const struct of_device_id lan966x_oic_of_match[] = { + { .compatible = "microchip,lan966x-oic" }, + {} /* sentinel */ +}; +MODULE_DEVICE_TABLE(of, lan966x_oic_of_match); + +static struct platform_driver lan966x_oic_driver = { + .probe = lan966x_oic_probe, + .driver = { + .name = "lan966x-oic", + .of_match_table = lan966x_oic_of_match, + }, +}; +module_platform_driver(lan966x_oic_driver); + +MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>"); +MODULE_DESCRIPTION("Microchip LAN966x OIC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/irqchip/irq-loongarch-avec.c b/drivers/irqchip/irq-loongarch-avec.c new file mode 100644 index 000000000000..bf52dc8345f5 --- /dev/null +++ b/drivers/irqchip/irq-loongarch-avec.c @@ -0,0 +1,433 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2024 Loongson Technologies, Inc. 
+ */ + +#include <linux/cpuhotplug.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/kernel.h> +#include <linux/msi.h> +#include <linux/radix-tree.h> +#include <linux/spinlock.h> + +#include <asm/loongarch.h> +#include <asm/setup.h> + +#include <linux/irqchip/irq-msi-lib.h> +#include "irq-loongson.h" + +#define VECTORS_PER_REG 64 +#define IRR_VECTOR_MASK 0xffUL +#define IRR_INVALID_MASK 0x80000000UL +#define AVEC_MSG_OFFSET 0x100000 + +#ifdef CONFIG_SMP +struct pending_list { + struct list_head head; +}; + +static struct cpumask intersect_mask; +static DEFINE_PER_CPU(struct pending_list, pending_list); +#endif + +static DEFINE_PER_CPU(struct irq_desc * [NR_VECTORS], irq_map); + +struct avecintc_chip { + raw_spinlock_t lock; + struct fwnode_handle *fwnode; + struct irq_domain *domain; + struct irq_matrix *vector_matrix; + phys_addr_t msi_base_addr; +}; + +static struct avecintc_chip loongarch_avec; + +struct avecintc_data { + struct list_head entry; + unsigned int cpu; + unsigned int vec; + unsigned int prev_cpu; + unsigned int prev_vec; + unsigned int moving; +}; + +static inline void avecintc_enable(void) +{ + u64 value; + + value = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC); + value |= IOCSR_MISC_FUNC_AVEC_EN; + iocsr_write64(value, LOONGARCH_IOCSR_MISC_FUNC); +} + +static inline void avecintc_ack_irq(struct irq_data *d) +{ +} + +static inline void avecintc_mask_irq(struct irq_data *d) +{ +} + +static inline void avecintc_unmask_irq(struct irq_data *d) +{ +} + +#ifdef CONFIG_SMP +static inline void pending_list_init(int cpu) +{ + struct pending_list *plist = per_cpu_ptr(&pending_list, cpu); + + INIT_LIST_HEAD(&plist->head); +} + +static void avecintc_sync(struct avecintc_data *adata) +{ + struct pending_list *plist; + + if (cpu_online(adata->prev_cpu)) { + plist = per_cpu_ptr(&pending_list, adata->prev_cpu); + list_add_tail(&adata->entry, &plist->head); + adata->moving = 1; + mp_ops.send_ipi_single(adata->prev_cpu, ACTION_CLEAR_VECTOR); + } +} + +static int avecintc_set_affinity(struct irq_data *data, const struct cpumask *dest, bool force) +{ + int cpu, ret, vector; + struct avecintc_data *adata; + + scoped_guard(raw_spinlock, &loongarch_avec.lock) { + adata = irq_data_get_irq_chip_data(data); + + if (adata->moving) + return -EBUSY; + + if (cpu_online(adata->cpu) && cpumask_test_cpu(adata->cpu, dest)) + return 0; + + cpumask_and(&intersect_mask, dest, cpu_online_mask); + + ret = irq_matrix_alloc(loongarch_avec.vector_matrix, &intersect_mask, false, &cpu); + if (ret < 0) + return ret; + + vector = ret; + adata->cpu = cpu; + adata->vec = vector; + per_cpu_ptr(irq_map, adata->cpu)[adata->vec] = irq_data_to_desc(data); + avecintc_sync(adata); + } + + irq_data_update_effective_affinity(data, cpumask_of(cpu)); + + return IRQ_SET_MASK_OK; +} + +static int avecintc_cpu_online(unsigned int cpu) +{ + if (!loongarch_avec.vector_matrix) + return 0; + + guard(raw_spinlock)(&loongarch_avec.lock); + + avecintc_enable(); + + irq_matrix_online(loongarch_avec.vector_matrix); + + pending_list_init(cpu); + + return 0; +} + +static int avecintc_cpu_offline(unsigned int cpu) +{ + struct pending_list *plist = per_cpu_ptr(&pending_list, cpu); + + if (!loongarch_avec.vector_matrix) + return 0; + + guard(raw_spinlock)(&loongarch_avec.lock); + + if (!list_empty(&plist->head)) + pr_warn("CPU#%d vector is busy\n", cpu); + + irq_matrix_offline(loongarch_avec.vector_matrix); 
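+	/*
+	 * irq_matrix_offline() only withdraws this CPU from future vector
+	 * allocation; any in-flight moves queued on the pending list are
+	 * drained by complete_irq_moving() on the vacated CPU.
+	 */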
+ + return 0; +} + +void complete_irq_moving(void) +{ + struct pending_list *plist = this_cpu_ptr(&pending_list); + struct avecintc_data *adata, *tdata; + int cpu, vector, bias; + uint64_t isr; + + guard(raw_spinlock)(&loongarch_avec.lock); + + list_for_each_entry_safe(adata, tdata, &plist->head, entry) { + cpu = adata->prev_cpu; + vector = adata->prev_vec; + bias = vector / VECTORS_PER_REG; + switch (bias) { + case 0: + isr = csr_read64(LOONGARCH_CSR_ISR0); + break; + case 1: + isr = csr_read64(LOONGARCH_CSR_ISR1); + break; + case 2: + isr = csr_read64(LOONGARCH_CSR_ISR2); + break; + case 3: + isr = csr_read64(LOONGARCH_CSR_ISR3); + break; + } + + if (isr & (1UL << (vector % VECTORS_PER_REG))) { + mp_ops.send_ipi_single(cpu, ACTION_CLEAR_VECTOR); + continue; + } + list_del(&adata->entry); + irq_matrix_free(loongarch_avec.vector_matrix, cpu, vector, false); + this_cpu_write(irq_map[vector], NULL); + adata->moving = 0; + adata->prev_cpu = adata->cpu; + adata->prev_vec = adata->vec; + } +} +#endif + +static void avecintc_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) +{ + struct avecintc_data *adata = irq_data_get_irq_chip_data(d); + + msg->address_hi = 0x0; + msg->address_lo = (loongarch_avec.msi_base_addr | (adata->vec & 0xff) << 4) + | ((cpu_logical_map(adata->cpu & 0xffff)) << 12); + msg->data = 0x0; +} + +static struct irq_chip avec_irq_controller = { + .name = "AVECINTC", + .irq_ack = avecintc_ack_irq, + .irq_mask = avecintc_mask_irq, + .irq_unmask = avecintc_unmask_irq, +#ifdef CONFIG_SMP + .irq_set_affinity = avecintc_set_affinity, +#endif + .irq_compose_msi_msg = avecintc_compose_msi_msg, +}; + +static void avecintc_irq_dispatch(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct irq_desc *d; + + chained_irq_enter(chip, desc); + + while (true) { + unsigned long vector = csr_read64(LOONGARCH_CSR_IRR); + if (vector & IRR_INVALID_MASK) + break; + + vector &= IRR_VECTOR_MASK; + + d = this_cpu_read(irq_map[vector]); + if (d) { + generic_handle_irq_desc(d); + } else { + spurious_interrupt(); + pr_warn("Unexpected IRQ occurs on CPU#%d [vector %ld]\n", smp_processor_id(), vector); + } + } + + chained_irq_exit(chip, desc); +} + +static int avecintc_alloc_vector(struct irq_data *irqd, struct avecintc_data *adata) +{ + int cpu, ret; + + guard(raw_spinlock_irqsave)(&loongarch_avec.lock); + + ret = irq_matrix_alloc(loongarch_avec.vector_matrix, cpu_online_mask, false, &cpu); + if (ret < 0) + return ret; + + adata->prev_cpu = adata->cpu = cpu; + adata->prev_vec = adata->vec = ret; + per_cpu_ptr(irq_map, adata->cpu)[adata->vec] = irq_data_to_desc(irqd); + + return 0; +} + +static int avecintc_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + for (unsigned int i = 0; i < nr_irqs; i++) { + struct irq_data *irqd = irq_domain_get_irq_data(domain, virq + i); + struct avecintc_data *adata = kzalloc(sizeof(*adata), GFP_KERNEL); + int ret; + + if (!adata) + return -ENOMEM; + + ret = avecintc_alloc_vector(irqd, adata); + if (ret < 0) { + kfree(adata); + return ret; + } + + irq_domain_set_info(domain, virq + i, virq + i, &avec_irq_controller, + adata, handle_edge_irq, NULL, NULL); + irqd_set_single_target(irqd); + irqd_set_affinity_on_activate(irqd); + } + + return 0; +} + +static void avecintc_free_vector(struct irq_data *irqd, struct avecintc_data *adata) +{ + guard(raw_spinlock_irqsave)(&loongarch_avec.lock); + + per_cpu(irq_map, adata->cpu)[adata->vec] = NULL; + irq_matrix_free(loongarch_avec.vector_matrix, 
adata->cpu, adata->vec, false); + +#ifdef CONFIG_SMP + if (!adata->moving) + return; + + per_cpu(irq_map, adata->prev_cpu)[adata->prev_vec] = NULL; + irq_matrix_free(loongarch_avec.vector_matrix, adata->prev_cpu, adata->prev_vec, false); + list_del_init(&adata->entry); +#endif +} + +static void avecintc_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + for (unsigned int i = 0; i < nr_irqs; i++) { + struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); + + if (d) { + struct avecintc_data *adata = irq_data_get_irq_chip_data(d); + + avecintc_free_vector(d, adata); + irq_domain_reset_irq_data(d); + kfree(adata); + } + } +} + +static const struct irq_domain_ops avecintc_domain_ops = { + .alloc = avecintc_domain_alloc, + .free = avecintc_domain_free, + .select = msi_lib_irq_domain_select, +}; + +static int __init irq_matrix_init(void) +{ + loongarch_avec.vector_matrix = irq_alloc_matrix(NR_VECTORS, 0, NR_VECTORS); + if (!loongarch_avec.vector_matrix) + return -ENOMEM; + + for (int i = 0; i < NR_LEGACY_VECTORS; i++) + irq_matrix_assign_system(loongarch_avec.vector_matrix, i, false); + + irq_matrix_online(loongarch_avec.vector_matrix); + + return 0; +} + +static int __init avecintc_init(struct irq_domain *parent) +{ + int ret, parent_irq; + + raw_spin_lock_init(&loongarch_avec.lock); + + loongarch_avec.fwnode = irq_domain_alloc_named_fwnode("AVECINTC"); + if (!loongarch_avec.fwnode) { + pr_err("Unable to allocate domain handle\n"); + ret = -ENOMEM; + goto out; + } + + loongarch_avec.domain = irq_domain_create_tree(loongarch_avec.fwnode, + &avecintc_domain_ops, NULL); + if (!loongarch_avec.domain) { + pr_err("Unable to create IRQ domain\n"); + ret = -ENOMEM; + goto out_free_handle; + } + + parent_irq = irq_create_mapping(parent, INT_AVEC); + if (!parent_irq) { + pr_err("Failed to mapping hwirq\n"); + ret = -EINVAL; + goto out_remove_domain; + } + + ret = irq_matrix_init(); + if (ret < 0) { + pr_err("Failed to init irq matrix\n"); + goto out_remove_domain; + } + irq_set_chained_handler_and_data(parent_irq, avecintc_irq_dispatch, NULL); + +#ifdef CONFIG_SMP + pending_list_init(0); + cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_AVECINTC_STARTING, + "irqchip/loongarch/avecintc:starting", + avecintc_cpu_online, avecintc_cpu_offline); +#endif + avecintc_enable(); + + return ret; + +out_remove_domain: + irq_domain_remove(loongarch_avec.domain); +out_free_handle: + irq_domain_free_fwnode(loongarch_avec.fwnode); +out: + return ret; +} + +static int __init pch_msi_parse_madt(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header; + + loongarch_avec.msi_base_addr = pchmsi_entry->msg_address - AVEC_MSG_OFFSET; + + return pch_msi_acpi_init_avec(loongarch_avec.domain); +} + +static inline int __init acpi_cascade_irqdomain_init(void) +{ + return acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1); +} + +int __init avecintc_acpi_init(struct irq_domain *parent) +{ + int ret = avecintc_init(parent); + if (ret < 0) { + pr_err("Failed to init IRQ domain\n"); + return ret; + } + + ret = acpi_cascade_irqdomain_init(); + if (ret < 0) { + pr_err("Failed to init cascade IRQ domain\n"); + return ret; + } + + return ret; +} diff --git a/drivers/irqchip/irq-loongarch-cpu.c b/drivers/irqchip/irq-loongarch-cpu.c new file mode 100644 index 000000000000..950bc087e388 --- /dev/null +++ b/drivers/irqchip/irq-loongarch-cpu.c @@ -0,0 +1,179 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * 
Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqdomain.h> + +#include <asm/loongarch.h> +#include <asm/setup.h> + +#include "irq-loongson.h" + +static struct irq_domain *irq_domain; +struct fwnode_handle *cpuintc_handle; + +static u32 lpic_gsi_to_irq(u32 gsi) +{ + int irq = 0; + + /* Only pch irqdomain transferring is required for LoongArch. */ + if (gsi >= GSI_MIN_PCH_IRQ && gsi <= GSI_MAX_PCH_IRQ) + irq = acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH); + + return (irq > 0) ? irq : 0; +} + +static struct fwnode_handle *lpic_get_gsi_domain_id(u32 gsi) +{ + int id; + struct fwnode_handle *domain_handle = NULL; + + switch (gsi) { + case GSI_MIN_CPU_IRQ ... GSI_MAX_CPU_IRQ: + if (liointc_handle) + domain_handle = liointc_handle; + break; + + case GSI_MIN_LPC_IRQ ... GSI_MAX_LPC_IRQ: + if (pch_lpc_handle) + domain_handle = pch_lpc_handle; + break; + + case GSI_MIN_PCH_IRQ ... GSI_MAX_PCH_IRQ: + id = find_pch_pic(gsi); + if (id >= 0 && pch_pic_handle[id]) + domain_handle = pch_pic_handle[id]; + break; + } + + return domain_handle; +} + +static void mask_loongarch_irq(struct irq_data *d) +{ + clear_csr_ecfg(ECFGF(d->hwirq)); +} + +static void unmask_loongarch_irq(struct irq_data *d) +{ + set_csr_ecfg(ECFGF(d->hwirq)); +} + +static struct irq_chip cpu_irq_controller = { + .name = "CPUINTC", + .irq_mask = mask_loongarch_irq, + .irq_unmask = unmask_loongarch_irq, +}; + +static void handle_cpu_irq(struct pt_regs *regs) +{ + int hwirq; + unsigned int estat = read_csr_estat() & CSR_ESTAT_IS; + + while ((hwirq = ffs(estat))) { + estat &= ~BIT(hwirq - 1); + generic_handle_domain_irq(irq_domain, hwirq - 1); + } +} + +static int loongarch_cpu_intc_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_noprobe(irq); + irq_set_chip_and_handler(irq, &cpu_irq_controller, handle_percpu_irq); + + return 0; +} + +static const struct irq_domain_ops loongarch_cpu_intc_irq_domain_ops = { + .map = loongarch_cpu_intc_map, + .xlate = irq_domain_xlate_onecell, +}; + +#ifdef CONFIG_OF +static int __init cpuintc_of_init(struct device_node *of_node, + struct device_node *parent) +{ + cpuintc_handle = of_fwnode_handle(of_node); + + irq_domain = irq_domain_create_linear(cpuintc_handle, EXCCODE_INT_NUM, + &loongarch_cpu_intc_irq_domain_ops, NULL); + if (!irq_domain) + panic("Failed to add irqdomain for loongarch CPU"); + + set_handle_irq(&handle_cpu_irq); + + return 0; +} +IRQCHIP_DECLARE(cpu_intc, "loongson,cpu-interrupt-controller", cpuintc_of_init); +#endif + +static int __init liointc_parse_madt(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_lio_pic *liointc_entry = (struct acpi_madt_lio_pic *)header; + + return liointc_acpi_init(irq_domain, liointc_entry); +} + +static int __init eiointc_parse_madt(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_eio_pic *eiointc_entry = (struct acpi_madt_eio_pic *)header; + + return eiointc_acpi_init(irq_domain, eiointc_entry); +} + +static int __init acpi_cascade_irqdomain_init(void) +{ + int r; + + r = acpi_table_parse_madt(ACPI_MADT_TYPE_LIO_PIC, liointc_parse_madt, 0); + if (r < 0) + return r; + + r = acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC, eiointc_parse_madt, 0); + if (r < 0) + return r; + + if (cpu_has_avecint) + r = avecintc_acpi_init(irq_domain); + + return r; +} + +static 
int __init cpuintc_acpi_init(union acpi_subtable_headers *header, + const unsigned long end) +{ + int ret; + + if (irq_domain) + return 0; + + /* Mask interrupts. */ + clear_csr_ecfg(ECFG0_IM); + clear_csr_estat(ESTATF_IP); + + cpuintc_handle = irq_domain_alloc_named_fwnode("CPUINTC"); + irq_domain = irq_domain_create_linear(cpuintc_handle, EXCCODE_INT_NUM, + &loongarch_cpu_intc_irq_domain_ops, NULL); + + if (!irq_domain) + panic("Failed to add irqdomain for LoongArch CPU"); + + set_handle_irq(&handle_cpu_irq); + acpi_set_irq_model(ACPI_IRQ_MODEL_LPIC, lpic_get_gsi_domain_id); + acpi_set_gsi_to_irq_fallback(lpic_gsi_to_irq); + ret = acpi_cascade_irqdomain_init(); + + return ret; +} + +IRQCHIP_ACPI_DECLARE(cpuintc_v1, ACPI_MADT_TYPE_CORE_PIC, + NULL, ACPI_MADT_CORE_PIC_VERSION_V1, cpuintc_acpi_init); diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c new file mode 100644 index 000000000000..ad2105685b48 --- /dev/null +++ b/drivers/irqchip/irq-loongson-eiointc.c @@ -0,0 +1,656 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Loongson Extend I/O Interrupt Controller support + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#define pr_fmt(fmt) "eiointc: " fmt + +#include <linux/cpuhotplug.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqdomain.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/kernel.h> +#include <linux/kvm_para.h> +#include <linux/syscore_ops.h> +#include <asm/numa.h> + +#include "irq-loongson.h" + +#define EIOINTC_REG_NODEMAP 0x14a0 +#define EIOINTC_REG_IPMAP 0x14c0 +#define EIOINTC_REG_ENABLE 0x1600 +#define EIOINTC_REG_BOUNCE 0x1680 +#define EIOINTC_REG_ISR 0x1800 +#define EIOINTC_REG_ROUTE 0x1c00 + +#define EXTIOI_VIRT_FEATURES 0x40000000 +#define EXTIOI_HAS_VIRT_EXTENSION BIT(0) +#define EXTIOI_HAS_ENABLE_OPTION BIT(1) +#define EXTIOI_HAS_INT_ENCODE BIT(2) +#define EXTIOI_HAS_CPU_ENCODE BIT(3) +#define EXTIOI_VIRT_CONFIG 0x40000004 +#define EXTIOI_ENABLE BIT(1) +#define EXTIOI_ENABLE_INT_ENCODE BIT(2) +#define EXTIOI_ENABLE_CPU_ENCODE BIT(3) + +#define VEC_REG_COUNT 4 +#define VEC_COUNT_PER_REG 64 +#define VEC_COUNT (VEC_REG_COUNT * VEC_COUNT_PER_REG) +#define VEC_REG_IDX(irq_id) ((irq_id) / VEC_COUNT_PER_REG) +#define VEC_REG_BIT(irq_id) ((irq_id) % VEC_COUNT_PER_REG) +#define EIOINTC_ALL_ENABLE 0xffffffff +#define EIOINTC_ALL_ENABLE_VEC_MASK(vector) (EIOINTC_ALL_ENABLE & ~BIT(vector & 0x1f)) +#define EIOINTC_REG_ENABLE_VEC(vector) (EIOINTC_REG_ENABLE + ((vector >> 5) << 2)) +#define EIOINTC_USE_CPU_ENCODE BIT(0) +#define EIOINTC_ROUTE_MULT_IP BIT(1) + +#define MAX_EIO_NODES (NR_CPUS / CORES_PER_EIO_NODE) + +/* + * Routing registers are 32bit, and there is 8-bit route setting for every + * interrupt vector. So one Route register contains four vectors routing + * information. 
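+ *
+ * For example, vector 5 is routed through the register at
+ * EIOINTC_REG_ROUTE + 4 (EIOINTC_REG_ROUTE_VEC(5) below), in bit field
+ * 15:8, i.e. EIOINTC_REG_ROUTE_VEC_SHIFT(5) is 8 and the field mask is
+ * 0xff00.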
+ */ +#define EIOINTC_REG_ROUTE_VEC(vector) (EIOINTC_REG_ROUTE + (vector & ~0x03)) +#define EIOINTC_REG_ROUTE_VEC_SHIFT(vector) ((vector & 0x03) << 3) +#define EIOINTC_REG_ROUTE_VEC_MASK(vector) (0xff << EIOINTC_REG_ROUTE_VEC_SHIFT(vector)) + +static int nr_pics; +struct eiointc_priv; + +struct eiointc_ip_route { + struct eiointc_priv *priv; + /* Offset Routed destination IP */ + int start; + int end; +}; + +struct eiointc_priv { + u32 node; + u32 vec_count; + nodemask_t node_map; + cpumask_t cpuspan_map; + struct fwnode_handle *domain_handle; + struct irq_domain *eiointc_domain; + int flags; + irq_hw_number_t parent_hwirq; + struct eiointc_ip_route route_info[VEC_REG_COUNT]; +}; + +static struct eiointc_priv *eiointc_priv[MAX_IO_PICS]; + +static void eiointc_enable(void) +{ + uint64_t misc; + + misc = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC); + misc |= IOCSR_MISC_FUNC_EXT_IOI_EN; + iocsr_write64(misc, LOONGARCH_IOCSR_MISC_FUNC); +} + +static int cpu_to_eio_node(int cpu) +{ + if (!kvm_para_has_feature(KVM_FEATURE_VIRT_EXTIOI)) + return cpu_logical_map(cpu) / CORES_PER_EIO_NODE; + else + return cpu_logical_map(cpu) / CORES_PER_VEIO_NODE; +} + +#ifdef CONFIG_SMP +static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode, nodemask_t *node_map) +{ + int i, node, cpu_node, route_node; + unsigned char coremap; + uint32_t pos_off, data, data_byte, data_mask; + + pos_off = pos & ~3; + data_byte = pos & 3; + data_mask = ~BIT_MASK(data_byte) & 0xf; + + /* Calculate node and coremap of target irq */ + cpu_node = cpu_logical_map(cpu) / CORES_PER_EIO_NODE; + coremap = BIT(cpu_logical_map(cpu) % CORES_PER_EIO_NODE); + + for_each_online_cpu(i) { + node = cpu_to_eio_node(i); + if (!node_isset(node, *node_map)) + continue; + + /* EIO node 0 is in charge of inter-node interrupt dispatch */ + route_node = (node == mnode) ? 
cpu_node : node; + data = ((coremap | (route_node << 4)) << (data_byte * 8)); + csr_any_send(EIOINTC_REG_ROUTE + pos_off, data, data_mask, node * CORES_PER_EIO_NODE); + } +} + +static void veiointc_set_irq_route(unsigned int vector, unsigned int cpu) +{ + unsigned long reg = EIOINTC_REG_ROUTE_VEC(vector); + unsigned int data; + + data = iocsr_read32(reg); + data &= ~EIOINTC_REG_ROUTE_VEC_MASK(vector); + data |= cpu_logical_map(cpu) << EIOINTC_REG_ROUTE_VEC_SHIFT(vector); + iocsr_write32(data, reg); +} + +static DEFINE_RAW_SPINLOCK(affinity_lock); + +static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, bool force) +{ + unsigned int cpu; + unsigned long flags; + uint32_t vector, regaddr; + struct eiointc_priv *priv = d->domain->host_data; + + raw_spin_lock_irqsave(&affinity_lock, flags); + + cpu = cpumask_first_and_and(&priv->cpuspan_map, affinity, cpu_online_mask); + if (cpu >= nr_cpu_ids) { + raw_spin_unlock_irqrestore(&affinity_lock, flags); + return -EINVAL; + } + + vector = d->hwirq; + regaddr = EIOINTC_REG_ENABLE_VEC(vector); + + if (priv->flags & EIOINTC_USE_CPU_ENCODE) { + iocsr_write32(EIOINTC_ALL_ENABLE_VEC_MASK(vector), regaddr); + veiointc_set_irq_route(vector, cpu); + iocsr_write32(EIOINTC_ALL_ENABLE, regaddr); + } else { + /* Mask target vector */ + csr_any_send(regaddr, EIOINTC_ALL_ENABLE_VEC_MASK(vector), + 0x0, priv->node * CORES_PER_EIO_NODE); + + /* Set route for target vector */ + eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map); + + /* Unmask target vector */ + csr_any_send(regaddr, EIOINTC_ALL_ENABLE, + 0x0, priv->node * CORES_PER_EIO_NODE); + } + + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + + raw_spin_unlock_irqrestore(&affinity_lock, flags); + + return IRQ_SET_MASK_OK; +} +#endif + +static int eiointc_index(int node) +{ + int i; + + for (i = 0; i < nr_pics; i++) { + if (node_isset(node, eiointc_priv[i]->node_map)) + return i; + } + + return -1; +} + +static int eiointc_router_init(unsigned int cpu) +{ + int i, bit, cores, index, node; + unsigned int data; + int hwirq, mask; + + node = cpu_to_eio_node(cpu); + index = eiointc_index(node); + + if (index < 0) { + pr_err("Error: invalid nodemap!\n"); + return -EINVAL; + } + + /* Enable cpu interrupt pin from eiointc */ + hwirq = eiointc_priv[index]->parent_hwirq; + mask = BIT(hwirq); + if (eiointc_priv[index]->flags & EIOINTC_ROUTE_MULT_IP) + mask |= BIT(hwirq + 1) | BIT(hwirq + 2) | BIT(hwirq + 3); + set_csr_ecfg(mask); + + if (!(eiointc_priv[index]->flags & EIOINTC_USE_CPU_ENCODE)) + cores = CORES_PER_EIO_NODE; + else + cores = CORES_PER_VEIO_NODE; + + if ((cpu_logical_map(cpu) % cores) == 0) { + eiointc_enable(); + + for (i = 0; i < eiointc_priv[0]->vec_count / 32; i++) { + data = (((1 << (i * 2 + 1)) << 16) | (1 << (i * 2))); + iocsr_write32(data, EIOINTC_REG_NODEMAP + i * 4); + } + + for (i = 0; i < eiointc_priv[0]->vec_count / 32 / 4; i++) { + /* + * Route to interrupt pin, relative offset used here + * Offset 0 means routing to IP0 and so on + * + * If flags is set with EIOINTC_ROUTE_MULT_IP, + * every 64 vector routes to different consecutive + * IPs, otherwise all vector routes to the same IP + */ + if (eiointc_priv[index]->flags & EIOINTC_ROUTE_MULT_IP) { + /* The first 64 vectors route to hwirq */ + bit = BIT(hwirq++ - INT_HWI0); + data = bit | (bit << 8); + + /* The second 64 vectors route to hwirq + 1 */ + bit = BIT(hwirq++ - INT_HWI0); + data |= (bit << 16) | (bit << 24); + + /* + * Route to hwirq + 2/hwirq + 3 separately + * in next loop + */ + 
} else { + bit = BIT(hwirq - INT_HWI0); + data = bit | (bit << 8) | (bit << 16) | (bit << 24); + } + iocsr_write32(data, EIOINTC_REG_IPMAP + i * 4); + } + + for (i = 0; i < eiointc_priv[0]->vec_count / 4; i++) { + /* Route to Node-0 Core-0 */ + if (eiointc_priv[index]->flags & EIOINTC_USE_CPU_ENCODE) + bit = cpu_logical_map(0); + else if (index == 0) + bit = BIT(cpu_logical_map(0)); + else + bit = (eiointc_priv[index]->node << 4) | 1; + + data = bit | (bit << 8) | (bit << 16) | (bit << 24); + iocsr_write32(data, EIOINTC_REG_ROUTE + i * 4); + } + + for (i = 0; i < eiointc_priv[0]->vec_count / 32; i++) { + data = 0xffffffff; + iocsr_write32(data, EIOINTC_REG_ENABLE + i * 4); + iocsr_write32(data, EIOINTC_REG_BOUNCE + i * 4); + } + } + + return 0; +} + +static void eiointc_irq_dispatch(struct irq_desc *desc) +{ + struct eiointc_ip_route *info = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + bool handled = false; + u64 pending; + int i; + + chained_irq_enter(chip, desc); + + /* + * If EIOINTC_ROUTE_MULT_IP is set, every 64 interrupt vectors in + * eiointc interrupt controller routes to different cpu interrupt pins + * + * Every cpu interrupt pin has its own irq handler, it is ok to + * read ISR for these 64 interrupt vectors rather than all vectors + */ + for (i = info->start; i < info->end; i++) { + pending = iocsr_read64(EIOINTC_REG_ISR + (i << 3)); + + /* Skip handling if pending bitmap is zero */ + if (!pending) + continue; + + /* Clear the IRQs */ + iocsr_write64(pending, EIOINTC_REG_ISR + (i << 3)); + while (pending) { + int bit = __ffs(pending); + int irq = bit + VEC_COUNT_PER_REG * i; + + generic_handle_domain_irq(info->priv->eiointc_domain, irq); + pending &= ~BIT(bit); + handled = true; + } + } + + if (!handled) + spurious_interrupt(); + + chained_irq_exit(chip, desc); +} + +static void eiointc_ack_irq(struct irq_data *d) +{ +} + +static void eiointc_mask_irq(struct irq_data *d) +{ +} + +static void eiointc_unmask_irq(struct irq_data *d) +{ +} + +static struct irq_chip eiointc_irq_chip = { + .name = "EIOINTC", + .irq_ack = eiointc_ack_irq, + .irq_mask = eiointc_mask_irq, + .irq_unmask = eiointc_unmask_irq, +#ifdef CONFIG_SMP + .irq_set_affinity = eiointc_set_irq_affinity, +#endif +}; + +static int eiointc_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + int ret; + unsigned int i, type; + unsigned long hwirq = 0; + struct eiointc_priv *priv = domain->host_data; + + ret = irq_domain_translate_onecell(domain, arg, &hwirq, &type); + if (ret) + return ret; + + for (i = 0; i < nr_irqs; i++) { + irq_domain_set_info(domain, virq + i, hwirq + i, &eiointc_irq_chip, + priv, handle_edge_irq, NULL, NULL); + } + + return 0; +} + +static void eiointc_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + int i; + + for (i = 0; i < nr_irqs; i++) { + struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); + + irq_set_handler(virq + i, NULL); + irq_domain_reset_irq_data(d); + } +} + +static const struct irq_domain_ops eiointc_domain_ops = { + .translate = irq_domain_translate_onecell, + .alloc = eiointc_domain_alloc, + .free = eiointc_domain_free, +}; + +static void acpi_set_vec_parent(int node, struct irq_domain *parent, struct acpi_vector_group *vec_group) +{ + int i; + + for (i = 0; i < MAX_IO_PICS; i++) { + if (node == vec_group[i].node) { + vec_group[i].parent = parent; + return; + } + } +} + +static struct irq_domain *acpi_get_vec_parent(int node, struct 
acpi_vector_group *vec_group) +{ + int i; + + for (i = 0; i < MAX_IO_PICS; i++) { + if (node == vec_group[i].node) + return vec_group[i].parent; + } + return NULL; +} + +static int eiointc_suspend(void *data) +{ + return 0; +} + +static void eiointc_resume(void *data) +{ + eiointc_router_init(0); +} + +static const struct syscore_ops eiointc_syscore_ops = { + .suspend = eiointc_suspend, + .resume = eiointc_resume, +}; + +static struct syscore eiointc_syscore = { + .ops = &eiointc_syscore_ops, +}; + +static int __init pch_pic_parse_madt(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_bio_pic *pchpic_entry = (struct acpi_madt_bio_pic *)header; + unsigned int node = (pchpic_entry->address >> 44) & 0xf; + struct irq_domain *parent = acpi_get_vec_parent(node, pch_group); + + if (parent) + return pch_pic_acpi_init(parent, pchpic_entry); + + return 0; +} + +static int __init pch_msi_parse_madt(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct irq_domain *parent; + struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header; + int node; + + if (cpu_has_flatmode) + node = early_cpu_to_node(eiointc_priv[nr_pics - 1]->node * CORES_PER_EIO_NODE); + else + node = eiointc_priv[nr_pics - 1]->node; + + parent = acpi_get_vec_parent(node, msi_group); + + if (parent) + return pch_msi_acpi_init(parent, pchmsi_entry); + + return 0; +} + +static int __init acpi_cascade_irqdomain_init(void) +{ + int r; + + r = acpi_table_parse_madt(ACPI_MADT_TYPE_BIO_PIC, pch_pic_parse_madt, 0); + if (r < 0) + return r; + + if (cpu_has_avecint) + return 0; + + r = acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1); + if (r < 0) + return r; + + return 0; +} + +static int __init eiointc_init(struct eiointc_priv *priv, int parent_irq, + u64 node_map) +{ + int i, val; + + node_map = node_map ? node_map : -1ULL; + for_each_possible_cpu(i) { + if (node_map & (1ULL << (cpu_to_eio_node(i)))) { + node_set(cpu_to_eio_node(i), priv->node_map); + cpumask_or(&priv->cpuspan_map, &priv->cpuspan_map, + cpumask_of(i)); + } + } + + priv->eiointc_domain = irq_domain_create_linear(priv->domain_handle, + priv->vec_count, + &eiointc_domain_ops, + priv); + if (!priv->eiointc_domain) { + pr_err("loongson-extioi: cannot add IRQ domain\n"); + return -ENOMEM; + } + + if (kvm_para_has_feature(KVM_FEATURE_VIRT_EXTIOI)) { + val = iocsr_read32(EXTIOI_VIRT_FEATURES); + /* + * With EXTIOI_ENABLE_CPU_ENCODE set + * interrupts can route to 256 vCPUs. + */ + if (val & EXTIOI_HAS_CPU_ENCODE) { + val = iocsr_read32(EXTIOI_VIRT_CONFIG); + val |= EXTIOI_ENABLE_CPU_ENCODE; + iocsr_write32(val, EXTIOI_VIRT_CONFIG); + priv->flags = EIOINTC_USE_CPU_ENCODE; + } + } + + eiointc_priv[nr_pics++] = priv; + /* + * Only the first eiointc device on VM supports routing to + * different CPU interrupt pins. 
The later eiointc devices use + * generic method if there are multiple eiointc devices in future + */ + if (cpu_has_hypervisor && (nr_pics == 1)) { + priv->flags |= EIOINTC_ROUTE_MULT_IP; + priv->parent_hwirq = INT_HWI0; + } + + if (priv->flags & EIOINTC_ROUTE_MULT_IP) { + for (i = 0; i < priv->vec_count / VEC_COUNT_PER_REG; i++) { + priv->route_info[i].start = priv->parent_hwirq - INT_HWI0 + i; + priv->route_info[i].end = priv->route_info[i].start + 1; + priv->route_info[i].priv = priv; + parent_irq = get_percpu_irq(priv->parent_hwirq + i); + irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, + &priv->route_info[i]); + } + } else { + priv->route_info[0].start = 0; + priv->route_info[0].end = priv->vec_count / VEC_COUNT_PER_REG; + priv->route_info[0].priv = priv; + irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, + &priv->route_info[0]); + } + eiointc_router_init(0); + + if (nr_pics == 1) { + register_syscore(&eiointc_syscore); + cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_EIOINTC_STARTING, + "irqchip/loongarch/eiointc:starting", + eiointc_router_init, NULL); + } + + return 0; +} + +int __init eiointc_acpi_init(struct irq_domain *parent, + struct acpi_madt_eio_pic *acpi_eiointc) +{ + int parent_irq, ret; + struct eiointc_priv *priv; + int node; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->domain_handle = irq_domain_alloc_named_id_fwnode("EIOPIC", + acpi_eiointc->node); + if (!priv->domain_handle) { + pr_err("Unable to allocate domain handle\n"); + goto out_free_priv; + } + + priv->vec_count = VEC_COUNT; + priv->node = acpi_eiointc->node; + priv->parent_hwirq = acpi_eiointc->cascade; + parent_irq = irq_create_mapping(parent, acpi_eiointc->cascade); + + ret = eiointc_init(priv, parent_irq, acpi_eiointc->node_map); + if (ret < 0) + goto out_free_handle; + + if (cpu_has_flatmode) + node = early_cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE); + else + node = acpi_eiointc->node; + acpi_set_vec_parent(node, priv->eiointc_domain, pch_group); + acpi_set_vec_parent(node, priv->eiointc_domain, msi_group); + + ret = acpi_cascade_irqdomain_init(); + if (ret < 0) + goto out_free_handle; + + return ret; + +out_free_handle: + irq_domain_free_fwnode(priv->domain_handle); + priv->domain_handle = NULL; +out_free_priv: + kfree(priv); + + return -ENOMEM; +} + +static int __init eiointc_of_init(struct device_node *of_node, + struct device_node *parent) +{ + struct eiointc_priv *priv; + struct irq_data *irq_data; + int parent_irq, ret; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + parent_irq = irq_of_parse_and_map(of_node, 0); + if (parent_irq <= 0) { + ret = -ENODEV; + goto out_free_priv; + } + + ret = irq_set_handler_data(parent_irq, priv); + if (ret < 0) + goto out_free_priv; + + irq_data = irq_get_irq_data(parent_irq); + if (!irq_data) { + ret = -ENODEV; + goto out_free_priv; + } + + /* + * In particular, the number of devices supported by the LS2K0500 + * extended I/O interrupt vector is 128. 
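+ * The other supported eiointc implementations use the full VEC_COUNT
+ * of 256 vectors (VEC_REG_COUNT * VEC_COUNT_PER_REG).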
+ */ + if (of_device_is_compatible(of_node, "loongson,ls2k0500-eiointc")) + priv->vec_count = 128; + else + priv->vec_count = VEC_COUNT; + priv->parent_hwirq = irqd_to_hwirq(irq_data); + priv->node = 0; + priv->domain_handle = of_fwnode_handle(of_node); + + ret = eiointc_init(priv, parent_irq, 0); + if (ret < 0) + goto out_free_priv; + + return 0; + +out_free_priv: + kfree(priv); + return ret; +} + +IRQCHIP_DECLARE(loongson_ls2k0500_eiointc, "loongson,ls2k0500-eiointc", eiointc_of_init); +IRQCHIP_DECLARE(loongson_ls2k2000_eiointc, "loongson,ls2k2000-eiointc", eiointc_of_init); diff --git a/drivers/irqchip/irq-loongson-htpic.c b/drivers/irqchip/irq-loongson-htpic.c new file mode 100644 index 000000000000..1c691c4be989 --- /dev/null +++ b/drivers/irqchip/irq-loongson-htpic.c @@ -0,0 +1,150 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020, Jiaxun Yang <jiaxun.yang@flygoat.com> + * Loongson HTPIC IRQ support + */ + +#include <linux/init.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irq.h> +#include <linux/io.h> +#include <linux/syscore_ops.h> + +#include <asm/i8259.h> + +#define HTPIC_MAX_PARENT_IRQ 4 +#define HTINT_NUM_VECTORS 8 +#define HTINT_EN_OFF 0x20 + +struct loongson_htpic { + void __iomem *base; + struct irq_domain *domain; +}; + +static struct loongson_htpic *htpic; + +static void htpic_irq_dispatch(struct irq_desc *desc) +{ + struct loongson_htpic *priv = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + uint32_t pending; + + chained_irq_enter(chip, desc); + pending = readl(priv->base); + /* Ack all IRQs at once, otherwise IRQ flood might happen */ + writel(pending, priv->base); + + if (!pending) + spurious_interrupt(); + + while (pending) { + int bit = __ffs(pending); + + if (unlikely(bit > 15)) { + spurious_interrupt(); + break; + } + + generic_handle_domain_irq(priv->domain, bit); + pending &= ~BIT(bit); + } + chained_irq_exit(chip, desc); +} + +static void htpic_reg_init(void) +{ + int i; + + for (i = 0; i < HTINT_NUM_VECTORS; i++) { + /* Disable all HT Vectors */ + writel(0x0, htpic->base + HTINT_EN_OFF + i * 0x4); + /* Read back to force write */ + (void) readl(htpic->base + i * 0x4); + /* Ack all possible pending IRQs */ + writel(GENMASK(31, 0), htpic->base + i * 0x4); + } + + /* Enable 16 vectors for PIC */ + writel(0xffff, htpic->base + HTINT_EN_OFF); +} + +static void htpic_resume(void *data) +{ + htpic_reg_init(); +} + +static const struct syscore_ops htpic_syscore_ops = { + .resume = htpic_resume, +}; + +static struct syscore htpic_syscore = { + .ops = &htpic_syscore_ops, +}; + +static int __init htpic_of_init(struct device_node *node, struct device_node *parent) +{ + unsigned int parent_irq[4]; + int i, err; + int num_parents = 0; + + if (htpic) { + pr_err("loongson-htpic: Only one HTPIC is allowed in the system\n"); + return -ENODEV; + } + + htpic = kzalloc(sizeof(*htpic), GFP_KERNEL); + if (!htpic) + return -ENOMEM; + + htpic->base = of_iomap(node, 0); + if (!htpic->base) { + err = -ENODEV; + goto out_free; + } + + htpic->domain = __init_i8259_irqs(node); + if (!htpic->domain) { + pr_err("loongson-htpic: Failed to initialize i8259 IRQs\n"); + err = -ENOMEM; + goto out_iounmap; + } + + /* Interrupt may come from any of the 4 interrupt line */ + for (i = 0; i < HTPIC_MAX_PARENT_IRQ; i++) { + parent_irq[i] = irq_of_parse_and_map(node, i); + if (parent_irq[i] <= 0) + break; + + num_parents++; + } + + if (!num_parents) { 
+ pr_err("loongson-htpic: Failed to get parent irqs\n"); + err = -ENODEV; + goto out_remove_domain; + } + + htpic_reg_init(); + + for (i = 0; i < num_parents; i++) { + irq_set_chained_handler_and_data(parent_irq[i], + htpic_irq_dispatch, htpic); + } + + register_syscore(&htpic_syscore); + + return 0; + +out_remove_domain: + irq_domain_remove(htpic->domain); +out_iounmap: + iounmap(htpic->base); +out_free: + kfree(htpic); + return err; +} + +IRQCHIP_DECLARE(loongson_htpic, "loongson,htpic-1.0", htpic_of_init); diff --git a/drivers/irqchip/irq-loongson-htvec.c b/drivers/irqchip/irq-loongson-htvec.c new file mode 100644 index 000000000000..d2be8e954e92 --- /dev/null +++ b/drivers/irqchip/irq-loongson-htvec.c @@ -0,0 +1,331 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020, Jiaxun Yang <jiaxun.yang@flygoat.com> + * Loongson HyperTransport Interrupt Vector support + */ + +#define pr_fmt(fmt) "htvec: " fmt + +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqdomain.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/syscore_ops.h> + +#include "irq-loongson.h" + +/* Registers */ +#define HTVEC_EN_OFF 0x20 +#define HTVEC_MAX_PARENT_IRQ 8 +#define VEC_COUNT_PER_REG 32 +#define VEC_REG_IDX(irq_id) ((irq_id) / VEC_COUNT_PER_REG) +#define VEC_REG_BIT(irq_id) ((irq_id) % VEC_COUNT_PER_REG) + +struct htvec { + int num_parents; + void __iomem *base; + struct irq_domain *htvec_domain; + raw_spinlock_t htvec_lock; + u32 saved_vec_en[HTVEC_MAX_PARENT_IRQ]; +}; + +static struct htvec *htvec_priv; + +static void htvec_irq_dispatch(struct irq_desc *desc) +{ + int i; + u32 pending; + bool handled = false; + struct irq_chip *chip = irq_desc_get_chip(desc); + struct htvec *priv = irq_desc_get_handler_data(desc); + + chained_irq_enter(chip, desc); + + for (i = 0; i < priv->num_parents; i++) { + pending = readl(priv->base + 4 * i); + while (pending) { + int bit = __ffs(pending); + + generic_handle_domain_irq(priv->htvec_domain, + bit + VEC_COUNT_PER_REG * i); + pending &= ~BIT(bit); + handled = true; + } + } + + if (!handled) + spurious_interrupt(); + + chained_irq_exit(chip, desc); +} + +static void htvec_ack_irq(struct irq_data *d) +{ + struct htvec *priv = irq_data_get_irq_chip_data(d); + + writel(BIT(VEC_REG_BIT(d->hwirq)), + priv->base + VEC_REG_IDX(d->hwirq) * 4); +} + +static void htvec_mask_irq(struct irq_data *d) +{ + u32 reg; + void __iomem *addr; + struct htvec *priv = irq_data_get_irq_chip_data(d); + + raw_spin_lock(&priv->htvec_lock); + addr = priv->base + HTVEC_EN_OFF; + addr += VEC_REG_IDX(d->hwirq) * 4; + reg = readl(addr); + reg &= ~BIT(VEC_REG_BIT(d->hwirq)); + writel(reg, addr); + raw_spin_unlock(&priv->htvec_lock); +} + +static void htvec_unmask_irq(struct irq_data *d) +{ + u32 reg; + void __iomem *addr; + struct htvec *priv = irq_data_get_irq_chip_data(d); + + raw_spin_lock(&priv->htvec_lock); + addr = priv->base + HTVEC_EN_OFF; + addr += VEC_REG_IDX(d->hwirq) * 4; + reg = readl(addr); + reg |= BIT(VEC_REG_BIT(d->hwirq)); + writel(reg, addr); + raw_spin_unlock(&priv->htvec_lock); +} + +static struct irq_chip htvec_irq_chip = { + .name = "LOONGSON_HTVEC", + .irq_mask = htvec_mask_irq, + .irq_unmask = htvec_unmask_irq, + .irq_ack = htvec_ack_irq, +}; + +static int htvec_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + int ret; + unsigned long hwirq; + 
unsigned int type, i; + struct htvec *priv = domain->host_data; + + ret = irq_domain_translate_onecell(domain, arg, &hwirq, &type); + if (ret) + return ret; + + for (i = 0; i < nr_irqs; i++) { + irq_domain_set_info(domain, virq + i, hwirq + i, &htvec_irq_chip, + priv, handle_edge_irq, NULL, NULL); + } + + return 0; +} + +static void htvec_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + int i; + + for (i = 0; i < nr_irqs; i++) { + struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); + + irq_set_handler(virq + i, NULL); + irq_domain_reset_irq_data(d); + } +} + +static const struct irq_domain_ops htvec_domain_ops = { + .translate = irq_domain_translate_onecell, + .alloc = htvec_domain_alloc, + .free = htvec_domain_free, +}; + +static void htvec_reset(struct htvec *priv) +{ + u32 idx; + + /* Clear IRQ cause registers, mask all interrupts */ + for (idx = 0; idx < priv->num_parents; idx++) { + writel_relaxed(0x0, priv->base + HTVEC_EN_OFF + 4 * idx); + writel_relaxed(0xFFFFFFFF, priv->base + 4 * idx); + } +} + +static int htvec_suspend(void *data) +{ + int i; + + for (i = 0; i < htvec_priv->num_parents; i++) + htvec_priv->saved_vec_en[i] = readl(htvec_priv->base + HTVEC_EN_OFF + 4 * i); + + return 0; +} + +static void htvec_resume(void *data) +{ + int i; + + for (i = 0; i < htvec_priv->num_parents; i++) + writel(htvec_priv->saved_vec_en[i], htvec_priv->base + HTVEC_EN_OFF + 4 * i); +} + +static const struct syscore_ops htvec_syscore_ops = { + .suspend = htvec_suspend, + .resume = htvec_resume, +}; + +static struct syscore htvec_syscore = { + .ops = &htvec_syscore_ops, +}; + +static int htvec_init(phys_addr_t addr, unsigned long size, + int num_parents, int parent_irq[], struct fwnode_handle *domain_handle) +{ + int i; + struct htvec *priv; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->num_parents = num_parents; + priv->base = ioremap(addr, size); + raw_spin_lock_init(&priv->htvec_lock); + + /* Setup IRQ domain */ + priv->htvec_domain = irq_domain_create_linear(domain_handle, + (VEC_COUNT_PER_REG * priv->num_parents), + &htvec_domain_ops, priv); + if (!priv->htvec_domain) { + pr_err("loongson-htvec: cannot add IRQ domain\n"); + goto iounmap_base; + } + + htvec_reset(priv); + + for (i = 0; i < priv->num_parents; i++) { + irq_set_chained_handler_and_data(parent_irq[i], + htvec_irq_dispatch, priv); + } + + htvec_priv = priv; + + register_syscore(&htvec_syscore); + + return 0; + +iounmap_base: + iounmap(priv->base); + kfree(priv); + + return -EINVAL; +} + +#ifdef CONFIG_OF + +static int htvec_of_init(struct device_node *node, + struct device_node *parent) +{ + int i, err; + int parent_irq[8]; + int num_parents = 0; + struct resource res; + + if (of_address_to_resource(node, 0, &res)) + return -EINVAL; + + /* Interrupt may come from any of the 8 interrupt lines */ + for (i = 0; i < HTVEC_MAX_PARENT_IRQ; i++) { + parent_irq[i] = irq_of_parse_and_map(node, i); + if (parent_irq[i] <= 0) + break; + + num_parents++; + } + + err = htvec_init(res.start, resource_size(&res), + num_parents, parent_irq, of_fwnode_handle(node)); + if (err < 0) + return err; + + return 0; +} + +IRQCHIP_DECLARE(htvec, "loongson,htvec-1.0", htvec_of_init); + +#endif + +#ifdef CONFIG_ACPI +static int __init pch_pic_parse_madt(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_bio_pic *pchpic_entry = (struct acpi_madt_bio_pic *)header; + + return pch_pic_acpi_init(htvec_priv->htvec_domain, pchpic_entry); +} + 
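+/*
+ * Illustrative sketch with a hypothetical helper, not used by the
+ * driver: how htvec_mask_irq() and htvec_unmask_irq() above locate a
+ * vector's enable bit. With 32 vectors per 32-bit register, vector 37
+ * lives in the second enable register (HTVEC_EN_OFF + 4) at bit 5.
+ */
+static inline void __maybe_unused
+htvec_vec_to_en_bit(irq_hw_number_t hwirq, unsigned long *offset, u32 *bit)
+{
+	*offset = HTVEC_EN_OFF + VEC_REG_IDX(hwirq) * 4; /* enable register */
+	*bit = BIT(VEC_REG_BIT(hwirq));                  /* bit within it */
+}
+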
+static int __init pch_msi_parse_madt(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header; + + return pch_msi_acpi_init(htvec_priv->htvec_domain, pchmsi_entry); +} + +static int __init acpi_cascade_irqdomain_init(void) +{ + int r; + + r = acpi_table_parse_madt(ACPI_MADT_TYPE_BIO_PIC, pch_pic_parse_madt, 0); + if (r < 0) + return r; + + r = acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 0); + if (r < 0) + return r; + + return 0; +} + +int __init htvec_acpi_init(struct irq_domain *parent, + struct acpi_madt_ht_pic *acpi_htvec) +{ + int i, ret; + int num_parents, parent_irq[8]; + struct fwnode_handle *domain_handle; + + if (!acpi_htvec) + return -EINVAL; + + num_parents = HTVEC_MAX_PARENT_IRQ; + + domain_handle = irq_domain_alloc_fwnode(&acpi_htvec->address); + if (!domain_handle) { + pr_err("Unable to allocate domain handle\n"); + return -ENOMEM; + } + + /* Interrupt may come from any of the 8 interrupt lines */ + for (i = 0; i < HTVEC_MAX_PARENT_IRQ; i++) + parent_irq[i] = irq_create_mapping(parent, acpi_htvec->cascade[i]); + + ret = htvec_init(acpi_htvec->address, acpi_htvec->size, + num_parents, parent_irq, domain_handle); + + if (ret == 0) + ret = acpi_cascade_irqdomain_init(); + else + irq_domain_free_fwnode(domain_handle); + + return ret; +} + +#endif diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c new file mode 100644 index 000000000000..0033c2188abc --- /dev/null +++ b/drivers/irqchip/irq-loongson-liointc.c @@ -0,0 +1,421 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020, Jiaxun Yang <jiaxun.yang@flygoat.com> + * Loongson Local IO Interrupt Controller support + */ + +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/irqchip.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/io.h> +#include <linux/smp.h> +#include <linux/irqchip/chained_irq.h> + +#ifdef CONFIG_MIPS +#include <loongson.h> +#else +#include <asm/loongson.h> +#endif + +#include "irq-loongson.h" + +#define LIOINTC_CHIP_IRQ 32 +#define LIOINTC_NUM_PARENT 4 +#define LIOINTC_NUM_CORES 4 + +#define LIOINTC_INTC_CHIP_START 0x20 + +#define LIOINTC_REG_INTC_STATUS(core) (LIOINTC_INTC_CHIP_START + 0x20 + (core) * 8) +#define LIOINTC_REG_INTC_EN_STATUS (LIOINTC_INTC_CHIP_START + 0x04) +#define LIOINTC_REG_INTC_ENABLE (LIOINTC_INTC_CHIP_START + 0x08) +#define LIOINTC_REG_INTC_DISABLE (LIOINTC_INTC_CHIP_START + 0x0c) +/* + * LIOINTC_REG_INTC_POL register is only valid for Loongson-2K series, and + * Loongson-3 series behave as noops. 
+ */ +#define LIOINTC_REG_INTC_POL (LIOINTC_INTC_CHIP_START + 0x10) +#define LIOINTC_REG_INTC_EDGE (LIOINTC_INTC_CHIP_START + 0x14) + +#define LIOINTC_SHIFT_INTx 4 + +#define LIOINTC_ERRATA_IRQ 10 + +#if defined(CONFIG_MIPS) +#define liointc_core_id get_ebase_cpunum() +#else +#define liointc_core_id get_csr_cpuid() +#endif + +struct liointc_handler_data { + struct liointc_priv *priv; + u32 parent_int_map; +}; + +struct liointc_priv { + struct irq_chip_generic *gc; + struct liointc_handler_data handler[LIOINTC_NUM_PARENT]; + void __iomem *core_isr[LIOINTC_NUM_CORES]; + u8 map_cache[LIOINTC_CHIP_IRQ]; + u32 int_pol; + u32 int_edge; + bool has_lpc_irq_errata; +}; + +struct fwnode_handle *liointc_handle; + +static void liointc_chained_handle_irq(struct irq_desc *desc) +{ + struct liointc_handler_data *handler = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + struct irq_chip_generic *gc = handler->priv->gc; + int core = liointc_core_id % LIOINTC_NUM_CORES; + u32 pending; + + chained_irq_enter(chip, desc); + + pending = readl(handler->priv->core_isr[core]); + + if (!pending) { + /* Always blame LPC IRQ if we have that bug */ + if (handler->priv->has_lpc_irq_errata && + (handler->parent_int_map & gc->mask_cache & + BIT(LIOINTC_ERRATA_IRQ))) + pending = BIT(LIOINTC_ERRATA_IRQ); + else + spurious_interrupt(); + } + + while (pending) { + int bit = __ffs(pending); + + generic_handle_domain_irq(gc->domain, bit); + pending &= ~BIT(bit); + } + + chained_irq_exit(chip, desc); +} + +static void liointc_set_bit(struct irq_chip_generic *gc, + unsigned int offset, + u32 mask, bool set) +{ + if (set) + writel(readl(gc->reg_base + offset) | mask, + gc->reg_base + offset); + else + writel(readl(gc->reg_base + offset) & ~mask, + gc->reg_base + offset); +} + +static int liointc_set_type(struct irq_data *data, unsigned int type) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); + u32 mask = data->mask; + + guard(raw_spinlock)(&gc->lock); + switch (type) { + case IRQ_TYPE_LEVEL_HIGH: + liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, false); + liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, false); + break; + case IRQ_TYPE_LEVEL_LOW: + liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, false); + liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, true); + break; + case IRQ_TYPE_EDGE_RISING: + liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, true); + liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, false); + break; + case IRQ_TYPE_EDGE_FALLING: + liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, true); + liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, true); + break; + default: + return -EINVAL; + } + + irqd_set_trigger_type(data, type); + return 0; +} + +static void liointc_suspend(struct irq_chip_generic *gc) +{ + struct liointc_priv *priv = gc->private; + + priv->int_pol = readl(gc->reg_base + LIOINTC_REG_INTC_POL); + priv->int_edge = readl(gc->reg_base + LIOINTC_REG_INTC_EDGE); +} + +static void liointc_resume(struct irq_chip_generic *gc) +{ + struct liointc_priv *priv = gc->private; + int i; + + guard(raw_spinlock_irqsave)(&gc->lock); + /* Disable all at first */ + writel(0xffffffff, gc->reg_base + LIOINTC_REG_INTC_DISABLE); + /* Restore map cache */ + for (i = 0; i < LIOINTC_CHIP_IRQ; i++) + writeb(priv->map_cache[i], gc->reg_base + i); + writel(priv->int_pol, gc->reg_base + LIOINTC_REG_INTC_POL); + writel(priv->int_edge, gc->reg_base + LIOINTC_REG_INTC_EDGE); + /* Restore mask cache */ + writel(gc->mask_cache, gc->reg_base + LIOINTC_REG_INTC_ENABLE); +} + 
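+/*
+ * Illustrative sketch with a hypothetical helper, not part of the
+ * driver: the per-IRQ route byte restored by liointc_resume() above
+ * packs a core mask in the low nibble and a parent INTx mask in the
+ * high nibble, as generated in liointc_init() below. For example,
+ * core 0 routed to int1 yields 0x21.
+ */
+static inline u8 __maybe_unused liointc_route_byte(unsigned int core,
+						   unsigned int intx)
+{
+	return BIT(core) | (BIT(intx) << LIOINTC_SHIFT_INTx);
+}
+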
+static int parent_irq[LIOINTC_NUM_PARENT]; +static u32 parent_int_map[LIOINTC_NUM_PARENT]; +static const char *const parent_names[] = {"int0", "int1", "int2", "int3"}; +static const char *const core_reg_names[] = {"isr0", "isr1", "isr2", "isr3"}; + +static int liointc_domain_xlate(struct irq_domain *d, struct device_node *ctrlr, + const u32 *intspec, unsigned int intsize, + unsigned long *out_hwirq, unsigned int *out_type) +{ + if (WARN_ON(intsize < 1)) + return -EINVAL; + *out_hwirq = intspec[0] - GSI_MIN_CPU_IRQ; + + if (intsize > 1) + *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK; + else + *out_type = IRQ_TYPE_NONE; + + return 0; +} + +static const struct irq_domain_ops acpi_irq_gc_ops = { + .map = irq_map_generic_chip, + .unmap = irq_unmap_generic_chip, + .xlate = liointc_domain_xlate, +}; + +static int liointc_init(phys_addr_t addr, unsigned long size, int revision, + struct fwnode_handle *domain_handle, struct device_node *node) +{ + int i, err; + void __iomem *base; + struct irq_chip_type *ct; + struct irq_chip_generic *gc; + struct irq_domain *domain; + struct liointc_priv *priv; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + base = ioremap(addr, size); + if (!base) + goto out_free_priv; + + for (i = 0; i < LIOINTC_NUM_CORES; i++) + priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS(i); + + for (i = 0; i < LIOINTC_NUM_PARENT; i++) + priv->handler[i].parent_int_map = parent_int_map[i]; + + if (revision > 1) { + for (i = 0; i < LIOINTC_NUM_CORES; i++) { + int index = of_property_match_string(node, + "reg-names", core_reg_names[i]); + + if (index < 0) + continue; + + priv->core_isr[i] = of_iomap(node, index); + } + + if (!priv->core_isr[0]) + goto out_iounmap; + } + + /* Setup IRQ domain */ + if (!acpi_disabled) + domain = irq_domain_create_linear(domain_handle, LIOINTC_CHIP_IRQ, + &acpi_irq_gc_ops, priv); + else + domain = irq_domain_create_linear(domain_handle, LIOINTC_CHIP_IRQ, + &irq_generic_chip_ops, priv); + if (!domain) { + pr_err("loongson-liointc: cannot add IRQ domain\n"); + goto out_iounmap; + } + + err = irq_alloc_domain_generic_chips(domain, LIOINTC_CHIP_IRQ, 1, + (node ? 
node->full_name : "LIOINTC"), + handle_level_irq, 0, IRQ_NOPROBE, 0); + if (err) { + pr_err("loongson-liointc: unable to register IRQ domain\n"); + goto out_free_domain; + } + + + /* Disable all IRQs */ + writel(0xffffffff, base + LIOINTC_REG_INTC_DISABLE); + /* Set to level triggered */ + writel(0x0, base + LIOINTC_REG_INTC_EDGE); + + /* Generate parent INT part of map cache */ + for (i = 0; i < LIOINTC_NUM_PARENT; i++) { + u32 pending = priv->handler[i].parent_int_map; + + while (pending) { + int bit = __ffs(pending); + + priv->map_cache[bit] = BIT(i) << LIOINTC_SHIFT_INTx; + pending &= ~BIT(bit); + } + } + + for (i = 0; i < LIOINTC_CHIP_IRQ; i++) { + /* Generate core part of map cache */ + priv->map_cache[i] |= BIT(loongson_sysconf.boot_cpu_id); + writeb(priv->map_cache[i], base + i); + } + + gc = irq_get_domain_generic_chip(domain, 0); + gc->private = priv; + gc->reg_base = base; + gc->domain = domain; + gc->suspend = liointc_suspend; + gc->resume = liointc_resume; + + ct = gc->chip_types; + ct->regs.enable = LIOINTC_REG_INTC_ENABLE; + ct->regs.disable = LIOINTC_REG_INTC_DISABLE; + ct->chip.irq_unmask = irq_gc_unmask_enable_reg; + ct->chip.irq_mask = irq_gc_mask_disable_reg; + ct->chip.irq_mask_ack = irq_gc_mask_disable_reg; + ct->chip.irq_set_type = liointc_set_type; + ct->chip.flags = IRQCHIP_SKIP_SET_WAKE; + + gc->mask_cache = 0; + priv->gc = gc; + + for (i = 0; i < LIOINTC_NUM_PARENT; i++) { + if (parent_irq[i] <= 0) + continue; + + priv->handler[i].priv = priv; + irq_set_chained_handler_and_data(parent_irq[i], + liointc_chained_handle_irq, &priv->handler[i]); + } + + liointc_handle = domain_handle; + return 0; + +out_free_domain: + irq_domain_remove(domain); +out_iounmap: + iounmap(base); +out_free_priv: + kfree(priv); + + return -EINVAL; +} + +#ifdef CONFIG_OF + +static int __init liointc_of_init(struct device_node *node, + struct device_node *parent) +{ + bool have_parent = FALSE; + int sz, i, index, revision, err = 0; + struct resource res; + + if (!of_device_is_compatible(node, "loongson,liointc-2.0")) { + index = 0; + revision = 1; + } else { + index = of_property_match_string(node, "reg-names", "main"); + revision = 2; + } + + if (of_address_to_resource(node, index, &res)) + return -EINVAL; + + for (i = 0; i < LIOINTC_NUM_PARENT; i++) { + parent_irq[i] = of_irq_get_byname(node, parent_names[i]); + if (parent_irq[i] > 0) + have_parent = TRUE; + } + if (!have_parent) + return -ENODEV; + + sz = of_property_read_variable_u32_array(node, + "loongson,parent_int_map", + &parent_int_map[0], + LIOINTC_NUM_PARENT, + LIOINTC_NUM_PARENT); + if (sz < 4) { + pr_err("loongson-liointc: No parent_int_map\n"); + return -ENODEV; + } + + err = liointc_init(res.start, resource_size(&res), + revision, of_fwnode_handle(node), node); + if (err < 0) + return err; + + return 0; +} + +IRQCHIP_DECLARE(loongson_liointc_1_0, "loongson,liointc-1.0", liointc_of_init); +IRQCHIP_DECLARE(loongson_liointc_1_0a, "loongson,liointc-1.0a", liointc_of_init); +IRQCHIP_DECLARE(loongson_liointc_2_0, "loongson,liointc-2.0", liointc_of_init); + +#endif + +#ifdef CONFIG_ACPI +static int __init htintc_parse_madt(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_ht_pic *htintc_entry = (struct acpi_madt_ht_pic *)header; + struct irq_domain *parent = irq_find_matching_fwnode(liointc_handle, DOMAIN_BUS_ANY); + + return htvec_acpi_init(parent, htintc_entry); +} + +static int __init acpi_cascade_irqdomain_init(void) +{ + int r; + + r = acpi_table_parse_madt(ACPI_MADT_TYPE_HT_PIC, 
htintc_parse_madt, 0); + if (r < 0) + return r; + + return 0; +} + +int __init liointc_acpi_init(struct irq_domain *parent, struct acpi_madt_lio_pic *acpi_liointc) +{ + int ret; + struct fwnode_handle *domain_handle; + + parent_int_map[0] = acpi_liointc->cascade_map[0]; + parent_int_map[1] = acpi_liointc->cascade_map[1]; + + parent_irq[0] = irq_create_mapping(parent, acpi_liointc->cascade[0]); + parent_irq[1] = irq_create_mapping(parent, acpi_liointc->cascade[1]); + + domain_handle = irq_domain_alloc_fwnode(&acpi_liointc->address); + if (!domain_handle) { + pr_err("Unable to allocate domain handle\n"); + return -ENOMEM; + } + + ret = liointc_init(acpi_liointc->address, acpi_liointc->size, + 1, domain_handle, NULL); + if (ret == 0) + ret = acpi_cascade_irqdomain_init(); + else + irq_domain_free_fwnode(domain_handle); + + return ret; +} +#endif diff --git a/drivers/irqchip/irq-loongson-pch-lpc.c b/drivers/irqchip/irq-loongson-pch-lpc.c new file mode 100644 index 000000000000..3a125f3e4287 --- /dev/null +++ b/drivers/irqchip/irq-loongson-pch-lpc.c @@ -0,0 +1,241 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Loongson LPC Interrupt Controller support + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#define pr_fmt(fmt) "lpc: " fmt + +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/kernel.h> +#include <linux/syscore_ops.h> + +#include "irq-loongson.h" + +/* Registers */ +#define LPC_INT_CTL 0x00 +#define LPC_INT_ENA 0x04 +#define LPC_INT_STS 0x08 +#define LPC_INT_CLR 0x0c +#define LPC_INT_POL 0x10 +#define LPC_COUNT 16 + +/* LPC_INT_CTL */ +#define LPC_INT_CTL_EN BIT(31) + +struct pch_lpc { + void __iomem *base; + struct irq_domain *lpc_domain; + raw_spinlock_t lpc_lock; + u32 saved_reg_ctl; + u32 saved_reg_ena; + u32 saved_reg_pol; +}; + +static struct pch_lpc *pch_lpc_priv; +struct fwnode_handle *pch_lpc_handle; + +static void lpc_irq_ack(struct irq_data *d) +{ + unsigned long flags; + struct pch_lpc *priv = d->domain->host_data; + + raw_spin_lock_irqsave(&priv->lpc_lock, flags); + writel(0x1 << d->hwirq, priv->base + LPC_INT_CLR); + raw_spin_unlock_irqrestore(&priv->lpc_lock, flags); +} + +static void lpc_irq_mask(struct irq_data *d) +{ + unsigned long flags; + struct pch_lpc *priv = d->domain->host_data; + + raw_spin_lock_irqsave(&priv->lpc_lock, flags); + writel(readl(priv->base + LPC_INT_ENA) & (~(0x1 << (d->hwirq))), + priv->base + LPC_INT_ENA); + raw_spin_unlock_irqrestore(&priv->lpc_lock, flags); +} + +static void lpc_irq_unmask(struct irq_data *d) +{ + unsigned long flags; + struct pch_lpc *priv = d->domain->host_data; + + raw_spin_lock_irqsave(&priv->lpc_lock, flags); + writel(readl(priv->base + LPC_INT_ENA) | (0x1 << (d->hwirq)), + priv->base + LPC_INT_ENA); + raw_spin_unlock_irqrestore(&priv->lpc_lock, flags); +} + +static int lpc_irq_set_type(struct irq_data *d, unsigned int type) +{ + u32 val; + u32 mask = 0x1 << (d->hwirq); + struct pch_lpc *priv = d->domain->host_data; + + if (!(type & IRQ_TYPE_LEVEL_MASK)) + return 0; + + val = readl(priv->base + LPC_INT_POL); + + if (type == IRQ_TYPE_LEVEL_HIGH) + val |= mask; + else + val &= ~mask; + + writel(val, priv->base + LPC_INT_POL); + + return 0; +} + +static const struct irq_chip pch_lpc_irq_chip = { + .name = "PCH LPC", + .irq_mask = lpc_irq_mask, + .irq_unmask = lpc_irq_unmask, + .irq_ack = lpc_irq_ack, + .irq_set_type = lpc_irq_set_type, + .flags = IRQCHIP_SKIP_SET_WAKE, +}; + 
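+/*
+ * Illustrative sketch with a hypothetical helper, not used by the
+ * driver: an LPC interrupt is only serviceable when it is both enabled
+ * and asserted, which is why lpc_irq_dispatch() below ANDs LPC_INT_ENA
+ * with LPC_INT_STS before walking the pending bits.
+ */
+static inline u32 __maybe_unused lpc_pending_irqs(struct pch_lpc *priv)
+{
+	return readl(priv->base + LPC_INT_ENA) &	/* enabled lines */
+	       readl(priv->base + LPC_INT_STS);		/* asserted lines */
+}
+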
+static void lpc_irq_dispatch(struct irq_desc *desc) +{ + u32 pending, bit; + struct irq_chip *chip = irq_desc_get_chip(desc); + struct pch_lpc *priv = irq_desc_get_handler_data(desc); + + chained_irq_enter(chip, desc); + + pending = readl(priv->base + LPC_INT_ENA); + pending &= readl(priv->base + LPC_INT_STS); + if (!pending) + spurious_interrupt(); + + while (pending) { + bit = __ffs(pending); + + generic_handle_domain_irq(priv->lpc_domain, bit); + pending &= ~BIT(bit); + } + chained_irq_exit(chip, desc); +} + +static int pch_lpc_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hw) +{ + irq_set_chip_and_handler(irq, &pch_lpc_irq_chip, handle_level_irq); + return 0; +} + +static const struct irq_domain_ops pch_lpc_domain_ops = { + .map = pch_lpc_map, + .translate = irq_domain_translate_twocell, +}; + +static void pch_lpc_reset(struct pch_lpc *priv) +{ + /* Enable the LPC interrupt, bit31: en bit30: edge */ + writel(LPC_INT_CTL_EN, priv->base + LPC_INT_CTL); + writel(0, priv->base + LPC_INT_ENA); + /* Clear all 18-bit interrpt bit */ + writel(GENMASK(17, 0), priv->base + LPC_INT_CLR); +} + +static int pch_lpc_disabled(struct pch_lpc *priv) +{ + return (readl(priv->base + LPC_INT_ENA) == 0xffffffff) && + (readl(priv->base + LPC_INT_STS) == 0xffffffff); +} + +static int pch_lpc_suspend(void *data) +{ + pch_lpc_priv->saved_reg_ctl = readl(pch_lpc_priv->base + LPC_INT_CTL); + pch_lpc_priv->saved_reg_ena = readl(pch_lpc_priv->base + LPC_INT_ENA); + pch_lpc_priv->saved_reg_pol = readl(pch_lpc_priv->base + LPC_INT_POL); + return 0; +} + +static void pch_lpc_resume(void *data) +{ + writel(pch_lpc_priv->saved_reg_ctl, pch_lpc_priv->base + LPC_INT_CTL); + writel(pch_lpc_priv->saved_reg_ena, pch_lpc_priv->base + LPC_INT_ENA); + writel(pch_lpc_priv->saved_reg_pol, pch_lpc_priv->base + LPC_INT_POL); +} + +static const struct syscore_ops pch_lpc_syscore_ops = { + .suspend = pch_lpc_suspend, + .resume = pch_lpc_resume, +}; + +static struct syscore pch_lpc_syscore = { + .ops = &pch_lpc_syscore_ops, +}; + +int __init pch_lpc_acpi_init(struct irq_domain *parent, + struct acpi_madt_lpc_pic *acpi_pchlpc) +{ + int parent_irq; + struct pch_lpc *priv; + struct irq_fwspec fwspec; + struct fwnode_handle *irq_handle; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + raw_spin_lock_init(&priv->lpc_lock); + + priv->base = ioremap(acpi_pchlpc->address, acpi_pchlpc->size); + if (!priv->base) + goto free_priv; + + if (pch_lpc_disabled(priv)) { + pr_err("Failed to get LPC status\n"); + goto iounmap_base; + } + + irq_handle = irq_domain_alloc_named_fwnode("lpcintc"); + if (!irq_handle) { + pr_err("Unable to allocate domain handle\n"); + goto iounmap_base; + } + + /* + * The LPC interrupt controller is a legacy i8259-compatible device, + * which requires a static 1:1 mapping for IRQs 0-15. + * Use irq_domain_create_legacy to establish this static mapping early. 
diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c
new file mode 100644
index 000000000000..4aedc9b90ff7
--- /dev/null
+++ b/drivers/irqchip/irq-loongson-pch-msi.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020, Jiaxun Yang <jiaxun.yang@flygoat.com>
+ * Loongson PCH MSI support
+ */
+
+#define pr_fmt(fmt) "pch-msi: " fmt
+
+#include <linux/irqchip.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include <linux/irqchip/irq-msi-lib.h>
+#include "irq-loongson.h"
+
+static int nr_pics;
+
+struct pch_msi_data {
+ struct mutex msi_map_lock;
+ phys_addr_t doorbell;
+ u32 irq_first; /* The vector number where MSIs start */
+ u32 num_irqs; /* The number of vectors for MSIs */
+ unsigned long *msi_map;
+};
+
+static struct fwnode_handle *pch_msi_handle[MAX_IO_PICS];
+
+static int pch_msi_allocate_hwirq(struct pch_msi_data *priv, int num_req)
+{
+ int first;
+
+ mutex_lock(&priv->msi_map_lock);
+
+ first = bitmap_find_free_region(priv->msi_map, priv->num_irqs,
+ get_count_order(num_req));
+ if (first < 0) {
+ mutex_unlock(&priv->msi_map_lock);
+ return -ENOSPC;
+ }
+
+ mutex_unlock(&priv->msi_map_lock);
+
+ return priv->irq_first + first;
+}
+
+static void pch_msi_free_hwirq(struct pch_msi_data *priv,
+ int hwirq, int num_req)
+{
+ int first = hwirq - priv->irq_first;
+
+ mutex_lock(&priv->msi_map_lock);
+ bitmap_release_region(priv->msi_map, first, get_count_order(num_req));
+ mutex_unlock(&priv->msi_map_lock);
+}
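pch_msi_allocate_hwirq() above hands get_count_order(num_req) to bitmap_find_free_region(), which only carves out naturally aligned power-of-two regions, so every request is rounded up to the next power of two. That rounding is what PCI multi-MSI needs anyway, since a function's vectors must form an aligned power-of-two block. A quick standalone model of the arithmetic (count_order() reimplements the kernel's get_count_order() purely for illustration):

#include <stdio.h>

/* Order of the smallest power of two that covers n, for n >= 1. */
static int count_order(unsigned int n)
{
	int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

int main(void)
{
	for (unsigned int req = 1; req <= 8; req++)
		printf("request %u -> block of %u vectors\n",
		       req, 1u << count_order(req));
	return 0;
}

A request for 3 vectors therefore claims a block of 4, and pch_msi_free_hwirq() releases the same power-of-two region on the way out, keeping the bitmap consistent.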
+
+static void pch_msi_compose_msi_msg(struct irq_data *data,
+ struct msi_msg *msg)
+{
+ struct pch_msi_data *priv = irq_data_get_irq_chip_data(data);
+
+ msg->address_hi = upper_32_bits(priv->doorbell);
+ msg->address_lo = lower_32_bits(priv->doorbell);
+ msg->data = data->hwirq;
+}
+
+static struct irq_chip middle_irq_chip = {
+ .name = "PCH MSI",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_ack = irq_chip_ack_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_compose_msi_msg = pch_msi_compose_msi_msg,
+};
+
+static int pch_msi_parent_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, int hwirq)
+{
+ struct irq_fwspec fwspec;
+
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 1;
+ fwspec.param[0] = hwirq;
+
+ return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+}
+
+static int pch_msi_middle_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct pch_msi_data *priv = domain->host_data;
+ int hwirq, err, i;
+
+ hwirq = pch_msi_allocate_hwirq(priv, nr_irqs);
+ if (hwirq < 0)
+ return hwirq;
+
+ for (i = 0; i < nr_irqs; i++) {
+ err = pch_msi_parent_domain_alloc(domain, virq + i, hwirq + i);
+ if (err)
+ goto err_hwirq;
+
+ irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+ &middle_irq_chip, priv);
+ }
+
+ return 0;
+
+err_hwirq:
+ pch_msi_free_hwirq(priv, hwirq, nr_irqs);
+ irq_domain_free_irqs_parent(domain, virq, i);
+
+ return err;
+}
+
+static void pch_msi_middle_domain_free(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct pch_msi_data *priv = irq_data_get_irq_chip_data(d);
+
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+ pch_msi_free_hwirq(priv, d->hwirq, nr_irqs);
+}
+
+static const struct irq_domain_ops pch_msi_middle_domain_ops = {
+ .alloc = pch_msi_middle_domain_alloc,
+ .free = pch_msi_middle_domain_free,
+ .select = msi_lib_irq_domain_select,
+};
+
+#define PCH_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT)
+
+#define PCH_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX | \
+ MSI_FLAG_MULTI_PCI_MSI)
+
+static struct msi_parent_ops pch_msi_parent_ops = {
+ .required_flags = PCH_MSI_FLAGS_REQUIRED,
+ .supported_flags = PCH_MSI_FLAGS_SUPPORTED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
+ .bus_select_mask = MATCH_PCI_MSI,
+ .bus_select_token = DOMAIN_BUS_NEXUS,
+ .prefix = "PCH-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
+
+static int pch_msi_init_domains(struct pch_msi_data *priv, struct irq_domain *parent,
+ struct fwnode_handle *domain_handle)
+{
+ struct irq_domain_info info = {
+ .ops = &pch_msi_middle_domain_ops,
+ .size = priv->num_irqs,
+ .parent = parent,
+ .host_data = priv,
+ .fwnode = domain_handle,
+ };
+
+ if (!msi_create_parent_irq_domain(&info, &pch_msi_parent_ops)) {
+ pr_err("Failed to create the MSI middle domain\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int pch_msi_init(phys_addr_t msg_address, int irq_base, int irq_count,
+ struct irq_domain *parent_domain, struct fwnode_handle *domain_handle)
+{
+ int ret;
+ struct pch_msi_data *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_init(&priv->msi_map_lock);
+
+ priv->doorbell = msg_address;
+ priv->irq_first = irq_base;
+ priv->num_irqs = irq_count;
+
+ priv->msi_map = bitmap_zalloc(priv->num_irqs, GFP_KERNEL);
+ if (!priv->msi_map)
+ goto err_priv;
+
+ pr_debug("Registering %d MSIs, starting at %d\n",
+ priv->num_irqs, priv->irq_first);
+
+ ret = pch_msi_init_domains(priv, parent_domain, domain_handle);
+ if (ret)
+ goto err_map;
+
+ pch_msi_handle[nr_pics++] = domain_handle;
+ return 0;
+
+err_map:
+ bitmap_free(priv->msi_map);
+err_priv:
+ kfree(priv);
+
+ return -EINVAL;
+}
+
+#ifdef CONFIG_OF
+static int pch_msi_of_init(struct device_node *node, struct device_node *parent)
+{
+ int err;
+ u32 irq_base, irq_count;
+ struct resource res;
+ struct irq_domain *parent_domain;
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("Failed to find the parent domain\n");
+ return -ENXIO;
+ }
+
+ if (of_address_to_resource(node, 0, &res)) {
+ pr_err("Failed to allocate resource\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(node, "loongson,msi-base-vec", &irq_base)) {
+ pr_err("Unable to parse MSI vec base\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(node, "loongson,msi-num-vecs", &irq_count)) {
+ pr_err("Unable to parse MSI vec number\n");
+ return -EINVAL;
+ }
+
+ 
err = pch_msi_init(res.start, irq_base, irq_count, parent_domain, of_fwnode_handle(node)); + if (err < 0) + return err; + + return 0; +} + +IRQCHIP_DECLARE(pch_msi, "loongson,pch-msi-1.0", pch_msi_of_init); +#endif + +#ifdef CONFIG_ACPI +struct fwnode_handle *get_pch_msi_handle(int pci_segment) +{ + if (cpu_has_avecint) + return pch_msi_handle[0]; + + for (int i = 0; i < MAX_IO_PICS; i++) { + if (msi_group[i].pci_segment == pci_segment) + return pch_msi_handle[i]; + } + return pch_msi_handle[0]; +} + +int __init pch_msi_acpi_init(struct irq_domain *parent, struct acpi_madt_msi_pic *acpi_pchmsi) +{ + int ret; + struct fwnode_handle *domain_handle; + + domain_handle = irq_domain_alloc_fwnode(&acpi_pchmsi->msg_address); + ret = pch_msi_init(acpi_pchmsi->msg_address, acpi_pchmsi->start, + acpi_pchmsi->count, parent, domain_handle); + if (ret < 0) + irq_domain_free_fwnode(domain_handle); + + return ret; +} + +int __init pch_msi_acpi_init_avec(struct irq_domain *parent) +{ + if (pch_msi_handle[0]) + return 0; + + pch_msi_handle[0] = parent->fwnode; + irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS); + + parent->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT; + parent->msi_parent_ops = &pch_msi_parent_ops; + + return 0; +} +#endif diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c new file mode 100644 index 000000000000..c6b369a974a7 --- /dev/null +++ b/drivers/irqchip/irq-loongson-pch-pic.c @@ -0,0 +1,477 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020, Jiaxun Yang <jiaxun.yang@flygoat.com> + * Loongson PCH PIC support + */ + +#define pr_fmt(fmt) "pch-pic: " fmt + +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqdomain.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/syscore_ops.h> + +#include "irq-loongson.h" + +/* Registers */ +#define PCH_PIC_MASK 0x20 +#define PCH_PIC_HTMSI_EN 0x40 +#define PCH_PIC_EDGE 0x60 +#define PCH_PIC_CLR 0x80 +#define PCH_PIC_AUTO0 0xc0 +#define PCH_PIC_AUTO1 0xe0 +#define PCH_INT_ROUTE(irq) (0x100 + irq) +#define PCH_INT_HTVEC(irq) (0x200 + irq) +#define PCH_PIC_POL 0x3e0 + +#define PIC_COUNT_PER_REG 32 +#define PIC_REG_COUNT 2 +#define PIC_COUNT (PIC_COUNT_PER_REG * PIC_REG_COUNT) +#define PIC_REG_IDX(irq_id) ((irq_id) / PIC_COUNT_PER_REG) +#define PIC_REG_BIT(irq_id) ((irq_id) % PIC_COUNT_PER_REG) +#define PIC_UNDEF_VECTOR 255 + +static int nr_pics; + +struct pch_pic { + void __iomem *base; + struct irq_domain *pic_domain; + u32 ht_vec_base; + raw_spinlock_t pic_lock; + u32 vec_count; + u32 gsi_base; + u32 saved_vec_en[PIC_REG_COUNT]; + u32 saved_vec_pol[PIC_REG_COUNT]; + u32 saved_vec_edge[PIC_REG_COUNT]; + u8 table[PIC_COUNT]; + int inuse; +}; + +static struct pch_pic *pch_pic_priv[MAX_IO_PICS]; + +struct fwnode_handle *pch_pic_handle[MAX_IO_PICS]; + +static inline u8 hwirq_to_bit(struct pch_pic *priv, int hirq) +{ + return priv->table[hirq]; +} + +static void pch_pic_bitset(struct pch_pic *priv, int offset, int bit) +{ + u32 reg; + void __iomem *addr = priv->base + offset + PIC_REG_IDX(bit) * 4; + + raw_spin_lock(&priv->pic_lock); + reg = readl(addr); + reg |= BIT(PIC_REG_BIT(bit)); + writel(reg, addr); + raw_spin_unlock(&priv->pic_lock); +} + +static void pch_pic_bitclr(struct pch_pic *priv, int offset, int bit) +{ + u32 reg; + void __iomem *addr = priv->base + offset + PIC_REG_IDX(bit) * 4; + + raw_spin_lock(&priv->pic_lock); + reg = 
readl(addr); + reg &= ~BIT(PIC_REG_BIT(bit)); + writel(reg, addr); + raw_spin_unlock(&priv->pic_lock); +} + +static void pch_pic_mask_irq(struct irq_data *d) +{ + struct pch_pic *priv = irq_data_get_irq_chip_data(d); + + pch_pic_bitset(priv, PCH_PIC_MASK, hwirq_to_bit(priv, d->hwirq)); + irq_chip_mask_parent(d); +} + +static void pch_pic_unmask_irq(struct irq_data *d) +{ + struct pch_pic *priv = irq_data_get_irq_chip_data(d); + int bit = hwirq_to_bit(priv, d->hwirq); + + writel(BIT(PIC_REG_BIT(bit)), + priv->base + PCH_PIC_CLR + PIC_REG_IDX(bit) * 4); + + irq_chip_unmask_parent(d); + pch_pic_bitclr(priv, PCH_PIC_MASK, bit); +} + +static int pch_pic_set_type(struct irq_data *d, unsigned int type) +{ + struct pch_pic *priv = irq_data_get_irq_chip_data(d); + int bit = hwirq_to_bit(priv, d->hwirq); + int ret = 0; + + switch (type) { + case IRQ_TYPE_EDGE_RISING: + pch_pic_bitset(priv, PCH_PIC_EDGE, bit); + pch_pic_bitclr(priv, PCH_PIC_POL, bit); + irq_set_handler_locked(d, handle_edge_irq); + break; + case IRQ_TYPE_EDGE_FALLING: + pch_pic_bitset(priv, PCH_PIC_EDGE, bit); + pch_pic_bitset(priv, PCH_PIC_POL, bit); + irq_set_handler_locked(d, handle_edge_irq); + break; + case IRQ_TYPE_LEVEL_HIGH: + pch_pic_bitclr(priv, PCH_PIC_EDGE, bit); + pch_pic_bitclr(priv, PCH_PIC_POL, bit); + irq_set_handler_locked(d, handle_level_irq); + break; + case IRQ_TYPE_LEVEL_LOW: + pch_pic_bitclr(priv, PCH_PIC_EDGE, bit); + pch_pic_bitset(priv, PCH_PIC_POL, bit); + irq_set_handler_locked(d, handle_level_irq); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static void pch_pic_ack_irq(struct irq_data *d) +{ + unsigned int reg; + struct pch_pic *priv = irq_data_get_irq_chip_data(d); + int bit = hwirq_to_bit(priv, d->hwirq); + + reg = readl(priv->base + PCH_PIC_EDGE + PIC_REG_IDX(bit) * 4); + if (reg & BIT(PIC_REG_BIT(bit))) { + writel(BIT(PIC_REG_BIT(bit)), + priv->base + PCH_PIC_CLR + PIC_REG_IDX(bit) * 4); + } + irq_chip_ack_parent(d); +} + +static struct irq_chip pch_pic_irq_chip = { + .name = "PCH PIC", + .irq_mask = pch_pic_mask_irq, + .irq_unmask = pch_pic_unmask_irq, + .irq_ack = pch_pic_ack_irq, + .irq_set_affinity = irq_chip_set_affinity_parent, + .irq_set_type = pch_pic_set_type, + .flags = IRQCHIP_SKIP_SET_WAKE, +}; + +static int pch_pic_domain_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int *type) +{ + struct pch_pic *priv = d->host_data; + struct device_node *of_node = to_of_node(fwspec->fwnode); + unsigned long flags; + int i; + + if (of_node) { + if (fwspec->param_count < 2) + return -EINVAL; + + *hwirq = fwspec->param[0]; + *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; + } else { + if (fwspec->param_count < 1) + return -EINVAL; + + *hwirq = fwspec->param[0] - priv->gsi_base; + + if (fwspec->param_count > 1) + *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; + else + *type = IRQ_TYPE_NONE; + } + + raw_spin_lock_irqsave(&priv->pic_lock, flags); + /* Check pic-table to confirm if the hwirq has been assigned */ + for (i = 0; i < priv->inuse; i++) { + if (priv->table[i] == *hwirq) { + *hwirq = i; + break; + } + } + if (i == priv->inuse) { + /* Assign a new hwirq in pic-table */ + if (priv->inuse >= PIC_COUNT) { + pr_err("pch-pic domain has no free vectors\n"); + raw_spin_unlock_irqrestore(&priv->pic_lock, flags); + return -EINVAL; + } + priv->table[priv->inuse] = *hwirq; + *hwirq = priv->inuse++; + } + raw_spin_unlock_irqrestore(&priv->pic_lock, flags); + + return 0; +} + +static int pch_pic_alloc(struct irq_domain *domain, 
unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + int err; + unsigned int type; + unsigned long hwirq; + struct irq_fwspec *fwspec = arg; + struct irq_fwspec parent_fwspec; + struct pch_pic *priv = domain->host_data; + + err = pch_pic_domain_translate(domain, fwspec, &hwirq, &type); + if (err) + return err; + + /* Write vector ID */ + writeb(priv->ht_vec_base + hwirq, priv->base + PCH_INT_HTVEC(hwirq_to_bit(priv, hwirq))); + + parent_fwspec.fwnode = domain->parent->fwnode; + parent_fwspec.param_count = 1; + parent_fwspec.param[0] = hwirq + priv->ht_vec_base; + + err = irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec); + if (err) + return err; + + irq_domain_set_info(domain, virq, hwirq, + &pch_pic_irq_chip, priv, + handle_level_irq, NULL, NULL); + irq_set_probe(virq); + + return 0; +} + +static const struct irq_domain_ops pch_pic_domain_ops = { + .translate = pch_pic_domain_translate, + .alloc = pch_pic_alloc, + .free = irq_domain_free_irqs_parent, +}; + +static void pch_pic_reset(struct pch_pic *priv) +{ + int i; + + for (i = 0; i < PIC_COUNT; i++) { + /* Write vector ID */ + writeb(priv->ht_vec_base + i, priv->base + PCH_INT_HTVEC(hwirq_to_bit(priv, i))); + /* Hardcode route to HT0 Lo */ + writeb(1, priv->base + PCH_INT_ROUTE(i)); + } + + for (i = 0; i < PIC_REG_COUNT; i++) { + /* Clear IRQ cause registers, mask all interrupts */ + writel_relaxed(0xFFFFFFFF, priv->base + PCH_PIC_MASK + 4 * i); + writel_relaxed(0xFFFFFFFF, priv->base + PCH_PIC_CLR + 4 * i); + /* Clear auto bounce, we don't need that */ + writel_relaxed(0, priv->base + PCH_PIC_AUTO0 + 4 * i); + writel_relaxed(0, priv->base + PCH_PIC_AUTO1 + 4 * i); + /* Enable HTMSI transformer */ + writel_relaxed(0xFFFFFFFF, priv->base + PCH_PIC_HTMSI_EN + 4 * i); + } +} + +static int pch_pic_suspend(void *data) +{ + int i, j; + + for (i = 0; i < nr_pics; i++) { + for (j = 0; j < PIC_REG_COUNT; j++) { + pch_pic_priv[i]->saved_vec_pol[j] = + readl(pch_pic_priv[i]->base + PCH_PIC_POL + 4 * j); + pch_pic_priv[i]->saved_vec_edge[j] = + readl(pch_pic_priv[i]->base + PCH_PIC_EDGE + 4 * j); + pch_pic_priv[i]->saved_vec_en[j] = + readl(pch_pic_priv[i]->base + PCH_PIC_MASK + 4 * j); + } + } + + return 0; +} + +static void pch_pic_resume(void *data) +{ + int i, j; + + for (i = 0; i < nr_pics; i++) { + pch_pic_reset(pch_pic_priv[i]); + for (j = 0; j < PIC_REG_COUNT; j++) { + writel(pch_pic_priv[i]->saved_vec_pol[j], + pch_pic_priv[i]->base + PCH_PIC_POL + 4 * j); + writel(pch_pic_priv[i]->saved_vec_edge[j], + pch_pic_priv[i]->base + PCH_PIC_EDGE + 4 * j); + writel(pch_pic_priv[i]->saved_vec_en[j], + pch_pic_priv[i]->base + PCH_PIC_MASK + 4 * j); + } + } +} + +static const struct syscore_ops pch_pic_syscore_ops = { + .suspend = pch_pic_suspend, + .resume = pch_pic_resume, +}; + +static struct syscore pch_pic_syscore = { + .ops = &pch_pic_syscore_ops, +}; + +static int pch_pic_init(phys_addr_t addr, unsigned long size, int vec_base, + struct irq_domain *parent_domain, struct fwnode_handle *domain_handle, + u32 gsi_base) +{ + struct pch_pic *priv; + int i; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + raw_spin_lock_init(&priv->pic_lock); + priv->base = ioremap(addr, size); + if (!priv->base) + goto free_priv; + + priv->inuse = 0; + for (i = 0; i < PIC_COUNT; i++) + priv->table[i] = PIC_UNDEF_VECTOR; + + priv->ht_vec_base = vec_base; + priv->vec_count = ((readq(priv->base) >> 48) & 0xff) + 1; + priv->gsi_base = gsi_base; + + priv->pic_domain = irq_domain_create_hierarchy(parent_domain, 0, + 
priv->vec_count, domain_handle, + &pch_pic_domain_ops, priv); + + if (!priv->pic_domain) { + pr_err("Failed to create IRQ domain\n"); + goto iounmap_base; + } + + pch_pic_reset(priv); + pch_pic_handle[nr_pics] = domain_handle; + pch_pic_priv[nr_pics++] = priv; + + if (nr_pics == 1) + register_syscore(&pch_pic_syscore); + + return 0; + +iounmap_base: + iounmap(priv->base); +free_priv: + kfree(priv); + + return -EINVAL; +} + +#ifdef CONFIG_OF + +static int pch_pic_of_init(struct device_node *node, + struct device_node *parent) +{ + int err, vec_base; + struct resource res; + struct irq_domain *parent_domain; + + if (of_address_to_resource(node, 0, &res)) + return -EINVAL; + + parent_domain = irq_find_host(parent); + if (!parent_domain) { + pr_err("Failed to find the parent domain\n"); + return -ENXIO; + } + + if (of_property_read_u32(node, "loongson,pic-base-vec", &vec_base)) { + pr_err("Failed to determine pic-base-vec\n"); + return -EINVAL; + } + + err = pch_pic_init(res.start, resource_size(&res), vec_base, + parent_domain, of_fwnode_handle(node), 0); + if (err < 0) + return err; + + return 0; +} + +IRQCHIP_DECLARE(pch_pic, "loongson,pch-pic-1.0", pch_pic_of_init); + +#endif + +#ifdef CONFIG_ACPI +int find_pch_pic(u32 gsi) +{ + int i; + + /* Find the PCH_PIC that manages this GSI. */ + for (i = 0; i < MAX_IO_PICS; i++) { + struct pch_pic *priv = pch_pic_priv[i]; + + if (!priv) + return -1; + + if (gsi >= priv->gsi_base && gsi < (priv->gsi_base + priv->vec_count)) + return i; + } + + pr_err("ERROR: Unable to locate PCH_PIC for GSI %d\n", gsi); + return -1; +} + +static int __init pch_lpc_parse_madt(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_lpc_pic *pchlpc_entry = (struct acpi_madt_lpc_pic *)header; + + return pch_lpc_acpi_init(pch_pic_priv[0]->pic_domain, pchlpc_entry); +} + +static int __init acpi_cascade_irqdomain_init(void) +{ + int r; + + r = acpi_table_parse_madt(ACPI_MADT_TYPE_LPC_PIC, pch_lpc_parse_madt, 0); + if (r < 0) + return r; + + return 0; +} + +int __init pch_pic_acpi_init(struct irq_domain *parent, + struct acpi_madt_bio_pic *acpi_pchpic) +{ + int ret; + struct fwnode_handle *domain_handle; + + if (find_pch_pic(acpi_pchpic->gsi_base) >= 0) + return 0; + + domain_handle = irq_domain_alloc_fwnode(&acpi_pchpic->address); + if (!domain_handle) { + pr_err("Unable to allocate domain handle\n"); + return -ENOMEM; + } + + ret = pch_pic_init(acpi_pchpic->address, acpi_pchpic->size, + 0, parent, domain_handle, acpi_pchpic->gsi_base); + + if (ret < 0) { + irq_domain_free_fwnode(domain_handle); + return ret; + } + + if (acpi_pchpic->id == 0) + ret = acpi_cascade_irqdomain_init(); + + return ret; +} +#endif diff --git a/drivers/irqchip/irq-loongson.h b/drivers/irqchip/irq-loongson.h new file mode 100644 index 000000000000..11fa138d1f44 --- /dev/null +++ b/drivers/irqchip/irq-loongson.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2024 Loongson Technology Corporation Limited + */ + +#ifndef _DRIVERS_IRQCHIP_IRQ_LOONGSON_H +#define _DRIVERS_IRQCHIP_IRQ_LOONGSON_H + +int find_pch_pic(u32 gsi); + +int liointc_acpi_init(struct irq_domain *parent, + struct acpi_madt_lio_pic *acpi_liointc); +int eiointc_acpi_init(struct irq_domain *parent, + struct acpi_madt_eio_pic *acpi_eiointc); +int avecintc_acpi_init(struct irq_domain *parent); + +int htvec_acpi_init(struct irq_domain *parent, + struct acpi_madt_ht_pic *acpi_htvec); +int pch_lpc_acpi_init(struct irq_domain *parent, + struct acpi_madt_lpc_pic *acpi_pchlpc); +int 
pch_pic_acpi_init(struct irq_domain *parent, + struct acpi_madt_bio_pic *acpi_pchpic); +int pch_msi_acpi_init(struct irq_domain *parent, + struct acpi_madt_msi_pic *acpi_pchmsi); +int pch_msi_acpi_init_avec(struct irq_domain *parent); + +#endif /* _DRIVERS_IRQCHIP_IRQ_LOONGSON_H */ diff --git a/drivers/irqchip/irq-lpc32xx.c b/drivers/irqchip/irq-lpc32xx.c index a48357d369b5..14cca44baa14 100644 --- a/drivers/irqchip/irq-lpc32xx.c +++ b/drivers/irqchip/irq-lpc32xx.c @@ -1,12 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2015-2016 Vladimir Zapolskiy <vz@mleia.com> - * - * The code contained herein is licensed under the GNU General Public - * License. You may obtain a copy of the GNU General Public License - * Version 2 or later at the following locations: - * - * http://www.opensource.org/licenses/gpl-license.html - * http://www.gnu.org/copyleft/gpl.html */ #define pr_fmt(fmt) "%s: " fmt, __func__ @@ -17,6 +11,7 @@ #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_platform.h> +#include <linux/seq_file.h> #include <linux/slab.h> #include <asm/exception.h> @@ -31,8 +26,8 @@ struct lpc32xx_irq_chip { void __iomem *base; + phys_addr_t addr; struct irq_domain *domain; - struct irq_chip chip; }; static struct lpc32xx_irq_chip *lpc32xx_mic_irqc; @@ -124,6 +119,24 @@ static int lpc32xx_irq_set_type(struct irq_data *d, unsigned int type) return 0; } +static void lpc32xx_irq_print_chip(struct irq_data *d, struct seq_file *p) +{ + struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d); + + if (ic == lpc32xx_mic_irqc) + seq_printf(p, "%08x.mic", ic->addr); + else + seq_printf(p, "%08x.sic", ic->addr); +} + +static const struct irq_chip lpc32xx_chip = { + .irq_ack = lpc32xx_irq_ack, + .irq_mask = lpc32xx_irq_mask, + .irq_unmask = lpc32xx_irq_unmask, + .irq_set_type = lpc32xx_irq_set_type, + .irq_print_chip = lpc32xx_irq_print_chip, +}; + static void __exception_irq_entry lpc32xx_handle_irq(struct pt_regs *regs) { struct lpc32xx_irq_chip *ic = lpc32xx_mic_irqc; @@ -132,7 +145,7 @@ static void __exception_irq_entry lpc32xx_handle_irq(struct pt_regs *regs) while (hwirq) { irq = __ffs(hwirq); hwirq &= ~BIT(irq); - handle_domain_irq(lpc32xx_mic_irqc->domain, irq, regs); + generic_handle_domain_irq(lpc32xx_mic_irqc->domain, irq); } } @@ -147,7 +160,7 @@ static void lpc32xx_sic_handler(struct irq_desc *desc) while (hwirq) { irq = __ffs(hwirq); hwirq &= ~BIT(irq); - generic_handle_irq(irq_find_mapping(ic->domain, irq)); + generic_handle_domain_irq(ic->domain, irq); } chained_irq_exit(chip, desc); @@ -159,7 +172,7 @@ static int lpc32xx_irq_domain_map(struct irq_domain *id, unsigned int virq, struct lpc32xx_irq_chip *ic = id->host_data; irq_set_chip_data(virq, ic); - irq_set_chip_and_handler(virq, &ic->chip, handle_level_irq); + irq_set_chip_and_handler(virq, &lpc32xx_chip, handle_level_irq); irq_set_status_flags(virq, IRQ_LEVEL); irq_set_noprobe(virq); @@ -189,6 +202,7 @@ static int __init lpc32xx_of_ic_init(struct device_node *node, if (!irqc) return -ENOMEM; + irqc->addr = addr; irqc->base = of_iomap(node, 0); if (!irqc->base) { pr_err("%pOF: unable to map registers\n", node); @@ -196,21 +210,11 @@ static int __init lpc32xx_of_ic_init(struct device_node *node, return -EINVAL; } - irqc->chip.irq_ack = lpc32xx_irq_ack; - irqc->chip.irq_mask = lpc32xx_irq_mask; - irqc->chip.irq_unmask = lpc32xx_irq_unmask; - irqc->chip.irq_set_type = lpc32xx_irq_set_type; - if (is_mic) - irqc->chip.name = kasprintf(GFP_KERNEL, "%08x.mic", addr); - else - irqc->chip.name = 
kasprintf(GFP_KERNEL, "%08x.sic", addr); - - irqc->domain = irq_domain_add_linear(node, NR_LPC32XX_IC_IRQS, - &lpc32xx_irq_domain_ops, irqc); + irqc->domain = irq_domain_create_linear(of_fwnode_handle(node), NR_LPC32XX_IC_IRQS, + &lpc32xx_irq_domain_ops, irqc); if (!irqc->domain) { pr_err("unable to add irq domain\n"); iounmap(irqc->base); - kfree(irqc->chip.name); kfree(irqc); return -ENODEV; } diff --git a/drivers/irqchip/irq-ls-extirq.c b/drivers/irqchip/irq-ls-extirq.c new file mode 100644 index 000000000000..50a7b38381b9 --- /dev/null +++ b/drivers/irqchip/irq-ls-extirq.c @@ -0,0 +1,232 @@ +// SPDX-License-Identifier: GPL-2.0 + +#define pr_fmt(fmt) "irq-ls-extirq: " fmt + +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqdomain.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/slab.h> + +#include <dt-bindings/interrupt-controller/arm-gic.h> + +#define MAXIRQ 12 +#define LS1021A_SCFGREVCR 0x200 + +struct ls_extirq_data { + void __iomem *intpcr; + raw_spinlock_t lock; + bool big_endian; + bool is_ls1021a_or_ls1043a; + u32 nirq; + struct irq_fwspec map[MAXIRQ]; +}; + +static void ls_extirq_intpcr_rmw(struct ls_extirq_data *priv, u32 mask, + u32 value) +{ + u32 intpcr; + + /* + * Serialize concurrent calls to ls_extirq_set_type() from multiple + * IRQ descriptors, making sure the read-modify-write is atomic. + */ + raw_spin_lock(&priv->lock); + + if (priv->big_endian) + intpcr = ioread32be(priv->intpcr); + else + intpcr = ioread32(priv->intpcr); + + intpcr &= ~mask; + intpcr |= value; + + if (priv->big_endian) + iowrite32be(intpcr, priv->intpcr); + else + iowrite32(intpcr, priv->intpcr); + + raw_spin_unlock(&priv->lock); +} + +static int +ls_extirq_set_type(struct irq_data *data, unsigned int type) +{ + struct ls_extirq_data *priv = data->chip_data; + irq_hw_number_t hwirq = data->hwirq; + u32 value, mask; + + if (priv->is_ls1021a_or_ls1043a) + mask = 1U << (31 - hwirq); + else + mask = 1U << hwirq; + + switch (type) { + case IRQ_TYPE_LEVEL_LOW: + type = IRQ_TYPE_LEVEL_HIGH; + value = mask; + break; + case IRQ_TYPE_EDGE_FALLING: + type = IRQ_TYPE_EDGE_RISING; + value = mask; + break; + case IRQ_TYPE_LEVEL_HIGH: + case IRQ_TYPE_EDGE_RISING: + value = 0; + break; + default: + return -EINVAL; + } + + ls_extirq_intpcr_rmw(priv, mask, value); + + return irq_chip_set_type_parent(data, type); +} + +static struct irq_chip ls_extirq_chip = { + .name = "ls-extirq", + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_type = ls_extirq_set_type, + .irq_retrigger = irq_chip_retrigger_hierarchy, + .irq_set_affinity = irq_chip_set_affinity_parent, + .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE, +}; + +static int +ls_extirq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + struct ls_extirq_data *priv = domain->host_data; + struct irq_fwspec *fwspec = arg; + irq_hw_number_t hwirq; + + if (fwspec->param_count != 2) + return -EINVAL; + + hwirq = fwspec->param[0]; + if (hwirq >= priv->nirq) + return -EINVAL; + + irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &ls_extirq_chip, + priv); + + return irq_domain_alloc_irqs_parent(domain, virq, 1, &priv->map[hwirq]); +} + +static const struct irq_domain_ops extirq_domain_ops = { + .xlate = irq_domain_xlate_twocell, + .alloc = ls_extirq_domain_alloc, + .free = irq_domain_free_irqs_common, +}; + +static int +ls_extirq_parse_map(struct ls_extirq_data *priv, struct device_node *node) +{ + const 
__be32 *map; + u32 mapsize; + int ret; + + map = of_get_property(node, "interrupt-map", &mapsize); + if (!map) + return -ENOENT; + if (mapsize % sizeof(*map)) + return -EINVAL; + mapsize /= sizeof(*map); + + while (mapsize) { + struct device_node *ipar; + u32 hwirq, intsize, j; + + if (mapsize < 3) + return -EINVAL; + hwirq = be32_to_cpup(map); + if (hwirq >= MAXIRQ) + return -EINVAL; + priv->nirq = max(priv->nirq, hwirq + 1); + + ipar = of_find_node_by_phandle(be32_to_cpup(map + 2)); + map += 3; + mapsize -= 3; + if (!ipar) + return -EINVAL; + priv->map[hwirq].fwnode = &ipar->fwnode; + ret = of_property_read_u32(ipar, "#interrupt-cells", &intsize); + if (ret) + return ret; + + if (intsize > mapsize) + return -EINVAL; + + priv->map[hwirq].param_count = intsize; + for (j = 0; j < intsize; ++j) + priv->map[hwirq].param[j] = be32_to_cpup(map++); + mapsize -= intsize; + } + return 0; +} + +static int __init +ls_extirq_of_init(struct device_node *node, struct device_node *parent) +{ + struct irq_domain *domain, *parent_domain; + struct ls_extirq_data *priv; + int ret; + + parent_domain = irq_find_host(parent); + if (!parent_domain) { + pr_err("Cannot find parent domain\n"); + ret = -ENODEV; + goto err_irq_find_host; + } + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + ret = -ENOMEM; + goto err_alloc_priv; + } + + /* + * All extirq OF nodes are under a scfg/syscon node with + * the 'ranges' property + */ + priv->intpcr = of_iomap(node, 0); + if (!priv->intpcr) { + pr_err("Cannot ioremap OF node %pOF\n", node); + ret = -ENOMEM; + goto err_iomap; + } + + ret = ls_extirq_parse_map(priv, node); + if (ret) + goto err_parse_map; + + priv->big_endian = of_device_is_big_endian(node->parent); + priv->is_ls1021a_or_ls1043a = of_device_is_compatible(node, "fsl,ls1021a-extirq") || + of_device_is_compatible(node, "fsl,ls1043a-extirq"); + raw_spin_lock_init(&priv->lock); + + domain = irq_domain_create_hierarchy(parent_domain, 0, priv->nirq, of_fwnode_handle(node), + &extirq_domain_ops, priv); + if (!domain) { + ret = -ENOMEM; + goto err_add_hierarchy; + } + + return 0; + +err_add_hierarchy: +err_parse_map: + iounmap(priv->intpcr); +err_iomap: + kfree(priv); +err_alloc_priv: +err_irq_find_host: + return ret; +} + +IRQCHIP_DECLARE(ls1021a_extirq, "fsl,ls1021a-extirq", ls_extirq_of_init); +IRQCHIP_DECLARE(ls1043a_extirq, "fsl,ls1043a-extirq", ls_extirq_of_init); +IRQCHIP_DECLARE(ls1088a_extirq, "fsl,ls1088a-extirq", ls_extirq_of_init); diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c index c671b3212010..4910f364e568 100644 --- a/drivers/irqchip/irq-ls-scfg-msi.c +++ b/drivers/irqchip/irq-ls-scfg-msi.c @@ -1,27 +1,26 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Freescale SCFG MSI(-X) support * * Copyright (C) 2016 Freescale Semiconductor. * * Author: Minghuan Lian <Minghuan.Lian@nxp.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/msi.h> #include <linux/interrupt.h> +#include <linux/iommu.h> #include <linux/irq.h> #include <linux/irqchip/chained_irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/irqdomain.h> #include <linux/of_irq.h> #include <linux/of_pci.h> -#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/property.h> #include <linux/spinlock.h> -#include <linux/dma-iommu.h> #define MSI_IRQS_PER_MSIR 32 #define MSI_MSIR_OFFSET 4 @@ -49,7 +48,6 @@ struct ls_scfg_msi { spinlock_t lock; struct platform_device *pdev; struct irq_domain *parent; - struct irq_domain *msi_domain; void __iomem *regs; phys_addr_t msiir_addr; struct ls_scfg_msi_cfg *cfg; @@ -59,17 +57,18 @@ struct ls_scfg_msi { unsigned long *used; }; -static struct irq_chip ls_scfg_msi_irq_chip = { - .name = "MSI", - .irq_mask = pci_msi_mask_irq, - .irq_unmask = pci_msi_unmask_irq, -}; - -static struct msi_domain_info ls_scfg_msi_domain_info = { - .flags = (MSI_FLAG_USE_DEF_DOM_OPS | - MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_PCI_MSIX), - .chip = &ls_scfg_msi_irq_chip, +#define MPIC_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS) +#define MPIC_MSI_FLAGS_SUPPORTED (MSI_FLAG_PCI_MSIX | \ + MSI_GENERIC_FLAGS_MASK) + +static const struct msi_parent_ops ls_scfg_msi_parent_ops = { + .required_flags = MPIC_MSI_FLAGS_REQUIRED, + .supported_flags = MPIC_MSI_FLAGS_SUPPORTED, + .bus_select_token = DOMAIN_BUS_NEXUS, + .bus_select_mask = MATCH_PCI_MSI, + .prefix = "MSI-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, }; static int msi_affinity_flag = 1; @@ -89,8 +88,6 @@ static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) { struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(data); - msg->address_hi = upper_32_bits(msi_data->msiir_addr); - msg->address_lo = lower_32_bits(msi_data->msiir_addr); msg->data = data->hwirq; if (msi_affinity_flag) { @@ -100,7 +97,8 @@ static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) msg->data |= cpumask_first(mask); } - iommu_dma_map_msi_msg(data->irq, msg); + msi_msg_set_addr(irq_data_get_msi_desc(data), msg, + msi_data->msiir_addr); } static int ls_scfg_msi_set_affinity(struct irq_data *irq_data, @@ -141,6 +139,7 @@ static int ls_scfg_msi_domain_irq_alloc(struct irq_domain *domain, unsigned int nr_irqs, void *args) { + msi_alloc_info_t *info = args; struct ls_scfg_msi *msi_data = domain->host_data; int pos, err = 0; @@ -157,6 +156,10 @@ static int ls_scfg_msi_domain_irq_alloc(struct irq_domain *domain, if (err) return err; + err = iommu_dma_prepare_msi(info->desc, msi_data->msiir_addr); + if (err) + return err; + irq_domain_set_info(domain, virq, pos, &ls_scfg_msi_parent_chip, msi_data, handle_simple_irq, NULL, NULL); @@ -183,6 +186,7 @@ static void ls_scfg_msi_domain_irq_free(struct irq_domain *domain, } static const struct irq_domain_ops ls_scfg_msi_domain_ops = { + .select = msi_lib_irq_domain_select, .alloc = ls_scfg_msi_domain_irq_alloc, .free = ls_scfg_msi_domain_irq_free, }; @@ -192,7 +196,7 @@ static void ls_scfg_msi_irq_handler(struct irq_desc *desc) struct ls_scfg_msir *msir = irq_desc_get_handler_data(desc); struct ls_scfg_msi *msi_data = msir->msi_data; unsigned long val; - int pos, size, virq, hwirq; + int pos, size, hwirq; chained_irq_enter(irq_desc_get_chip(desc), desc); @@ -204,9 +208,7 @@ static void ls_scfg_msi_irq_handler(struct irq_desc *desc) for_each_set_bit_from(pos, &val, size) { hwirq = ((msir->bit_end - 
pos) << msi_data->cfg->ibs_shift) | msir->srs; - virq = irq_find_mapping(msi_data->parent, hwirq); - if (virq) - generic_handle_irq(virq); + generic_handle_domain_irq(msi_data->parent, hwirq); } chained_irq_exit(irq_desc_get_chip(desc), desc); @@ -214,23 +216,16 @@ static void ls_scfg_msi_irq_handler(struct irq_desc *desc) static int ls_scfg_msi_domains_init(struct ls_scfg_msi *msi_data) { - /* Initialize MSI domain parent */ - msi_data->parent = irq_domain_add_linear(NULL, - msi_data->irqs_num, - &ls_scfg_msi_domain_ops, - msi_data); + struct irq_domain_info info = { + .fwnode = of_fwnode_handle(msi_data->pdev->dev.of_node), + .ops = &ls_scfg_msi_domain_ops, + .host_data = msi_data, + .size = msi_data->irqs_num, + }; + + msi_data->parent = msi_create_parent_irq_domain(&info, &ls_scfg_msi_parent_ops); if (!msi_data->parent) { - dev_err(&msi_data->pdev->dev, "failed to create IRQ domain\n"); - return -ENOMEM; - } - - msi_data->msi_domain = pci_msi_create_irq_domain( - of_node_to_fwnode(msi_data->pdev->dev.of_node), - &ls_scfg_msi_domain_info, - msi_data->parent); - if (!msi_data->msi_domain) { dev_err(&msi_data->pdev->dev, "failed to create MSI domain\n"); - irq_domain_remove(msi_data->parent); return -ENOMEM; } @@ -334,23 +329,19 @@ MODULE_DEVICE_TABLE(of, ls_scfg_msi_id); static int ls_scfg_msi_probe(struct platform_device *pdev) { - const struct of_device_id *match; struct ls_scfg_msi *msi_data; struct resource *res; int i, ret; - match = of_match_device(ls_scfg_msi_id, &pdev->dev); - if (!match) - return -ENODEV; - msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL); if (!msi_data) return -ENOMEM; - msi_data->cfg = (struct ls_scfg_msi_cfg *) match->data; + msi_data->cfg = (struct ls_scfg_msi_cfg *)device_get_match_data(&pdev->dev); + if (!msi_data->cfg) + return -ENODEV; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - msi_data->regs = devm_ioremap_resource(&pdev->dev, res); + msi_data->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(msi_data->regs)) { dev_err(&pdev->dev, "failed to initialize 'regs'\n"); return PTR_ERR(msi_data->regs); @@ -362,10 +353,7 @@ static int ls_scfg_msi_probe(struct platform_device *pdev) msi_data->irqs_num = MSI_IRQS_PER_MSIR * (1 << msi_data->cfg->ibs_shift); - msi_data->used = devm_kcalloc(&pdev->dev, - BITS_TO_LONGS(msi_data->irqs_num), - sizeof(*msi_data->used), - GFP_KERNEL); + msi_data->used = devm_bitmap_zalloc(&pdev->dev, msi_data->irqs_num, GFP_KERNEL); if (!msi_data->used) return -ENOMEM; /* @@ -404,7 +392,7 @@ static int ls_scfg_msi_probe(struct platform_device *pdev) return 0; } -static int ls_scfg_msi_remove(struct platform_device *pdev) +static void ls_scfg_msi_remove(struct platform_device *pdev) { struct ls_scfg_msi *msi_data = platform_get_drvdata(pdev); int i; @@ -412,25 +400,21 @@ static int ls_scfg_msi_remove(struct platform_device *pdev) for (i = 0; i < msi_data->msir_num; i++) ls_scfg_msi_teardown_hwirq(&msi_data->msir[i]); - irq_domain_remove(msi_data->msi_domain); irq_domain_remove(msi_data->parent); platform_set_drvdata(pdev, NULL); - - return 0; } static struct platform_driver ls_scfg_msi_driver = { .driver = { - .name = "ls-scfg-msi", - .of_match_table = ls_scfg_msi_id, + .name = "ls-scfg-msi", + .of_match_table = ls_scfg_msi_id, }, - .probe = ls_scfg_msi_probe, - .remove = ls_scfg_msi_remove, + .probe = ls_scfg_msi_probe, + .remove = ls_scfg_msi_remove, }; module_platform_driver(ls_scfg_msi_driver); MODULE_AUTHOR("Minghuan Lian <Minghuan.Lian@nxp.com>"); MODULE_DESCRIPTION("Freescale 
Layerscape SCFG MSI controller driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/irqchip/irq-ls1x.c b/drivers/irqchip/irq-ls1x.c new file mode 100644 index 000000000000..589d32007fca --- /dev/null +++ b/drivers/irqchip/irq-ls1x.c @@ -0,0 +1,193 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019, Jiaxun Yang <jiaxun.yang@flygoat.com> + * Loongson-1 platform IRQ support + */ + +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/irqchip.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/io.h> +#include <linux/irqchip/chained_irq.h> + +#define LS_REG_INTC_STATUS 0x00 +#define LS_REG_INTC_EN 0x04 +#define LS_REG_INTC_SET 0x08 +#define LS_REG_INTC_CLR 0x0c +#define LS_REG_INTC_POL 0x10 +#define LS_REG_INTC_EDGE 0x14 + +/** + * struct ls1x_intc_priv - private ls1x-intc data. + * @domain: IRQ domain. + * @intc_base: IO Base of intc registers. + */ + +struct ls1x_intc_priv { + struct irq_domain *domain; + void __iomem *intc_base; +}; + + +static void ls1x_chained_handle_irq(struct irq_desc *desc) +{ + struct ls1x_intc_priv *priv = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + u32 pending; + + chained_irq_enter(chip, desc); + pending = readl(priv->intc_base + LS_REG_INTC_STATUS) & + readl(priv->intc_base + LS_REG_INTC_EN); + + if (!pending) + spurious_interrupt(); + + while (pending) { + int bit = __ffs(pending); + + generic_handle_domain_irq(priv->domain, bit); + pending &= ~BIT(bit); + } + + chained_irq_exit(chip, desc); +} + +static void ls_intc_set_bit(struct irq_chip_generic *gc, + unsigned int offset, + u32 mask, bool set) +{ + if (set) + writel(readl(gc->reg_base + offset) | mask, + gc->reg_base + offset); + else + writel(readl(gc->reg_base + offset) & ~mask, + gc->reg_base + offset); +} + +static int ls_intc_set_type(struct irq_data *data, unsigned int type) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); + u32 mask = data->mask; + + switch (type) { + case IRQ_TYPE_LEVEL_HIGH: + ls_intc_set_bit(gc, LS_REG_INTC_EDGE, mask, false); + ls_intc_set_bit(gc, LS_REG_INTC_POL, mask, true); + break; + case IRQ_TYPE_LEVEL_LOW: + ls_intc_set_bit(gc, LS_REG_INTC_EDGE, mask, false); + ls_intc_set_bit(gc, LS_REG_INTC_POL, mask, false); + break; + case IRQ_TYPE_EDGE_RISING: + ls_intc_set_bit(gc, LS_REG_INTC_EDGE, mask, true); + ls_intc_set_bit(gc, LS_REG_INTC_POL, mask, true); + break; + case IRQ_TYPE_EDGE_FALLING: + ls_intc_set_bit(gc, LS_REG_INTC_EDGE, mask, true); + ls_intc_set_bit(gc, LS_REG_INTC_POL, mask, false); + break; + default: + return -EINVAL; + } + + irqd_set_trigger_type(data, type); + return irq_setup_alt_chip(data, type); +} + + +static int __init ls1x_intc_of_init(struct device_node *node, + struct device_node *parent) +{ + struct irq_chip_generic *gc; + struct irq_chip_type *ct; + struct ls1x_intc_priv *priv; + int parent_irq, err = 0; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->intc_base = of_iomap(node, 0); + if (!priv->intc_base) { + err = -ENODEV; + goto out_free_priv; + } + + parent_irq = irq_of_parse_and_map(node, 0); + if (!parent_irq) { + pr_err("ls1x-irq: unable to get parent irq\n"); + err = -ENODEV; + goto out_iounmap; + } + + /* Set up an IRQ domain */ + priv->domain = irq_domain_create_linear(of_fwnode_handle(node), 32, &irq_generic_chip_ops, + NULL); + if (!priv->domain) { + pr_err("ls1x-irq: cannot add IRQ domain\n"); + err 
= -ENOMEM; + goto out_iounmap; + } + + err = irq_alloc_domain_generic_chips(priv->domain, 32, 2, + node->full_name, handle_level_irq, + IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN, 0, + IRQ_GC_INIT_MASK_CACHE); + if (err) { + pr_err("ls1x-irq: unable to register IRQ domain\n"); + goto out_free_domain; + } + + /* Mask all irqs */ + writel(0x0, priv->intc_base + LS_REG_INTC_EN); + + /* Ack all irqs */ + writel(0xffffffff, priv->intc_base + LS_REG_INTC_CLR); + + /* Set all irqs to high level triggered */ + writel(0xffffffff, priv->intc_base + LS_REG_INTC_POL); + + gc = irq_get_domain_generic_chip(priv->domain, 0); + + gc->reg_base = priv->intc_base; + + ct = gc->chip_types; + ct[0].type = IRQ_TYPE_LEVEL_MASK; + ct[0].regs.mask = LS_REG_INTC_EN; + ct[0].regs.ack = LS_REG_INTC_CLR; + ct[0].chip.irq_unmask = irq_gc_mask_set_bit; + ct[0].chip.irq_mask = irq_gc_mask_clr_bit; + ct[0].chip.irq_ack = irq_gc_ack_set_bit; + ct[0].chip.irq_set_type = ls_intc_set_type; + ct[0].handler = handle_level_irq; + + ct[1].type = IRQ_TYPE_EDGE_BOTH; + ct[1].regs.mask = LS_REG_INTC_EN; + ct[1].regs.ack = LS_REG_INTC_CLR; + ct[1].chip.irq_unmask = irq_gc_mask_set_bit; + ct[1].chip.irq_mask = irq_gc_mask_clr_bit; + ct[1].chip.irq_ack = irq_gc_ack_set_bit; + ct[1].chip.irq_set_type = ls_intc_set_type; + ct[1].handler = handle_edge_irq; + + irq_set_chained_handler_and_data(parent_irq, + ls1x_chained_handle_irq, priv); + + return 0; + +out_free_domain: + irq_domain_remove(priv->domain); +out_iounmap: + iounmap(priv->intc_base); +out_free_priv: + kfree(priv); + + return err; +} + +IRQCHIP_DECLARE(ls1x_intc, "loongson,ls1x-intc", ls1x_intc_of_init); diff --git a/drivers/irqchip/irq-madera.c b/drivers/irqchip/irq-madera.c index e9256dee1a45..b32982c11515 100644 --- a/drivers/irqchip/irq-madera.c +++ b/drivers/irqchip/irq-madera.c @@ -7,17 +7,13 @@ */ #include <linux/module.h> -#include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqdomain.h> +#include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> #include <linux/slab.h> -#include <linux/of.h> -#include <linux/of_device.h> -#include <linux/of_gpio.h> -#include <linux/of_irq.h> #include <linux/irqchip/irq-madera.h> #include <linux/mfd/madera/core.h> #include <linux/mfd/madera/pdata.h> @@ -226,7 +222,7 @@ static int madera_irq_probe(struct platform_device *pdev) return 0; } -static int madera_irq_remove(struct platform_device *pdev) +static void madera_irq_remove(struct platform_device *pdev) { struct madera *madera = dev_get_drvdata(pdev->dev.parent); @@ -236,13 +232,11 @@ static int madera_irq_remove(struct platform_device *pdev) */ madera->irq_dev = NULL; regmap_del_irq_chip(madera->irq, madera->irq_data); - - return 0; } static struct platform_driver madera_irq_driver = { - .probe = &madera_irq_probe, - .remove = &madera_irq_remove, + .probe = madera_irq_probe, + .remove = madera_irq_remove, .driver = { .name = "madera-irq", .pm = &madera_irq_pm_ops, diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c index 567b29c47608..6f69f4e5dbac 100644 --- a/drivers/irqchip/irq-mbigen.c +++ b/drivers/irqchip/irq-mbigen.c @@ -1,19 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2015 Hisilicon Limited, All Rights Reserved. + * Copyright (C) 2015 HiSilicon Limited, All Rights Reserved. 
* Author: Jun Ma <majun258@huawei.com>
+ * Author: Yun Wu <wuyun.wu@huawei.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/acpi.h>
@@ -36,7 +25,7 @@
/* The maximum IRQ pin number of mbigen chip(start from 0) */
#define MAXIMUM_IRQ_PIN_NUM 1407
-/**
+/*
 * In mbigen vector register
 * bit[21:12]: event id value
 * bit[11:0]: device id
@@ -50,14 +39,14 @@
/* offset of vector register in mbigen node */
#define REG_MBIGEN_VEC_OFFSET 0x200
-/**
+/*
 * offset of clear register in mbigen node
 * This register is used to clear the status
 * of interrupt
 */
#define REG_MBIGEN_CLEAR_OFFSET 0xa000
-/**
+/*
 * offset of interrupt type register
 * This register is used to configure interrupt
 * trigger type
@@ -75,6 +64,20 @@ struct mbigen_device {
 void __iomem *base;
};
+static inline unsigned int get_mbigen_node_offset(unsigned int nid)
+{
+ unsigned int offset = nid * MBIGEN_NODE_OFFSET;
+
+ /*
+ * To avoid touching the clear register unexpectedly, skip over its
+ * address range when accessing mbigen node 10 and above.
+ */
+ if (nid >= (REG_MBIGEN_CLEAR_OFFSET / MBIGEN_NODE_OFFSET))
+ offset += MBIGEN_NODE_OFFSET;
+
+ return offset;
+}
+
static inline unsigned int get_mbigen_vec_reg(irq_hw_number_t hwirq)
{
 unsigned int nid, pin;
@@ -83,8 +86,7 @@ static inline unsigned int get_mbigen_vec_reg(irq_hw_number_t hwirq)
 nid = hwirq / IRQS_PER_MBIGEN_NODE + 1;
 pin = hwirq % IRQS_PER_MBIGEN_NODE;
- return pin * 4 + nid * MBIGEN_NODE_OFFSET
- + REG_MBIGEN_VEC_OFFSET;
+ return pin * 4 + get_mbigen_node_offset(nid) + REG_MBIGEN_VEC_OFFSET;
}
static inline void get_mbigen_type_reg(irq_hw_number_t hwirq,
@@ -99,8 +101,7 @@ static inline void get_mbigen_type_reg(irq_hw_number_t hwirq,
 *mask = 1 << (irq_ofst % 32);
 ofst = irq_ofst / 32 * 4;
- *addr = ofst + nid * MBIGEN_NODE_OFFSET
- + REG_MBIGEN_TYPE_OFFSET;
+ *addr = ofst + get_mbigen_node_offset(nid) + REG_MBIGEN_TYPE_OFFSET;
}
static inline void get_mbigen_clear_reg(irq_hw_number_t hwirq,
@@ -146,21 +147,14 @@ static int mbigen_set_type(struct irq_data *data, unsigned int type)
 return 0;
}
-static struct irq_chip mbigen_irq_chip = {
- .name = "mbigen-v2",
- .irq_mask = irq_chip_mask_parent,
- .irq_unmask = irq_chip_unmask_parent,
- .irq_eoi = mbigen_eoi_irq,
- .irq_set_type = mbigen_set_type,
- .irq_set_affinity = irq_chip_set_affinity_parent,
-};
-
-static void mbigen_write_msg(struct msi_desc *desc, struct msi_msg *msg)
+static void mbigen_write_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
- struct irq_data *d = irq_get_irq_data(desc->irq);
 void __iomem *base = d->chip_data;
 u32 val;
+ if (!msg->address_lo && !msg->address_hi)
+ return;
+
 base += get_mbigen_vec_reg(d->hwirq);
 val = readl_relaxed(base);
@@ -173,10 +167,8 @@ static void mbigen_write_msg(struct msi_desc *desc, struct msi_msg *msg)
 writel_relaxed(val, base);
}
-static int mbigen_domain_translate(struct irq_domain *d,
- struct irq_fwspec *fwspec,
- unsigned long *hwirq,
- unsigned int *type)
+static int 
mbigen_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec, + unsigned long *hwirq, unsigned int *type) { if (is_of_node(fwspec->fwnode) || is_acpi_device_node(fwspec->fwnode)) { if (fwspec->param_count != 2) @@ -200,55 +192,55 @@ static int mbigen_domain_translate(struct irq_domain *d, return -EINVAL; } -static int mbigen_irq_domain_alloc(struct irq_domain *domain, - unsigned int virq, - unsigned int nr_irqs, - void *args) +static void mbigen_domain_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc) { - struct irq_fwspec *fwspec = args; - irq_hw_number_t hwirq; - unsigned int type; - struct mbigen_device *mgn_chip; - int i, err; + arg->desc = desc; + arg->hwirq = (u32)desc->data.icookie.value; +} - err = mbigen_domain_translate(domain, fwspec, &hwirq, &type); - if (err) - return err; +static const struct msi_domain_template mbigen_msi_template = { + .chip = { + .name = "mbigen-v2", + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_eoi = mbigen_eoi_irq, + .irq_set_type = mbigen_set_type, + .irq_write_msi_msg = mbigen_write_msi_msg, + }, - err = platform_msi_domain_alloc(domain, virq, nr_irqs); - if (err) - return err; + .ops = { + .set_desc = mbigen_domain_set_desc, + .msi_translate = mbigen_domain_translate, + }, - mgn_chip = platform_msi_get_host_data(domain); + .info = { + .bus_token = DOMAIN_BUS_WIRED_TO_MSI, + .flags = MSI_FLAG_USE_DEV_FWNODE, + }, +}; - for (i = 0; i < nr_irqs; i++) - irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, - &mbigen_irq_chip, mgn_chip->base); +static bool mbigen_create_device_domain(struct device *dev, unsigned int size, + struct mbigen_device *mgn_chip) +{ + if (WARN_ON_ONCE(!dev->msi.domain)) + return false; - return 0; + return msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN, + &mbigen_msi_template, size, + NULL, mgn_chip->base); } -static const struct irq_domain_ops mbigen_domain_ops = { - .translate = mbigen_domain_translate, - .alloc = mbigen_irq_domain_alloc, - .free = irq_domain_free_irqs_common, -}; - static int mbigen_of_create_domain(struct platform_device *pdev, struct mbigen_device *mgn_chip) { - struct device *parent; struct platform_device *child; - struct irq_domain *domain; - struct device_node *np; u32 num_pins; - for_each_child_of_node(pdev->dev.of_node, np) { + for_each_child_of_node_scoped(pdev->dev.of_node, np) { if (!of_property_read_bool(np, "interrupt-controller")) continue; - parent = platform_bus_type.dev_root; - child = of_platform_device_create(np, NULL, parent); + child = of_platform_device_create(np, NULL, NULL); if (!child) return -ENOMEM; @@ -258,11 +250,7 @@ static int mbigen_of_create_domain(struct platform_device *pdev, return -EINVAL; } - domain = platform_msi_create_device_domain(&child->dev, num_pins, - mbigen_write_msg, - &mbigen_domain_ops, - mgn_chip); - if (!domain) + if (!mbigen_create_device_domain(&child->dev, num_pins, mgn_chip)) return -ENOMEM; } @@ -270,10 +258,15 @@ static int mbigen_of_create_domain(struct platform_device *pdev, } #ifdef CONFIG_ACPI +static const struct acpi_device_id mbigen_acpi_match[] = { + { "HISI0152", 0 }, + {} +}; +MODULE_DEVICE_TABLE(acpi, mbigen_acpi_match); + static int mbigen_acpi_create_domain(struct platform_device *pdev, struct mbigen_device *mgn_chip) { - struct irq_domain *domain; u32 num_pins = 0; int ret; @@ -304,11 +297,7 @@ static int mbigen_acpi_create_domain(struct platform_device *pdev, if (ret || num_pins == 0) return -EINVAL; - domain = platform_msi_create_device_domain(&pdev->dev, num_pins, - 
mbigen_write_msg, - &mbigen_domain_ops, - mgn_chip); - if (!domain) + if (!mbigen_create_device_domain(&pdev->dev, num_pins, mgn_chip)) return -ENOMEM; return 0; @@ -352,8 +341,7 @@ static int mbigen_device_probe(struct platform_device *pdev) err = -EINVAL; if (err) { - dev_err(&pdev->dev, "Failed to create mbi-gen@%p irqdomain", - mgn_chip->base); + dev_err(&pdev->dev, "Failed to create mbi-gen irqdomain\n"); return err; } @@ -367,17 +355,12 @@ static const struct of_device_id mbigen_of_match[] = { }; MODULE_DEVICE_TABLE(of, mbigen_of_match); -static const struct acpi_device_id mbigen_acpi_match[] = { - { "HISI0152", 0 }, - {} -}; -MODULE_DEVICE_TABLE(acpi, mbigen_acpi_match); - static struct platform_driver mbigen_platform_driver = { .driver = { .name = "Hisilicon MBIGEN-V2", .of_match_table = mbigen_of_match, .acpi_match_table = ACPI_PTR(mbigen_acpi_match), + .suppress_bind_attrs = true, }, .probe = mbigen_device_probe, }; @@ -386,5 +369,4 @@ module_platform_driver(mbigen_platform_driver); MODULE_AUTHOR("Jun Ma <majun258@huawei.com>"); MODULE_AUTHOR("Yun Wu <wuyun.wu@huawei.com>"); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Hisilicon MBI Generator driver"); +MODULE_DESCRIPTION("HiSilicon MBI Generator driver"); diff --git a/drivers/irqchip/irq-mchp-eic.c b/drivers/irqchip/irq-mchp-eic.c new file mode 100644 index 000000000000..2474fa467a05 --- /dev/null +++ b/drivers/irqchip/irq-mchp-eic.c @@ -0,0 +1,285 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Microchip External Interrupt Controller driver + * + * Copyright (C) 2021 Microchip Technology Inc. and its subsidiaries + * + * Author: Claudiu Beznea <claudiu.beznea@microchip.com> + */ +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/irqchip.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/syscore_ops.h> + +#include <dt-bindings/interrupt-controller/arm-gic.h> + +#define MCHP_EIC_GFCS (0x0) +#define MCHP_EIC_SCFG(x) (0x4 + (x) * 0x4) +#define MCHP_EIC_SCFG_EN BIT(16) +#define MCHP_EIC_SCFG_LVL BIT(9) +#define MCHP_EIC_SCFG_POL BIT(8) + +#define MCHP_EIC_NIRQ (2) + +/* + * struct mchp_eic - EIC private data structure + * @base: base address + * @clk: peripheral clock + * @domain: irq domain + * @irqs: irqs b/w eic and gic + * @scfg: backup for scfg registers (necessary for backup and self-refresh mode) + * @wakeup_source: wakeup source mask + */ +struct mchp_eic { + void __iomem *base; + struct clk *clk; + struct irq_domain *domain; + u32 irqs[MCHP_EIC_NIRQ]; + u32 scfg[MCHP_EIC_NIRQ]; + u32 wakeup_source; +}; + +static struct mchp_eic *eic; + +static void mchp_eic_irq_mask(struct irq_data *d) +{ + unsigned int tmp; + + tmp = readl_relaxed(eic->base + MCHP_EIC_SCFG(d->hwirq)); + tmp &= ~MCHP_EIC_SCFG_EN; + writel_relaxed(tmp, eic->base + MCHP_EIC_SCFG(d->hwirq)); + + irq_chip_mask_parent(d); +} + +static void mchp_eic_irq_unmask(struct irq_data *d) +{ + unsigned int tmp; + + tmp = readl_relaxed(eic->base + MCHP_EIC_SCFG(d->hwirq)); + tmp |= MCHP_EIC_SCFG_EN; + writel_relaxed(tmp, eic->base + MCHP_EIC_SCFG(d->hwirq)); + + irq_chip_unmask_parent(d); +} + +static int mchp_eic_irq_set_type(struct irq_data *d, unsigned int type) +{ + unsigned int parent_irq_type; + unsigned int tmp; + + tmp = readl_relaxed(eic->base + MCHP_EIC_SCFG(d->hwirq)); + tmp &= ~(MCHP_EIC_SCFG_POL | MCHP_EIC_SCFG_LVL); + switch (type) { + case IRQ_TYPE_LEVEL_HIGH: + tmp |= MCHP_EIC_SCFG_POL | MCHP_EIC_SCFG_LVL; + parent_irq_type = IRQ_TYPE_LEVEL_HIGH; + break; + case 
IRQ_TYPE_LEVEL_LOW: + tmp |= MCHP_EIC_SCFG_LVL; + parent_irq_type = IRQ_TYPE_LEVEL_HIGH; + break; + case IRQ_TYPE_EDGE_RISING: + parent_irq_type = IRQ_TYPE_EDGE_RISING; + break; + case IRQ_TYPE_EDGE_FALLING: + tmp |= MCHP_EIC_SCFG_POL; + parent_irq_type = IRQ_TYPE_EDGE_RISING; + break; + default: + return -EINVAL; + } + + writel_relaxed(tmp, eic->base + MCHP_EIC_SCFG(d->hwirq)); + + return irq_chip_set_type_parent(d, parent_irq_type); +} + +static int mchp_eic_irq_set_wake(struct irq_data *d, unsigned int on) +{ + irq_set_irq_wake(eic->irqs[d->hwirq], on); + if (on) + eic->wakeup_source |= BIT(d->hwirq); + else + eic->wakeup_source &= ~BIT(d->hwirq); + + return 0; +} + +static int mchp_eic_irq_suspend(void *data) +{ + unsigned int hwirq; + + for (hwirq = 0; hwirq < MCHP_EIC_NIRQ; hwirq++) + eic->scfg[hwirq] = readl_relaxed(eic->base + + MCHP_EIC_SCFG(hwirq)); + + if (!eic->wakeup_source) + clk_disable_unprepare(eic->clk); + + return 0; +} + +static void mchp_eic_irq_resume(void *data) +{ + unsigned int hwirq; + + if (!eic->wakeup_source) + clk_prepare_enable(eic->clk); + + for (hwirq = 0; hwirq < MCHP_EIC_NIRQ; hwirq++) + writel_relaxed(eic->scfg[hwirq], eic->base + + MCHP_EIC_SCFG(hwirq)); +} + +static const struct syscore_ops mchp_eic_syscore_ops = { + .suspend = mchp_eic_irq_suspend, + .resume = mchp_eic_irq_resume, +}; + +static struct syscore mchp_eic_syscore = { + .ops = &mchp_eic_syscore_ops, +}; + +static struct irq_chip mchp_eic_chip = { + .name = "eic", + .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SET_TYPE_MASKED, + .irq_mask = mchp_eic_irq_mask, + .irq_unmask = mchp_eic_irq_unmask, + .irq_set_type = mchp_eic_irq_set_type, + .irq_ack = irq_chip_ack_parent, + .irq_eoi = irq_chip_eoi_parent, + .irq_retrigger = irq_chip_retrigger_hierarchy, + .irq_set_wake = mchp_eic_irq_set_wake, +}; + +static int mchp_eic_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *data) +{ + struct irq_fwspec *fwspec = data; + struct irq_fwspec parent_fwspec; + irq_hw_number_t hwirq; + unsigned int type; + int ret; + + if (WARN_ON(nr_irqs != 1)) + return -EINVAL; + + ret = irq_domain_translate_twocell(domain, fwspec, &hwirq, &type); + if (ret || hwirq >= MCHP_EIC_NIRQ) + return ret; + + switch (type) { + case IRQ_TYPE_EDGE_RISING: + case IRQ_TYPE_LEVEL_HIGH: + break; + case IRQ_TYPE_EDGE_FALLING: + type = IRQ_TYPE_EDGE_RISING; + break; + case IRQ_TYPE_LEVEL_LOW: + type = IRQ_TYPE_LEVEL_HIGH; + break; + default: + return -EINVAL; + } + + irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &mchp_eic_chip, eic); + + parent_fwspec.fwnode = domain->parent->fwnode; + parent_fwspec.param_count = 3; + parent_fwspec.param[0] = GIC_SPI; + parent_fwspec.param[1] = eic->irqs[hwirq]; + parent_fwspec.param[2] = type; + + return irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec); +} + +static const struct irq_domain_ops mchp_eic_domain_ops = { + .translate = irq_domain_translate_twocell, + .alloc = mchp_eic_domain_alloc, + .free = irq_domain_free_irqs_common, +}; + +static int mchp_eic_probe(struct platform_device *pdev, struct device_node *parent) +{ + struct device_node *node = pdev->dev.of_node; + struct irq_domain *parent_domain = NULL; + int ret, i; + + eic = kzalloc(sizeof(*eic), GFP_KERNEL); + if (!eic) + return -ENOMEM; + + eic->base = of_iomap(node, 0); + if (!eic->base) { + ret = -ENOMEM; + goto free; + } + + parent_domain = irq_find_host(parent); + if (!parent_domain) { + ret = -ENODEV; + goto unmap; + } + + eic->clk = of_clk_get_by_name(node, "pclk"); + if 
+static int mchp_eic_probe(struct platform_device *pdev, struct device_node *parent)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct irq_domain *parent_domain = NULL;
+	int ret, i;
+
+	eic = kzalloc(sizeof(*eic), GFP_KERNEL);
+	if (!eic)
+		return -ENOMEM;
+
+	eic->base = of_iomap(node, 0);
+	if (!eic->base) {
+		ret = -ENOMEM;
+		goto free;
+	}
+
+	parent_domain = irq_find_host(parent);
+	if (!parent_domain) {
+		ret = -ENODEV;
+		goto unmap;
+	}
+
+	eic->clk = of_clk_get_by_name(node, "pclk");
+	if (IS_ERR(eic->clk)) {
+		ret = PTR_ERR(eic->clk);
+		goto unmap;
+	}
+
+	ret = clk_prepare_enable(eic->clk);
+	if (ret)
+		goto unmap;
+
+	for (i = 0; i < MCHP_EIC_NIRQ; i++) {
+		struct of_phandle_args irq;
+
+		/* Disable the line in case it was left enabled. */
+		writel_relaxed(0UL, eic->base + MCHP_EIC_SCFG(i));
+
+		ret = of_irq_parse_one(node, i, &irq);
+		if (ret)
+			goto clk_unprepare;
+
+		if (WARN_ON(irq.args_count != 3)) {
+			ret = -EINVAL;
+			goto clk_unprepare;
+		}
+
+		eic->irqs[i] = irq.args[1];
+	}
+
+	eic->domain = irq_domain_create_hierarchy(parent_domain, 0, MCHP_EIC_NIRQ,
+						  of_fwnode_handle(node), &mchp_eic_domain_ops,
+						  eic);
+	if (!eic->domain) {
+		pr_err("%pOF: Failed to add domain\n", node);
+		ret = -ENODEV;
+		goto clk_unprepare;
+	}
+
+	register_syscore_ops(&mchp_eic_syscore_ops);
+
+	pr_info("%pOF: EIC registered, nr_irqs %u\n", node, MCHP_EIC_NIRQ);
+
+	return 0;
+
+clk_unprepare:
+	clk_disable_unprepare(eic->clk);
+unmap:
+	iounmap(eic->base);
+free:
+	kfree(eic);
+	return ret;
+}
+
+IRQCHIP_PLATFORM_DRIVER_BEGIN(mchp_eic)
+IRQCHIP_MATCH("microchip,sama7g5-eic", mchp_eic_probe)
+IRQCHIP_PLATFORM_DRIVER_END(mchp_eic)
+
+MODULE_DESCRIPTION("Microchip External Interrupt Controller");
+MODULE_AUTHOR("Claudiu Beznea <claudiu.beznea@microchip.com>");
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
index 7b531fd075b8..3fcbb044ae60 100644
--- a/drivers/irqchip/irq-meson-gpio.c
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2015 Endless Mobile, Inc.
  * Author: Carlo Caione <carlo@endlessm.com>
  * Copyright (c) 2016 BayLibre, SAS.
  * Author: Jerome Brunet <jbrunet@baylibre.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -29,7 +16,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 
-#define NUM_CHANNEL 8
+#define MAX_NUM_CHANNEL 64
 #define MAX_INPUT_MUX 256
 
 #define REG_EDGE_POL 0x00
@@ -37,67 +24,254 @@
 #define REG_PIN_47_SEL 0x08
 #define REG_FILTER_SEL 0x0c
 
-#define REG_EDGE_POL_MASK(x)	(BIT(x) | BIT(16 + (x)))
-#define REG_EDGE_POL_EDGE(x)	BIT(x)
-#define REG_EDGE_POL_LOW(x)	BIT(16 + (x))
+/* used on A1-like chips */
+#define REG_PIN_A1_SEL 0x04
+
+/*
+ * Note: The S905X3 datasheet reports that BOTH_EDGE is controlled by
+ * bits 24 to 31. Tests on the actual HW show that these bits are
+ * stuck at 0. Bits 8 to 15 are responsive and have the expected
+ * effect.
+ */ +#define REG_EDGE_POL_EDGE(params, x) BIT((params)->edge_single_offset + (x)) +#define REG_EDGE_POL_LOW(params, x) BIT((params)->pol_low_offset + (x)) +#define REG_BOTH_EDGE(params, x) BIT((params)->edge_both_offset + (x)) +#define REG_EDGE_POL_MASK(params, x) ( \ + REG_EDGE_POL_EDGE(params, x) | \ + REG_EDGE_POL_LOW(params, x) | \ + REG_BOTH_EDGE(params, x)) #define REG_PIN_SEL_SHIFT(x) (((x) % 4) * 8) #define REG_FILTER_SEL_SHIFT(x) ((x) * 4) +struct meson_gpio_irq_controller; +static void meson8_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl, + unsigned int channel, unsigned long hwirq); +static void meson_gpio_irq_init_dummy(struct meson_gpio_irq_controller *ctl); +static void meson_a1_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl, + unsigned int channel, + unsigned long hwirq); +static void meson_a1_gpio_irq_init(struct meson_gpio_irq_controller *ctl); +static int meson8_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl, + unsigned int type, u32 *channel_hwirq); +static int meson_s4_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl, + unsigned int type, u32 *channel_hwirq); + +struct irq_ctl_ops { + void (*gpio_irq_sel_pin)(struct meson_gpio_irq_controller *ctl, + unsigned int channel, unsigned long hwirq); + void (*gpio_irq_init)(struct meson_gpio_irq_controller *ctl); + int (*gpio_irq_set_type)(struct meson_gpio_irq_controller *ctl, + unsigned int type, u32 *channel_hwirq); +}; + struct meson_gpio_irq_params { unsigned int nr_hwirq; + unsigned int nr_channels; + bool support_edge_both; + unsigned int edge_both_offset; + unsigned int edge_single_offset; + unsigned int edge_pol_reg; + unsigned int pol_low_offset; + unsigned int pin_sel_mask; + struct irq_ctl_ops ops; }; +#define INIT_MESON_COMMON(irqs, init, sel, type) \ + .nr_hwirq = irqs, \ + .ops = { \ + .gpio_irq_init = init, \ + .gpio_irq_sel_pin = sel, \ + .gpio_irq_set_type = type, \ + }, + +#define INIT_MESON8_COMMON_DATA(irqs) \ + INIT_MESON_COMMON(irqs, meson_gpio_irq_init_dummy, \ + meson8_gpio_irq_sel_pin, \ + meson8_gpio_irq_set_type) \ + .edge_single_offset = 0, \ + .pol_low_offset = 16, \ + .pin_sel_mask = 0xff, \ + .nr_channels = 8, \ + +#define INIT_MESON_A1_COMMON_DATA(irqs) \ + INIT_MESON_COMMON(irqs, meson_a1_gpio_irq_init, \ + meson_a1_gpio_irq_sel_pin, \ + meson8_gpio_irq_set_type) \ + .support_edge_both = true, \ + .edge_both_offset = 16, \ + .edge_single_offset = 8, \ + .pol_low_offset = 0, \ + .pin_sel_mask = 0x7f, \ + .nr_channels = 8, \ + +#define INIT_MESON_A4_AO_COMMON_DATA(irqs) \ + INIT_MESON_COMMON(irqs, meson_a1_gpio_irq_init, \ + meson_a1_gpio_irq_sel_pin, \ + meson_s4_gpio_irq_set_type) \ + .support_edge_both = true, \ + .edge_both_offset = 0, \ + .edge_single_offset = 12, \ + .edge_pol_reg = 0x8, \ + .pol_low_offset = 0, \ + .pin_sel_mask = 0xff, \ + .nr_channels = 2, \ + +#define INIT_MESON_S4_COMMON_DATA(irqs) \ + INIT_MESON_COMMON(irqs, meson_a1_gpio_irq_init, \ + meson_a1_gpio_irq_sel_pin, \ + meson_s4_gpio_irq_set_type) \ + .support_edge_both = true, \ + .edge_both_offset = 0, \ + .edge_single_offset = 12, \ + .edge_pol_reg = 0x1c, \ + .pol_low_offset = 0, \ + .pin_sel_mask = 0xff, \ + .nr_channels = 12, \ + static const struct meson_gpio_irq_params meson8_params = { - .nr_hwirq = 134, + INIT_MESON8_COMMON_DATA(134) }; static const struct meson_gpio_irq_params meson8b_params = { - .nr_hwirq = 119, + INIT_MESON8_COMMON_DATA(119) }; static const struct meson_gpio_irq_params gxbb_params = { - .nr_hwirq = 133, + INIT_MESON8_COMMON_DATA(133) }; static const struct 
meson_gpio_irq_params gxl_params = { - .nr_hwirq = 110, + INIT_MESON8_COMMON_DATA(110) }; static const struct meson_gpio_irq_params axg_params = { - .nr_hwirq = 100, + INIT_MESON8_COMMON_DATA(100) +}; + +static const struct meson_gpio_irq_params sm1_params = { + INIT_MESON8_COMMON_DATA(100) + .support_edge_both = true, + .edge_both_offset = 8, }; -static const struct of_device_id meson_irq_gpio_matches[] = { +static const struct meson_gpio_irq_params a1_params = { + INIT_MESON_A1_COMMON_DATA(62) +}; + +static const struct meson_gpio_irq_params a4_params = { + INIT_MESON_S4_COMMON_DATA(81) +}; + +static const struct meson_gpio_irq_params a4_ao_params = { + INIT_MESON_A4_AO_COMMON_DATA(8) +}; + +static const struct meson_gpio_irq_params a5_params = { + INIT_MESON_S4_COMMON_DATA(99) +}; + +static const struct meson_gpio_irq_params s4_params = { + INIT_MESON_S4_COMMON_DATA(82) +}; + +static const struct meson_gpio_irq_params s6_params = { + INIT_MESON_S4_COMMON_DATA(100) +}; + +static const struct meson_gpio_irq_params s7_params = { + INIT_MESON_S4_COMMON_DATA(84) +}; + +static const struct meson_gpio_irq_params c3_params = { + INIT_MESON_S4_COMMON_DATA(55) +}; + +static const struct meson_gpio_irq_params t7_params = { + INIT_MESON_S4_COMMON_DATA(157) +}; + +static const struct of_device_id meson_irq_gpio_matches[] __maybe_unused = { { .compatible = "amlogic,meson8-gpio-intc", .data = &meson8_params }, { .compatible = "amlogic,meson8b-gpio-intc", .data = &meson8b_params }, { .compatible = "amlogic,meson-gxbb-gpio-intc", .data = &gxbb_params }, { .compatible = "amlogic,meson-gxl-gpio-intc", .data = &gxl_params }, { .compatible = "amlogic,meson-axg-gpio-intc", .data = &axg_params }, + { .compatible = "amlogic,meson-g12a-gpio-intc", .data = &axg_params }, + { .compatible = "amlogic,meson-sm1-gpio-intc", .data = &sm1_params }, + { .compatible = "amlogic,meson-a1-gpio-intc", .data = &a1_params }, + { .compatible = "amlogic,meson-s4-gpio-intc", .data = &s4_params }, + { .compatible = "amlogic,a4-gpio-ao-intc", .data = &a4_ao_params }, + { .compatible = "amlogic,a4-gpio-intc", .data = &a4_params }, + { .compatible = "amlogic,a5-gpio-intc", .data = &a5_params }, + { .compatible = "amlogic,s6-gpio-intc", .data = &s6_params }, + { .compatible = "amlogic,s7-gpio-intc", .data = &s7_params }, + { .compatible = "amlogic,s7d-gpio-intc", .data = &s7_params }, + { .compatible = "amlogic,c3-gpio-intc", .data = &c3_params }, + { .compatible = "amlogic,t7-gpio-intc", .data = &t7_params }, { } }; struct meson_gpio_irq_controller { - unsigned int nr_hwirq; + const struct meson_gpio_irq_params *params; void __iomem *base; - u32 channel_irqs[NUM_CHANNEL]; - DECLARE_BITMAP(channel_map, NUM_CHANNEL); - spinlock_t lock; + u32 channel_irqs[MAX_NUM_CHANNEL]; + DECLARE_BITMAP(channel_map, MAX_NUM_CHANNEL); + raw_spinlock_t lock; }; static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl, unsigned int reg, u32 mask, u32 val) { + unsigned long flags; u32 tmp; + raw_spin_lock_irqsave(&ctl->lock, flags); + tmp = readl_relaxed(ctl->base + reg); tmp &= ~mask; tmp |= val; writel_relaxed(tmp, ctl->base + reg); + + raw_spin_unlock_irqrestore(&ctl->lock, flags); } -static unsigned int meson_gpio_irq_channel_to_reg(unsigned int channel) +static void meson_gpio_irq_init_dummy(struct meson_gpio_irq_controller *ctl) { - return (channel < 4) ? 
REG_PIN_03_SEL : REG_PIN_47_SEL;
+}
+
+static void meson8_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl,
+				    unsigned int channel, unsigned long hwirq)
+{
+	unsigned int reg_offset;
+	unsigned int bit_offset;
+
+	reg_offset = (channel < 4) ? REG_PIN_03_SEL : REG_PIN_47_SEL;
+	bit_offset = REG_PIN_SEL_SHIFT(channel);
+
+	meson_gpio_irq_update_bits(ctl, reg_offset,
+				   ctl->params->pin_sel_mask << bit_offset,
+				   hwirq << bit_offset);
+}
+
+static void meson_a1_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl,
+				      unsigned int channel,
+				      unsigned long hwirq)
+{
+	unsigned int reg_offset;
+	unsigned int bit_offset;
+
+	bit_offset = ((channel % 2) == 0) ? 0 : 16;
+	reg_offset = REG_PIN_A1_SEL + ((channel / 2) << 2);
+
+	meson_gpio_irq_update_bits(ctl, reg_offset,
+				   ctl->params->pin_sel_mask << bit_offset,
+				   hwirq << bit_offset);
+}
+
+/* On A1 and later chips there is a global switch to enable/disable the irq lines */
+static void meson_a1_gpio_irq_init(struct meson_gpio_irq_controller *ctl)
+{
+	meson_gpio_irq_update_bits(ctl, REG_EDGE_POL, BIT(31), BIT(31));
 }
 
 static int
@@ -105,14 +279,15 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
 			       unsigned long hwirq,
 			       u32 **channel_hwirq)
 {
-	unsigned int reg, idx;
+	unsigned long flags;
+	unsigned int idx;
 
-	spin_lock(&ctl->lock);
+	raw_spin_lock_irqsave(&ctl->lock, flags);
 
 	/* Find a free channel */
-	idx = find_first_zero_bit(ctl->channel_map, NUM_CHANNEL);
-	if (idx >= NUM_CHANNEL) {
-		spin_unlock(&ctl->lock);
+	idx = find_first_zero_bit(ctl->channel_map, ctl->params->nr_channels);
+	if (idx >= ctl->params->nr_channels) {
+		raw_spin_unlock_irqrestore(&ctl->lock, flags);
 		pr_err("No channel available\n");
 		return -ENOSPC;
 	}
@@ -120,25 +295,22 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
 	/* Mark the channel as used */
 	set_bit(idx, ctl->channel_map);
 
+	raw_spin_unlock_irqrestore(&ctl->lock, flags);
+
 	/*
 	 * Setup the mux of the channel to route the signal of the pad
 	 * to the appropriate input of the GIC
 	 */
-	reg = meson_gpio_irq_channel_to_reg(idx);
-	meson_gpio_irq_update_bits(ctl, reg,
-				   0xff << REG_PIN_SEL_SHIFT(idx),
-				   hwirq << REG_PIN_SEL_SHIFT(idx));
+	ctl->params->ops.gpio_irq_sel_pin(ctl, idx, hwirq);
 
 	/*
 	 * Get the hwirq number assigned to this channel through
-	 * a pointer the channel_irq table. The added benifit of this
+	 * a pointer to the channel_irq table. The added benefit of this
 	 * method is that we can also retrieve the channel index with
 	 * it, using the table base.
 	 */
 	*channel_hwirq = &(ctl->channel_irqs[idx]);
 
-	spin_unlock(&ctl->lock);
-
 	pr_debug("hwirq %lu assigned to channel %d - irq %u\n",
 		 hwirq, idx, **channel_hwirq);
 
@@ -162,12 +334,12 @@ meson_gpio_irq_release_channel(struct meson_gpio_irq_controller *ctl,
 	clear_bit(idx, ctl->channel_map);
 }
 
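+/*
+ * A sketch of the channel_hwirq trick described above (illustrative,
+ * assuming the obvious implementation of meson_gpio_irq_get_channel_idx()):
+ * since *channel_hwirq points into ctl->channel_irqs[], the channel index
+ * falls out of plain pointer arithmetic:
+ *
+ *	unsigned int idx = channel_hwirq - ctl->channel_irqs;
+ */
+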
-static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl,
-				     unsigned int type,
-				     u32 *channel_hwirq)
+static int meson8_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl,
+				    unsigned int type, u32 *channel_hwirq)
 {
-	u32 val = 0;
+	const struct meson_gpio_irq_params *params = ctl->params;
 	unsigned int idx;
+	u32 val = 0;
 
 	idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq);
 
@@ -180,25 +352,74 @@ static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl,
 	 */
 	type &= IRQ_TYPE_SENSE_MASK;
 
-	if (type == IRQ_TYPE_EDGE_BOTH)
-		return -EINVAL;
-
-	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
-		val |= REG_EDGE_POL_EDGE(idx);
+	/*
+	 * Newer controllers support the EDGE_BOTH trigger. This setting
+	 * takes precedence over the other edge/polarity settings.
+	 */
+	if (type == IRQ_TYPE_EDGE_BOTH) {
+		if (!params->support_edge_both)
+			return -EINVAL;
 
-	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING))
-		val |= REG_EDGE_POL_LOW(idx);
+		val |= REG_BOTH_EDGE(params, idx);
+	} else {
+		if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
+			val |= REG_EDGE_POL_EDGE(params, idx);
 
-	spin_lock(&ctl->lock);
+		if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING))
+			val |= REG_EDGE_POL_LOW(params, idx);
+	}
 
 	meson_gpio_irq_update_bits(ctl, REG_EDGE_POL,
-				   REG_EDGE_POL_MASK(idx), val);
-
-	spin_unlock(&ctl->lock);
+				   REG_EDGE_POL_MASK(params, idx), val);
 
 	return 0;
 }
 
+/*
+ * GPIO irq related registers for s4:
+ * -PADCTRL_GPIO_IRQ_CTRL0
+ * bit[31]: enable/disable all the irq lines
+ * bit[12-23]: single edge trigger
+ * bit[0-11]: polarity trigger
+ *
+ * -PADCTRL_GPIO_IRQ_CTRL[X]
+ * bit[0-6]: 7 bits to choose the gpio source for irq line 2*[X] - 2
+ * bit[16-22]: 7 bits to choose the gpio source for irq line 2*[X] - 1
+ * where X = 1-6
+ *
+ * -PADCTRL_GPIO_IRQ_CTRL[7]
+ * bit[0-11]: both edge trigger
+ */
+static int meson_s4_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl,
+				      unsigned int type, u32 *channel_hwirq)
+{
+	const struct meson_gpio_irq_params *params = ctl->params;
+	unsigned int idx;
+	u32 val = 0;
+
+	idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq);
+
+	type &= IRQ_TYPE_SENSE_MASK;
+
+	meson_gpio_irq_update_bits(ctl, params->edge_pol_reg, BIT(idx), 0);
+
+	if (type == IRQ_TYPE_EDGE_BOTH) {
+		val = BIT(ctl->params->edge_both_offset + idx);
+		meson_gpio_irq_update_bits(ctl, params->edge_pol_reg, val, val);
+		return 0;
+	}
+
+	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING))
+		val |= BIT(ctl->params->pol_low_offset + idx);
+
+	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
+		val |= BIT(ctl->params->edge_single_offset + idx);
+
+	meson_gpio_irq_update_bits(ctl, params->edge_pol_reg,
+				   BIT(idx) | BIT(12 + idx), val);
+	return 0;
+}
+
 static unsigned int meson_gpio_irq_type_output(unsigned int type)
 {
 	unsigned int sense = type & IRQ_TYPE_SENSE_MASK;
@@ -211,7 +432,7 @@ static unsigned int meson_gpio_irq_type_output(unsigned int type)
 	 */
 	if (sense & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
 		type |= IRQ_TYPE_LEVEL_HIGH;
-	else if (sense & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
+	else
 		type |= IRQ_TYPE_EDGE_RISING;
 
 	return type;
@@ -223,7 +444,7 @@ static int meson_gpio_irq_set_type(struct irq_data *data, unsigned int type)
 	u32 *channel_hwirq = irq_data_get_irq_chip_data(data);
 	int ret;
 
-	ret = meson_gpio_irq_type_setup(ctl, type, channel_hwirq);
+	ret = ctl->params->ops.gpio_irq_set_type(ctl, type, channel_hwirq);
 	if (ret)
 		return ret;
 
@@ -336,36 +557,35 @@ static const struct irq_domain_ops meson_gpio_irq_domain_ops = {
 	.translate	= meson_gpio_irq_domain_translate,
 };
 
-static int __init meson_gpio_irq_parse_dt(struct device_node *node,
-					  struct meson_gpio_irq_controller *ctl)
+static int meson_gpio_irq_parse_dt(struct device_node *node, struct meson_gpio_irq_controller *ctl)
 {
 	const struct of_device_id *match;
-	const struct meson_gpio_irq_params *params;
 	int ret;
 
 	match = of_match_node(meson_irq_gpio_matches, node);
 	if (!match)
 		return -ENODEV;
 
-	params = match->data;
-	ctl->nr_hwirq = params->nr_hwirq;
+	ctl->params = match->data;
 
 	ret = of_property_read_variable_u32_array(node,
 						  "amlogic,channel-interrupts",
						  ctl->channel_irqs,
-						  NUM_CHANNEL,
-						  NUM_CHANNEL);
+						  ctl->params->nr_channels,
+						  ctl->params->nr_channels);
 	if (ret < 0)
{ - pr_err("can't get %d channel interrupts\n", NUM_CHANNEL); + pr_err("can't get %d channel interrupts\n", ctl->params->nr_channels); return ret; } + ctl->params->ops.gpio_irq_init(ctl); + return 0; } -static int __init meson_gpio_irq_of_init(struct device_node *node, - struct device_node *parent) +static int meson_gpio_irq_probe(struct platform_device *pdev, struct device_node *parent) { + struct device_node *node = pdev->dev.of_node; struct irq_domain *domain, *parent_domain; struct meson_gpio_irq_controller *ctl; int ret; @@ -385,7 +605,7 @@ static int __init meson_gpio_irq_of_init(struct device_node *node, if (!ctl) return -ENOMEM; - spin_lock_init(&ctl->lock); + raw_spin_lock_init(&ctl->lock); ctl->base = of_iomap(node, 0); if (!ctl->base) { @@ -397,8 +617,9 @@ static int __init meson_gpio_irq_of_init(struct device_node *node, if (ret) goto free_channel_irqs; - domain = irq_domain_create_hierarchy(parent_domain, 0, ctl->nr_hwirq, - of_node_to_fwnode(node), + domain = irq_domain_create_hierarchy(parent_domain, 0, + ctl->params->nr_hwirq, + of_fwnode_handle(node), &meson_gpio_irq_domain_ops, ctl); if (!domain) { @@ -408,7 +629,7 @@ static int __init meson_gpio_irq_of_init(struct device_node *node, } pr_info("%d to %d gpio interrupt mux initialized\n", - ctl->nr_hwirq, NUM_CHANNEL); + ctl->params->nr_hwirq, ctl->params->nr_channels); return 0; @@ -420,5 +641,10 @@ free_ctl: return ret; } -IRQCHIP_DECLARE(meson_gpio_intc, "amlogic,meson-gpio-intc", - meson_gpio_irq_of_init); +IRQCHIP_PLATFORM_DRIVER_BEGIN(meson_gpio_intc) +IRQCHIP_MATCH("amlogic,meson-gpio-intc", meson_gpio_irq_probe) +IRQCHIP_PLATFORM_DRIVER_END(meson_gpio_intc) + +MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>"); +MODULE_DESCRIPTION("Meson GPIO Interrupt Multiplexer driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/irqchip/irq-mips-cpu.c b/drivers/irqchip/irq-mips-cpu.c index 66f97fde13d8..ac784ef3ed4b 100644 --- a/drivers/irqchip/irq-mips-cpu.c +++ b/drivers/irqchip/irq-mips-cpu.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2001 MontaVista Software Inc. * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net @@ -7,11 +8,6 @@ * Author: Maciej W. Rozycki <macro@mips.com> * * This file define the irq handler for MIPS CPU interrupts. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
*/ /* @@ -131,7 +127,6 @@ static struct irq_chip mips_mt_cpu_irq_controller = { asmlinkage void __weak plat_irq_dispatch(void) { unsigned long pending = read_c0_cause() & read_c0_status() & ST0_IM; - unsigned int virq; int irq; if (!pending) { @@ -141,12 +136,15 @@ asmlinkage void __weak plat_irq_dispatch(void) pending >>= CAUSEB_IP; while (pending) { + struct irq_domain *d; + irq = fls(pending) - 1; if (IS_ENABLED(CONFIG_GENERIC_IRQ_IPI) && irq < 2) - virq = irq_linear_revmap(ipi_domain, irq); + d = ipi_domain; else - virq = irq_linear_revmap(irq_domain, irq); - do_IRQ(virq); + d = irq_domain; + + do_domain_IRQ(d, irq); pending &= ~BIT(irq); } } @@ -201,6 +199,13 @@ static int mips_cpu_ipi_alloc(struct irq_domain *domain, unsigned int virq, if (ret) return ret; + ret = irq_domain_set_hwirq_and_chip(domain->parent, virq + i, hwirq, + &mips_mt_cpu_irq_controller, + NULL); + + if (ret) + return ret; + ret = irq_set_irq_type(virq + i, IRQ_TYPE_LEVEL_HIGH); if (ret) return ret; @@ -233,11 +238,9 @@ static void mips_cpu_register_ipi_domain(struct device_node *of_node) struct cpu_ipi_domain_state *ipi_domain_state; ipi_domain_state = kzalloc(sizeof(*ipi_domain_state), GFP_KERNEL); - ipi_domain = irq_domain_add_hierarchy(irq_domain, - IRQ_DOMAIN_FLAG_IPI_SINGLE, - 2, of_node, - &mips_cpu_ipi_chip_ops, - ipi_domain_state); + ipi_domain = irq_domain_create_hierarchy(irq_domain, IRQ_DOMAIN_FLAG_IPI_SINGLE, 2, + of_fwnode_handle(of_node), + &mips_cpu_ipi_chip_ops, ipi_domain_state); if (!ipi_domain) panic("Failed to add MIPS CPU IPI domain"); irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI); @@ -255,9 +258,8 @@ static void __init __mips_cpu_irq_init(struct device_node *of_node) clear_c0_status(ST0_IM); clear_c0_cause(CAUSEF_IP); - irq_domain = irq_domain_add_legacy(of_node, 8, MIPS_CPU_IRQ_BASE, 0, - &mips_cpu_intc_irq_domain_ops, - NULL); + irq_domain = irq_domain_create_legacy(of_fwnode_handle(of_node), 8, MIPS_CPU_IRQ_BASE, 0, + &mips_cpu_intc_irq_domain_ops, NULL); if (!irq_domain) panic("Failed to add irqdomain for MIPS CPU"); diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index d32268cc1174..19a57c5e2b2e 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -9,6 +9,7 @@ #define pr_fmt(fmt) "irq-mips-gic: " fmt +#include <linux/bitfield.h> #include <linux/bitmap.h> #include <linux/clocksource.h> #include <linux/cpuhotplug.h> @@ -16,6 +17,7 @@ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqchip.h> +#include <linux/irqdomain.h> #include <linux/of_address.h> #include <linux/percpu.h> #include <linux/sched.h> @@ -46,23 +48,105 @@ void __iomem *mips_gic_base; -DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks); +static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks); -static DEFINE_SPINLOCK(gic_lock); +static DEFINE_RAW_SPINLOCK(gic_lock); static struct irq_domain *gic_irq_domain; -static struct irq_domain *gic_ipi_domain; static int gic_shared_intrs; static unsigned int gic_cpu_pin; -static unsigned int timer_cpu_pin; static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller; + +#ifdef CONFIG_GENERIC_IRQ_IPI static DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS); static DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS); +#endif /* CONFIG_GENERIC_IRQ_IPI */ static struct gic_all_vpes_chip_data { u32 map; bool mask; } gic_all_vpes_chip_data[GIC_NUM_LOCAL_INTRS]; +static int __gic_with_next_online_cpu(int prev) +{ + unsigned int cpu; + + /* Discover the next online CPU */ 
+	cpu = cpumask_next(prev, cpu_online_mask);
+
+	/* If there isn't one, we're done */
+	if (cpu >= nr_cpu_ids)
+		return cpu;
+
+	/*
+	 * Move the access lock to the next CPU's GIC local register block.
+	 *
+	 * In the single cluster case, set GIC_VL_OTHER; since the caller
+	 * holds gic_lock nothing can clobber the written value. If the CPU
+	 * lives in another cluster, the CM redirect block has to be locked
+	 * instead, which gic_unlock_cluster() releases at the end of each
+	 * loop iteration.
+	 */
+	if (mips_cps_multicluster_cpus())
+		mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+	else
+		write_gic_vl_other(mips_cm_vp_id(cpu));
+
+	return cpu;
+}
+
+static inline void gic_unlock_cluster(void)
+{
+	if (mips_cps_multicluster_cpus())
+		mips_cm_unlock_other();
+}
+
+/**
+ * for_each_online_cpu_gic() - Iterate over online CPUs, access local registers
+ * @cpu: An integer variable to hold the current CPU number
+ * @gic_lock: A pointer to the raw spinlock used as a guard
+ *
+ * Iterate over online CPUs & configure the other/redirect register region to
+ * access each CPU's GIC local register block, which can be accessed from the
+ * loop body using read_gic_vo_*() or write_gic_vo_*() accessor functions or
+ * their derivatives.
+ */
+#define for_each_online_cpu_gic(cpu, gic_lock)		\
+	guard(raw_spinlock_irqsave)(gic_lock);		\
+	for ((cpu) = __gic_with_next_online_cpu(-1);	\
+	     (cpu) < nr_cpu_ids;			\
+	     gic_unlock_cluster(),			\
+	     (cpu) = __gic_with_next_online_cpu(cpu))
+
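+/*
+ * Usage sketch (illustrative only): this is exactly how the driver masks a
+ * local interrupt on every online CPU in gic_mask_local_irq_all_vpes()
+ * further below:
+ *
+ *	int cpu;
+ *
+ *	for_each_online_cpu_gic(cpu, &gic_lock)
+ *		write_gic_vo_rmask(BIT(intr));
+ */
+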
+/**
+ * gic_irq_lock_cluster() - Lock redirect block access to IRQ's cluster
+ * @d: struct irq_data corresponding to the interrupt we're interested in
+ *
+ * Locks redirect register block access to the global register block of the GIC
+ * within the remote cluster that the IRQ corresponding to @d is affine to,
+ * returning true when this redirect block setup & locking has been performed.
+ *
+ * If @d is affine to the local cluster then no locking is performed and this
+ * function will return false, indicating to the caller that it should access
+ * the local cluster's registers without the overhead of indirection through
+ * the redirect block.
+ *
+ * In summary, if this function returns true then the caller should access GIC
+ * registers using redirect register block accessors & then call
+ * mips_cm_unlock_other() when done. If this function returns false then the
+ * caller should trivially access GIC registers in the local cluster.
+ *
+ * Returns true if locking performed, else false.
+ */
+static bool gic_irq_lock_cluster(struct irq_data *d)
+{
+	unsigned int cpu, cl;
+
+	cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
+	BUG_ON(cpu >= NR_CPUS);
+
+	cl = cpu_cluster(&cpu_data[cpu]);
+	if (cl == cpu_cluster(&current_cpu_data))
+		return false;
+	if (mips_cps_numcores(cl) == 0)
+		return false;
+	mips_cm_lock_other(cl, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+	return true;
+}
+
 static void gic_clear_pcpu_masks(unsigned int intr)
 {
 	unsigned int i;
@@ -109,7 +193,12 @@ static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
 {
 	irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));
 
-	write_gic_wedge(GIC_WEDGE_RW | hwirq);
+	if (gic_irq_lock_cluster(d)) {
+		write_gic_redir_wedge(GIC_WEDGE_RW | hwirq);
+		mips_cm_unlock_other();
+	} else {
+		write_gic_wedge(GIC_WEDGE_RW | hwirq);
+	}
 }
 
 int gic_get_c0_compare_int(void)
@@ -147,7 +236,7 @@ int gic_get_c0_fdc_int(void)
 
 static void gic_handle_shared_int(bool chained)
 {
-	unsigned int intr, virq;
+	unsigned int intr;
 	unsigned long *pcpu_mask;
 	DECLARE_BITMAP(pending, GIC_MAX_INTRS);
 
@@ -164,12 +253,12 @@ static void gic_handle_shared_int(bool chained)
 	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);
 
 	for_each_set_bit(intr, pending, gic_shared_intrs) {
-		virq = irq_linear_revmap(gic_irq_domain,
-					 GIC_SHARED_TO_HWIRQ(intr));
 		if (chained)
-			generic_handle_irq(virq);
+			generic_handle_domain_irq(gic_irq_domain,
+						  GIC_SHARED_TO_HWIRQ(intr));
 		else
-			do_IRQ(virq);
+			do_domain_IRQ(gic_irq_domain,
+				      GIC_SHARED_TO_HWIRQ(intr));
 	}
 }
 
@@ -177,7 +266,13 @@ static void gic_mask_irq(struct irq_data *d)
 {
 	unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
 
-	write_gic_rmask(intr);
+	if (gic_irq_lock_cluster(d)) {
+		write_gic_redir_rmask(intr);
+		mips_cm_unlock_other();
+	} else {
+		write_gic_rmask(intr);
+	}
+
 	gic_clear_pcpu_masks(intr);
 }
 
@@ -186,7 +281,12 @@ static void gic_unmask_irq(struct irq_data *d)
 	unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
 	unsigned int cpu;
 
-	write_gic_smask(intr);
+	if (gic_irq_lock_cluster(d)) {
+		write_gic_redir_smask(intr);
+		mips_cm_unlock_other();
+	} else {
+		write_gic_smask(intr);
+	}
 
 	gic_clear_pcpu_masks(intr);
 	cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
@@ -197,7 +297,12 @@ static void gic_ack_irq(struct irq_data *d)
 {
 	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
 
-	write_gic_wedge(irq);
+	if (gic_irq_lock_cluster(d)) {
+		write_gic_redir_wedge(irq);
+		mips_cm_unlock_other();
+	} else {
+		write_gic_wedge(irq);
+	}
 }
 
 static int gic_set_type(struct irq_data *d, unsigned int type)
@@ -207,7 +312,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
 
 	irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
 
-	spin_lock_irqsave(&gic_lock, flags);
+	raw_spin_lock_irqsave(&gic_lock, flags);
 	switch (type & IRQ_TYPE_SENSE_MASK) {
 	case IRQ_TYPE_EDGE_FALLING:
 		pol = GIC_POL_FALLING_EDGE;
@@ -237,9 +342,16 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
 		break;
 	}
 
-	change_gic_pol(irq, pol);
-	change_gic_trig(irq, trig);
-	change_gic_dual(irq, dual);
+	if (gic_irq_lock_cluster(d)) {
+		change_gic_redir_pol(irq, pol);
+		change_gic_redir_trig(irq, trig);
+		change_gic_redir_dual(irq, dual);
+		mips_cm_unlock_other();
+	} else {
+		change_gic_pol(irq, pol);
+		change_gic_trig(irq, trig);
+		change_gic_dual(irq, dual);
+	}
 
 	if (trig == GIC_TRIG_EDGE)
 		irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
@@ -247,7 +359,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
 	else
 		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
 						 handle_level_irq,
NULL); - spin_unlock_irqrestore(&gic_lock, flags); + raw_spin_unlock_irqrestore(&gic_lock, flags); return 0; } @@ -257,26 +369,77 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force) { unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq); + unsigned int cpu, cl, old_cpu, old_cl; unsigned long flags; - unsigned int cpu; - cpu = cpumask_first_and(cpumask, cpu_online_mask); + /* + * The GIC specifies that we can only route an interrupt to one VP(E), + * ie. CPU in Linux parlance, at a time. Therefore we always route to + * the first forced or online CPU in the mask. + */ + if (force) + cpu = cpumask_first(cpumask); + else + cpu = cpumask_first_and(cpumask, cpu_online_mask); + if (cpu >= NR_CPUS) return -EINVAL; - /* Assumption : cpumask refers to a single CPU */ - spin_lock_irqsave(&gic_lock, flags); + old_cpu = cpumask_first(irq_data_get_effective_affinity_mask(d)); + old_cl = cpu_cluster(&cpu_data[old_cpu]); + cl = cpu_cluster(&cpu_data[cpu]); - /* Re-route this IRQ */ - write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu))); + raw_spin_lock_irqsave(&gic_lock, flags); - /* Update the pcpu_masks */ - gic_clear_pcpu_masks(irq); - if (read_gic_mask(irq)) - set_bit(irq, per_cpu_ptr(pcpu_masks, cpu)); + /* + * If we're moving affinity between clusters, stop routing the + * interrupt to any VP(E) in the old cluster. + */ + if (cl != old_cl) { + if (gic_irq_lock_cluster(d)) { + write_gic_redir_map_vp(irq, 0); + mips_cm_unlock_other(); + } else { + write_gic_map_vp(irq, 0); + } + } + /* + * Update effective affinity - after this gic_irq_lock_cluster() will + * begin operating on the new cluster. + */ irq_data_update_effective_affinity(d, cpumask_of(cpu)); - spin_unlock_irqrestore(&gic_lock, flags); + + /* + * If we're moving affinity between clusters, configure the interrupt + * trigger type in the new cluster. 
+ */ + if (cl != old_cl) + gic_set_type(d, irqd_get_trigger_type(d)); + + /* Route the interrupt to its new VP(E) */ + if (gic_irq_lock_cluster(d)) { + write_gic_redir_map_pin(irq, + GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin); + write_gic_redir_map_vp(irq, BIT(mips_cm_vp_id(cpu))); + + /* Update the pcpu_masks */ + gic_clear_pcpu_masks(irq); + if (read_gic_redir_mask(irq)) + set_bit(irq, per_cpu_ptr(pcpu_masks, cpu)); + + mips_cm_unlock_other(); + } else { + write_gic_map_pin(irq, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin); + write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu))); + + /* Update the pcpu_masks */ + gic_clear_pcpu_masks(irq); + if (read_gic_mask(irq)) + set_bit(irq, per_cpu_ptr(pcpu_masks, cpu)); + } + + raw_spin_unlock_irqrestore(&gic_lock, flags); return IRQ_SET_MASK_OK; } @@ -307,7 +470,7 @@ static struct irq_chip gic_edge_irq_controller = { static void gic_handle_local_int(bool chained) { unsigned long pending, masked; - unsigned int intr, virq; + unsigned int intr; pending = read_gic_vl_pend(); masked = read_gic_vl_mask(); @@ -315,12 +478,12 @@ static void gic_handle_local_int(bool chained) bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS); for_each_set_bit(intr, &pending, GIC_NUM_LOCAL_INTRS) { - virq = irq_linear_revmap(gic_irq_domain, - GIC_LOCAL_TO_HWIRQ(intr)); if (chained) - generic_handle_irq(virq); + generic_handle_domain_irq(gic_irq_domain, + GIC_LOCAL_TO_HWIRQ(intr)); else - do_IRQ(virq); + do_domain_IRQ(gic_irq_domain, + GIC_LOCAL_TO_HWIRQ(intr)); } } @@ -347,57 +510,66 @@ static struct irq_chip gic_local_irq_controller = { static void gic_mask_local_irq_all_vpes(struct irq_data *d) { struct gic_all_vpes_chip_data *cd; - unsigned long flags; int intr, cpu; + if (!mips_cps_multicluster_cpus()) + return; + intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); cd = irq_data_get_irq_chip_data(d); cd->mask = false; - spin_lock_irqsave(&gic_lock, flags); - for_each_online_cpu(cpu) { - write_gic_vl_other(mips_cm_vp_id(cpu)); + for_each_online_cpu_gic(cpu, &gic_lock) write_gic_vo_rmask(BIT(intr)); - } - spin_unlock_irqrestore(&gic_lock, flags); } static void gic_unmask_local_irq_all_vpes(struct irq_data *d) { struct gic_all_vpes_chip_data *cd; - unsigned long flags; int intr, cpu; + if (!mips_cps_multicluster_cpus()) + return; + intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); cd = irq_data_get_irq_chip_data(d); cd->mask = true; - spin_lock_irqsave(&gic_lock, flags); - for_each_online_cpu(cpu) { - write_gic_vl_other(mips_cm_vp_id(cpu)); + for_each_online_cpu_gic(cpu, &gic_lock) write_gic_vo_smask(BIT(intr)); - } - spin_unlock_irqrestore(&gic_lock, flags); } -static void gic_all_vpes_irq_cpu_online(struct irq_data *d) +static void gic_all_vpes_irq_cpu_online(void) { - struct gic_all_vpes_chip_data *cd; - unsigned int intr; + static const unsigned int local_intrs[] = { + GIC_LOCAL_INT_TIMER, + GIC_LOCAL_INT_PERFCTR, + GIC_LOCAL_INT_FDC, + }; + unsigned long flags; + int i; - intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); - cd = irq_data_get_irq_chip_data(d); + raw_spin_lock_irqsave(&gic_lock, flags); + + for (i = 0; i < ARRAY_SIZE(local_intrs); i++) { + unsigned int intr = local_intrs[i]; + struct gic_all_vpes_chip_data *cd; - write_gic_vl_map(intr, cd->map); - if (cd->mask) - write_gic_vl_smask(BIT(intr)); + if (!gic_local_irq_is_routable(intr)) + continue; + cd = &gic_all_vpes_chip_data[intr]; + write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map); + if (cd->mask) + write_gic_vl_smask(BIT(intr)); + } + + raw_spin_unlock_irqrestore(&gic_lock, flags); } static struct irq_chip gic_all_vpes_local_irq_controller = { 
.name = "MIPS GIC Local", .irq_mask = gic_mask_local_irq_all_vpes, .irq_unmask = gic_unmask_local_irq_all_vpes, - .irq_cpu_online = gic_all_vpes_irq_cpu_online, }; static void __gic_irq_dispatch(void) @@ -420,12 +592,22 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq, unsigned long flags; data = irq_get_irq_data(virq); - - spin_lock_irqsave(&gic_lock, flags); - write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin); - write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu))); irq_data_update_effective_affinity(data, cpumask_of(cpu)); - spin_unlock_irqrestore(&gic_lock, flags); + + raw_spin_lock_irqsave(&gic_lock, flags); + + /* Route the interrupt to its VP(E) */ + if (gic_irq_lock_cluster(data)) { + write_gic_redir_map_pin(intr, + GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin); + write_gic_redir_map_vp(intr, BIT(mips_cm_vp_id(cpu))); + mips_cm_unlock_other(); + } else { + write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin); + write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu))); + } + + raw_spin_unlock_irqrestore(&gic_lock, flags); return 0; } @@ -453,15 +635,16 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq) { struct gic_all_vpes_chip_data *cd; - unsigned long flags; unsigned int intr; int err, cpu; u32 map; if (hwirq >= GIC_SHARED_HWIRQ_BASE) { +#ifdef CONFIG_GENERIC_IRQ_IPI /* verify that shared irqs don't conflict with an IPI irq */ if (test_bit(GIC_HWIRQ_TO_SHARED(hwirq), ipi_resrv)) return -EBUSY; +#endif /* CONFIG_GENERIC_IRQ_IPI */ err = irq_domain_set_hwirq_and_chip(d, virq, hwirq, &gic_level_irq_controller, @@ -476,11 +659,12 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq, intr = GIC_HWIRQ_TO_LOCAL(hwirq); map = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin; + /* + * If adding support for more per-cpu interrupts, keep the + * array in gic_all_vpes_irq_cpu_online() in sync. + */ switch (intr) { case GIC_LOCAL_INT_TIMER: - /* CONFIG_MIPS_CMP workaround (see __gic_init) */ - map = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin; - /* fall-through */ case GIC_LOCAL_INT_PERFCTR: case GIC_LOCAL_INT_FDC: /* @@ -514,12 +698,10 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq, if (!gic_local_irq_is_routable(intr)) return -EPERM; - spin_lock_irqsave(&gic_lock, flags); - for_each_online_cpu(cpu) { - write_gic_vl_other(mips_cm_vp_id(cpu)); - write_gic_vo_map(intr, map); + if (mips_cps_multicluster_cpus()) { + for_each_online_cpu_gic(cpu, &gic_lock) + write_gic_vo_map(mips_gic_vx_map_reg(intr), map); } - spin_unlock_irqrestore(&gic_lock, flags); return 0; } @@ -538,7 +720,7 @@ static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq, return gic_irq_domain_map(d, virq, hwirq); } -void gic_irq_domain_free(struct irq_domain *d, unsigned int virq, +static void gic_irq_domain_free(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs) { } @@ -550,6 +732,8 @@ static const struct irq_domain_ops gic_irq_domain_ops = { .map = gic_irq_domain_map, }; +#ifdef CONFIG_GENERIC_IRQ_IPI + static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, @@ -600,6 +784,9 @@ static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq, if (ret) goto error; + /* Set affinity to cpu. 
*/ + irq_data_update_effective_affinity(irq_get_irq_data(virq + i), + cpumask_of(cpu)); ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING); if (ret) goto error; @@ -617,8 +804,8 @@ error: return ret; } -void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq, - unsigned int nr_irqs) +static void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq, + unsigned int nr_irqs) { irq_hw_number_t base_hwirq; struct irq_data *data; @@ -631,8 +818,8 @@ void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq, bitmap_set(ipi_available, base_hwirq, nr_irqs); } -int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node, - enum irq_domain_bus_token bus_token) +static int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node, + enum irq_domain_bus_token bus_token) { bool is_ipi; @@ -653,6 +840,48 @@ static const struct irq_domain_ops gic_ipi_domain_ops = { .match = gic_ipi_domain_match, }; +static int gic_register_ipi_domain(struct device_node *node) +{ + struct irq_domain *gic_ipi_domain; + unsigned int v[2], num_ipis; + + gic_ipi_domain = irq_domain_create_hierarchy(gic_irq_domain, IRQ_DOMAIN_FLAG_IPI_PER_CPU, + GIC_NUM_LOCAL_INTRS + gic_shared_intrs, + of_fwnode_handle(node), &gic_ipi_domain_ops, + NULL); + if (!gic_ipi_domain) { + pr_err("Failed to add IPI domain"); + return -ENXIO; + } + + irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI); + + if (node && + !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) { + bitmap_set(ipi_resrv, v[0], v[1]); + } else { + /* + * Reserve 2 interrupts per possible CPU/VP for use as IPIs, + * meeting the requirements of arch/mips SMP. + */ + num_ipis = 2 * num_possible_cpus(); + bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis); + } + + bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS); + + return 0; +} + +#else /* !CONFIG_GENERIC_IRQ_IPI */ + +static inline int gic_register_ipi_domain(struct device_node *node) +{ + return 0; +} + +#endif /* !CONFIG_GENERIC_IRQ_IPI */ + static int gic_cpu_startup(unsigned int cpu) { /* Enable or disable EIC */ @@ -662,8 +891,8 @@ static int gic_cpu_startup(unsigned int cpu) /* Clear all local IRQ masks (ie. disable all local interrupts) */ write_gic_vl_rmask(~0); - /* Invoke irq_cpu_online callbacks to enable desired interrupts */ - irq_cpu_online(); + /* Enable desired interrupts */ + gic_all_vpes_irq_cpu_online(); return 0; } @@ -671,11 +900,12 @@ static int gic_cpu_startup(unsigned int cpu) static int __init gic_of_init(struct device_node *node, struct device_node *parent) { - unsigned int cpu_vec, i, gicconfig, v[2], num_ipis; + unsigned int cpu_vec, i, gicconfig, cl, nclusters; unsigned long reserved; phys_addr_t gic_base; struct resource res; size_t gic_len; + int ret; /* Find the first available CPU vector. 
*/ i = 0; @@ -716,86 +946,68 @@ static int __init gic_of_init(struct device_node *node, __sync(); } - mips_gic_base = ioremap_nocache(gic_base, gic_len); + mips_gic_base = ioremap(gic_base, gic_len); + if (!mips_gic_base) { + pr_err("Failed to ioremap gic_base\n"); + return -ENOMEM; + } gicconfig = read_gic_config(); - gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS; - gic_shared_intrs >>= __ffs(GIC_CONFIG_NUMINTERRUPTS); + gic_shared_intrs = FIELD_GET(GIC_CONFIG_NUMINTERRUPTS, gicconfig); gic_shared_intrs = (gic_shared_intrs + 1) * 8; if (cpu_has_veic) { /* Always use vector 1 in EIC mode */ gic_cpu_pin = 0; - timer_cpu_pin = gic_cpu_pin; set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET, __gic_irq_dispatch); } else { gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET; irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec, gic_irq_dispatch); - /* - * With the CMP implementation of SMP (deprecated), other CPUs - * are started by the bootloader and put into a timer based - * waiting poll loop. We must not re-route those CPU's local - * timer interrupts as the wait instruction will never finish, - * so just handle whatever CPU interrupt it is routed to by - * default. - * - * This workaround should be removed when CMP support is - * dropped. - */ - if (IS_ENABLED(CONFIG_MIPS_CMP) && - gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) { - timer_cpu_pin = read_gic_vl_timer_map() & GIC_MAP_PIN_MAP; - irq_set_chained_handler(MIPS_CPU_IRQ_BASE + - GIC_CPU_PIN_OFFSET + - timer_cpu_pin, - gic_irq_dispatch); - } else { - timer_cpu_pin = gic_cpu_pin; - } } - gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS + - gic_shared_intrs, 0, - &gic_irq_domain_ops, NULL); + gic_irq_domain = irq_domain_create_simple(of_fwnode_handle(node), + GIC_NUM_LOCAL_INTRS + + gic_shared_intrs, 0, + &gic_irq_domain_ops, NULL); if (!gic_irq_domain) { pr_err("Failed to add IRQ domain"); return -ENXIO; } - gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain, - IRQ_DOMAIN_FLAG_IPI_PER_CPU, - GIC_NUM_LOCAL_INTRS + gic_shared_intrs, - node, &gic_ipi_domain_ops, NULL); - if (!gic_ipi_domain) { - pr_err("Failed to add IPI domain"); - return -ENXIO; - } - - irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI); - - if (node && - !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) { - bitmap_set(ipi_resrv, v[0], v[1]); - } else { - /* - * Reserve 2 interrupts per possible CPU/VP for use as IPIs, - * meeting the requirements of arch/mips SMP. - */ - num_ipis = 2 * num_possible_cpus(); - bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis); - } - - bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS); + ret = gic_register_ipi_domain(node); + if (ret) + return ret; board_bind_eic_interrupt = &gic_bind_eic_interrupt; - /* Setup defaults */ - for (i = 0; i < gic_shared_intrs; i++) { - change_gic_pol(i, GIC_POL_ACTIVE_HIGH); - change_gic_trig(i, GIC_TRIG_LEVEL); - write_gic_rmask(i); + /* + * Initialise each cluster's GIC shared registers to sane default + * values. + * Otherwise, the IPI set up will be erased if we move code + * to gic_cpu_startup for each cpu. 
+	 */
+	nclusters = mips_cps_numclusters();
+	for (cl = 0; cl < nclusters; cl++) {
+		if (cl == cpu_cluster(&current_cpu_data)) {
+			for (i = 0; i < gic_shared_intrs; i++) {
+				change_gic_pol(i, GIC_POL_ACTIVE_HIGH);
+				change_gic_trig(i, GIC_TRIG_LEVEL);
+				write_gic_rmask(i);
+			}
+		} else if (mips_cps_numcores(cl) != 0) {
+			mips_cm_lock_other(cl, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+			for (i = 0; i < gic_shared_intrs; i++) {
+				change_gic_redir_pol(i, GIC_POL_ACTIVE_HIGH);
+				change_gic_redir_trig(i, GIC_TRIG_LEVEL);
+				write_gic_redir_rmask(i);
+			}
+			mips_cm_unlock_other();
+		} else {
+			pr_warn("No CPU cores in cluster %d, skipping it\n", cl);
+		}
 	}
 
 	return cpuhp_setup_state(CPUHP_AP_IRQ_MIPS_GIC_STARTING,
diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
index 25f32e1d7764..09e640430208 100644
--- a/drivers/irqchip/irq-mmp.c
+++ b/drivers/irqchip/irq-mmp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *  linux/arch/arm/mach-mmp/irq.c
  *
@@ -6,16 +7,13 @@
  *
  *  Author:	Bin Yang <bin.yang@marvell.com>
  *              Haojian Zhuang <haojian.zhuang@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
 #include <linux/irqdomain.h>
 #include <linux/io.h>
 #include <linux/ioport.h>
@@ -34,6 +32,9 @@
 #define SEL_INT_PENDING		(1 << 6)
 #define SEL_INT_NUM_MASK	0x3f
 
+#define MMP2_ICU_INT_ROUTE_PJ4_IRQ	(1 << 5)
+#define MMP2_ICU_INT_ROUTE_PJ4_FIQ	(1 << 6)
+
 struct icu_chip_data {
 	int			nr_irqs;
 	unsigned int		virq_base;
@@ -43,6 +44,7 @@ struct icu_chip_data {
 	unsigned int		conf_enable;
 	unsigned int		conf_disable;
 	unsigned int		conf_mask;
+	unsigned int		conf2_mask;
 	unsigned int		clr_mfp_irq_base;
 	unsigned int		clr_mfp_hwirq;
 	struct irq_domain	*domain;
@@ -52,9 +54,11 @@ struct mmp_intc_conf {
 	unsigned int	conf_enable;
 	unsigned int	conf_disable;
 	unsigned int	conf_mask;
+	unsigned int	conf2_mask;
 };
 
 static void __iomem *mmp_icu_base;
+static void __iomem *mmp_icu2_base;
 static struct icu_chip_data icu_data[MAX_ICU_NR];
 static int max_icu_nr;
 
@@ -97,6 +101,16 @@ static void icu_mask_irq(struct irq_data *d)
 		r &= ~data->conf_mask;
 		r |= data->conf_disable;
 		writel_relaxed(r, mmp_icu_base + (hwirq << 2));
+
+		if (data->conf2_mask) {
+			/*
+			 * ICU1 (above) only controls PJ4 MP1; if using SMP,
+ */ + r = readl_relaxed(mmp_icu2_base + (hwirq << 2)); + r &= ~data->conf2_mask; + writel_relaxed(r, mmp_icu2_base + (hwirq << 2)); + } } else { r = readl_relaxed(data->reg_mask) | (1 << hwirq); writel_relaxed(r, data->reg_mask); @@ -132,11 +146,14 @@ struct irq_chip icu_irq_chip = { static void icu_mux_irq_demux(struct irq_desc *desc) { unsigned int irq = irq_desc_get_irq(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); struct irq_domain *domain; struct icu_chip_data *data; int i; unsigned long mask, status, n; + chained_irq_enter(chip, desc); + for (i = 1; i < max_icu_nr; i++) { if (irq == icu_data[i].cascade_irq) { domain = icu_data[i].domain; @@ -146,7 +163,7 @@ static void icu_mux_irq_demux(struct irq_desc *desc) } if (i >= max_icu_nr) { pr_err("Spurious irq %d in MMP INTC\n", irq); - return; + goto out; } mask = readl_relaxed(data->reg_mask); @@ -158,6 +175,9 @@ static void icu_mux_irq_demux(struct irq_desc *desc) generic_handle_irq(icu_data[i].virq_base + n); } } + +out: + chained_irq_exit(chip, desc); } static int mmp_irq_domain_map(struct irq_domain *d, unsigned int irq, @@ -176,7 +196,7 @@ static int mmp_irq_domain_xlate(struct irq_domain *d, struct device_node *node, return 0; } -const struct irq_domain_ops mmp_irq_domain_ops = { +static const struct irq_domain_ops mmp_irq_domain_ops = { .map = mmp_irq_domain_map, .xlate = mmp_irq_domain_xlate, }; @@ -190,7 +210,16 @@ static const struct mmp_intc_conf mmp_conf = { static const struct mmp_intc_conf mmp2_conf = { .conf_enable = 0x20, .conf_disable = 0x0, - .conf_mask = 0x7f, + .conf_mask = MMP2_ICU_INT_ROUTE_PJ4_IRQ | + MMP2_ICU_INT_ROUTE_PJ4_FIQ, +}; + +static struct mmp_intc_conf mmp3_conf = { + .conf_enable = 0x20, + .conf_disable = 0x0, + .conf_mask = MMP2_ICU_INT_ROUTE_PJ4_IRQ | + MMP2_ICU_INT_ROUTE_PJ4_FIQ, + .conf2_mask = 0xf0, }; static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs) @@ -201,7 +230,7 @@ static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs) if (!(hwirq & SEL_INT_PENDING)) return; hwirq &= SEL_INT_NUM_MASK; - handle_domain_irq(icu_data[0].domain, hwirq, regs); + generic_handle_domain_irq(icu_data[0].domain, hwirq); } static void __exception_irq_entry mmp2_handle_irq(struct pt_regs *regs) @@ -212,135 +241,9 @@ static void __exception_irq_entry mmp2_handle_irq(struct pt_regs *regs) if (!(hwirq & SEL_INT_PENDING)) return; hwirq &= SEL_INT_NUM_MASK; - handle_domain_irq(icu_data[0].domain, hwirq, regs); -} - -/* MMP (ARMv5) */ -void __init icu_init_irq(void) -{ - int irq; - - max_icu_nr = 1; - mmp_icu_base = ioremap(0xd4282000, 0x1000); - icu_data[0].conf_enable = mmp_conf.conf_enable; - icu_data[0].conf_disable = mmp_conf.conf_disable; - icu_data[0].conf_mask = mmp_conf.conf_mask; - icu_data[0].nr_irqs = 64; - icu_data[0].virq_base = 0; - icu_data[0].domain = irq_domain_add_legacy(NULL, 64, 0, 0, - &irq_domain_simple_ops, - &icu_data[0]); - for (irq = 0; irq < 64; irq++) { - icu_mask_irq(irq_get_irq_data(irq)); - irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq); - } - irq_set_default_host(icu_data[0].domain); - set_handle_irq(mmp_handle_irq); + generic_handle_domain_irq(icu_data[0].domain, hwirq); } -/* MMP2 (ARMv7) */ -void __init mmp2_init_icu(void) -{ - int irq, end; - - max_icu_nr = 8; - mmp_icu_base = ioremap(0xd4282000, 0x1000); - icu_data[0].conf_enable = mmp2_conf.conf_enable; - icu_data[0].conf_disable = mmp2_conf.conf_disable; - icu_data[0].conf_mask = mmp2_conf.conf_mask; - icu_data[0].nr_irqs = 64; - icu_data[0].virq_base = 0; - 
icu_data[0].domain = irq_domain_add_legacy(NULL, 64, 0, 0, - &irq_domain_simple_ops, - &icu_data[0]); - icu_data[1].reg_status = mmp_icu_base + 0x150; - icu_data[1].reg_mask = mmp_icu_base + 0x168; - icu_data[1].clr_mfp_irq_base = icu_data[0].virq_base + - icu_data[0].nr_irqs; - icu_data[1].clr_mfp_hwirq = 1; /* offset to IRQ_MMP2_PMIC_BASE */ - icu_data[1].nr_irqs = 2; - icu_data[1].cascade_irq = 4; - icu_data[1].virq_base = icu_data[0].virq_base + icu_data[0].nr_irqs; - icu_data[1].domain = irq_domain_add_legacy(NULL, icu_data[1].nr_irqs, - icu_data[1].virq_base, 0, - &irq_domain_simple_ops, - &icu_data[1]); - icu_data[2].reg_status = mmp_icu_base + 0x154; - icu_data[2].reg_mask = mmp_icu_base + 0x16c; - icu_data[2].nr_irqs = 2; - icu_data[2].cascade_irq = 5; - icu_data[2].virq_base = icu_data[1].virq_base + icu_data[1].nr_irqs; - icu_data[2].domain = irq_domain_add_legacy(NULL, icu_data[2].nr_irqs, - icu_data[2].virq_base, 0, - &irq_domain_simple_ops, - &icu_data[2]); - icu_data[3].reg_status = mmp_icu_base + 0x180; - icu_data[3].reg_mask = mmp_icu_base + 0x17c; - icu_data[3].nr_irqs = 3; - icu_data[3].cascade_irq = 9; - icu_data[3].virq_base = icu_data[2].virq_base + icu_data[2].nr_irqs; - icu_data[3].domain = irq_domain_add_legacy(NULL, icu_data[3].nr_irqs, - icu_data[3].virq_base, 0, - &irq_domain_simple_ops, - &icu_data[3]); - icu_data[4].reg_status = mmp_icu_base + 0x158; - icu_data[4].reg_mask = mmp_icu_base + 0x170; - icu_data[4].nr_irqs = 5; - icu_data[4].cascade_irq = 17; - icu_data[4].virq_base = icu_data[3].virq_base + icu_data[3].nr_irqs; - icu_data[4].domain = irq_domain_add_legacy(NULL, icu_data[4].nr_irqs, - icu_data[4].virq_base, 0, - &irq_domain_simple_ops, - &icu_data[4]); - icu_data[5].reg_status = mmp_icu_base + 0x15c; - icu_data[5].reg_mask = mmp_icu_base + 0x174; - icu_data[5].nr_irqs = 15; - icu_data[5].cascade_irq = 35; - icu_data[5].virq_base = icu_data[4].virq_base + icu_data[4].nr_irqs; - icu_data[5].domain = irq_domain_add_legacy(NULL, icu_data[5].nr_irqs, - icu_data[5].virq_base, 0, - &irq_domain_simple_ops, - &icu_data[5]); - icu_data[6].reg_status = mmp_icu_base + 0x160; - icu_data[6].reg_mask = mmp_icu_base + 0x178; - icu_data[6].nr_irqs = 2; - icu_data[6].cascade_irq = 51; - icu_data[6].virq_base = icu_data[5].virq_base + icu_data[5].nr_irqs; - icu_data[6].domain = irq_domain_add_legacy(NULL, icu_data[6].nr_irqs, - icu_data[6].virq_base, 0, - &irq_domain_simple_ops, - &icu_data[6]); - icu_data[7].reg_status = mmp_icu_base + 0x188; - icu_data[7].reg_mask = mmp_icu_base + 0x184; - icu_data[7].nr_irqs = 2; - icu_data[7].cascade_irq = 55; - icu_data[7].virq_base = icu_data[6].virq_base + icu_data[6].nr_irqs; - icu_data[7].domain = irq_domain_add_legacy(NULL, icu_data[7].nr_irqs, - icu_data[7].virq_base, 0, - &irq_domain_simple_ops, - &icu_data[7]); - end = icu_data[7].virq_base + icu_data[7].nr_irqs; - for (irq = 0; irq < end; irq++) { - icu_mask_irq(irq_get_irq_data(irq)); - if (irq == icu_data[1].cascade_irq || - irq == icu_data[2].cascade_irq || - irq == icu_data[3].cascade_irq || - irq == icu_data[4].cascade_irq || - irq == icu_data[5].cascade_irq || - irq == icu_data[6].cascade_irq || - irq == icu_data[7].cascade_irq) { - irq_set_chip(irq, &icu_irq_chip); - irq_set_chained_handler(irq, icu_mux_irq_demux); - } else { - irq_set_chip_and_handler(irq, &icu_irq_chip, - handle_level_irq); - } - } - irq_set_default_host(icu_data[0].domain); - set_handle_irq(mmp2_handle_irq); -} - -#ifdef CONFIG_OF static int __init mmp_init_bases(struct device_node *node) { 
int ret, nr_irqs, irq, i = 0; @@ -358,9 +261,9 @@ static int __init mmp_init_bases(struct device_node *node) } icu_data[0].virq_base = 0; - icu_data[0].domain = irq_domain_add_linear(node, nr_irqs, - &mmp_irq_domain_ops, - &icu_data[0]); + icu_data[0].domain = irq_domain_create_linear(of_fwnode_handle(node), nr_irqs, + &mmp_irq_domain_ops, + &icu_data[0]); for (irq = 0; irq < nr_irqs; irq++) { ret = irq_create_mapping(icu_data[0].domain, irq); if (!ret) { @@ -394,7 +297,6 @@ static int __init mmp_of_init(struct device_node *node, icu_data[0].conf_enable = mmp_conf.conf_enable; icu_data[0].conf_disable = mmp_conf.conf_disable; icu_data[0].conf_mask = mmp_conf.conf_mask; - irq_set_default_host(icu_data[0].domain); set_handle_irq(mmp_handle_irq); max_icu_nr = 1; return 0; @@ -413,19 +315,50 @@ static int __init mmp2_of_init(struct device_node *node, icu_data[0].conf_enable = mmp2_conf.conf_enable; icu_data[0].conf_disable = mmp2_conf.conf_disable; icu_data[0].conf_mask = mmp2_conf.conf_mask; - irq_set_default_host(icu_data[0].domain); set_handle_irq(mmp2_handle_irq); max_icu_nr = 1; return 0; } IRQCHIP_DECLARE(mmp2_intc, "mrvl,mmp2-intc", mmp2_of_init); +static int __init mmp3_of_init(struct device_node *node, + struct device_node *parent) +{ + int ret; + + mmp_icu2_base = of_iomap(node, 1); + if (!mmp_icu2_base) { + pr_err("Failed to get interrupt controller register #2\n"); + return -ENODEV; + } + + ret = mmp_init_bases(node); + if (ret < 0) { + iounmap(mmp_icu2_base); + return ret; + } + + icu_data[0].conf_enable = mmp3_conf.conf_enable; + icu_data[0].conf_disable = mmp3_conf.conf_disable; + icu_data[0].conf_mask = mmp3_conf.conf_mask; + icu_data[0].conf2_mask = mmp3_conf.conf2_mask; + + if (!parent) { + /* This is the main interrupt controller. */ + set_handle_irq(mmp2_handle_irq); + } + + max_icu_nr = 1; + return 0; +} +IRQCHIP_DECLARE(mmp3_intc, "marvell,mmp3-intc", mmp3_of_init); + static int __init mmp2_mux_of_init(struct device_node *node, struct device_node *parent) { - struct resource res; int i, ret, irq, j = 0; u32 nr_irqs, mfp_irq; + u32 reg[4]; if (!parent) return -ENODEV; @@ -437,26 +370,30 @@ static int __init mmp2_mux_of_init(struct device_node *node, pr_err("Not found mrvl,intc-nr-irqs property\n"); return -EINVAL; } - ret = of_address_to_resource(node, 0, &res); - if (ret < 0) { - pr_err("Not found reg property\n"); - return -EINVAL; - } - icu_data[i].reg_status = mmp_icu_base + res.start; - ret = of_address_to_resource(node, 1, &res); + + /* + * For historical reasons, the "regs" property of the + * mrvl,mmp2-mux-intc is not a regular "regs" property containing + * addresses on the parent bus, but offsets from the intc's base. + * That is why we can't use of_address_to_resource() here. 
+	 */
+	ret = of_property_read_variable_u32_array(node, "reg", reg,
+						  ARRAY_SIZE(reg),
+						  ARRAY_SIZE(reg));
 	if (ret < 0) {
 		pr_err("Not found reg property\n");
 		return -EINVAL;
 	}
-	icu_data[i].reg_mask = mmp_icu_base + res.start;
+	icu_data[i].reg_status = mmp_icu_base + reg[0];
+	icu_data[i].reg_mask = mmp_icu_base + reg[2];
 	icu_data[i].cascade_irq = irq_of_parse_and_map(node, 0);
 	if (!icu_data[i].cascade_irq)
 		return -EINVAL;
 
 	icu_data[i].virq_base = 0;
-	icu_data[i].domain = irq_domain_add_linear(node, nr_irqs,
-						   &mmp_irq_domain_ops,
-						   &icu_data[i]);
+	icu_data[i].domain = irq_domain_create_linear(of_fwnode_handle(node), nr_irqs,
+						      &mmp_irq_domain_ops,
+						      &icu_data[i]);
 	for (irq = 0; irq < nr_irqs; irq++) {
 		ret = irq_create_mapping(icu_data[i].domain, irq);
 		if (!ret) {
@@ -485,4 +422,3 @@ err:
 	return -EINVAL;
 }
 IRQCHIP_DECLARE(mmp2_mux_intc, "mrvl,mmp2-mux-intc", mmp2_mux_of_init);
-#endif
diff --git a/drivers/irqchip/irq-mscc-ocelot.c b/drivers/irqchip/irq-mscc-ocelot.c
index 88143c0b700c..8cbc191f750b 100644
--- a/drivers/irqchip/irq-mscc-ocelot.c
+++ b/drivers/irqchip/irq-mscc-ocelot.c
@@ -12,54 +12,116 @@
 #include <linux/irqchip/chained_irq.h>
 #include <linux/interrupt.h>
 
-#define ICPU_CFG_INTR_INTR_STICKY	0x10
-#define ICPU_CFG_INTR_INTR_ENA		0x18
-#define ICPU_CFG_INTR_INTR_ENA_CLR	0x1c
-#define ICPU_CFG_INTR_INTR_ENA_SET	0x20
-#define ICPU_CFG_INTR_DST_INTR_IDENT(x)	(0x38 + 0x4 * (x))
-#define ICPU_CFG_INTR_INTR_TRIGGER(x)	(0x5c + 0x4 * (x))
-
-#define OCELOT_NR_IRQ 24
+#define ICPU_CFG_INTR_DST_INTR_IDENT(_p, x)	((_p)->reg_off_ident + 0x4 * (x))
+#define ICPU_CFG_INTR_INTR_TRIGGER(_p, x)	((_p)->reg_off_trigger + 0x4 * (x))
+
+#define FLAGS_HAS_TRIGGER	BIT(0)
+#define FLAGS_NEED_INIT_ENABLE	BIT(1)
+
+struct chip_props {
+	u8 flags;
+	u8 reg_off_sticky;
+	u8 reg_off_ena;
+	u8 reg_off_ena_clr;
+	u8 reg_off_ena_set;
+	u8 reg_off_ident;
+	u8 reg_off_trigger;
+	u8 reg_off_ena_irq0;
+	u8 n_irq;
+};
+
+static struct chip_props ocelot_props = {
+	.flags			= FLAGS_HAS_TRIGGER,
+	.reg_off_sticky		= 0x10,
+	.reg_off_ena		= 0x18,
+	.reg_off_ena_clr	= 0x1c,
+	.reg_off_ena_set	= 0x20,
+	.reg_off_ident		= 0x38,
+	.reg_off_trigger	= 0x5c,
+	.n_irq			= 24,
+};
+
+static struct chip_props serval_props = {
+	.flags			= FLAGS_HAS_TRIGGER,
+	.reg_off_sticky		= 0xc,
+	.reg_off_ena		= 0x14,
+	.reg_off_ena_clr	= 0x18,
+	.reg_off_ena_set	= 0x1c,
+	.reg_off_ident		= 0x20,
+	.reg_off_trigger	= 0x24,
+	.n_irq			= 24,
+};
+
+static struct chip_props luton_props = {
+	.flags			= FLAGS_NEED_INIT_ENABLE,
+	.reg_off_sticky		= 0,
+	.reg_off_ena		= 0x4,
+	.reg_off_ena_clr	= 0x8,
+	.reg_off_ena_set	= 0xc,
+	.reg_off_ident		= 0x18,
+	.reg_off_ena_irq0	= 0x14,
+	.n_irq			= 28,
+};
+
+static struct chip_props jaguar2_props = {
+	.flags			= FLAGS_HAS_TRIGGER,
+	.reg_off_sticky		= 0x10,
+	.reg_off_ena		= 0x18,
+	.reg_off_ena_clr	= 0x1c,
+	.reg_off_ena_set	= 0x20,
+	.reg_off_ident		= 0x38,
+	.reg_off_trigger	= 0x5c,
+	.n_irq			= 29,
+};
 
 static void ocelot_irq_unmask(struct irq_data *data)
 {
 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+	struct irq_domain *d = data->domain;
+	struct chip_props *p = d->host_data;
 	struct irq_chip_type *ct = irq_data_get_chip_type(data);
 	unsigned int mask = data->mask;
 	u32 val;
 
-	irq_gc_lock(gc);
-	val = irq_reg_readl(gc, ICPU_CFG_INTR_INTR_TRIGGER(0)) |
-	      irq_reg_readl(gc, ICPU_CFG_INTR_INTR_TRIGGER(1));
+	guard(raw_spinlock)(&gc->lock);
+	/*
+	 * Clear sticky bits for edge mode interrupts.
+ * Serval has only one trigger register replication, but the adjacent + * register is always read as zero, so there's no need to handle this + * case separately. + */ + val = irq_reg_readl(gc, ICPU_CFG_INTR_INTR_TRIGGER(p, 0)) | + irq_reg_readl(gc, ICPU_CFG_INTR_INTR_TRIGGER(p, 1)); if (!(val & mask)) - irq_reg_writel(gc, mask, ICPU_CFG_INTR_INTR_STICKY); + irq_reg_writel(gc, mask, p->reg_off_sticky); *ct->mask_cache &= ~mask; - irq_reg_writel(gc, mask, ICPU_CFG_INTR_INTR_ENA_SET); - irq_gc_unlock(gc); + irq_reg_writel(gc, mask, p->reg_off_ena_set); } static void ocelot_irq_handler(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct irq_domain *d = irq_desc_get_handler_data(desc); + struct chip_props *p = d->host_data; struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, 0); - u32 reg = irq_reg_readl(gc, ICPU_CFG_INTR_DST_INTR_IDENT(0)); + u32 reg = irq_reg_readl(gc, ICPU_CFG_INTR_DST_INTR_IDENT(p, 0)); chained_irq_enter(chip, desc); while (reg) { u32 hwirq = __fls(reg); - generic_handle_irq(irq_find_mapping(d, hwirq)); + generic_handle_domain_irq(d, hwirq); reg &= ~(BIT(hwirq)); } chained_irq_exit(chip, desc); } -static int __init ocelot_irq_init(struct device_node *node, - struct device_node *parent) +static int __init vcoreiii_irq_init(struct device_node *node, + struct device_node *parent, + struct chip_props *p) { struct irq_domain *domain; struct irq_chip_generic *gc; @@ -69,14 +131,14 @@ static int __init ocelot_irq_init(struct device_node *node, if (!parent_irq) return -EINVAL; - domain = irq_domain_add_linear(node, OCELOT_NR_IRQ, - &irq_generic_chip_ops, NULL); + domain = irq_domain_create_linear(of_fwnode_handle(node), p->n_irq, + &irq_generic_chip_ops, NULL); if (!domain) { pr_err("%pOFn: unable to add irq domain\n", node); return -ENOMEM; } - ret = irq_alloc_domain_generic_chips(domain, OCELOT_NR_IRQ, 1, + ret = irq_alloc_domain_generic_chips(domain, p->n_irq, 1, "icpu", handle_level_irq, 0, 0, 0); if (ret) { @@ -92,16 +154,28 @@ static int __init ocelot_irq_init(struct device_node *node, goto err_gc_free; } - gc->chip_types[0].regs.ack = ICPU_CFG_INTR_INTR_STICKY; - gc->chip_types[0].regs.mask = ICPU_CFG_INTR_INTR_ENA_CLR; gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit; - gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit; - gc->chip_types[0].chip.irq_unmask = ocelot_irq_unmask; + gc->chip_types[0].regs.ack = p->reg_off_sticky; + if (p->flags & FLAGS_HAS_TRIGGER) { + gc->chip_types[0].regs.mask = p->reg_off_ena_clr; + gc->chip_types[0].chip.irq_unmask = ocelot_irq_unmask; + gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit; + } else { + gc->chip_types[0].regs.enable = p->reg_off_ena_set; + gc->chip_types[0].regs.disable = p->reg_off_ena_clr; + gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg; + gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg; + } /* Mask and ack all interrupts */ - irq_reg_writel(gc, 0, ICPU_CFG_INTR_INTR_ENA); - irq_reg_writel(gc, 0xffffffff, ICPU_CFG_INTR_INTR_STICKY); + irq_reg_writel(gc, 0, p->reg_off_ena); + irq_reg_writel(gc, 0xffffffff, p->reg_off_sticky); + + /* Overall init */ + if (p->flags & FLAGS_NEED_INIT_ENABLE) + irq_reg_writel(gc, BIT(0), p->reg_off_ena_irq0); + domain->host_data = p; irq_set_chained_handler_and_data(parent_irq, ocelot_irq_handler, domain); @@ -115,4 +189,35 @@ err_domain_remove: return ret; } + +static int __init ocelot_irq_init(struct device_node *node, + struct device_node *parent) +{ + return vcoreiii_irq_init(node, parent, &ocelot_props); +} + 
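The refactor above reduces each supported SoC to data plus a one-line wrapper: vcoreiii_irq_init() consumes a struct chip_props describing the register layout, and the per-SoC entry points that follow merely bind a compatible string to the matching table. As a rough sketch of what a further, purely hypothetical VCore III variant would need — the compatible string, register offsets and IRQ count below are illustrative placeholders, not values from any datasheet:

	static struct chip_props newsoc_props = {
		.flags			= FLAGS_HAS_TRIGGER,
		.reg_off_sticky		= 0x10,	/* placeholder offsets */
		.reg_off_ena		= 0x18,
		.reg_off_ena_clr	= 0x1c,
		.reg_off_ena_set	= 0x20,
		.reg_off_ident		= 0x38,
		.reg_off_trigger	= 0x5c,
		.n_irq			= 32,
	};

	static int __init newsoc_irq_init(struct device_node *node,
					  struct device_node *parent)
	{
		return vcoreiii_irq_init(node, parent, &newsoc_props);
	}

	IRQCHIP_DECLARE(newsoc_icpu, "mscc,newsoc-icpu-intr", newsoc_irq_init);

A chip without a trigger register would instead set FLAGS_NEED_INIT_ENABLE and fill in reg_off_ena_irq0, as the luton_props table does.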
 IRQCHIP_DECLARE(ocelot_icpu, "mscc,ocelot-icpu-intr", ocelot_irq_init);
+
+static int __init serval_irq_init(struct device_node *node,
+				  struct device_node *parent)
+{
+	return vcoreiii_irq_init(node, parent, &serval_props);
+}
+
+IRQCHIP_DECLARE(serval_icpu, "mscc,serval-icpu-intr", serval_irq_init);
+
+static int __init luton_irq_init(struct device_node *node,
+				 struct device_node *parent)
+{
+	return vcoreiii_irq_init(node, parent, &luton_props);
+}
+
+IRQCHIP_DECLARE(luton_icpu, "mscc,luton-icpu-intr", luton_irq_init);
+
+static int __init jaguar2_irq_init(struct device_node *node,
+				   struct device_node *parent)
+{
+	return vcoreiii_irq_init(node, parent, &jaguar2_props);
+}
+
+IRQCHIP_DECLARE(jaguar2_icpu, "mscc,jaguar2-icpu-intr", jaguar2_irq_init);
diff --git a/drivers/irqchip/irq-msi-lib.c b/drivers/irqchip/irq-msi-lib.c
new file mode 100644
index 000000000000..d5eefc3d7215
--- /dev/null
+++ b/drivers/irqchip/irq-msi-lib.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2022 Linutronix GmbH
+// Copyright (C) 2022 Intel
+
+#include <linux/export.h>
+
+#include <linux/irqchip/irq-msi-lib.h>
+
+/**
+ * msi_lib_init_dev_msi_info - Domain info setup for MSI domains
+ * @dev:		The device for which the domain is created
+ * @domain:		The domain providing this callback
+ * @real_parent:	The real parent domain of the domain to be initialized
+ *			which might be a domain built on top of @domain or
+ *			@domain itself
+ * @info:		The domain info for the domain to be initialized
+ *
+ * This function is to be used for all types of MSI domains above the root
+ * parent domain and any intermediates. The topmost parent domain specific
+ * functionality is determined via @real_parent.
+ *
+ * All intermediate domains between the root and the device domain must
+ * have either msi_parent_ops.init_dev_msi_info = msi_parent_init_dev_msi_info
+ * or invoke it down the line.
+ */
+bool msi_lib_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+			       struct irq_domain *real_parent,
+			       struct msi_domain_info *info)
+{
+	const struct msi_parent_ops *pops = real_parent->msi_parent_ops;
+	struct irq_chip *chip = info->chip;
+	u32 required_flags;
+
+	/* Parent ops available? */
+	if (WARN_ON_ONCE(!pops))
+		return false;
+
+	/*
+	 * MSI parent domain specific settings. For now there is only the
+	 * root parent domain, e.g. NEXUS, acting as a MSI parent, but it is
+	 * possible to stack MSI parents. See x86 vector -> irq remapping
+	 */
+	if (domain->bus_token == pops->bus_select_token) {
+		if (WARN_ON_ONCE(domain != real_parent))
+			return false;
+	} else {
+		WARN_ON_ONCE(1);
+		return false;
+	}
+
+	required_flags = pops->required_flags;
+
+	/* Is the target domain bus token supported? */
+	switch(info->bus_token) {
+	case DOMAIN_BUS_PCI_DEVICE_MSI:
+	case DOMAIN_BUS_PCI_DEVICE_MSIX:
+		if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PCI_MSI)))
+			return false;
+
+		break;
+	case DOMAIN_BUS_DEVICE_MSI:
+		/*
+		 * Per device MSI should never have any MSI feature bits
+		 * set. Its sole purpose is to create a dumb interrupt
+		 * chip which has a device specific irq_write_msi_msg()
+		 * callback.
+		 */
+		if (WARN_ON_ONCE(info->flags))
+			return false;
+
+		/* Core managed MSI descriptors */
+		info->flags = MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS | MSI_FLAG_FREE_MSI_DESCS;
+		fallthrough;
+	case DOMAIN_BUS_WIRED_TO_MSI:
+		/* Remove PCI specific flags */
+		required_flags &= ~MSI_FLAG_PCI_MSI_MASK_PARENT;
+		break;
+	default:
+		/*
+		 * This should never be reached. See
+		 * msi_lib_irq_domain_select()
+		 */
+		WARN_ON_ONCE(1);
+		return false;
+	}
+
+	/*
+	 * Mask out the domain specific MSI feature flags which are not
+	 * supported by the real parent.
+	 */
+	info->flags &= pops->supported_flags;
+	/* Enforce the required flags */
+	info->flags |= required_flags;
+
+	/* Chip updates for all child bus types */
+	if (!chip->irq_eoi && (pops->chip_flags & MSI_CHIP_FLAG_SET_EOI))
+		chip->irq_eoi = irq_chip_eoi_parent;
+	if (!chip->irq_ack && (pops->chip_flags & MSI_CHIP_FLAG_SET_ACK))
+		chip->irq_ack = irq_chip_ack_parent;
+
+	/*
+	 * The device MSI domain can never have a set affinity callback. It
+	 * always has to rely on the parent domain to handle affinity
+	 * settings. The device MSI domain just has to write the resulting
+	 * MSI message into the hardware which is the whole purpose of the
+	 * device MSI domain aside from mask/unmask which is provided e.g. by
+	 * PCI/MSI device domains.
+	 *
+	 * The exception to the rule is when the underlying domain
+	 * tells you that affinity is not a thing -- for example when
+	 * everything is muxed behind a single interrupt.
+	 */
+	if (!chip->irq_set_affinity && !(info->flags & MSI_FLAG_NO_AFFINITY))
+		chip->irq_set_affinity = msi_domain_set_affinity;
+
+	/*
+	 * If the parent domain insists on being in charge of masking, obey
+	 * blindly. The interrupt is unmasked at the PCI level on startup
+	 * and masked on shutdown to prevent rogue interrupts after the
+	 * driver freed the interrupt. Not masking it at the PCI level
+	 * speeds up operation for disable/enable_irq() as it avoids
+	 * getting all the way out to the PCI device.
+	 */
+	if (info->flags & MSI_FLAG_PCI_MSI_MASK_PARENT) {
+		chip->irq_mask = irq_chip_mask_parent;
+		chip->irq_unmask = irq_chip_unmask_parent;
+	}
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(msi_lib_init_dev_msi_info);
+
+/**
+ * msi_lib_irq_domain_select - Shared select function for NEXUS domains
+ * @d:		Pointer to the irq domain on which select is invoked
+ * @fwspec:	Firmware spec describing what is searched
+ * @bus_token:	The bus token for which a matching irq domain is looked up
+ *
+ * Returns:	%0 if @d is not what is being looked for
+ *
+ *		%1 if @d is either the domain which is directly searched for or
+ *		   if @d is providing the parent MSI domain for the functionality
+ *		   requested with @bus_token.
+ */
+int msi_lib_irq_domain_select(struct irq_domain *d, struct irq_fwspec *fwspec,
+			      enum irq_domain_bus_token bus_token)
+{
+	const struct msi_parent_ops *ops = d->msi_parent_ops;
+	u32 busmask = BIT(bus_token);
+
+	if (!ops)
+		return 0;
+
+	struct fwnode_handle *fwh __free(fwnode_handle) =
+		d->flags & IRQ_DOMAIN_FLAG_FWNODE_PARENT ? fwnode_get_parent(fwspec->fwnode)
+							 : fwnode_handle_get(fwspec->fwnode);
+	if (fwh != d->fwnode || fwspec->param_count != 0)
+		return 0;
+
+	/* Handle pure domain searches */
+	if (bus_token == ops->bus_select_token)
+		return 1;
+
+	return !!(ops->bus_select_mask & busmask);
+}
+EXPORT_SYMBOL_GPL(msi_lib_irq_domain_select);
diff --git a/drivers/irqchip/irq-mst-intc.c b/drivers/irqchip/irq-mst-intc.c
new file mode 100644
index 000000000000..7f760f555a76
--- /dev/null
+++ b/drivers/irqchip/irq-mst-intc.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ * Author Mark-PK Tsai <mark-pk.tsai@mediatek.com> + */ +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqdomain.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/syscore_ops.h> + +#define MST_INTC_MAX_IRQS 64 + +#define INTC_MASK 0x0 +#define INTC_REV_POLARITY 0x10 +#define INTC_EOI 0x20 + +#ifdef CONFIG_PM_SLEEP +static LIST_HEAD(mst_intc_list); +#endif + +struct mst_intc_chip_data { + raw_spinlock_t lock; + unsigned int irq_start, nr_irqs; + void __iomem *base; + bool no_eoi; +#ifdef CONFIG_PM_SLEEP + struct list_head entry; + u16 saved_polarity_conf[DIV_ROUND_UP(MST_INTC_MAX_IRQS, 16)]; +#endif +}; + +static void mst_set_irq(struct irq_data *d, u32 offset) +{ + irq_hw_number_t hwirq = irqd_to_hwirq(d); + struct mst_intc_chip_data *cd = irq_data_get_irq_chip_data(d); + u16 val, mask; + unsigned long flags; + + mask = 1 << (hwirq % 16); + offset += (hwirq / 16) * 4; + + raw_spin_lock_irqsave(&cd->lock, flags); + val = readw_relaxed(cd->base + offset) | mask; + writew_relaxed(val, cd->base + offset); + raw_spin_unlock_irqrestore(&cd->lock, flags); +} + +static void mst_clear_irq(struct irq_data *d, u32 offset) +{ + irq_hw_number_t hwirq = irqd_to_hwirq(d); + struct mst_intc_chip_data *cd = irq_data_get_irq_chip_data(d); + u16 val, mask; + unsigned long flags; + + mask = 1 << (hwirq % 16); + offset += (hwirq / 16) * 4; + + raw_spin_lock_irqsave(&cd->lock, flags); + val = readw_relaxed(cd->base + offset) & ~mask; + writew_relaxed(val, cd->base + offset); + raw_spin_unlock_irqrestore(&cd->lock, flags); +} + +static void mst_intc_mask_irq(struct irq_data *d) +{ + mst_set_irq(d, INTC_MASK); + irq_chip_mask_parent(d); +} + +static void mst_intc_unmask_irq(struct irq_data *d) +{ + mst_clear_irq(d, INTC_MASK); + irq_chip_unmask_parent(d); +} + +static void mst_intc_eoi_irq(struct irq_data *d) +{ + struct mst_intc_chip_data *cd = irq_data_get_irq_chip_data(d); + + if (!cd->no_eoi) + mst_set_irq(d, INTC_EOI); + + irq_chip_eoi_parent(d); +} + +static int mst_irq_chip_set_type(struct irq_data *data, unsigned int type) +{ + switch (type) { + case IRQ_TYPE_LEVEL_LOW: + case IRQ_TYPE_EDGE_FALLING: + mst_set_irq(data, INTC_REV_POLARITY); + break; + case IRQ_TYPE_LEVEL_HIGH: + case IRQ_TYPE_EDGE_RISING: + mst_clear_irq(data, INTC_REV_POLARITY); + break; + default: + return -EINVAL; + } + + return irq_chip_set_type_parent(data, IRQ_TYPE_LEVEL_HIGH); +} + +static struct irq_chip mst_intc_chip = { + .name = "mst-intc", + .irq_mask = mst_intc_mask_irq, + .irq_unmask = mst_intc_unmask_irq, + .irq_eoi = mst_intc_eoi_irq, + .irq_get_irqchip_state = irq_chip_get_parent_state, + .irq_set_irqchip_state = irq_chip_set_parent_state, + .irq_set_affinity = irq_chip_set_affinity_parent, + .irq_set_vcpu_affinity = irq_chip_set_vcpu_affinity_parent, + .irq_set_type = mst_irq_chip_set_type, + .irq_retrigger = irq_chip_retrigger_hierarchy, + .flags = IRQCHIP_SET_TYPE_MASKED | + IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_MASK_ON_SUSPEND, +}; + +#ifdef CONFIG_PM_SLEEP +static void mst_intc_polarity_save(struct mst_intc_chip_data *cd) +{ + int i; + void __iomem *addr = cd->base + INTC_REV_POLARITY; + + for (i = 0; i < DIV_ROUND_UP(cd->nr_irqs, 16); i++) + cd->saved_polarity_conf[i] = readw_relaxed(addr + i * 4); +} + +static void mst_intc_polarity_restore(struct mst_intc_chip_data *cd) +{ + int i; + void __iomem *addr = cd->base + INTC_REV_POLARITY; + + for 
(i = 0; i < DIV_ROUND_UP(cd->nr_irqs, 16); i++)
+		writew_relaxed(cd->saved_polarity_conf[i], addr + i * 4);
+}
+
+static void mst_irq_resume(void *data)
+{
+	struct mst_intc_chip_data *cd;
+
+	list_for_each_entry(cd, &mst_intc_list, entry)
+		mst_intc_polarity_restore(cd);
+}
+
+static int mst_irq_suspend(void *data)
+{
+	struct mst_intc_chip_data *cd;
+
+	list_for_each_entry(cd, &mst_intc_list, entry)
+		mst_intc_polarity_save(cd);
+	return 0;
+}
+
+static const struct syscore_ops mst_irq_syscore_ops = {
+	.suspend	= mst_irq_suspend,
+	.resume		= mst_irq_resume,
+};
+
+static struct syscore mst_irq_syscore = {
+	.ops = &mst_irq_syscore_ops,
+};
+
+static int __init mst_irq_pm_init(void)
+{
+	register_syscore(&mst_irq_syscore);
+	return 0;
+}
+late_initcall(mst_irq_pm_init);
+#endif
+
+static int mst_intc_domain_translate(struct irq_domain *d,
+				     struct irq_fwspec *fwspec,
+				     unsigned long *hwirq,
+				     unsigned int *type)
+{
+	struct mst_intc_chip_data *cd = d->host_data;
+
+	if (is_of_node(fwspec->fwnode)) {
+		if (fwspec->param_count != 3)
+			return -EINVAL;
+
+		/* No PPI should point to this domain */
+		if (fwspec->param[0] != 0)
+			return -EINVAL;
+
+		if (fwspec->param[1] >= cd->nr_irqs)
+			return -EINVAL;
+
+		*hwirq = fwspec->param[1];
+		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static int mst_intc_domain_alloc(struct irq_domain *domain, unsigned int virq,
+				 unsigned int nr_irqs, void *data)
+{
+	int i;
+	irq_hw_number_t hwirq;
+	struct irq_fwspec parent_fwspec, *fwspec = data;
+	struct mst_intc_chip_data *cd = domain->host_data;
+
+	/* Not GIC compliant */
+	if (fwspec->param_count != 3)
+		return -EINVAL;
+
+	/* No PPI should point to this domain */
+	if (fwspec->param[0])
+		return -EINVAL;
+
+	hwirq = fwspec->param[1];
+	for (i = 0; i < nr_irqs; i++)
+		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+					      &mst_intc_chip,
+					      domain->host_data);
+
+	parent_fwspec = *fwspec;
+	parent_fwspec.fwnode = domain->parent->fwnode;
+	parent_fwspec.param[1] = cd->irq_start + hwirq;
+
+	/*
+	 * mst-intc latches the interrupt request if it's edge triggered,
+	 * so the output signal to the parent GIC is always level sensitive.
+	 * And if the irq signal is active low, configure it to active high
+	 * to meet GIC SPI spec in mst_irq_chip_set_type via REV_POLARITY bit.
+ */ + parent_fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH; + + return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &parent_fwspec); +} + +static const struct irq_domain_ops mst_intc_domain_ops = { + .translate = mst_intc_domain_translate, + .alloc = mst_intc_domain_alloc, + .free = irq_domain_free_irqs_common, +}; + +static int __init mst_intc_of_init(struct device_node *dn, + struct device_node *parent) +{ + struct irq_domain *domain, *domain_parent; + struct mst_intc_chip_data *cd; + u32 irq_start, irq_end; + + domain_parent = irq_find_host(parent); + if (!domain_parent) { + pr_err("mst-intc: interrupt-parent not found\n"); + return -EINVAL; + } + + if (of_property_read_u32_index(dn, "mstar,irqs-map-range", 0, &irq_start) || + of_property_read_u32_index(dn, "mstar,irqs-map-range", 1, &irq_end)) + return -EINVAL; + + cd = kzalloc(sizeof(*cd), GFP_KERNEL); + if (!cd) + return -ENOMEM; + + cd->base = of_iomap(dn, 0); + if (!cd->base) { + kfree(cd); + return -ENOMEM; + } + + cd->no_eoi = of_property_read_bool(dn, "mstar,intc-no-eoi"); + raw_spin_lock_init(&cd->lock); + cd->irq_start = irq_start; + cd->nr_irqs = irq_end - irq_start + 1; + domain = irq_domain_create_hierarchy(domain_parent, 0, cd->nr_irqs, of_fwnode_handle(dn), + &mst_intc_domain_ops, cd); + if (!domain) { + iounmap(cd->base); + kfree(cd); + return -ENOMEM; + } + +#ifdef CONFIG_PM_SLEEP + INIT_LIST_HEAD(&cd->entry); + list_add_tail(&cd->entry, &mst_intc_list); +#endif + return 0; +} + +IRQCHIP_DECLARE(mst_intc, "mstar,mst-intc", mst_intc_of_init); diff --git a/drivers/irqchip/irq-mtk-cirq.c b/drivers/irqchip/irq-mtk-cirq.c index 18c65c16de28..9571f622774e 100644 --- a/drivers/irqchip/irq-mtk-cirq.c +++ b/drivers/irqchip/irq-mtk-cirq.c @@ -1,15 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2016 MediaTek Inc. * Author: Youlin.Pei <youlin.pei@mediatek.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
 */
 
 #include <linux/interrupt.h>
@@ -23,14 +15,41 @@
 #include <linux/slab.h>
 #include <linux/syscore_ops.h>
 
-#define CIRQ_ACK	0x40
-#define CIRQ_MASK_SET	0xc0
-#define CIRQ_MASK_CLR	0x100
-#define CIRQ_SENS_SET	0x180
-#define CIRQ_SENS_CLR	0x1c0
-#define CIRQ_POL_SET	0x240
-#define CIRQ_POL_CLR	0x280
-#define CIRQ_CONTROL	0x300
+enum mtk_cirq_regoffs_index {
+	CIRQ_STA,
+	CIRQ_ACK,
+	CIRQ_MASK_SET,
+	CIRQ_MASK_CLR,
+	CIRQ_SENS_SET,
+	CIRQ_SENS_CLR,
+	CIRQ_POL_SET,
+	CIRQ_POL_CLR,
+	CIRQ_CONTROL
+};
+
+static const u32 mtk_cirq_regoffs_v1[] = {
+	[CIRQ_STA]	= 0x0,
+	[CIRQ_ACK]	= 0x40,
+	[CIRQ_MASK_SET]	= 0xc0,
+	[CIRQ_MASK_CLR]	= 0x100,
+	[CIRQ_SENS_SET]	= 0x180,
+	[CIRQ_SENS_CLR]	= 0x1c0,
+	[CIRQ_POL_SET]	= 0x240,
+	[CIRQ_POL_CLR]	= 0x280,
+	[CIRQ_CONTROL]	= 0x300,
+};
+
+static const u32 mtk_cirq_regoffs_v2[] = {
+	[CIRQ_STA]	= 0x0,
+	[CIRQ_ACK]	= 0x80,
+	[CIRQ_MASK_SET]	= 0x180,
+	[CIRQ_MASK_CLR]	= 0x200,
+	[CIRQ_SENS_SET]	= 0x300,
+	[CIRQ_SENS_CLR]	= 0x380,
+	[CIRQ_POL_SET]	= 0x480,
+	[CIRQ_POL_CLR]	= 0x500,
+	[CIRQ_CONTROL]	= 0x600,
+};
 
 #define CIRQ_EN	0x1
 #define CIRQ_EDGE	0x2
@@ -40,18 +59,32 @@ struct mtk_cirq_chip_data {
 	void __iomem *base;
 	unsigned int ext_irq_start;
 	unsigned int ext_irq_end;
+	const u32 *offsets;
 	struct irq_domain *domain;
 };
 
 static struct mtk_cirq_chip_data *cirq_data;
 
-static void mtk_cirq_write_mask(struct irq_data *data, unsigned int offset)
+static void __iomem *mtk_cirq_reg(struct mtk_cirq_chip_data *chip_data,
+				  enum mtk_cirq_regoffs_index idx)
+{
+	return chip_data->base + chip_data->offsets[idx];
+}
+
+static void __iomem *mtk_cirq_irq_reg(struct mtk_cirq_chip_data *chip_data,
+				      enum mtk_cirq_regoffs_index idx,
+				      unsigned int cirq_num)
+{
+	return mtk_cirq_reg(chip_data, idx) + (cirq_num / 32) * 4;
+}
+
+static void mtk_cirq_write_mask(struct irq_data *data, enum mtk_cirq_regoffs_index idx)
 {
 	struct mtk_cirq_chip_data *chip_data = data->chip_data;
 	unsigned int cirq_num = data->hwirq;
 	u32 mask = 1 << (cirq_num % 32);
 
-	writel_relaxed(mask, chip_data->base + offset + (cirq_num / 32) * 4);
+	writel_relaxed(mask, mtk_cirq_irq_reg(chip_data, idx, cirq_num));
 }
 
 static void mtk_cirq_mask(struct irq_data *data)
@@ -166,8 +199,9 @@ static const struct irq_domain_ops cirq_domain_ops = {
 };
 
 #ifdef CONFIG_PM_SLEEP
-static int mtk_cirq_suspend(void)
+static int mtk_cirq_suspend(void *data)
 {
+	void __iomem *reg;
 	u32 value, mask;
 	unsigned int irq, hwirq_num;
 	bool pending, masked;
@@ -208,50 +242,66 @@ static int mtk_cirq_suspend(void)
 			continue;
 		}
 
+		reg = mtk_cirq_irq_reg(cirq_data, CIRQ_ACK, i);
 		mask = 1 << (i % 32);
-		writel_relaxed(mask, cirq_data->base + CIRQ_ACK + (i / 32) * 4);
+		writel_relaxed(mask, reg);
 	}
 
 	/* set edge_only mode, record edge-triggered interrupts */
 	/* enable cirq */
-	value = readl_relaxed(cirq_data->base + CIRQ_CONTROL);
+	reg = mtk_cirq_reg(cirq_data, CIRQ_CONTROL);
+	value = readl_relaxed(reg);
 	value |= (CIRQ_EDGE | CIRQ_EN);
-	writel_relaxed(value, cirq_data->base + CIRQ_CONTROL);
+	writel_relaxed(value, reg);
 
 	return 0;
 }
 
-static void mtk_cirq_resume(void)
+static void mtk_cirq_resume(void *data)
 {
+	void __iomem *reg = mtk_cirq_reg(cirq_data, CIRQ_CONTROL);
 	u32 value;
 
-	/* flush recored interrupts, will send signals to parent controller */
-	value = readl_relaxed(cirq_data->base + CIRQ_CONTROL);
-	writel_relaxed(value | CIRQ_FLUSH, cirq_data->base + CIRQ_CONTROL);
+	/* flush recorded interrupts, will send signals to parent controller */
+	value = readl_relaxed(reg);
+	writel_relaxed(value | CIRQ_FLUSH, reg);
 
 	/* disable cirq */
-	value =
readl_relaxed(cirq_data->base + CIRQ_CONTROL); + value = readl_relaxed(reg); value &= ~(CIRQ_EDGE | CIRQ_EN); - writel_relaxed(value, cirq_data->base + CIRQ_CONTROL); + writel_relaxed(value, reg); } -static struct syscore_ops mtk_cirq_syscore_ops = { +static const struct syscore_ops mtk_cirq_syscore_ops = { .suspend = mtk_cirq_suspend, .resume = mtk_cirq_resume, }; +static struct syscore mtk_cirq_syscore = { + .ops = &mtk_cirq_syscore_ops, +}; + static void mtk_cirq_syscore_init(void) { - register_syscore_ops(&mtk_cirq_syscore_ops); + register_syscore(&mtk_cirq_syscore); } #else static inline void mtk_cirq_syscore_init(void) {} #endif +static const struct of_device_id mtk_cirq_of_match[] = { + { .compatible = "mediatek,mt2701-cirq", .data = &mtk_cirq_regoffs_v1 }, + { .compatible = "mediatek,mt8135-cirq", .data = &mtk_cirq_regoffs_v1 }, + { .compatible = "mediatek,mt8173-cirq", .data = &mtk_cirq_regoffs_v1 }, + { .compatible = "mediatek,mt8192-cirq", .data = &mtk_cirq_regoffs_v2 }, + { /* sentinel */ } +}; + static int __init mtk_cirq_of_init(struct device_node *node, struct device_node *parent) { struct irq_domain *domain, *domain_parent; + const struct of_device_id *match; unsigned int irq_num; int ret; @@ -282,10 +332,16 @@ static int __init mtk_cirq_of_init(struct device_node *node, if (ret) goto out_unmap; + match = of_match_node(mtk_cirq_of_match, node); + if (!match) { + ret = -ENODEV; + goto out_unmap; + } + cirq_data->offsets = match->data; + irq_num = cirq_data->ext_irq_end - cirq_data->ext_irq_start + 1; - domain = irq_domain_add_hierarchy(domain_parent, 0, - irq_num, node, - &cirq_domain_ops, cirq_data); + domain = irq_domain_create_hierarchy(domain_parent, 0, irq_num, of_fwnode_handle(node), + &cirq_domain_ops, cirq_data); if (!domain) { ret = -ENOMEM; goto out_unmap; diff --git a/drivers/irqchip/irq-mtk-sysirq.c b/drivers/irqchip/irq-mtk-sysirq.c index 90aaf190157f..6895e7096b27 100644 --- a/drivers/irqchip/irq-mtk-sysirq.c +++ b/drivers/irqchip/irq-mtk-sysirq.c @@ -1,15 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2014 MediaTek Inc. * Author: Joe.C <yingjoe.chen@mediatek.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
*/ #include <linux/irq.h> @@ -23,7 +15,7 @@ #include <linux/spinlock.h> struct mtk_sysirq_chip_data { - spinlock_t lock; + raw_spinlock_t lock; u32 nr_intpol_bases; void __iomem **intpol_bases; u32 *intpol_words; @@ -45,7 +37,7 @@ static int mtk_sysirq_set_type(struct irq_data *data, unsigned int type) reg_index = chip_data->which_word[hwirq]; offset = hwirq & 0x1f; - spin_lock_irqsave(&chip_data->lock, flags); + raw_spin_lock_irqsave(&chip_data->lock, flags); value = readl_relaxed(base + reg_index * 4); if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_EDGE_FALLING) { if (type == IRQ_TYPE_LEVEL_LOW) @@ -61,7 +53,7 @@ static int mtk_sysirq_set_type(struct irq_data *data, unsigned int type) data = data->parent_data; ret = data->chip->irq_set_type(data, type); - spin_unlock_irqrestore(&chip_data->lock, flags); + raw_spin_unlock_irqrestore(&chip_data->lock, flags); return ret; } @@ -73,6 +65,7 @@ static struct irq_chip mtk_sysirq_chip = { .irq_set_type = mtk_sysirq_set_type, .irq_retrigger = irq_chip_retrigger_hierarchy, .irq_set_affinity = irq_chip_set_affinity_parent, + .flags = IRQCHIP_SKIP_SET_WAKE, }; static int mtk_sysirq_domain_translate(struct irq_domain *d, @@ -214,13 +207,13 @@ static int __init mtk_sysirq_of_init(struct device_node *node, chip_data->which_word[i] = word; } - domain = irq_domain_add_hierarchy(domain_parent, 0, intpol_num, node, - &sysirq_domain_ops, chip_data); + domain = irq_domain_create_hierarchy(domain_parent, 0, intpol_num, of_fwnode_handle(node), + &sysirq_domain_ops, chip_data); if (!domain) { ret = -ENOMEM; goto out_free_which_word; } - spin_lock_init(&chip_data->lock); + raw_spin_lock_init(&chip_data->lock); return 0; diff --git a/drivers/irqchip/irq-mvebu-gicp.c b/drivers/irqchip/irq-mvebu-gicp.c index 3be5c5dba1da..667bde3c651f 100644 --- a/drivers/irqchip/irq-mvebu-gicp.c +++ b/drivers/irqchip/irq-mvebu-gicp.c @@ -17,6 +17,8 @@ #include <linux/of_platform.h> #include <linux/platform_device.h> +#include <linux/irqchip/irq-msi-lib.h> + #include <dt-bindings/interrupt-controller/arm-gic.h> #define GICP_SETSPI_NSR_OFFSET 0x0 @@ -145,32 +147,37 @@ static void gicp_irq_domain_free(struct irq_domain *domain, } static const struct irq_domain_ops gicp_domain_ops = { + .select = msi_lib_irq_domain_select, .alloc = gicp_irq_domain_alloc, .free = gicp_irq_domain_free, }; -static struct irq_chip gicp_msi_irq_chip = { - .name = "GICP", - .irq_set_type = irq_chip_set_type_parent, - .flags = IRQCHIP_SUPPORTS_LEVEL_MSI, -}; +#define GICP_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS) -static struct msi_domain_ops gicp_msi_ops = { -}; +#define GICP_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \ + MSI_FLAG_LEVEL_CAPABLE) -static struct msi_domain_info gicp_msi_domain_info = { - .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_LEVEL_CAPABLE), - .ops = &gicp_msi_ops, - .chip = &gicp_msi_irq_chip, +static const struct msi_parent_ops gicp_msi_parent_ops = { + .supported_flags = GICP_MSI_FLAGS_SUPPORTED, + .required_flags = GICP_MSI_FLAGS_REQUIRED, + .chip_flags = MSI_CHIP_FLAG_SET_EOI, + .bus_select_token = DOMAIN_BUS_GENERIC_MSI, + .bus_select_mask = MATCH_PLATFORM_MSI, + .prefix = "GICP-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, }; static int mvebu_gicp_probe(struct platform_device *pdev) { - struct mvebu_gicp *gicp; - struct irq_domain *inner_domain, *plat_domain, *parent_domain; struct device_node *node = pdev->dev.of_node; struct device_node *irq_parent_dn; + struct irq_domain_info info = { + .fwnode = 
of_fwnode_handle(node), + .ops = &gicp_domain_ops, + }; + struct mvebu_gicp *gicp; + void __iomem *base; int ret, i; gicp = devm_kzalloc(&pdev->dev, sizeof(*gicp), GFP_KERNEL); @@ -210,43 +217,36 @@ static int mvebu_gicp_probe(struct platform_device *pdev) gicp->spi_cnt += gicp->spi_ranges[i].count; } - gicp->spi_bitmap = devm_kcalloc(&pdev->dev, - BITS_TO_LONGS(gicp->spi_cnt), sizeof(long), - GFP_KERNEL); + gicp->spi_bitmap = devm_bitmap_zalloc(&pdev->dev, gicp->spi_cnt, GFP_KERNEL); if (!gicp->spi_bitmap) return -ENOMEM; + info.size = gicp->spi_cnt; + info.host_data = gicp; + irq_parent_dn = of_irq_find_parent(node); if (!irq_parent_dn) { dev_err(&pdev->dev, "failed to find parent IRQ node\n"); return -ENODEV; } - parent_domain = irq_find_host(irq_parent_dn); - if (!parent_domain) { + info.parent = irq_find_host(irq_parent_dn); + of_node_put(irq_parent_dn); + if (!info.parent) { dev_err(&pdev->dev, "failed to find parent IRQ domain\n"); return -ENODEV; } - inner_domain = irq_domain_create_hierarchy(parent_domain, 0, - gicp->spi_cnt, - of_node_to_fwnode(node), - &gicp_domain_ops, gicp); - if (!inner_domain) - return -ENOMEM; - - - plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node), - &gicp_msi_domain_info, - inner_domain); - if (!plat_domain) { - irq_domain_remove(inner_domain); - return -ENOMEM; + base = ioremap(gicp->res->start, resource_size(gicp->res)); + if (!base) { + dev_err(&pdev->dev, "ioremap() failed. Unable to clear pending interrupts.\n"); + } else { + for (i = 0; i < 64; i++) + writel(i, base + GICP_CLRSPI_NSR_OFFSET); + iounmap(base); } - platform_set_drvdata(pdev, gicp); - - return 0; + return msi_create_parent_irq_domain(&info, &gicp_msi_parent_ops) ? 0 : -ENOMEM; } static const struct of_device_id mvebu_gicp_of_match[] = { diff --git a/drivers/irqchip/irq-mvebu-icu.c b/drivers/irqchip/irq-mvebu-icu.c index 547045d89c4b..db5dbc6e88b0 100644 --- a/drivers/irqchip/irq-mvebu-icu.c +++ b/drivers/irqchip/irq-mvebu-icu.c @@ -20,6 +20,8 @@ #include <linux/of_platform.h> #include <linux/platform_device.h> +#include <linux/irqchip/irq-msi-lib.h> + #include <dt-bindings/interrupt-controller/mvebu-icu.h> /* ICU registers */ @@ -60,100 +62,15 @@ struct mvebu_icu_msi_data { const struct mvebu_icu_subset_data *subset_data; }; -struct mvebu_icu_irq_data { - struct mvebu_icu *icu; - unsigned int icu_group; - unsigned int type; -}; - -DEFINE_STATIC_KEY_FALSE(legacy_bindings); - -static void mvebu_icu_init(struct mvebu_icu *icu, - struct mvebu_icu_msi_data *msi_data, - struct msi_msg *msg) -{ - const struct mvebu_icu_subset_data *subset = msi_data->subset_data; - - if (atomic_cmpxchg(&msi_data->initialized, false, true)) - return; - - /* Set 'SET' ICU SPI message address in AP */ - writel_relaxed(msg[0].address_hi, icu->base + subset->offset_set_ah); - writel_relaxed(msg[0].address_lo, icu->base + subset->offset_set_al); - - if (subset->icu_group != ICU_GRP_NSR) - return; - - /* Set 'CLEAR' ICU SPI message address in AP (level-MSI only) */ - writel_relaxed(msg[1].address_hi, icu->base + subset->offset_clr_ah); - writel_relaxed(msg[1].address_lo, icu->base + subset->offset_clr_al); -} - -static void mvebu_icu_write_msg(struct msi_desc *desc, struct msi_msg *msg) -{ - struct irq_data *d = irq_get_irq_data(desc->irq); - struct mvebu_icu_msi_data *msi_data = platform_msi_get_host_data(d->domain); - struct mvebu_icu_irq_data *icu_irqd = d->chip_data; - struct mvebu_icu *icu = icu_irqd->icu; - unsigned int icu_int; - - if (msg->address_lo || msg->address_hi) { - /* One off 
initialization per domain */ - mvebu_icu_init(icu, msi_data, msg); - /* Configure the ICU with irq number & type */ - icu_int = msg->data | ICU_INT_ENABLE; - if (icu_irqd->type & IRQ_TYPE_EDGE_RISING) - icu_int |= ICU_IS_EDGE; - icu_int |= icu_irqd->icu_group << ICU_GROUP_SHIFT; - } else { - /* De-configure the ICU */ - icu_int = 0; - } - - writel_relaxed(icu_int, icu->base + ICU_INT_CFG(d->hwirq)); - - /* - * The SATA unit has 2 ports, and a dedicated ICU entry per - * port. The ahci sata driver supports only one irq interrupt - * per SATA unit. To solve this conflict, we configure the 2 - * SATA wired interrupts in the south bridge into 1 GIC - * interrupt in the north bridge. Even if only a single port - * is enabled, if sata node is enabled, both interrupts are - * configured (regardless of which port is actually in use). - */ - if (d->hwirq == ICU_SATA0_ICU_ID || d->hwirq == ICU_SATA1_ICU_ID) { - writel_relaxed(icu_int, - icu->base + ICU_INT_CFG(ICU_SATA0_ICU_ID)); - writel_relaxed(icu_int, - icu->base + ICU_INT_CFG(ICU_SATA1_ICU_ID)); - } -} - -static struct irq_chip mvebu_icu_nsr_chip = { - .name = "ICU-NSR", - .irq_mask = irq_chip_mask_parent, - .irq_unmask = irq_chip_unmask_parent, - .irq_eoi = irq_chip_eoi_parent, - .irq_set_type = irq_chip_set_type_parent, - .irq_set_affinity = irq_chip_set_affinity_parent, -}; - -static struct irq_chip mvebu_icu_sei_chip = { - .name = "ICU-SEI", - .irq_ack = irq_chip_ack_parent, - .irq_mask = irq_chip_mask_parent, - .irq_unmask = irq_chip_unmask_parent, - .irq_set_type = irq_chip_set_type_parent, - .irq_set_affinity = irq_chip_set_affinity_parent, -}; +static DEFINE_STATIC_KEY_FALSE(legacy_bindings); -static int -mvebu_icu_irq_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec, +static int mvebu_icu_translate(struct irq_domain *d, struct irq_fwspec *fwspec, unsigned long *hwirq, unsigned int *type) { - struct mvebu_icu_msi_data *msi_data = platform_msi_get_host_data(d); - struct mvebu_icu *icu = platform_msi_get_host_data(d); unsigned int param_count = static_branch_unlikely(&legacy_bindings) ? 
3 : 2; + struct msi_domain_info *info = d->host_data; + struct mvebu_icu_msi_data *msi_data = info->chip_data; + struct mvebu_icu *icu = msi_data->icu; /* Check the count of the parameters in dt */ if (WARN_ON(fwspec->param_count != param_count)) { @@ -192,81 +109,126 @@ mvebu_icu_irq_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec, return 0; } -static int -mvebu_icu_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, - unsigned int nr_irqs, void *args) +static void mvebu_icu_init(struct mvebu_icu *icu, + struct mvebu_icu_msi_data *msi_data, + struct msi_msg *msg) { - int err; - unsigned long hwirq; - struct irq_fwspec *fwspec = args; - struct mvebu_icu_msi_data *msi_data = platform_msi_get_host_data(domain); - struct mvebu_icu *icu = msi_data->icu; - struct mvebu_icu_irq_data *icu_irqd; - struct irq_chip *chip = &mvebu_icu_nsr_chip; + const struct mvebu_icu_subset_data *subset = msi_data->subset_data; - icu_irqd = kmalloc(sizeof(*icu_irqd), GFP_KERNEL); - if (!icu_irqd) - return -ENOMEM; + if (atomic_cmpxchg(&msi_data->initialized, false, true)) + return; - err = mvebu_icu_irq_domain_translate(domain, fwspec, &hwirq, - &icu_irqd->type); - if (err) { - dev_err(icu->dev, "failed to translate ICU parameters\n"); - goto free_irqd; - } + /* Set 'SET' ICU SPI message address in AP */ + writel_relaxed(msg[0].address_hi, icu->base + subset->offset_set_ah); + writel_relaxed(msg[0].address_lo, icu->base + subset->offset_set_al); - if (static_branch_unlikely(&legacy_bindings)) - icu_irqd->icu_group = fwspec->param[0]; - else - icu_irqd->icu_group = msi_data->subset_data->icu_group; - icu_irqd->icu = icu; + if (subset->icu_group != ICU_GRP_NSR) + return; - err = platform_msi_domain_alloc(domain, virq, nr_irqs); - if (err) { - dev_err(icu->dev, "failed to allocate ICU interrupt in parent domain\n"); - goto free_irqd; - } + /* Set 'CLEAR' ICU SPI message address in AP (level-MSI only) */ + writel_relaxed(msg[1].address_hi, icu->base + subset->offset_clr_ah); + writel_relaxed(msg[1].address_lo, icu->base + subset->offset_clr_al); +} - /* Make sure there is no interrupt left pending by the firmware */ - err = irq_set_irqchip_state(virq, IRQCHIP_STATE_PENDING, false); - if (err) - goto free_msi; +static int mvebu_icu_msi_init(struct irq_domain *domain, struct msi_domain_info *info, + unsigned int virq, irq_hw_number_t hwirq, msi_alloc_info_t *arg) +{ + irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip, info->chip_data); + return irq_set_irqchip_state(virq, IRQCHIP_STATE_PENDING, false); +} - if (icu_irqd->icu_group == ICU_GRP_SEI) - chip = &mvebu_icu_sei_chip; +static void mvebu_icu_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc) +{ + arg->desc = desc; + arg->hwirq = (u32)desc->data.icookie.value; +} - err = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, - chip, icu_irqd); - if (err) { - dev_err(icu->dev, "failed to set the data to IRQ domain\n"); - goto free_msi; +static void mvebu_icu_write_msi_msg(struct irq_data *d, struct msi_msg *msg) +{ + struct mvebu_icu_msi_data *msi_data = d->chip_data; + unsigned int icu_group = msi_data->subset_data->icu_group; + struct msi_desc *desc = irq_data_get_msi_desc(d); + struct mvebu_icu *icu = msi_data->icu; + unsigned int type; + u32 icu_int; + + if (msg->address_lo || msg->address_hi) { + /* One off initialization per domain */ + mvebu_icu_init(icu, msi_data, msg); + /* Configure the ICU with irq number & type */ + icu_int = msg->data | ICU_INT_ENABLE; + type = (unsigned int)(desc->data.icookie.value >> 32); + if 
(type & IRQ_TYPE_EDGE_RISING) + icu_int |= ICU_IS_EDGE; + icu_int |= icu_group << ICU_GROUP_SHIFT; + } else { + /* De-configure the ICU */ + icu_int = 0; } - return 0; + writel_relaxed(icu_int, icu->base + ICU_INT_CFG(d->hwirq)); -free_msi: - platform_msi_domain_free(domain, virq, nr_irqs); -free_irqd: - kfree(icu_irqd); - return err; + /* + * The SATA unit has 2 ports, and a dedicated ICU entry per + * port. The ahci sata driver supports only one irq interrupt + * per SATA unit. To solve this conflict, we configure the 2 + * SATA wired interrupts in the south bridge into 1 GIC + * interrupt in the north bridge. Even if only a single port + * is enabled, if sata node is enabled, both interrupts are + * configured (regardless of which port is actually in use). + */ + if (d->hwirq == ICU_SATA0_ICU_ID || d->hwirq == ICU_SATA1_ICU_ID) { + writel_relaxed(icu_int, icu->base + ICU_INT_CFG(ICU_SATA0_ICU_ID)); + writel_relaxed(icu_int, icu->base + ICU_INT_CFG(ICU_SATA1_ICU_ID)); + } } -static void -mvebu_icu_irq_domain_free(struct irq_domain *domain, unsigned int virq, - unsigned int nr_irqs) -{ - struct irq_data *d = irq_get_irq_data(virq); - struct mvebu_icu_irq_data *icu_irqd = d->chip_data; +static const struct msi_domain_template mvebu_icu_nsr_msi_template = { + .chip = { + .name = "ICU-NSR", + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_type = irq_chip_set_type_parent, + .irq_write_msi_msg = mvebu_icu_write_msi_msg, + .flags = IRQCHIP_SUPPORTS_LEVEL_MSI, + }, - kfree(icu_irqd); + .ops = { + .msi_translate = mvebu_icu_translate, + .msi_init = mvebu_icu_msi_init, + .set_desc = mvebu_icu_set_desc, + }, - platform_msi_domain_free(domain, virq, nr_irqs); -} + .info = { + .bus_token = DOMAIN_BUS_WIRED_TO_MSI, + .flags = MSI_FLAG_LEVEL_CAPABLE | + MSI_FLAG_USE_DEV_FWNODE, + }, +}; -static const struct irq_domain_ops mvebu_icu_domain_ops = { - .translate = mvebu_icu_irq_domain_translate, - .alloc = mvebu_icu_irq_domain_alloc, - .free = mvebu_icu_irq_domain_free, +static const struct msi_domain_template mvebu_icu_sei_msi_template = { + .chip = { + .name = "ICU-SEI", + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_ack = irq_chip_ack_parent, + .irq_set_type = irq_chip_set_type_parent, + .irq_write_msi_msg = mvebu_icu_write_msi_msg, + .flags = IRQCHIP_SUPPORTS_LEVEL_MSI, + }, + + .ops = { + .msi_translate = mvebu_icu_translate, + .msi_init = mvebu_icu_msi_init, + .set_desc = mvebu_icu_set_desc, + }, + + .info = { + .bus_token = DOMAIN_BUS_WIRED_TO_MSI, + .flags = MSI_FLAG_LEVEL_CAPABLE | + MSI_FLAG_USE_DEV_FWNODE, + }, }; static const struct mvebu_icu_subset_data mvebu_icu_nsr_subset_data = { @@ -297,10 +259,10 @@ static const struct of_device_id mvebu_icu_subset_of_match[] = { static int mvebu_icu_subset_probe(struct platform_device *pdev) { + const struct msi_domain_template *tmpl; struct mvebu_icu_msi_data *msi_data; - struct device_node *msi_parent_dn; struct device *dev = &pdev->dev; - struct irq_domain *irq_domain; + bool sei; msi_data = devm_kzalloc(dev, sizeof(*msi_data), GFP_KERNEL); if (!msi_data) @@ -314,20 +276,18 @@ static int mvebu_icu_subset_probe(struct platform_device *pdev) msi_data->subset_data = of_device_get_match_data(dev); } - dev->msi_domain = of_msi_get_domain(dev, dev->of_node, - DOMAIN_BUS_PLATFORM_MSI); - if (!dev->msi_domain) + dev->msi.domain = of_msi_get_domain(dev, dev->of_node, DOMAIN_BUS_PLATFORM_MSI); + if (!dev->msi.domain) return -EPROBE_DEFER; - msi_parent_dn 
= irq_domain_get_of_node(dev->msi_domain); - if (!msi_parent_dn) + if (!irq_domain_get_of_node(dev->msi.domain)) return -ENODEV; - irq_domain = platform_msi_create_device_tree_domain(dev, ICU_MAX_IRQS, - mvebu_icu_write_msg, - &mvebu_icu_domain_ops, - msi_data); - if (!irq_domain) { + sei = msi_data->subset_data->icu_group == ICU_GRP_SEI; + tmpl = sei ? &mvebu_icu_sei_msi_template : &mvebu_icu_nsr_msi_template; + + if (!msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN, tmpl, + ICU_MAX_IRQS, NULL, msi_data)) { dev_err(dev, "Failed to create ICU MSI domain\n"); return -ENOMEM; } @@ -347,7 +307,6 @@ builtin_platform_driver(mvebu_icu_subset_driver); static int mvebu_icu_probe(struct platform_device *pdev) { struct mvebu_icu *icu; - struct resource *res; int i; icu = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_icu), @@ -357,12 +316,9 @@ static int mvebu_icu_probe(struct platform_device *pdev) icu->dev = &pdev->dev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - icu->base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(icu->base)) { - dev_err(&pdev->dev, "Failed to map icu base address.\n"); + icu->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(icu->base)) return PTR_ERR(icu->base); - } /* * Legacy bindings: ICU is one node with one MSI parent: force manually diff --git a/drivers/irqchip/irq-mvebu-odmi.c b/drivers/irqchip/irq-mvebu-odmi.c index b4d367868dbb..e5b2bde3d933 100644 --- a/drivers/irqchip/irq-mvebu-odmi.c +++ b/drivers/irqchip/irq-mvebu-odmi.c @@ -17,6 +17,9 @@ #include <linux/msi.h> #include <linux/of_address.h> #include <linux/slab.h> + +#include <linux/irqchip/irq-msi-lib.h> + #include <dt-bindings/interrupt-controller/arm-gic.h> #define GICP_ODMIN_SET 0x40 @@ -141,27 +144,35 @@ static void odmi_irq_domain_free(struct irq_domain *domain, } static const struct irq_domain_ops odmi_domain_ops = { + .select = msi_lib_irq_domain_select, .alloc = odmi_irq_domain_alloc, .free = odmi_irq_domain_free, }; -static struct irq_chip odmi_msi_irq_chip = { - .name = "ODMI", -}; +#define ODMI_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS) -static struct msi_domain_ops odmi_msi_ops = { -}; +#define ODMI_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK) -static struct msi_domain_info odmi_msi_domain_info = { - .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS), - .ops = &odmi_msi_ops, - .chip = &odmi_msi_irq_chip, +static const struct msi_parent_ops odmi_msi_parent_ops = { + .supported_flags = ODMI_MSI_FLAGS_SUPPORTED, + .required_flags = ODMI_MSI_FLAGS_REQUIRED, + .chip_flags = MSI_CHIP_FLAG_SET_EOI, + .bus_select_token = DOMAIN_BUS_GENERIC_MSI, + .bus_select_mask = MATCH_PLATFORM_MSI, + .prefix = "ODMI-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, }; static int __init mvebu_odmi_init(struct device_node *node, struct device_node *parent) { - struct irq_domain *inner_domain, *plat_domain; + struct irq_domain_info info = { + .fwnode = of_fwnode_handle(node), + .ops = &odmi_domain_ops, + .size = odmis_count * NODMIS_PER_FRAME, + .parent = irq_find_host(parent), + }; int ret, i; if (of_property_read_u32(node, "marvell,odmi-frames", &odmis_count)) @@ -171,8 +182,7 @@ static int __init mvebu_odmi_init(struct device_node *node, if (!odmis) return -ENOMEM; - odmis_bm = kcalloc(BITS_TO_LONGS(odmis_count * NODMIS_PER_FRAME), - sizeof(long), GFP_KERNEL); + odmis_bm = bitmap_zalloc(odmis_count * NODMIS_PER_FRAME, GFP_KERNEL); if (!odmis_bm) { ret = -ENOMEM; goto err_alloc; @@ -198,28 +208,11 @@ static int __init 
mvebu_odmi_init(struct device_node *node, } } - inner_domain = irq_domain_create_linear(of_node_to_fwnode(node), - odmis_count * NODMIS_PER_FRAME, - &odmi_domain_ops, NULL); - if (!inner_domain) { - ret = -ENOMEM; - goto err_unmap; - } + if (msi_create_parent_irq_domain(&info, &odmi_msi_parent_ops)) + return 0; - inner_domain->parent = irq_find_host(parent); - - plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node), - &odmi_msi_domain_info, - inner_domain); - if (!plat_domain) { - ret = -ENOMEM; - goto err_remove_inner; - } - - return 0; + ret = -ENOMEM; -err_remove_inner: - irq_domain_remove(inner_domain); err_unmap: for (i = 0; i < odmis_count; i++) { struct odmi_data *odmi = &odmis[i]; @@ -227,7 +220,7 @@ err_unmap: if (odmi->base && !IS_ERR(odmi->base)) iounmap(odmis[i].base); } - kfree(odmis_bm); + bitmap_free(odmis_bm); err_alloc: kfree(odmis); return ret; diff --git a/drivers/irqchip/irq-mvebu-pic.c b/drivers/irqchip/irq-mvebu-pic.c index eec63951129a..10b85128183a 100644 --- a/drivers/irqchip/irq-mvebu-pic.c +++ b/drivers/irqchip/irq-mvebu-pic.c @@ -18,6 +18,7 @@ #include <linux/module.h> #include <linux/of_irq.h> #include <linux/platform_device.h> +#include <linux/seq_file.h> #define PIC_CAUSE 0x0 #define PIC_MASK 0x4 @@ -29,7 +30,7 @@ struct mvebu_pic { void __iomem *base; u32 parent_irq; struct irq_domain *domain; - struct irq_chip irq_chip; + struct platform_device *pdev; }; static void mvebu_pic_reset(struct mvebu_pic *pic) @@ -66,6 +67,20 @@ static void mvebu_pic_unmask_irq(struct irq_data *d) writel(reg, pic->base + PIC_MASK); } +static void mvebu_pic_print_chip(struct irq_data *d, struct seq_file *p) +{ + struct mvebu_pic *pic = irq_data_get_irq_chip_data(d); + + seq_puts(p, dev_name(&pic->pdev->dev)); +} + +static const struct irq_chip mvebu_pic_chip = { + .irq_mask = mvebu_pic_mask_irq, + .irq_unmask = mvebu_pic_unmask_irq, + .irq_eoi = mvebu_pic_eoi_irq, + .irq_print_chip = mvebu_pic_print_chip, +}; + static int mvebu_pic_irq_map(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq) { @@ -73,8 +88,7 @@ static int mvebu_pic_irq_map(struct irq_domain *domain, unsigned int virq, irq_set_percpu_devid(virq); irq_set_chip_data(virq, pic); - irq_set_chip_and_handler(virq, &pic->irq_chip, - handle_percpu_devid_irq); + irq_set_chip_and_handler(virq, &mvebu_pic_chip, handle_percpu_devid_irq); irq_set_status_flags(virq, IRQ_LEVEL); irq_set_probe(virq); @@ -91,15 +105,12 @@ static void mvebu_pic_handle_cascade_irq(struct irq_desc *desc) struct mvebu_pic *pic = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); unsigned long irqmap, irqn; - unsigned int cascade_irq; irqmap = readl_relaxed(pic->base + PIC_CAUSE); chained_irq_enter(chip, desc); - for_each_set_bit(irqn, &irqmap, BITS_PER_LONG) { - cascade_irq = irq_find_mapping(pic->domain, irqn); - generic_handle_irq(cascade_irq); - } + for_each_set_bit(irqn, &irqmap, BITS_PER_LONG) + generic_handle_domain_irq(pic->domain, irqn); chained_irq_exit(chip, desc); } @@ -123,32 +134,24 @@ static int mvebu_pic_probe(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; struct mvebu_pic *pic; - struct irq_chip *irq_chip; - struct resource *res; pic = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pic), GFP_KERNEL); if (!pic) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - pic->base = devm_ioremap_resource(&pdev->dev, res); + pic->pdev = pdev; + pic->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(pic->base)) return 
PTR_ERR(pic->base); - irq_chip = &pic->irq_chip; - irq_chip->name = dev_name(&pdev->dev); - irq_chip->irq_mask = mvebu_pic_mask_irq; - irq_chip->irq_unmask = mvebu_pic_unmask_irq; - irq_chip->irq_eoi = mvebu_pic_eoi_irq; - pic->parent_irq = irq_of_parse_and_map(node, 0); if (pic->parent_irq <= 0) { dev_err(&pdev->dev, "Failed to parse parent interrupt\n"); return -EINVAL; } - pic->domain = irq_domain_add_linear(node, PIC_MAX_IRQS, - &mvebu_pic_domain_ops, pic); + pic->domain = irq_domain_create_linear(dev_fwnode(&pdev->dev), PIC_MAX_IRQS, + &mvebu_pic_domain_ops, pic); if (!pic->domain) { dev_err(&pdev->dev, "Failed to allocate irq domain\n"); return -ENOMEM; @@ -164,14 +167,12 @@ static int mvebu_pic_probe(struct platform_device *pdev) return 0; } -static int mvebu_pic_remove(struct platform_device *pdev) +static void mvebu_pic_remove(struct platform_device *pdev) { struct mvebu_pic *pic = platform_get_drvdata(pdev); on_each_cpu(mvebu_pic_disable_percpu_irq, pic, 1); irq_domain_remove(pic->domain); - - return 0; } static const struct of_device_id mvebu_pic_of_match[] = { @@ -181,17 +182,16 @@ static const struct of_device_id mvebu_pic_of_match[] = { MODULE_DEVICE_TABLE(of, mvebu_pic_of_match); static struct platform_driver mvebu_pic_driver = { - .probe = mvebu_pic_probe, - .remove = mvebu_pic_remove, + .probe = mvebu_pic_probe, + .remove = mvebu_pic_remove, .driver = { - .name = "mvebu-pic", - .of_match_table = mvebu_pic_of_match, + .name = "mvebu-pic", + .of_match_table = mvebu_pic_of_match, }, }; module_platform_driver(mvebu_pic_driver); MODULE_AUTHOR("Yehuda Yitschak <yehuday@marvell.com>"); MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>"); +MODULE_DESCRIPTION("Marvell Armada 7K/8K PIC driver"); MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("platform:mvebu_pic"); - diff --git a/drivers/irqchip/irq-mvebu-sei.c b/drivers/irqchip/irq-mvebu-sei.c index add4c9c934c8..5822ea864765 100644 --- a/drivers/irqchip/irq-mvebu-sei.c +++ b/drivers/irqchip/irq-mvebu-sei.c @@ -14,6 +14,8 @@ #include <linux/of_irq.h> #include <linux/of_platform.h> +#include <linux/irqchip/irq-msi-lib.h> + /* Cause register */ #define GICP_SECR(idx) (0x0 + ((idx) * 0x4)) /* Mask register */ @@ -303,25 +305,11 @@ static void mvebu_sei_cp_domain_free(struct irq_domain *domain, } static const struct irq_domain_ops mvebu_sei_cp_domain_ops = { + .select = msi_lib_irq_domain_select, .alloc = mvebu_sei_cp_domain_alloc, .free = mvebu_sei_cp_domain_free, }; -static struct irq_chip mvebu_sei_msi_irq_chip = { - .name = "SEI pMSI", - .irq_ack = irq_chip_ack_parent, - .irq_set_type = irq_chip_set_type_parent, -}; - -static struct msi_domain_ops mvebu_sei_msi_ops = { -}; - -static struct msi_domain_info mvebu_sei_msi_domain_info = { - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS, - .ops = &mvebu_sei_msi_ops, - .chip = &mvebu_sei_msi_irq_chip, -}; - static void mvebu_sei_handle_cascade_irq(struct irq_desc *desc) { struct mvebu_sei *sei = irq_desc_get_handler_data(desc); @@ -337,17 +325,12 @@ static void mvebu_sei_handle_cascade_irq(struct irq_desc *desc) irqmap = readl_relaxed(sei->base + GICP_SECR(idx)); for_each_set_bit(bit, &irqmap, SEI_IRQ_COUNT_PER_REG) { unsigned long hwirq; - unsigned int virq; + int err; hwirq = idx * SEI_IRQ_COUNT_PER_REG + bit; - virq = irq_find_mapping(sei->sei_domain, hwirq); - if (likely(virq)) { - generic_handle_irq(virq); - continue; - } - - dev_warn(sei->dev, - "Spurious IRQ detected (hwirq %lu)\n", hwirq); + err = generic_handle_domain_irq(sei->sei_domain, hwirq); + if 
(unlikely(err)) + dev_warn(sei->dev, "Spurious IRQ detected (hwirq %lu)\n", hwirq); } } @@ -365,10 +348,28 @@ static void mvebu_sei_reset(struct mvebu_sei *sei) } } +#define SEI_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS) + +#define SEI_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK) + +static const struct msi_parent_ops sei_msi_parent_ops = { + .supported_flags = SEI_MSI_FLAGS_SUPPORTED, + .required_flags = SEI_MSI_FLAGS_REQUIRED, + .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK, + .bus_select_mask = MATCH_PLATFORM_MSI, + .bus_select_token = DOMAIN_BUS_GENERIC_MSI, + .prefix = "SEI-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, +}; + static int mvebu_sei_probe(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; - struct irq_domain *plat_domain; + struct irq_domain_info info = { + .fwnode = of_fwnode_handle(node), + .ops = &mvebu_sei_cp_domain_ops, + }; struct mvebu_sei *sei; u32 parent_irq; int ret; @@ -382,12 +383,9 @@ static int mvebu_sei_probe(struct platform_device *pdev) mutex_init(&sei->cp_msi_lock); raw_spin_lock_init(&sei->mask_lock); - sei->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - sei->base = devm_ioremap_resource(sei->dev, sei->res); - if (IS_ERR(sei->base)) { - dev_err(sei->dev, "Failed to remap SEI resource\n"); + sei->base = devm_platform_get_and_ioremap_resource(pdev, 0, &sei->res); + if (IS_ERR(sei->base)) return PTR_ERR(sei->base); - } /* Retrieve the SEI capabilities with the interrupt ranges */ sei->caps = of_device_get_match_data(&pdev->dev); @@ -408,7 +406,7 @@ static int mvebu_sei_probe(struct platform_device *pdev) } /* Create the root SEI domain */ - sei->sei_domain = irq_domain_create_linear(of_node_to_fwnode(node), + sei->sei_domain = irq_domain_create_linear(of_fwnode_handle(node), (sei->caps->ap_range.size + sei->caps->cp_range.size), &mvebu_sei_domain_ops, @@ -424,7 +422,7 @@ static int mvebu_sei_probe(struct platform_device *pdev) /* Create the 'wired' domain */ sei->ap_domain = irq_domain_create_hierarchy(sei->sei_domain, 0, sei->caps->ap_range.size, - of_node_to_fwnode(node), + of_fwnode_handle(node), &mvebu_sei_ap_domain_ops, sei); if (!sei->ap_domain) { @@ -436,49 +434,32 @@ static int mvebu_sei_probe(struct platform_device *pdev) irq_domain_update_bus_token(sei->ap_domain, DOMAIN_BUS_WIRED); /* Create the 'MSI' domain */ - sei->cp_domain = irq_domain_create_hierarchy(sei->sei_domain, 0, - sei->caps->cp_range.size, - of_node_to_fwnode(node), - &mvebu_sei_cp_domain_ops, - sei); + info.size = sei->caps->cp_range.size; + info.host_data = sei; + info.parent = sei->sei_domain; + + sei->cp_domain = msi_create_parent_irq_domain(&info, &sei_msi_parent_ops); if (!sei->cp_domain) { pr_err("Failed to create CPs IRQ domain\n"); ret = -ENOMEM; goto remove_ap_domain; } - irq_domain_update_bus_token(sei->cp_domain, DOMAIN_BUS_GENERIC_MSI); - - plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node), - &mvebu_sei_msi_domain_info, - sei->cp_domain); - if (!plat_domain) { - pr_err("Failed to create CPs MSI domain\n"); - ret = -ENOMEM; - goto remove_cp_domain; - } - mvebu_sei_reset(sei); - irq_set_chained_handler_and_data(parent_irq, - mvebu_sei_handle_cascade_irq, - sei); - + irq_set_chained_handler_and_data(parent_irq, mvebu_sei_handle_cascade_irq, sei); return 0; -remove_cp_domain: - irq_domain_remove(sei->cp_domain); remove_ap_domain: irq_domain_remove(sei->ap_domain); remove_sei_domain: irq_domain_remove(sei->sei_domain); dispose_irq: irq_dispose_mapping(parent_irq); 
-
 	return ret;
 }
 
-struct mvebu_sei_caps mvebu_sei_ap806_caps = {
+static struct mvebu_sei_caps mvebu_sei_ap806_caps = {
 	.ap_range = {
 		.first	= 0,
 		.size	= 21,
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index e8b31f52e071..0bb423dd5280 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -1,21 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Copyright (C) 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved.
  * Copyright (C) 2014 Oleksij Rempel <linux@rempel-privat.de>
  *   Add Alphascale ASM9260 support.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
  */
 
 #include <linux/kernel.h>
@@ -71,7 +58,7 @@ struct icoll_priv {
 static struct icoll_priv icoll_priv;
 static struct irq_domain *icoll_domain;
 
-/* calculate bit offset depending on number of intterupt per register */
+/* calculate bit offset depending on number of interrupts per register */
 static u32 icoll_intr_bitshift(struct irq_data *d, u32 bit)
 {
 	/*
@@ -81,7 +68,7 @@ static u32 icoll_intr_bitshift(struct irq_data *d, u32 bit)
 	return bit << ((d->hwirq & 3) << 3);
 }
 
-/* calculate mem offset depending on number of intterupt per register */
+/* calculate mem offset depending on number of interrupts per register */
 static void __iomem *icoll_intr_reg(struct irq_data *d)
 {
 	/* offset = hwirq / intr_per_reg * 0x10 */
@@ -143,13 +130,13 @@ static struct irq_chip asm9260_icoll_chip = {
 	IRQCHIP_SKIP_SET_WAKE,
 };
 
-asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs)
 {
 	u32 irqnr;
 
 	irqnr = __raw_readl(icoll_priv.stat);
 	__raw_writel(irqnr, icoll_priv.vector);
-	handle_domain_irq(icoll_domain, irqnr, regs);
+	generic_handle_domain_irq(icoll_domain, irqnr);
 }
 
 static int icoll_irq_domain_map(struct irq_domain *d, unsigned int virq,
@@ -175,8 +162,8 @@ static const struct irq_domain_ops icoll_irq_domain_ops = {
 
 static void __init icoll_add_domain(struct device_node *np, int num)
 {
-	icoll_domain = irq_domain_add_linear(np, num,
-					     &icoll_irq_domain_ops, NULL);
+	icoll_domain = irq_domain_create_linear(of_fwnode_handle(np), num,
+						&icoll_irq_domain_ops, NULL);
 
 	if (!icoll_domain)
 		panic("%pOF: unable to create irq domain", np);
@@ -214,6 +201,7 @@ static int __init icoll_of_init(struct device_node *np,
 	stmp_reset_block(icoll_priv.ctrl);
 
 	icoll_add_domain(np, ICOLL_NUM_IRQS);
+	set_handle_irq(icoll_handle_irq);
 
 	return 0;
 }
diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c
index b1777104fd9f..2191a2b79578 100644
--- a/drivers/irqchip/irq-nvic.c
+++ b/drivers/irqchip/irq-nvic.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * drivers/irq/irq-nvic.c
  *
  * Copyright (C) 2008 ARM Limited, All Rights Reserved.
* Copyright (C) 2013 Pengutronix * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * * Support for the Nested Vectored Interrupt Controller found on the * ARMv7-M CPUs (Cortex-M3/M4) */ @@ -29,7 +26,7 @@ #define NVIC_ISER 0x000 #define NVIC_ICER 0x080 -#define NVIC_IPR 0x300 +#define NVIC_IPR 0x400 #define NVIC_MAX_BANKS 16 /* @@ -40,23 +37,12 @@ static struct irq_domain *nvic_irq_domain; -asmlinkage void __exception_irq_entry -nvic_handle_irq(irq_hw_number_t hwirq, struct pt_regs *regs) +static void __irq_entry nvic_handle_irq(struct pt_regs *regs) { - unsigned int irq = irq_linear_revmap(nvic_irq_domain, hwirq); - - handle_IRQ(irq, regs); -} + unsigned long icsr = readl_relaxed(BASEADDR_V7M_SCB + V7M_SCB_ICSR); + irq_hw_number_t hwirq = (icsr & V7M_SCB_ICSR_VECTACTIVE) - 16; -static int nvic_irq_domain_translate(struct irq_domain *d, - struct irq_fwspec *fwspec, - unsigned long *hwirq, unsigned int *type) -{ - if (WARN_ON(fwspec->param_count < 1)) - return -EINVAL; - *hwirq = fwspec->param[0]; - *type = IRQ_TYPE_NONE; - return 0; + generic_handle_domain_irq(nvic_irq_domain, hwirq); } static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, @@ -67,7 +53,7 @@ static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int type = IRQ_TYPE_NONE; struct irq_fwspec *fwspec = arg; - ret = nvic_irq_domain_translate(domain, fwspec, &hwirq, &type); + ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type); if (ret) return ret; @@ -78,7 +64,7 @@ static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, } static const struct irq_domain_ops nvic_irq_domain_ops = { - .translate = nvic_irq_domain_translate, + .translate = irq_domain_translate_onecell, .alloc = nvic_irq_domain_alloc, .free = irq_domain_free_irqs_top, }; @@ -87,8 +73,9 @@ static int __init nvic_of_init(struct device_node *node, struct device_node *parent) { unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; - unsigned int irqs, i, ret, numbanks; + unsigned int irqs, i, numbanks; void __iomem *nvic_base; + int ret; numbanks = (readl_relaxed(V7M_SCS_ICTR) & V7M_SCS_ICTR_INTLINESNUM_MASK) + 1; @@ -104,10 +91,11 @@ static int __init nvic_of_init(struct device_node *node, irqs = NVIC_MAX_IRQ; nvic_irq_domain = - irq_domain_add_linear(node, irqs, &nvic_irq_domain_ops, NULL); + irq_domain_create_linear(of_fwnode_handle(node), irqs, &nvic_irq_domain_ops, NULL); if (!nvic_irq_domain) { pr_warn("Failed to allocate irq domain\n"); + iounmap(nvic_base); return -ENOMEM; } @@ -117,6 +105,7 @@ static int __init nvic_of_init(struct device_node *node, if (ret) { pr_warn("Failed to allocate irq chips\n"); irq_domain_remove(nvic_irq_domain); + iounmap(nvic_base); return ret; } @@ -142,6 +131,7 @@ static int __init nvic_of_init(struct device_node *node, for (i = 0; i < irqs; i += 4) writel_relaxed(0, nvic_base + NVIC_IPR + i); + set_handle_irq(nvic_handle_irq); return 0; } IRQCHIP_DECLARE(armv7m_nvic, "arm,armv7m-nvic", nvic_of_init); diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c index d360a6eddd6d..16f00db570e7 100644 --- a/drivers/irqchip/irq-omap-intc.c +++ b/drivers/irqchip/irq-omap-intc.c @@ -248,7 +248,7 @@ static int __init omap_init_irq_of(struct device_node *node) if (WARN_ON(!omap_irq_base)) return -ENOMEM; - domain = irq_domain_add_linear(node, omap_nr_irqs, + domain = 
irq_domain_create_linear(of_fwnode_handle(node), omap_nr_irqs, &irq_generic_chip_ops, NULL); omap_irq_soft_reset(); @@ -274,7 +274,7 @@ static int __init omap_init_irq_legacy(u32 base, struct device_node *node) irq_base = 0; } - domain = irq_domain_add_legacy(node, omap_nr_irqs, irq_base, 0, + domain = irq_domain_create_legacy(of_fwnode_handle(node), omap_nr_irqs, irq_base, 0, &irq_domain_simple_ops, NULL); omap_irq_soft_reset(); @@ -325,8 +325,7 @@ static int __init omap_init_irq(u32 base, struct device_node *node) return ret; } -static asmlinkage void __exception_irq_entry -omap_intc_handle_irq(struct pt_regs *regs) +static void __exception_irq_entry omap_intc_handle_irq(struct pt_regs *regs) { extern unsigned long irq_err_count; u32 irqnr; @@ -357,7 +356,7 @@ omap_intc_handle_irq(struct pt_regs *regs) } irqnr &= ACTIVEIRQ_MASK; - handle_domain_irq(domain, irqnr, regs); + generic_handle_domain_irq(domain, irqnr); } static int __init intc_of_init(struct device_node *node, diff --git a/drivers/irqchip/irq-or1k-pic.c b/drivers/irqchip/irq-or1k-pic.c index dd9d5d12fea2..48126067c54b 100644 --- a/drivers/irqchip/irq-or1k-pic.c +++ b/drivers/irqchip/irq-or1k-pic.c @@ -1,11 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> * Copyright (C) 2014 Stefan Kristansson <stefan.kristiansson@saunalahti.fi> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. */ #include <linux/irq.h> @@ -70,7 +66,6 @@ static struct or1k_pic_dev or1k_pic_level = { .name = "or1k-PIC-level", .irq_unmask = or1k_pic_unmask, .irq_mask = or1k_pic_mask, - .irq_mask_ack = or1k_pic_mask_ack, }, .handle = handle_level_irq, .flags = IRQ_LEVEL | IRQ_NOPROBE, @@ -120,7 +115,7 @@ static void or1k_pic_handle_irq(struct pt_regs *regs) int irq = -1; while ((irq = pic_get_irq(irq + 1)) != NO_IRQ) - handle_domain_irq(root_domain, irq, regs); + generic_handle_domain_irq(root_domain, irq); } static int or1k_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) @@ -149,8 +144,8 @@ static int __init or1k_pic_init(struct device_node *node, /* Disable all interrupts until explicitly requested */ mtspr(SPR_PICMR, (0UL)); - root_domain = irq_domain_add_linear(node, 32, &or1k_irq_domain_ops, - pic); + root_domain = irq_domain_create_linear(of_fwnode_handle(node), 32, &or1k_irq_domain_ops, + pic); set_handle_irq(or1k_pic_handle_irq); diff --git a/drivers/irqchip/irq-orion.c b/drivers/irqchip/irq-orion.c index c4b5ffb61954..dddbc05917c0 100644 --- a/drivers/irqchip/irq-orion.c +++ b/drivers/irqchip/irq-orion.c @@ -42,8 +42,8 @@ __exception_irq_entry orion_handle_irq(struct pt_regs *regs) gc->mask_cache; while (stat) { u32 hwirq = __fls(stat); - handle_domain_irq(orion_irq_domain, - gc->irq_base + hwirq, regs); + generic_handle_domain_irq(orion_irq_domain, + gc->irq_base + hwirq); stat &= ~(1 << hwirq); } } @@ -57,10 +57,9 @@ static int __init orion_irq_init(struct device_node *np, struct resource r; /* count number of irq chips by valid reg addresses */ - while (of_address_to_resource(np, num_chips, &r) == 0) - num_chips++; + num_chips = of_address_count(np); - orion_irq_domain = irq_domain_add_linear(np, + orion_irq_domain = irq_domain_create_linear(of_fwnode_handle(np), num_chips * ORION_IRQS_PER_CHIP, &irq_generic_chip_ops, NULL); if (!orion_irq_domain) @@ -117,7 +116,7 
@@ static void orion_bridge_irq_handler(struct irq_desc *desc) while (stat) { u32 hwirq = __fls(stat); - generic_handle_irq(irq_find_mapping(d, gc->irq_base + hwirq)); + generic_handle_domain_irq(d, gc->irq_base + hwirq); stat &= ~(1 << hwirq); } } @@ -147,8 +146,8 @@ static int __init orion_bridge_irq_init(struct device_node *np, /* get optional number of interrupts provided */ of_property_read_u32(np, "marvell,#interrupts", &nrirqs); - domain = irq_domain_add_linear(np, nrirqs, - &irq_generic_chip_ops, NULL); + domain = irq_domain_create_linear(of_fwnode_handle(np), nrirqs, + &irq_generic_chip_ops, NULL); if (!domain) { pr_err("%pOFn: unable to add irq domain\n", np); return -ENOMEM; diff --git a/drivers/irqchip/irq-owl-sirq.c b/drivers/irqchip/irq-owl-sirq.c new file mode 100644 index 000000000000..3d93d21f6732 --- /dev/null +++ b/drivers/irqchip/irq-owl-sirq.c @@ -0,0 +1,359 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Actions Semi Owl SoCs SIRQ interrupt controller driver + * + * Copyright (C) 2014 Actions Semi Inc. + * David Liu <liuwei@actions-semi.com> + * + * Author: Parthiban Nallathambi <pn@denx.de> + * Author: Saravanan Sekar <sravanhome@gmail.com> + * Author: Cristian Ciocaltea <cristian.ciocaltea@gmail.com> + */ + +#include <linux/bitfield.h> +#include <linux/interrupt.h> +#include <linux/irqchip.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> + +#include <dt-bindings/interrupt-controller/arm-gic.h> + +#define NUM_SIRQ 3 + +#define INTC_EXTCTL_PENDING BIT(0) +#define INTC_EXTCTL_CLK_SEL BIT(4) +#define INTC_EXTCTL_EN BIT(5) +#define INTC_EXTCTL_TYPE_MASK GENMASK(7, 6) +#define INTC_EXTCTL_TYPE_HIGH 0 +#define INTC_EXTCTL_TYPE_LOW BIT(6) +#define INTC_EXTCTL_TYPE_RISING BIT(7) +#define INTC_EXTCTL_TYPE_FALLING (BIT(6) | BIT(7)) + +/* S500 & S700 SIRQ control register masks */ +#define INTC_EXTCTL_SIRQ0_MASK GENMASK(23, 16) +#define INTC_EXTCTL_SIRQ1_MASK GENMASK(15, 8) +#define INTC_EXTCTL_SIRQ2_MASK GENMASK(7, 0) + +/* S900 SIRQ control register offsets, relative to controller base address */ +#define INTC_EXTCTL0 0x0000 +#define INTC_EXTCTL1 0x0328 +#define INTC_EXTCTL2 0x032c + +struct owl_sirq_params { + /* INTC_EXTCTL reg shared for all three SIRQ lines */ + bool reg_shared; + /* INTC_EXTCTL reg offsets relative to controller base address */ + u16 reg_offset[NUM_SIRQ]; +}; + +struct owl_sirq_chip_data { + const struct owl_sirq_params *params; + void __iomem *base; + raw_spinlock_t lock; + u32 ext_irqs[NUM_SIRQ]; +}; + +/* S500 & S700 SoCs */ +static const struct owl_sirq_params owl_sirq_s500_params = { + .reg_shared = true, + .reg_offset = { 0, 0, 0 }, +}; + +/* S900 SoC */ +static const struct owl_sirq_params owl_sirq_s900_params = { + .reg_shared = false, + .reg_offset = { INTC_EXTCTL0, INTC_EXTCTL1, INTC_EXTCTL2 }, +}; + +static u32 owl_field_get(u32 val, u32 index) +{ + switch (index) { + case 0: + return FIELD_GET(INTC_EXTCTL_SIRQ0_MASK, val); + case 1: + return FIELD_GET(INTC_EXTCTL_SIRQ1_MASK, val); + case 2: + default: + return FIELD_GET(INTC_EXTCTL_SIRQ2_MASK, val); + } +} + +static u32 owl_field_prep(u32 val, u32 index) +{ + switch (index) { + case 0: + return FIELD_PREP(INTC_EXTCTL_SIRQ0_MASK, val); + case 1: + return FIELD_PREP(INTC_EXTCTL_SIRQ1_MASK, val); + case 2: + default: + return FIELD_PREP(INTC_EXTCTL_SIRQ2_MASK, val); + } +} + +static u32 owl_sirq_read_extctl(struct owl_sirq_chip_data *data, u32 index) +{ + u32 val; + + val = readl_relaxed(data->base + data->params->reg_offset[index]); + if (data->params->reg_shared) + val = 
owl_field_get(val, index); + + return val; +} + +static void owl_sirq_write_extctl(struct owl_sirq_chip_data *data, + u32 extctl, u32 index) +{ + u32 val; + + if (data->params->reg_shared) { + val = readl_relaxed(data->base + data->params->reg_offset[index]); + val &= ~owl_field_prep(0xff, index); + extctl = owl_field_prep(extctl, index) | val; + } + + writel_relaxed(extctl, data->base + data->params->reg_offset[index]); +} + +static void owl_sirq_clear_set_extctl(struct owl_sirq_chip_data *d, + u32 clear, u32 set, u32 index) +{ + unsigned long flags; + u32 val; + + raw_spin_lock_irqsave(&d->lock, flags); + val = owl_sirq_read_extctl(d, index); + val &= ~clear; + val |= set; + owl_sirq_write_extctl(d, val, index); + raw_spin_unlock_irqrestore(&d->lock, flags); +} + +static void owl_sirq_eoi(struct irq_data *data) +{ + struct owl_sirq_chip_data *chip_data = irq_data_get_irq_chip_data(data); + + /* + * Software must clear the external interrupt pending bit when the + * interrupt type is edge triggered, so we need per-SIRQ based clearing. + */ + if (!irqd_is_level_type(data)) + owl_sirq_clear_set_extctl(chip_data, 0, INTC_EXTCTL_PENDING, + data->hwirq); + + irq_chip_eoi_parent(data); +} + +static void owl_sirq_mask(struct irq_data *data) +{ + struct owl_sirq_chip_data *chip_data = irq_data_get_irq_chip_data(data); + + owl_sirq_clear_set_extctl(chip_data, INTC_EXTCTL_EN, 0, data->hwirq); + irq_chip_mask_parent(data); +} + +static void owl_sirq_unmask(struct irq_data *data) +{ + struct owl_sirq_chip_data *chip_data = irq_data_get_irq_chip_data(data); + + owl_sirq_clear_set_extctl(chip_data, 0, INTC_EXTCTL_EN, data->hwirq); + irq_chip_unmask_parent(data); +} + +/* + * The GIC does not handle falling-edge or active-low interrupts, hence the + * SIRQ controller shall be programmed to convert a falling-edge signal to + * rising edge and an active-low signal to active high.
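+ * + * For example, a request for IRQ_TYPE_EDGE_FALLING programs INTC_EXTCTL_TYPE_FALLING + * into the EXTCTL field while the parent GIC is configured for IRQ_TYPE_EDGE_RISING, + * as owl_sirq_set_type() below does.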
+ */ +static int owl_sirq_set_type(struct irq_data *data, unsigned int type) +{ + struct owl_sirq_chip_data *chip_data = irq_data_get_irq_chip_data(data); + u32 sirq_type; + + switch (type) { + case IRQ_TYPE_LEVEL_LOW: + sirq_type = INTC_EXTCTL_TYPE_LOW; + type = IRQ_TYPE_LEVEL_HIGH; + break; + case IRQ_TYPE_LEVEL_HIGH: + sirq_type = INTC_EXTCTL_TYPE_HIGH; + break; + case IRQ_TYPE_EDGE_FALLING: + sirq_type = INTC_EXTCTL_TYPE_FALLING; + type = IRQ_TYPE_EDGE_RISING; + break; + case IRQ_TYPE_EDGE_RISING: + sirq_type = INTC_EXTCTL_TYPE_RISING; + break; + default: + return -EINVAL; + } + + owl_sirq_clear_set_extctl(chip_data, INTC_EXTCTL_TYPE_MASK, sirq_type, + data->hwirq); + + return irq_chip_set_type_parent(data, type); +} + +static struct irq_chip owl_sirq_chip = { + .name = "owl-sirq", + .irq_mask = owl_sirq_mask, + .irq_unmask = owl_sirq_unmask, + .irq_eoi = owl_sirq_eoi, + .irq_set_type = owl_sirq_set_type, + .irq_retrigger = irq_chip_retrigger_hierarchy, +#ifdef CONFIG_SMP + .irq_set_affinity = irq_chip_set_affinity_parent, +#endif +}; + +static int owl_sirq_domain_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int *type) +{ + if (!is_of_node(fwspec->fwnode)) + return -EINVAL; + + if (fwspec->param_count != 2 || fwspec->param[0] >= NUM_SIRQ) + return -EINVAL; + + *hwirq = fwspec->param[0]; + *type = fwspec->param[1]; + + return 0; +} + +static int owl_sirq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *data) +{ + struct owl_sirq_chip_data *chip_data = domain->host_data; + struct irq_fwspec *fwspec = data; + struct irq_fwspec parent_fwspec; + irq_hw_number_t hwirq; + unsigned int type; + int ret; + + if (WARN_ON(nr_irqs != 1)) + return -EINVAL; + + ret = owl_sirq_domain_translate(domain, fwspec, &hwirq, &type); + if (ret) + return ret; + + switch (type) { + case IRQ_TYPE_EDGE_RISING: + case IRQ_TYPE_LEVEL_HIGH: + break; + case IRQ_TYPE_EDGE_FALLING: + type = IRQ_TYPE_EDGE_RISING; + break; + case IRQ_TYPE_LEVEL_LOW: + type = IRQ_TYPE_LEVEL_HIGH; + break; + default: + return -EINVAL; + } + + irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &owl_sirq_chip, + chip_data); + + parent_fwspec.fwnode = domain->parent->fwnode; + parent_fwspec.param_count = 3; + parent_fwspec.param[0] = GIC_SPI; + parent_fwspec.param[1] = chip_data->ext_irqs[hwirq]; + parent_fwspec.param[2] = type; + + return irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec); +} + +static const struct irq_domain_ops owl_sirq_domain_ops = { + .translate = owl_sirq_domain_translate, + .alloc = owl_sirq_domain_alloc, + .free = irq_domain_free_irqs_common, +}; + +static int __init owl_sirq_init(const struct owl_sirq_params *params, + struct device_node *node, + struct device_node *parent) +{ + struct irq_domain *domain, *parent_domain; + struct owl_sirq_chip_data *chip_data; + int ret, i; + + parent_domain = irq_find_host(parent); + if (!parent_domain) { + pr_err("%pOF: failed to find sirq parent domain\n", node); + return -ENXIO; + } + + chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL); + if (!chip_data) + return -ENOMEM; + + raw_spin_lock_init(&chip_data->lock); + + chip_data->params = params; + + chip_data->base = of_iomap(node, 0); + if (!chip_data->base) { + pr_err("%pOF: failed to map sirq registers\n", node); + ret = -ENXIO; + goto out_free; + } + + for (i = 0; i < NUM_SIRQ; i++) { + struct of_phandle_args irq; + + ret = of_irq_parse_one(node, i, &irq); + if (ret) { + pr_err("%pOF: failed to parse interrupt %d\n", node, 
i); + goto out_unmap; + } + + if (WARN_ON(irq.args_count != 3)) { + ret = -EINVAL; + goto out_unmap; + } + + chip_data->ext_irqs[i] = irq.args[1]; + + /* Set 24MHz external interrupt clock freq */ + owl_sirq_clear_set_extctl(chip_data, 0, INTC_EXTCTL_CLK_SEL, i); + } + + domain = irq_domain_create_hierarchy(parent_domain, 0, NUM_SIRQ, of_fwnode_handle(node), + &owl_sirq_domain_ops, chip_data); + if (!domain) { + pr_err("%pOF: failed to add domain\n", node); + ret = -ENOMEM; + goto out_unmap; + } + + return 0; + +out_unmap: + iounmap(chip_data->base); +out_free: + kfree(chip_data); + + return ret; +} + +static int __init owl_sirq_s500_of_init(struct device_node *node, + struct device_node *parent) +{ + return owl_sirq_init(&owl_sirq_s500_params, node, parent); +} + +IRQCHIP_DECLARE(owl_sirq_s500, "actions,s500-sirq", owl_sirq_s500_of_init); +IRQCHIP_DECLARE(owl_sirq_s700, "actions,s700-sirq", owl_sirq_s500_of_init); + +static int __init owl_sirq_s900_of_init(struct device_node *node, + struct device_node *parent) +{ + return owl_sirq_init(&owl_sirq_s900_params, node, parent); +} + +IRQCHIP_DECLARE(owl_sirq_s900, "actions,s900-sirq", owl_sirq_s900_of_init); diff --git a/drivers/irqchip/irq-partition-percpu.c b/drivers/irqchip/irq-partition-percpu.c deleted file mode 100644 index 1f7cc5933cd5..000000000000 --- a/drivers/irqchip/irq-partition-percpu.c +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) 2016 ARM Limited, All Rights Reserved. - * Author: Marc Zyngier <marc.zyngier@arm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. 
- */ - -#include <linux/bitops.h> -#include <linux/interrupt.h> -#include <linux/irqchip.h> -#include <linux/irqchip/chained_irq.h> -#include <linux/irqchip/irq-partition-percpu.h> -#include <linux/irqdomain.h> -#include <linux/seq_file.h> -#include <linux/slab.h> - -struct partition_desc { - int nr_parts; - struct partition_affinity *parts; - struct irq_domain *domain; - struct irq_desc *chained_desc; - unsigned long *bitmap; - struct irq_domain_ops ops; -}; - -static bool partition_check_cpu(struct partition_desc *part, - unsigned int cpu, unsigned int hwirq) -{ - return cpumask_test_cpu(cpu, &part->parts[hwirq].mask); -} - -static void partition_irq_mask(struct irq_data *d) -{ - struct partition_desc *part = irq_data_get_irq_chip_data(d); - struct irq_chip *chip = irq_desc_get_chip(part->chained_desc); - struct irq_data *data = irq_desc_get_irq_data(part->chained_desc); - - if (partition_check_cpu(part, smp_processor_id(), d->hwirq) && - chip->irq_mask) - chip->irq_mask(data); -} - -static void partition_irq_unmask(struct irq_data *d) -{ - struct partition_desc *part = irq_data_get_irq_chip_data(d); - struct irq_chip *chip = irq_desc_get_chip(part->chained_desc); - struct irq_data *data = irq_desc_get_irq_data(part->chained_desc); - - if (partition_check_cpu(part, smp_processor_id(), d->hwirq) && - chip->irq_unmask) - chip->irq_unmask(data); -} - -static int partition_irq_set_irqchip_state(struct irq_data *d, - enum irqchip_irq_state which, - bool val) -{ - struct partition_desc *part = irq_data_get_irq_chip_data(d); - struct irq_chip *chip = irq_desc_get_chip(part->chained_desc); - struct irq_data *data = irq_desc_get_irq_data(part->chained_desc); - - if (partition_check_cpu(part, smp_processor_id(), d->hwirq) && - chip->irq_set_irqchip_state) - return chip->irq_set_irqchip_state(data, which, val); - - return -EINVAL; -} - -static int partition_irq_get_irqchip_state(struct irq_data *d, - enum irqchip_irq_state which, - bool *val) -{ - struct partition_desc *part = irq_data_get_irq_chip_data(d); - struct irq_chip *chip = irq_desc_get_chip(part->chained_desc); - struct irq_data *data = irq_desc_get_irq_data(part->chained_desc); - - if (partition_check_cpu(part, smp_processor_id(), d->hwirq) && - chip->irq_get_irqchip_state) - return chip->irq_get_irqchip_state(data, which, val); - - return -EINVAL; -} - -static int partition_irq_set_type(struct irq_data *d, unsigned int type) -{ - struct partition_desc *part = irq_data_get_irq_chip_data(d); - struct irq_chip *chip = irq_desc_get_chip(part->chained_desc); - struct irq_data *data = irq_desc_get_irq_data(part->chained_desc); - - if (chip->irq_set_type) - return chip->irq_set_type(data, type); - - return -EINVAL; -} - -static void partition_irq_print_chip(struct irq_data *d, struct seq_file *p) -{ - struct partition_desc *part = irq_data_get_irq_chip_data(d); - struct irq_chip *chip = irq_desc_get_chip(part->chained_desc); - struct irq_data *data = irq_desc_get_irq_data(part->chained_desc); - - seq_printf(p, " %5s-%lu", chip->name, data->hwirq); -} - -static struct irq_chip partition_irq_chip = { - .irq_mask = partition_irq_mask, - .irq_unmask = partition_irq_unmask, - .irq_set_type = partition_irq_set_type, - .irq_get_irqchip_state = partition_irq_get_irqchip_state, - .irq_set_irqchip_state = partition_irq_set_irqchip_state, - .irq_print_chip = partition_irq_print_chip, -}; - -static void partition_handle_irq(struct irq_desc *desc) -{ - struct partition_desc *part = irq_desc_get_handler_data(desc); - struct irq_chip *chip = 
irq_desc_get_chip(desc); - int cpu = smp_processor_id(); - int hwirq; - - chained_irq_enter(chip, desc); - - for_each_set_bit(hwirq, part->bitmap, part->nr_parts) { - if (partition_check_cpu(part, cpu, hwirq)) - break; - } - - if (unlikely(hwirq == part->nr_parts)) { - handle_bad_irq(desc); - } else { - unsigned int irq; - irq = irq_find_mapping(part->domain, hwirq); - generic_handle_irq(irq); - } - - chained_irq_exit(chip, desc); -} - -static int partition_domain_alloc(struct irq_domain *domain, unsigned int virq, - unsigned int nr_irqs, void *arg) -{ - int ret; - irq_hw_number_t hwirq; - unsigned int type; - struct irq_fwspec *fwspec = arg; - struct partition_desc *part; - - BUG_ON(nr_irqs != 1); - ret = domain->ops->translate(domain, fwspec, &hwirq, &type); - if (ret) - return ret; - - part = domain->host_data; - - set_bit(hwirq, part->bitmap); - irq_set_chained_handler_and_data(irq_desc_get_irq(part->chained_desc), - partition_handle_irq, part); - irq_set_percpu_devid_partition(virq, &part->parts[hwirq].mask); - irq_domain_set_info(domain, virq, hwirq, &partition_irq_chip, part, - handle_percpu_devid_irq, NULL, NULL); - irq_set_status_flags(virq, IRQ_NOAUTOEN); - - return 0; -} - -static void partition_domain_free(struct irq_domain *domain, unsigned int virq, - unsigned int nr_irqs) -{ - struct irq_data *d; - - BUG_ON(nr_irqs != 1); - - d = irq_domain_get_irq_data(domain, virq); - irq_set_handler(virq, NULL); - irq_domain_reset_irq_data(d); -} - -int partition_translate_id(struct partition_desc *desc, void *partition_id) -{ - struct partition_affinity *part = NULL; - int i; - - for (i = 0; i < desc->nr_parts; i++) { - if (desc->parts[i].partition_id == partition_id) { - part = &desc->parts[i]; - break; - } - } - - if (WARN_ON(!part)) { - pr_err("Failed to find partition\n"); - return -EINVAL; - } - - return i; -} - -struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode, - struct partition_affinity *parts, - int nr_parts, - int chained_irq, - const struct irq_domain_ops *ops) -{ - struct partition_desc *desc; - struct irq_domain *d; - - BUG_ON(!ops->select || !ops->translate); - - desc = kzalloc(sizeof(*desc), GFP_KERNEL); - if (!desc) - return NULL; - - desc->ops = *ops; - desc->ops.free = partition_domain_free; - desc->ops.alloc = partition_domain_alloc; - - d = irq_domain_create_linear(fwnode, nr_parts, &desc->ops, desc); - if (!d) - goto out; - desc->domain = d; - - desc->bitmap = kcalloc(BITS_TO_LONGS(nr_parts), sizeof(long), - GFP_KERNEL); - if (WARN_ON(!desc->bitmap)) - goto out; - - desc->chained_desc = irq_to_desc(chained_irq); - desc->nr_parts = nr_parts; - desc->parts = parts; - - return desc; -out: - if (d) - irq_domain_remove(d); - kfree(desc); - - return NULL; -} - -struct irq_domain *partition_get_domain(struct partition_desc *dsc) -{ - if (dsc) - return dsc->domain; - - return NULL; -} diff --git a/drivers/irqchip/irq-pic32-evic.c b/drivers/irqchip/irq-pic32-evic.c index 73addb4b625b..5dfda8e8df10 100644 --- a/drivers/irqchip/irq-pic32-evic.c +++ b/drivers/irqchip/irq-pic32-evic.c @@ -1,12 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Cristian Birsan <cristian.birsan@microchip.com> * Joshua Henderson <joshua.henderson@microchip.com> * Copyright (C) 2016 Microchip Technology Inc. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. */ #include <linux/kernel.h> #include <linux/module.h> @@ -46,11 +42,10 @@ static void __iomem *evic_base; asmlinkage void __weak plat_irq_dispatch(void) { - unsigned int irq, hwirq; + unsigned int hwirq; hwirq = readl(evic_base + REG_INTSTAT) & 0xFF; - irq = irq_linear_revmap(evic_irq_domain, hwirq); - do_IRQ(irq); + do_domain_IRQ(evic_irq_domain, hwirq); } static struct evic_chip_data *irqd_to_priv(struct irq_data *data) @@ -166,9 +161,9 @@ static int pic32_irq_domain_map(struct irq_domain *d, unsigned int virq, return ret; } -int pic32_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr, - const u32 *intspec, unsigned int intsize, - irq_hw_number_t *out_hwirq, unsigned int *out_type) +static int pic32_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr, + const u32 *intspec, unsigned int intsize, + irq_hw_number_t *out_hwirq, unsigned int *out_type) { struct evic_chip_data *priv = d->host_data; @@ -195,13 +190,11 @@ static void __init pic32_ext_irq_of_init(struct irq_domain *domain) { struct device_node *node = irq_domain_get_of_node(domain); struct evic_chip_data *priv = domain->host_data; - struct property *prop; - const __le32 *p; u32 hwirq; int i = 0; const char *pname = "microchip,external-irqs"; - of_property_for_each_u32(node, pname, prop, p, hwirq) { + of_property_for_each_u32(node, pname, hwirq) { if (i >= ARRAY_SIZE(priv->ext_irqs)) { pr_warn("More than %d external irq, skip rest\n", ARRAY_SIZE(priv->ext_irqs)); @@ -234,9 +227,9 @@ static int __init pic32_of_init(struct device_node *node, goto err_iounmap; } - evic_irq_domain = irq_domain_add_linear(node, nchips * 32, - &pic32_irq_domain_ops, - priv); + evic_irq_domain = irq_domain_create_linear(of_fwnode_handle(node), nchips * 32, + &pic32_irq_domain_ops, + priv); if (!evic_irq_domain) { ret = -ENOMEM; goto err_free_priv; @@ -298,7 +291,7 @@ static int __init pic32_of_init(struct device_node *node, gc->private = &priv[i]; } - irq_set_default_host(evic_irq_domain); + irq_set_default_domain(evic_irq_domain); /* * External interrupts have software configurable edge polarity. These diff --git a/drivers/irqchip/irq-pruss-intc.c b/drivers/irqchip/irq-pruss-intc.c new file mode 100644 index 000000000000..81078d56f38d --- /dev/null +++ b/drivers/irqchip/irq-pruss-intc.c @@ -0,0 +1,658 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * PRU-ICSS INTC IRQChip driver for various TI SoCs + * + * Copyright (C) 2016-2020 Texas Instruments Incorporated - http://www.ti.com/ + * + * Author(s): + * Andrew F. Davis <afd@ti.com> + * Suman Anna <s-anna@ti.com> + * Grzegorz Jaszczyk <grzegorz.jaszczyk@linaro.org> for Texas Instruments + * + * Copyright (C) 2019 David Lechner <david@lechnology.com> + */ + +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> + +/* + * Number of host interrupts reaching the main MPU sub-system. 
Note that this + * is not the same as the total number of host interrupts supported by the PRUSS + * INTC instance + */ +#define MAX_NUM_HOST_IRQS 8 + +/* minimum starting host interrupt number for MPU */ +#define FIRST_PRU_HOST_INT 2 + +/* PRU_ICSS_INTC registers */ +#define PRU_INTC_REVID 0x0000 +#define PRU_INTC_CR 0x0004 +#define PRU_INTC_GER 0x0010 +#define PRU_INTC_GNLR 0x001c +#define PRU_INTC_SISR 0x0020 +#define PRU_INTC_SICR 0x0024 +#define PRU_INTC_EISR 0x0028 +#define PRU_INTC_EICR 0x002c +#define PRU_INTC_HIEISR 0x0034 +#define PRU_INTC_HIDISR 0x0038 +#define PRU_INTC_GPIR 0x0080 +#define PRU_INTC_SRSR(x) (0x0200 + (x) * 4) +#define PRU_INTC_SECR(x) (0x0280 + (x) * 4) +#define PRU_INTC_ESR(x) (0x0300 + (x) * 4) +#define PRU_INTC_ECR(x) (0x0380 + (x) * 4) +#define PRU_INTC_CMR(x) (0x0400 + (x) * 4) +#define PRU_INTC_HMR(x) (0x0800 + (x) * 4) +#define PRU_INTC_HIPIR(x) (0x0900 + (x) * 4) +#define PRU_INTC_SIPR(x) (0x0d00 + (x) * 4) +#define PRU_INTC_SITR(x) (0x0d80 + (x) * 4) +#define PRU_INTC_HINLR(x) (0x1100 + (x) * 4) +#define PRU_INTC_HIER 0x1500 + +/* CMR register bit-field macros */ +#define CMR_EVT_MAP_MASK 0xf +#define CMR_EVT_MAP_BITS 8 +#define CMR_EVT_PER_REG 4 + +/* HMR register bit-field macros */ +#define HMR_CH_MAP_MASK 0xf +#define HMR_CH_MAP_BITS 8 +#define HMR_CH_PER_REG 4 + +/* HIPIR register bit-fields */ +#define INTC_HIPIR_NONE_HINT 0x80000000 + +#define MAX_PRU_SYS_EVENTS 160 +#define MAX_PRU_CHANNELS 20 + +/** + * struct pruss_intc_map_record - keeps track of actual mapping state + * @value: The currently mapped value (channel or host) + * @ref_count: Keeps track of number of current users of this resource + */ +struct pruss_intc_map_record { + u8 value; + u8 ref_count; +}; + +/** + * struct pruss_intc_match_data - match data to handle SoC variations + * @num_system_events: number of input system events handled by the PRUSS INTC + * @num_host_events: number of host events (which is equal to number of + * channels) supported by the PRUSS INTC + */ +struct pruss_intc_match_data { + u8 num_system_events; + u8 num_host_events; +}; + +/** + * struct pruss_intc - PRUSS interrupt controller structure + * @event_channel: current state of system event to channel mappings + * @channel_host: current state of channel to host mappings + * @irqs: kernel irq numbers corresponding to PRUSS host interrupts + * @base: base virtual address of INTC register space + * @domain: irq domain for this interrupt controller + * @soc_config: cached PRUSS INTC IP configuration data + * @dev: PRUSS INTC device pointer + * @lock: mutex to serialize interrupts mapping + */ +struct pruss_intc { + struct pruss_intc_map_record event_channel[MAX_PRU_SYS_EVENTS]; + struct pruss_intc_map_record channel_host[MAX_PRU_CHANNELS]; + unsigned int irqs[MAX_NUM_HOST_IRQS]; + void __iomem *base; + struct irq_domain *domain; + const struct pruss_intc_match_data *soc_config; + struct device *dev; + struct mutex lock; /* PRUSS INTC lock */ +}; + +/** + * struct pruss_host_irq_data - PRUSS host irq data structure + * @intc: PRUSS interrupt controller pointer + * @host_irq: host irq number + */ +struct pruss_host_irq_data { + struct pruss_intc *intc; + u8 host_irq; +}; + +static inline u32 pruss_intc_read_reg(struct pruss_intc *intc, unsigned int reg) +{ + return readl_relaxed(intc->base + reg); +} + +static inline void pruss_intc_write_reg(struct pruss_intc *intc, + unsigned int reg, u32 val) +{ + writel_relaxed(val, intc->base + reg); +} + +static void pruss_intc_update_cmr(struct pruss_intc *intc, 
unsigned int evt, + u8 ch) +{ + u32 idx, offset, val; + + idx = evt / CMR_EVT_PER_REG; + offset = (evt % CMR_EVT_PER_REG) * CMR_EVT_MAP_BITS; + + val = pruss_intc_read_reg(intc, PRU_INTC_CMR(idx)); + val &= ~(CMR_EVT_MAP_MASK << offset); + val |= ch << offset; + pruss_intc_write_reg(intc, PRU_INTC_CMR(idx), val); + + dev_dbg(intc->dev, "SYSEV%u -> CH%d (CMR%d 0x%08x)\n", evt, ch, + idx, pruss_intc_read_reg(intc, PRU_INTC_CMR(idx))); +} + +static void pruss_intc_update_hmr(struct pruss_intc *intc, u8 ch, u8 host) +{ + u32 idx, offset, val; + + idx = ch / HMR_CH_PER_REG; + offset = (ch % HMR_CH_PER_REG) * HMR_CH_MAP_BITS; + + val = pruss_intc_read_reg(intc, PRU_INTC_HMR(idx)); + val &= ~(HMR_CH_MAP_MASK << offset); + val |= host << offset; + pruss_intc_write_reg(intc, PRU_INTC_HMR(idx), val); + + dev_dbg(intc->dev, "CH%d -> HOST%d (HMR%d 0x%08x)\n", ch, host, idx, + pruss_intc_read_reg(intc, PRU_INTC_HMR(idx))); +} + +/** + * pruss_intc_map() - configure the PRUSS INTC + * @intc: PRUSS interrupt controller pointer + * @hwirq: the system event number + * + * Configures the PRUSS INTC with the event/channel/host mapping that was + * parsed earlier in the xlate function. + */ +static void pruss_intc_map(struct pruss_intc *intc, unsigned long hwirq) +{ + struct device *dev = intc->dev; + u8 ch, host, reg_idx; + u32 val; + + mutex_lock(&intc->lock); + + intc->event_channel[hwirq].ref_count++; + + ch = intc->event_channel[hwirq].value; + host = intc->channel_host[ch].value; + + pruss_intc_update_cmr(intc, hwirq, ch); + + reg_idx = hwirq / 32; + val = BIT(hwirq % 32); + + /* clear and enable system event */ + pruss_intc_write_reg(intc, PRU_INTC_ESR(reg_idx), val); + pruss_intc_write_reg(intc, PRU_INTC_SECR(reg_idx), val); + + if (++intc->channel_host[ch].ref_count == 1) { + pruss_intc_update_hmr(intc, ch, host); + + /* enable host interrupts */ + pruss_intc_write_reg(intc, PRU_INTC_HIEISR, host); + } + + dev_dbg(dev, "mapped system_event = %lu channel = %d host = %d", + hwirq, ch, host); + + mutex_unlock(&intc->lock); +} + +/** + * pruss_intc_unmap() - unconfigure the PRUSS INTC + * @intc: PRUSS interrupt controller pointer + * @hwirq: the system event number + * + * Undo whatever was done in pruss_intc_map() for a PRU core. + * Mappings are reference counted, so resources are only disabled when there + * are no longer any users.
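+ * + * For example, if two system events share channel 2 routed to host 0, unmapping + * the first event only drops the channel reference count; PRU_INTC_HIDISR is written + * and the HMR mapping cleared only once the last user is unmapped.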
+ */ +static void pruss_intc_unmap(struct pruss_intc *intc, unsigned long hwirq) +{ + u8 ch, host, reg_idx; + u32 val; + + mutex_lock(&intc->lock); + + ch = intc->event_channel[hwirq].value; + host = intc->channel_host[ch].value; + + if (--intc->channel_host[ch].ref_count == 0) { + /* disable host interrupts */ + pruss_intc_write_reg(intc, PRU_INTC_HIDISR, host); + + /* clear the map using reset value 0 */ + pruss_intc_update_hmr(intc, ch, 0); + } + + intc->event_channel[hwirq].ref_count--; + reg_idx = hwirq / 32; + val = BIT(hwirq % 32); + + /* disable system events */ + pruss_intc_write_reg(intc, PRU_INTC_ECR(reg_idx), val); + /* clear any pending status */ + pruss_intc_write_reg(intc, PRU_INTC_SECR(reg_idx), val); + + /* clear the map using reset value 0 */ + pruss_intc_update_cmr(intc, hwirq, 0); + + dev_dbg(intc->dev, "unmapped system_event = %lu channel = %d host = %d\n", + hwirq, ch, host); + + mutex_unlock(&intc->lock); +} + +static void pruss_intc_init(struct pruss_intc *intc) +{ + const struct pruss_intc_match_data *soc_config = intc->soc_config; + int num_chnl_map_regs, num_host_intr_regs, num_event_type_regs, i; + + num_chnl_map_regs = DIV_ROUND_UP(soc_config->num_system_events, + CMR_EVT_PER_REG); + num_host_intr_regs = DIV_ROUND_UP(soc_config->num_host_events, + HMR_CH_PER_REG); + num_event_type_regs = DIV_ROUND_UP(soc_config->num_system_events, 32); + + /* + * configure polarity (SIPR register) to active high and + * type (SITR register) to level interrupt for all system events + */ + for (i = 0; i < num_event_type_regs; i++) { + pruss_intc_write_reg(intc, PRU_INTC_SIPR(i), 0xffffffff); + pruss_intc_write_reg(intc, PRU_INTC_SITR(i), 0); + } + + /* clear all interrupt channel map registers, 4 events per register */ + for (i = 0; i < num_chnl_map_regs; i++) + pruss_intc_write_reg(intc, PRU_INTC_CMR(i), 0); + + /* clear all host interrupt map registers, 4 channels per register */ + for (i = 0; i < num_host_intr_regs; i++) + pruss_intc_write_reg(intc, PRU_INTC_HMR(i), 0); + + /* global interrupt enable */ + pruss_intc_write_reg(intc, PRU_INTC_GER, 1); +} + +static void pruss_intc_irq_ack(struct irq_data *data) +{ + struct pruss_intc *intc = irq_data_get_irq_chip_data(data); + unsigned int hwirq = data->hwirq; + + pruss_intc_write_reg(intc, PRU_INTC_SICR, hwirq); +} + +static void pruss_intc_irq_mask(struct irq_data *data) +{ + struct pruss_intc *intc = irq_data_get_irq_chip_data(data); + unsigned int hwirq = data->hwirq; + + pruss_intc_write_reg(intc, PRU_INTC_EICR, hwirq); +} + +static void pruss_intc_irq_unmask(struct irq_data *data) +{ + struct pruss_intc *intc = irq_data_get_irq_chip_data(data); + unsigned int hwirq = data->hwirq; + + pruss_intc_write_reg(intc, PRU_INTC_EISR, hwirq); +} + +static int pruss_intc_irq_reqres(struct irq_data *data) +{ + if (!try_module_get(THIS_MODULE)) + return -ENODEV; + + return 0; +} + +static void pruss_intc_irq_relres(struct irq_data *data) +{ + module_put(THIS_MODULE); +} + +static int pruss_intc_irq_get_irqchip_state(struct irq_data *data, + enum irqchip_irq_state which, + bool *state) +{ + struct pruss_intc *intc = irq_data_get_irq_chip_data(data); + u32 reg, mask, srsr; + + if (which != IRQCHIP_STATE_PENDING) + return -EINVAL; + + reg = PRU_INTC_SRSR(data->hwirq / 32); + mask = BIT(data->hwirq % 32); + + srsr = pruss_intc_read_reg(intc, reg); + + *state = !!(srsr & mask); + + return 0; +} + +static int pruss_intc_irq_set_irqchip_state(struct irq_data *data, + enum irqchip_irq_state which, + bool state) +{ + struct pruss_intc *intc = 
irq_data_get_irq_chip_data(data); + + if (which != IRQCHIP_STATE_PENDING) + return -EINVAL; + + if (state) + pruss_intc_write_reg(intc, PRU_INTC_SISR, data->hwirq); + else + pruss_intc_write_reg(intc, PRU_INTC_SICR, data->hwirq); + + return 0; +} + +static struct irq_chip pruss_irqchip = { + .name = "pruss-intc", + .irq_ack = pruss_intc_irq_ack, + .irq_mask = pruss_intc_irq_mask, + .irq_unmask = pruss_intc_irq_unmask, + .irq_request_resources = pruss_intc_irq_reqres, + .irq_release_resources = pruss_intc_irq_relres, + .irq_get_irqchip_state = pruss_intc_irq_get_irqchip_state, + .irq_set_irqchip_state = pruss_intc_irq_set_irqchip_state, +}; + +static int pruss_intc_validate_mapping(struct pruss_intc *intc, int event, + int channel, int host) +{ + struct device *dev = intc->dev; + int ret = 0; + + mutex_lock(&intc->lock); + + /* check if sysevent already assigned */ + if (intc->event_channel[event].ref_count > 0 && + intc->event_channel[event].value != channel) { + dev_err(dev, "event %d (req. ch %d) already assigned to channel %d\n", + event, channel, intc->event_channel[event].value); + ret = -EBUSY; + goto unlock; + } + + /* check if channel already assigned */ + if (intc->channel_host[channel].ref_count > 0 && + intc->channel_host[channel].value != host) { + dev_err(dev, "channel %d (req. host %d) already assigned to host %d\n", + channel, host, intc->channel_host[channel].value); + ret = -EBUSY; + goto unlock; + } + + intc->event_channel[event].value = channel; + intc->channel_host[channel].value = host; + +unlock: + mutex_unlock(&intc->lock); + return ret; +} + +static int +pruss_intc_irq_domain_xlate(struct irq_domain *d, struct device_node *node, + const u32 *intspec, unsigned int intsize, + unsigned long *out_hwirq, unsigned int *out_type) +{ + struct pruss_intc *intc = d->host_data; + struct device *dev = intc->dev; + int ret, sys_event, channel, host; + + if (intsize < 3) + return -EINVAL; + + sys_event = intspec[0]; + if (sys_event < 0 || sys_event >= intc->soc_config->num_system_events) { + dev_err(dev, "%d is not a valid event number\n", sys_event); + return -EINVAL; + } + + channel = intspec[1]; + if (channel < 0 || channel >= intc->soc_config->num_host_events) { + dev_err(dev, "%d is not a valid channel number\n", channel); + return -EINVAL; + } + + host = intspec[2]; + if (host < 0 || host >= intc->soc_config->num_host_events) { + dev_err(dev, "%d is not a valid host irq number\n", host); + return -EINVAL; + } + + /* check if requested sys_event was already mapped, if so validate it */ + ret = pruss_intc_validate_mapping(intc, sys_event, channel, host); + if (ret) + return ret; + + *out_hwirq = sys_event; + *out_type = IRQ_TYPE_LEVEL_HIGH; + + return 0; +} + +static int pruss_intc_irq_domain_map(struct irq_domain *d, unsigned int virq, + irq_hw_number_t hw) +{ + struct pruss_intc *intc = d->host_data; + + pruss_intc_map(intc, hw); + + irq_set_chip_data(virq, intc); + irq_set_chip_and_handler(virq, &pruss_irqchip, handle_level_irq); + + return 0; +} + +static void pruss_intc_irq_domain_unmap(struct irq_domain *d, unsigned int virq) +{ + struct pruss_intc *intc = d->host_data; + unsigned long hwirq = irqd_to_hwirq(irq_get_irq_data(virq)); + + irq_set_chip_and_handler(virq, NULL, NULL); + irq_set_chip_data(virq, NULL); + pruss_intc_unmap(intc, hwirq); +} + +static const struct irq_domain_ops pruss_intc_irq_domain_ops = { + .xlate = pruss_intc_irq_domain_xlate, + .map = pruss_intc_irq_domain_map, + .unmap = pruss_intc_irq_domain_unmap, +}; + +static void pruss_intc_irq_handler(struct
irq_desc *desc) +{ + unsigned int irq = irq_desc_get_irq(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + struct pruss_host_irq_data *host_irq_data = irq_get_handler_data(irq); + struct pruss_intc *intc = host_irq_data->intc; + u8 host_irq = host_irq_data->host_irq + FIRST_PRU_HOST_INT; + + chained_irq_enter(chip, desc); + + while (true) { + u32 hipir; + int hwirq, err; + + /* get highest priority pending PRUSS system event */ + hipir = pruss_intc_read_reg(intc, PRU_INTC_HIPIR(host_irq)); + if (hipir & INTC_HIPIR_NONE_HINT) + break; + + hwirq = hipir & GENMASK(9, 0); + err = generic_handle_domain_irq(intc->domain, hwirq); + + /* + * NOTE: manually ACK any system events that do not have a + * handler mapped yet + */ + if (WARN_ON_ONCE(err)) + pruss_intc_write_reg(intc, PRU_INTC_SICR, hwirq); + } + + chained_irq_exit(chip, desc); +} + +static const char * const irq_names[MAX_NUM_HOST_IRQS] = { + "host_intr0", "host_intr1", "host_intr2", "host_intr3", + "host_intr4", "host_intr5", "host_intr6", "host_intr7", +}; + +static int pruss_intc_probe(struct platform_device *pdev) +{ + const struct pruss_intc_match_data *data; + struct device *dev = &pdev->dev; + struct pruss_intc *intc; + struct pruss_host_irq_data *host_data; + int i, irq, ret; + u8 max_system_events, irqs_reserved = 0; + + data = of_device_get_match_data(dev); + if (!data) + return -ENODEV; + + max_system_events = data->num_system_events; + + intc = devm_kzalloc(dev, sizeof(*intc), GFP_KERNEL); + if (!intc) + return -ENOMEM; + + intc->soc_config = data; + intc->dev = dev; + platform_set_drvdata(pdev, intc); + + intc->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(intc->base)) + return PTR_ERR(intc->base); + + ret = of_property_read_u8(dev->of_node, "ti,irqs-reserved", + &irqs_reserved); + + /* + * The irqs-reserved property is used only on some SoCs, so not + * having this property is still valid + */ + if (ret < 0 && ret != -EINVAL) + return ret; + + pruss_intc_init(intc); + + mutex_init(&intc->lock); + + intc->domain = irq_domain_create_linear(dev_fwnode(dev), max_system_events, + &pruss_intc_irq_domain_ops, intc); + if (!intc->domain) + return -ENOMEM; + + for (i = 0; i < MAX_NUM_HOST_IRQS; i++) { + if (irqs_reserved & BIT(i)) + continue; + + irq = platform_get_irq_byname(pdev, irq_names[i]); + if (irq < 0) { + ret = irq; + goto fail_irq; + } + + intc->irqs[i] = irq; + + host_data = devm_kzalloc(dev, sizeof(*host_data), GFP_KERNEL); + if (!host_data) { + ret = -ENOMEM; + goto fail_irq; + } + + host_data->intc = intc; + host_data->host_irq = i; + + irq_set_chained_handler_and_data(irq, pruss_intc_irq_handler, host_data); + } + + return 0; + +fail_irq: + while (--i >= 0) { + if (intc->irqs[i]) + irq_set_chained_handler_and_data(intc->irqs[i], NULL, + NULL); + } + + irq_domain_remove(intc->domain); + + return ret; +} + +static void pruss_intc_remove(struct platform_device *pdev) +{ + struct pruss_intc *intc = platform_get_drvdata(pdev); + u8 max_system_events = intc->soc_config->num_system_events; + unsigned int hwirq; + int i; + + for (i = 0; i < MAX_NUM_HOST_IRQS; i++) { + if (intc->irqs[i]) + irq_set_chained_handler_and_data(intc->irqs[i], NULL, + NULL); + } + + for (hwirq = 0; hwirq < max_system_events; hwirq++) + irq_dispose_mapping(irq_find_mapping(intc->domain, hwirq)); + + irq_domain_remove(intc->domain); +} + +static const struct pruss_intc_match_data pruss_intc_data = { + .num_system_events = 64, + .num_host_events = 10, +}; + +static const struct pruss_intc_match_data icssg_intc_data = {
.num_system_events = 160, + .num_host_events = 20, +}; + +static const struct of_device_id pruss_intc_of_match[] = { + { + .compatible = "ti,pruss-intc", + .data = &pruss_intc_data, + }, + { + .compatible = "ti,icssg-intc", + .data = &icssg_intc_data, + }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, pruss_intc_of_match); + +static struct platform_driver pruss_intc_driver = { + .driver = { + .name = "pruss-intc", + .of_match_table = pruss_intc_of_match, + .suppress_bind_attrs = true, + }, + .probe = pruss_intc_probe, + .remove = pruss_intc_remove, +}; +module_platform_driver(pruss_intc_driver); + +MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>"); +MODULE_AUTHOR("Suman Anna <s-anna@ti.com>"); +MODULE_AUTHOR("Grzegorz Jaszczyk <grzegorz.jaszczyk@linaro.org>"); +MODULE_DESCRIPTION("TI PRU-ICSS INTC Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/irqchip/irq-qcom-mpm.c b/drivers/irqchip/irq-qcom-mpm.c new file mode 100644 index 000000000000..83f31ea657b7 --- /dev/null +++ b/drivers/irqchip/irq-qcom-mpm.c @@ -0,0 +1,484 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, Linaro Limited + * Copyright (c) 2010-2020, The Linux Foundation. All rights reserved. + */ + +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/irqchip.h> +#include <linux/irqdomain.h> +#include <linux/mailbox_client.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <linux/slab.h> +#include <linux/soc/qcom/irq.h> +#include <linux/spinlock.h> + +/* + * This is the driver for the Qualcomm MPM (MSM Power Manager) interrupt + * controller, which is commonly found on Qualcomm SoCs built on the RPM + * architecture. Sitting in the always-on domain, the MPM monitors the wakeup + * interrupts while the SoC is asleep, and wakes up the AP when one of those + * interrupts occurs. This driver doesn't directly access physical MPM + * registers though. Instead, the access is bridged via a piece of internal + * memory (SRAM) that is accessible to both the AP and the RPM. This piece of + * memory is called 'vMPM' in the driver. + * + * While the SoC is awake, the vMPM is owned by the AP, and all register setup + * by this driver happens on the vMPM. When the AP is about to be power + * collapsed, the driver sends a mailbox notification to the RPM, which takes + * over the vMPM ownership and dumps the vMPM into the physical MPM registers. + * On wakeup, the AP is woken up by an MPM pin/interrupt, and the RPM copies + * the STATUS registers into the vMPM. The AP then owns the vMPM again. + * + * vMPM register map: + * + * 31 0 + * +--------------------------------+ + * | TIMER0 | 0x00 + * +--------------------------------+ + * | TIMER1 | 0x04 + * +--------------------------------+ + * | ENABLE0 | 0x08 + * +--------------------------------+ + * | ... | ... + * +--------------------------------+ + * | ENABLEn | + * +--------------------------------+ + * | FALLING_EDGE0 | + * +--------------------------------+ + * | ...
| + * +--------------------------------+ + * | STATUSn | + * +--------------------------------+ + * + * n = DIV_ROUND_UP(pin_cnt, 32) + * + */ + +#define MPM_REG_ENABLE 0 +#define MPM_REG_FALLING_EDGE 1 +#define MPM_REG_RISING_EDGE 2 +#define MPM_REG_POLARITY 3 +#define MPM_REG_STATUS 4 + +/* MPM pin map to GIC hwirq */ +struct mpm_gic_map { + int pin; + irq_hw_number_t hwirq; +}; + +struct qcom_mpm_priv { + void __iomem *base; + raw_spinlock_t lock; + struct mbox_client mbox_client; + struct mbox_chan *mbox_chan; + struct mpm_gic_map *maps; + unsigned int map_cnt; + unsigned int reg_stride; + struct irq_domain *domain; + struct generic_pm_domain genpd; +}; + +static u32 qcom_mpm_read(struct qcom_mpm_priv *priv, unsigned int reg, + unsigned int index) +{ + unsigned int offset = (reg * priv->reg_stride + index + 2) * 4; + + return readl_relaxed(priv->base + offset); +} + +static void qcom_mpm_write(struct qcom_mpm_priv *priv, unsigned int reg, + unsigned int index, u32 val) +{ + unsigned int offset = (reg * priv->reg_stride + index + 2) * 4; + + writel_relaxed(val, priv->base + offset); + + /* Ensure the write is completed */ + wmb(); +} + +static void qcom_mpm_enable_irq(struct irq_data *d, bool en) +{ + struct qcom_mpm_priv *priv = d->chip_data; + int pin = d->hwirq; + unsigned int index = pin / 32; + unsigned int shift = pin % 32; + unsigned long flags, val; + + raw_spin_lock_irqsave(&priv->lock, flags); + + val = qcom_mpm_read(priv, MPM_REG_ENABLE, index); + __assign_bit(shift, &val, en); + qcom_mpm_write(priv, MPM_REG_ENABLE, index, val); + + raw_spin_unlock_irqrestore(&priv->lock, flags); +} + +static void qcom_mpm_mask(struct irq_data *d) +{ + qcom_mpm_enable_irq(d, false); + + if (d->parent_data) + irq_chip_mask_parent(d); +} + +static void qcom_mpm_unmask(struct irq_data *d) +{ + qcom_mpm_enable_irq(d, true); + + if (d->parent_data) + irq_chip_unmask_parent(d); +} + +static void mpm_set_type(struct qcom_mpm_priv *priv, bool set, unsigned int reg, + unsigned int index, unsigned int shift) +{ + unsigned long flags, val; + + raw_spin_lock_irqsave(&priv->lock, flags); + + val = qcom_mpm_read(priv, reg, index); + __assign_bit(shift, &val, set); + qcom_mpm_write(priv, reg, index, val); + + raw_spin_unlock_irqrestore(&priv->lock, flags); +} + +static int qcom_mpm_set_type(struct irq_data *d, unsigned int type) +{ + struct qcom_mpm_priv *priv = d->chip_data; + int pin = d->hwirq; + unsigned int index = pin / 32; + unsigned int shift = pin % 32; + + if (type & IRQ_TYPE_EDGE_RISING) + mpm_set_type(priv, true, MPM_REG_RISING_EDGE, index, shift); + else + mpm_set_type(priv, false, MPM_REG_RISING_EDGE, index, shift); + + if (type & IRQ_TYPE_EDGE_FALLING) + mpm_set_type(priv, true, MPM_REG_FALLING_EDGE, index, shift); + else + mpm_set_type(priv, false, MPM_REG_FALLING_EDGE, index, shift); + + if (type & IRQ_TYPE_LEVEL_HIGH) + mpm_set_type(priv, true, MPM_REG_POLARITY, index, shift); + else + mpm_set_type(priv, false, MPM_REG_POLARITY, index, shift); + + if (!d->parent_data) + return 0; + + if (type & IRQ_TYPE_EDGE_BOTH) + type = IRQ_TYPE_EDGE_RISING; + + if (type & IRQ_TYPE_LEVEL_MASK) + type = IRQ_TYPE_LEVEL_HIGH; + + return irq_chip_set_type_parent(d, type); +} + +static struct irq_chip qcom_mpm_chip = { + .name = "mpm", + .irq_eoi = irq_chip_eoi_parent, + .irq_mask = qcom_mpm_mask, + .irq_unmask = qcom_mpm_unmask, + .irq_retrigger = irq_chip_retrigger_hierarchy, + .irq_set_type = qcom_mpm_set_type, + .irq_set_affinity = irq_chip_set_affinity_parent, + .flags = IRQCHIP_MASK_ON_SUSPEND | + 
IRQCHIP_SKIP_SET_WAKE, +}; + +static struct mpm_gic_map *get_mpm_gic_map(struct qcom_mpm_priv *priv, int pin) +{ + struct mpm_gic_map *maps = priv->maps; + int i; + + for (i = 0; i < priv->map_cnt; i++) { + if (maps[i].pin == pin) + return &maps[i]; + } + + return NULL; +} + +static int qcom_mpm_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *data) +{ + struct qcom_mpm_priv *priv = domain->host_data; + struct irq_fwspec *fwspec = data; + struct irq_fwspec parent_fwspec; + struct mpm_gic_map *map; + irq_hw_number_t pin; + unsigned int type; + int ret; + + ret = irq_domain_translate_twocell(domain, fwspec, &pin, &type); + if (ret) + return ret; + + if (pin == GPIO_NO_WAKE_IRQ) + return irq_domain_disconnect_hierarchy(domain, virq); + + ret = irq_domain_set_hwirq_and_chip(domain, virq, pin, + &qcom_mpm_chip, priv); + if (ret) + return ret; + + map = get_mpm_gic_map(priv, pin); + if (map == NULL) + return irq_domain_disconnect_hierarchy(domain->parent, virq); + + if (type & IRQ_TYPE_EDGE_BOTH) + type = IRQ_TYPE_EDGE_RISING; + + if (type & IRQ_TYPE_LEVEL_MASK) + type = IRQ_TYPE_LEVEL_HIGH; + + parent_fwspec.fwnode = domain->parent->fwnode; + parent_fwspec.param_count = 3; + parent_fwspec.param[0] = 0; + parent_fwspec.param[1] = map->hwirq; + parent_fwspec.param[2] = type; + + return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, + &parent_fwspec); +} + +static const struct irq_domain_ops qcom_mpm_ops = { + .alloc = qcom_mpm_alloc, + .free = irq_domain_free_irqs_common, + .translate = irq_domain_translate_twocell, +}; + +/* Triggered by RPM when system resumes from deep sleep */ +static irqreturn_t qcom_mpm_handler(int irq, void *dev_id) +{ + struct qcom_mpm_priv *priv = dev_id; + unsigned long enable, pending; + irqreturn_t ret = IRQ_NONE; + unsigned long flags; + int i, j; + + for (i = 0; i < priv->reg_stride; i++) { + raw_spin_lock_irqsave(&priv->lock, flags); + enable = qcom_mpm_read(priv, MPM_REG_ENABLE, i); + pending = qcom_mpm_read(priv, MPM_REG_STATUS, i); + pending &= enable; + raw_spin_unlock_irqrestore(&priv->lock, flags); + + for_each_set_bit(j, &pending, 32) { + unsigned int pin = 32 * i + j; + struct irq_desc *desc = irq_resolve_mapping(priv->domain, pin); + struct irq_data *d = &desc->irq_data; + + if (!irqd_is_level_type(d)) + irq_set_irqchip_state(d->irq, + IRQCHIP_STATE_PENDING, true); + ret = IRQ_HANDLED; + } + } + + return ret; +} + +static int mpm_pd_power_off(struct generic_pm_domain *genpd) +{ + struct qcom_mpm_priv *priv = container_of(genpd, struct qcom_mpm_priv, + genpd); + int i, ret; + + for (i = 0; i < priv->reg_stride; i++) + qcom_mpm_write(priv, MPM_REG_STATUS, i, 0); + + /* Notify RPM to write vMPM into HW */ + ret = mbox_send_message(priv->mbox_chan, NULL); + if (ret < 0) + return ret; + + return 0; +} + +static bool gic_hwirq_is_mapped(struct mpm_gic_map *maps, int cnt, u32 hwirq) +{ + int i; + + for (i = 0; i < cnt; i++) + if (maps[i].hwirq == hwirq) + return true; + + return false; +} + +static int qcom_mpm_probe(struct platform_device *pdev, struct device_node *parent) +{ + struct device_node *np = pdev->dev.of_node; + struct device *dev = &pdev->dev; + struct irq_domain *parent_domain; + struct generic_pm_domain *genpd; + struct device_node *msgram_np; + struct qcom_mpm_priv *priv; + unsigned int pin_cnt; + struct resource res; + int i, irq; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + ret = of_property_read_u32(np, "qcom,mpm-pin-count", &pin_cnt); + if (ret) { + 
dev_err(dev, "failed to read qcom,mpm-pin-count: %d\n", ret); + return ret; + } + + priv->reg_stride = DIV_ROUND_UP(pin_cnt, 32); + + ret = of_property_count_u32_elems(np, "qcom,mpm-pin-map"); + if (ret < 0) { + dev_err(dev, "failed to read qcom,mpm-pin-map: %d\n", ret); + return ret; + } + + if (ret % 2) { + dev_err(dev, "invalid qcom,mpm-pin-map\n"); + return -EINVAL; + } + + priv->map_cnt = ret / 2; + priv->maps = devm_kcalloc(dev, priv->map_cnt, sizeof(*priv->maps), + GFP_KERNEL); + if (!priv->maps) + return -ENOMEM; + + for (i = 0; i < priv->map_cnt; i++) { + u32 pin, hwirq; + + of_property_read_u32_index(np, "qcom,mpm-pin-map", i * 2, &pin); + of_property_read_u32_index(np, "qcom,mpm-pin-map", i * 2 + 1, &hwirq); + + if (gic_hwirq_is_mapped(priv->maps, i, hwirq)) { + dev_warn(dev, "failed to map pin %d as GIC hwirq %d is already mapped\n", + pin, hwirq); + continue; + } + + priv->maps[i].pin = pin; + priv->maps[i].hwirq = hwirq; + } + + raw_spin_lock_init(&priv->lock); + + /* If we have a handle to an RPM message ram partition, use it. */ + msgram_np = of_parse_phandle(np, "qcom,rpm-msg-ram", 0); + if (msgram_np) { + ret = of_address_to_resource(msgram_np, 0, &res); + if (ret) { + of_node_put(msgram_np); + return ret; + } + + /* Don't use devm_ioremap_resource, as we're accessing a shared region. */ + priv->base = devm_ioremap(dev, res.start, resource_size(&res)); + of_node_put(msgram_np); + if (!priv->base) + return -ENOMEM; + } else { + /* Otherwise, fall back to simple MMIO. */ + priv->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + } + + for (i = 0; i < priv->reg_stride; i++) { + qcom_mpm_write(priv, MPM_REG_ENABLE, i, 0); + qcom_mpm_write(priv, MPM_REG_FALLING_EDGE, i, 0); + qcom_mpm_write(priv, MPM_REG_RISING_EDGE, i, 0); + qcom_mpm_write(priv, MPM_REG_POLARITY, i, 0); + qcom_mpm_write(priv, MPM_REG_STATUS, i, 0); + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + genpd = &priv->genpd; + genpd->flags = GENPD_FLAG_IRQ_SAFE; + genpd->power_off = mpm_pd_power_off; + + genpd->name = devm_kasprintf(dev, GFP_KERNEL, "%s", dev_name(dev)); + if (!genpd->name) + return -ENOMEM; + + ret = pm_genpd_init(genpd, NULL, false); + if (ret) { + dev_err(dev, "failed to init genpd: %d\n", ret); + return ret; + } + + ret = of_genpd_add_provider_simple(np, genpd); + if (ret) { + dev_err(dev, "failed to add genpd provider: %d\n", ret); + goto remove_genpd; + } + + priv->mbox_client.dev = dev; + priv->mbox_chan = mbox_request_channel(&priv->mbox_client, 0); + if (IS_ERR(priv->mbox_chan)) { + ret = PTR_ERR(priv->mbox_chan); + dev_err(dev, "failed to acquire IPC channel: %d\n", ret); + return ret; + } + + parent_domain = irq_find_host(parent); + if (!parent_domain) { + dev_err(dev, "failed to find MPM parent domain\n"); + ret = -ENXIO; + goto free_mbox; + } + + priv->domain = irq_domain_create_hierarchy(parent_domain, + IRQ_DOMAIN_FLAG_QCOM_MPM_WAKEUP, pin_cnt, + of_fwnode_handle(np), &qcom_mpm_ops, priv); + if (!priv->domain) { + dev_err(dev, "failed to create MPM domain\n"); + ret = -ENOMEM; + goto free_mbox; + } + + irq_domain_update_bus_token(priv->domain, DOMAIN_BUS_WAKEUP); + + ret = devm_request_irq(dev, irq, qcom_mpm_handler, IRQF_NO_SUSPEND, + "qcom_mpm", priv); + if (ret) { + dev_err(dev, "failed to request irq: %d\n", ret); + goto remove_domain; + } + + return 0; + +remove_domain: + irq_domain_remove(priv->domain); +free_mbox: + mbox_free_channel(priv->mbox_chan); +remove_genpd: + pm_genpd_remove(genpd); + 
return ret; +} + +IRQCHIP_PLATFORM_DRIVER_BEGIN(qcom_mpm) +IRQCHIP_MATCH("qcom,mpm", qcom_mpm_probe) +IRQCHIP_PLATFORM_DRIVER_END(qcom_mpm) +MODULE_DESCRIPTION("Qualcomm Technologies, Inc. MSM Power Manager"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/irqchip/irq-rda-intc.c b/drivers/irqchip/irq-rda-intc.c index 960168303b73..9f0144a73777 100644 --- a/drivers/irqchip/irq-rda-intc.c +++ b/drivers/irqchip/irq-rda-intc.c @@ -53,7 +53,7 @@ static void __exception_irq_entry rda_handle_irq(struct pt_regs *regs) while (stat) { hwirq = __fls(stat); - handle_domain_irq(rda_irq_domain, hwirq, regs); + generic_handle_domain_irq(rda_irq_domain, hwirq); stat &= ~BIT(hwirq); } } diff --git a/drivers/irqchip/irq-realtek-rtl.c b/drivers/irqchip/irq-realtek-rtl.c new file mode 100644 index 000000000000..942c1f8c363d --- /dev/null +++ b/drivers/irqchip/irq-realtek-rtl.c @@ -0,0 +1,174 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 Birger Koblitz <mail@birger-koblitz.de> + * Copyright (C) 2020 Bert Vermeulen <bert@biot.com> + * Copyright (C) 2020 John Crispin <john@phrozen.org> + */ + +#include <linux/of_irq.h> +#include <linux/irqchip.h> +#include <linux/spinlock.h> +#include <linux/of_address.h> +#include <linux/irqchip/chained_irq.h> + +/* Global Interrupt Mask Register */ +#define RTL_ICTL_GIMR 0x00 +/* Global Interrupt Status Register */ +#define RTL_ICTL_GISR 0x04 +/* Interrupt Routing Registers */ +#define RTL_ICTL_IRR0 0x08 +#define RTL_ICTL_IRR1 0x0c +#define RTL_ICTL_IRR2 0x10 +#define RTL_ICTL_IRR3 0x14 + +#define RTL_ICTL_NUM_INPUTS 32 + +#define REG(x) (realtek_ictl_base + x) + +static DEFINE_RAW_SPINLOCK(irq_lock); +static void __iomem *realtek_ictl_base; + +/* + * IRR0-IRR3 store 4 bits per interrupt, but Realtek uses inverted numbering, + * placing IRQ 31 in the first four bits. A routing value of '0' means the + * interrupt is left disconnected. Routing values {1..15} connect to output + * lines {0..14}. 
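+ * For example, with the helpers defined below, IRR_OFFSET(31) = 0 and
+ * IRR_SHIFT(31) = 28, so input 31 occupies the four most-significant bits
+ * of IRR0, while input 0 lands in the low nibble of IRR3.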
+ */ +#define IRR_OFFSET(idx) (4 * (3 - (idx * 4) / 32)) +#define IRR_SHIFT(idx) ((idx * 4) % 32) + +static void write_irr(void __iomem *irr0, int idx, u32 value) +{ + unsigned int offset = IRR_OFFSET(idx); + unsigned int shift = IRR_SHIFT(idx); + u32 irr; + + irr = readl(irr0 + offset) & ~(0xf << shift); + irr |= (value & 0xf) << shift; + writel(irr, irr0 + offset); +} + +static void realtek_ictl_unmask_irq(struct irq_data *i) +{ + unsigned long flags; + u32 value; + + raw_spin_lock_irqsave(&irq_lock, flags); + + value = readl(REG(RTL_ICTL_GIMR)); + value |= BIT(i->hwirq); + writel(value, REG(RTL_ICTL_GIMR)); + + raw_spin_unlock_irqrestore(&irq_lock, flags); +} + +static void realtek_ictl_mask_irq(struct irq_data *i) +{ + unsigned long flags; + u32 value; + + raw_spin_lock_irqsave(&irq_lock, flags); + + value = readl(REG(RTL_ICTL_GIMR)); + value &= ~BIT(i->hwirq); + writel(value, REG(RTL_ICTL_GIMR)); + + raw_spin_unlock_irqrestore(&irq_lock, flags); +} + +static struct irq_chip realtek_ictl_irq = { + .name = "realtek-rtl-intc", + .irq_mask = realtek_ictl_mask_irq, + .irq_unmask = realtek_ictl_unmask_irq, +}; + +static int intc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) +{ + unsigned long flags; + + irq_set_chip_and_handler(irq, &realtek_ictl_irq, handle_level_irq); + + raw_spin_lock_irqsave(&irq_lock, flags); + write_irr(REG(RTL_ICTL_IRR0), hw, 1); + raw_spin_unlock_irqrestore(&irq_lock, flags); + + return 0; +} + +static const struct irq_domain_ops irq_domain_ops = { + .map = intc_map, + .xlate = irq_domain_xlate_onecell, +}; + +static void realtek_irq_dispatch(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct irq_domain *domain; + unsigned long pending; + unsigned int soc_int; + + chained_irq_enter(chip, desc); + pending = readl(REG(RTL_ICTL_GIMR)) & readl(REG(RTL_ICTL_GISR)); + + if (unlikely(!pending)) { + spurious_interrupt(); + goto out; + } + + domain = irq_desc_get_handler_data(desc); + for_each_set_bit(soc_int, &pending, 32) + generic_handle_domain_irq(domain, soc_int); + +out: + chained_irq_exit(chip, desc); +} + +static int __init realtek_rtl_of_init(struct device_node *node, struct device_node *parent) +{ + struct of_phandle_args oirq; + struct irq_domain *domain; + unsigned int soc_irq; + int parent_irq; + + realtek_ictl_base = of_iomap(node, 0); + if (!realtek_ictl_base) + return -ENXIO; + + /* Disable all cascaded interrupts and clear routing */ + writel(0, REG(RTL_ICTL_GIMR)); + for (soc_irq = 0; soc_irq < RTL_ICTL_NUM_INPUTS; soc_irq++) + write_irr(REG(RTL_ICTL_IRR0), soc_irq, 0); + + if (WARN_ON(!of_irq_count(node))) { + /* + * If DT contains no parent interrupts, assume MIPS CPU IRQ 2 + * (HW0) is connected to the first output. This is the case for + * all known hardware anyway. "interrupt-map" is deprecated, so + * don't bother trying to parse that. 
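+		 * (For reference, a hypothetical but typical fragment for a
+		 * wired parent would instead carry "interrupt-parent = <&cpuintc>;"
+		 * and "interrupts = <2>;" on this node.)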
+ */ + oirq.np = of_find_compatible_node(NULL, NULL, "mti,cpu-interrupt-controller"); + oirq.args_count = 1; + oirq.args[0] = 2; + + parent_irq = irq_create_of_mapping(&oirq); + + of_node_put(oirq.np); + } else { + parent_irq = of_irq_get(node, 0); + } + + if (parent_irq < 0) + return parent_irq; + else if (!parent_irq) + return -ENODEV; + + domain = irq_domain_create_linear(of_fwnode_handle(node), RTL_ICTL_NUM_INPUTS, &irq_domain_ops, NULL); + if (!domain) + return -ENOMEM; + + irq_set_chained_handler_and_data(parent_irq, realtek_irq_dispatch, domain); + + return 0; +} + +IRQCHIP_DECLARE(realtek_rtl_intc, "realtek,rtl-intc", realtek_rtl_of_init); diff --git a/drivers/irqchip/irq-renesas-h8300h.c b/drivers/irqchip/irq-renesas-h8300h.c deleted file mode 100644 index 1054d74b7edd..000000000000 --- a/drivers/irqchip/irq-renesas-h8300h.c +++ /dev/null @@ -1,94 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * H8/300H interrupt controller driver - * - * Copyright 2015 Yoshinori Sato <ysato@users.sourceforge.jp> - */ - -#include <linux/init.h> -#include <linux/irq.h> -#include <linux/irqchip.h> -#include <linux/of_address.h> -#include <linux/of_irq.h> -#include <asm/io.h> - -static const char ipr_bit[] = { - 7, 6, 5, 5, - 4, 4, 4, 4, 3, 3, 3, 3, - 2, 2, 2, 2, 1, 1, 1, 1, - 0, 0, 0, 0, 15, 15, 15, 15, - 14, 14, 14, 14, 13, 13, 13, 13, - -1, -1, -1, -1, 11, 11, 11, 11, - 10, 10, 10, 10, 9, 9, 9, 9, -}; - -static void __iomem *intc_baseaddr; - -#define IPR (intc_baseaddr + 6) - -static void h8300h_disable_irq(struct irq_data *data) -{ - int bit; - int irq = data->irq - 12; - - bit = ipr_bit[irq]; - if (bit >= 0) { - if (bit < 8) - ctrl_bclr(bit & 7, IPR); - else - ctrl_bclr(bit & 7, (IPR+1)); - } -} - -static void h8300h_enable_irq(struct irq_data *data) -{ - int bit; - int irq = data->irq - 12; - - bit = ipr_bit[irq]; - if (bit >= 0) { - if (bit < 8) - ctrl_bset(bit & 7, IPR); - else - ctrl_bset(bit & 7, (IPR+1)); - } -} - -struct irq_chip h8300h_irq_chip = { - .name = "H8/300H-INTC", - .irq_enable = h8300h_enable_irq, - .irq_disable = h8300h_disable_irq, -}; - -static int irq_map(struct irq_domain *h, unsigned int virq, - irq_hw_number_t hw_irq_num) -{ - irq_set_chip_and_handler(virq, &h8300h_irq_chip, handle_simple_irq); - - return 0; -} - -static const struct irq_domain_ops irq_ops = { - .map = irq_map, - .xlate = irq_domain_xlate_onecell, -}; - -static int __init h8300h_intc_of_init(struct device_node *intc, - struct device_node *parent) -{ - struct irq_domain *domain; - - intc_baseaddr = of_iomap(intc, 0); - BUG_ON(!intc_baseaddr); - - /* All interrupt priority low */ - writeb(0x00, IPR + 0); - writeb(0x00, IPR + 1); - - domain = irq_domain_add_linear(intc, NR_IRQS, &irq_ops, NULL); - BUG_ON(!domain); - irq_set_default_host(domain); - return 0; -} - -IRQCHIP_DECLARE(h8300h_intc, "renesas,h8300h-intc", h8300h_intc_of_init); diff --git a/drivers/irqchip/irq-renesas-h8s.c b/drivers/irqchip/irq-renesas-h8s.c deleted file mode 100644 index 4e2461bae944..000000000000 --- a/drivers/irqchip/irq-renesas-h8s.c +++ /dev/null @@ -1,102 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * H8S interrupt controller driver - * - * Copyright 2015 Yoshinori Sato <ysato@users.sourceforge.jp> - */ - -#include <linux/irq.h> -#include <linux/irqchip.h> -#include <linux/of_address.h> -#include <linux/of_irq.h> -#include <asm/io.h> - -static void *intc_baseaddr; -#define IPRA (intc_baseaddr) - -static const unsigned char ipr_table[] = { - 0x03, 0x02, 0x01, 0x00, 0x13, 0x12, 0x11, 0x10, /* 16 - 23 */ - 0x23, 0x22, 
0x21, 0x20, 0x33, 0x32, 0x31, 0x30, /* 24 - 31 */ - 0x43, 0x42, 0x41, 0x40, 0x53, 0x53, 0x52, 0x52, /* 32 - 39 */ - 0x51, 0x51, 0x51, 0x51, 0x51, 0x51, 0x51, 0x51, /* 40 - 47 */ - 0x50, 0x50, 0x50, 0x50, 0x63, 0x63, 0x63, 0x63, /* 48 - 55 */ - 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, /* 56 - 63 */ - 0x61, 0x61, 0x61, 0x61, 0x60, 0x60, 0x60, 0x60, /* 64 - 71 */ - 0x73, 0x73, 0x73, 0x73, 0x72, 0x72, 0x72, 0x72, /* 72 - 79 */ - 0x71, 0x71, 0x71, 0x71, 0x70, 0x83, 0x82, 0x81, /* 80 - 87 */ - 0x80, 0x80, 0x80, 0x80, 0x93, 0x93, 0x93, 0x93, /* 88 - 95 */ - 0x92, 0x92, 0x92, 0x92, 0x91, 0x91, 0x91, 0x91, /* 96 - 103 */ - 0x90, 0x90, 0x90, 0x90, 0xa3, 0xa3, 0xa3, 0xa3, /* 104 - 111 */ - 0xa2, 0xa2, 0xa2, 0xa2, 0xa1, 0xa1, 0xa1, 0xa1, /* 112 - 119 */ - 0xa0, 0xa0, 0xa0, 0xa0, 0xa0, 0xa0, 0xa0, 0xa0, /* 120 - 127 */ -}; - -static void h8s_disable_irq(struct irq_data *data) -{ - int pos; - void __iomem *addr; - unsigned short pri; - int irq = data->irq; - - addr = IPRA + ((ipr_table[irq - 16] & 0xf0) >> 3); - pos = (ipr_table[irq - 16] & 0x0f) * 4; - pri = ~(0x000f << pos); - pri &= readw(addr); - writew(pri, addr); -} - -static void h8s_enable_irq(struct irq_data *data) -{ - int pos; - void __iomem *addr; - unsigned short pri; - int irq = data->irq; - - addr = IPRA + ((ipr_table[irq - 16] & 0xf0) >> 3); - pos = (ipr_table[irq - 16] & 0x0f) * 4; - pri = ~(0x000f << pos); - pri &= readw(addr); - pri |= 1 << pos; - writew(pri, addr); -} - -struct irq_chip h8s_irq_chip = { - .name = "H8S-INTC", - .irq_enable = h8s_enable_irq, - .irq_disable = h8s_disable_irq, -}; - -static __init int irq_map(struct irq_domain *h, unsigned int virq, - irq_hw_number_t hw_irq_num) -{ - irq_set_chip_and_handler(virq, &h8s_irq_chip, handle_simple_irq); - - return 0; -} - -static const struct irq_domain_ops irq_ops = { - .map = irq_map, - .xlate = irq_domain_xlate_onecell, -}; - -static int __init h8s_intc_of_init(struct device_node *intc, - struct device_node *parent) -{ - struct irq_domain *domain; - int n; - - intc_baseaddr = of_iomap(intc, 0); - BUG_ON(!intc_baseaddr); - - /* All interrupt priority is 0 (disable) */ - /* IPRA to IPRK */ - for (n = 0; n <= 'k' - 'a'; n++) - writew(0x0000, IPRA + (n * 2)); - - domain = irq_domain_add_linear(intc, NR_IRQS, &irq_ops, NULL); - BUG_ON(!domain); - irq_set_default_host(domain); - return 0; -} - -IRQCHIP_DECLARE(h8s_intc, "renesas,h8s-intc", h8s_intc_of_init); diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c index 8c039525703f..7951292d2d9b 100644 --- a/drivers/irqchip/irq-renesas-intc-irqpin.c +++ b/drivers/irqchip/irq-renesas-intc-irqpin.c @@ -17,7 +17,6 @@ #include <linux/err.h> #include <linux/slab.h> #include <linux/module.h> -#include <linux/of_device.h> #include <linux/pm_runtime.h> #define INTC_IRQPIN_MAX 8 /* maximum 8 interrupts per driver instance */ @@ -71,8 +70,7 @@ struct intc_irqpin_priv { }; struct intc_irqpin_config { - unsigned int irlm_bit; - unsigned needs_irlm:1; + int irlm_bit; /* -1 if non-existent */ }; static unsigned long intc_irqpin_read32(void __iomem *iomem) @@ -349,11 +347,10 @@ static const struct irq_domain_ops intc_irqpin_irq_domain_ops = { static const struct intc_irqpin_config intc_irqpin_irlm_r8a777x = { .irlm_bit = 23, /* ICR0.IRLM0 */ - .needs_irlm = 1, }; static const struct intc_irqpin_config intc_irqpin_rmobile = { - .needs_irlm = 0, + .irlm_bit = -1, }; static const struct of_device_id intc_irqpin_dt_ids[] = { @@ -377,7 +374,6 @@ static int intc_irqpin_probe(struct platform_device 
*pdev) struct intc_irqpin_priv *p; struct intc_irqpin_iomem *i; struct resource *io[INTC_IRQPIN_REG_NR]; - struct resource *irq; struct irq_chip *irq_chip; void (*enable_fn)(struct irq_data *d); void (*disable_fn)(struct irq_data *d); @@ -389,10 +385,8 @@ static int intc_irqpin_probe(struct platform_device *pdev) int k; p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL); - if (!p) { - dev_err(dev, "failed to allocate driver data\n"); + if (!p) return -ENOMEM; - } /* deal with driver instance configuration */ of_property_read_u32(dev->of_node, "sense-bitfield-width", @@ -422,12 +416,14 @@ static int intc_irqpin_probe(struct platform_device *pdev) /* allow any number of IRQs between 1 and INTC_IRQPIN_MAX */ for (k = 0; k < INTC_IRQPIN_MAX; k++) { - irq = platform_get_resource(pdev, IORESOURCE_IRQ, k); - if (!irq) + ret = platform_get_irq_optional(pdev, k); + if (ret == -ENXIO) break; + if (ret < 0) + goto err0; p->irq[k].p = p; - p->irq[k].requested_irq = irq->start; + p->irq[k].requested_irq = ret; } nirqs = k; @@ -462,8 +458,8 @@ static int intc_irqpin_probe(struct platform_device *pdev) goto err0; } - i->iomem = devm_ioremap_nocache(dev, io[k]->start, - resource_size(io[k])); + i->iomem = devm_ioremap(dev, io[k]->start, + resource_size(io[k])); if (!i->iomem) { dev_err(dev, "failed to remap IOMEM\n"); ret = -ENXIO; @@ -472,7 +468,7 @@ static int intc_irqpin_probe(struct platform_device *pdev) } /* configure "individual IRQ mode" where needed */ - if (config && config->needs_irlm) { + if (config && config->irlm_bit >= 0) { if (io[INTC_IRQPIN_REG_IRLM]) intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_IRLM, config->irlm_bit, 1, 1); @@ -510,21 +506,23 @@ static int intc_irqpin_probe(struct platform_device *pdev) } irq_chip = &p->irq_chip; - irq_chip->name = name; + irq_chip->name = "intc-irqpin"; irq_chip->irq_mask = disable_fn; irq_chip->irq_unmask = enable_fn; irq_chip->irq_set_type = intc_irqpin_irq_set_type; irq_chip->irq_set_wake = intc_irqpin_irq_set_wake; irq_chip->flags = IRQCHIP_MASK_ON_SUSPEND; - p->irq_domain = irq_domain_add_simple(dev->of_node, nirqs, 0, - &intc_irqpin_irq_domain_ops, p); + p->irq_domain = irq_domain_create_simple(dev_fwnode(dev), nirqs, 0, + &intc_irqpin_irq_domain_ops, p); if (!p->irq_domain) { ret = -ENXIO; dev_err(dev, "cannot initialize irq domain\n"); goto err0; } + irq_domain_set_pm_device(p->irq_domain, dev); + if (p->shared_irqs) { /* request one shared interrupt */ if (devm_request_irq(dev, p->irq[0].requested_irq, @@ -563,17 +561,16 @@ err0: return ret; } -static int intc_irqpin_remove(struct platform_device *pdev) +static void intc_irqpin_remove(struct platform_device *pdev) { struct intc_irqpin_priv *p = platform_get_drvdata(pdev); irq_domain_remove(p->irq_domain); pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); - return 0; } -static int __maybe_unused intc_irqpin_suspend(struct device *dev) +static int intc_irqpin_suspend(struct device *dev) { struct intc_irqpin_priv *p = dev_get_drvdata(dev); @@ -583,15 +580,15 @@ static int __maybe_unused intc_irqpin_suspend(struct device *dev) return 0; } -static SIMPLE_DEV_PM_OPS(intc_irqpin_pm_ops, intc_irqpin_suspend, NULL); +static DEFINE_SIMPLE_DEV_PM_OPS(intc_irqpin_pm_ops, intc_irqpin_suspend, NULL); static struct platform_driver intc_irqpin_device_driver = { .probe = intc_irqpin_probe, .remove = intc_irqpin_remove, .driver = { - .name = "renesas_intc_irqpin", - .of_match_table = intc_irqpin_dt_ids, - .pm = &intc_irqpin_pm_ops, + .name = "renesas_intc_irqpin", + .of_match_table = 
intc_irqpin_dt_ids, + .pm = pm_sleep_ptr(&intc_irqpin_pm_ops), } }; @@ -609,4 +606,3 @@ module_exit(intc_irqpin_exit); MODULE_AUTHOR("Magnus Damm"); MODULE_DESCRIPTION("Renesas INTC External IRQ Pin Driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c index a449a7c839b3..a20a6471b0e4 100644 --- a/drivers/irqchip/irq-renesas-irqc.c +++ b/drivers/irqchip/irq-renesas-irqc.c @@ -7,7 +7,6 @@ #include <linux/init.h> #include <linux/platform_device.h> -#include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/io.h> @@ -48,7 +47,7 @@ struct irqc_priv { void __iomem *cpu_int_base; struct irqc_irq irq[IRQC_IRQ_MAX]; unsigned int number_of_irqs; - struct platform_device *pdev; + struct device *dev; struct irq_chip_generic *gc; struct irq_domain *irq_domain; atomic_t wakeup_path; @@ -61,8 +60,7 @@ static struct irqc_priv *irq_data_to_priv(struct irq_data *data) static void irqc_dbg(struct irqc_irq *i, char *str) { - dev_dbg(&i->p->pdev->dev, "%s (%d:%d)\n", - str, i->requested_irq, i->hw_irq); + dev_dbg(i->p->dev, "%s (%d:%d)\n", str, i->requested_irq, i->hw_irq); } static unsigned char irqc_sense[IRQ_TYPE_SENSE_MASK + 1] = { @@ -117,7 +115,7 @@ static irqreturn_t irqc_irq_handler(int irq, void *dev_id) if (ioread32(p->iomem + DETECT_STATUS) & bit) { iowrite32(bit, p->iomem + DETECT_STATUS); irqc_dbg(i, "demux2"); - generic_handle_irq(irq_find_mapping(p->irq_domain, i->hw_irq)); + generic_handle_domain_irq(p->irq_domain, i->hw_irq); return IRQ_HANDLED; } return IRQ_NONE; @@ -125,77 +123,65 @@ static irqreturn_t irqc_irq_handler(int irq, void *dev_id) static int irqc_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; + const char *name = dev_name(dev); struct irqc_priv *p; - struct resource *io; - struct resource *irq; - const char *name = dev_name(&pdev->dev); int ret; int k; - p = kzalloc(sizeof(*p), GFP_KERNEL); - if (!p) { - dev_err(&pdev->dev, "failed to allocate driver data\n"); - ret = -ENOMEM; - goto err0; - } + p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL); + if (!p) + return -ENOMEM; - p->pdev = pdev; + p->dev = dev; platform_set_drvdata(pdev, p); - pm_runtime_enable(&pdev->dev); - pm_runtime_get_sync(&pdev->dev); - - /* get hold of manadatory IOMEM */ - io = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!io) { - dev_err(&pdev->dev, "not enough IOMEM resources\n"); - ret = -EINVAL; - goto err1; - } + pm_runtime_enable(dev); + pm_runtime_get_sync(dev); /* allow any number of IRQs between 1 and IRQC_IRQ_MAX */ for (k = 0; k < IRQC_IRQ_MAX; k++) { - irq = platform_get_resource(pdev, IORESOURCE_IRQ, k); - if (!irq) + ret = platform_get_irq_optional(pdev, k); + if (ret == -ENXIO) break; + if (ret < 0) + goto err_runtime_pm_disable; p->irq[k].p = p; p->irq[k].hw_irq = k; - p->irq[k].requested_irq = irq->start; + p->irq[k].requested_irq = ret; } p->number_of_irqs = k; if (p->number_of_irqs < 1) { - dev_err(&pdev->dev, "not enough IRQ resources\n"); + dev_err(dev, "not enough IRQ resources\n"); ret = -EINVAL; - goto err1; + goto err_runtime_pm_disable; } /* ioremap IOMEM and setup read/write callbacks */ - p->iomem = ioremap_nocache(io->start, resource_size(io)); - if (!p->iomem) { - dev_err(&pdev->dev, "failed to remap IOMEM\n"); - ret = -ENXIO; - goto err2; + p->iomem = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(p->iomem)) { + ret = PTR_ERR(p->iomem); + goto err_runtime_pm_disable; } p->cpu_int_base = p->iomem + IRQC_INT_CPU_BASE(0); /* SYS-SPI */ - 
p->irq_domain = irq_domain_add_linear(pdev->dev.of_node, - p->number_of_irqs, - &irq_generic_chip_ops, p); + p->irq_domain = irq_domain_create_linear(dev_fwnode(dev), p->number_of_irqs, + &irq_generic_chip_ops, p); if (!p->irq_domain) { ret = -ENXIO; - dev_err(&pdev->dev, "cannot initialize irq domain\n"); - goto err2; + dev_err(dev, "cannot initialize irq domain\n"); + goto err_runtime_pm_disable; } ret = irq_alloc_domain_generic_chips(p->irq_domain, p->number_of_irqs, - 1, name, handle_level_irq, + 1, "irqc", handle_level_irq, 0, 0, IRQ_GC_INIT_NESTED_LOCK); if (ret) { - dev_err(&pdev->dev, "cannot allocate generic chip\n"); - goto err3; + dev_err(dev, "cannot allocate generic chip\n"); + goto err_remove_domain; } p->gc = irq_get_domain_generic_chip(p->irq_domain, 0); @@ -208,52 +194,40 @@ static int irqc_probe(struct platform_device *pdev) p->gc->chip_types[0].chip.irq_set_wake = irqc_irq_set_wake; p->gc->chip_types[0].chip.flags = IRQCHIP_MASK_ON_SUSPEND; + irq_domain_set_pm_device(p->irq_domain, dev); + /* request interrupts one by one */ for (k = 0; k < p->number_of_irqs; k++) { - if (request_irq(p->irq[k].requested_irq, irqc_irq_handler, - 0, name, &p->irq[k])) { - dev_err(&pdev->dev, "failed to request IRQ\n"); + if (devm_request_irq(dev, p->irq[k].requested_irq, + irqc_irq_handler, 0, name, &p->irq[k])) { + dev_err(dev, "failed to request IRQ\n"); ret = -ENOENT; - goto err4; + goto err_remove_domain; } } - dev_info(&pdev->dev, "driving %d irqs\n", p->number_of_irqs); + dev_info(dev, "driving %d irqs\n", p->number_of_irqs); return 0; -err4: - while (--k >= 0) - free_irq(p->irq[k].requested_irq, &p->irq[k]); -err3: +err_remove_domain: irq_domain_remove(p->irq_domain); -err2: - iounmap(p->iomem); -err1: - pm_runtime_put(&pdev->dev); - pm_runtime_disable(&pdev->dev); - kfree(p); -err0: +err_runtime_pm_disable: + pm_runtime_put(dev); + pm_runtime_disable(dev); return ret; } -static int irqc_remove(struct platform_device *pdev) +static void irqc_remove(struct platform_device *pdev) { struct irqc_priv *p = platform_get_drvdata(pdev); - int k; - - for (k = 0; k < p->number_of_irqs; k++) - free_irq(p->irq[k].requested_irq, &p->irq[k]); irq_domain_remove(p->irq_domain); - iounmap(p->iomem); pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); - kfree(p); - return 0; } -static int __maybe_unused irqc_suspend(struct device *dev) +static int irqc_suspend(struct device *dev) { struct irqc_priv *p = dev_get_drvdata(dev); @@ -263,7 +237,7 @@ static int __maybe_unused irqc_suspend(struct device *dev) return 0; } -static SIMPLE_DEV_PM_OPS(irqc_pm_ops, irqc_suspend, NULL); +static DEFINE_SIMPLE_DEV_PM_OPS(irqc_pm_ops, irqc_suspend, NULL); static const struct of_device_id irqc_dt_ids[] = { { .compatible = "renesas,irqc", }, @@ -275,9 +249,9 @@ static struct platform_driver irqc_device_driver = { .probe = irqc_probe, .remove = irqc_remove, .driver = { - .name = "renesas_irqc", + .name = "renesas_irqc", .of_match_table = irqc_dt_ids, - .pm = &irqc_pm_ops, + .pm = pm_sleep_ptr(&irqc_pm_ops), } }; @@ -295,4 +269,3 @@ module_exit(irqc_exit); MODULE_AUTHOR("Magnus Damm"); MODULE_DESCRIPTION("Renesas IRQC Driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/irqchip/irq-renesas-rza1.c b/drivers/irqchip/irq-renesas-rza1.c new file mode 100644 index 000000000000..6047a524ac77 --- /dev/null +++ b/drivers/irqchip/irq-renesas-rza1.c @@ -0,0 +1,282 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Renesas RZ/A1 IRQC Driver + * + * Copyright (C) 2019 Glider bvba + */ + +#include <linux/err.h> +#include 
<linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/irqdomain.h> +#include <linux/irq.h> +#include <linux/module.h> +#include <linux/of_irq.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#include <dt-bindings/interrupt-controller/arm-gic.h> + +#define IRQC_NUM_IRQ 8 + +#define ICR0 0 /* Interrupt Control Register 0 */ + +#define ICR0_NMIL BIT(15) /* NMI Input Level (0=low, 1=high) */ +#define ICR0_NMIE BIT(8) /* Edge Select (0=falling, 1=rising) */ +#define ICR0_NMIF BIT(1) /* NMI Interrupt Request */ + +#define ICR1 2 /* Interrupt Control Register 1 */ + +#define ICR1_IRQS(n, sense) ((sense) << ((n) * 2)) /* IRQ Sense Select */ +#define ICR1_IRQS_LEVEL_LOW 0 +#define ICR1_IRQS_EDGE_FALLING 1 +#define ICR1_IRQS_EDGE_RISING 2 +#define ICR1_IRQS_EDGE_BOTH 3 +#define ICR1_IRQS_MASK(n) ICR1_IRQS((n), 3) + +#define IRQRR 4 /* IRQ Interrupt Request Register */ + + +struct rza1_irqc_priv { + struct device *dev; + void __iomem *base; + struct irq_chip chip; + struct irq_domain *irq_domain; + struct of_phandle_args map[IRQC_NUM_IRQ]; +}; + +static struct rza1_irqc_priv *irq_data_to_priv(struct irq_data *data) +{ + return data->domain->host_data; +} + +static void rza1_irqc_eoi(struct irq_data *d) +{ + struct rza1_irqc_priv *priv = irq_data_to_priv(d); + u16 bit = BIT(irqd_to_hwirq(d)); + u16 tmp; + + tmp = readw_relaxed(priv->base + IRQRR); + if (tmp & bit) + writew_relaxed(GENMASK(IRQC_NUM_IRQ - 1, 0) & ~bit, + priv->base + IRQRR); + + irq_chip_eoi_parent(d); +} + +static int rza1_irqc_set_type(struct irq_data *d, unsigned int type) +{ + struct rza1_irqc_priv *priv = irq_data_to_priv(d); + unsigned int hw_irq = irqd_to_hwirq(d); + u16 sense, tmp; + + switch (type & IRQ_TYPE_SENSE_MASK) { + case IRQ_TYPE_LEVEL_LOW: + sense = ICR1_IRQS_LEVEL_LOW; + break; + + case IRQ_TYPE_EDGE_FALLING: + sense = ICR1_IRQS_EDGE_FALLING; + break; + + case IRQ_TYPE_EDGE_RISING: + sense = ICR1_IRQS_EDGE_RISING; + break; + + case IRQ_TYPE_EDGE_BOTH: + sense = ICR1_IRQS_EDGE_BOTH; + break; + + default: + return -EINVAL; + } + + tmp = readw_relaxed(priv->base + ICR1); + tmp &= ~ICR1_IRQS_MASK(hw_irq); + tmp |= ICR1_IRQS(hw_irq, sense); + writew_relaxed(tmp, priv->base + ICR1); + return 0; +} + +static int rza1_irqc_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + struct rza1_irqc_priv *priv = domain->host_data; + struct irq_fwspec *fwspec = arg; + unsigned int hwirq = fwspec->param[0]; + struct irq_fwspec spec; + unsigned int i; + int ret; + + ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &priv->chip, + priv); + if (ret) + return ret; + + spec.fwnode = &priv->dev->of_node->fwnode; + spec.param_count = priv->map[hwirq].args_count; + for (i = 0; i < spec.param_count; i++) + spec.param[i] = priv->map[hwirq].args[i]; + + return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &spec); +} + +static int rza1_irqc_translate(struct irq_domain *domain, + struct irq_fwspec *fwspec, unsigned long *hwirq, + unsigned int *type) +{ + if (fwspec->param_count != 2 || fwspec->param[0] >= IRQC_NUM_IRQ) + return -EINVAL; + + *hwirq = fwspec->param[0]; + *type = fwspec->param[1]; + return 0; +} + +static const struct irq_domain_ops rza1_irqc_domain_ops = { + .alloc = rza1_irqc_alloc, + .translate = rza1_irqc_translate, +}; + +static int rza1_irqc_parse_map(struct rza1_irqc_priv *priv, + struct device_node *gic_node) +{ + struct device *dev = priv->dev; + unsigned int imaplen, i, j; + struct device_node *ipar; + const __be32 *imap; + 
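+	/*
+	 * Each "interrupt-map" entry parsed below consists of: the child
+	 * index (cell 0), the child sense flags (cell 1, ignored here),
+	 * the parent phandle (cell 2), and #interrupt-cells worth of
+	 * parent (GIC) specifier, which is copied into priv->map[].
+	 */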
u32 intsize; + int ret; + + imap = of_get_property(dev->of_node, "interrupt-map", &imaplen); + if (!imap) + return -EINVAL; + + for (i = 0; i < IRQC_NUM_IRQ; i++) { + if (imaplen < 3) + return -EINVAL; + + /* Check interrupt number, ignore sense */ + if (be32_to_cpup(imap) != i) + return -EINVAL; + + ipar = of_find_node_by_phandle(be32_to_cpup(imap + 2)); + if (ipar != gic_node) { + of_node_put(ipar); + return -EINVAL; + } + + imap += 3; + imaplen -= 3; + + ret = of_property_read_u32(ipar, "#interrupt-cells", &intsize); + of_node_put(ipar); + if (ret) + return ret; + + if (imaplen < intsize) + return -EINVAL; + + priv->map[i].args_count = intsize; + for (j = 0; j < intsize; j++) + priv->map[i].args[j] = be32_to_cpup(imap++); + + imaplen -= intsize; + } + + return 0; +} + +static int rza1_irqc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct irq_domain *parent = NULL; + struct device_node *gic_node; + struct rza1_irqc_priv *priv; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + platform_set_drvdata(pdev, priv); + priv->dev = dev; + + priv->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + gic_node = of_irq_find_parent(np); + if (gic_node) + parent = irq_find_host(gic_node); + + if (!parent) { + dev_err(dev, "cannot find parent domain\n"); + ret = -ENODEV; + goto out_put_node; + } + + ret = rza1_irqc_parse_map(priv, gic_node); + if (ret) { + dev_err(dev, "cannot parse %s: %d\n", "interrupt-map", ret); + goto out_put_node; + } + + priv->chip.name = "rza1-irqc"; + priv->chip.irq_mask = irq_chip_mask_parent; + priv->chip.irq_unmask = irq_chip_unmask_parent; + priv->chip.irq_eoi = rza1_irqc_eoi; + priv->chip.irq_retrigger = irq_chip_retrigger_hierarchy; + priv->chip.irq_set_type = rza1_irqc_set_type; + priv->chip.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE; + + priv->irq_domain = irq_domain_create_hierarchy(parent, 0, IRQC_NUM_IRQ, dev_fwnode(dev), + &rza1_irqc_domain_ops, priv); + if (!priv->irq_domain) { + dev_err(dev, "cannot initialize irq domain\n"); + ret = -ENOMEM; + } + +out_put_node: + of_node_put(gic_node); + return ret; +} + +static void rza1_irqc_remove(struct platform_device *pdev) +{ + struct rza1_irqc_priv *priv = platform_get_drvdata(pdev); + + irq_domain_remove(priv->irq_domain); +} + +static const struct of_device_id rza1_irqc_dt_ids[] = { + { .compatible = "renesas,rza1-irqc" }, + {}, +}; +MODULE_DEVICE_TABLE(of, rza1_irqc_dt_ids); + +static struct platform_driver rza1_irqc_device_driver = { + .probe = rza1_irqc_probe, + .remove = rza1_irqc_remove, + .driver = { + .name = "renesas_rza1_irqc", + .of_match_table = rza1_irqc_dt_ids, + } +}; + +static int __init rza1_irqc_init(void) +{ + return platform_driver_register(&rza1_irqc_device_driver); +} +postcore_initcall(rza1_irqc_init); + +static void __exit rza1_irqc_exit(void) +{ + platform_driver_unregister(&rza1_irqc_device_driver); +} +module_exit(rza1_irqc_exit); + +MODULE_AUTHOR("Geert Uytterhoeven <geert+renesas@glider.be>"); +MODULE_DESCRIPTION("Renesas RZ/A1 IRQC Driver"); diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c new file mode 100644 index 000000000000..e73d426cea6d --- /dev/null +++ b/drivers/irqchip/irq-renesas-rzg2l.c @@ -0,0 +1,604 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Renesas RZ/G2L IRQC Driver + * + * Copyright (C) 2022 Renesas Electronics Corporation. 
+ * + * Author: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com> + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/irqchip.h> +#include <linux/irqdomain.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/pm_runtime.h> +#include <linux/reset.h> +#include <linux/spinlock.h> +#include <linux/syscore_ops.h> + +#define IRQC_IRQ_START 1 +#define IRQC_IRQ_COUNT 8 +#define IRQC_TINT_START (IRQC_IRQ_START + IRQC_IRQ_COUNT) +#define IRQC_TINT_COUNT 32 +#define IRQC_NUM_IRQ (IRQC_TINT_START + IRQC_TINT_COUNT) + +#define ISCR 0x10 +#define IITSR 0x14 +#define TSCR 0x20 +#define TITSR(n) (0x24 + (n) * 4) +#define TITSR0_MAX_INT 16 +#define TITSEL_WIDTH 0x2 +#define TSSR(n) (0x30 + ((n) * 4)) +#define TIEN BIT(7) +#define TSSEL_SHIFT(n) (8 * (n)) +#define TSSEL_MASK GENMASK(7, 0) +#define IRQ_MASK 0x3 +#define IMSK 0x10010 +#define TMSK 0x10020 + +#define TSSR_OFFSET(n) ((n) % 4) +#define TSSR_INDEX(n) ((n) / 4) + +#define TITSR_TITSEL_EDGE_RISING 0 +#define TITSR_TITSEL_EDGE_FALLING 1 +#define TITSR_TITSEL_LEVEL_HIGH 2 +#define TITSR_TITSEL_LEVEL_LOW 3 + +#define IITSR_IITSEL(n, sense) ((sense) << ((n) * 2)) +#define IITSR_IITSEL_LEVEL_LOW 0 +#define IITSR_IITSEL_EDGE_FALLING 1 +#define IITSR_IITSEL_EDGE_RISING 2 +#define IITSR_IITSEL_EDGE_BOTH 3 +#define IITSR_IITSEL_MASK(n) IITSR_IITSEL((n), 3) + +#define TINT_EXTRACT_HWIRQ(x) FIELD_GET(GENMASK(15, 0), (x)) +#define TINT_EXTRACT_GPIOINT(x) FIELD_GET(GENMASK(31, 16), (x)) + +/** + * struct rzg2l_irqc_reg_cache - registers cache (necessary for suspend/resume) + * @iitsr: IITSR register + * @titsr: TITSR registers + */ +struct rzg2l_irqc_reg_cache { + u32 iitsr; + u32 titsr[2]; +}; + +/** + * struct rzg2l_irqc_priv - IRQ controller private data structure + * @base: Controller's base address + * @irqchip: Pointer to struct irq_chip + * @fwspec: IRQ firmware specific data + * @lock: Lock to serialize access to hardware registers + * @cache: Registers cache for suspend/resume + */ +static struct rzg2l_irqc_priv { + void __iomem *base; + const struct irq_chip *irqchip; + struct irq_fwspec fwspec[IRQC_NUM_IRQ]; + raw_spinlock_t lock; + struct rzg2l_irqc_reg_cache cache; +} *rzg2l_irqc_data; + +static struct rzg2l_irqc_priv *irq_data_to_priv(struct irq_data *data) +{ + return data->domain->host_data; +} + +static void rzg2l_clear_irq_int(struct rzg2l_irqc_priv *priv, unsigned int hwirq) +{ + unsigned int hw_irq = hwirq - IRQC_IRQ_START; + u32 bit = BIT(hw_irq); + u32 iitsr, iscr; + + iscr = readl_relaxed(priv->base + ISCR); + iitsr = readl_relaxed(priv->base + IITSR); + + /* + * ISCR can only be cleared if the type is falling-edge, rising-edge or + * falling/rising-edge. + */ + if ((iscr & bit) && (iitsr & IITSR_IITSEL_MASK(hw_irq))) { + writel_relaxed(iscr & ~bit, priv->base + ISCR); + /* + * Enforce that the posted write is flushed to prevent that the + * just handled interrupt is raised again. + */ + readl_relaxed(priv->base + ISCR); + } +} + +static void rzg2l_clear_tint_int(struct rzg2l_irqc_priv *priv, unsigned int hwirq) +{ + u32 bit = BIT(hwirq - IRQC_TINT_START); + u32 reg; + + reg = readl_relaxed(priv->base + TSCR); + if (reg & bit) { + writel_relaxed(reg & ~bit, priv->base + TSCR); + /* + * Enforce that the posted write is flushed to prevent that the + * just handled interrupt is raised again. 
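+		 * A relaxed read back of the same register is sufficient
+		 * here: reads and writes to the same device address are
+		 * ordered, so the clear has reached the hardware by the
+		 * time the read completes.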
+ */ + readl_relaxed(priv->base + TSCR); + } +} + +static void rzg2l_irqc_eoi(struct irq_data *d) +{ + struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); + unsigned int hw_irq = irqd_to_hwirq(d); + + raw_spin_lock(&priv->lock); + if (hw_irq >= IRQC_IRQ_START && hw_irq <= IRQC_IRQ_COUNT) + rzg2l_clear_irq_int(priv, hw_irq); + else if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) + rzg2l_clear_tint_int(priv, hw_irq); + raw_spin_unlock(&priv->lock); + irq_chip_eoi_parent(d); +} + +static void rzfive_irqc_mask_irq_interrupt(struct rzg2l_irqc_priv *priv, + unsigned int hwirq) +{ + u32 bit = BIT(hwirq - IRQC_IRQ_START); + + writel_relaxed(readl_relaxed(priv->base + IMSK) | bit, priv->base + IMSK); +} + +static void rzfive_irqc_unmask_irq_interrupt(struct rzg2l_irqc_priv *priv, + unsigned int hwirq) +{ + u32 bit = BIT(hwirq - IRQC_IRQ_START); + + writel_relaxed(readl_relaxed(priv->base + IMSK) & ~bit, priv->base + IMSK); +} + +static void rzfive_irqc_mask_tint_interrupt(struct rzg2l_irqc_priv *priv, + unsigned int hwirq) +{ + u32 bit = BIT(hwirq - IRQC_TINT_START); + + writel_relaxed(readl_relaxed(priv->base + TMSK) | bit, priv->base + TMSK); +} + +static void rzfive_irqc_unmask_tint_interrupt(struct rzg2l_irqc_priv *priv, + unsigned int hwirq) +{ + u32 bit = BIT(hwirq - IRQC_TINT_START); + + writel_relaxed(readl_relaxed(priv->base + TMSK) & ~bit, priv->base + TMSK); +} + +static void rzfive_irqc_mask(struct irq_data *d) +{ + struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); + unsigned int hwirq = irqd_to_hwirq(d); + + raw_spin_lock(&priv->lock); + if (hwirq >= IRQC_IRQ_START && hwirq <= IRQC_IRQ_COUNT) + rzfive_irqc_mask_irq_interrupt(priv, hwirq); + else if (hwirq >= IRQC_TINT_START && hwirq < IRQC_NUM_IRQ) + rzfive_irqc_mask_tint_interrupt(priv, hwirq); + raw_spin_unlock(&priv->lock); + irq_chip_mask_parent(d); +} + +static void rzfive_irqc_unmask(struct irq_data *d) +{ + struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); + unsigned int hwirq = irqd_to_hwirq(d); + + raw_spin_lock(&priv->lock); + if (hwirq >= IRQC_IRQ_START && hwirq <= IRQC_IRQ_COUNT) + rzfive_irqc_unmask_irq_interrupt(priv, hwirq); + else if (hwirq >= IRQC_TINT_START && hwirq < IRQC_NUM_IRQ) + rzfive_irqc_unmask_tint_interrupt(priv, hwirq); + raw_spin_unlock(&priv->lock); + irq_chip_unmask_parent(d); +} + +static void rzfive_tint_irq_endisable(struct irq_data *d, bool enable) +{ + struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); + unsigned int hwirq = irqd_to_hwirq(d); + + if (hwirq >= IRQC_TINT_START && hwirq < IRQC_NUM_IRQ) { + u32 offset = hwirq - IRQC_TINT_START; + u32 tssr_offset = TSSR_OFFSET(offset); + u8 tssr_index = TSSR_INDEX(offset); + u32 reg; + + raw_spin_lock(&priv->lock); + if (enable) + rzfive_irqc_unmask_tint_interrupt(priv, hwirq); + else + rzfive_irqc_mask_tint_interrupt(priv, hwirq); + reg = readl_relaxed(priv->base + TSSR(tssr_index)); + if (enable) + reg |= TIEN << TSSEL_SHIFT(tssr_offset); + else + reg &= ~(TIEN << TSSEL_SHIFT(tssr_offset)); + writel_relaxed(reg, priv->base + TSSR(tssr_index)); + raw_spin_unlock(&priv->lock); + } else { + raw_spin_lock(&priv->lock); + if (enable) + rzfive_irqc_unmask_irq_interrupt(priv, hwirq); + else + rzfive_irqc_mask_irq_interrupt(priv, hwirq); + raw_spin_unlock(&priv->lock); + } +} + +static void rzfive_irqc_irq_disable(struct irq_data *d) +{ + irq_chip_disable_parent(d); + rzfive_tint_irq_endisable(d, false); +} + +static void rzfive_irqc_irq_enable(struct irq_data *d) +{ + rzfive_tint_irq_endisable(d, true); + irq_chip_enable_parent(d); +} + +static 
void rzg2l_tint_irq_endisable(struct irq_data *d, bool enable) +{ + unsigned int hw_irq = irqd_to_hwirq(d); + + if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) { + struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); + u32 offset = hw_irq - IRQC_TINT_START; + u32 tssr_offset = TSSR_OFFSET(offset); + u8 tssr_index = TSSR_INDEX(offset); + u32 reg; + + raw_spin_lock(&priv->lock); + reg = readl_relaxed(priv->base + TSSR(tssr_index)); + if (enable) + reg |= TIEN << TSSEL_SHIFT(tssr_offset); + else + reg &= ~(TIEN << TSSEL_SHIFT(tssr_offset)); + writel_relaxed(reg, priv->base + TSSR(tssr_index)); + raw_spin_unlock(&priv->lock); + } +} + +static void rzg2l_irqc_irq_disable(struct irq_data *d) +{ + irq_chip_disable_parent(d); + rzg2l_tint_irq_endisable(d, false); +} + +static void rzg2l_irqc_irq_enable(struct irq_data *d) +{ + rzg2l_tint_irq_endisable(d, true); + irq_chip_enable_parent(d); +} + +static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type) +{ + struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); + unsigned int hwirq = irqd_to_hwirq(d); + u32 iitseln = hwirq - IRQC_IRQ_START; + bool clear_irq_int = false; + u16 sense, tmp; + + switch (type & IRQ_TYPE_SENSE_MASK) { + case IRQ_TYPE_LEVEL_LOW: + sense = IITSR_IITSEL_LEVEL_LOW; + break; + + case IRQ_TYPE_EDGE_FALLING: + sense = IITSR_IITSEL_EDGE_FALLING; + clear_irq_int = true; + break; + + case IRQ_TYPE_EDGE_RISING: + sense = IITSR_IITSEL_EDGE_RISING; + clear_irq_int = true; + break; + + case IRQ_TYPE_EDGE_BOTH: + sense = IITSR_IITSEL_EDGE_BOTH; + clear_irq_int = true; + break; + + default: + return -EINVAL; + } + + raw_spin_lock(&priv->lock); + tmp = readl_relaxed(priv->base + IITSR); + tmp &= ~IITSR_IITSEL_MASK(iitseln); + tmp |= IITSR_IITSEL(iitseln, sense); + if (clear_irq_int) + rzg2l_clear_irq_int(priv, hwirq); + writel_relaxed(tmp, priv->base + IITSR); + raw_spin_unlock(&priv->lock); + + return 0; +} + +static u32 rzg2l_disable_tint_and_set_tint_source(struct irq_data *d, struct rzg2l_irqc_priv *priv, + u32 reg, u32 tssr_offset, u8 tssr_index) +{ + u32 tint = (u32)(uintptr_t)irq_data_get_irq_chip_data(d); + u32 tien = reg & (TIEN << TSSEL_SHIFT(tssr_offset)); + + /* Clear the relevant byte in reg */ + reg &= ~(TSSEL_MASK << TSSEL_SHIFT(tssr_offset)); + /* Set TINT and leave TIEN clear */ + reg |= tint << TSSEL_SHIFT(tssr_offset); + writel_relaxed(reg, priv->base + TSSR(tssr_index)); + + return reg | tien; +} + +static int rzg2l_tint_set_edge(struct irq_data *d, unsigned int type) +{ + struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); + unsigned int hwirq = irqd_to_hwirq(d); + u32 titseln = hwirq - IRQC_TINT_START; + u32 tssr_offset = TSSR_OFFSET(titseln); + u8 tssr_index = TSSR_INDEX(titseln); + u8 index, sense; + u32 reg, tssr; + + switch (type & IRQ_TYPE_SENSE_MASK) { + case IRQ_TYPE_EDGE_RISING: + sense = TITSR_TITSEL_EDGE_RISING; + break; + + case IRQ_TYPE_EDGE_FALLING: + sense = TITSR_TITSEL_EDGE_FALLING; + break; + + default: + return -EINVAL; + } + + index = 0; + if (titseln >= TITSR0_MAX_INT) { + titseln -= TITSR0_MAX_INT; + index = 1; + } + + raw_spin_lock(&priv->lock); + tssr = readl_relaxed(priv->base + TSSR(tssr_index)); + tssr = rzg2l_disable_tint_and_set_tint_source(d, priv, tssr, tssr_offset, tssr_index); + reg = readl_relaxed(priv->base + TITSR(index)); + reg &= ~(IRQ_MASK << (titseln * TITSEL_WIDTH)); + reg |= sense << (titseln * TITSEL_WIDTH); + writel_relaxed(reg, priv->base + TITSR(index)); + rzg2l_clear_tint_int(priv, hwirq); + writel_relaxed(tssr, priv->base + TSSR(tssr_index)); + 
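+	/*
+	 * The write above restores the saved TIEN bit only after the new
+	 * sense has been programmed and any stale detection has been
+	 * cleared, so re-enabling the interrupt cannot latch a glitch
+	 * caused by the reprogramming itself.
+	 */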
raw_spin_unlock(&priv->lock);
+
+	return 0;
+}
+
+static int rzg2l_irqc_set_type(struct irq_data *d, unsigned int type)
+{
+	unsigned int hw_irq = irqd_to_hwirq(d);
+	int ret = -EINVAL;
+
+	if (hw_irq >= IRQC_IRQ_START && hw_irq <= IRQC_IRQ_COUNT)
+		ret = rzg2l_irq_set_type(d, type);
+	else if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ)
+		ret = rzg2l_tint_set_edge(d, type);
+	if (ret)
+		return ret;
+
+	return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
+}
+
+static int rzg2l_irqc_irq_suspend(void)
+{
+	struct rzg2l_irqc_reg_cache *cache = &rzg2l_irqc_data->cache;
+	void __iomem *base = rzg2l_irqc_data->base;
+
+	cache->iitsr = readl_relaxed(base + IITSR);
+	for (u8 i = 0; i < 2; i++)
+		cache->titsr[i] = readl_relaxed(base + TITSR(i));
+
+	return 0;
+}
+
+static void rzg2l_irqc_irq_resume(void)
+{
+	struct rzg2l_irqc_reg_cache *cache = &rzg2l_irqc_data->cache;
+	void __iomem *base = rzg2l_irqc_data->base;
+
+	/*
+	 * Restore only the interrupt type. TSSRx will be restored at the
+	 * request of the pin controller to avoid spurious interrupts due
+	 * to invalid PIN states.
+	 */
+	for (u8 i = 0; i < 2; i++)
+		writel_relaxed(cache->titsr[i], base + TITSR(i));
+	writel_relaxed(cache->iitsr, base + IITSR);
+}
+
+static struct syscore_ops rzg2l_irqc_syscore_ops = {
+	.suspend	= rzg2l_irqc_irq_suspend,
+	.resume		= rzg2l_irqc_irq_resume,
+};
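+
+/*
+ * A minimal sketch of the syscore pattern used above, assuming the stock
+ * <linux/syscore_ops.h> interface: the callbacks take no arguments and run
+ * late in the suspend sequence with a single CPU online and interrupts
+ * disabled, so they must not sleep. The names here are illustrative only:
+ *
+ *	static int example_suspend(void)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	static void example_resume(void)
+ *	{
+ *	}
+ *
+ *	static struct syscore_ops example_syscore_ops = {
+ *		.suspend = example_suspend,
+ *		.resume  = example_resume,
+ *	};
+ *
+ *	register_syscore_ops(&example_syscore_ops);
+ */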
+
+static const struct irq_chip rzg2l_irqc_chip = {
+	.name = "rzg2l-irqc",
+	.irq_eoi = rzg2l_irqc_eoi,
+	.irq_mask = irq_chip_mask_parent,
+	.irq_unmask = irq_chip_unmask_parent,
+	.irq_disable = rzg2l_irqc_irq_disable,
+	.irq_enable = rzg2l_irqc_irq_enable,
+	.irq_get_irqchip_state = irq_chip_get_parent_state,
+	.irq_set_irqchip_state = irq_chip_set_parent_state,
+	.irq_retrigger = irq_chip_retrigger_hierarchy,
+	.irq_set_type = rzg2l_irqc_set_type,
+	.irq_set_affinity = irq_chip_set_affinity_parent,
+	.flags = IRQCHIP_MASK_ON_SUSPEND |
+		 IRQCHIP_SET_TYPE_MASKED |
+		 IRQCHIP_SKIP_SET_WAKE,
+};
+
+static const struct irq_chip rzfive_irqc_chip = {
+	.name = "rzfive-irqc",
+	.irq_eoi = rzg2l_irqc_eoi,
+	.irq_mask = rzfive_irqc_mask,
+	.irq_unmask = rzfive_irqc_unmask,
+	.irq_disable = rzfive_irqc_irq_disable,
+	.irq_enable = rzfive_irqc_irq_enable,
+	.irq_get_irqchip_state = irq_chip_get_parent_state,
+	.irq_set_irqchip_state = irq_chip_set_parent_state,
+	.irq_retrigger = irq_chip_retrigger_hierarchy,
+	.irq_set_type = rzg2l_irqc_set_type,
+	.irq_set_affinity = irq_chip_set_affinity_parent,
+	.flags = IRQCHIP_MASK_ON_SUSPEND |
+		 IRQCHIP_SET_TYPE_MASKED |
+		 IRQCHIP_SKIP_SET_WAKE,
+};
+
+static int rzg2l_irqc_alloc(struct irq_domain *domain, unsigned int virq,
+			    unsigned int nr_irqs, void *arg)
+{
+	struct rzg2l_irqc_priv *priv = domain->host_data;
+	unsigned long tint = 0;
+	irq_hw_number_t hwirq;
+	unsigned int type;
+	int ret;
+
+	ret = irq_domain_translate_twocell(domain, arg, &hwirq, &type);
+	if (ret)
+		return ret;
+
+	/*
+	 * For TINT interrupts, i.e. where the pinctrl driver is a child of
+	 * the irqc domain, the hwirq and the TINT value are both encoded in
+	 * fwspec->param[0]: the hwirq (9-40 for TINT) occupies bits 0-15 and
+	 * the TINT value bits 16-31. The TINT value from the pinctrl driver
+	 * is programmed into the IRQC registers to enable a given GPIO pin
+	 * as an interrupt.
+	 */
+	if (hwirq > IRQC_IRQ_COUNT) {
+		tint = TINT_EXTRACT_GPIOINT(hwirq);
+		hwirq = TINT_EXTRACT_HWIRQ(hwirq);
+
+		if (hwirq < IRQC_TINT_START)
+			return -EINVAL;
+	}
+
+	if (hwirq > (IRQC_NUM_IRQ - 1))
+		return -EINVAL;
+
+	ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, priv->irqchip,
+					    (void *)(uintptr_t)tint);
+	if (ret)
+		return ret;
+
+	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &priv->fwspec[hwirq]);
+}
+
+static const struct irq_domain_ops rzg2l_irqc_domain_ops = {
+	.alloc = rzg2l_irqc_alloc,
+	.free = irq_domain_free_irqs_common,
+	.translate = irq_domain_translate_twocell,
+};
+
+static int rzg2l_irqc_parse_interrupts(struct rzg2l_irqc_priv *priv,
+				       struct device_node *np)
+{
+	struct of_phandle_args map;
+	unsigned int i;
+	int ret;
+
+	for (i = 0; i < IRQC_NUM_IRQ; i++) {
+		ret = of_irq_parse_one(np, i, &map);
+		if (ret)
+			return ret;
+		of_phandle_args_to_fwspec(np, map.args, map.args_count,
+					  &priv->fwspec[i]);
+	}
+
+	return 0;
+}
+
+static int rzg2l_irqc_common_probe(struct platform_device *pdev, struct device_node *parent,
+				   const struct irq_chip *irq_chip)
+{
+	struct irq_domain *irq_domain, *parent_domain;
+	struct device_node *node = pdev->dev.of_node;
+	struct device *dev = &pdev->dev;
+	struct reset_control *resetn;
+	int ret;
+
+	parent_domain = irq_find_host(parent);
+	if (!parent_domain)
+		return dev_err_probe(dev, -ENODEV, "cannot find parent domain\n");
+
+	rzg2l_irqc_data = devm_kzalloc(dev, sizeof(*rzg2l_irqc_data), GFP_KERNEL);
+	if (!rzg2l_irqc_data)
+		return -ENOMEM;
+
+	rzg2l_irqc_data->irqchip = irq_chip;
+
+	rzg2l_irqc_data->base = devm_of_iomap(dev, dev->of_node, 0, NULL);
+	if (IS_ERR(rzg2l_irqc_data->base))
+		return PTR_ERR(rzg2l_irqc_data->base);
+
+	ret = rzg2l_irqc_parse_interrupts(rzg2l_irqc_data, node);
+	if (ret)
+		return dev_err_probe(dev, ret, "cannot parse interrupts: %d\n", ret);
+
+	resetn = devm_reset_control_get_exclusive_deasserted(dev, NULL);
+	if (IS_ERR(resetn))
+		return dev_err_probe(dev, PTR_ERR(resetn),
+				     "failed to acquire deasserted reset\n");
+
+	ret = devm_pm_runtime_enable(dev);
+	if (ret)
+		return dev_err_probe(dev, ret, "devm_pm_runtime_enable failed: %d\n", ret);
+
+	ret = pm_runtime_resume_and_get(dev);
+	if (ret)
+		return dev_err_probe(dev, ret, "pm_runtime_resume_and_get failed: %d\n", ret);
+
+	raw_spin_lock_init(&rzg2l_irqc_data->lock);
+
+	irq_domain = irq_domain_create_hierarchy(parent_domain, 0, IRQC_NUM_IRQ, dev_fwnode(dev),
+						 &rzg2l_irqc_domain_ops, rzg2l_irqc_data);
+	if (!irq_domain) {
+		pm_runtime_put(dev);
+		return -ENOMEM;
+	}
+
+	register_syscore_ops(&rzg2l_irqc_syscore_ops);
+
+	return 0;
+}
+
+static int rzg2l_irqc_probe(struct platform_device *pdev, struct device_node *parent)
+{
+	return rzg2l_irqc_common_probe(pdev, parent, &rzg2l_irqc_chip);
+}
+
+static int rzfive_irqc_probe(struct platform_device *pdev, struct device_node *parent)
+{
+	return rzg2l_irqc_common_probe(pdev, parent, &rzfive_irqc_chip);
+}
+
+IRQCHIP_PLATFORM_DRIVER_BEGIN(rzg2l_irqc)
+IRQCHIP_MATCH("renesas,rzg2l-irqc", rzg2l_irqc_probe)
+IRQCHIP_MATCH("renesas,r9a07g043f-irqc", rzfive_irqc_probe)
+IRQCHIP_PLATFORM_DRIVER_END(rzg2l_irqc)
+MODULE_AUTHOR("Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>");
+MODULE_DESCRIPTION("Renesas RZ/G2L IRQC Driver");
diff --git a/drivers/irqchip/irq-renesas-rzv2h.c b/drivers/irqchip/irq-renesas-rzv2h.c
new file mode 100644
index 000000000000..899a423b5da8
--- /dev/null
+++ b/drivers/irqchip/irq-renesas-rzv2h.c
@@ -0,0 +1,622 @@
+// SPDX-License-Identifier:
GPL-2.0 +/* + * Renesas RZ/V2H(P) ICU Driver + * + * Based on irq-renesas-rzg2l.c + * + * Copyright (C) 2024 Renesas Electronics Corporation. + * + * Author: Fabrizio Castro <fabrizio.castro.jz@renesas.com> + */ + +#include <linux/bitfield.h> +#include <linux/cleanup.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/irqchip.h> +#include <linux/irqchip/irq-renesas-rzv2h.h> +#include <linux/irqdomain.h> +#include <linux/of_platform.h> +#include <linux/pm_runtime.h> +#include <linux/reset.h> +#include <linux/spinlock.h> + +/* DT "interrupts" indexes */ +#define ICU_IRQ_START 1 +#define ICU_IRQ_COUNT 16 +#define ICU_TINT_START (ICU_IRQ_START + ICU_IRQ_COUNT) +#define ICU_TINT_COUNT 32 +#define ICU_NUM_IRQ (ICU_TINT_START + ICU_TINT_COUNT) + +/* Registers */ +#define ICU_NSCNT 0x00 +#define ICU_NSCLR 0x04 +#define ICU_NITSR 0x08 +#define ICU_ISCTR 0x10 +#define ICU_ISCLR 0x14 +#define ICU_IITSR 0x18 +#define ICU_TSCTR 0x20 +#define ICU_TSCLR 0x24 +#define ICU_TITSR(k) (0x28 + (k) * 4) +#define ICU_TSSR(k) (0x30 + (k) * 4) +#define ICU_DMkSELy(k, y) (0x420 + (k) * 0x20 + (y) * 4) +#define ICU_DMACKSELk(k) (0x500 + (k) * 4) + +/* NMI */ +#define ICU_NMI_EDGE_FALLING 0 +#define ICU_NMI_EDGE_RISING 1 + +#define ICU_NSCLR_NCLR BIT(0) + +/* IRQ */ +#define ICU_IRQ_LEVEL_LOW 0 +#define ICU_IRQ_EDGE_FALLING 1 +#define ICU_IRQ_EDGE_RISING 2 +#define ICU_IRQ_EDGE_BOTH 3 + +#define ICU_IITSR_IITSEL_PREP(iitsel, n) ((iitsel) << ((n) * 2)) +#define ICU_IITSR_IITSEL_GET(iitsr, n) (((iitsr) >> ((n) * 2)) & 0x03) +#define ICU_IITSR_IITSEL_MASK(n) ICU_IITSR_IITSEL_PREP(0x03, n) + +/* TINT */ +#define ICU_TINT_EDGE_RISING 0 +#define ICU_TINT_EDGE_FALLING 1 +#define ICU_TINT_LEVEL_HIGH 2 +#define ICU_TINT_LEVEL_LOW 3 + +#define ICU_TSSR_TSSEL_PREP(tssel, n, field_width) ((tssel) << ((n) * (field_width))) +#define ICU_TSSR_TSSEL_MASK(n, field_width) \ +({\ + typeof(field_width) (_field_width) = (field_width); \ + ICU_TSSR_TSSEL_PREP((GENMASK(((_field_width) - 2), 0)), (n), _field_width); \ +}) + +#define ICU_TSSR_TIEN(n, field_width) \ +({\ + typeof(field_width) (_field_width) = (field_width); \ + BIT((_field_width) - 1) << ((n) * (_field_width)); \ +}) + +#define ICU_TITSR_K(tint_nr) ((tint_nr) / 16) +#define ICU_TITSR_TITSEL_N(tint_nr) ((tint_nr) % 16) +#define ICU_TITSR_TITSEL_PREP(titsel, n) ICU_IITSR_IITSEL_PREP(titsel, n) +#define ICU_TITSR_TITSEL_MASK(n) ICU_IITSR_IITSEL_MASK(n) +#define ICU_TITSR_TITSEL_GET(titsr, n) ICU_IITSR_IITSEL_GET(titsr, n) + +#define ICU_TINT_EXTRACT_HWIRQ(x) FIELD_GET(GENMASK(15, 0), (x)) +#define ICU_TINT_EXTRACT_GPIOINT(x) FIELD_GET(GENMASK(31, 16), (x)) +#define ICU_RZG3E_TINT_OFFSET 0x800 +#define ICU_RZG3E_TSSEL_MAX_VAL 0x8c +#define ICU_RZV2H_TSSEL_MAX_VAL 0x55 + +/** + * struct rzv2h_hw_info - Interrupt Control Unit controller hardware info structure. + * @tssel_lut: TINT lookup table + * @t_offs: TINT offset + * @max_tssel: TSSEL max value + * @field_width: TSSR field width + */ +struct rzv2h_hw_info { + const u8 *tssel_lut; + u16 t_offs; + u8 max_tssel; + u8 field_width; +}; + +/* DMAC */ +#define ICU_DMAC_DkRQ_SEL_MASK GENMASK(9, 0) + +#define ICU_DMAC_DMAREQ_SHIFT(up) ((up) * 16) +#define ICU_DMAC_DMAREQ_MASK(up) (ICU_DMAC_DkRQ_SEL_MASK \ + << ICU_DMAC_DMAREQ_SHIFT(up)) +#define ICU_DMAC_PREP_DMAREQ(sel, up) (FIELD_PREP(ICU_DMAC_DkRQ_SEL_MASK, (sel)) \ + << ICU_DMAC_DMAREQ_SHIFT(up)) + +/** + * struct rzv2h_icu_priv - Interrupt Control Unit controller private data structure. 
+ * @base: Controller's base address + * @fwspec: IRQ firmware specific data + * @lock: Lock to serialize access to hardware registers + * @info: Pointer to struct rzv2h_hw_info + */ +struct rzv2h_icu_priv { + void __iomem *base; + struct irq_fwspec fwspec[ICU_NUM_IRQ]; + raw_spinlock_t lock; + const struct rzv2h_hw_info *info; +}; + +void rzv2h_icu_register_dma_req(struct platform_device *icu_dev, u8 dmac_index, u8 dmac_channel, + u16 req_no) +{ + struct rzv2h_icu_priv *priv = platform_get_drvdata(icu_dev); + u32 icu_dmksely, dmareq, dmareq_mask; + u8 y, upper; + + y = dmac_channel / 2; + upper = dmac_channel % 2; + + dmareq = ICU_DMAC_PREP_DMAREQ(req_no, upper); + dmareq_mask = ICU_DMAC_DMAREQ_MASK(upper); + + guard(raw_spinlock_irqsave)(&priv->lock); + + icu_dmksely = readl(priv->base + ICU_DMkSELy(dmac_index, y)); + icu_dmksely = (icu_dmksely & ~dmareq_mask) | dmareq; + writel(icu_dmksely, priv->base + ICU_DMkSELy(dmac_index, y)); +} +EXPORT_SYMBOL_GPL(rzv2h_icu_register_dma_req); + +static inline struct rzv2h_icu_priv *irq_data_to_priv(struct irq_data *data) +{ + return data->domain->host_data; +} + +static void rzv2h_icu_eoi(struct irq_data *d) +{ + struct rzv2h_icu_priv *priv = irq_data_to_priv(d); + unsigned int hw_irq = irqd_to_hwirq(d); + unsigned int tintirq_nr; + u32 bit; + + scoped_guard(raw_spinlock, &priv->lock) { + if (hw_irq >= ICU_TINT_START) { + tintirq_nr = hw_irq - ICU_TINT_START; + bit = BIT(tintirq_nr); + if (!irqd_is_level_type(d)) + writel_relaxed(bit, priv->base + priv->info->t_offs + ICU_TSCLR); + } else if (hw_irq >= ICU_IRQ_START) { + tintirq_nr = hw_irq - ICU_IRQ_START; + bit = BIT(tintirq_nr); + if (!irqd_is_level_type(d)) + writel_relaxed(bit, priv->base + ICU_ISCLR); + } else { + writel_relaxed(ICU_NSCLR_NCLR, priv->base + ICU_NSCLR); + } + } + + irq_chip_eoi_parent(d); +} + +static void rzv2h_tint_irq_endisable(struct irq_data *d, bool enable) +{ + struct rzv2h_icu_priv *priv = irq_data_to_priv(d); + unsigned int hw_irq = irqd_to_hwirq(d); + u32 tint_nr, tssel_n, k, tssr; + u8 nr_tint; + + if (hw_irq < ICU_TINT_START) + return; + + tint_nr = hw_irq - ICU_TINT_START; + nr_tint = 32 / priv->info->field_width; + k = tint_nr / nr_tint; + tssel_n = tint_nr % nr_tint; + + guard(raw_spinlock)(&priv->lock); + tssr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TSSR(k)); + if (enable) + tssr |= ICU_TSSR_TIEN(tssel_n, priv->info->field_width); + else + tssr &= ~ICU_TSSR_TIEN(tssel_n, priv->info->field_width); + writel_relaxed(tssr, priv->base + priv->info->t_offs + ICU_TSSR(k)); + + /* + * A glitch in the edge detection circuit can cause a spurious + * interrupt. Clear the status flag after setting the ICU_TSSRk + * registers, which is recommended by the hardware manual as a + * countermeasure. 
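+	 * The unconditional TSCLR write below is harmless when no stale
+	 * detection is pending, since the register is write-one-to-clear.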
+ */ + writel_relaxed(BIT(tint_nr), priv->base + priv->info->t_offs + ICU_TSCLR); +} + +static void rzv2h_icu_irq_disable(struct irq_data *d) +{ + irq_chip_disable_parent(d); + rzv2h_tint_irq_endisable(d, false); +} + +static void rzv2h_icu_irq_enable(struct irq_data *d) +{ + rzv2h_tint_irq_endisable(d, true); + irq_chip_enable_parent(d); +} + +static int rzv2h_nmi_set_type(struct irq_data *d, unsigned int type) +{ + struct rzv2h_icu_priv *priv = irq_data_to_priv(d); + u32 sense; + + switch (type & IRQ_TYPE_SENSE_MASK) { + case IRQ_TYPE_EDGE_FALLING: + sense = ICU_NMI_EDGE_FALLING; + break; + + case IRQ_TYPE_EDGE_RISING: + sense = ICU_NMI_EDGE_RISING; + break; + + default: + return -EINVAL; + } + + writel_relaxed(sense, priv->base + ICU_NITSR); + + return 0; +} + +static void rzv2h_clear_irq_int(struct rzv2h_icu_priv *priv, unsigned int hwirq) +{ + unsigned int irq_nr = hwirq - ICU_IRQ_START; + u32 isctr, iitsr, iitsel; + u32 bit = BIT(irq_nr); + + isctr = readl_relaxed(priv->base + ICU_ISCTR); + iitsr = readl_relaxed(priv->base + ICU_IITSR); + iitsel = ICU_IITSR_IITSEL_GET(iitsr, irq_nr); + + /* + * When level sensing is used, the interrupt flag gets automatically cleared when the + * interrupt signal is de-asserted by the source of the interrupt request, therefore clear + * the interrupt only for edge triggered interrupts. + */ + if ((isctr & bit) && (iitsel != ICU_IRQ_LEVEL_LOW)) + writel_relaxed(bit, priv->base + ICU_ISCLR); +} + +static int rzv2h_irq_set_type(struct irq_data *d, unsigned int type) +{ + struct rzv2h_icu_priv *priv = irq_data_to_priv(d); + unsigned int hwirq = irqd_to_hwirq(d); + u32 irq_nr = hwirq - ICU_IRQ_START; + u32 iitsr, sense; + + switch (type & IRQ_TYPE_SENSE_MASK) { + case IRQ_TYPE_LEVEL_LOW: + sense = ICU_IRQ_LEVEL_LOW; + break; + + case IRQ_TYPE_EDGE_FALLING: + sense = ICU_IRQ_EDGE_FALLING; + break; + + case IRQ_TYPE_EDGE_RISING: + sense = ICU_IRQ_EDGE_RISING; + break; + + case IRQ_TYPE_EDGE_BOTH: + sense = ICU_IRQ_EDGE_BOTH; + break; + + default: + return -EINVAL; + } + + guard(raw_spinlock)(&priv->lock); + iitsr = readl_relaxed(priv->base + ICU_IITSR); + iitsr &= ~ICU_IITSR_IITSEL_MASK(irq_nr); + iitsr |= ICU_IITSR_IITSEL_PREP(sense, irq_nr); + rzv2h_clear_irq_int(priv, hwirq); + writel_relaxed(iitsr, priv->base + ICU_IITSR); + + return 0; +} + +static void rzv2h_clear_tint_int(struct rzv2h_icu_priv *priv, unsigned int hwirq) +{ + unsigned int tint_nr = hwirq - ICU_TINT_START; + int titsel_n = ICU_TITSR_TITSEL_N(tint_nr); + u32 tsctr, titsr, titsel; + u32 bit = BIT(tint_nr); + int k = tint_nr / 16; + + tsctr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TSCTR); + titsr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TITSR(k)); + titsel = ICU_TITSR_TITSEL_GET(titsr, titsel_n); + + /* + * Writing 1 to the corresponding flag from register ICU_TSCTR only has effect if + * TSTATn = 1b and if it's a rising edge or a falling edge interrupt. 
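+	 * ICU_IRQ_LEVEL_LOW is the only level-sensitive IITSEL encoding,
+	 * so any other value here denotes one of the edge modes.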
+ */ + if ((tsctr & bit) && ((titsel == ICU_TINT_EDGE_RISING) || + (titsel == ICU_TINT_EDGE_FALLING))) + writel_relaxed(bit, priv->base + priv->info->t_offs + ICU_TSCLR); +} + +static int rzv2h_tint_set_type(struct irq_data *d, unsigned int type) +{ + u32 titsr, titsr_k, titsel_n, tien; + struct rzv2h_icu_priv *priv; + u32 tssr, tssr_k, tssel_n; + unsigned int hwirq; + u32 tint, sense; + int tint_nr; + u8 nr_tint; + + switch (type & IRQ_TYPE_SENSE_MASK) { + case IRQ_TYPE_LEVEL_LOW: + sense = ICU_TINT_LEVEL_LOW; + break; + + case IRQ_TYPE_LEVEL_HIGH: + sense = ICU_TINT_LEVEL_HIGH; + break; + + case IRQ_TYPE_EDGE_RISING: + sense = ICU_TINT_EDGE_RISING; + break; + + case IRQ_TYPE_EDGE_FALLING: + sense = ICU_TINT_EDGE_FALLING; + break; + + default: + return -EINVAL; + } + + priv = irq_data_to_priv(d); + tint = (u32)(uintptr_t)irq_data_get_irq_chip_data(d); + if (tint > priv->info->max_tssel) + return -EINVAL; + + if (priv->info->tssel_lut) + tint = priv->info->tssel_lut[tint]; + + hwirq = irqd_to_hwirq(d); + tint_nr = hwirq - ICU_TINT_START; + + nr_tint = 32 / priv->info->field_width; + tssr_k = tint_nr / nr_tint; + tssel_n = tint_nr % nr_tint; + tien = ICU_TSSR_TIEN(tssel_n, priv->info->field_width); + + titsr_k = ICU_TITSR_K(tint_nr); + titsel_n = ICU_TITSR_TITSEL_N(tint_nr); + + guard(raw_spinlock)(&priv->lock); + + tssr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TSSR(tssr_k)); + tssr &= ~(ICU_TSSR_TSSEL_MASK(tssel_n, priv->info->field_width) | tien); + tssr |= ICU_TSSR_TSSEL_PREP(tint, tssel_n, priv->info->field_width); + + writel_relaxed(tssr, priv->base + priv->info->t_offs + ICU_TSSR(tssr_k)); + + titsr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TITSR(titsr_k)); + titsr &= ~ICU_TITSR_TITSEL_MASK(titsel_n); + titsr |= ICU_TITSR_TITSEL_PREP(sense, titsel_n); + + writel_relaxed(titsr, priv->base + priv->info->t_offs + ICU_TITSR(titsr_k)); + + rzv2h_clear_tint_int(priv, hwirq); + + writel_relaxed(tssr | tien, priv->base + priv->info->t_offs + ICU_TSSR(tssr_k)); + + return 0; +} + +static int rzv2h_icu_set_type(struct irq_data *d, unsigned int type) +{ + unsigned int hw_irq = irqd_to_hwirq(d); + int ret; + + if (hw_irq >= ICU_TINT_START) + ret = rzv2h_tint_set_type(d, type); + else if (hw_irq >= ICU_IRQ_START) + ret = rzv2h_irq_set_type(d, type); + else + ret = rzv2h_nmi_set_type(d, type); + + if (ret) + return ret; + + return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH); +} + +static const struct irq_chip rzv2h_icu_chip = { + .name = "rzv2h-icu", + .irq_eoi = rzv2h_icu_eoi, + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_disable = rzv2h_icu_irq_disable, + .irq_enable = rzv2h_icu_irq_enable, + .irq_get_irqchip_state = irq_chip_get_parent_state, + .irq_set_irqchip_state = irq_chip_set_parent_state, + .irq_retrigger = irq_chip_retrigger_hierarchy, + .irq_set_type = rzv2h_icu_set_type, + .irq_set_affinity = irq_chip_set_affinity_parent, + .flags = IRQCHIP_MASK_ON_SUSPEND | + IRQCHIP_SET_TYPE_MASKED | + IRQCHIP_SKIP_SET_WAKE, +}; + +static int rzv2h_icu_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, + void *arg) +{ + struct rzv2h_icu_priv *priv = domain->host_data; + unsigned long tint = 0; + irq_hw_number_t hwirq; + unsigned int type; + int ret; + + ret = irq_domain_translate_twocell(domain, arg, &hwirq, &type); + if (ret) + return ret; + + /* + * For TINT interrupts the hwirq and TINT are encoded in + * fwspec->param[0]. + * hwirq is embedded in bits 0-15. + * TINT is embedded in bits 16-31. 
+ */ + if (hwirq >= ICU_TINT_START) { + tint = ICU_TINT_EXTRACT_GPIOINT(hwirq); + hwirq = ICU_TINT_EXTRACT_HWIRQ(hwirq); + + if (hwirq < ICU_TINT_START) + return -EINVAL; + } + + if (hwirq > (ICU_NUM_IRQ - 1)) + return -EINVAL; + + ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &rzv2h_icu_chip, + (void *)(uintptr_t)tint); + if (ret) + return ret; + + return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &priv->fwspec[hwirq]); +} + +static const struct irq_domain_ops rzv2h_icu_domain_ops = { + .alloc = rzv2h_icu_alloc, + .free = irq_domain_free_irqs_common, + .translate = irq_domain_translate_twocell, +}; + +static int rzv2h_icu_parse_interrupts(struct rzv2h_icu_priv *priv, struct device_node *np) +{ + struct of_phandle_args map; + unsigned int i; + int ret; + + for (i = 0; i < ICU_NUM_IRQ; i++) { + ret = of_irq_parse_one(np, i, &map); + if (ret) + return ret; + + of_phandle_args_to_fwspec(np, map.args, map.args_count, &priv->fwspec[i]); + } + + return 0; +} + +static int rzv2h_icu_probe_common(struct platform_device *pdev, struct device_node *parent, + const struct rzv2h_hw_info *hw_info) +{ + struct irq_domain *irq_domain, *parent_domain; + struct device_node *node = pdev->dev.of_node; + struct rzv2h_icu_priv *rzv2h_icu_data; + struct reset_control *resetn; + int ret; + + parent_domain = irq_find_host(parent); + if (!parent_domain) { + dev_err(&pdev->dev, "cannot find parent domain\n"); + return -ENODEV; + } + + rzv2h_icu_data = devm_kzalloc(&pdev->dev, sizeof(*rzv2h_icu_data), GFP_KERNEL); + if (!rzv2h_icu_data) + return -ENOMEM; + + platform_set_drvdata(pdev, rzv2h_icu_data); + + rzv2h_icu_data->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL); + if (IS_ERR(rzv2h_icu_data->base)) + return PTR_ERR(rzv2h_icu_data->base); + + ret = rzv2h_icu_parse_interrupts(rzv2h_icu_data, node); + if (ret) { + dev_err(&pdev->dev, "cannot parse interrupts: %d\n", ret); + return ret; + } + + resetn = devm_reset_control_get_exclusive_deasserted(&pdev->dev, NULL); + if (IS_ERR(resetn)) { + ret = PTR_ERR(resetn); + dev_err(&pdev->dev, "failed to acquire deasserted reset: %d\n", ret); + return ret; + } + + ret = devm_pm_runtime_enable(&pdev->dev); + if (ret < 0) { + dev_err(&pdev->dev, "devm_pm_runtime_enable failed, %d\n", ret); + return ret; + } + + ret = pm_runtime_resume_and_get(&pdev->dev); + if (ret < 0) { + dev_err(&pdev->dev, "pm_runtime_resume_and_get failed: %d\n", ret); + return ret; + } + + raw_spin_lock_init(&rzv2h_icu_data->lock); + + irq_domain = irq_domain_create_hierarchy(parent_domain, 0, ICU_NUM_IRQ, + dev_fwnode(&pdev->dev), &rzv2h_icu_domain_ops, + rzv2h_icu_data); + if (!irq_domain) { + dev_err(&pdev->dev, "failed to add irq domain\n"); + ret = -ENOMEM; + goto pm_put; + } + + rzv2h_icu_data->info = hw_info; + + /* + * coccicheck complains about a missing put_device call before returning, but it's a false + * positive. We still need &pdev->dev after successfully returning from this function. 
+ */ + return 0; + +pm_put: + pm_runtime_put(&pdev->dev); + + return ret; +} + +/* Mapping based on port index on Table 4.2-6 and TSSEL bits on Table 4.6-4 */ +static const u8 rzg3e_tssel_lut[] = { + 81, 82, 83, 84, 85, 86, 87, 88, /* P00-P07 */ + 89, 90, 91, 92, 93, 94, 95, 96, /* P10-P17 */ + 111, 112, /* P20-P21 */ + 97, 98, 99, 100, 101, 102, 103, 104, /* P30-P37 */ + 105, 106, 107, 108, 109, 110, /* P40-P45 */ + 113, 114, 115, 116, 117, 118, 119, /* P50-P56 */ + 120, 121, 122, 123, 124, 125, 126, /* P60-P66 */ + 127, 128, 129, 130, 131, 132, 133, 134, /* P70-P77 */ + 135, 136, 137, 138, 139, 140, /* P80-P85 */ + 43, 44, 45, 46, 47, 48, 49, 50, /* PA0-PA7 */ + 51, 52, 53, 54, 55, 56, 57, 58, /* PB0-PB7 */ + 59, 60, 61, /* PC0-PC2 */ + 62, 63, 64, 65, 66, 67, 68, 69, /* PD0-PD7 */ + 70, 71, 72, 73, 74, 75, 76, 77, /* PE0-PE7 */ + 78, 79, 80, /* PF0-PF2 */ + 25, 26, 27, 28, 29, 30, 31, 32, /* PG0-PG7 */ + 33, 34, 35, 36, 37, 38, /* PH0-PH5 */ + 4, 5, 6, 7, 8, /* PJ0-PJ4 */ + 39, 40, 41, 42, /* PK0-PK3 */ + 9, 10, 11, 12, 21, 22, 23, 24, /* PL0-PL7 */ + 13, 14, 15, 16, 17, 18, 19, 20, /* PM0-PM7 */ + 0, 1, 2, 3 /* PS0-PS3 */ +}; + +static const struct rzv2h_hw_info rzg3e_hw_params = { + .tssel_lut = rzg3e_tssel_lut, + .t_offs = ICU_RZG3E_TINT_OFFSET, + .max_tssel = ICU_RZG3E_TSSEL_MAX_VAL, + .field_width = 16, +}; + +static const struct rzv2h_hw_info rzv2h_hw_params = { + .t_offs = 0, + .max_tssel = ICU_RZV2H_TSSEL_MAX_VAL, + .field_width = 8, +}; + +static int rzg3e_icu_probe(struct platform_device *pdev, struct device_node *parent) +{ + return rzv2h_icu_probe_common(pdev, parent, &rzg3e_hw_params); +} + +static int rzv2h_icu_probe(struct platform_device *pdev, struct device_node *parent) +{ + return rzv2h_icu_probe_common(pdev, parent, &rzv2h_hw_params); +} + +IRQCHIP_PLATFORM_DRIVER_BEGIN(rzv2h_icu) +IRQCHIP_MATCH("renesas,r9a09g047-icu", rzg3e_icu_probe) +IRQCHIP_MATCH("renesas,r9a09g057-icu", rzv2h_icu_probe) +IRQCHIP_PLATFORM_DRIVER_END(rzv2h_icu) +MODULE_AUTHOR("Fabrizio Castro <fabrizio.castro.jz@renesas.com>"); +MODULE_DESCRIPTION("Renesas RZ/V2H(P) ICU Driver"); diff --git a/drivers/irqchip/irq-riscv-aplic-direct.c b/drivers/irqchip/irq-riscv-aplic-direct.c new file mode 100644 index 000000000000..c2a75bf3d20c --- /dev/null +++ b/drivers/irqchip/irq-riscv-aplic-direct.c @@ -0,0 +1,333 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + * Copyright (C) 2022 Ventana Micro Systems Inc. + */ + +#include <linux/acpi.h> +#include <linux/bitfield.h> +#include <linux/bitops.h> +#include <linux/cpu.h> +#include <linux/interrupt.h> +#include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqchip/riscv-aplic.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/printk.h> +#include <linux/smp.h> + +#include "irq-riscv-aplic-main.h" + +#define APLIC_DISABLE_IDELIVERY 0 +#define APLIC_ENABLE_IDELIVERY 1 +#define APLIC_DISABLE_ITHRESHOLD 1 +#define APLIC_ENABLE_ITHRESHOLD 0 + +struct aplic_direct { + struct aplic_priv priv; + struct irq_domain *irqdomain; + struct cpumask lmask; +}; + +struct aplic_idc { + u32 hart_index; + void __iomem *regs; + struct aplic_direct *direct; +}; + +static unsigned int aplic_direct_parent_irq; +static DEFINE_PER_CPU(struct aplic_idc, aplic_idcs); + +static void aplic_direct_irq_eoi(struct irq_data *d) +{ + /* + * The fasteoi_handler requires irq_eoi() callback hence + * provide a dummy handler. 
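+ * handle_fasteoi_irq() expects a valid irq_eoi() callback and calls
+ * it once handling completes, so leaving the callback NULL would
+ * oops; the pending bit itself is already cleared by the CLAIMI
+ * read in aplic_direct_handle_irq() below.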
+ */
+}
+
+#ifdef CONFIG_SMP
+static int aplic_direct_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+ bool force)
+{
+ struct aplic_priv *priv = irq_data_get_irq_chip_data(d);
+ struct aplic_direct *direct = container_of(priv, struct aplic_direct, priv);
+ struct aplic_idc *idc;
+ unsigned int cpu, val;
+ void __iomem *target;
+
+ if (force)
+ cpu = cpumask_first_and(&direct->lmask, mask_val);
+ else
+ cpu = cpumask_first_and_and(&direct->lmask, mask_val, cpu_online_mask);
+
+ if (cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ idc = per_cpu_ptr(&aplic_idcs, cpu);
+ target = priv->regs + APLIC_TARGET_BASE + (d->hwirq - 1) * sizeof(u32);
+ val = FIELD_PREP(APLIC_TARGET_HART_IDX, idc->hart_index);
+ val |= FIELD_PREP(APLIC_TARGET_IPRIO, APLIC_DEFAULT_PRIORITY);
+ writel(val, target);
+
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+ return IRQ_SET_MASK_OK_DONE;
+}
+#endif
+
+static struct irq_chip aplic_direct_chip = {
+ .name = "APLIC-DIRECT",
+ .irq_mask = aplic_irq_mask,
+ .irq_unmask = aplic_irq_unmask,
+ .irq_set_type = aplic_irq_set_type,
+ .irq_eoi = aplic_direct_irq_eoi,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = aplic_direct_set_affinity,
+#endif
+ .flags = IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static int aplic_direct_irqdomain_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
+ unsigned long *hwirq, unsigned int *type)
+{
+ struct aplic_priv *priv = d->host_data;
+
+ return aplic_irqdomain_translate(fwspec, priv->gsi_base, hwirq, type);
+}
+
+static int aplic_direct_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct aplic_priv *priv = domain->host_data;
+ struct aplic_direct *direct = container_of(priv, struct aplic_direct, priv);
+ struct irq_fwspec *fwspec = arg;
+ irq_hw_number_t hwirq;
+ unsigned int type;
+ int i, ret;
+
+ ret = aplic_irqdomain_translate(fwspec, priv->gsi_base, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_domain_set_info(domain, virq + i, hwirq + i, &aplic_direct_chip,
+ priv, handle_fasteoi_irq, NULL, NULL);
+ irq_set_affinity(virq + i, &direct->lmask);
+ }
+
+ return 0;
+}
+
+static const struct irq_domain_ops aplic_direct_irqdomain_ops = {
+ .translate = aplic_direct_irqdomain_translate,
+ .alloc = aplic_direct_irqdomain_alloc,
+ .free = irq_domain_free_irqs_top,
+};
+
+/*
+ * To handle an APLIC direct-mode interrupt, we just read the CLAIMI
+ * register, which returns the highest-priority pending interrupt and
+ * clears its pending bit. This process is repeated until the CLAIMI
+ * register returns zero.
+ */
+static void aplic_direct_handle_irq(struct irq_desc *desc)
+{
+ struct aplic_idc *idc = this_cpu_ptr(&aplic_idcs);
+ struct irq_domain *irqdomain = idc->direct->irqdomain;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ irq_hw_number_t hw_irq;
+ int irq;
+
+ chained_irq_enter(chip, desc);
+
+ while ((hw_irq = readl(idc->regs + APLIC_IDC_CLAIMI))) {
+ hw_irq = hw_irq >> APLIC_IDC_TOPI_ID_SHIFT;
+ irq = irq_find_mapping(irqdomain, hw_irq);
+
+ if (unlikely(irq <= 0)) {
+ dev_warn_ratelimited(idc->direct->priv.dev,
+ "hw_irq %lu mapping not found\n", hw_irq);
+ } else {
+ generic_handle_irq(irq);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void aplic_idc_set_delivery(struct aplic_idc *idc, bool en)
+{
+ u32 de = (en) ? APLIC_ENABLE_IDELIVERY : APLIC_DISABLE_IDELIVERY;
+ u32 th = (en) ?
APLIC_ENABLE_ITHRESHOLD : APLIC_DISABLE_ITHRESHOLD; + + /* Priority must be less than threshold for interrupt triggering */ + writel(th, idc->regs + APLIC_IDC_ITHRESHOLD); + + /* Delivery must be set to 1 for interrupt triggering */ + writel(de, idc->regs + APLIC_IDC_IDELIVERY); +} + +static int aplic_direct_dying_cpu(unsigned int cpu) +{ + if (aplic_direct_parent_irq) + disable_percpu_irq(aplic_direct_parent_irq); + + return 0; +} + +static int aplic_direct_starting_cpu(unsigned int cpu) +{ + if (aplic_direct_parent_irq) { + enable_percpu_irq(aplic_direct_parent_irq, + irq_get_trigger_type(aplic_direct_parent_irq)); + } + + return 0; +} + +static int aplic_direct_parse_parent_hwirq(struct device *dev, u32 index, + u32 *parent_hwirq, unsigned long *parent_hartid, + struct aplic_priv *priv) +{ + struct of_phandle_args parent; + unsigned long hartid; + int rc; + + if (!is_of_node(dev->fwnode)) { + hartid = acpi_rintc_ext_parent_to_hartid(priv->acpi_aplic_id, index); + if (hartid == INVALID_HARTID) + return -ENODEV; + + *parent_hartid = hartid; + *parent_hwirq = RV_IRQ_EXT; + return 0; + } + + rc = of_irq_parse_one(to_of_node(dev->fwnode), index, &parent); + if (rc) + return rc; + + rc = riscv_of_parent_hartid(parent.np, parent_hartid); + if (rc) + return rc; + + *parent_hwirq = parent.args[0]; + return 0; +} + +int aplic_direct_setup(struct device *dev, void __iomem *regs) +{ + int i, j, rc, cpu, current_cpu, setup_count = 0; + struct aplic_direct *direct; + struct irq_domain *domain; + struct aplic_priv *priv; + struct aplic_idc *idc; + unsigned long hartid; + u32 v, hwirq; + + direct = devm_kzalloc(dev, sizeof(*direct), GFP_KERNEL); + if (!direct) + return -ENOMEM; + priv = &direct->priv; + + rc = aplic_setup_priv(priv, dev, regs); + if (rc) { + dev_err(dev, "failed to create APLIC context\n"); + return rc; + } + + /* Setup per-CPU IDC and target CPU mask */ + current_cpu = get_cpu(); + for (i = 0; i < priv->nr_idcs; i++) { + rc = aplic_direct_parse_parent_hwirq(dev, i, &hwirq, &hartid, priv); + if (rc) { + dev_warn(dev, "parent irq for IDC%d not found\n", i); + continue; + } + + /* + * Skip interrupts other than external interrupts for + * current privilege level. + */ + if (hwirq != RV_IRQ_EXT) + continue; + + cpu = riscv_hartid_to_cpuid(hartid); + if (cpu < 0) { + dev_warn(dev, "invalid cpuid for IDC%d\n", i); + continue; + } + + cpumask_set_cpu(cpu, &direct->lmask); + + idc = per_cpu_ptr(&aplic_idcs, cpu); + rc = riscv_get_hart_index(dev->fwnode, i, &idc->hart_index); + if (rc) { + dev_warn(dev, "hart index not found for IDC%d\n", i); + continue; + } + idc->regs = priv->regs + APLIC_IDC_BASE + idc->hart_index * APLIC_IDC_SIZE; + idc->direct = direct; + + aplic_idc_set_delivery(idc, true); + + /* + * Boot cpu might not have APLIC hart_index = 0 so check + * and update target registers of all interrupts. 
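+ * For example, if the boot hart is wired to IDC1, the reset
+ * default of hart index 0 left in the target registers by
+ * aplic_init_hw_irqs() would route every wired interrupt to
+ * the wrong hart, so rewrite them all with this hart index.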
+ */ + if (cpu == current_cpu && idc->hart_index) { + v = FIELD_PREP(APLIC_TARGET_HART_IDX, idc->hart_index); + v |= FIELD_PREP(APLIC_TARGET_IPRIO, APLIC_DEFAULT_PRIORITY); + for (j = 1; j <= priv->nr_irqs; j++) + writel(v, priv->regs + APLIC_TARGET_BASE + (j - 1) * sizeof(u32)); + } + + setup_count++; + } + put_cpu(); + + /* Find parent domain and register chained handler */ + domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), + DOMAIN_BUS_ANY); + if (!aplic_direct_parent_irq && domain) { + aplic_direct_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT); + if (aplic_direct_parent_irq) { + irq_set_chained_handler(aplic_direct_parent_irq, + aplic_direct_handle_irq); + + /* + * Setup CPUHP notifier to enable parent + * interrupt on all CPUs + */ + cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "irqchip/riscv/aplic:starting", + aplic_direct_starting_cpu, + aplic_direct_dying_cpu); + } + } + + /* Fail if we were not able to setup IDC for any CPU */ + if (!setup_count) + return -ENODEV; + + /* Setup global config and interrupt delivery */ + aplic_init_hw_global(priv, false); + + /* Create irq domain instance for the APLIC */ + direct->irqdomain = irq_domain_create_linear(dev->fwnode, priv->nr_irqs + 1, + &aplic_direct_irqdomain_ops, priv); + if (!direct->irqdomain) { + dev_err(dev, "failed to create direct irq domain\n"); + return -ENOMEM; + } + + /* Advertise the interrupt controller */ + dev_info(dev, "%d interrupts directly connected to %d CPUs\n", + priv->nr_irqs, priv->nr_idcs); + + return 0; +} diff --git a/drivers/irqchip/irq-riscv-aplic-main.c b/drivers/irqchip/irq-riscv-aplic-main.c new file mode 100644 index 000000000000..93e7c51f944a --- /dev/null +++ b/drivers/irqchip/irq-riscv-aplic-main.c @@ -0,0 +1,234 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + * Copyright (C) 2022 Ventana Micro Systems Inc. + */ + +#include <linux/acpi.h> +#include <linux/bitfield.h> +#include <linux/irqchip/riscv-aplic.h> +#include <linux/irqchip/riscv-imsic.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/platform_device.h> +#include <linux/printk.h> + +#include "irq-riscv-aplic-main.h" + +void aplic_irq_unmask(struct irq_data *d) +{ + struct aplic_priv *priv = irq_data_get_irq_chip_data(d); + + writel(d->hwirq, priv->regs + APLIC_SETIENUM); +} + +void aplic_irq_mask(struct irq_data *d) +{ + struct aplic_priv *priv = irq_data_get_irq_chip_data(d); + + writel(d->hwirq, priv->regs + APLIC_CLRIENUM); +} + +int aplic_irq_set_type(struct irq_data *d, unsigned int type) +{ + struct aplic_priv *priv = irq_data_get_irq_chip_data(d); + void __iomem *sourcecfg; + u32 val = 0; + + switch (type) { + case IRQ_TYPE_NONE: + val = APLIC_SOURCECFG_SM_INACTIVE; + break; + case IRQ_TYPE_LEVEL_LOW: + val = APLIC_SOURCECFG_SM_LEVEL_LOW; + break; + case IRQ_TYPE_LEVEL_HIGH: + val = APLIC_SOURCECFG_SM_LEVEL_HIGH; + break; + case IRQ_TYPE_EDGE_FALLING: + val = APLIC_SOURCECFG_SM_EDGE_FALL; + break; + case IRQ_TYPE_EDGE_RISING: + val = APLIC_SOURCECFG_SM_EDGE_RISE; + break; + default: + return -EINVAL; + } + + sourcecfg = priv->regs + APLIC_SOURCECFG_BASE; + sourcecfg += (d->hwirq - 1) * sizeof(u32); + writel(val, sourcecfg); + + return 0; +} + +int aplic_irqdomain_translate(struct irq_fwspec *fwspec, u32 gsi_base, + unsigned long *hwirq, unsigned int *type) +{ + if (WARN_ON(fwspec->param_count < 2)) + return -EINVAL; + if (WARN_ON(!fwspec->param[0])) + return -EINVAL; + + /* For DT, gsi_base is always zero. 
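+ * For ACPI, gsi_base comes from the GSI mapping of the APLIC, so,
+ * for example, gsi_base = 32 with fwspec->param[0] = 40 translates
+ * to hwirq 8.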
+ */
+ *hwirq = fwspec->param[0] - gsi_base;
+ *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
+
+ WARN_ON(*type == IRQ_TYPE_NONE);
+
+ return 0;
+}
+
+void aplic_init_hw_global(struct aplic_priv *priv, bool msi_mode)
+{
+ u32 val;
+#ifdef CONFIG_RISCV_M_MODE
+ u32 valh;
+
+ if (msi_mode) {
+ val = lower_32_bits(priv->msicfg.base_ppn);
+ valh = FIELD_PREP(APLIC_xMSICFGADDRH_BAPPN, upper_32_bits(priv->msicfg.base_ppn));
+ valh |= FIELD_PREP(APLIC_xMSICFGADDRH_LHXW, priv->msicfg.lhxw);
+ valh |= FIELD_PREP(APLIC_xMSICFGADDRH_HHXW, priv->msicfg.hhxw);
+ valh |= FIELD_PREP(APLIC_xMSICFGADDRH_LHXS, priv->msicfg.lhxs);
+ valh |= FIELD_PREP(APLIC_xMSICFGADDRH_HHXS, priv->msicfg.hhxs);
+ writel(val, priv->regs + APLIC_xMSICFGADDR);
+ writel(valh, priv->regs + APLIC_xMSICFGADDRH);
+ }
+#endif
+
+ /* Setup APLIC domaincfg register */
+ val = readl(priv->regs + APLIC_DOMAINCFG);
+ val |= APLIC_DOMAINCFG_IE;
+ if (msi_mode)
+ val |= APLIC_DOMAINCFG_DM;
+ writel(val, priv->regs + APLIC_DOMAINCFG);
+ if (readl(priv->regs + APLIC_DOMAINCFG) != val)
+ dev_warn(priv->dev, "unable to write 0x%x in domaincfg\n", val);
+}
+
+static void aplic_init_hw_irqs(struct aplic_priv *priv)
+{
+ int i;
+
+ /* Disable all interrupts */
+ for (i = 0; i <= priv->nr_irqs; i += 32)
+ writel(-1U, priv->regs + APLIC_CLRIE_BASE + (i / 32) * sizeof(u32));
+
+ /* Set interrupt type and default priority for all interrupts */
+ for (i = 1; i <= priv->nr_irqs; i++) {
+ writel(0, priv->regs + APLIC_SOURCECFG_BASE + (i - 1) * sizeof(u32));
+ writel(APLIC_DEFAULT_PRIORITY,
+ priv->regs + APLIC_TARGET_BASE + (i - 1) * sizeof(u32));
+ }
+
+ /* Clear APLIC domaincfg */
+ writel(0, priv->regs + APLIC_DOMAINCFG);
+}
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id aplic_acpi_match[] = {
+ { "RSCV0002", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, aplic_acpi_match);
+
+#endif
+
+int aplic_setup_priv(struct aplic_priv *priv, struct device *dev, void __iomem *regs)
+{
+ struct device_node *np = to_of_node(dev->fwnode);
+ struct of_phandle_args parent;
+ int rc;
+
+ /* Save device pointer and register base */
+ priv->dev = dev;
+ priv->regs = regs;
+
+ if (np) {
+ /* Find out number of interrupt sources */
+ rc = of_property_read_u32(np, "riscv,num-sources", &priv->nr_irqs);
+ if (rc) {
+ dev_err(dev, "failed to get number of interrupt sources\n");
+ return rc;
+ }
+
+ /*
+ * Find out number of IDCs based on parent interrupts
+ *
+ * If the "msi-parent" property is present then we ignore the
+ * APLIC IDCs, which forces the APLIC driver to use MSI mode.
+ */
+ if (!of_property_present(np, "msi-parent")) {
+ while (!of_irq_parse_one(np, priv->nr_idcs, &parent))
+ priv->nr_idcs++;
+ }
+ } else {
+ rc = riscv_acpi_get_gsi_info(dev->fwnode, &priv->gsi_base, &priv->acpi_aplic_id,
+ &priv->nr_irqs, &priv->nr_idcs);
+ if (rc) {
+ dev_err(dev, "failed to find GSI mapping\n");
+ return rc;
+ }
+ }
+
+ /* Setup the initial state of the APLIC interrupts */
+ aplic_init_hw_irqs(priv);
+
+ return 0;
+}
+
+static int aplic_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ bool msi_mode = false;
+ void __iomem *regs;
+ int rc;
+
+ /* Map the MMIO registers */
+ regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regs)) {
+ dev_err(dev, "failed to map MMIO registers\n");
+ return PTR_ERR(regs);
+ }
+
+ /*
+ * If the msi-parent property is present then set up APLIC in MSI
+ * mode, otherwise set up APLIC in direct mode.
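+ * For example, a devicetree node carrying
+ *
+ *   msi-parent = <&imsics_s>;
+ *
+ * (label purely illustrative) selects MSI mode even on hardware
+ * that also implements IDC registers.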
+ */ + if (is_of_node(dev->fwnode)) + msi_mode = of_property_present(to_of_node(dev->fwnode), "msi-parent"); + else + msi_mode = imsic_acpi_get_fwnode(NULL) ? 1 : 0; + + if (msi_mode) + rc = aplic_msi_setup(dev, regs); + else + rc = aplic_direct_setup(dev, regs); + if (rc) + dev_err_probe(dev, rc, "failed to setup APLIC in %s mode\n", + msi_mode ? "MSI" : "direct"); + +#ifdef CONFIG_ACPI + if (!acpi_disabled) + acpi_dev_clear_dependencies(ACPI_COMPANION(dev)); +#endif + + return rc; +} + +static const struct of_device_id aplic_match[] = { + { .compatible = "riscv,aplic" }, + {} +}; + +static struct platform_driver aplic_driver = { + .driver = { + .name = "riscv-aplic", + .of_match_table = aplic_match, + .acpi_match_table = ACPI_PTR(aplic_acpi_match), + }, + .probe = aplic_probe, +}; +builtin_platform_driver(aplic_driver); diff --git a/drivers/irqchip/irq-riscv-aplic-main.h b/drivers/irqchip/irq-riscv-aplic-main.h new file mode 100644 index 000000000000..b0ad8cde69b1 --- /dev/null +++ b/drivers/irqchip/irq-riscv-aplic-main.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + * Copyright (C) 2022 Ventana Micro Systems Inc. + */ + +#ifndef _IRQ_RISCV_APLIC_MAIN_H +#define _IRQ_RISCV_APLIC_MAIN_H + +#include <linux/device.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/fwnode.h> + +#define APLIC_DEFAULT_PRIORITY 1 + +struct aplic_msicfg { + phys_addr_t base_ppn; + u32 hhxs; + u32 hhxw; + u32 lhxs; + u32 lhxw; +}; + +struct aplic_priv { + struct device *dev; + u32 gsi_base; + u32 nr_irqs; + u32 nr_idcs; + u32 acpi_aplic_id; + void __iomem *regs; + struct aplic_msicfg msicfg; +}; + +void aplic_irq_unmask(struct irq_data *d); +void aplic_irq_mask(struct irq_data *d); +int aplic_irq_set_type(struct irq_data *d, unsigned int type); +int aplic_irqdomain_translate(struct irq_fwspec *fwspec, u32 gsi_base, + unsigned long *hwirq, unsigned int *type); +void aplic_init_hw_global(struct aplic_priv *priv, bool msi_mode); +int aplic_setup_priv(struct aplic_priv *priv, struct device *dev, void __iomem *regs); +int aplic_direct_setup(struct device *dev, void __iomem *regs); +#ifdef CONFIG_RISCV_APLIC_MSI +int aplic_msi_setup(struct device *dev, void __iomem *regs); +#else +static inline int aplic_msi_setup(struct device *dev, void __iomem *regs) +{ + return -ENODEV; +} +#endif + +#endif diff --git a/drivers/irqchip/irq-riscv-aplic-msi.c b/drivers/irqchip/irq-riscv-aplic-msi.c new file mode 100644 index 000000000000..fb8d1838609f --- /dev/null +++ b/drivers/irqchip/irq-riscv-aplic-msi.c @@ -0,0 +1,285 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + * Copyright (C) 2022 Ventana Micro Systems Inc. 
+ */ + +#include <linux/bitfield.h> +#include <linux/bitops.h> +#include <linux/cpu.h> +#include <linux/interrupt.h> +#include <linux/irqchip.h> +#include <linux/irqchip/riscv-aplic.h> +#include <linux/irqchip/riscv-imsic.h> +#include <linux/module.h> +#include <linux/msi.h> +#include <linux/of_irq.h> +#include <linux/platform_device.h> +#include <linux/printk.h> +#include <linux/smp.h> + +#include "irq-riscv-aplic-main.h" + +static void aplic_msi_irq_mask(struct irq_data *d) +{ + aplic_irq_mask(d); + irq_chip_mask_parent(d); +} + +static void aplic_msi_irq_unmask(struct irq_data *d) +{ + irq_chip_unmask_parent(d); + aplic_irq_unmask(d); +} + +static void aplic_msi_irq_retrigger_level(struct irq_data *d) +{ + struct aplic_priv *priv = irq_data_get_irq_chip_data(d); + + switch (irqd_get_trigger_type(d)) { + case IRQ_TYPE_LEVEL_LOW: + case IRQ_TYPE_LEVEL_HIGH: + /* + * The section "4.9.2 Special consideration for level-sensitive interrupt + * sources" of the RISC-V AIA specification says: + * + * A second option is for the interrupt service routine to write the + * APLIC’s source identity number for the interrupt to the domain’s + * setipnum register just before exiting. This will cause the interrupt’s + * pending bit to be set to one again if the source is still asserting + * an interrupt, but not if the source is not asserting an interrupt. + */ + writel(d->hwirq, priv->regs + APLIC_SETIPNUM_LE); + break; + } +} + +static void aplic_msi_irq_eoi(struct irq_data *d) +{ + /* + * EOI handling is required only for level-triggered interrupts + * when APLIC is in MSI mode. + */ + aplic_msi_irq_retrigger_level(d); +} + +static int aplic_msi_irq_set_type(struct irq_data *d, unsigned int type) +{ + int rc = aplic_irq_set_type(d, type); + + if (rc) + return rc; + /* + * Updating sourcecfg register for level-triggered interrupts + * requires interrupt retriggering when APLIC is in MSI mode. 
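+ * For example, if a level-triggered source is still asserted while
+ * its sourcecfg is rewritten, the setipnum write done by
+ * aplic_msi_irq_retrigger_level() re-sets the pending bit so that
+ * the interrupt is not lost across the reconfiguration.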
+ */ + aplic_msi_irq_retrigger_level(d); + return 0; +} + +static void aplic_msi_write_msg(struct irq_data *d, struct msi_msg *msg) +{ + unsigned int group_index, hart_index, guest_index, val; + struct aplic_priv *priv = irq_data_get_irq_chip_data(d); + struct aplic_msicfg *mc = &priv->msicfg; + phys_addr_t tppn, tbppn, msg_addr; + void __iomem *target; + + /* For zeroed MSI, simply write zero into the target register */ + if (!msg->address_hi && !msg->address_lo && !msg->data) { + target = priv->regs + APLIC_TARGET_BASE; + target += (d->hwirq - 1) * sizeof(u32); + writel(0, target); + return; + } + + /* Sanity check on message data */ + WARN_ON(msg->data > APLIC_TARGET_EIID_MASK); + + /* Compute target MSI address */ + msg_addr = (((u64)msg->address_hi) << 32) | msg->address_lo; + tppn = msg_addr >> APLIC_xMSICFGADDR_PPN_SHIFT; + + /* Compute target HART Base PPN */ + tbppn = tppn; + tbppn &= ~APLIC_xMSICFGADDR_PPN_HART(mc->lhxs); + tbppn &= ~APLIC_xMSICFGADDR_PPN_LHX(mc->lhxw, mc->lhxs); + tbppn &= ~APLIC_xMSICFGADDR_PPN_HHX(mc->hhxw, mc->hhxs); + WARN_ON(tbppn != mc->base_ppn); + + /* Compute target group and hart indexes */ + group_index = (tppn >> APLIC_xMSICFGADDR_PPN_HHX_SHIFT(mc->hhxs)) & + APLIC_xMSICFGADDR_PPN_HHX_MASK(mc->hhxw); + hart_index = (tppn >> APLIC_xMSICFGADDR_PPN_LHX_SHIFT(mc->lhxs)) & + APLIC_xMSICFGADDR_PPN_LHX_MASK(mc->lhxw); + hart_index |= (group_index << mc->lhxw); + WARN_ON(hart_index > APLIC_TARGET_HART_IDX_MASK); + + /* Compute target guest index */ + guest_index = tppn & APLIC_xMSICFGADDR_PPN_HART(mc->lhxs); + WARN_ON(guest_index > APLIC_TARGET_GUEST_IDX_MASK); + + /* Update IRQ TARGET register */ + target = priv->regs + APLIC_TARGET_BASE; + target += (d->hwirq - 1) * sizeof(u32); + val = FIELD_PREP(APLIC_TARGET_HART_IDX, hart_index); + val |= FIELD_PREP(APLIC_TARGET_GUEST_IDX, guest_index); + val |= FIELD_PREP(APLIC_TARGET_EIID, msg->data); + writel(val, target); +} + +static void aplic_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc) +{ + arg->desc = desc; + arg->hwirq = (u32)desc->data.icookie.value; +} + +static int aplic_msi_translate(struct irq_domain *d, struct irq_fwspec *fwspec, + unsigned long *hwirq, unsigned int *type) +{ + struct msi_domain_info *info = d->host_data; + struct aplic_priv *priv = info->data; + + return aplic_irqdomain_translate(fwspec, priv->gsi_base, hwirq, type); +} + +static const struct msi_domain_template aplic_msi_template = { + .chip = { + .name = "APLIC-MSI", + .irq_mask = aplic_msi_irq_mask, + .irq_unmask = aplic_msi_irq_unmask, + .irq_set_type = aplic_msi_irq_set_type, + .irq_eoi = aplic_msi_irq_eoi, +#ifdef CONFIG_SMP + .irq_set_affinity = irq_chip_set_affinity_parent, +#endif + .irq_write_msi_msg = aplic_msi_write_msg, + .flags = IRQCHIP_SET_TYPE_MASKED | + IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_MASK_ON_SUSPEND, + }, + + .ops = { + .set_desc = aplic_msi_set_desc, + .msi_translate = aplic_msi_translate, + }, + + .info = { + .bus_token = DOMAIN_BUS_WIRED_TO_MSI, + .flags = MSI_FLAG_USE_DEV_FWNODE, + .handler = handle_fasteoi_irq, + .handler_name = "fasteoi", + }, +}; + +int aplic_msi_setup(struct device *dev, void __iomem *regs) +{ + const struct imsic_global_config *imsic_global; + struct irq_domain *msi_domain; + struct aplic_priv *priv; + struct aplic_msicfg *mc; + phys_addr_t pa; + int rc; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + rc = aplic_setup_priv(priv, dev, regs); + if (rc) { + dev_err(dev, "failed to create APLIC context\n"); + return rc; + } + mc = 
&priv->msicfg;
+
+ /*
+ * The APLIC outgoing MSI config registers assume the target MSI
+ * controller is a RISC-V AIA IMSIC controller.
+ */
+ imsic_global = imsic_get_global_config();
+ if (!imsic_global) {
+ dev_err(dev, "IMSIC global config not found\n");
+ return -ENODEV;
+ }
+
+ /* Find number of guest index bits (LHXS) */
+ mc->lhxs = imsic_global->guest_index_bits;
+ if (APLIC_xMSICFGADDRH_LHXS_MASK < mc->lhxs) {
+ dev_err(dev, "IMSIC guest index bits too big for APLIC LHXS\n");
+ return -EINVAL;
+ }
+
+ /* Find number of HART index bits (LHXW) */
+ mc->lhxw = imsic_global->hart_index_bits;
+ if (APLIC_xMSICFGADDRH_LHXW_MASK < mc->lhxw) {
+ dev_err(dev, "IMSIC hart index bits too big for APLIC LHXW\n");
+ return -EINVAL;
+ }
+
+ /* Find number of group index bits (HHXW) */
+ mc->hhxw = imsic_global->group_index_bits;
+ if (APLIC_xMSICFGADDRH_HHXW_MASK < mc->hhxw) {
+ dev_err(dev, "IMSIC group index bits too big for APLIC HHXW\n");
+ return -EINVAL;
+ }
+
+ /* Find first bit position of group index (HHXS) */
+ mc->hhxs = imsic_global->group_index_shift;
+ if (mc->hhxs < (2 * APLIC_xMSICFGADDR_PPN_SHIFT)) {
+ dev_err(dev, "IMSIC group index shift should be >= %d\n",
+ (2 * APLIC_xMSICFGADDR_PPN_SHIFT));
+ return -EINVAL;
+ }
+ mc->hhxs -= (2 * APLIC_xMSICFGADDR_PPN_SHIFT);
+ if (APLIC_xMSICFGADDRH_HHXS_MASK < mc->hhxs) {
+ dev_err(dev, "IMSIC group index shift too big for APLIC HHXS\n");
+ return -EINVAL;
+ }
+
+ /* Compute PPN base */
+ mc->base_ppn = imsic_global->base_addr >> APLIC_xMSICFGADDR_PPN_SHIFT;
+ mc->base_ppn &= ~APLIC_xMSICFGADDR_PPN_HART(mc->lhxs);
+ mc->base_ppn &= ~APLIC_xMSICFGADDR_PPN_LHX(mc->lhxw, mc->lhxs);
+ mc->base_ppn &= ~APLIC_xMSICFGADDR_PPN_HHX(mc->hhxw, mc->hhxs);
+
+ /* Setup global config and interrupt delivery */
+ aplic_init_hw_global(priv, true);
+
+ /* Set the APLIC device MSI domain if not available */
+ if (!dev_get_msi_domain(dev)) {
+ /*
+ * The device MSI domain for OF devices is only set at the
+ * time of populating/creating the OF device. If the device
+ * MSI domain is discovered later, after the OF device has
+ * been created, then we need to set it explicitly before
+ * using any platform MSI functions.
+ *
+ * In the case of the APLIC device, the parent MSI domain is
+ * always the IMSIC, and the IMSIC MSI domains are created
+ * later through platform driver probing, so we set it
+ * explicitly here.
+ */
+ if (is_of_node(dev->fwnode)) {
+ of_msi_configure(dev, to_of_node(dev->fwnode));
+ } else {
+ msi_domain = irq_find_matching_fwnode(imsic_acpi_get_fwnode(dev),
+ DOMAIN_BUS_PLATFORM_MSI);
+ if (msi_domain)
+ dev_set_msi_domain(dev, msi_domain);
+ }
+
+ if (!dev_get_msi_domain(dev))
+ return -EPROBE_DEFER;
+ }
+
+ if (!msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN, &aplic_msi_template,
+ priv->nr_irqs + 1, priv, priv)) {
+ dev_err(dev, "failed to create MSI irq domain\n");
+ return -ENOMEM;
+ }
+
+ /* Advertise the interrupt controller */
+ pa = priv->msicfg.base_ppn << APLIC_xMSICFGADDR_PPN_SHIFT;
+ dev_info(dev, "%d interrupts forwarded to MSI base %pa\n", priv->nr_irqs, &pa);
+
+ return 0;
+} diff --git a/drivers/irqchip/irq-riscv-imsic-early.c b/drivers/irqchip/irq-riscv-imsic-early.c new file mode 100644 index 000000000000..6bac67cc0b6d --- /dev/null +++ b/drivers/irqchip/irq-riscv-imsic-early.c @@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+
+#define pr_fmt(fmt) "riscv-imsic: " fmt
+#include <linux/acpi.h>
+#include <linux/cpu.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/riscv-imsic.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+
+#include "irq-riscv-imsic-state.h"
+
+static int imsic_parent_irq;
+bool imsic_noipi __ro_after_init;
+
+static int __init imsic_noipi_cfg(char *buf)
+{
+ imsic_noipi = true;
+ return 0;
+}
+early_param("irqchip.riscv_imsic_noipi", imsic_noipi_cfg);
+
+#ifdef CONFIG_SMP
+static void imsic_ipi_send(unsigned int cpu)
+{
+ struct imsic_local_config *local = per_cpu_ptr(imsic->global.local, cpu);
+
+ writel(IMSIC_IPI_ID, local->msi_va);
+}
+
+static void imsic_ipi_starting_cpu(void)
+{
+ if (imsic_noipi)
+ return;
+
+ /* Enable IPIs for current CPU. */
+ __imsic_id_set_enable(IMSIC_IPI_ID);
+}
+
+static void imsic_ipi_dying_cpu(void)
+{
+ if (imsic_noipi)
+ return;
+
+ /* Disable IPIs for current CPU. */
+ __imsic_id_clear_enable(IMSIC_IPI_ID);
+}
+
+static int __init imsic_ipi_domain_init(void)
+{
+ int virq;
+
+ if (imsic_noipi)
+ return 0;
+
+ /* Create IMSIC IPI multiplexing */
+ virq = ipi_mux_create(IMSIC_NR_IPI, imsic_ipi_send);
+ if (virq <= 0)
+ return virq < 0 ? virq : -ENOMEM;
+
+ /* Set vIRQ range */
+ riscv_ipi_set_virq_range(virq, IMSIC_NR_IPI);
+
+ /* Announce that IMSIC is providing IPIs */
+ pr_info("%pfwP: providing IPIs using interrupt %d\n", imsic->fwnode, IMSIC_IPI_ID);
+
+ return 0;
+}
+#else
+static void imsic_ipi_starting_cpu(void) { }
+static void imsic_ipi_dying_cpu(void) { }
+static int __init imsic_ipi_domain_init(void) { return 0; }
+#endif
+
+/*
+ * To handle an interrupt, we read the TOPEI CSR and write zero in one
+ * instruction. If the TOPEI CSR is non-zero, we translate TOPEI.ID to
+ * a Linux interrupt number and let the Linux IRQ subsystem handle it.
+ */
+static void imsic_handle_irq(struct irq_desc *desc)
+{
+ struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned long local_id;
+
+ /*
+ * Process pending local synchronization instead of waiting
+ * for the per-CPU local timer to expire.
+ */
+ imsic_local_sync_all(false);
+
+ chained_irq_enter(chip, desc);
+
+ while ((local_id = csr_swap(CSR_TOPEI, 0))) {
+ local_id >>= TOPEI_ID_SHIFT;
+
+ if (!imsic_noipi && local_id == IMSIC_IPI_ID) {
+ if (IS_ENABLED(CONFIG_SMP))
+ ipi_mux_process();
+ continue;
+ }
+
+ if (unlikely(local_id > imsic->global.nr_ids)) {
+ pr_warn_ratelimited("vector not found for local ID 0x%lx\n", local_id);
+ continue;
+ }
+
+ generic_handle_irq(lpriv->vectors[local_id].irq);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int imsic_starting_cpu(unsigned int cpu)
+{
+ /* Mark per-CPU IMSIC state as online */
+ imsic_state_online();
+
+ /* Enable per-CPU parent interrupt */
+ enable_percpu_irq(imsic_parent_irq, irq_get_trigger_type(imsic_parent_irq));
+
+ /* Setup IPIs */
+ imsic_ipi_starting_cpu();
+
+ /*
+ * Interrupt identities might have been enabled/disabled while
+ * this CPU was not running, so sync up the local enable/disable
+ * state.
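+ * For example, imsic_vector_mask() invoked while this CPU was
+ * offline only marked the vector dirty; the corresponding EIEx
+ * CSR update is performed by the forced sync below.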
+ */ + imsic_local_sync_all(true); + + /* Enable local interrupt delivery */ + imsic_local_delivery(true); + + return 0; +} + +static int imsic_dying_cpu(unsigned int cpu) +{ + /* Cleanup IPIs */ + imsic_ipi_dying_cpu(); + + /* Mark per-CPU IMSIC state as offline */ + imsic_state_offline(); + + return 0; +} + +static int __init imsic_early_probe(struct fwnode_handle *fwnode) +{ + struct irq_domain *domain; + int rc; + + /* Find parent domain and register chained handler */ + domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY); + if (!domain) { + pr_err("%pfwP: Failed to find INTC domain\n", fwnode); + return -ENOENT; + } + imsic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT); + if (!imsic_parent_irq) { + pr_err("%pfwP: Failed to create INTC mapping\n", fwnode); + return -ENOENT; + } + + /* Initialize IPI domain */ + rc = imsic_ipi_domain_init(); + if (rc) { + pr_err("%pfwP: Failed to initialize IPI domain\n", fwnode); + return rc; + } + + /* Setup chained handler to the parent domain interrupt */ + irq_set_chained_handler(imsic_parent_irq, imsic_handle_irq); + + /* + * Setup cpuhp state (must be done after setting imsic_parent_irq) + * + * Don't disable per-CPU IMSIC file when CPU goes offline + * because this affects IPI and the masking/unmasking of + * virtual IPIs is done via generic IPI-Mux + */ + cpuhp_setup_state(CPUHP_AP_IRQ_RISCV_IMSIC_STARTING, "irqchip/riscv/imsic:starting", + imsic_starting_cpu, imsic_dying_cpu); + + return 0; +} + +static int __init imsic_early_dt_init(struct device_node *node, struct device_node *parent) +{ + struct fwnode_handle *fwnode = &node->fwnode; + int rc; + + /* Setup IMSIC state */ + rc = imsic_setup_state(fwnode, NULL); + if (rc) { + pr_err("%pfwP: failed to setup state (error %d)\n", fwnode, rc); + return rc; + } + + /* Do early setup of IPIs */ + rc = imsic_early_probe(fwnode); + if (rc) + return rc; + + /* Ensure that OF platform device gets probed */ + of_node_clear_flag(node, OF_POPULATED); + return 0; +} + +IRQCHIP_DECLARE(riscv_imsic, "riscv,imsics", imsic_early_dt_init); + +#ifdef CONFIG_ACPI + +static struct fwnode_handle *imsic_acpi_fwnode; + +struct fwnode_handle *imsic_acpi_get_fwnode(struct device *dev) +{ + return imsic_acpi_fwnode; +} +EXPORT_SYMBOL_GPL(imsic_acpi_get_fwnode); + +static int __init imsic_early_acpi_init(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_imsic *imsic = (struct acpi_madt_imsic *)header; + int rc; + + imsic_acpi_fwnode = irq_domain_alloc_named_fwnode("imsic"); + if (!imsic_acpi_fwnode) { + pr_err("unable to allocate IMSIC FW node\n"); + return -ENOMEM; + } + + /* Setup IMSIC state */ + rc = imsic_setup_state(imsic_acpi_fwnode, imsic); + if (rc) { + pr_err("%pfwP: failed to setup state (error %d)\n", imsic_acpi_fwnode, rc); + return rc; + } + + /* Do early setup of IMSIC state and IPIs */ + rc = imsic_early_probe(imsic_acpi_fwnode); + if (rc) { + irq_domain_free_fwnode(imsic_acpi_fwnode); + imsic_acpi_fwnode = NULL; + return rc; + } + + rc = imsic_platform_acpi_probe(imsic_acpi_fwnode); + +#ifdef CONFIG_PCI + if (!rc) + pci_msi_register_fwnode_provider(&imsic_acpi_get_fwnode); +#endif + + if (rc) + pr_err("%pfwP: failed to register IMSIC for MSI functionality (error %d)\n", + imsic_acpi_fwnode, rc); + + /* + * Even if imsic_platform_acpi_probe() fails, the IPI part of IMSIC can + * continue to work. So, no need to return failure. This is similar to + * DT where IPI works but MSI probe fails for some reason. 
+ */
+ return 0;
+}
+
+IRQCHIP_ACPI_DECLARE(riscv_imsic, ACPI_MADT_TYPE_IMSIC, NULL,
+ 1, imsic_early_acpi_init);
+#endif diff --git a/drivers/irqchip/irq-riscv-imsic-platform.c b/drivers/irqchip/irq-riscv-imsic-platform.c new file mode 100644 index 000000000000..7228a33f6c37 --- /dev/null +++ b/drivers/irqchip/irq-riscv-imsic-platform.c @@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+
+#define pr_fmt(fmt) "riscv-imsic: " fmt
+#include <linux/acpi.h>
+#include <linux/bitmap.h>
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+
+#include <linux/irqchip/irq-msi-lib.h>
+#include "irq-riscv-imsic-state.h"
+
+static bool imsic_cpu_page_phys(unsigned int cpu, unsigned int guest_index,
+ phys_addr_t *out_msi_pa)
+{
+ struct imsic_global_config *global;
+ struct imsic_local_config *local;
+
+ global = &imsic->global;
+ local = per_cpu_ptr(global->local, cpu);
+
+ if (BIT(global->guest_index_bits) <= guest_index)
+ return false;
+
+ if (out_msi_pa)
+ *out_msi_pa = local->msi_pa + (guest_index * IMSIC_MMIO_PAGE_SZ);
+
+ return true;
+}
+
+static void imsic_irq_mask(struct irq_data *d)
+{
+ imsic_vector_mask(irq_data_get_irq_chip_data(d));
+}
+
+static void imsic_irq_unmask(struct irq_data *d)
+{
+ imsic_vector_unmask(irq_data_get_irq_chip_data(d));
+}
+
+static int imsic_irq_retrigger(struct irq_data *d)
+{
+ struct imsic_vector *vec = irq_data_get_irq_chip_data(d);
+ struct imsic_local_config *local;
+
+ if (WARN_ON(!vec))
+ return -ENOENT;
+
+ local = per_cpu_ptr(imsic->global.local, vec->cpu);
+ writel_relaxed(vec->local_id, local->msi_va);
+ return 0;
+}
+
+static void imsic_irq_ack(struct irq_data *d)
+{
+ irq_move_irq(d);
+}
+
+static void imsic_irq_compose_vector_msg(struct imsic_vector *vec, struct msi_msg *msg)
+{
+ phys_addr_t msi_addr;
+
+ if (WARN_ON(!vec))
+ return;
+
+ if (WARN_ON(!imsic_cpu_page_phys(vec->cpu, 0, &msi_addr)))
+ return;
+
+ msg->address_hi = upper_32_bits(msi_addr);
+ msg->address_lo = lower_32_bits(msi_addr);
+ msg->data = vec->local_id;
+}
+
+static void imsic_irq_compose_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ imsic_irq_compose_vector_msg(irq_data_get_irq_chip_data(d), msg);
+}
+
+#ifdef CONFIG_SMP
+static void imsic_msi_update_msg(struct irq_data *d, struct imsic_vector *vec)
+{
+ struct msi_msg msg = { };
+
+ imsic_irq_compose_vector_msg(vec, &msg);
+ irq_data_get_irq_chip(d)->irq_write_msi_msg(d, &msg);
+}
+
+static int imsic_irq_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+ bool force)
+{
+ struct imsic_vector *old_vec, *new_vec;
+ struct imsic_vector tmp_vec;
+
+ /*
+ * Requirements for the downstream irqdomains (or devices):
+ *
+ * 1) Downstream irqdomains (or devices) with an atomic MSI update
+ *    can happily do imsic_irq_set_affinity() in process context on
+ *    any CPU, so the irqchip of such irqdomains must not set the
+ *    IRQCHIP_MOVE_DEFERRED flag.
+ *
+ * 2) Downstream irqdomains (or devices) with a non-atomic MSI update
+ *    must use imsic_irq_set_affinity() in interrupt context upon
+ *    the next device interrupt, so the irqchip of such irqdomains
+ *    must set the IRQCHIP_MOVE_DEFERRED flag.
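+ *
+ * For example, PCI devices without MSI masking update the MSI
+ * address and data through separate config writes (a non-atomic
+ * update), which is why imsic_init_dev_msi_info() below sets
+ * IRQCHIP_MOVE_DEFERRED for the PCI MSI/MSI-X bus tokens.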
+ */
+
+ old_vec = irq_data_get_irq_chip_data(d);
+ if (WARN_ON(!old_vec))
+ return -ENOENT;
+
+ /* If the old vector's cpu belongs to the target cpumask then do nothing */
+ if (cpumask_test_cpu(old_vec->cpu, mask_val))
+ return IRQ_SET_MASK_OK_DONE;
+
+ /* If a move is already in-flight then return failure */
+ if (imsic_vector_get_move(old_vec))
+ return -EBUSY;
+
+ /* Get a new vector on the desired set of CPUs */
+ new_vec = imsic_vector_alloc(old_vec->irq, mask_val);
+ if (!new_vec)
+ return -ENOSPC;
+
+ /*
+ * A device with a non-atomic MSI update might see an intermediate
+ * state when changing target IMSIC vector from one CPU to another.
+ *
+ * To avoid losing an interrupt to such an intermediate state, do
+ * the following (just like x86 APIC):
+ *
+ * 1) First write a temporary IMSIC vector to the device, one with
+ *    the same MSI address as the old IMSIC vector but whose MSI
+ *    data matches the new IMSIC vector.
+ *
+ * 2) Next write the new IMSIC vector to the device.
+ *
+ * Based on the above, __imsic_local_sync() must check the pending
+ * status of both the old MSI data and the new MSI data on the old
+ * CPU.
+ */
+ if (!irq_can_move_in_process_context(d) &&
+     new_vec->local_id != old_vec->local_id) {
+ /* Setup temporary vector */
+ tmp_vec.cpu = old_vec->cpu;
+ tmp_vec.local_id = new_vec->local_id;
+
+ /* Point device to the temporary vector */
+ imsic_msi_update_msg(d, &tmp_vec);
+ }
+
+ /* Point device to the new vector */
+ imsic_msi_update_msg(d, new_vec);
+
+ /* Update irq descriptors with the new vector */
+ d->chip_data = new_vec;
+
+ /* Update effective affinity */
+ irq_data_update_effective_affinity(d, cpumask_of(new_vec->cpu));
+
+ /* Move state of the old vector to the new vector */
+ imsic_vector_move(old_vec, new_vec);
+
+ return IRQ_SET_MASK_OK_DONE;
+}
+
+static void imsic_irq_force_complete_move(struct irq_data *d)
+{
+ struct imsic_vector *mvec, *vec = irq_data_get_irq_chip_data(d);
+ unsigned int cpu = smp_processor_id();
+
+ if (WARN_ON(!vec))
+ return;
+
+ /* Do nothing if there is no in-flight move */
+ mvec = imsic_vector_get_move(vec);
+ if (!mvec)
+ return;
+
+ /* Do nothing if the old IMSIC vector does not belong to the current CPU */
+ if (mvec->cpu != cpu)
+ return;
+
+ /*
+ * The best we can do is forcibly clean up the old IMSIC vector.
+ *
+ * The challenges here are the same as for the x86 vector domain,
+ * so refer to the comments on the irq_force_complete_move()
+ * implementation in arch/x86/kernel/apic/vector.c.
+ */
+
+ /* Force cleanup of the in-flight move */
+ pr_info("IRQ fixup: irq %d move in progress, old vector cpu %d local_id %d\n",
+ d->irq, mvec->cpu, mvec->local_id);
+ imsic_vector_force_move_cleanup(vec);
+}
+#endif
+
+static struct irq_chip imsic_irq_base_chip = {
+ .name = "IMSIC",
+ .irq_mask = imsic_irq_mask,
+ .irq_unmask = imsic_irq_unmask,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = imsic_irq_set_affinity,
+ .irq_force_complete_move = imsic_irq_force_complete_move,
+#endif
+ .irq_retrigger = imsic_irq_retrigger,
+ .irq_ack = imsic_irq_ack,
+ .irq_compose_msi_msg = imsic_irq_compose_msg,
+ .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static int imsic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct imsic_vector *vec;
+
+ /* Multi-MSI is not supported yet.
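+ * Supporting it would presumably require allocating a naturally
+ * aligned, power-of-two block of contiguous local IDs, which the
+ * per-CPU matrix allocator used below does not provide.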
*/ + if (nr_irqs > 1) + return -EOPNOTSUPP; + + vec = imsic_vector_alloc(virq, cpu_online_mask); + if (!vec) + return -ENOSPC; + + irq_domain_set_info(domain, virq, virq, &imsic_irq_base_chip, vec, + handle_edge_irq, NULL, NULL); + irq_set_noprobe(virq); + irq_set_affinity(virq, cpu_online_mask); + irq_data_update_effective_affinity(irq_get_irq_data(virq), cpumask_of(vec->cpu)); + + return 0; +} + +static void imsic_irq_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + struct irq_data *d = irq_domain_get_irq_data(domain, virq); + + imsic_vector_free(irq_data_get_irq_chip_data(d)); + irq_domain_free_irqs_parent(domain, virq, nr_irqs); +} + +#ifdef CONFIG_GENERIC_IRQ_DEBUGFS +static void imsic_irq_debug_show(struct seq_file *m, struct irq_domain *d, + struct irq_data *irqd, int ind) +{ + if (!irqd) { + imsic_vector_debug_show_summary(m, ind); + return; + } + + imsic_vector_debug_show(m, irq_data_get_irq_chip_data(irqd), ind); +} +#endif + +static const struct irq_domain_ops imsic_base_domain_ops = { + .alloc = imsic_irq_domain_alloc, + .free = imsic_irq_domain_free, + .select = msi_lib_irq_domain_select, +#ifdef CONFIG_GENERIC_IRQ_DEBUGFS + .debug_show = imsic_irq_debug_show, +#endif +}; + +static bool imsic_init_dev_msi_info(struct device *dev, struct irq_domain *domain, + struct irq_domain *real_parent, struct msi_domain_info *info) +{ + if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info)) + return false; + + switch (info->bus_token) { + case DOMAIN_BUS_PCI_DEVICE_MSI: + case DOMAIN_BUS_PCI_DEVICE_MSIX: + info->chip->flags |= IRQCHIP_MOVE_DEFERRED; + break; + default: + break; + } + + return true; +} + +static const struct msi_parent_ops imsic_msi_parent_ops = { + .supported_flags = MSI_GENERIC_FLAGS_MASK | + MSI_FLAG_PCI_MSIX, + .required_flags = MSI_FLAG_USE_DEF_DOM_OPS | + MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_PCI_MSI_MASK_PARENT, + .chip_flags = MSI_CHIP_FLAG_SET_ACK, + .bus_select_token = DOMAIN_BUS_NEXUS, + .bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI, + .init_dev_msi_info = imsic_init_dev_msi_info, +}; + +int imsic_irqdomain_init(void) +{ + struct irq_domain_info info = { + .ops = &imsic_base_domain_ops, + .host_data = imsic, + }; + struct imsic_global_config *global; + + if (!imsic || !imsic->fwnode) { + pr_err("early driver not probed\n"); + return -ENODEV; + } + + if (imsic->base_domain) { + pr_err("%pfwP: irq domain already created\n", imsic->fwnode); + return -ENODEV; + } + + /* Create Base IRQ domain */ + info.fwnode = imsic->fwnode, + imsic->base_domain = msi_create_parent_irq_domain(&info, &imsic_msi_parent_ops); + if (!imsic->base_domain) { + pr_err("%pfwP: failed to create IMSIC base domain\n", imsic->fwnode); + return -ENOMEM; + } + + global = &imsic->global; + pr_info("%pfwP: hart-index-bits: %d, guest-index-bits: %d\n", + imsic->fwnode, global->hart_index_bits, global->guest_index_bits); + pr_info("%pfwP: group-index-bits: %d, group-index-shift: %d\n", + imsic->fwnode, global->group_index_bits, global->group_index_shift); + pr_info("%pfwP: per-CPU IDs %d at base address %pa\n", + imsic->fwnode, global->nr_ids, &global->base_addr); + pr_info("%pfwP: total %d interrupts available\n", + imsic->fwnode, num_possible_cpus() * (global->nr_ids - 1)); + + return 0; +} + +static int imsic_platform_probe_common(struct fwnode_handle *fwnode) +{ + if (imsic && imsic->fwnode != fwnode) { + pr_err("%pfwP: fwnode mismatch\n", fwnode); + return -ENODEV; + } + + return imsic_irqdomain_init(); +} + +static int 
imsic_platform_dt_probe(struct platform_device *pdev)
+{
+ return imsic_platform_probe_common(pdev->dev.fwnode);
+}
+
+#ifdef CONFIG_ACPI
+
+/*
+ * On ACPI based systems, PCI enumeration happens early during boot in
+ * acpi_scan_init(). PCI enumeration expects the MSI domain to be set
+ * up before it calls pci_set_msi_domain(). Hence, unlike on DT, where
+ * the imsic-platform driver probe happens late during boot, ACPI based
+ * systems need to set up the MSI domain early.
+ */
+int imsic_platform_acpi_probe(struct fwnode_handle *fwnode)
+{
+ return imsic_platform_probe_common(fwnode);
+}
+
+#endif
+
+static const struct of_device_id imsic_platform_match[] = {
+ { .compatible = "riscv,imsics" },
+ {}
+};
+
+static struct platform_driver imsic_platform_driver = {
+ .driver = {
+ .name = "riscv-imsic",
+ .of_match_table = imsic_platform_match,
+ },
+ .probe = imsic_platform_dt_probe,
+};
+builtin_platform_driver(imsic_platform_driver); diff --git a/drivers/irqchip/irq-riscv-imsic-state.c b/drivers/irqchip/irq-riscv-imsic-state.c new file mode 100644 index 000000000000..385368052d5c --- /dev/null +++ b/drivers/irqchip/irq-riscv-imsic-state.c @@ -0,0 +1,953 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+
+#define pr_fmt(fmt) "riscv-imsic: " fmt
+#include <linux/acpi.h>
+#include <linux/cpu.h>
+#include <linux/bitmap.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <asm/hwcap.h>
+
+#include "irq-riscv-imsic-state.h"
+
+#define IMSIC_DISABLE_EIDELIVERY 0
+#define IMSIC_ENABLE_EIDELIVERY 1
+#define IMSIC_DISABLE_EITHRESHOLD 1
+#define IMSIC_ENABLE_EITHRESHOLD 0
+
+static inline void imsic_csr_write(unsigned long reg, unsigned long val)
+{
+ csr_write(CSR_ISELECT, reg);
+ csr_write(CSR_IREG, val);
+}
+
+static inline unsigned long imsic_csr_read(unsigned long reg)
+{
+ csr_write(CSR_ISELECT, reg);
+ return csr_read(CSR_IREG);
+}
+
+static inline unsigned long imsic_csr_read_clear(unsigned long reg, unsigned long val)
+{
+ csr_write(CSR_ISELECT, reg);
+ return csr_read_clear(CSR_IREG, val);
+}
+
+static inline void imsic_csr_set(unsigned long reg, unsigned long val)
+{
+ csr_write(CSR_ISELECT, reg);
+ csr_set(CSR_IREG, val);
+}
+
+static inline void imsic_csr_clear(unsigned long reg, unsigned long val)
+{
+ csr_write(CSR_ISELECT, reg);
+ csr_clear(CSR_IREG, val);
+}
+
+struct imsic_priv *imsic;
+
+const struct imsic_global_config *imsic_get_global_config(void)
+{
+ return imsic ? &imsic->global : NULL;
+}
+EXPORT_SYMBOL_GPL(imsic_get_global_config);
+
+static bool __imsic_eix_read_clear(unsigned long id, bool pend)
+{
+ unsigned long isel, imask;
+
+ isel = id / BITS_PER_LONG;
+ isel *= BITS_PER_LONG / IMSIC_EIPx_BITS;
+ isel += pend ?
IMSIC_EIP0 : IMSIC_EIE0;
+ imask = BIT(id & (__riscv_xlen - 1));
+
+ return !!(imsic_csr_read_clear(isel, imask) & imask);
+}
+
+static inline bool __imsic_id_read_clear_enabled(unsigned long id)
+{
+ return __imsic_eix_read_clear(id, false);
+}
+
+static inline bool __imsic_id_read_clear_pending(unsigned long id)
+{
+ return __imsic_eix_read_clear(id, true);
+}
+
+void __imsic_eix_update(unsigned long base_id, unsigned long num_id, bool pend, bool val)
+{
+ unsigned long id = base_id, last_id = base_id + num_id;
+ unsigned long i, isel, ireg;
+
+ while (id < last_id) {
+ isel = id / BITS_PER_LONG;
+ isel *= BITS_PER_LONG / IMSIC_EIPx_BITS;
+ isel += pend ? IMSIC_EIP0 : IMSIC_EIE0;
+
+ /*
+ * Prepare the ID mask to be programmed in the
+ * IMSIC EIEx and EIPx registers. These registers
+ * are XLEN-wide and we must not touch IDs which
+ * are < base_id and >= (base_id + num_id).
+ */
+ ireg = 0;
+ for (i = id & (__riscv_xlen - 1); id < last_id && i < __riscv_xlen; i++) {
+ ireg |= BIT(i);
+ id++;
+ }
+
+ /*
+ * The IMSIC EIEx and EIPx registers are indirectly
+ * accessed using the ISELECT and IREG CSRs, so we
+ * need to access these CSRs without getting preempted.
+ *
+ * All existing users of this function call this
+ * function with local IRQs disabled, so we don't
+ * need to do anything special here.
+ */
+ if (val)
+ imsic_csr_set(isel, ireg);
+ else
+ imsic_csr_clear(isel, ireg);
+ }
+}
+
+static bool __imsic_local_sync(struct imsic_local_priv *lpriv)
+{
+ struct imsic_local_config *tlocal, *mlocal;
+ struct imsic_vector *vec, *tvec, *mvec;
+ bool ret = true;
+ int i;
+
+ lockdep_assert_held(&lpriv->lock);
+
+ for_each_set_bit(i, lpriv->dirty_bitmap, imsic->global.nr_ids + 1) {
+ if (!i || (!imsic_noipi && i == IMSIC_IPI_ID))
+ goto skip;
+ vec = &lpriv->vectors[i];
+
+ if (READ_ONCE(vec->enable))
+ __imsic_id_set_enable(i);
+ else
+ __imsic_id_clear_enable(i);
+
+ /*
+ * Clear the previous vector pointer of the new vector only
+ * after the movement is complete on the old CPU.
+ */
+ mvec = READ_ONCE(vec->move_prev);
+ if (mvec) {
+ /*
+ * If the old vector has not been updated then
+ * try again in the next sync-up call.
+ */
+ if (READ_ONCE(mvec->move_next)) {
+ ret = false;
+ continue;
+ }
+
+ WRITE_ONCE(vec->move_prev, NULL);
+ }
+
+ /*
+ * If a vector was being moved to a new vector on some other
+ * CPU then we can get an MSI during the movement, so check
+ * the ID pending bit and re-trigger the new ID on the other
+ * CPU using an MMIO write.
+ */
+ mvec = READ_ONCE(vec->move_next);
+ if (mvec) {
+ /*
+ * Devices having a non-atomic MSI update might see
+ * an intermediate state, so check both the old ID
+ * and the new ID for pending interrupts.
+ *
+ * For details, see imsic_irq_set_affinity().
+ */
+ tvec = vec->local_id == mvec->local_id ?
+ NULL : &lpriv->vectors[mvec->local_id]; + + if (tvec && !irq_can_move_in_process_context(irq_get_irq_data(vec->irq)) && + __imsic_id_read_clear_pending(tvec->local_id)) { + /* Retrigger temporary vector if it was already in-use */ + if (READ_ONCE(tvec->enable)) { + tlocal = per_cpu_ptr(imsic->global.local, tvec->cpu); + writel_relaxed(tvec->local_id, tlocal->msi_va); + } + + mlocal = per_cpu_ptr(imsic->global.local, mvec->cpu); + writel_relaxed(mvec->local_id, mlocal->msi_va); + } + + if (__imsic_id_read_clear_pending(vec->local_id)) { + mlocal = per_cpu_ptr(imsic->global.local, mvec->cpu); + writel_relaxed(mvec->local_id, mlocal->msi_va); + } + + WRITE_ONCE(vec->move_next, NULL); + imsic_vector_free(vec); + } + +skip: + bitmap_clear(lpriv->dirty_bitmap, i, 1); + } + + return ret; +} + +#ifdef CONFIG_SMP +static void __imsic_local_timer_start(struct imsic_local_priv *lpriv, unsigned int cpu) +{ + lockdep_assert_held(&lpriv->lock); + + if (!timer_pending(&lpriv->timer)) { + lpriv->timer.expires = jiffies + 1; + add_timer_on(&lpriv->timer, cpu); + } +} +#else +static inline void __imsic_local_timer_start(struct imsic_local_priv *lpriv, unsigned int cpu) +{ +} +#endif + +void imsic_local_sync_all(bool force_all) +{ + struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv); + unsigned long flags; + + raw_spin_lock_irqsave(&lpriv->lock, flags); + + if (force_all) + bitmap_fill(lpriv->dirty_bitmap, imsic->global.nr_ids + 1); + if (!__imsic_local_sync(lpriv)) + __imsic_local_timer_start(lpriv, smp_processor_id()); + + raw_spin_unlock_irqrestore(&lpriv->lock, flags); +} + +void imsic_local_delivery(bool enable) +{ + if (enable) { + imsic_csr_write(IMSIC_EITHRESHOLD, IMSIC_ENABLE_EITHRESHOLD); + imsic_csr_write(IMSIC_EIDELIVERY, IMSIC_ENABLE_EIDELIVERY); + return; + } + + imsic_csr_write(IMSIC_EIDELIVERY, IMSIC_DISABLE_EIDELIVERY); + imsic_csr_write(IMSIC_EITHRESHOLD, IMSIC_DISABLE_EITHRESHOLD); +} + +#ifdef CONFIG_SMP +static void imsic_local_timer_callback(struct timer_list *timer) +{ + imsic_local_sync_all(false); +} + +static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu) +{ + lockdep_assert_held(&lpriv->lock); + + /* + * The spinlock acquire/release semantics ensure that changes + * to vector enable, vector move and dirty bitmap are visible + * to the target CPU. + */ + + /* + * We schedule a timer on the target CPU if the target CPU is not + * same as the current CPU. An offline CPU will unconditionally + * synchronize IDs through imsic_starting_cpu() when the + * CPU is brought up. + */ + if (cpu_online(cpu)) { + if (cpu == smp_processor_id()) { + if (__imsic_local_sync(lpriv)) + return; + } + + __imsic_local_timer_start(lpriv, cpu); + } +} +#else +static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu) +{ + lockdep_assert_held(&lpriv->lock); + __imsic_local_sync(lpriv); +} +#endif + +void imsic_vector_mask(struct imsic_vector *vec) +{ + struct imsic_local_priv *lpriv; + + lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu); + if (WARN_ON_ONCE(&lpriv->vectors[vec->local_id] != vec)) + return; + + /* + * This function is called through Linux irq subsystem with + * irqs disabled so no need to save/restore irq flags. 
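+ * Hence the plain raw_spin_lock() below; contrast this with
+ * imsic_vector_force_move_cleanup(), which cannot make the same
+ * assumption and therefore uses the irqsave variant.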
+
+void imsic_vector_mask(struct imsic_vector *vec)
+{
+	struct imsic_local_priv *lpriv;
+
+	lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);
+	if (WARN_ON_ONCE(&lpriv->vectors[vec->local_id] != vec))
+		return;
+
+	/*
+	 * This function is called through the Linux irq subsystem with
+	 * irqs disabled, so there is no need to save/restore irq flags.
+	 */
+
+	raw_spin_lock(&lpriv->lock);
+
+	WRITE_ONCE(vec->enable, false);
+	bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1);
+	__imsic_remote_sync(lpriv, vec->cpu);
+
+	raw_spin_unlock(&lpriv->lock);
+}
+
+void imsic_vector_unmask(struct imsic_vector *vec)
+{
+	struct imsic_local_priv *lpriv;
+
+	lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);
+	if (WARN_ON_ONCE(&lpriv->vectors[vec->local_id] != vec))
+		return;
+
+	/*
+	 * This function is called through the Linux irq subsystem with
+	 * irqs disabled, so there is no need to save/restore irq flags.
+	 */
+
+	raw_spin_lock(&lpriv->lock);
+
+	WRITE_ONCE(vec->enable, true);
+	bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1);
+	__imsic_remote_sync(lpriv, vec->cpu);
+
+	raw_spin_unlock(&lpriv->lock);
+}
+
+void imsic_vector_force_move_cleanup(struct imsic_vector *vec)
+{
+	struct imsic_local_priv *lpriv;
+	struct imsic_vector *mvec;
+	unsigned long flags;
+
+	lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);
+	raw_spin_lock_irqsave(&lpriv->lock, flags);
+
+	mvec = READ_ONCE(vec->move_prev);
+	WRITE_ONCE(vec->move_prev, NULL);
+	if (mvec)
+		imsic_vector_free(mvec);
+
+	raw_spin_unlock_irqrestore(&lpriv->lock, flags);
+}
+
+static bool imsic_vector_move_update(struct imsic_local_priv *lpriv,
+				     struct imsic_vector *vec, bool is_old_vec,
+				     bool new_enable, struct imsic_vector *move_vec)
+{
+	unsigned long flags;
+	bool enabled;
+
+	raw_spin_lock_irqsave(&lpriv->lock, flags);
+
+	/* Update enable and move details */
+	enabled = READ_ONCE(vec->enable);
+	WRITE_ONCE(vec->enable, new_enable);
+	if (is_old_vec)
+		WRITE_ONCE(vec->move_next, move_vec);
+	else
+		WRITE_ONCE(vec->move_prev, move_vec);
+
+	/* Mark the vector as dirty and synchronize */
+	bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1);
+	__imsic_remote_sync(lpriv, vec->cpu);
+
+	raw_spin_unlock_irqrestore(&lpriv->lock, flags);
+
+	return enabled;
+}
+
+void imsic_vector_move(struct imsic_vector *old_vec, struct imsic_vector *new_vec)
+{
+	struct imsic_local_priv *old_lpriv, *new_lpriv;
+	bool enabled;
+
+	if (WARN_ON_ONCE(old_vec->cpu == new_vec->cpu))
+		return;
+
+	old_lpriv = per_cpu_ptr(imsic->lpriv, old_vec->cpu);
+	if (WARN_ON_ONCE(&old_lpriv->vectors[old_vec->local_id] != old_vec))
+		return;
+
+	new_lpriv = per_cpu_ptr(imsic->lpriv, new_vec->cpu);
+	if (WARN_ON_ONCE(&new_lpriv->vectors[new_vec->local_id] != new_vec))
+		return;
+
+	/*
+	 * Move and re-trigger the new vector based on the pending
+	 * state of the old vector, because we might get a device
+	 * interrupt on the old vector while the device was being
+	 * moved to the new vector.
+	 */
+	enabled = imsic_vector_move_update(old_lpriv, old_vec, true, false, new_vec);
+	imsic_vector_move_update(new_lpriv, new_vec, false, enabled, old_vec);
+}
+
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+void imsic_vector_debug_show(struct seq_file *m, struct imsic_vector *vec, int ind)
+{
+	struct imsic_local_priv *lpriv;
+	struct imsic_vector *mvec;
+	bool is_enabled;
+
+	lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);
+	if (WARN_ON_ONCE(&lpriv->vectors[vec->local_id] != vec))
+		return;
+
+	is_enabled = imsic_vector_isenabled(vec);
+	mvec = imsic_vector_get_move(vec);
+
+	seq_printf(m, "%*starget_cpu : %5u\n", ind, "", vec->cpu);
+	seq_printf(m, "%*starget_local_id : %5u\n", ind, "", vec->local_id);
+	seq_printf(m, "%*sis_reserved : %5u\n", ind, "",
+		   (!imsic_noipi && vec->local_id <= IMSIC_IPI_ID) ? 1 : 0);
+	seq_printf(m, "%*sis_enabled : %5u\n", ind, "", is_enabled ? 1 : 0);
+	seq_printf(m, "%*sis_move_pending : %5u\n", ind, "", mvec ?
1 : 0); + if (mvec) { + seq_printf(m, "%*smove_cpu : %5u\n", ind, "", mvec->cpu); + seq_printf(m, "%*smove_local_id : %5u\n", ind, "", mvec->local_id); + } +} + +void imsic_vector_debug_show_summary(struct seq_file *m, int ind) +{ + irq_matrix_debug_show(m, imsic->matrix, ind); +} +#endif + +struct imsic_vector *imsic_vector_alloc(unsigned int irq, const struct cpumask *mask) +{ + struct imsic_vector *vec = NULL; + struct imsic_local_priv *lpriv; + unsigned long flags; + unsigned int cpu; + int local_id; + + raw_spin_lock_irqsave(&imsic->matrix_lock, flags); + local_id = irq_matrix_alloc(imsic->matrix, mask, false, &cpu); + raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags); + if (local_id < 0) + return NULL; + + lpriv = per_cpu_ptr(imsic->lpriv, cpu); + vec = &lpriv->vectors[local_id]; + vec->irq = irq; + vec->enable = false; + vec->move_next = NULL; + vec->move_prev = NULL; + + return vec; +} + +void imsic_vector_free(struct imsic_vector *vec) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&imsic->matrix_lock, flags); + vec->irq = 0; + irq_matrix_free(imsic->matrix, vec->cpu, vec->local_id, false); + raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags); +} + +static void __init imsic_local_cleanup(void) +{ + struct imsic_local_priv *lpriv; + int cpu; + + for_each_possible_cpu(cpu) { + lpriv = per_cpu_ptr(imsic->lpriv, cpu); + + bitmap_free(lpriv->dirty_bitmap); + } + + free_percpu(imsic->lpriv); +} + +static int __init imsic_local_init(void) +{ + struct imsic_global_config *global = &imsic->global; + struct imsic_local_priv *lpriv; + struct imsic_vector *vec; + int cpu, i; + + /* Allocate per-CPU private state */ + imsic->lpriv = __alloc_percpu(struct_size(imsic->lpriv, vectors, global->nr_ids + 1), + __alignof__(*imsic->lpriv)); + if (!imsic->lpriv) + return -ENOMEM; + + /* Setup per-CPU private state */ + for_each_possible_cpu(cpu) { + lpriv = per_cpu_ptr(imsic->lpriv, cpu); + + raw_spin_lock_init(&lpriv->lock); + + /* Allocate dirty bitmap */ + lpriv->dirty_bitmap = bitmap_zalloc(global->nr_ids + 1, GFP_KERNEL); + if (!lpriv->dirty_bitmap) + goto fail_local_cleanup; + +#ifdef CONFIG_SMP + /* Setup lazy timer for synchronization */ + timer_setup(&lpriv->timer, imsic_local_timer_callback, TIMER_PINNED); +#endif + + /* Setup vector array */ + for (i = 0; i <= global->nr_ids; i++) { + vec = &lpriv->vectors[i]; + vec->cpu = cpu; + vec->local_id = i; + vec->irq = 0; + } + } + + return 0; + +fail_local_cleanup: + imsic_local_cleanup(); + return -ENOMEM; +} + +void imsic_state_online(void) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&imsic->matrix_lock, flags); + irq_matrix_online(imsic->matrix); + raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags); +} + +void imsic_state_offline(void) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&imsic->matrix_lock, flags); + irq_matrix_offline(imsic->matrix); + raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags); + +#ifdef CONFIG_SMP + struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv); + + raw_spin_lock_irqsave(&lpriv->lock, flags); + WARN_ON_ONCE(timer_delete_sync_try(&lpriv->timer) < 0); + raw_spin_unlock_irqrestore(&lpriv->lock, flags); +#endif +} + +static int __init imsic_matrix_init(void) +{ + struct imsic_global_config *global = &imsic->global; + + raw_spin_lock_init(&imsic->matrix_lock); + imsic->matrix = irq_alloc_matrix(global->nr_ids + 1, + 0, global->nr_ids + 1); + if (!imsic->matrix) + return -ENOMEM; + + /* Reserve ID#0 because it is special and never implemented */ + 
irq_matrix_assign_system(imsic->matrix, 0, false);
+
+	/* Reserve IPI ID because it is special and used internally */
+	if (!imsic_noipi)
+		irq_matrix_assign_system(imsic->matrix, IMSIC_IPI_ID, false);
+
+	return 0;
+}
+
+static int __init imsic_populate_global_dt(struct fwnode_handle *fwnode,
+					   struct imsic_global_config *global,
+					   u32 *nr_parent_irqs)
+{
+	int rc;
+
+	/* Find number of guest index bits in MSI address */
+	rc = of_property_read_u32(to_of_node(fwnode), "riscv,guest-index-bits",
+				  &global->guest_index_bits);
+	if (rc)
+		global->guest_index_bits = 0;
+
+	/* Find number of HART index bits */
+	rc = of_property_read_u32(to_of_node(fwnode), "riscv,hart-index-bits",
+				  &global->hart_index_bits);
+	if (rc) {
+		/* Assume default value */
+		global->hart_index_bits = __fls(*nr_parent_irqs);
+		if (BIT(global->hart_index_bits) < *nr_parent_irqs)
+			global->hart_index_bits++;
+	}
+
+	/* Find number of group index bits */
+	rc = of_property_read_u32(to_of_node(fwnode), "riscv,group-index-bits",
+				  &global->group_index_bits);
+	if (rc)
+		global->group_index_bits = 0;
+
+	/*
+	 * Find first bit position of group index.
+	 * If not specified, assume the default APLIC-IMSIC configuration.
+	 */
+	rc = of_property_read_u32(to_of_node(fwnode), "riscv,group-index-shift",
+				  &global->group_index_shift);
+	if (rc)
+		global->group_index_shift = IMSIC_MMIO_PAGE_SHIFT * 2;
+
+	/* Find number of interrupt identities */
+	rc = of_property_read_u32(to_of_node(fwnode), "riscv,num-ids",
+				  &global->nr_ids);
+	if (rc) {
+		pr_err("%pfwP: number of interrupt identities not found\n", fwnode);
+		return rc;
+	}
+
+	/* Find number of guest interrupt identities */
+	rc = of_property_read_u32(to_of_node(fwnode), "riscv,num-guest-ids",
+				  &global->nr_guest_ids);
+	if (rc)
+		global->nr_guest_ids = global->nr_ids;
+
+	return 0;
+}
+
+static int __init imsic_populate_global_acpi(struct fwnode_handle *fwnode,
+					     struct imsic_global_config *global,
+					     u32 *nr_parent_irqs, void *opaque)
+{
+	struct acpi_madt_imsic *imsic = (struct acpi_madt_imsic *)opaque;
+
+	global->guest_index_bits = imsic->guest_index_bits;
+	global->hart_index_bits = imsic->hart_index_bits;
+	global->group_index_bits = imsic->group_index_bits;
+	global->group_index_shift = imsic->group_index_shift;
+	global->nr_ids = imsic->num_ids;
+	global->nr_guest_ids = imsic->num_guest_ids;
+	return 0;
+}
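The riscv,hart-index-bits fallback above computes ceil(log2(nr_parent_irqs)) with the "__fls() then bump" idiom. A standalone equivalent follows, using __builtin_clz in place of the kernel's __fls(); it is illustrative only and assumes nr_parent_irqs >= 1.

/* Standalone sketch of the hart-index-bits default computation. */
#include <stdio.h>

static unsigned int hart_index_bits(unsigned int nr_parent_irqs)
{
	/* Index of the highest set bit, i.e. floor(log2(n)). */
	unsigned int bits = 31 - __builtin_clz(nr_parent_irqs);

	/* Bump by one when n is not a power of two. */
	if ((1U << bits) < nr_parent_irqs)
		bits++;
	return bits;
}

int main(void)
{
	/* 4 harts need 2 bits, 5 harts need 3 bits, 1 hart needs 0 bits. */
	for (unsigned int n = 1; n <= 6; n++)
		printf("%u parent irqs -> %u hart index bits\n", n, hart_index_bits(n));
	return 0;
}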
+static int __init imsic_get_parent_hartid(struct fwnode_handle *fwnode,
+					  u32 index, unsigned long *hartid)
+{
+	struct of_phandle_args parent;
+	int rc;
+
+	if (!is_of_node(fwnode)) {
+		if (hartid)
+			*hartid = acpi_rintc_index_to_hartid(index);
+
+		if (!hartid || (*hartid == INVALID_HARTID))
+			return -EINVAL;
+
+		return 0;
+	}
+
+	rc = of_irq_parse_one(to_of_node(fwnode), index, &parent);
+	if (rc)
+		return rc;
+
+	/*
+	 * Skip interrupts other than external interrupts for
+	 * current privilege level.
+	 */
+	if (parent.args[0] != RV_IRQ_EXT)
+		return -EINVAL;
+
+	return riscv_of_parent_hartid(parent.np, hartid);
+}
+
+static int __init imsic_get_mmio_resource(struct fwnode_handle *fwnode,
+					  u32 index, struct resource *res)
+{
+	if (!is_of_node(fwnode))
+		return acpi_rintc_get_imsic_mmio_info(index, res);
+
+	return of_address_to_resource(to_of_node(fwnode), index, res);
+}
+
+static int __init imsic_parse_fwnode(struct fwnode_handle *fwnode,
+				     struct imsic_global_config *global,
+				     u32 *nr_parent_irqs,
+				     u32 *nr_mmios,
+				     void *opaque)
+{
+	unsigned long hartid;
+	struct resource res;
+	int rc;
+	u32 i;
+
+	*nr_parent_irqs = 0;
+	*nr_mmios = 0;
+
+	/* Find number of parent interrupts */
+	while (!imsic_get_parent_hartid(fwnode, *nr_parent_irqs, &hartid))
+		(*nr_parent_irqs)++;
+	if (!*nr_parent_irqs) {
+		pr_err("%pfwP: no parent irqs available\n", fwnode);
+		return -EINVAL;
+	}
+
+	if (is_of_node(fwnode))
+		rc = imsic_populate_global_dt(fwnode, global, nr_parent_irqs);
+	else
+		rc = imsic_populate_global_acpi(fwnode, global, nr_parent_irqs, opaque);
+
+	if (rc)
+		return rc;
+
+	/* Sanity check guest index bits */
+	i = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT;
+	if (i < global->guest_index_bits) {
+		pr_err("%pfwP: guest index bits too big\n", fwnode);
+		return -EINVAL;
+	}
+
+	/* Sanity check HART index bits */
+	i = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT - global->guest_index_bits;
+	if (i < global->hart_index_bits) {
+		pr_err("%pfwP: HART index bits too big\n", fwnode);
+		return -EINVAL;
+	}
+
+	/* Sanity check group index bits */
+	i = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT -
+	    global->guest_index_bits - global->hart_index_bits;
+	if (i < global->group_index_bits) {
+		pr_err("%pfwP: group index bits too big\n", fwnode);
+		return -EINVAL;
+	}
+
+	/* Sanity check group index shift */
+	i = global->group_index_bits + global->group_index_shift - 1;
+	if (i >= BITS_PER_LONG) {
+		pr_err("%pfwP: group index shift too big\n", fwnode);
+		return -EINVAL;
+	}
+
+	/* Sanity check number of interrupt identities */
+	if (global->nr_ids < IMSIC_MIN_ID ||
+	    global->nr_ids >= IMSIC_MAX_ID ||
+	    (global->nr_ids & IMSIC_MIN_ID) != IMSIC_MIN_ID) {
+		pr_err("%pfwP: invalid number of interrupt identities\n", fwnode);
+		return -EINVAL;
+	}
+
+	/* Sanity check number of guest interrupt identities */
+	if (global->nr_guest_ids < IMSIC_MIN_ID ||
+	    global->nr_guest_ids >= IMSIC_MAX_ID ||
+	    (global->nr_guest_ids & IMSIC_MIN_ID) != IMSIC_MIN_ID) {
+		pr_err("%pfwP: invalid number of guest interrupt identities\n", fwnode);
+		return -EINVAL;
+	}
+
+	/* Compute base address */
+	rc = imsic_get_mmio_resource(fwnode, 0, &res);
+	if (rc) {
+		pr_err("%pfwP: first MMIO resource not found\n", fwnode);
+		return -EINVAL;
+	}
+	global->base_addr = res.start;
+	global->base_addr &= ~(BIT(global->guest_index_bits +
+				   global->hart_index_bits +
+				   IMSIC_MMIO_PAGE_SHIFT) - 1);
+	global->base_addr &= ~((BIT(global->group_index_bits) - 1) <<
+			       global->group_index_shift);
+
+	/* Find number of MMIO register sets */
+	while (!imsic_get_mmio_resource(fwnode, *nr_mmios, &res))
+		(*nr_mmios)++;
+
+	return 0;
+}
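The base-address computation above strips the per-guest/per-hart page index and the group index out of the first register-set address, and the same masking is repeated for every register set during setup below. A minimal sketch of those two masking steps follows; the example address and bit widths are made up.

/* Standalone sketch of the IMSIC base-address masking. */
#include <stdio.h>

#define IMSIC_MMIO_PAGE_SHIFT	12

static unsigned long long imsic_base(unsigned long long regset_start,
				     unsigned int guest_bits,
				     unsigned int hart_bits,
				     unsigned int group_bits,
				     unsigned int group_shift)
{
	unsigned long long addr = regset_start;

	/* Clear the guest-index, hart-index and page-offset bits... */
	addr &= ~((1ULL << (guest_bits + hart_bits + IMSIC_MMIO_PAGE_SHIFT)) - 1);
	/* ...and the group-index field at its configured shift. */
	addr &= ~(((1ULL << group_bits) - 1) << group_shift);
	return addr;
}

int main(void)
{
	/* 2 hart-index bits, no guest/group bits: MSI pages at base + hart * 0x1000 */
	unsigned long long base = imsic_base(0x28003000ULL, 0, 2, 0, 24);

	printf("recovered base: 0x%llx\n", base);	/* prints 0x28000000 */
	return 0;
}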
+int __init imsic_setup_state(struct fwnode_handle *fwnode, void *opaque)
+{
+	u32 i, j, index, nr_parent_irqs, nr_mmios, nr_handlers = 0;
+	struct imsic_global_config *global;
+	struct imsic_local_config *local;
+	void __iomem **mmios_va = NULL;
+	struct resource *mmios = NULL;
+	unsigned long reloff, hartid;
+	phys_addr_t base_addr;
+	int rc, cpu;
+
+	/*
+	 * Only one IMSIC instance is allowed per platform for a clean
+	 * implementation of SMP IRQ affinity and per-CPU IPIs.
+	 *
+	 * This means on a multi-socket (or multi-die) platform we
+	 * will have multiple MMIO regions for one IMSIC instance.
+	 */
+	if (imsic) {
+		pr_err("%pfwP: already initialized hence ignoring\n", fwnode);
+		return -EALREADY;
+	}
+
+	if (!riscv_isa_extension_available(NULL, SxAIA)) {
+		pr_err("%pfwP: AIA support not available\n", fwnode);
+		return -ENODEV;
+	}
+
+	imsic = kzalloc(sizeof(*imsic), GFP_KERNEL);
+	if (!imsic)
+		return -ENOMEM;
+	imsic->fwnode = fwnode;
+	global = &imsic->global;
+
+	global->local = alloc_percpu(typeof(*global->local));
+	if (!global->local) {
+		rc = -ENOMEM;
+		goto out_free_priv;
+	}
+
+	/* Parse IMSIC fwnode */
+	rc = imsic_parse_fwnode(fwnode, global, &nr_parent_irqs, &nr_mmios, opaque);
+	if (rc)
+		goto out_free_local;
+
+	/* Allocate MMIO resource array */
+	mmios = kcalloc(nr_mmios, sizeof(*mmios), GFP_KERNEL);
+	if (!mmios) {
+		rc = -ENOMEM;
+		goto out_free_local;
+	}
+
+	/* Allocate MMIO virtual address array */
+	mmios_va = kcalloc(nr_mmios, sizeof(*mmios_va), GFP_KERNEL);
+	if (!mmios_va) {
+		rc = -ENOMEM;
+		goto out_iounmap;
+	}
+
+	/* Parse and map MMIO register sets */
+	for (i = 0; i < nr_mmios; i++) {
+		rc = imsic_get_mmio_resource(fwnode, i, &mmios[i]);
+		if (rc) {
+			pr_err("%pfwP: unable to parse MMIO regset %d\n", fwnode, i);
+			goto out_iounmap;
+		}
+
+		base_addr = mmios[i].start;
+		base_addr &= ~(BIT(global->guest_index_bits +
+				   global->hart_index_bits +
+				   IMSIC_MMIO_PAGE_SHIFT) - 1);
+		base_addr &= ~((BIT(global->group_index_bits) - 1) <<
+			       global->group_index_shift);
+		if (base_addr != global->base_addr) {
+			rc = -EINVAL;
+			pr_err("%pfwP: address mismatch for regset %d\n", fwnode, i);
+			goto out_iounmap;
+		}
+
+		mmios_va[i] = ioremap(mmios[i].start, resource_size(&mmios[i]));
+		if (!mmios_va[i]) {
+			rc = -EIO;
+			pr_err("%pfwP: unable to map MMIO regset %d\n", fwnode, i);
+			goto out_iounmap;
+		}
+	}
+
+	/* Initialize local (or per-CPU) state */
+	rc = imsic_local_init();
+	if (rc) {
+		pr_err("%pfwP: failed to initialize local state\n",
+		       fwnode);
+		goto out_iounmap;
+	}
+
+	/* Configure handlers for target CPUs */
+	for (i = 0; i < nr_parent_irqs; i++) {
+		rc = imsic_get_parent_hartid(fwnode, i, &hartid);
+		if (rc) {
+			pr_warn("%pfwP: hart ID for parent irq%d not found\n", fwnode, i);
+			continue;
+		}
+
+		cpu = riscv_hartid_to_cpuid(hartid);
+		if (cpu < 0) {
+			pr_warn("%pfwP: invalid cpuid for parent irq%d\n", fwnode, i);
+			continue;
+		}
+
+		/* Find MMIO location of MSI page */
+		index = nr_mmios;
+		reloff = i * BIT(global->guest_index_bits) *
+			 IMSIC_MMIO_PAGE_SZ;
+		for (j = 0; j < nr_mmios; j++) {
+			if (reloff < resource_size(&mmios[j])) {
+				index = j;
+				break;
+			}
+
+			/*
+			 * MMIO region size may not be aligned to
+			 * BIT(global->guest_index_bits) * IMSIC_MMIO_PAGE_SZ
+			 * if holes are present.
+ */ + reloff -= ALIGN(resource_size(&mmios[j]), + BIT(global->guest_index_bits) * IMSIC_MMIO_PAGE_SZ); + } + if (index >= nr_mmios) { + pr_warn("%pfwP: MMIO not found for parent irq%d\n", fwnode, i); + continue; + } + + local = per_cpu_ptr(global->local, cpu); + local->msi_pa = mmios[index].start + reloff; + local->msi_va = mmios_va[index] + reloff; + + nr_handlers++; + } + + /* If no CPU handlers found then can't take interrupts */ + if (!nr_handlers) { + pr_err("%pfwP: No CPU handlers found\n", fwnode); + rc = -ENODEV; + goto out_local_cleanup; + } + + /* Initialize matrix allocator */ + rc = imsic_matrix_init(); + if (rc) { + pr_err("%pfwP: failed to create matrix allocator\n", fwnode); + goto out_local_cleanup; + } + + /* We don't need MMIO arrays anymore so let's free-up */ + kfree(mmios_va); + kfree(mmios); + + return 0; + +out_local_cleanup: + imsic_local_cleanup(); +out_iounmap: + for (i = 0; i < nr_mmios; i++) { + if (mmios_va[i]) + iounmap(mmios_va[i]); + } + kfree(mmios_va); + kfree(mmios); +out_free_local: + free_percpu(imsic->global.local); +out_free_priv: + kfree(imsic); + imsic = NULL; + return rc; +} diff --git a/drivers/irqchip/irq-riscv-imsic-state.h b/drivers/irqchip/irq-riscv-imsic-state.h new file mode 100644 index 000000000000..6332501dcbd8 --- /dev/null +++ b/drivers/irqchip/irq-riscv-imsic-state.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + * Copyright (C) 2022 Ventana Micro Systems Inc. + */ + +#ifndef _IRQ_RISCV_IMSIC_STATE_H +#define _IRQ_RISCV_IMSIC_STATE_H + +#include <linux/irqchip/riscv-imsic.h> +#include <linux/irqdomain.h> +#include <linux/fwnode.h> +#include <linux/timer.h> + +#define IMSIC_IPI_ID 1 +#define IMSIC_NR_IPI 8 + +struct imsic_vector { + /* Fixed details of the vector */ + unsigned int cpu; + unsigned int local_id; + /* Details saved by driver in the vector */ + unsigned int irq; + /* Details accessed using local lock held */ + bool enable; + struct imsic_vector *move_next; + struct imsic_vector *move_prev; +}; + +struct imsic_local_priv { + /* Local lock to protect vector enable/move variables and dirty bitmap */ + raw_spinlock_t lock; + + /* Local dirty bitmap for synchronization */ + unsigned long *dirty_bitmap; + +#ifdef CONFIG_SMP + /* Local timer for synchronization */ + struct timer_list timer; +#endif + + /* Local vector table */ + struct imsic_vector vectors[]; +}; + +struct imsic_priv { + /* Device details */ + struct fwnode_handle *fwnode; + + /* Global configuration common for all HARTs */ + struct imsic_global_config global; + + /* Per-CPU state */ + struct imsic_local_priv __percpu *lpriv; + + /* State of IRQ matrix allocator */ + raw_spinlock_t matrix_lock; + struct irq_matrix *matrix; + + /* IRQ domains (created by platform driver) */ + struct irq_domain *base_domain; +}; + +extern bool imsic_noipi; +extern struct imsic_priv *imsic; + +void __imsic_eix_update(unsigned long base_id, unsigned long num_id, bool pend, bool val); + +static inline void __imsic_id_set_enable(unsigned long id) +{ + __imsic_eix_update(id, 1, false, true); +} + +static inline void __imsic_id_clear_enable(unsigned long id) +{ + __imsic_eix_update(id, 1, false, false); +} + +void imsic_local_sync_all(bool force_all); +void imsic_local_delivery(bool enable); + +void imsic_vector_mask(struct imsic_vector *vec); +void imsic_vector_unmask(struct imsic_vector *vec); + +static inline bool imsic_vector_isenabled(struct imsic_vector *vec) +{ + return 
READ_ONCE(vec->enable); +} + +static inline struct imsic_vector *imsic_vector_get_move(struct imsic_vector *vec) +{ + return READ_ONCE(vec->move_prev); +} + +void imsic_vector_force_move_cleanup(struct imsic_vector *vec); +void imsic_vector_move(struct imsic_vector *old_vec, struct imsic_vector *new_vec); + +struct imsic_vector *imsic_vector_alloc(unsigned int irq, const struct cpumask *mask); +void imsic_vector_free(struct imsic_vector *vector); + +void imsic_vector_debug_show(struct seq_file *m, struct imsic_vector *vec, int ind); +void imsic_vector_debug_show_summary(struct seq_file *m, int ind); + +void imsic_state_online(void); +void imsic_state_offline(void); +int imsic_setup_state(struct fwnode_handle *fwnode, void *opaque); +int imsic_irqdomain_init(void); + +#endif diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c new file mode 100644 index 000000000000..70290b35b317 --- /dev/null +++ b/drivers/irqchip/irq-riscv-intc.c @@ -0,0 +1,394 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2012 Regents of the University of California + * Copyright (C) 2017-2018 SiFive + * Copyright (C) 2020 Western Digital Corporation or its affiliates. + */ + +#define pr_fmt(fmt) "riscv-intc: " fmt +#include <linux/acpi.h> +#include <linux/atomic.h> +#include <linux/bits.h> +#include <linux/cpu.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqdomain.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/smp.h> +#include <linux/soc/andes/irq.h> + +#include <asm/hwcap.h> + +static struct irq_domain *intc_domain; +static unsigned int riscv_intc_nr_irqs __ro_after_init = BITS_PER_LONG; +static unsigned int riscv_intc_custom_base __ro_after_init = BITS_PER_LONG; +static unsigned int riscv_intc_custom_nr_irqs __ro_after_init; + +static void riscv_intc_irq(struct pt_regs *regs) +{ + unsigned long cause = regs->cause & ~CAUSE_IRQ_FLAG; + + if (generic_handle_domain_irq(intc_domain, cause)) + pr_warn_ratelimited("Failed to handle interrupt (cause: %ld)\n", cause); +} + +static void riscv_intc_aia_irq(struct pt_regs *regs) +{ + unsigned long topi; + + while ((topi = csr_read(CSR_TOPI))) + generic_handle_domain_irq(intc_domain, topi >> TOPI_IID_SHIFT); +} + +/* + * On RISC-V systems local interrupts are masked or unmasked by writing + * the SIE (Supervisor Interrupt Enable) CSR. As CSRs can only be written + * on the local hart, these functions can only be called on the hart that + * corresponds to the IRQ chip. + */ + +static void riscv_intc_irq_mask(struct irq_data *d) +{ + if (IS_ENABLED(CONFIG_32BIT) && d->hwirq >= BITS_PER_LONG) + csr_clear(CSR_IEH, BIT(d->hwirq - BITS_PER_LONG)); + else + csr_clear(CSR_IE, BIT(d->hwirq)); +} + +static void riscv_intc_irq_unmask(struct irq_data *d) +{ + if (IS_ENABLED(CONFIG_32BIT) && d->hwirq >= BITS_PER_LONG) + csr_set(CSR_IEH, BIT(d->hwirq - BITS_PER_LONG)); + else + csr_set(CSR_IE, BIT(d->hwirq)); +} + +static void andes_intc_irq_mask(struct irq_data *d) +{ + /* + * Andes specific S-mode local interrupt causes (hwirq) + * are defined as (256 + n) and controlled by n-th bit + * of SLIE. 
+ */ + unsigned int mask = BIT(d->hwirq % BITS_PER_LONG); + + if (d->hwirq < ANDES_SLI_CAUSE_BASE) + csr_clear(CSR_IE, mask); + else + csr_clear(ANDES_CSR_SLIE, mask); +} + +static void andes_intc_irq_unmask(struct irq_data *d) +{ + unsigned int mask = BIT(d->hwirq % BITS_PER_LONG); + + if (d->hwirq < ANDES_SLI_CAUSE_BASE) + csr_set(CSR_IE, mask); + else + csr_set(ANDES_CSR_SLIE, mask); +} + +static void riscv_intc_irq_eoi(struct irq_data *d) +{ + /* + * The RISC-V INTC driver uses handle_percpu_devid_irq() flow + * for the per-HART local interrupts and child irqchip drivers + * (such as PLIC, SBI IPI, CLINT, APLIC, IMSIC, etc) implement + * chained handlers for the per-HART local interrupts. + * + * In the absence of irq_eoi(), the chained_irq_enter() and + * chained_irq_exit() functions (used by child irqchip drivers) + * will do unnecessary mask/unmask of per-HART local interrupts + * at the time of handling interrupts. To avoid this, we provide + * an empty irq_eoi() callback for RISC-V INTC irqchip. + */ +} + +static struct irq_chip riscv_intc_chip = { + .name = "RISC-V INTC", + .irq_mask = riscv_intc_irq_mask, + .irq_unmask = riscv_intc_irq_unmask, + .irq_eoi = riscv_intc_irq_eoi, +}; + +static struct irq_chip andes_intc_chip = { + .name = "RISC-V INTC", + .irq_mask = andes_intc_irq_mask, + .irq_unmask = andes_intc_irq_unmask, + .irq_eoi = riscv_intc_irq_eoi, +}; + +static int riscv_intc_domain_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hwirq) +{ + struct irq_chip *chip = d->host_data; + + irq_set_percpu_devid(irq); + irq_domain_set_info(d, irq, hwirq, chip, NULL, handle_percpu_devid_irq, + NULL, NULL); + + return 0; +} + +static int riscv_intc_domain_alloc(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs, + void *arg) +{ + int i, ret; + irq_hw_number_t hwirq; + unsigned int type = IRQ_TYPE_NONE; + struct irq_fwspec *fwspec = arg; + + ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type); + if (ret) + return ret; + + /* + * Only allow hwirq for which we have corresponding standard or + * custom interrupt enable register. + */ + if (hwirq >= riscv_intc_nr_irqs && + (hwirq < riscv_intc_custom_base || + hwirq >= riscv_intc_custom_base + riscv_intc_custom_nr_irqs)) + return -EINVAL; + + for (i = 0; i < nr_irqs; i++) { + ret = riscv_intc_domain_map(domain, virq + i, hwirq + i); + if (ret) + return ret; + } + + return 0; +} + +static const struct irq_domain_ops riscv_intc_domain_ops = { + .map = riscv_intc_domain_map, + .xlate = irq_domain_xlate_onecell, + .alloc = riscv_intc_domain_alloc, + .free = irq_domain_free_irqs_top, +}; + +static struct fwnode_handle *riscv_intc_hwnode(void) +{ + return intc_domain->fwnode; +} + +static int __init riscv_intc_init_common(struct fwnode_handle *fn, struct irq_chip *chip) +{ + int rc; + + intc_domain = irq_domain_create_tree(fn, &riscv_intc_domain_ops, chip); + if (!intc_domain) { + pr_err("unable to add IRQ domain\n"); + return -ENXIO; + } + + if (riscv_isa_extension_available(NULL, SxAIA)) { + riscv_intc_nr_irqs = 64; + rc = set_handle_irq(&riscv_intc_aia_irq); + } else { + rc = set_handle_irq(&riscv_intc_irq); + } + if (rc) { + pr_err("failed to set irq handler\n"); + return rc; + } + + riscv_set_intc_hwnode_fn(riscv_intc_hwnode); + + pr_info("%d local interrupts mapped%s\n", + riscv_intc_nr_irqs, + riscv_isa_extension_available(NULL, SxAIA) ? 
" using AIA" : ""); + if (riscv_intc_custom_nr_irqs) + pr_info("%d custom local interrupts mapped\n", riscv_intc_custom_nr_irqs); + + return 0; +} + +static int __init riscv_intc_init(struct device_node *node, + struct device_node *parent) +{ + struct irq_chip *chip = &riscv_intc_chip; + unsigned long hartid; + int rc; + + rc = riscv_of_parent_hartid(node, &hartid); + if (rc < 0) { + pr_warn("unable to find hart id for %pOF\n", node); + return 0; + } + + /* + * The DT will have one INTC DT node under each CPU (or HART) + * DT node so riscv_intc_init() function will be called once + * for each INTC DT node. We only need to do INTC initialization + * for the INTC DT node belonging to boot CPU (or boot HART). + */ + if (riscv_hartid_to_cpuid(hartid) != smp_processor_id()) { + /* + * The INTC nodes of each CPU are suppliers for downstream + * interrupt controllers (such as PLIC, IMSIC and APLIC + * direct-mode) so we should mark an INTC node as initialized + * if we are not creating IRQ domain for it. + */ + fwnode_dev_initialized(of_fwnode_handle(node), true); + return 0; + } + + if (of_device_is_compatible(node, "andestech,cpu-intc")) { + riscv_intc_custom_base = ANDES_SLI_CAUSE_BASE; + riscv_intc_custom_nr_irqs = ANDES_RV_IRQ_LAST; + chip = &andes_intc_chip; + } + + return riscv_intc_init_common(of_fwnode_handle(node), chip); +} + +IRQCHIP_DECLARE(riscv, "riscv,cpu-intc", riscv_intc_init); +IRQCHIP_DECLARE(andes, "andestech,cpu-intc", riscv_intc_init); + +#ifdef CONFIG_ACPI + +struct rintc_data { + union { + u32 ext_intc_id; + struct { + u32 context_id : 16, + reserved : 8, + aplic_plic_id : 8; + }; + }; + unsigned long hart_id; + u64 imsic_addr; + u32 imsic_size; +}; + +static u32 nr_rintc; +static struct rintc_data **rintc_acpi_data; + +#define for_each_matching_plic(_plic_id) \ + unsigned int _plic; \ + \ + for (_plic = 0; _plic < nr_rintc; _plic++) \ + if (rintc_acpi_data[_plic]->aplic_plic_id != _plic_id) \ + continue; \ + else + +unsigned int acpi_rintc_get_plic_nr_contexts(unsigned int plic_id) +{ + unsigned int nctx = 0; + + for_each_matching_plic(plic_id) + nctx++; + + return nctx; +} + +static struct rintc_data *get_plic_context(unsigned int plic_id, unsigned int ctxt_idx) +{ + unsigned int ctxt = 0; + + for_each_matching_plic(plic_id) { + if (ctxt == ctxt_idx) + return rintc_acpi_data[_plic]; + + ctxt++; + } + + return NULL; +} + +unsigned long acpi_rintc_ext_parent_to_hartid(unsigned int plic_id, unsigned int ctxt_idx) +{ + struct rintc_data *data = get_plic_context(plic_id, ctxt_idx); + + return data ? data->hart_id : INVALID_HARTID; +} + +unsigned int acpi_rintc_get_plic_context(unsigned int plic_id, unsigned int ctxt_idx) +{ + struct rintc_data *data = get_plic_context(plic_id, ctxt_idx); + + return data ? data->context_id : INVALID_CONTEXT; +} + +unsigned long acpi_rintc_index_to_hartid(u32 index) +{ + return index >= nr_rintc ? 
INVALID_HARTID : rintc_acpi_data[index]->hart_id; +} + +int acpi_rintc_get_imsic_mmio_info(u32 index, struct resource *res) +{ + if (index >= nr_rintc) + return -1; + + res->start = rintc_acpi_data[index]->imsic_addr; + res->end = res->start + rintc_acpi_data[index]->imsic_size - 1; + res->flags = IORESOURCE_MEM; + return 0; +} + +static int __init riscv_intc_acpi_match(union acpi_subtable_headers *header, + const unsigned long end) +{ + return 0; +} + +static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_rintc *rintc; + struct fwnode_handle *fn; + int count; + int rc; + + if (!rintc_acpi_data) { + count = acpi_table_parse_madt(ACPI_MADT_TYPE_RINTC, riscv_intc_acpi_match, 0); + if (count <= 0) + return -EINVAL; + + rintc_acpi_data = kcalloc(count, sizeof(*rintc_acpi_data), GFP_KERNEL); + if (!rintc_acpi_data) + return -ENOMEM; + } + + rintc = (struct acpi_madt_rintc *)header; + rintc_acpi_data[nr_rintc] = kzalloc(sizeof(*rintc_acpi_data[0]), GFP_KERNEL); + if (!rintc_acpi_data[nr_rintc]) + return -ENOMEM; + + rintc_acpi_data[nr_rintc]->ext_intc_id = rintc->ext_intc_id; + rintc_acpi_data[nr_rintc]->hart_id = rintc->hart_id; + rintc_acpi_data[nr_rintc]->imsic_addr = rintc->imsic_addr; + rintc_acpi_data[nr_rintc]->imsic_size = rintc->imsic_size; + nr_rintc++; + + /* + * The ACPI MADT will have one INTC for each CPU (or HART) + * so riscv_intc_acpi_init() function will be called once + * for each INTC. We only do INTC initialization + * for the INTC belonging to the boot CPU (or boot HART). + */ + if (riscv_hartid_to_cpuid(rintc->hart_id) != smp_processor_id()) + return 0; + + fn = irq_domain_alloc_named_fwnode("RISCV-INTC"); + if (!fn) { + pr_err("unable to allocate INTC FW node\n"); + return -ENOMEM; + } + + rc = riscv_intc_init_common(fn, &riscv_intc_chip); + if (rc) + irq_domain_free_fwnode(fn); + else + acpi_set_irq_model(ACPI_IRQ_MODEL_RINTC, riscv_acpi_get_gsi_domain_id); + + return rc; +} + +IRQCHIP_ACPI_DECLARE(riscv_intc, ACPI_MADT_TYPE_RINTC, NULL, + ACPI_MADT_RINTC_VERSION_V1, riscv_intc_acpi_init); +#endif diff --git a/drivers/irqchip/irq-riscv-rpmi-sysmsi.c b/drivers/irqchip/irq-riscv-rpmi-sysmsi.c new file mode 100644 index 000000000000..5c74c561ce31 --- /dev/null +++ b/drivers/irqchip/irq-riscv-rpmi-sysmsi.c @@ -0,0 +1,328 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2025 Ventana Micro Systems Inc. 
*/ + +#include <linux/acpi.h> +#include <linux/bits.h> +#include <linux/bug.h> +#include <linux/device.h> +#include <linux/device/devres.h> +#include <linux/dev_printk.h> +#include <linux/errno.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/irqchip/riscv-imsic.h> +#include <linux/mailbox_client.h> +#include <linux/mailbox/riscv-rpmi-message.h> +#include <linux/module.h> +#include <linux/msi.h> +#include <linux/of_irq.h> +#include <linux/platform_device.h> +#include <linux/types.h> + +struct rpmi_sysmsi_get_attrs_rx { + __le32 status; + __le32 sys_num_msi; + __le32 flag0; + __le32 flag1; +}; + +#define RPMI_SYSMSI_MSI_ATTRIBUTES_FLAG0_PREF_PRIV BIT(0) + +struct rpmi_sysmsi_set_msi_state_tx { + __le32 sys_msi_index; + __le32 sys_msi_state; +}; + +struct rpmi_sysmsi_set_msi_state_rx { + __le32 status; +}; + +#define RPMI_SYSMSI_MSI_STATE_ENABLE BIT(0) +#define RPMI_SYSMSI_MSI_STATE_PENDING BIT(1) + +struct rpmi_sysmsi_set_msi_target_tx { + __le32 sys_msi_index; + __le32 sys_msi_address_low; + __le32 sys_msi_address_high; + __le32 sys_msi_data; +}; + +struct rpmi_sysmsi_set_msi_target_rx { + __le32 status; +}; + +struct rpmi_sysmsi_priv { + struct device *dev; + struct mbox_client client; + struct mbox_chan *chan; + u32 nr_irqs; + u32 gsi_base; +}; + +static int rpmi_sysmsi_get_num_msi(struct rpmi_sysmsi_priv *priv) +{ + struct rpmi_sysmsi_get_attrs_rx rx; + struct rpmi_mbox_message msg; + int ret; + + rpmi_mbox_init_send_with_response(&msg, RPMI_SYSMSI_SRV_GET_ATTRIBUTES, + NULL, 0, &rx, sizeof(rx)); + ret = rpmi_mbox_send_message(priv->chan, &msg); + if (ret) + return ret; + if (rx.status) + return rpmi_to_linux_error(le32_to_cpu(rx.status)); + + return le32_to_cpu(rx.sys_num_msi); +} + +static int rpmi_sysmsi_set_msi_state(struct rpmi_sysmsi_priv *priv, + u32 sys_msi_index, u32 sys_msi_state) +{ + struct rpmi_sysmsi_set_msi_state_tx tx; + struct rpmi_sysmsi_set_msi_state_rx rx; + struct rpmi_mbox_message msg; + int ret; + + tx.sys_msi_index = cpu_to_le32(sys_msi_index); + tx.sys_msi_state = cpu_to_le32(sys_msi_state); + rpmi_mbox_init_send_with_response(&msg, RPMI_SYSMSI_SRV_SET_MSI_STATE, + &tx, sizeof(tx), &rx, sizeof(rx)); + ret = rpmi_mbox_send_message(priv->chan, &msg); + if (ret) + return ret; + if (rx.status) + return rpmi_to_linux_error(le32_to_cpu(rx.status)); + + return 0; +} + +static int rpmi_sysmsi_set_msi_target(struct rpmi_sysmsi_priv *priv, + u32 sys_msi_index, struct msi_msg *m) +{ + struct rpmi_sysmsi_set_msi_target_tx tx; + struct rpmi_sysmsi_set_msi_target_rx rx; + struct rpmi_mbox_message msg; + int ret; + + tx.sys_msi_index = cpu_to_le32(sys_msi_index); + tx.sys_msi_address_low = cpu_to_le32(m->address_lo); + tx.sys_msi_address_high = cpu_to_le32(m->address_hi); + tx.sys_msi_data = cpu_to_le32(m->data); + rpmi_mbox_init_send_with_response(&msg, RPMI_SYSMSI_SRV_SET_MSI_TARGET, + &tx, sizeof(tx), &rx, sizeof(rx)); + ret = rpmi_mbox_send_message(priv->chan, &msg); + if (ret) + return ret; + if (rx.status) + return rpmi_to_linux_error(le32_to_cpu(rx.status)); + + return 0; +} + +static void rpmi_sysmsi_irq_mask(struct irq_data *d) +{ + struct rpmi_sysmsi_priv *priv = irq_data_get_irq_chip_data(d); + irq_hw_number_t hwirq = irqd_to_hwirq(d); + int ret; + + ret = rpmi_sysmsi_set_msi_state(priv, hwirq, 0); + if (ret) + dev_warn(priv->dev, "Failed to mask hwirq %lu (error %d)\n", hwirq, ret); + irq_chip_mask_parent(d); +} + +static void rpmi_sysmsi_irq_unmask(struct irq_data *d) +{ + struct rpmi_sysmsi_priv *priv = irq_data_get_irq_chip_data(d); + 
irq_hw_number_t hwirq = irqd_to_hwirq(d); + int ret; + + irq_chip_unmask_parent(d); + ret = rpmi_sysmsi_set_msi_state(priv, hwirq, RPMI_SYSMSI_MSI_STATE_ENABLE); + if (ret) + dev_warn(priv->dev, "Failed to unmask hwirq %lu (error %d)\n", hwirq, ret); +} + +static void rpmi_sysmsi_write_msg(struct irq_data *d, struct msi_msg *msg) +{ + struct rpmi_sysmsi_priv *priv = irq_data_get_irq_chip_data(d); + irq_hw_number_t hwirq = irqd_to_hwirq(d); + int ret; + + /* For zeroed MSI, do nothing as of now */ + if (!msg->address_hi && !msg->address_lo && !msg->data) + return; + + ret = rpmi_sysmsi_set_msi_target(priv, hwirq, msg); + if (ret) + dev_warn(priv->dev, "Failed to set target for hwirq %lu (error %d)\n", hwirq, ret); +} + +static void rpmi_sysmsi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc) +{ + arg->desc = desc; + arg->hwirq = desc->data.icookie.value; +} + +static int rpmi_sysmsi_translate(struct irq_domain *d, struct irq_fwspec *fwspec, + unsigned long *hwirq, unsigned int *type) +{ + struct msi_domain_info *info = d->host_data; + struct rpmi_sysmsi_priv *priv = info->data; + + if (WARN_ON(fwspec->param_count < 1)) + return -EINVAL; + + /* For DT, gsi_base is always zero. */ + *hwirq = fwspec->param[0] - priv->gsi_base; + *type = IRQ_TYPE_NONE; + return 0; +} + +static const struct msi_domain_template rpmi_sysmsi_template = { + .chip = { + .name = "RPMI-SYSMSI", + .irq_mask = rpmi_sysmsi_irq_mask, + .irq_unmask = rpmi_sysmsi_irq_unmask, +#ifdef CONFIG_SMP + .irq_set_affinity = irq_chip_set_affinity_parent, +#endif + .irq_write_msi_msg = rpmi_sysmsi_write_msg, + .flags = IRQCHIP_SET_TYPE_MASKED | + IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_MASK_ON_SUSPEND, + }, + + .ops = { + .set_desc = rpmi_sysmsi_set_desc, + .msi_translate = rpmi_sysmsi_translate, + }, + + .info = { + .bus_token = DOMAIN_BUS_WIRED_TO_MSI, + .flags = MSI_FLAG_USE_DEV_FWNODE, + .handler = handle_simple_irq, + .handler_name = "simple", + }, +}; + +static int rpmi_sysmsi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rpmi_sysmsi_priv *priv; + struct fwnode_handle *fwnode; + u32 id; + int rc; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + priv->dev = dev; + + /* Setup mailbox client */ + priv->client.dev = priv->dev; + priv->client.rx_callback = NULL; + priv->client.tx_block = false; + priv->client.knows_txdone = true; + priv->client.tx_tout = 0; + + /* Request mailbox channel */ + priv->chan = mbox_request_channel(&priv->client, 0); + if (IS_ERR(priv->chan)) + return PTR_ERR(priv->chan); + + /* Get number of system MSIs */ + rc = rpmi_sysmsi_get_num_msi(priv); + if (rc < 1) { + mbox_free_channel(priv->chan); + if (rc) + return dev_err_probe(dev, rc, "Failed to get number of system MSIs\n"); + else + return dev_err_probe(dev, -ENODEV, "No system MSIs found\n"); + } + priv->nr_irqs = rc; + + fwnode = dev_fwnode(dev); + if (is_acpi_node(fwnode)) { + u32 nr_irqs; + + rc = riscv_acpi_get_gsi_info(fwnode, &priv->gsi_base, &id, + &nr_irqs, NULL); + if (rc) { + dev_err(dev, "failed to find GSI mapping\n"); + return rc; + } + + /* Update with actual GSI range */ + if (nr_irqs != priv->nr_irqs) + riscv_acpi_update_gsi_range(priv->gsi_base, priv->nr_irqs); + } + + /* + * The device MSI domain for platform devices on RISC-V architecture + * is only available after the MSI controller driver is probed so, + * explicitly configure here. 
+ */ + if (!dev_get_msi_domain(dev)) { + /* + * The device MSI domain for OF devices is only set at the + * time of populating/creating OF device. If the device MSI + * domain is discovered later after the OF device is created + * then we need to set it explicitly before using any platform + * MSI functions. + */ + if (is_of_node(fwnode)) { + of_msi_configure(dev, dev_of_node(dev)); + } else if (is_acpi_device_node(fwnode)) { + struct irq_domain *msi_domain; + + msi_domain = irq_find_matching_fwnode(imsic_acpi_get_fwnode(dev), + DOMAIN_BUS_PLATFORM_MSI); + dev_set_msi_domain(dev, msi_domain); + } + + if (!dev_get_msi_domain(dev)) { + mbox_free_channel(priv->chan); + return -EPROBE_DEFER; + } + } + + if (!msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN, + &rpmi_sysmsi_template, + priv->nr_irqs, priv, priv)) { + mbox_free_channel(priv->chan); + return dev_err_probe(dev, -ENOMEM, "failed to create MSI irq domain\n"); + } + +#ifdef CONFIG_ACPI + struct acpi_device *adev = ACPI_COMPANION(dev); + + if (adev) + acpi_dev_clear_dependencies(adev); +#endif + + dev_info(dev, "%u system MSIs registered\n", priv->nr_irqs); + return 0; +} + +static const struct of_device_id rpmi_sysmsi_match[] = { + { .compatible = "riscv,rpmi-system-msi" }, + {} +}; + +static const struct acpi_device_id acpi_rpmi_sysmsi_match[] = { + { "RSCV0006" }, + {} +}; +MODULE_DEVICE_TABLE(acpi, acpi_rpmi_sysmsi_match); + +static struct platform_driver rpmi_sysmsi_driver = { + .driver = { + .name = "rpmi-sysmsi", + .of_match_table = rpmi_sysmsi_match, + .acpi_match_table = acpi_rpmi_sysmsi_match, + }, + .probe = rpmi_sysmsi_probe, +}; +builtin_platform_driver(rpmi_sysmsi_driver); diff --git a/drivers/irqchip/irq-s3c24xx.c b/drivers/irqchip/irq-s3c24xx.c deleted file mode 100644 index b623f300f1b1..000000000000 --- a/drivers/irqchip/irq-s3c24xx.c +++ /dev/null @@ -1,1339 +0,0 @@ -/* - * S3C24XX IRQ handling - * - * Copyright (c) 2003-2004 Simtec Electronics - * Ben Dooks <ben@simtec.co.uk> - * Copyright (c) 2012 Heiko Stuebner <heiko@sntech.de> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
-*/ - -#include <linux/init.h> -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/io.h> -#include <linux/err.h> -#include <linux/interrupt.h> -#include <linux/ioport.h> -#include <linux/device.h> -#include <linux/irqdomain.h> -#include <linux/irqchip.h> -#include <linux/irqchip/chained_irq.h> -#include <linux/of.h> -#include <linux/of_irq.h> -#include <linux/of_address.h> - -#include <asm/exception.h> -#include <asm/mach/irq.h> - -#include <mach/regs-irq.h> -#include <mach/regs-gpio.h> - -#include <plat/cpu.h> -#include <plat/regs-irqtype.h> -#include <plat/pm.h> - -#define S3C_IRQTYPE_NONE 0 -#define S3C_IRQTYPE_EINT 1 -#define S3C_IRQTYPE_EDGE 2 -#define S3C_IRQTYPE_LEVEL 3 - -struct s3c_irq_data { - unsigned int type; - unsigned long offset; - unsigned long parent_irq; - - /* data gets filled during init */ - struct s3c_irq_intc *intc; - unsigned long sub_bits; - struct s3c_irq_intc *sub_intc; -}; - -/* - * Structure holding the controller data - * @reg_pending register holding pending irqs - * @reg_intpnd special register intpnd in main intc - * @reg_mask mask register - * @domain irq_domain of the controller - * @parent parent controller for ext and sub irqs - * @irqs irq-data, always s3c_irq_data[32] - */ -struct s3c_irq_intc { - void __iomem *reg_pending; - void __iomem *reg_intpnd; - void __iomem *reg_mask; - struct irq_domain *domain; - struct s3c_irq_intc *parent; - struct s3c_irq_data *irqs; -}; - -/* - * Array holding pointers to the global controller structs - * [0] ... main_intc - * [1] ... sub_intc - * [2] ... main_intc2 on s3c2416 - */ -static struct s3c_irq_intc *s3c_intc[3]; - -static void s3c_irq_mask(struct irq_data *data) -{ - struct s3c_irq_data *irq_data = irq_data_get_irq_chip_data(data); - struct s3c_irq_intc *intc = irq_data->intc; - struct s3c_irq_intc *parent_intc = intc->parent; - struct s3c_irq_data *parent_data; - unsigned long mask; - unsigned int irqno; - - mask = readl_relaxed(intc->reg_mask); - mask |= (1UL << irq_data->offset); - writel_relaxed(mask, intc->reg_mask); - - if (parent_intc) { - parent_data = &parent_intc->irqs[irq_data->parent_irq]; - - /* check to see if we need to mask the parent IRQ - * The parent_irq is always in main_intc, so the hwirq - * for find_mapping does not need an offset in any case. 
- */ - if ((mask & parent_data->sub_bits) == parent_data->sub_bits) { - irqno = irq_find_mapping(parent_intc->domain, - irq_data->parent_irq); - s3c_irq_mask(irq_get_irq_data(irqno)); - } - } -} - -static void s3c_irq_unmask(struct irq_data *data) -{ - struct s3c_irq_data *irq_data = irq_data_get_irq_chip_data(data); - struct s3c_irq_intc *intc = irq_data->intc; - struct s3c_irq_intc *parent_intc = intc->parent; - unsigned long mask; - unsigned int irqno; - - mask = readl_relaxed(intc->reg_mask); - mask &= ~(1UL << irq_data->offset); - writel_relaxed(mask, intc->reg_mask); - - if (parent_intc) { - irqno = irq_find_mapping(parent_intc->domain, - irq_data->parent_irq); - s3c_irq_unmask(irq_get_irq_data(irqno)); - } -} - -static inline void s3c_irq_ack(struct irq_data *data) -{ - struct s3c_irq_data *irq_data = irq_data_get_irq_chip_data(data); - struct s3c_irq_intc *intc = irq_data->intc; - unsigned long bitval = 1UL << irq_data->offset; - - writel_relaxed(bitval, intc->reg_pending); - if (intc->reg_intpnd) - writel_relaxed(bitval, intc->reg_intpnd); -} - -static int s3c_irq_type(struct irq_data *data, unsigned int type) -{ - switch (type) { - case IRQ_TYPE_NONE: - break; - case IRQ_TYPE_EDGE_RISING: - case IRQ_TYPE_EDGE_FALLING: - case IRQ_TYPE_EDGE_BOTH: - irq_set_handler(data->irq, handle_edge_irq); - break; - case IRQ_TYPE_LEVEL_LOW: - case IRQ_TYPE_LEVEL_HIGH: - irq_set_handler(data->irq, handle_level_irq); - break; - default: - pr_err("No such irq type %d\n", type); - return -EINVAL; - } - - return 0; -} - -static int s3c_irqext_type_set(void __iomem *gpcon_reg, - void __iomem *extint_reg, - unsigned long gpcon_offset, - unsigned long extint_offset, - unsigned int type) -{ - unsigned long newvalue = 0, value; - - /* Set the GPIO to external interrupt mode */ - value = readl_relaxed(gpcon_reg); - value = (value & ~(3 << gpcon_offset)) | (0x02 << gpcon_offset); - writel_relaxed(value, gpcon_reg); - - /* Set the external interrupt to pointed trigger type */ - switch (type) - { - case IRQ_TYPE_NONE: - pr_warn("No edge setting!\n"); - break; - - case IRQ_TYPE_EDGE_RISING: - newvalue = S3C2410_EXTINT_RISEEDGE; - break; - - case IRQ_TYPE_EDGE_FALLING: - newvalue = S3C2410_EXTINT_FALLEDGE; - break; - - case IRQ_TYPE_EDGE_BOTH: - newvalue = S3C2410_EXTINT_BOTHEDGE; - break; - - case IRQ_TYPE_LEVEL_LOW: - newvalue = S3C2410_EXTINT_LOWLEV; - break; - - case IRQ_TYPE_LEVEL_HIGH: - newvalue = S3C2410_EXTINT_HILEV; - break; - - default: - pr_err("No such irq type %d\n", type); - return -EINVAL; - } - - value = readl_relaxed(extint_reg); - value = (value & ~(7 << extint_offset)) | (newvalue << extint_offset); - writel_relaxed(value, extint_reg); - - return 0; -} - -static int s3c_irqext_type(struct irq_data *data, unsigned int type) -{ - void __iomem *extint_reg; - void __iomem *gpcon_reg; - unsigned long gpcon_offset, extint_offset; - - if ((data->hwirq >= 4) && (data->hwirq <= 7)) { - gpcon_reg = S3C2410_GPFCON; - extint_reg = S3C24XX_EXTINT0; - gpcon_offset = (data->hwirq) * 2; - extint_offset = (data->hwirq) * 4; - } else if ((data->hwirq >= 8) && (data->hwirq <= 15)) { - gpcon_reg = S3C2410_GPGCON; - extint_reg = S3C24XX_EXTINT1; - gpcon_offset = (data->hwirq - 8) * 2; - extint_offset = (data->hwirq - 8) * 4; - } else if ((data->hwirq >= 16) && (data->hwirq <= 23)) { - gpcon_reg = S3C2410_GPGCON; - extint_reg = S3C24XX_EXTINT2; - gpcon_offset = (data->hwirq - 8) * 2; - extint_offset = (data->hwirq - 16) * 4; - } else { - return -EINVAL; - } - - return s3c_irqext_type_set(gpcon_reg, extint_reg, 
gpcon_offset, - extint_offset, type); -} - -static int s3c_irqext0_type(struct irq_data *data, unsigned int type) -{ - void __iomem *extint_reg; - void __iomem *gpcon_reg; - unsigned long gpcon_offset, extint_offset; - - if (data->hwirq <= 3) { - gpcon_reg = S3C2410_GPFCON; - extint_reg = S3C24XX_EXTINT0; - gpcon_offset = (data->hwirq) * 2; - extint_offset = (data->hwirq) * 4; - } else { - return -EINVAL; - } - - return s3c_irqext_type_set(gpcon_reg, extint_reg, gpcon_offset, - extint_offset, type); -} - -static struct irq_chip s3c_irq_chip = { - .name = "s3c", - .irq_ack = s3c_irq_ack, - .irq_mask = s3c_irq_mask, - .irq_unmask = s3c_irq_unmask, - .irq_set_type = s3c_irq_type, - .irq_set_wake = s3c_irq_wake -}; - -static struct irq_chip s3c_irq_level_chip = { - .name = "s3c-level", - .irq_mask = s3c_irq_mask, - .irq_unmask = s3c_irq_unmask, - .irq_ack = s3c_irq_ack, - .irq_set_type = s3c_irq_type, -}; - -static struct irq_chip s3c_irqext_chip = { - .name = "s3c-ext", - .irq_mask = s3c_irq_mask, - .irq_unmask = s3c_irq_unmask, - .irq_ack = s3c_irq_ack, - .irq_set_type = s3c_irqext_type, - .irq_set_wake = s3c_irqext_wake -}; - -static struct irq_chip s3c_irq_eint0t4 = { - .name = "s3c-ext0", - .irq_ack = s3c_irq_ack, - .irq_mask = s3c_irq_mask, - .irq_unmask = s3c_irq_unmask, - .irq_set_wake = s3c_irq_wake, - .irq_set_type = s3c_irqext0_type, -}; - -static void s3c_irq_demux(struct irq_desc *desc) -{ - struct irq_chip *chip = irq_desc_get_chip(desc); - struct s3c_irq_data *irq_data = irq_desc_get_chip_data(desc); - struct s3c_irq_intc *intc = irq_data->intc; - struct s3c_irq_intc *sub_intc = irq_data->sub_intc; - unsigned int n, offset, irq; - unsigned long src, msk; - - /* we're using individual domains for the non-dt case - * and one big domain for the dt case where the subintc - * starts at hwirq number 32. - */ - offset = irq_domain_get_of_node(intc->domain) ? 32 : 0; - - chained_irq_enter(chip, desc); - - src = readl_relaxed(sub_intc->reg_pending); - msk = readl_relaxed(sub_intc->reg_mask); - - src &= ~msk; - src &= irq_data->sub_bits; - - while (src) { - n = __ffs(src); - src &= ~(1 << n); - irq = irq_find_mapping(sub_intc->domain, offset + n); - generic_handle_irq(irq); - } - - chained_irq_exit(chip, desc); -} - -static inline int s3c24xx_handle_intc(struct s3c_irq_intc *intc, - struct pt_regs *regs, int intc_offset) -{ - int pnd; - int offset; - - pnd = readl_relaxed(intc->reg_intpnd); - if (!pnd) - return false; - - /* non-dt machines use individual domains */ - if (!irq_domain_get_of_node(intc->domain)) - intc_offset = 0; - - /* We have a problem that the INTOFFSET register does not always - * show one interrupt. Occasionally we get two interrupts through - * the prioritiser, and this causes the INTOFFSET register to show - * what looks like the logical-or of the two interrupt numbers. - * - * Thanks to Klaus, Shannon, et al for helping to debug this problem - */ - offset = readl_relaxed(intc->reg_intpnd + 4); - - /* Find the bit manually, when the offset is wrong. - * The pending register only ever contains the one bit of the next - * interrupt to handle. 
- */ - if (!(pnd & (1 << offset))) - offset = __ffs(pnd); - - handle_domain_irq(intc->domain, intc_offset + offset, regs); - return true; -} - -asmlinkage void __exception_irq_entry s3c24xx_handle_irq(struct pt_regs *regs) -{ - do { - if (likely(s3c_intc[0])) - if (s3c24xx_handle_intc(s3c_intc[0], regs, 0)) - continue; - - if (s3c_intc[2]) - if (s3c24xx_handle_intc(s3c_intc[2], regs, 64)) - continue; - - break; - } while (1); -} - -#ifdef CONFIG_FIQ -/** - * s3c24xx_set_fiq - set the FIQ routing - * @irq: IRQ number to route to FIQ on processor. - * @on: Whether to route @irq to the FIQ, or to remove the FIQ routing. - * - * Change the state of the IRQ to FIQ routing depending on @irq and @on. If - * @on is true, the @irq is checked to see if it can be routed and the - * interrupt controller updated to route the IRQ. If @on is false, the FIQ - * routing is cleared, regardless of which @irq is specified. - */ -int s3c24xx_set_fiq(unsigned int irq, bool on) -{ - u32 intmod; - unsigned offs; - - if (on) { - offs = irq - FIQ_START; - if (offs > 31) - return -EINVAL; - - intmod = 1 << offs; - } else { - intmod = 0; - } - - writel_relaxed(intmod, S3C2410_INTMOD); - return 0; -} - -EXPORT_SYMBOL_GPL(s3c24xx_set_fiq); -#endif - -static int s3c24xx_irq_map(struct irq_domain *h, unsigned int virq, - irq_hw_number_t hw) -{ - struct s3c_irq_intc *intc = h->host_data; - struct s3c_irq_data *irq_data = &intc->irqs[hw]; - struct s3c_irq_intc *parent_intc; - struct s3c_irq_data *parent_irq_data; - unsigned int irqno; - - /* attach controller pointer to irq_data */ - irq_data->intc = intc; - irq_data->offset = hw; - - parent_intc = intc->parent; - - /* set handler and flags */ - switch (irq_data->type) { - case S3C_IRQTYPE_NONE: - return 0; - case S3C_IRQTYPE_EINT: - /* On the S3C2412, the EINT0to3 have a parent irq - * but need the s3c_irq_eint0t4 chip - */ - if (parent_intc && (!soc_is_s3c2412() || hw >= 4)) - irq_set_chip_and_handler(virq, &s3c_irqext_chip, - handle_edge_irq); - else - irq_set_chip_and_handler(virq, &s3c_irq_eint0t4, - handle_edge_irq); - break; - case S3C_IRQTYPE_EDGE: - if (parent_intc || intc->reg_pending == S3C2416_SRCPND2) - irq_set_chip_and_handler(virq, &s3c_irq_level_chip, - handle_edge_irq); - else - irq_set_chip_and_handler(virq, &s3c_irq_chip, - handle_edge_irq); - break; - case S3C_IRQTYPE_LEVEL: - if (parent_intc) - irq_set_chip_and_handler(virq, &s3c_irq_level_chip, - handle_level_irq); - else - irq_set_chip_and_handler(virq, &s3c_irq_chip, - handle_level_irq); - break; - default: - pr_err("irq-s3c24xx: unsupported irqtype %d\n", irq_data->type); - return -EINVAL; - } - - irq_set_chip_data(virq, irq_data); - - if (parent_intc && irq_data->type != S3C_IRQTYPE_NONE) { - if (irq_data->parent_irq > 31) { - pr_err("irq-s3c24xx: parent irq %lu is out of range\n", - irq_data->parent_irq); - return -EINVAL; - } - - parent_irq_data = &parent_intc->irqs[irq_data->parent_irq]; - parent_irq_data->sub_intc = intc; - parent_irq_data->sub_bits |= (1UL << hw); - - /* attach the demuxer to the parent irq */ - irqno = irq_find_mapping(parent_intc->domain, - irq_data->parent_irq); - if (!irqno) { - pr_err("irq-s3c24xx: could not find mapping for parent irq %lu\n", - irq_data->parent_irq); - return -EINVAL; - } - irq_set_chained_handler(irqno, s3c_irq_demux); - } - - return 0; -} - -static const struct irq_domain_ops s3c24xx_irq_ops = { - .map = s3c24xx_irq_map, - .xlate = irq_domain_xlate_twocell, -}; - -static void s3c24xx_clear_intc(struct s3c_irq_intc *intc) -{ - void __iomem *reg_source; 
- unsigned long pend; - unsigned long last; - int i; - - /* if intpnd is set, read the next pending irq from there */ - reg_source = intc->reg_intpnd ? intc->reg_intpnd : intc->reg_pending; - - last = 0; - for (i = 0; i < 4; i++) { - pend = readl_relaxed(reg_source); - - if (pend == 0 || pend == last) - break; - - writel_relaxed(pend, intc->reg_pending); - if (intc->reg_intpnd) - writel_relaxed(pend, intc->reg_intpnd); - - pr_info("irq: clearing pending status %08x\n", (int)pend); - last = pend; - } -} - -static struct s3c_irq_intc * __init s3c24xx_init_intc(struct device_node *np, - struct s3c_irq_data *irq_data, - struct s3c_irq_intc *parent, - unsigned long address) -{ - struct s3c_irq_intc *intc; - void __iomem *base = (void *)0xf6000000; /* static mapping */ - int irq_num; - int irq_start; - int ret; - - intc = kzalloc(sizeof(struct s3c_irq_intc), GFP_KERNEL); - if (!intc) - return ERR_PTR(-ENOMEM); - - intc->irqs = irq_data; - - if (parent) - intc->parent = parent; - - /* select the correct data for the controller. - * Need to hard code the irq num start and offset - * to preserve the static mapping for now - */ - switch (address) { - case 0x4a000000: - pr_debug("irq: found main intc\n"); - intc->reg_pending = base; - intc->reg_mask = base + 0x08; - intc->reg_intpnd = base + 0x10; - irq_num = 32; - irq_start = S3C2410_IRQ(0); - break; - case 0x4a000018: - pr_debug("irq: found subintc\n"); - intc->reg_pending = base + 0x18; - intc->reg_mask = base + 0x1c; - irq_num = 29; - irq_start = S3C2410_IRQSUB(0); - break; - case 0x4a000040: - pr_debug("irq: found intc2\n"); - intc->reg_pending = base + 0x40; - intc->reg_mask = base + 0x48; - intc->reg_intpnd = base + 0x50; - irq_num = 8; - irq_start = S3C2416_IRQ(0); - break; - case 0x560000a4: - pr_debug("irq: found eintc\n"); - base = (void *)0xfd000000; - - intc->reg_mask = base + 0xa4; - intc->reg_pending = base + 0xa8; - irq_num = 24; - irq_start = S3C2410_IRQ(32); - break; - default: - pr_err("irq: unsupported controller address\n"); - ret = -EINVAL; - goto err; - } - - /* now that all the data is complete, init the irq-domain */ - s3c24xx_clear_intc(intc); - intc->domain = irq_domain_add_legacy(np, irq_num, irq_start, - 0, &s3c24xx_irq_ops, - intc); - if (!intc->domain) { - pr_err("irq: could not create irq-domain\n"); - ret = -EINVAL; - goto err; - } - - set_handle_irq(s3c24xx_handle_irq); - - return intc; - -err: - kfree(intc); - return ERR_PTR(ret); -} - -static struct s3c_irq_data __maybe_unused init_eint[32] = { - { .type = S3C_IRQTYPE_NONE, }, /* reserved */ - { .type = S3C_IRQTYPE_NONE, }, /* reserved */ - { .type = S3C_IRQTYPE_NONE, }, /* reserved */ - { .type = S3C_IRQTYPE_NONE, }, /* reserved */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT4 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT5 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT6 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT7 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT8 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT9 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT10 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT11 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT12 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT13 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT14 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT15 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT16 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, 
/* EINT17 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT18 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT19 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT20 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT21 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT22 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT23 */ -}; - -#ifdef CONFIG_CPU_S3C2410 -static struct s3c_irq_data init_s3c2410base[32] = { - { .type = S3C_IRQTYPE_EINT, }, /* EINT0 */ - { .type = S3C_IRQTYPE_EINT, }, /* EINT1 */ - { .type = S3C_IRQTYPE_EINT, }, /* EINT2 */ - { .type = S3C_IRQTYPE_EINT, }, /* EINT3 */ - { .type = S3C_IRQTYPE_LEVEL, }, /* EINT4to7 */ - { .type = S3C_IRQTYPE_LEVEL, }, /* EINT8to23 */ - { .type = S3C_IRQTYPE_NONE, }, /* reserved */ - { .type = S3C_IRQTYPE_EDGE, }, /* nBATT_FLT */ - { .type = S3C_IRQTYPE_EDGE, }, /* TICK */ - { .type = S3C_IRQTYPE_EDGE, }, /* WDT */ - { .type = S3C_IRQTYPE_EDGE, }, /* TIMER0 */ - { .type = S3C_IRQTYPE_EDGE, }, /* TIMER1 */ - { .type = S3C_IRQTYPE_EDGE, }, /* TIMER2 */ - { .type = S3C_IRQTYPE_EDGE, }, /* TIMER3 */ - { .type = S3C_IRQTYPE_EDGE, }, /* TIMER4 */ - { .type = S3C_IRQTYPE_LEVEL, }, /* UART2 */ - { .type = S3C_IRQTYPE_EDGE, }, /* LCD */ - { .type = S3C_IRQTYPE_EDGE, }, /* DMA0 */ - { .type = S3C_IRQTYPE_EDGE, }, /* DMA1 */ - { .type = S3C_IRQTYPE_EDGE, }, /* DMA2 */ - { .type = S3C_IRQTYPE_EDGE, }, /* DMA3 */ - { .type = S3C_IRQTYPE_EDGE, }, /* SDI */ - { .type = S3C_IRQTYPE_EDGE, }, /* SPI0 */ - { .type = S3C_IRQTYPE_LEVEL, }, /* UART1 */ - { .type = S3C_IRQTYPE_NONE, }, /* reserved */ - { .type = S3C_IRQTYPE_EDGE, }, /* USBD */ - { .type = S3C_IRQTYPE_EDGE, }, /* USBH */ - { .type = S3C_IRQTYPE_EDGE, }, /* IIC */ - { .type = S3C_IRQTYPE_LEVEL, }, /* UART0 */ - { .type = S3C_IRQTYPE_EDGE, }, /* SPI1 */ - { .type = S3C_IRQTYPE_EDGE, }, /* RTC */ - { .type = S3C_IRQTYPE_LEVEL, }, /* ADCPARENT */ -}; - -static struct s3c_irq_data init_s3c2410subint[32] = { - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-RX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-TX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-ERR */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-RX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-TX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-ERR */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-RX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-TX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-ERR */ - { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* TC */ - { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* ADC */ -}; - -void __init s3c2410_init_irq(void) -{ -#ifdef CONFIG_FIQ - init_FIQ(FIQ_START); -#endif - - s3c_intc[0] = s3c24xx_init_intc(NULL, &init_s3c2410base[0], NULL, - 0x4a000000); - if (IS_ERR(s3c_intc[0])) { - pr_err("irq: could not create main interrupt controller\n"); - return; - } - - s3c_intc[1] = s3c24xx_init_intc(NULL, &init_s3c2410subint[0], - s3c_intc[0], 0x4a000018); - s3c24xx_init_intc(NULL, &init_eint[0], s3c_intc[0], 0x560000a4); -} -#endif - -#ifdef CONFIG_CPU_S3C2412 -static struct s3c_irq_data init_s3c2412base[32] = { - { .type = S3C_IRQTYPE_LEVEL, }, /* EINT0 */ - { .type = S3C_IRQTYPE_LEVEL, }, /* EINT1 */ - { .type = S3C_IRQTYPE_LEVEL, }, /* EINT2 */ - { .type = S3C_IRQTYPE_LEVEL, }, /* EINT3 */ - { .type = S3C_IRQTYPE_LEVEL, }, /* EINT4to7 */ - { .type = S3C_IRQTYPE_LEVEL, }, /* EINT8to23 */ - { .type = S3C_IRQTYPE_NONE, 
}, /* reserved */ - { .type = S3C_IRQTYPE_EDGE, }, /* nBATT_FLT */ - { .type = S3C_IRQTYPE_EDGE, }, /* TICK */ - { .type = S3C_IRQTYPE_EDGE, }, /* WDT */ - { .type = S3C_IRQTYPE_EDGE, }, /* TIMER0 */ - { .type = S3C_IRQTYPE_EDGE, }, /* TIMER1 */ - { .type = S3C_IRQTYPE_EDGE, }, /* TIMER2 */ - { .type = S3C_IRQTYPE_EDGE, }, /* TIMER3 */ - { .type = S3C_IRQTYPE_EDGE, }, /* TIMER4 */ - { .type = S3C_IRQTYPE_LEVEL, }, /* UART2 */ - { .type = S3C_IRQTYPE_EDGE, }, /* LCD */ - { .type = S3C_IRQTYPE_EDGE, }, /* DMA0 */ - { .type = S3C_IRQTYPE_EDGE, }, /* DMA1 */ - { .type = S3C_IRQTYPE_EDGE, }, /* DMA2 */ - { .type = S3C_IRQTYPE_EDGE, }, /* DMA3 */ - { .type = S3C_IRQTYPE_LEVEL, }, /* SDI/CF */ - { .type = S3C_IRQTYPE_EDGE, }, /* SPI0 */ - { .type = S3C_IRQTYPE_LEVEL, }, /* UART1 */ - { .type = S3C_IRQTYPE_NONE, }, /* reserved */ - { .type = S3C_IRQTYPE_EDGE, }, /* USBD */ - { .type = S3C_IRQTYPE_EDGE, }, /* USBH */ - { .type = S3C_IRQTYPE_EDGE, }, /* IIC */ - { .type = S3C_IRQTYPE_LEVEL, }, /* UART0 */ - { .type = S3C_IRQTYPE_EDGE, }, /* SPI1 */ - { .type = S3C_IRQTYPE_EDGE, }, /* RTC */ - { .type = S3C_IRQTYPE_LEVEL, }, /* ADCPARENT */ -}; - -static struct s3c_irq_data init_s3c2412eint[32] = { - { .type = S3C_IRQTYPE_EINT, .parent_irq = 0 }, /* EINT0 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 1 }, /* EINT1 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 2 }, /* EINT2 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 3 }, /* EINT3 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT4 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT5 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT6 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT7 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT8 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT9 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT10 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT11 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT12 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT13 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT14 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT15 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT16 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT17 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT18 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT19 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT20 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT21 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT22 */ - { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT23 */ -}; - -static struct s3c_irq_data init_s3c2412subint[32] = { - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-RX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-TX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-ERR */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-RX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-TX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-ERR */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-RX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-TX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-ERR */ - { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* TC */ - { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* ADC */ - { .type = S3C_IRQTYPE_NONE, }, - { .type = S3C_IRQTYPE_NONE, }, - { .type = 
S3C_IRQTYPE_LEVEL, .parent_irq = 21 }, /* SDI */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 21 }, /* CF */ -}; - -void __init s3c2412_init_irq(void) -{ - pr_info("S3C2412: IRQ Support\n"); - -#ifdef CONFIG_FIQ - init_FIQ(FIQ_START); -#endif - - s3c_intc[0] = s3c24xx_init_intc(NULL, &init_s3c2412base[0], NULL, - 0x4a000000); - if (IS_ERR(s3c_intc[0])) { - pr_err("irq: could not create main interrupt controller\n"); - return; - } - - s3c24xx_init_intc(NULL, &init_s3c2412eint[0], s3c_intc[0], 0x560000a4); - s3c_intc[1] = s3c24xx_init_intc(NULL, &init_s3c2412subint[0], - s3c_intc[0], 0x4a000018); -} -#endif - -#ifdef CONFIG_CPU_S3C2416 -static struct s3c_irq_data init_s3c2416base[32] = { - { .type = S3C_IRQTYPE_EINT, }, /* EINT0 */ - { .type = S3C_IRQTYPE_EINT, }, /* EINT1 */ - { .type = S3C_IRQTYPE_EINT, }, /* EINT2 */ - { .type = S3C_IRQTYPE_EINT, }, /* EINT3 */ - { .type = S3C_IRQTYPE_LEVEL, }, /* EINT4to7 */ - { .type = S3C_IRQTYPE_LEVEL, }, /* EINT8to23 */ - { .type = S3C_IRQTYPE_NONE, }, /* reserved */ - { .type = S3C_IRQTYPE_EDGE, }, /* nBATT_FLT */ - { .type = S3C_IRQTYPE_EDGE, }, /* TICK */ - { .type = S3C_IRQTYPE_LEVEL, }, /* WDT/AC97 */ - { .type = S3C_IRQTYPE_EDGE, }, /* TIMER0 */ - { .type = S3C_IRQTYPE_EDGE, }, /* TIMER1 */ - { .type = S3C_IRQTYPE_EDGE, }, /* TIMER2 */ - { .type = S3C_IRQTYPE_EDGE, }, /* TIMER3 */ - { .type = S3C_IRQTYPE_EDGE, }, /* TIMER4 */ - { .type = S3C_IRQTYPE_LEVEL, }, /* UART2 */ - { .type = S3C_IRQTYPE_LEVEL, }, /* LCD */ - { .type = S3C_IRQTYPE_LEVEL, }, /* DMA */ - { .type = S3C_IRQTYPE_LEVEL, }, /* UART3 */ - { .type = S3C_IRQTYPE_NONE, }, /* reserved */ - { .type = S3C_IRQTYPE_EDGE, }, /* SDI1 */ - { .type = S3C_IRQTYPE_EDGE, }, /* SDI0 */ - { .type = S3C_IRQTYPE_EDGE, }, /* SPI0 */ - { .type = S3C_IRQTYPE_LEVEL, }, /* UART1 */ - { .type = S3C_IRQTYPE_EDGE, }, /* NAND */ - { .type = S3C_IRQTYPE_EDGE, }, /* USBD */ - { .type = S3C_IRQTYPE_EDGE, }, /* USBH */ - { .type = S3C_IRQTYPE_EDGE, }, /* IIC */ - { .type = S3C_IRQTYPE_LEVEL, }, /* UART0 */ - { .type = S3C_IRQTYPE_NONE, }, - { .type = S3C_IRQTYPE_EDGE, }, /* RTC */ - { .type = S3C_IRQTYPE_LEVEL, }, /* ADCPARENT */ -}; - -static struct s3c_irq_data init_s3c2416subint[32] = { - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-RX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-TX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-ERR */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-RX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-TX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-ERR */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-RX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-TX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-ERR */ - { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* TC */ - { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* ADC */ - { .type = S3C_IRQTYPE_NONE }, /* reserved */ - { .type = S3C_IRQTYPE_NONE }, /* reserved */ - { .type = S3C_IRQTYPE_NONE }, /* reserved */ - { .type = S3C_IRQTYPE_NONE }, /* reserved */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD2 */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD3 */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD4 */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA0 */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA1 */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA2 */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA3 */ - { 
.type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA4 */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA5 */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 18 }, /* UART3-RX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 18 }, /* UART3-TX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 18 }, /* UART3-ERR */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 9 }, /* WDT */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 9 }, /* AC97 */ -}; - -static struct s3c_irq_data init_s3c2416_second[32] = { - { .type = S3C_IRQTYPE_EDGE }, /* 2D */ - { .type = S3C_IRQTYPE_NONE }, /* reserved */ - { .type = S3C_IRQTYPE_NONE }, /* reserved */ - { .type = S3C_IRQTYPE_NONE }, /* reserved */ - { .type = S3C_IRQTYPE_EDGE }, /* PCM0 */ - { .type = S3C_IRQTYPE_NONE }, /* reserved */ - { .type = S3C_IRQTYPE_EDGE }, /* I2S0 */ -}; - -void __init s3c2416_init_irq(void) -{ - pr_info("S3C2416: IRQ Support\n"); - -#ifdef CONFIG_FIQ - init_FIQ(FIQ_START); -#endif - - s3c_intc[0] = s3c24xx_init_intc(NULL, &init_s3c2416base[0], NULL, - 0x4a000000); - if (IS_ERR(s3c_intc[0])) { - pr_err("irq: could not create main interrupt controller\n"); - return; - } - - s3c24xx_init_intc(NULL, &init_eint[0], s3c_intc[0], 0x560000a4); - s3c_intc[1] = s3c24xx_init_intc(NULL, &init_s3c2416subint[0], - s3c_intc[0], 0x4a000018); - - s3c_intc[2] = s3c24xx_init_intc(NULL, &init_s3c2416_second[0], - NULL, 0x4a000040); -} - -#endif - -#ifdef CONFIG_CPU_S3C2440 -static struct s3c_irq_data init_s3c2440base[32] = { - { .type = S3C_IRQTYPE_EINT, }, /* EINT0 */ - { .type = S3C_IRQTYPE_EINT, }, /* EINT1 */ - { .type = S3C_IRQTYPE_EINT, }, /* EINT2 */ - { .type = S3C_IRQTYPE_EINT, }, /* EINT3 */ - { .type = S3C_IRQTYPE_LEVEL, }, /* EINT4to7 */ - { .type = S3C_IRQTYPE_LEVEL, }, /* EINT8to23 */ - { .type = S3C_IRQTYPE_LEVEL, }, /* CAM */ - { .type = S3C_IRQTYPE_EDGE, }, /* nBATT_FLT */ - { .type = S3C_IRQTYPE_EDGE, }, /* TICK */ - { .type = S3C_IRQTYPE_LEVEL, }, /* WDT/AC97 */ - { .type = S3C_IRQTYPE_EDGE, }, /* TIMER0 */ - { .type = S3C_IRQTYPE_EDGE, }, /* TIMER1 */ - { .type = S3C_IRQTYPE_EDGE, }, /* TIMER2 */ - { .type = S3C_IRQTYPE_EDGE, }, /* TIMER3 */ - { .type = S3C_IRQTYPE_EDGE, }, /* TIMER4 */ - { .type = S3C_IRQTYPE_LEVEL, }, /* UART2 */ - { .type = S3C_IRQTYPE_EDGE, }, /* LCD */ - { .type = S3C_IRQTYPE_EDGE, }, /* DMA0 */ - { .type = S3C_IRQTYPE_EDGE, }, /* DMA1 */ - { .type = S3C_IRQTYPE_EDGE, }, /* DMA2 */ - { .type = S3C_IRQTYPE_EDGE, }, /* DMA3 */ - { .type = S3C_IRQTYPE_EDGE, }, /* SDI */ - { .type = S3C_IRQTYPE_EDGE, }, /* SPI0 */ - { .type = S3C_IRQTYPE_LEVEL, }, /* UART1 */ - { .type = S3C_IRQTYPE_LEVEL, }, /* NFCON */ - { .type = S3C_IRQTYPE_EDGE, }, /* USBD */ - { .type = S3C_IRQTYPE_EDGE, }, /* USBH */ - { .type = S3C_IRQTYPE_EDGE, }, /* IIC */ - { .type = S3C_IRQTYPE_LEVEL, }, /* UART0 */ - { .type = S3C_IRQTYPE_EDGE, }, /* SPI1 */ - { .type = S3C_IRQTYPE_EDGE, }, /* RTC */ - { .type = S3C_IRQTYPE_LEVEL, }, /* ADCPARENT */ -}; - -static struct s3c_irq_data init_s3c2440subint[32] = { - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-RX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-TX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-ERR */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-RX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-TX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-ERR */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-RX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-TX */ - { .type = 
S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-ERR */ - { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* TC */ - { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* ADC */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 6 }, /* CAM_C */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 6 }, /* CAM_P */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 9 }, /* WDT */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 9 }, /* AC97 */ -}; - -void __init s3c2440_init_irq(void) -{ - pr_info("S3C2440: IRQ Support\n"); - -#ifdef CONFIG_FIQ - init_FIQ(FIQ_START); -#endif - - s3c_intc[0] = s3c24xx_init_intc(NULL, &init_s3c2440base[0], NULL, - 0x4a000000); - if (IS_ERR(s3c_intc[0])) { - pr_err("irq: could not create main interrupt controller\n"); - return; - } - - s3c24xx_init_intc(NULL, &init_eint[0], s3c_intc[0], 0x560000a4); - s3c_intc[1] = s3c24xx_init_intc(NULL, &init_s3c2440subint[0], - s3c_intc[0], 0x4a000018); -} -#endif - -#ifdef CONFIG_CPU_S3C2442 -static struct s3c_irq_data init_s3c2442base[32] = { - { .type = S3C_IRQTYPE_EINT, }, /* EINT0 */ - { .type = S3C_IRQTYPE_EINT, }, /* EINT1 */ - { .type = S3C_IRQTYPE_EINT, }, /* EINT2 */ - { .type = S3C_IRQTYPE_EINT, }, /* EINT3 */ - { .type = S3C_IRQTYPE_LEVEL, }, /* EINT4to7 */ - { .type = S3C_IRQTYPE_LEVEL, }, /* EINT8to23 */ - { .type = S3C_IRQTYPE_LEVEL, }, /* CAM */ - { .type = S3C_IRQTYPE_EDGE, }, /* nBATT_FLT */ - { .type = S3C_IRQTYPE_EDGE, }, /* TICK */ - { .type = S3C_IRQTYPE_EDGE, }, /* WDT */ - { .type = S3C_IRQTYPE_EDGE, }, /* TIMER0 */ - { .type = S3C_IRQTYPE_EDGE, }, /* TIMER1 */ - { .type = S3C_IRQTYPE_EDGE, }, /* TIMER2 */ - { .type = S3C_IRQTYPE_EDGE, }, /* TIMER3 */ - { .type = S3C_IRQTYPE_EDGE, }, /* TIMER4 */ - { .type = S3C_IRQTYPE_LEVEL, }, /* UART2 */ - { .type = S3C_IRQTYPE_EDGE, }, /* LCD */ - { .type = S3C_IRQTYPE_EDGE, }, /* DMA0 */ - { .type = S3C_IRQTYPE_EDGE, }, /* DMA1 */ - { .type = S3C_IRQTYPE_EDGE, }, /* DMA2 */ - { .type = S3C_IRQTYPE_EDGE, }, /* DMA3 */ - { .type = S3C_IRQTYPE_EDGE, }, /* SDI */ - { .type = S3C_IRQTYPE_EDGE, }, /* SPI0 */ - { .type = S3C_IRQTYPE_LEVEL, }, /* UART1 */ - { .type = S3C_IRQTYPE_LEVEL, }, /* NFCON */ - { .type = S3C_IRQTYPE_EDGE, }, /* USBD */ - { .type = S3C_IRQTYPE_EDGE, }, /* USBH */ - { .type = S3C_IRQTYPE_EDGE, }, /* IIC */ - { .type = S3C_IRQTYPE_LEVEL, }, /* UART0 */ - { .type = S3C_IRQTYPE_EDGE, }, /* SPI1 */ - { .type = S3C_IRQTYPE_EDGE, }, /* RTC */ - { .type = S3C_IRQTYPE_LEVEL, }, /* ADCPARENT */ -}; - -static struct s3c_irq_data init_s3c2442subint[32] = { - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-RX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-TX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-ERR */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-RX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-TX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-ERR */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-RX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-TX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-ERR */ - { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* TC */ - { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* ADC */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 6 }, /* CAM_C */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 6 }, /* CAM_P */ -}; - -void __init s3c2442_init_irq(void) -{ - pr_info("S3C2442: IRQ Support\n"); - -#ifdef CONFIG_FIQ - init_FIQ(FIQ_START); -#endif - - s3c_intc[0] = s3c24xx_init_intc(NULL, &init_s3c2442base[0], NULL, - 
0x4a000000); - if (IS_ERR(s3c_intc[0])) { - pr_err("irq: could not create main interrupt controller\n"); - return; - } - - s3c24xx_init_intc(NULL, &init_eint[0], s3c_intc[0], 0x560000a4); - s3c_intc[1] = s3c24xx_init_intc(NULL, &init_s3c2442subint[0], - s3c_intc[0], 0x4a000018); -} -#endif - -#ifdef CONFIG_CPU_S3C2443 -static struct s3c_irq_data init_s3c2443base[32] = { - { .type = S3C_IRQTYPE_EINT, }, /* EINT0 */ - { .type = S3C_IRQTYPE_EINT, }, /* EINT1 */ - { .type = S3C_IRQTYPE_EINT, }, /* EINT2 */ - { .type = S3C_IRQTYPE_EINT, }, /* EINT3 */ - { .type = S3C_IRQTYPE_LEVEL, }, /* EINT4to7 */ - { .type = S3C_IRQTYPE_LEVEL, }, /* EINT8to23 */ - { .type = S3C_IRQTYPE_LEVEL, }, /* CAM */ - { .type = S3C_IRQTYPE_EDGE, }, /* nBATT_FLT */ - { .type = S3C_IRQTYPE_EDGE, }, /* TICK */ - { .type = S3C_IRQTYPE_LEVEL, }, /* WDT/AC97 */ - { .type = S3C_IRQTYPE_EDGE, }, /* TIMER0 */ - { .type = S3C_IRQTYPE_EDGE, }, /* TIMER1 */ - { .type = S3C_IRQTYPE_EDGE, }, /* TIMER2 */ - { .type = S3C_IRQTYPE_EDGE, }, /* TIMER3 */ - { .type = S3C_IRQTYPE_EDGE, }, /* TIMER4 */ - { .type = S3C_IRQTYPE_LEVEL, }, /* UART2 */ - { .type = S3C_IRQTYPE_LEVEL, }, /* LCD */ - { .type = S3C_IRQTYPE_LEVEL, }, /* DMA */ - { .type = S3C_IRQTYPE_LEVEL, }, /* UART3 */ - { .type = S3C_IRQTYPE_EDGE, }, /* CFON */ - { .type = S3C_IRQTYPE_EDGE, }, /* SDI1 */ - { .type = S3C_IRQTYPE_EDGE, }, /* SDI0 */ - { .type = S3C_IRQTYPE_EDGE, }, /* SPI0 */ - { .type = S3C_IRQTYPE_LEVEL, }, /* UART1 */ - { .type = S3C_IRQTYPE_EDGE, }, /* NAND */ - { .type = S3C_IRQTYPE_EDGE, }, /* USBD */ - { .type = S3C_IRQTYPE_EDGE, }, /* USBH */ - { .type = S3C_IRQTYPE_EDGE, }, /* IIC */ - { .type = S3C_IRQTYPE_LEVEL, }, /* UART0 */ - { .type = S3C_IRQTYPE_EDGE, }, /* SPI1 */ - { .type = S3C_IRQTYPE_EDGE, }, /* RTC */ - { .type = S3C_IRQTYPE_LEVEL, }, /* ADCPARENT */ -}; - - -static struct s3c_irq_data init_s3c2443subint[32] = { - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-RX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-TX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-ERR */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-RX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-TX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-ERR */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-RX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-TX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-ERR */ - { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* TC */ - { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* ADC */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 6 }, /* CAM_C */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 6 }, /* CAM_P */ - { .type = S3C_IRQTYPE_NONE }, /* reserved */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD1 */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD2 */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD3 */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD4 */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA0 */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA1 */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA2 */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA3 */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA4 */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA5 */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 18 }, /* UART3-RX */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 18 }, /* UART3-TX */ - { .type = 
S3C_IRQTYPE_LEVEL, .parent_irq = 18 }, /* UART3-ERR */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 9 }, /* WDT */ - { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 9 }, /* AC97 */ -}; - -void __init s3c2443_init_irq(void) -{ - pr_info("S3C2443: IRQ Support\n"); - -#ifdef CONFIG_FIQ - init_FIQ(FIQ_START); -#endif - - s3c_intc[0] = s3c24xx_init_intc(NULL, &init_s3c2443base[0], NULL, - 0x4a000000); - if (IS_ERR(s3c_intc[0])) { - pr_err("irq: could not create main interrupt controller\n"); - return; - } - - s3c24xx_init_intc(NULL, &init_eint[0], s3c_intc[0], 0x560000a4); - s3c_intc[1] = s3c24xx_init_intc(NULL, &init_s3c2443subint[0], - s3c_intc[0], 0x4a000018); -} -#endif - -#ifdef CONFIG_OF -static int s3c24xx_irq_map_of(struct irq_domain *h, unsigned int virq, - irq_hw_number_t hw) -{ - unsigned int ctrl_num = hw / 32; - unsigned int intc_hw = hw % 32; - struct s3c_irq_intc *intc = s3c_intc[ctrl_num]; - struct s3c_irq_intc *parent_intc = intc->parent; - struct s3c_irq_data *irq_data = &intc->irqs[intc_hw]; - - /* attach controller pointer to irq_data */ - irq_data->intc = intc; - irq_data->offset = intc_hw; - - if (!parent_intc) - irq_set_chip_and_handler(virq, &s3c_irq_chip, handle_edge_irq); - else - irq_set_chip_and_handler(virq, &s3c_irq_level_chip, - handle_edge_irq); - - irq_set_chip_data(virq, irq_data); - - return 0; -} - -/* Translate our of irq notation - * format: <ctrl_num ctrl_irq parent_irq type> - */ -static int s3c24xx_irq_xlate_of(struct irq_domain *d, struct device_node *n, - const u32 *intspec, unsigned int intsize, - irq_hw_number_t *out_hwirq, unsigned int *out_type) -{ - struct s3c_irq_intc *intc; - struct s3c_irq_intc *parent_intc; - struct s3c_irq_data *irq_data; - struct s3c_irq_data *parent_irq_data; - int irqno; - - if (WARN_ON(intsize < 4)) - return -EINVAL; - - if (intspec[0] > 2 || !s3c_intc[intspec[0]]) { - pr_err("controller number %d invalid\n", intspec[0]); - return -EINVAL; - } - intc = s3c_intc[intspec[0]]; - - *out_hwirq = intspec[0] * 32 + intspec[2]; - *out_type = intspec[3] & IRQ_TYPE_SENSE_MASK; - - parent_intc = intc->parent; - if (parent_intc) { - irq_data = &intc->irqs[intspec[2]]; - irq_data->parent_irq = intspec[1]; - parent_irq_data = &parent_intc->irqs[irq_data->parent_irq]; - parent_irq_data->sub_intc = intc; - parent_irq_data->sub_bits |= (1UL << intspec[2]); - - /* parent_intc is always s3c_intc[0], so no offset */ - irqno = irq_create_mapping(parent_intc->domain, intspec[1]); - if (irqno < 0) { - pr_err("irq: could not map parent interrupt\n"); - return irqno; - } - - irq_set_chained_handler(irqno, s3c_irq_demux); - } - - return 0; -} - -static const struct irq_domain_ops s3c24xx_irq_ops_of = { - .map = s3c24xx_irq_map_of, - .xlate = s3c24xx_irq_xlate_of, -}; - -struct s3c24xx_irq_of_ctrl { - char *name; - unsigned long offset; - struct s3c_irq_intc **handle; - struct s3c_irq_intc **parent; - struct irq_domain_ops *ops; -}; - -static int __init s3c_init_intc_of(struct device_node *np, - struct device_node *interrupt_parent, - struct s3c24xx_irq_of_ctrl *s3c_ctrl, int num_ctrl) -{ - struct s3c_irq_intc *intc; - struct s3c24xx_irq_of_ctrl *ctrl; - struct irq_domain *domain; - void __iomem *reg_base; - int i; - - reg_base = of_iomap(np, 0); - if (!reg_base) { - pr_err("irq-s3c24xx: could not map irq registers\n"); - return -EINVAL; - } - - domain = irq_domain_add_linear(np, num_ctrl * 32, - &s3c24xx_irq_ops_of, NULL); - if (!domain) { - pr_err("irq: could not create irq-domain\n"); - return -EINVAL; - } - - for (i = 0; i < num_ctrl; i++) { - ctrl 
= &s3c_ctrl[i]; - - pr_debug("irq: found controller %s\n", ctrl->name); - - intc = kzalloc(sizeof(struct s3c_irq_intc), GFP_KERNEL); - if (!intc) - return -ENOMEM; - - intc->domain = domain; - intc->irqs = kcalloc(32, sizeof(struct s3c_irq_data), - GFP_KERNEL); - if (!intc->irqs) { - kfree(intc); - return -ENOMEM; - } - - if (ctrl->parent) { - intc->reg_pending = reg_base + ctrl->offset; - intc->reg_mask = reg_base + ctrl->offset + 0x4; - - if (*(ctrl->parent)) { - intc->parent = *(ctrl->parent); - } else { - pr_warn("irq: parent of %s missing\n", - ctrl->name); - kfree(intc->irqs); - kfree(intc); - continue; - } - } else { - intc->reg_pending = reg_base + ctrl->offset; - intc->reg_mask = reg_base + ctrl->offset + 0x08; - intc->reg_intpnd = reg_base + ctrl->offset + 0x10; - } - - s3c24xx_clear_intc(intc); - s3c_intc[i] = intc; - } - - set_handle_irq(s3c24xx_handle_irq); - - return 0; -} - -static struct s3c24xx_irq_of_ctrl s3c2410_ctrl[] = { - { - .name = "intc", - .offset = 0, - }, { - .name = "subintc", - .offset = 0x18, - .parent = &s3c_intc[0], - } -}; - -int __init s3c2410_init_intc_of(struct device_node *np, - struct device_node *interrupt_parent) -{ - return s3c_init_intc_of(np, interrupt_parent, - s3c2410_ctrl, ARRAY_SIZE(s3c2410_ctrl)); -} -IRQCHIP_DECLARE(s3c2410_irq, "samsung,s3c2410-irq", s3c2410_init_intc_of); - -static struct s3c24xx_irq_of_ctrl s3c2416_ctrl[] = { - { - .name = "intc", - .offset = 0, - }, { - .name = "subintc", - .offset = 0x18, - .parent = &s3c_intc[0], - }, { - .name = "intc2", - .offset = 0x40, - } -}; - -int __init s3c2416_init_intc_of(struct device_node *np, - struct device_node *interrupt_parent) -{ - return s3c_init_intc_of(np, interrupt_parent, - s3c2416_ctrl, ARRAY_SIZE(s3c2416_ctrl)); -} -IRQCHIP_DECLARE(s3c2416_irq, "samsung,s3c2416-irq", s3c2416_init_intc_of); -#endif diff --git a/drivers/irqchip/irq-sa11x0.c b/drivers/irqchip/irq-sa11x0.c index 61bb28d7b19b..e5f24c5f3f41 100644 --- a/drivers/irqchip/irq-sa11x0.c +++ b/drivers/irqchip/irq-sa11x0.c @@ -1,12 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2015 Dmitry Eremin-Solenikov * Copyright (C) 1999-2001 Nicolas Pitre * * Generic IRQ handling for the SA11x0. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
 */

 #include <linux/init.h>
 #include <linux/module.h>
@@ -88,7 +85,7 @@ static struct sa1100irq_state {
 	unsigned int	iccr;
 } sa1100irq_state;

-static int sa1100irq_suspend(void)
+static int sa1100irq_suspend(void *data)
 {
 	struct sa1100irq_state *st = &sa1100irq_state;

@@ -105,7 +102,7 @@ static int sa1100irq_suspend(void)
 	return 0;
 }

-static void sa1100irq_resume(void)
+static void sa1100irq_resume(void *data)
 {
 	struct sa1100irq_state *st = &sa1100irq_state;

@@ -117,21 +114,24 @@ static void sa1100irq_resume(void)
 	}
 }

-static struct syscore_ops sa1100irq_syscore_ops = {
+static const struct syscore_ops sa1100irq_syscore_ops = {
 	.suspend	= sa1100irq_suspend,
 	.resume		= sa1100irq_resume,
 };

+static struct syscore sa1100irq_syscore = {
+	.ops = &sa1100irq_syscore_ops,
+};
+
 static int __init sa1100irq_init_devicefs(void)
 {
-	register_syscore_ops(&sa1100irq_syscore_ops);
+	register_syscore(&sa1100irq_syscore);

 	return 0;
 }

 device_initcall(sa1100irq_init_devicefs);

-static asmlinkage void __exception_irq_entry
-sa1100_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry sa1100_handle_irq(struct pt_regs *regs)
 {
 	uint32_t icip, icmr, mask;

@@ -143,8 +143,8 @@ sa1100_handle_irq(struct pt_regs *regs)
 		if (mask == 0)
 			break;

-		handle_domain_irq(sa1100_normal_irqdomain,
-				ffs(mask) - 1, regs);
+		generic_handle_domain_irq(sa1100_normal_irqdomain,
+					  ffs(mask) - 1);
 	} while (1);
 }

@@ -166,7 +166,7 @@ void __init sa11x0_init_irq_nodt(int irq_start, resource_size_t io_start)
 	 */
 	writel_relaxed(1, iobase + ICCR);

-	sa1100_normal_irqdomain = irq_domain_add_simple(NULL,
+	sa1100_normal_irqdomain = irq_domain_create_simple(NULL,
 			32, irq_start,
 			&sa1100_normal_irqdomain_ops, NULL);
diff --git a/drivers/irqchip/irq-sg2042-msi.c b/drivers/irqchip/irq-sg2042-msi.c
new file mode 100644
index 000000000000..f7cf0dc72eab
--- /dev/null
+++ b/drivers/irqchip/irq-sg2042-msi.c
@@ -0,0 +1,340 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SG2042 MSI Controller
+ *
+ * Copyright (C) 2024 Sophgo Technology Inc.
+ * Copyright (C) 2024 Chen Wang <unicorn_wang@outlook.com>
+ */
+
+#include <linux/cleanup.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+
+#include <linux/irqchip/irq-msi-lib.h>
+
+struct sg204x_msi_chip_info {
+	const struct irq_chip		*irqchip;
+	const struct msi_parent_ops	*parent_ops;
+};
+
+/**
+ * struct sg204x_msi_chipdata - chip data for the SG204x MSI IRQ controller
+ * @reg_clr:		clear reg, see TRM, 10.1.33, GP_INTR0_CLR
+ * @doorbell_addr:	see TRM, 10.1.32, GP_INTR0_SET
+ * @irq_first:		first vector number at which MSIs start
+ * @num_irqs:		number of vectors for MSIs
+ * @irq_type:		IRQ type for MSIs
+ * @msi_map:		bitmap of allocated MSI vectors
+ * @msi_map_lock:	lock for msi_map
+ * @chip_info:		chip-specific information
+ */
+struct sg204x_msi_chipdata {
+	void __iomem	*reg_clr;
+
+	phys_addr_t	doorbell_addr;
+
+	u32		irq_first;
+	u32		num_irqs;
+	unsigned int	irq_type;
+
+	unsigned long	*msi_map;
+	struct mutex	msi_map_lock;
+
+	const struct sg204x_msi_chip_info *chip_info;
+};
+
+static int sg204x_msi_allocate_hwirq(struct sg204x_msi_chipdata *data, int num_req)
+{
+	int first;
+
+	guard(mutex)(&data->msi_map_lock);
+	first = bitmap_find_free_region(data->msi_map, data->num_irqs,
+					get_count_order(num_req));
+	return first >= 0 ?
first : -ENOSPC; +} + +static void sg204x_msi_free_hwirq(struct sg204x_msi_chipdata *data, int hwirq, int num_req) +{ + guard(mutex)(&data->msi_map_lock); + bitmap_release_region(data->msi_map, hwirq, get_count_order(num_req)); +} + +static void sg2042_msi_irq_ack(struct irq_data *d) +{ + struct sg204x_msi_chipdata *data = irq_data_get_irq_chip_data(d); + int bit_off = d->hwirq; + + writel(1 << bit_off, data->reg_clr); + + irq_chip_ack_parent(d); +} + +static void sg2042_msi_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) +{ + struct sg204x_msi_chipdata *data = irq_data_get_irq_chip_data(d); + + msg->address_hi = upper_32_bits(data->doorbell_addr); + msg->address_lo = lower_32_bits(data->doorbell_addr); + msg->data = 1 << d->hwirq; +} + +static const struct irq_chip sg2042_msi_middle_irq_chip = { + .name = "SG2042 MSI", + .irq_startup = irq_chip_startup_parent, + .irq_shutdown = irq_chip_shutdown_parent, + .irq_ack = sg2042_msi_irq_ack, + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, +#ifdef CONFIG_SMP + .irq_set_affinity = irq_chip_set_affinity_parent, +#endif + .irq_compose_msi_msg = sg2042_msi_irq_compose_msi_msg, +}; + +static void sg2044_msi_irq_ack(struct irq_data *d) +{ + struct sg204x_msi_chipdata *data = irq_data_get_irq_chip_data(d); + + writel(0, (u32 __iomem *)data->reg_clr + d->hwirq); + irq_chip_ack_parent(d); +} + +static void sg2044_msi_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) +{ + struct sg204x_msi_chipdata *data = irq_data_get_irq_chip_data(d); + phys_addr_t doorbell = data->doorbell_addr + 4 * (d->hwirq / 32); + + msg->address_lo = lower_32_bits(doorbell); + msg->address_hi = upper_32_bits(doorbell); + msg->data = d->hwirq % 32; +} + +static struct irq_chip sg2044_msi_middle_irq_chip = { + .name = "SG2044 MSI", + .irq_startup = irq_chip_startup_parent, + .irq_shutdown = irq_chip_shutdown_parent, + .irq_ack = sg2044_msi_irq_ack, + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, +#ifdef CONFIG_SMP + .irq_set_affinity = irq_chip_set_affinity_parent, +#endif + .irq_compose_msi_msg = sg2044_msi_irq_compose_msi_msg, +}; + +static int sg204x_msi_parent_domain_alloc(struct irq_domain *domain, unsigned int virq, int hwirq) +{ + struct sg204x_msi_chipdata *data = domain->host_data; + struct irq_fwspec fwspec; + struct irq_data *d; + int ret; + + fwspec.fwnode = domain->parent->fwnode; + fwspec.param_count = 2; + fwspec.param[0] = data->irq_first + hwirq; + fwspec.param[1] = data->irq_type; + + ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); + if (ret) + return ret; + + d = irq_domain_get_irq_data(domain->parent, virq); + return d->chip->irq_set_type(d, data->irq_type); +} + +static int sg204x_msi_middle_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *args) +{ + struct sg204x_msi_chipdata *data = domain->host_data; + int hwirq, err, i; + + hwirq = sg204x_msi_allocate_hwirq(data, nr_irqs); + if (hwirq < 0) + return hwirq; + + for (i = 0; i < nr_irqs; i++) { + err = sg204x_msi_parent_domain_alloc(domain, virq + i, hwirq + i); + if (err) + goto err_hwirq; + + irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, + data->chip_info->irqchip, data); + } + return 0; + +err_hwirq: + sg204x_msi_free_hwirq(data, hwirq, nr_irqs); + irq_domain_free_irqs_parent(domain, virq, i); + return err; +} + +static void sg204x_msi_middle_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + struct irq_data *d = 
irq_domain_get_irq_data(domain, virq);
+	struct sg204x_msi_chipdata *data = irq_data_get_irq_chip_data(d);
+
+	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+	sg204x_msi_free_hwirq(data, d->hwirq, nr_irqs);
+}
+
+static const struct irq_domain_ops sg204x_msi_middle_domain_ops = {
+	.alloc	= sg204x_msi_middle_domain_alloc,
+	.free	= sg204x_msi_middle_domain_free,
+	.select	= msi_lib_irq_domain_select,
+};
+
+#define SG2042_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS |		\
+				   MSI_FLAG_USE_DEF_CHIP_OPS |		\
+				   MSI_FLAG_PCI_MSI_MASK_PARENT |	\
+				   MSI_FLAG_PCI_MSI_STARTUP_PARENT)
+
+#define SG2042_MSI_FLAGS_SUPPORTED MSI_GENERIC_FLAGS_MASK
+
+static const struct msi_parent_ops sg2042_msi_parent_ops = {
+	.required_flags		= SG2042_MSI_FLAGS_REQUIRED,
+	.supported_flags	= SG2042_MSI_FLAGS_SUPPORTED,
+	.chip_flags		= MSI_CHIP_FLAG_SET_ACK,
+	.bus_select_mask	= MATCH_PCI_MSI,
+	.bus_select_token	= DOMAIN_BUS_NEXUS,
+	.prefix			= "SG2042-",
+	.init_dev_msi_info	= msi_lib_init_dev_msi_info,
+};
+
+#define SG2044_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS |		\
+				   MSI_FLAG_USE_DEF_CHIP_OPS |		\
+				   MSI_FLAG_PCI_MSI_MASK_PARENT |	\
+				   MSI_FLAG_PCI_MSI_STARTUP_PARENT)
+
+#define SG2044_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK |	\
+				    MSI_FLAG_MULTI_PCI_MSI |	\
+				    MSI_FLAG_PCI_MSIX)
+
+static const struct msi_parent_ops sg2044_msi_parent_ops = {
+	.required_flags		= SG2044_MSI_FLAGS_REQUIRED,
+	.supported_flags	= SG2044_MSI_FLAGS_SUPPORTED,
+	.chip_flags		= MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
+	.bus_select_mask	= MATCH_PCI_MSI,
+	.bus_select_token	= DOMAIN_BUS_NEXUS,
+	.prefix			= "SG2044-",
+	.init_dev_msi_info	= msi_lib_init_dev_msi_info,
+};
+
+static int sg204x_msi_init_domains(struct sg204x_msi_chipdata *data,
+				   struct irq_domain *plic_domain, struct device *dev)
+{
+	struct irq_domain_info info = {
+		.ops		= &sg204x_msi_middle_domain_ops,
+		.parent		= plic_domain,
+		.size		= data->num_irqs,
+		.fwnode		= dev_fwnode(dev),
+		.host_data	= data,
+	};
+
+	if (!msi_create_parent_irq_domain(&info, data->chip_info->parent_ops)) {
+		pr_err("Failed to create the MSI middle domain\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static int sg2042_msi_probe(struct platform_device *pdev)
+{
+	struct fwnode_reference_args args = { };
+	struct sg204x_msi_chipdata *data;
+	struct device *dev = &pdev->dev;
+	struct irq_domain *plic_domain;
+	struct resource *res;
+	int ret;
+
+	data = devm_kzalloc(dev, sizeof(struct sg204x_msi_chipdata), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->chip_info = device_get_match_data(&pdev->dev);
+	if (!data->chip_info) {
+		dev_err(&pdev->dev, "Failed to get irqchip\n");
+		return -EINVAL;
+	}
+
+	data->reg_clr = devm_platform_ioremap_resource_byname(pdev, "clr");
+	if (IS_ERR(data->reg_clr)) {
+		dev_err(dev, "Failed to map clear register\n");
+		return PTR_ERR(data->reg_clr);
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "doorbell");
+	if (!res) {
+		dev_err(dev, "Failed to get doorbell resource\n");
+		return -EINVAL;
+	}
+	data->doorbell_addr = res->start;
+
+	ret = fwnode_property_get_reference_args(dev_fwnode(dev), "msi-ranges",
+						 "#interrupt-cells", 0, 0, &args);
+	if (ret) {
+		dev_err(dev, "Unable to parse MSI vec base\n");
+		return ret;
+	}
+	fwnode_handle_put(args.fwnode);
+
+	ret = fwnode_property_get_reference_args(dev_fwnode(dev), "msi-ranges", NULL,
+						 args.nargs + 1, 0, &args);
+	if (ret) {
+		dev_err(dev, "Unable to parse MSI vec number\n");
+		return ret;
+	}
+
+	plic_domain = irq_find_matching_fwnode(args.fwnode, DOMAIN_BUS_ANY);
+
fwnode_handle_put(args.fwnode); + if (!plic_domain) { + pr_err("Failed to find the PLIC domain\n"); + return -ENXIO; + } + + data->irq_first = (u32)args.args[0]; + data->irq_type = (unsigned int)args.args[1]; + data->num_irqs = (u32)args.args[args.nargs - 1]; + + mutex_init(&data->msi_map_lock); + + data->msi_map = devm_bitmap_zalloc(&pdev->dev, data->num_irqs, GFP_KERNEL); + if (!data->msi_map) { + dev_err(&pdev->dev, "Unable to allocate msi mapping\n"); + return -ENOMEM; + } + + return sg204x_msi_init_domains(data, plic_domain, dev); +} + +static const struct sg204x_msi_chip_info sg2042_chip_info = { + .irqchip = &sg2042_msi_middle_irq_chip, + .parent_ops = &sg2042_msi_parent_ops, +}; + +static const struct sg204x_msi_chip_info sg2044_chip_info = { + .irqchip = &sg2044_msi_middle_irq_chip, + .parent_ops = &sg2044_msi_parent_ops, +}; + +static const struct of_device_id sg2042_msi_of_match[] = { + { .compatible = "sophgo,sg2042-msi", .data = &sg2042_chip_info }, + { .compatible = "sophgo,sg2044-msi", .data = &sg2044_chip_info }, + { } +}; + +static struct platform_driver sg2042_msi_driver = { + .driver = { + .name = "sg2042-msi", + .of_match_table = sg2042_msi_of_match, + }, + .probe = sg2042_msi_probe, +}; +builtin_platform_driver(sg2042_msi_driver); diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c index 357e9daf94ae..210a57959637 100644 --- a/drivers/irqchip/irq-sifive-plic.c +++ b/drivers/irqchip/irq-sifive-plic.c @@ -3,11 +3,14 @@ * Copyright (C) 2017 SiFive * Copyright (C) 2018 Christoph Hellwig */ -#define pr_fmt(fmt) "plic: " fmt +#define pr_fmt(fmt) "riscv-plic: " fmt +#include <linux/acpi.h> +#include <linux/cpu.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/module.h> #include <linux/of.h> @@ -15,6 +18,7 @@ #include <linux/of_irq.h> #include <linux/platform_device.h> #include <linux/spinlock.h> +#include <linux/syscore_ops.h> #include <asm/smp.h> /* @@ -42,8 +46,10 @@ * Each hart context has a vector of interrupt enable bits associated with it. * There's one bit for each interrupt source. */ -#define ENABLE_BASE 0x2000 -#define ENABLE_PER_HART 0x80 +#define CONTEXT_ENABLE_BASE 0x2000 +#define CONTEXT_ENABLE_SIZE 0x80 + +#define PENDING_BASE 0x1000 /* * Each hart context has a set of control registers associated with it. Right @@ -51,212 +57,779 @@ * take an interrupt, and a register to claim interrupts. */ #define CONTEXT_BASE 0x200000 -#define CONTEXT_PER_HART 0x1000 +#define CONTEXT_SIZE 0x1000 #define CONTEXT_THRESHOLD 0x00 #define CONTEXT_CLAIM 0x04 -static void __iomem *plic_regs; +#define PLIC_DISABLE_THRESHOLD 0x7 +#define PLIC_ENABLE_THRESHOLD 0 + +#define PLIC_QUIRK_EDGE_INTERRUPT 0 +#define PLIC_QUIRK_CP100_CLAIM_REGISTER_ERRATUM 1 + +struct plic_priv { + struct fwnode_handle *fwnode; + struct cpumask lmask; + struct irq_domain *irqdomain; + void __iomem *regs; + unsigned long plic_quirks; + unsigned int nr_irqs; + unsigned long *prio_save; + u32 gsi_base; + int acpi_plic_id; +}; struct plic_handler { bool present; - int ctxid; + void __iomem *hart_base; + /* + * Protect mask operations on the registers given that we can't + * assume atomic memory operations work on them. 
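+	 * plic_toggle() therefore takes this lock around a read-modify-write
+	 * of each 32-bit enable word, and mirrors the written value into
+	 * enable_save so the syscore resume path can replay it.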
+ */ + raw_spinlock_t enable_lock; + void __iomem *enable_base; + u32 *enable_save; + struct plic_priv *priv; }; +static int plic_parent_irq __ro_after_init; +static bool plic_global_setup_done __ro_after_init; static DEFINE_PER_CPU(struct plic_handler, plic_handlers); -static inline void __iomem *plic_hart_offset(int ctxid) -{ - return plic_regs + CONTEXT_BASE + ctxid * CONTEXT_PER_HART; -} - -static inline u32 __iomem *plic_enable_base(int ctxid) -{ - return plic_regs + ENABLE_BASE + ctxid * ENABLE_PER_HART; -} +static int plic_irq_set_type(struct irq_data *d, unsigned int type); -/* - * Protect mask operations on the registers given that we can't assume that - * atomic memory operations work on them. - */ -static DEFINE_RAW_SPINLOCK(plic_toggle_lock); - -static inline void plic_toggle(int ctxid, int hwirq, int enable) +static void __plic_toggle(struct plic_handler *handler, int hwirq, int enable) { - u32 __iomem *reg = plic_enable_base(ctxid) + (hwirq / 32); + u32 __iomem *base = handler->enable_base; u32 hwirq_mask = 1 << (hwirq % 32); + int group = hwirq / 32; + u32 value; + + value = readl(base + group); - raw_spin_lock(&plic_toggle_lock); if (enable) - writel(readl(reg) | hwirq_mask, reg); + value |= hwirq_mask; else - writel(readl(reg) & ~hwirq_mask, reg); - raw_spin_unlock(&plic_toggle_lock); + value &= ~hwirq_mask; + + handler->enable_save[group] = value; + writel(value, base + group); } -static inline void plic_irq_toggle(struct irq_data *d, int enable) +static void plic_toggle(struct plic_handler *handler, int hwirq, int enable) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&handler->enable_lock, flags); + __plic_toggle(handler, hwirq, enable); + raw_spin_unlock_irqrestore(&handler->enable_lock, flags); +} + +static inline void plic_irq_toggle(const struct cpumask *mask, + struct irq_data *d, int enable) { int cpu; - writel(enable, plic_regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID); - for_each_cpu(cpu, irq_data_get_affinity_mask(d)) { + for_each_cpu(cpu, mask) { struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu); - if (handler->present) - plic_toggle(handler->ctxid, d->hwirq, enable); + plic_toggle(handler, d->hwirq, enable); } } +static void plic_irq_unmask(struct irq_data *d) +{ + struct plic_priv *priv = irq_data_get_irq_chip_data(d); + + writel(1, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID); +} + +static void plic_irq_mask(struct irq_data *d) +{ + struct plic_priv *priv = irq_data_get_irq_chip_data(d); + + writel(0, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID); +} + static void plic_irq_enable(struct irq_data *d) { - plic_irq_toggle(d, 1); + plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1); + plic_irq_unmask(d); } static void plic_irq_disable(struct irq_data *d) { - plic_irq_toggle(d, 0); + plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0); } +static void plic_irq_eoi(struct irq_data *d) +{ + struct plic_handler *handler = this_cpu_ptr(&plic_handlers); + + if (unlikely(irqd_irq_disabled(d))) { + plic_toggle(handler, d->hwirq, 1); + writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM); + plic_toggle(handler, d->hwirq, 0); + } else { + writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM); + } +} + +#ifdef CONFIG_SMP +static int plic_set_affinity(struct irq_data *d, + const struct cpumask *mask_val, bool force) +{ + unsigned int cpu; + struct plic_priv *priv = irq_data_get_irq_chip_data(d); + + if (force) + cpu = cpumask_first_and(&priv->lmask, mask_val); + else + cpu = 
cpumask_first_and_and(&priv->lmask, mask_val, cpu_online_mask); + + if (cpu >= nr_cpu_ids) + return -EINVAL; + + /* Invalidate the original routing entry */ + plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0); + + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + + /* Setting the new routing entry if irq is enabled */ + if (!irqd_irq_disabled(d)) + plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1); + + return IRQ_SET_MASK_OK_DONE; +} +#endif + +static struct irq_chip plic_edge_chip = { + .name = "SiFive PLIC", + .irq_enable = plic_irq_enable, + .irq_disable = plic_irq_disable, + .irq_ack = plic_irq_eoi, + .irq_mask = plic_irq_mask, + .irq_unmask = plic_irq_unmask, +#ifdef CONFIG_SMP + .irq_set_affinity = plic_set_affinity, +#endif + .irq_set_type = plic_irq_set_type, + .flags = IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_AFFINITY_PRE_STARTUP, +}; + static struct irq_chip plic_chip = { .name = "SiFive PLIC", - /* - * There is no need to mask/unmask PLIC interrupts. They are "masked" - * by reading claim and "unmasked" when writing it back. - */ .irq_enable = plic_irq_enable, .irq_disable = plic_irq_disable, + .irq_mask = plic_irq_mask, + .irq_unmask = plic_irq_unmask, + .irq_eoi = plic_irq_eoi, +#ifdef CONFIG_SMP + .irq_set_affinity = plic_set_affinity, +#endif + .irq_set_type = plic_irq_set_type, + .flags = IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_AFFINITY_PRE_STARTUP, +}; + +static int plic_irq_set_type(struct irq_data *d, unsigned int type) +{ + struct plic_priv *priv = irq_data_get_irq_chip_data(d); + + if (!test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks)) + return IRQ_SET_MASK_OK_NOCOPY; + + switch (type) { + case IRQ_TYPE_EDGE_RISING: + irq_set_chip_handler_name_locked(d, &plic_edge_chip, + handle_edge_irq, NULL); + break; + case IRQ_TYPE_LEVEL_HIGH: + irq_set_chip_handler_name_locked(d, &plic_chip, + handle_fasteoi_irq, NULL); + break; + default: + return -EINVAL; + } + + return IRQ_SET_MASK_OK; +} + +static int plic_irq_suspend(void *data) +{ + struct plic_priv *priv; + + priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv; + + /* irq ID 0 is reserved */ + for (unsigned int i = 1; i < priv->nr_irqs; i++) { + __assign_bit(i, priv->prio_save, + readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID)); + } + + return 0; +} + +static void plic_irq_resume(void *data) +{ + unsigned int i, index, cpu; + unsigned long flags; + u32 __iomem *reg; + struct plic_priv *priv; + + priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv; + + /* irq ID 0 is reserved */ + for (i = 1; i < priv->nr_irqs; i++) { + index = BIT_WORD(i); + writel((priv->prio_save[index] & BIT_MASK(i)) ? 
1 : 0, + priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID); + } + + for_each_present_cpu(cpu) { + struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu); + + if (!handler->present) + continue; + + raw_spin_lock_irqsave(&handler->enable_lock, flags); + for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) { + reg = handler->enable_base + i * sizeof(u32); + writel(handler->enable_save[i], reg); + } + raw_spin_unlock_irqrestore(&handler->enable_lock, flags); + } +} + +static const struct syscore_ops plic_irq_syscore_ops = { + .suspend = plic_irq_suspend, + .resume = plic_irq_resume, +}; + +static struct syscore plic_irq_syscore = { + .ops = &plic_irq_syscore_ops, }; static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq) { - irq_set_chip_and_handler(irq, &plic_chip, handle_simple_irq); - irq_set_chip_data(irq, NULL); + struct plic_priv *priv = d->host_data; + + irq_domain_set_info(d, irq, hwirq, &plic_chip, d->host_data, + handle_fasteoi_irq, NULL, NULL); irq_set_noprobe(irq); + irq_set_affinity(irq, &priv->lmask); + return 0; +} + +static int plic_irq_domain_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int *type) +{ + struct plic_priv *priv = d->host_data; + + /* For DT, gsi_base is always zero. */ + if (fwspec->param[0] >= priv->gsi_base) + fwspec->param[0] = fwspec->param[0] - priv->gsi_base; + + if (test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks)) + return irq_domain_translate_twocell(d, fwspec, hwirq, type); + + return irq_domain_translate_onecell(d, fwspec, hwirq, type); +} + +static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + int i, ret; + irq_hw_number_t hwirq; + unsigned int type; + struct irq_fwspec *fwspec = arg; + + ret = plic_irq_domain_translate(domain, fwspec, &hwirq, &type); + if (ret) + return ret; + + for (i = 0; i < nr_irqs; i++) { + ret = plic_irqdomain_map(domain, virq + i, hwirq + i); + if (ret) + return ret; + } + return 0; } static const struct irq_domain_ops plic_irqdomain_ops = { - .map = plic_irqdomain_map, - .xlate = irq_domain_xlate_onecell, + .translate = plic_irq_domain_translate, + .alloc = plic_irq_domain_alloc, + .free = irq_domain_free_irqs_top, }; -static struct irq_domain *plic_irqdomain; - /* * Handling an interrupt is a two-step process: first you claim the interrupt * by reading the claim register, then you complete the interrupt by writing * that source ID back to the same claim register. This automatically enables * and disables the interrupt, so there's nothing else to do. 
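 * A minimal sketch of one loop iteration, using the names from
 * plic_handle_irq() below (illustrative only):
 *
 *	hwirq = readl(claim);		claim the highest-priority pending source
 *	generic_handle_domain_irq(handler->priv->irqdomain, hwirq);
 *	writel(hwirq, claim);		complete it, re-enabling the source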
 */
-static void plic_handle_irq(struct pt_regs *regs)
+static void plic_handle_irq(struct irq_desc *desc)
 {
 	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
-	void __iomem *claim = plic_hart_offset(handler->ctxid) + CONTEXT_CLAIM;
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
 	irq_hw_number_t hwirq;

 	WARN_ON_ONCE(!handler->present);

-	csr_clear(sie, SIE_SEIE);
+	chained_irq_enter(chip, desc);
+
 	while ((hwirq = readl(claim))) {
-		int irq = irq_find_mapping(plic_irqdomain, hwirq);
-
-		if (unlikely(irq <= 0))
-			pr_warn_ratelimited("can't find mapping for hwirq %lu\n",
-					hwirq);
-		else
-			generic_handle_irq(irq);
-		writel(hwirq, claim);
+		int err = generic_handle_domain_irq(handler->priv->irqdomain,
+						    hwirq);
+		if (unlikely(err)) {
+			pr_warn_ratelimited("%pfwP: can't find mapping for hwirq %lu\n",
+					    handler->priv->fwnode, hwirq);
+		}
 	}
-	csr_set(sie, SIE_SEIE);
+
+	chained_irq_exit(chip, desc);
 }

-/*
- * Walk up the DT tree until we find an active RISC-V core (HART) node and
- * extract the cpuid from it.
- */
-static int plic_find_hart_id(struct device_node *node)
+static u32 cp100_isolate_pending_irq(int nr_irq_groups, struct plic_handler *handler)
 {
-	for (; node; node = node->parent) {
-		if (of_device_is_compatible(node, "riscv"))
-			return riscv_of_processor_hartid(node);
+	u32 __iomem *pending = handler->priv->regs + PENDING_BASE;
+	u32 __iomem *enable = handler->enable_base;
+	u32 pending_irqs = 0;
+	int i, j;
+
+	/* Look for first pending interrupt */
+	for (i = 0; i < nr_irq_groups; i++) {
+		/* Any pending interrupts would be annihilated, so skip checking them */
+		if (!handler->enable_save[i])
+			continue;
+
+		pending_irqs = handler->enable_save[i] & readl_relaxed(pending + i);
+		if (pending_irqs)
+			break;
 	}
-	return -1;
+	if (!pending_irqs)
+		return 0;
+
+	/* Isolate lowest set bit */
+	pending_irqs &= -pending_irqs;
+
+	/* Disable all interrupts but the first pending one */
+	for (j = 0; j < nr_irq_groups; j++) {
+		u32 new_mask = j == i ? pending_irqs : 0;
+
+		if (new_mask != handler->enable_save[j])
+			writel_relaxed(new_mask, enable + j);
+	}
+	return pending_irqs;
 }

-static int __init plic_init(struct device_node *node,
-		struct device_node *parent)
+static irq_hw_number_t cp100_get_hwirq(struct plic_handler *handler, void __iomem *claim)
 {
-	int error = 0, nr_handlers, nr_mapped = 0, i;
-	u32 nr_irqs;
+	int nr_irq_groups = DIV_ROUND_UP(handler->priv->nr_irqs, 32);
+	u32 __iomem *enable = handler->enable_base;
+	irq_hw_number_t hwirq = 0;
+	u32 iso_mask;
+	int i;
+
+	guard(raw_spinlock)(&handler->enable_lock);

-	if (plic_regs) {
-		pr_warn("PLIC already present.\n");
-		return -ENXIO;
+	/* Existing enable state is already cached in enable_save */
+	iso_mask = cp100_isolate_pending_irq(nr_irq_groups, handler);
+	if (!iso_mask)
+		return 0;
+
+	/*
+	 * Interrupts delivered to hardware still become pending, but only
+	 * interrupts that are both pending and enabled can be claimed.
+	 * Clearing the enable bit for all interrupts but the first pending
+	 * one avoids a hardware bug that occurs during a read from the claim
+	 * register with more than one eligible interrupt.
+	 */
+	hwirq = readl(claim);
+
+	/* Restore previous state */
+	for (i = 0; i < nr_irq_groups; i++) {
+		u32 written = i == hwirq / 32 ?
iso_mask : 0; + u32 stored = handler->enable_save[i]; + + if (stored != written) + writel_relaxed(stored, enable + i); } + return hwirq; +} + +static void plic_handle_irq_cp100(struct irq_desc *desc) +{ + struct plic_handler *handler = this_cpu_ptr(&plic_handlers); + struct irq_chip *chip = irq_desc_get_chip(desc); + void __iomem *claim = handler->hart_base + CONTEXT_CLAIM; + irq_hw_number_t hwirq; + + WARN_ON_ONCE(!handler->present); + + chained_irq_enter(chip, desc); + + while ((hwirq = cp100_get_hwirq(handler, claim))) { + int err = generic_handle_domain_irq(handler->priv->irqdomain, hwirq); + + if (unlikely(err)) { + pr_warn_ratelimited("%pfwP: can't find mapping for hwirq %lu\n", + handler->priv->fwnode, hwirq); + } + } + + chained_irq_exit(chip, desc); +} + +static void plic_set_threshold(struct plic_handler *handler, u32 threshold) +{ + /* priority must be > threshold to trigger an interrupt */ + writel(threshold, handler->hart_base + CONTEXT_THRESHOLD); +} + +static int plic_dying_cpu(unsigned int cpu) +{ + if (plic_parent_irq) + disable_percpu_irq(plic_parent_irq); + + return 0; +} + +static int plic_starting_cpu(unsigned int cpu) +{ + struct plic_handler *handler = this_cpu_ptr(&plic_handlers); + + if (plic_parent_irq) + enable_percpu_irq(plic_parent_irq, + irq_get_trigger_type(plic_parent_irq)); + else + pr_warn("%pfwP: cpu%d: parent irq not available\n", + handler->priv->fwnode, cpu); + plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD); + + return 0; +} + +static const struct of_device_id plic_match[] = { + { .compatible = "sifive,plic-1.0.0" }, + { .compatible = "riscv,plic0" }, + { .compatible = "andestech,nceplic100", + .data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) }, + { .compatible = "thead,c900-plic", + .data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) }, + { .compatible = "ultrarisc,cp100-plic", + .data = (const void *)BIT(PLIC_QUIRK_CP100_CLAIM_REGISTER_ERRATUM) }, + {} +}; + +#ifdef CONFIG_ACPI + +static const struct acpi_device_id plic_acpi_match[] = { + { "RSCV0001", 0 }, + {} +}; +MODULE_DEVICE_TABLE(acpi, plic_acpi_match); + +#endif +static int plic_parse_nr_irqs_and_contexts(struct fwnode_handle *fwnode, + u32 *nr_irqs, u32 *nr_contexts, + u32 *gsi_base, u32 *id) +{ + int rc; + + if (!is_of_node(fwnode)) { + rc = riscv_acpi_get_gsi_info(fwnode, gsi_base, id, nr_irqs, NULL); + if (rc) { + pr_err("%pfwP: failed to find GSI mapping\n", fwnode); + return rc; + } + + *nr_contexts = acpi_rintc_get_plic_nr_contexts(*id); + if (WARN_ON(!*nr_contexts)) { + pr_err("%pfwP: no PLIC context available\n", fwnode); + return -EINVAL; + } + + return 0; + } + + rc = of_property_read_u32(to_of_node(fwnode), "riscv,ndev", nr_irqs); + if (rc) { + pr_err("%pfwP: riscv,ndev property not available\n", fwnode); + return rc; + } + + *nr_contexts = of_irq_count(to_of_node(fwnode)); + if (WARN_ON(!(*nr_contexts))) { + pr_err("%pfwP: no PLIC context available\n", fwnode); + return -EINVAL; + } + + *gsi_base = 0; + *id = 0; - plic_regs = of_iomap(node, 0); - if (WARN_ON(!plic_regs)) - return -EIO; - - error = -EINVAL; - of_property_read_u32(node, "riscv,ndev", &nr_irqs); - if (WARN_ON(!nr_irqs)) - goto out_iounmap; - - nr_handlers = of_irq_count(node); - if (WARN_ON(!nr_handlers)) - goto out_iounmap; - if (WARN_ON(nr_handlers < num_possible_cpus())) - goto out_iounmap; - - error = -ENOMEM; - plic_irqdomain = irq_domain_add_linear(node, nr_irqs + 1, - &plic_irqdomain_ops, NULL); - if (WARN_ON(!plic_irqdomain)) - goto out_iounmap; - - for (i = 0; i < nr_handlers; i++) { - struct 
of_phandle_args parent; - struct plic_handler *handler; - irq_hw_number_t hwirq; - int cpu, hartid; - - if (of_irq_parse_one(node, i, &parent)) { - pr_err("failed to parse parent for context %d.\n", i); + return 0; +} + +static int plic_parse_context_parent(struct fwnode_handle *fwnode, u32 context, + u32 *parent_hwirq, int *parent_cpu, u32 id) +{ + struct of_phandle_args parent; + unsigned long hartid; + int rc; + + if (!is_of_node(fwnode)) { + hartid = acpi_rintc_ext_parent_to_hartid(id, context); + if (hartid == INVALID_HARTID) + return -EINVAL; + + *parent_cpu = riscv_hartid_to_cpuid(hartid); + *parent_hwirq = RV_IRQ_EXT; + return 0; + } + + rc = of_irq_parse_one(to_of_node(fwnode), context, &parent); + if (rc) + return rc; + + rc = riscv_of_parent_hartid(parent.np, &hartid); + if (rc) + return rc; + + *parent_hwirq = parent.args[0]; + *parent_cpu = riscv_hartid_to_cpuid(hartid); + return 0; +} + +static int plic_probe(struct fwnode_handle *fwnode) +{ + int error = 0, nr_contexts, nr_handlers = 0, cpu, i; + unsigned long plic_quirks = 0; + struct plic_handler *handler; + u32 nr_irqs, parent_hwirq; + struct plic_priv *priv; + irq_hw_number_t hwirq; + void __iomem *regs; + int id, context_id; + u32 gsi_base; + + if (is_of_node(fwnode)) { + const struct of_device_id *id; + + id = of_match_node(plic_match, to_of_node(fwnode)); + if (id) + plic_quirks = (unsigned long)id->data; + + regs = of_iomap(to_of_node(fwnode), 0); + if (!regs) + return -ENOMEM; + } else { + regs = devm_platform_ioremap_resource(to_platform_device(fwnode->dev), 0); + if (IS_ERR(regs)) + return PTR_ERR(regs); + } + + error = plic_parse_nr_irqs_and_contexts(fwnode, &nr_irqs, &nr_contexts, &gsi_base, &id); + if (error) + goto fail_free_regs; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + error = -ENOMEM; + goto fail_free_regs; + } + + priv->fwnode = fwnode; + priv->plic_quirks = plic_quirks; + priv->nr_irqs = nr_irqs; + priv->regs = regs; + priv->gsi_base = gsi_base; + priv->acpi_plic_id = id; + + priv->prio_save = bitmap_zalloc(nr_irqs, GFP_KERNEL); + if (!priv->prio_save) { + error = -ENOMEM; + goto fail_free_priv; + } + + for (i = 0; i < nr_contexts; i++) { + error = plic_parse_context_parent(fwnode, i, &parent_hwirq, &cpu, + priv->acpi_plic_id); + if (error) { + pr_warn("%pfwP: hwirq for context%d not found\n", fwnode, i); continue; } - /* skip context holes */ - if (parent.args[0] == -1) + if (is_of_node(fwnode)) { + context_id = i; + } else { + context_id = acpi_rintc_get_plic_context(priv->acpi_plic_id, i); + if (context_id == INVALID_CONTEXT) { + pr_warn("%pfwP: invalid context id for context%d\n", fwnode, i); + continue; + } + } + + /* + * Skip contexts other than external interrupts for our + * privilege level. + */ + if (parent_hwirq != RV_IRQ_EXT) { + /* Disable S-mode enable bits if running in M-mode. */ + if (IS_ENABLED(CONFIG_RISCV_M_MODE)) { + u32 __iomem *enable_base = priv->regs + CONTEXT_ENABLE_BASE + + i * CONTEXT_ENABLE_SIZE; + + for (int j = 0; j <= nr_irqs / 32; j++) + writel(0, enable_base + j); + } continue; + } - hartid = plic_find_hart_id(parent.np); - if (hartid < 0) { - pr_warn("failed to parse hart ID for context %d.\n", i); + if (cpu < 0) { + pr_warn("%pfwP: Invalid cpuid for context %d\n", fwnode, i); continue; } - cpu = riscv_hartid_to_cpuid(hartid); + /* + * When running in M-mode we need to ignore the S-mode handler. + * Here we assume it always comes later, but that might be a + * little fragile. 
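+		 * (With the usual devicetree layout this holds: a hart's
+		 * interrupts-extended entries list the M-mode external
+		 * interrupt before the S-mode one, e.g. <&cpu0_intc 11>,
+		 * <&cpu0_intc 9>, so the S-mode context for a hart is seen
+		 * after its M-mode context.)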
+ */ handler = per_cpu_ptr(&plic_handlers, cpu); + if (handler->present) { + pr_warn("%pfwP: handler already present for context %d.\n", fwnode, i); + plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD); + goto done; + } + + cpumask_set_cpu(cpu, &priv->lmask); handler->present = true; - handler->ctxid = i; + handler->hart_base = priv->regs + CONTEXT_BASE + + context_id * CONTEXT_SIZE; + raw_spin_lock_init(&handler->enable_lock); + handler->enable_base = priv->regs + CONTEXT_ENABLE_BASE + + context_id * CONTEXT_ENABLE_SIZE; + handler->priv = priv; + + handler->enable_save = kcalloc(DIV_ROUND_UP(nr_irqs, 32), + sizeof(*handler->enable_save), GFP_KERNEL); + if (!handler->enable_save) { + error = -ENOMEM; + goto fail_cleanup_contexts; + } +done: + for (hwirq = 1; hwirq <= nr_irqs; hwirq++) { + plic_toggle(handler, hwirq, 0); + writel(1, priv->regs + PRIORITY_BASE + + hwirq * PRIORITY_PER_ID); + } + nr_handlers++; + } - /* priority must be > threshold to trigger an interrupt */ - writel(0, plic_hart_offset(i) + CONTEXT_THRESHOLD); - for (hwirq = 1; hwirq <= nr_irqs; hwirq++) - plic_toggle(i, hwirq, 0); - nr_mapped++; + priv->irqdomain = irq_domain_create_linear(fwnode, nr_irqs + 1, + &plic_irqdomain_ops, priv); + if (WARN_ON(!priv->irqdomain)) { + error = -ENOMEM; + goto fail_cleanup_contexts; } - pr_info("mapped %d interrupts to %d (out of %d) handlers.\n", - nr_irqs, nr_mapped, nr_handlers); - set_handle_irq(plic_handle_irq); + /* + * We can have multiple PLIC instances so setup global state + * and register syscore operations only once after context + * handlers of all online CPUs are initialized. + */ + if (!plic_global_setup_done) { + struct irq_domain *domain; + bool global_setup = true; + + for_each_online_cpu(cpu) { + handler = per_cpu_ptr(&plic_handlers, cpu); + if (!handler->present) { + global_setup = false; + break; + } + } + + if (global_setup) { + void (*handler_fn)(struct irq_desc *) = plic_handle_irq; + + if (test_bit(PLIC_QUIRK_CP100_CLAIM_REGISTER_ERRATUM, &handler->priv->plic_quirks)) + handler_fn = plic_handle_irq_cp100; + + /* Find parent domain and register chained handler */ + domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY); + if (domain) + plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT); + if (plic_parent_irq) + irq_set_chained_handler(plic_parent_irq, handler_fn); + + cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, + "irqchip/sifive/plic:starting", + plic_starting_cpu, plic_dying_cpu); + register_syscore(&plic_irq_syscore); + plic_global_setup_done = true; + } + } + +#ifdef CONFIG_ACPI + if (!acpi_disabled) + acpi_dev_clear_dependencies(ACPI_COMPANION(fwnode->dev)); +#endif + + pr_info("%pfwP: mapped %d interrupts with %d handlers for %d contexts.\n", + fwnode, nr_irqs, nr_handlers, nr_contexts); return 0; -out_iounmap: - iounmap(plic_regs); +fail_cleanup_contexts: + for (i = 0; i < nr_contexts; i++) { + if (plic_parse_context_parent(fwnode, i, &parent_hwirq, &cpu, priv->acpi_plic_id)) + continue; + if (parent_hwirq != RV_IRQ_EXT || cpu < 0) + continue; + + handler = per_cpu_ptr(&plic_handlers, cpu); + handler->present = false; + handler->hart_base = NULL; + handler->enable_base = NULL; + kfree(handler->enable_save); + handler->enable_save = NULL; + handler->priv = NULL; + } + bitmap_free(priv->prio_save); +fail_free_priv: + kfree(priv); +fail_free_regs: + iounmap(regs); return error; } -IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init); -IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */ 
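The cp100 quirk handled above exists because that PLIC implementation deviates from the standard claim/complete protocol the rest of the driver assumes: a read of CONTEXT_CLAIM claims the highest-priority pending interrupt (returning 0 when nothing is pending), and writing the same ID back completes it. A minimal sketch of that standard flow, reusing the driver's plic_handlers and CONTEXT_CLAIM names from above (note that in the driver itself the completing write lives in the ->irq_eoi callback rather than inline in the loop):

	static void plic_claim_complete_sketch(struct irq_desc *desc)
	{
		struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
		struct irq_chip *chip = irq_desc_get_chip(desc);
		void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
		irq_hw_number_t hwirq;

		chained_irq_enter(chip, desc);

		/* Read-to-claim: 0 means nothing left to service. */
		while ((hwirq = readl(claim))) {
			if (generic_handle_domain_irq(handler->priv->irqdomain, hwirq))
				pr_warn_ratelimited("no mapping for hwirq %lu\n", hwirq);
			/* Write-to-complete: hand the claimed ID back to the PLIC. */
			writel(hwirq, claim);
		}

		chained_irq_exit(chip, desc);
	}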
+static int plic_platform_probe(struct platform_device *pdev) +{ + return plic_probe(pdev->dev.fwnode); +} + +static struct platform_driver plic_driver = { + .driver = { + .name = "riscv-plic", + .of_match_table = plic_match, + .suppress_bind_attrs = true, + .acpi_match_table = ACPI_PTR(plic_acpi_match), + }, + .probe = plic_platform_probe, +}; +builtin_platform_driver(plic_driver); + +static int __init plic_early_probe(struct device_node *node, + struct device_node *parent) +{ + return plic_probe(&node->fwnode); +} + +IRQCHIP_DECLARE(riscv, "allwinner,sun20i-d1-plic", plic_early_probe); diff --git a/drivers/irqchip/irq-sirfsoc.c b/drivers/irqchip/irq-sirfsoc.c deleted file mode 100644 index e1336848affa..000000000000 --- a/drivers/irqchip/irq-sirfsoc.c +++ /dev/null @@ -1,135 +0,0 @@ -/* - * interrupt controller support for CSR SiRFprimaII - * - * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. - * - * Licensed under GPLv2 or later. - */ - -#include <linux/init.h> -#include <linux/io.h> -#include <linux/irq.h> -#include <linux/of.h> -#include <linux/of_address.h> -#include <linux/irqchip.h> -#include <linux/irqdomain.h> -#include <linux/syscore_ops.h> -#include <asm/mach/irq.h> -#include <asm/exception.h> - -#define SIRFSOC_INT_RISC_MASK0 0x0018 -#define SIRFSOC_INT_RISC_MASK1 0x001C -#define SIRFSOC_INT_RISC_LEVEL0 0x0020 -#define SIRFSOC_INT_RISC_LEVEL1 0x0024 -#define SIRFSOC_INIT_IRQ_ID 0x0038 -#define SIRFSOC_INT_BASE_OFFSET 0x0004 - -#define SIRFSOC_NUM_IRQS 64 -#define SIRFSOC_NUM_BANKS (SIRFSOC_NUM_IRQS / 32) - -static struct irq_domain *sirfsoc_irqdomain; - -static void __iomem *sirfsoc_irq_get_regbase(void) -{ - return (void __iomem __force *)sirfsoc_irqdomain->host_data; -} - -static __init void sirfsoc_alloc_gc(void __iomem *base) -{ - unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; - unsigned int set = IRQ_LEVEL; - struct irq_chip_generic *gc; - struct irq_chip_type *ct; - int i; - - irq_alloc_domain_generic_chips(sirfsoc_irqdomain, 32, 1, "irq_sirfsoc", - handle_level_irq, clr, set, - IRQ_GC_INIT_MASK_CACHE); - - for (i = 0; i < SIRFSOC_NUM_BANKS; i++) { - gc = irq_get_domain_generic_chip(sirfsoc_irqdomain, i * 32); - gc->reg_base = base + i * SIRFSOC_INT_BASE_OFFSET; - ct = gc->chip_types; - ct->chip.irq_mask = irq_gc_mask_clr_bit; - ct->chip.irq_unmask = irq_gc_mask_set_bit; - ct->regs.mask = SIRFSOC_INT_RISC_MASK0; - } -} - -static void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs) -{ - void __iomem *base = sirfsoc_irq_get_regbase(); - u32 irqstat; - - irqstat = readl_relaxed(base + SIRFSOC_INIT_IRQ_ID); - handle_domain_irq(sirfsoc_irqdomain, irqstat & 0xff, regs); -} - -static int __init sirfsoc_irq_init(struct device_node *np, - struct device_node *parent) -{ - void __iomem *base = of_iomap(np, 0); - if (!base) - panic("unable to map intc cpu registers\n"); - - sirfsoc_irqdomain = irq_domain_add_linear(np, SIRFSOC_NUM_IRQS, - &irq_generic_chip_ops, base); - sirfsoc_alloc_gc(base); - - writel_relaxed(0, base + SIRFSOC_INT_RISC_LEVEL0); - writel_relaxed(0, base + SIRFSOC_INT_RISC_LEVEL1); - - writel_relaxed(0, base + SIRFSOC_INT_RISC_MASK0); - writel_relaxed(0, base + SIRFSOC_INT_RISC_MASK1); - - set_handle_irq(sirfsoc_handle_irq); - - return 0; -} -IRQCHIP_DECLARE(sirfsoc_intc, "sirf,prima2-intc", sirfsoc_irq_init); - -struct sirfsoc_irq_status { - u32 mask0; - u32 mask1; - u32 level0; - u32 level1; -}; - -static struct sirfsoc_irq_status sirfsoc_irq_st; - -static int sirfsoc_irq_suspend(void) -{ - void __iomem 
*base = sirfsoc_irq_get_regbase(); - - sirfsoc_irq_st.mask0 = readl_relaxed(base + SIRFSOC_INT_RISC_MASK0); - sirfsoc_irq_st.mask1 = readl_relaxed(base + SIRFSOC_INT_RISC_MASK1); - sirfsoc_irq_st.level0 = readl_relaxed(base + SIRFSOC_INT_RISC_LEVEL0); - sirfsoc_irq_st.level1 = readl_relaxed(base + SIRFSOC_INT_RISC_LEVEL1); - - return 0; -} - -static void sirfsoc_irq_resume(void) -{ - void __iomem *base = sirfsoc_irq_get_regbase(); - - writel_relaxed(sirfsoc_irq_st.mask0, base + SIRFSOC_INT_RISC_MASK0); - writel_relaxed(sirfsoc_irq_st.mask1, base + SIRFSOC_INT_RISC_MASK1); - writel_relaxed(sirfsoc_irq_st.level0, base + SIRFSOC_INT_RISC_LEVEL0); - writel_relaxed(sirfsoc_irq_st.level1, base + SIRFSOC_INT_RISC_LEVEL1); -} - -static struct syscore_ops sirfsoc_irq_syscore_ops = { - .suspend = sirfsoc_irq_suspend, - .resume = sirfsoc_irq_resume, -}; - -static int __init sirfsoc_irq_pm_init(void) -{ - if (!sirfsoc_irqdomain) - return 0; - - register_syscore_ops(&sirfsoc_irq_syscore_ops); - return 0; -} -device_initcall(sirfsoc_irq_pm_init); diff --git a/drivers/irqchip/irq-sl28cpld.c b/drivers/irqchip/irq-sl28cpld.c new file mode 100644 index 000000000000..e50f9eaba4cd --- /dev/null +++ b/drivers/irqchip/irq-sl28cpld.c @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * sl28cpld interrupt controller driver + * + * Copyright 2020 Kontron Europe GmbH + */ + +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/property.h> +#include <linux/regmap.h> + +#define INTC_IE 0x00 +#define INTC_IP 0x01 + +static const struct regmap_irq sl28cpld_irqs[] = { + REGMAP_IRQ_REG_LINE(0, 8), + REGMAP_IRQ_REG_LINE(1, 8), + REGMAP_IRQ_REG_LINE(2, 8), + REGMAP_IRQ_REG_LINE(3, 8), + REGMAP_IRQ_REG_LINE(4, 8), + REGMAP_IRQ_REG_LINE(5, 8), + REGMAP_IRQ_REG_LINE(6, 8), + REGMAP_IRQ_REG_LINE(7, 8), +}; + +struct sl28cpld_intc { + struct regmap *regmap; + struct regmap_irq_chip chip; + struct regmap_irq_chip_data *irq_data; +}; + +static int sl28cpld_intc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct sl28cpld_intc *irqchip; + int irq; + u32 base; + int ret; + + if (!dev->parent) + return -ENODEV; + + irqchip = devm_kzalloc(dev, sizeof(*irqchip), GFP_KERNEL); + if (!irqchip) + return -ENOMEM; + + irqchip->regmap = dev_get_regmap(dev->parent, NULL); + if (!irqchip->regmap) + return -ENODEV; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + ret = device_property_read_u32(&pdev->dev, "reg", &base); + if (ret) + return -EINVAL; + + irqchip->chip.name = "sl28cpld-intc"; + irqchip->chip.irqs = sl28cpld_irqs; + irqchip->chip.num_irqs = ARRAY_SIZE(sl28cpld_irqs); + irqchip->chip.num_regs = 1; + irqchip->chip.status_base = base + INTC_IP; + irqchip->chip.unmask_base = base + INTC_IE; + irqchip->chip.ack_base = base + INTC_IP; + + return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(dev), + irqchip->regmap, irq, + IRQF_SHARED | IRQF_ONESHOT, 0, + &irqchip->chip, + &irqchip->irq_data); +} + +static const struct of_device_id sl28cpld_intc_of_match[] = { + { .compatible = "kontron,sl28cpld-intc" }, + {} +}; +MODULE_DEVICE_TABLE(of, sl28cpld_intc_of_match); + +static struct platform_driver sl28cpld_intc_driver = { + .probe = sl28cpld_intc_probe, + .driver = { + .name = "sl28cpld-intc", + .of_match_table = sl28cpld_intc_of_match, + } +}; +module_platform_driver(sl28cpld_intc_driver); + +MODULE_DESCRIPTION("sl28cpld Interrupt Controller Driver"); 
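Worth noting about the sl28cpld probe above: the driver implements no mask or ack logic of its own; regmap-irq synthesizes the irq_chip and irq domain entirely from the register offsets it is given (status and ack at INTC_IP, unmask at INTC_IE). A consumer hanging off this controller then requests its line like any other interrupt. The sketch below is illustrative only; demo_isr and demo_probe are hypothetical names, not part of the patch:

	static irqreturn_t demo_isr(int irq, void *dev_id)
	{
		/* regmap-irq has already acked the CPLD line via INTC_IP. */
		return IRQ_HANDLED;
	}

	static int demo_probe(struct platform_device *pdev)
	{
		/* Resolved through the domain registered by the intc above. */
		int irq = platform_get_irq(pdev, 0);

		if (irq < 0)
			return irq;

		return devm_request_threaded_irq(&pdev->dev, irq, NULL, demo_isr,
						 IRQF_ONESHOT, dev_name(&pdev->dev), pdev);
	}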
+MODULE_AUTHOR("Michael Walle <michael@walle.cc>"); diff --git a/drivers/irqchip/irq-sni-exiu.c b/drivers/irqchip/irq-sni-exiu.c index 1927b2f36ff6..0cad68aa8388 100644 --- a/drivers/irqchip/irq-sni-exiu.c +++ b/drivers/irqchip/irq-sni-exiu.c @@ -1,15 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Driver for Socionext External Interrupt Unit (EXIU) * - * Copyright (c) 2017 Linaro, Ltd. <ard.biesheuvel@linaro.org> + * Copyright (c) 2017-2019 Linaro, Ltd. <ard.biesheuvel@linaro.org> * * Based on irq-tegra.c: * Copyright (C) 2011 Google, Inc. * Copyright (C) 2010,2013, NVIDIA Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include <linux/interrupt.h> @@ -20,6 +17,7 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> +#include <linux/platform_device.h> #include <dt-bindings/interrupt-controller/arm-gic.h> @@ -39,11 +37,26 @@ struct exiu_irq_data { u32 spi_base; }; -static void exiu_irq_eoi(struct irq_data *d) +static void exiu_irq_ack(struct irq_data *d) { struct exiu_irq_data *data = irq_data_get_irq_chip_data(d); writel(BIT(d->hwirq), data->base + EIREQCLR); +} + +static void exiu_irq_eoi(struct irq_data *d) +{ + struct exiu_irq_data *data = irq_data_get_irq_chip_data(d); + + /* + * Level triggered interrupts are latched and must be cleared during + * EOI or the interrupt will be jammed on. Of course if a level + * triggered interrupt is still asserted then the write will not clear + * the interrupt. + */ + if (irqd_is_level_type(d)) + writel(BIT(d->hwirq), data->base + EIREQCLR); + irq_chip_eoi_parent(d); } @@ -93,10 +106,13 @@ static int exiu_irq_set_type(struct irq_data *d, unsigned int type) writel_relaxed(val, data->base + EILVL); val = readl_relaxed(data->base + EIEDG); - if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_LEVEL_HIGH) + if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_LEVEL_HIGH) { val &= ~BIT(d->hwirq); - else + irq_set_handler_locked(d, handle_fasteoi_irq); + } else { val |= BIT(d->hwirq); + irq_set_handler_locked(d, handle_fasteoi_ack_irq); + } writel_relaxed(val, data->base + EIEDG); writel_relaxed(BIT(d->hwirq), data->base + EIREQCLR); @@ -106,6 +122,7 @@ static int exiu_irq_set_type(struct irq_data *d, unsigned int type) static struct irq_chip exiu_irq_chip = { .name = "EXIU", + .irq_ack = exiu_irq_ack, .irq_eoi = exiu_irq_eoi, .irq_enable = exiu_irq_enable, .irq_mask = exiu_irq_mask, @@ -134,9 +151,13 @@ static int exiu_domain_translate(struct irq_domain *domain, *hwirq = fwspec->param[1] - info->spi_base; *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; - return 0; + } else { + if (fwspec->param_count != 2) + return -EINVAL; + *hwirq = fwspec->param[0]; + *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; } - return -EINVAL; + return 0; } static int exiu_domain_alloc(struct irq_domain *dom, unsigned int virq, @@ -147,16 +168,21 @@ static int exiu_domain_alloc(struct irq_domain *dom, unsigned int virq, struct exiu_irq_data *info = dom->host_data; irq_hw_number_t hwirq; - if (fwspec->param_count != 3) - return -EINVAL; /* Not GIC compliant */ - if (fwspec->param[0] != GIC_SPI) - return -EINVAL; /* No PPI should point to this domain */ + parent_fwspec = *fwspec; + if (is_of_node(dom->parent->fwnode)) { + if (fwspec->param_count != 3) + return -EINVAL; /* Not GIC compliant */ + if (fwspec->param[0] != GIC_SPI) + return -EINVAL; /* No PPI should point to this domain */ + 
hwirq = fwspec->param[1] - info->spi_base; + } else { + hwirq = fwspec->param[0]; + parent_fwspec.param[0] = hwirq + info->spi_base + 32; + } WARN_ON(nr_irqs != 1); - hwirq = fwspec->param[1] - info->spi_base; irq_domain_set_hwirq_and_chip(dom, virq, hwirq, &exiu_irq_chip, info); - parent_fwspec = *fwspec; parent_fwspec.fwnode = dom->parent->fwnode; return irq_domain_alloc_irqs_parent(dom, virq, nr_irqs, &parent_fwspec); } @@ -167,35 +193,23 @@ static const struct irq_domain_ops exiu_domain_ops = { .free = irq_domain_free_irqs_common, }; -static int __init exiu_init(struct device_node *node, - struct device_node *parent) +static struct exiu_irq_data *exiu_init(const struct fwnode_handle *fwnode, + struct resource *res) { - struct irq_domain *parent_domain, *domain; struct exiu_irq_data *data; int err; - if (!parent) { - pr_err("%pOF: no parent, giving up\n", node); - return -ENODEV; - } - - parent_domain = irq_find_host(parent); - if (!parent_domain) { - pr_err("%pOF: unable to obtain parent domain\n", node); - return -ENXIO; - } - data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) - return -ENOMEM; + return ERR_PTR(-ENOMEM); - if (of_property_read_u32(node, "socionext,spi-base", &data->spi_base)) { - pr_err("%pOF: failed to parse 'spi-base' property\n", node); + if (fwnode_property_read_u32_array(fwnode, "socionext,spi-base", + &data->spi_base, 1)) { err = -ENODEV; goto out_free; } - data->base = of_iomap(node, 0); + data->base = ioremap(res->start, resource_size(res)); if (!data->base) { err = -ENODEV; goto out_free; @@ -205,11 +219,44 @@ static int __init exiu_init(struct device_node *node, writel_relaxed(0xFFFFFFFF, data->base + EIREQCLR); writel_relaxed(0xFFFFFFFF, data->base + EIMASK); - domain = irq_domain_add_hierarchy(parent_domain, 0, NUM_IRQS, node, - &exiu_domain_ops, data); + return data; + +out_free: + kfree(data); + return ERR_PTR(err); +} + +static int __init exiu_dt_init(struct device_node *node, + struct device_node *parent) +{ + struct irq_domain *parent_domain, *domain; + struct exiu_irq_data *data; + struct resource res; + + if (!parent) { + pr_err("%pOF: no parent, giving up\n", node); + return -ENODEV; + } + + parent_domain = irq_find_host(parent); + if (!parent_domain) { + pr_err("%pOF: unable to obtain parent domain\n", node); + return -ENXIO; + } + + if (of_address_to_resource(node, 0, &res)) { + pr_err("%pOF: failed to parse memory resource\n", node); + return -ENXIO; + } + + data = exiu_init(of_fwnode_handle(node), &res); + if (IS_ERR(data)) + return PTR_ERR(data); + + domain = irq_domain_create_hierarchy(parent_domain, 0, NUM_IRQS, of_fwnode_handle(node), + &exiu_domain_ops, data); if (!domain) { pr_err("%pOF: failed to allocate domain\n", node); - err = -ENOMEM; goto out_unmap; } @@ -220,8 +267,57 @@ static int __init exiu_init(struct device_node *node, out_unmap: iounmap(data->base); -out_free: kfree(data); - return err; + return -ENOMEM; +} +IRQCHIP_DECLARE(exiu, "socionext,synquacer-exiu", exiu_dt_init); + +#ifdef CONFIG_ACPI +static int exiu_acpi_probe(struct platform_device *pdev) +{ + struct irq_domain *domain; + struct exiu_irq_data *data; + struct resource *res; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "failed to parse memory resource\n"); + return -ENXIO; + } + + data = exiu_init(dev_fwnode(&pdev->dev), res); + if (IS_ERR(data)) + return PTR_ERR(data); + + domain = acpi_irq_create_hierarchy(0, NUM_IRQS, dev_fwnode(&pdev->dev), + &exiu_domain_ops, data); + if (!domain) { + dev_err(&pdev->dev, 
"failed to create IRQ domain\n"); + goto out_unmap; + } + + dev_info(&pdev->dev, "%d interrupts forwarded\n", NUM_IRQS); + + return 0; + +out_unmap: + iounmap(data->base); + kfree(data); + return -ENOMEM; } -IRQCHIP_DECLARE(exiu, "socionext,synquacer-exiu", exiu_init); + +static const struct acpi_device_id exiu_acpi_ids[] = { + { "SCX0008" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(acpi, exiu_acpi_ids); + +static struct platform_driver exiu_driver = { + .driver = { + .name = "exiu", + .acpi_match_table = exiu_acpi_ids, + }, + .probe = exiu_acpi_probe, +}; +builtin_platform_driver(exiu_driver); +#endif diff --git a/drivers/irqchip/irq-sp7021-intc.c b/drivers/irqchip/irq-sp7021-intc.c new file mode 100644 index 000000000000..2a6eda9ab62e --- /dev/null +++ b/drivers/irqchip/irq-sp7021-intc.c @@ -0,0 +1,278 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* + * Copyright (C) Sunplus Technology Co., Ltd. + * All rights reserved. + */ +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/io.h> +#include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> + +#define SP_INTC_HWIRQ_MIN 0 +#define SP_INTC_HWIRQ_MAX 223 + +#define SP_INTC_NR_IRQS (SP_INTC_HWIRQ_MAX - SP_INTC_HWIRQ_MIN + 1) +#define SP_INTC_NR_GROUPS DIV_ROUND_UP(SP_INTC_NR_IRQS, 32) +#define SP_INTC_REG_SIZE (SP_INTC_NR_GROUPS * 4) + +/* REG_GROUP_0 regs */ +#define REG_INTR_TYPE (sp_intc.g0) +#define REG_INTR_POLARITY (REG_INTR_TYPE + SP_INTC_REG_SIZE) +#define REG_INTR_PRIORITY (REG_INTR_POLARITY + SP_INTC_REG_SIZE) +#define REG_INTR_MASK (REG_INTR_PRIORITY + SP_INTC_REG_SIZE) + +/* REG_GROUP_1 regs */ +#define REG_INTR_CLEAR (sp_intc.g1) +#define REG_MASKED_EXT1 (REG_INTR_CLEAR + SP_INTC_REG_SIZE) +#define REG_MASKED_EXT0 (REG_MASKED_EXT1 + SP_INTC_REG_SIZE) +#define REG_INTR_GROUP (REG_INTR_CLEAR + 31 * 4) + +#define GROUP_MASK (BIT(SP_INTC_NR_GROUPS) - 1) +#define GROUP_SHIFT_EXT1 (0) +#define GROUP_SHIFT_EXT0 (8) + +/* + * When GPIO_INT0~7 set to edge trigger, doesn't work properly. + * WORKAROUND: change it to level trigger, and toggle the polarity + * at ACK/Handler to make the HW work. + */ +#define GPIO_INT0_HWIRQ 120 +#define GPIO_INT7_HWIRQ 127 +#define IS_GPIO_INT(irq) \ +({ \ + u32 i = irq; \ + (i >= GPIO_INT0_HWIRQ) && (i <= GPIO_INT7_HWIRQ); \ +}) + +/* index of states */ +enum { + _IS_EDGE = 0, + _IS_LOW, + _IS_ACTIVE +}; + +#define STATE_BIT(irq, idx) (((irq) - GPIO_INT0_HWIRQ) * 3 + (idx)) +#define ASSIGN_STATE(irq, idx, v) assign_bit(STATE_BIT(irq, idx), sp_intc.states, v) +#define TEST_STATE(irq, idx) test_bit(STATE_BIT(irq, idx), sp_intc.states) + +static struct sp_intctl { + /* + * REG_GROUP_0: include type/polarity/priority/mask regs. + * REG_GROUP_1: include clear/masked_ext0/masked_ext1/group regs. 
+ */ + void __iomem *g0; // REG_GROUP_0 base + void __iomem *g1; // REG_GROUP_1 base + + struct irq_domain *domain; + raw_spinlock_t lock; + + /* + * store GPIO_INT states + * each interrupt has 3 states: is_edge, is_low, is_active + */ + DECLARE_BITMAP(states, (GPIO_INT7_HWIRQ - GPIO_INT0_HWIRQ + 1) * 3); +} sp_intc; + +static struct irq_chip sp_intc_chip; + +static void sp_intc_assign_bit(u32 hwirq, void __iomem *base, bool value) +{ + u32 offset, mask; + unsigned long flags; + void __iomem *reg; + + offset = (hwirq / 32) * 4; + reg = base + offset; + + raw_spin_lock_irqsave(&sp_intc.lock, flags); + mask = readl_relaxed(reg); + if (value) + mask |= BIT(hwirq % 32); + else + mask &= ~BIT(hwirq % 32); + writel_relaxed(mask, reg); + raw_spin_unlock_irqrestore(&sp_intc.lock, flags); +} + +static void sp_intc_ack_irq(struct irq_data *d) +{ + u32 hwirq = d->hwirq; + + if (unlikely(IS_GPIO_INT(hwirq) && TEST_STATE(hwirq, _IS_EDGE))) { // WORKAROUND + sp_intc_assign_bit(hwirq, REG_INTR_POLARITY, !TEST_STATE(hwirq, _IS_LOW)); + ASSIGN_STATE(hwirq, _IS_ACTIVE, true); + } + + sp_intc_assign_bit(hwirq, REG_INTR_CLEAR, 1); +} + +static void sp_intc_mask_irq(struct irq_data *d) +{ + sp_intc_assign_bit(d->hwirq, REG_INTR_MASK, 0); +} + +static void sp_intc_unmask_irq(struct irq_data *d) +{ + sp_intc_assign_bit(d->hwirq, REG_INTR_MASK, 1); +} + +static int sp_intc_set_type(struct irq_data *d, unsigned int type) +{ + u32 hwirq = d->hwirq; + bool is_edge = !(type & IRQ_TYPE_LEVEL_MASK); + bool is_low = (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_EDGE_FALLING); + + irq_set_handler_locked(d, is_edge ? handle_edge_irq : handle_level_irq); + + if (unlikely(IS_GPIO_INT(hwirq) && is_edge)) { // WORKAROUND + /* store states */ + ASSIGN_STATE(hwirq, _IS_EDGE, is_edge); + ASSIGN_STATE(hwirq, _IS_LOW, is_low); + ASSIGN_STATE(hwirq, _IS_ACTIVE, false); + /* change to level */ + is_edge = false; + } + + sp_intc_assign_bit(hwirq, REG_INTR_TYPE, is_edge); + sp_intc_assign_bit(hwirq, REG_INTR_POLARITY, is_low); + + return 0; +} + +static int sp_intc_get_ext_irq(int ext_num) +{ + void __iomem *base = ext_num ? REG_MASKED_EXT1 : REG_MASKED_EXT0; + u32 shift = ext_num ? 
GROUP_SHIFT_EXT1 : GROUP_SHIFT_EXT0; + u32 groups; + u32 pending_group; + u32 group; + u32 pending_irq; + + groups = readl_relaxed(REG_INTR_GROUP); + pending_group = (groups >> shift) & GROUP_MASK; + if (!pending_group) + return -1; + + group = fls(pending_group) - 1; + pending_irq = readl_relaxed(base + group * 4); + if (!pending_irq) + return -1; + + return (group * 32) + fls(pending_irq) - 1; +} + +static void sp_intc_handle_ext_cascaded(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + int ext_num = (uintptr_t)irq_desc_get_handler_data(desc); + int hwirq; + + chained_irq_enter(chip, desc); + + while ((hwirq = sp_intc_get_ext_irq(ext_num)) >= 0) { + if (unlikely(IS_GPIO_INT(hwirq) && TEST_STATE(hwirq, _IS_ACTIVE))) { // WORKAROUND + ASSIGN_STATE(hwirq, _IS_ACTIVE, false); + sp_intc_assign_bit(hwirq, REG_INTR_POLARITY, TEST_STATE(hwirq, _IS_LOW)); + } else { + generic_handle_domain_irq(sp_intc.domain, hwirq); + } + } + + chained_irq_exit(chip, desc); +} + +static struct irq_chip sp_intc_chip = { + .name = "sp_intc", + .irq_ack = sp_intc_ack_irq, + .irq_mask = sp_intc_mask_irq, + .irq_unmask = sp_intc_unmask_irq, + .irq_set_type = sp_intc_set_type, +}; + +static int sp_intc_irq_domain_map(struct irq_domain *domain, + unsigned int irq, irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &sp_intc_chip, handle_level_irq); + irq_set_chip_data(irq, &sp_intc_chip); + irq_set_noprobe(irq); + + return 0; +} + +static const struct irq_domain_ops sp_intc_dm_ops = { + .xlate = irq_domain_xlate_twocell, + .map = sp_intc_irq_domain_map, +}; + +static int sp_intc_irq_map(struct device_node *node, int i) +{ + unsigned int irq; + + irq = irq_of_parse_and_map(node, i); + if (!irq) + return -ENOENT; + + irq_set_chained_handler_and_data(irq, sp_intc_handle_ext_cascaded, (void *)(uintptr_t)i); + + return 0; +} + +static int __init sp_intc_init_dt(struct device_node *node, struct device_node *parent) +{ + int i, ret; + + sp_intc.g0 = of_iomap(node, 0); + if (!sp_intc.g0) + return -ENXIO; + + sp_intc.g1 = of_iomap(node, 1); + if (!sp_intc.g1) { + ret = -ENXIO; + goto out_unmap0; + } + + ret = sp_intc_irq_map(node, 0); // EXT_INT0 + if (ret) + goto out_unmap1; + + ret = sp_intc_irq_map(node, 1); // EXT_INT1 + if (ret) + goto out_unmap1; + + /* initial regs */ + for (i = 0; i < SP_INTC_NR_GROUPS; i++) { + /* all mask */ + writel_relaxed(0, REG_INTR_MASK + i * 4); + /* all edge */ + writel_relaxed(~0, REG_INTR_TYPE + i * 4); + /* all high-active */ + writel_relaxed(0, REG_INTR_POLARITY + i * 4); + /* all EXT_INT0 */ + writel_relaxed(~0, REG_INTR_PRIORITY + i * 4); + /* all clear */ + writel_relaxed(~0, REG_INTR_CLEAR + i * 4); + } + + sp_intc.domain = irq_domain_create_linear(of_fwnode_handle(node), SP_INTC_NR_IRQS, + &sp_intc_dm_ops, &sp_intc); + if (!sp_intc.domain) { + ret = -ENOMEM; + goto out_unmap1; + } + + raw_spin_lock_init(&sp_intc.lock); + + return 0; + +out_unmap1: + iounmap(sp_intc.g1); +out_unmap0: + iounmap(sp_intc.g0); + + return ret; +} + +IRQCHIP_DECLARE(sp_intc, "sunplus,sp7021-intc", sp_intc_init_dt); diff --git a/drivers/irqchip/irq-st.c b/drivers/irqchip/irq-st.c index 5e0e250db0be..de71bb350d57 100644 --- a/drivers/irqchip/irq-st.c +++ b/drivers/irqchip/irq-st.c @@ -1,27 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2014 STMicroelectronics – All Rights Reserved * * Author: Lee Jones <lee.jones@linaro.org> * * This is a re-write of Christophe Kerello's PMU driver. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include <dt-bindings/interrupt-controller/irq-st.h> #include <linux/err.h> #include <linux/mfd/syscon.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/slab.h> -#define STIH415_SYSCFG_642 0x0a8 -#define STIH416_SYSCFG_7543 0x87c #define STIH407_SYSCFG_5102 0x198 -#define STID127_SYSCFG_734 0x088 #define ST_A9_IRQ_MASK 0x001FFFFF #define ST_A9_IRQ_MAX_CHANS 2 @@ -48,21 +42,9 @@ struct st_irq_syscfg { static const struct of_device_id st_irq_syscfg_match[] = { { - .compatible = "st,stih415-irq-syscfg", - .data = (void *)STIH415_SYSCFG_642, - }, - { - .compatible = "st,stih416-irq-syscfg", - .data = (void *)STIH416_SYSCFG_7543, - }, - { .compatible = "st,stih407-irq-syscfg", .data = (void *)STIH407_SYSCFG_5102, }, - { - .compatible = "st,stid127-irq-syscfg", - .data = (void *)STID127_SYSCFG_734, - }, {} }; @@ -156,18 +138,13 @@ static int st_irq_syscfg_enable(struct platform_device *pdev) static int st_irq_syscfg_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; - const struct of_device_id *match; struct st_irq_syscfg *ddata; ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); if (!ddata) return -ENOMEM; - match = of_match_device(st_irq_syscfg_match, &pdev->dev); - if (!match) - return -ENODEV; - - ddata->syscfg = (unsigned int)match->data; + ddata->syscfg = (unsigned int) device_get_match_data(&pdev->dev); ddata->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscfg"); if (IS_ERR(ddata->regmap)) { diff --git a/drivers/irqchip/irq-starfive-jh8100-intc.c b/drivers/irqchip/irq-starfive-jh8100-intc.c new file mode 100644 index 000000000000..705361b4ebe0 --- /dev/null +++ b/drivers/irqchip/irq-starfive-jh8100-intc.c @@ -0,0 +1,207 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * StarFive JH8100 External Interrupt Controller driver + * + * Copyright (C) 2023 StarFive Technology Co., Ltd. 
+ * + * Author: Changhuang Liang <changhuang.liang@starfivetech.com> + */ + +#define pr_fmt(fmt) "irq-starfive-jh8100: " fmt + +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/reset.h> +#include <linux/spinlock.h> + +#define STARFIVE_INTC_SRC0_CLEAR 0x10 +#define STARFIVE_INTC_SRC0_MASK 0x14 +#define STARFIVE_INTC_SRC0_INT 0x1c + +#define STARFIVE_INTC_SRC_IRQ_NUM 32 + +struct starfive_irq_chip { + void __iomem *base; + struct irq_domain *domain; + raw_spinlock_t lock; +}; + +static void starfive_intc_bit_set(struct starfive_irq_chip *irqc, + u32 reg, u32 bit_mask) +{ + u32 value; + + value = ioread32(irqc->base + reg); + value |= bit_mask; + iowrite32(value, irqc->base + reg); +} + +static void starfive_intc_bit_clear(struct starfive_irq_chip *irqc, + u32 reg, u32 bit_mask) +{ + u32 value; + + value = ioread32(irqc->base + reg); + value &= ~bit_mask; + iowrite32(value, irqc->base + reg); +} + +static void starfive_intc_unmask(struct irq_data *d) +{ + struct starfive_irq_chip *irqc = irq_data_get_irq_chip_data(d); + + raw_spin_lock(&irqc->lock); + starfive_intc_bit_clear(irqc, STARFIVE_INTC_SRC0_MASK, BIT(d->hwirq)); + raw_spin_unlock(&irqc->lock); +} + +static void starfive_intc_mask(struct irq_data *d) +{ + struct starfive_irq_chip *irqc = irq_data_get_irq_chip_data(d); + + raw_spin_lock(&irqc->lock); + starfive_intc_bit_set(irqc, STARFIVE_INTC_SRC0_MASK, BIT(d->hwirq)); + raw_spin_unlock(&irqc->lock); +} + +static struct irq_chip intc_dev = { + .name = "StarFive JH8100 INTC", + .irq_unmask = starfive_intc_unmask, + .irq_mask = starfive_intc_mask, +}; + +static int starfive_intc_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_domain_set_info(d, irq, hwirq, &intc_dev, d->host_data, + handle_level_irq, NULL, NULL); + + return 0; +} + +static const struct irq_domain_ops starfive_intc_domain_ops = { + .xlate = irq_domain_xlate_onecell, + .map = starfive_intc_map, +}; + +static void starfive_intc_irq_handler(struct irq_desc *desc) +{ + struct starfive_irq_chip *irqc = irq_data_get_irq_handler_data(&desc->irq_data); + struct irq_chip *chip = irq_desc_get_chip(desc); + unsigned long value; + int hwirq; + + chained_irq_enter(chip, desc); + + value = ioread32(irqc->base + STARFIVE_INTC_SRC0_INT); + while (value) { + hwirq = ffs(value) - 1; + + generic_handle_domain_irq(irqc->domain, hwirq); + + starfive_intc_bit_set(irqc, STARFIVE_INTC_SRC0_CLEAR, BIT(hwirq)); + starfive_intc_bit_clear(irqc, STARFIVE_INTC_SRC0_CLEAR, BIT(hwirq)); + + __clear_bit(hwirq, &value); + } + + chained_irq_exit(chip, desc); +} + +static int starfive_intc_probe(struct platform_device *pdev, struct device_node *parent) +{ + struct device_node *intc = pdev->dev.of_node; + struct starfive_irq_chip *irqc; + struct reset_control *rst; + struct clk *clk; + int parent_irq; + int ret; + + irqc = kzalloc(sizeof(*irqc), GFP_KERNEL); + if (!irqc) + return -ENOMEM; + + irqc->base = of_iomap(intc, 0); + if (!irqc->base) { + pr_err("Unable to map registers\n"); + ret = -ENXIO; + goto err_free; + } + + rst = of_reset_control_get_exclusive(intc, NULL); + if (IS_ERR(rst)) { + pr_err("Unable to get reset control %pe\n", rst); + ret = PTR_ERR(rst); + goto err_unmap; + } + + clk = of_clk_get(intc, 0); + if (IS_ERR(clk)) { + pr_err("Unable to get clock %pe\n", clk); + ret = PTR_ERR(clk); + goto err_reset_put; + } + + 
ret = reset_control_deassert(rst); + if (ret) + goto err_clk_put; + + ret = clk_prepare_enable(clk); + if (ret) + goto err_reset_assert; + + raw_spin_lock_init(&irqc->lock); + + irqc->domain = irq_domain_create_linear(of_fwnode_handle(intc), STARFIVE_INTC_SRC_IRQ_NUM, + &starfive_intc_domain_ops, irqc); + if (!irqc->domain) { + pr_err("Unable to create IRQ domain\n"); + ret = -EINVAL; + goto err_clk_disable; + } + + parent_irq = of_irq_get(intc, 0); + if (parent_irq < 0) { + pr_err("Failed to get main IRQ: %d\n", parent_irq); + ret = parent_irq; + goto err_remove_domain; + } + + irq_set_chained_handler_and_data(parent_irq, starfive_intc_irq_handler, + irqc); + + pr_info("Interrupt controller register, nr_irqs %d\n", + STARFIVE_INTC_SRC_IRQ_NUM); + + return 0; + +err_remove_domain: + irq_domain_remove(irqc->domain); +err_clk_disable: + clk_disable_unprepare(clk); +err_reset_assert: + reset_control_assert(rst); +err_clk_put: + clk_put(clk); +err_reset_put: + reset_control_put(rst); +err_unmap: + iounmap(irqc->base); +err_free: + kfree(irqc); + return ret; +} + +IRQCHIP_PLATFORM_DRIVER_BEGIN(starfive_intc) +IRQCHIP_MATCH("starfive,jh8100-intc", starfive_intc_probe) +IRQCHIP_PLATFORM_DRIVER_END(starfive_intc) + +MODULE_DESCRIPTION("StarFive JH8100 External Interrupt Controller"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Changhuang Liang <changhuang.liang@starfivetech.com>"); diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c index 6edfd4bfa169..978811f2abe8 100644 --- a/drivers/irqchip/irq-stm32-exti.c +++ b/drivers/irqchip/irq-stm32-exti.c @@ -1,13 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) Maxime Coquelin 2015 - * Copyright (C) STMicroelectronics 2017 + * Copyright (C) STMicroelectronics 2017-2024 * Author: Maxime Coquelin <mcoquelin.stm32@gmail.com> */ #include <linux/bitops.h> -#include <linux/delay.h> -#include <linux/hwspinlock.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> @@ -16,14 +14,8 @@ #include <linux/irqdomain.h> #include <linux/of_address.h> #include <linux/of_irq.h> -#include <linux/syscore_ops.h> -#include <dt-bindings/interrupt-controller/arm-gic.h> - -#define IRQS_PER_BANK 32 - -#define HWSPNLCK_TIMEOUT 1000 /* usec */ -#define HWSPNLCK_RETRY_DELAY 100 /* usec */ +#define IRQS_PER_BANK 32 struct stm32_exti_bank { u32 imr_ofst; @@ -32,50 +24,31 @@ struct stm32_exti_bank { u32 ftsr_ofst; u32 swier_ofst; u32 rpr_ofst; - u32 fpr_ofst; -}; - -#define UNDEF_REG ~0 - -enum stm32_exti_hwspinlock { - HWSPINLOCK_UNKNOWN, - HWSPINLOCK_NONE, - HWSPINLOCK_READY, -}; - -struct stm32_desc_irq { - u32 exti; - u32 irq_parent; }; struct stm32_exti_drv_data { const struct stm32_exti_bank **exti_banks; - const struct stm32_desc_irq *desc_irqs; + const u8 *desc_irqs; u32 bank_nr; - u32 irq_nr; }; struct stm32_exti_chip_data { struct stm32_exti_host_data *host_data; const struct stm32_exti_bank *reg_bank; - struct raw_spinlock rlock; u32 wake_active; u32 mask_cache; u32 rtsr_cache; u32 ftsr_cache; + u32 event_reserved; }; struct stm32_exti_host_data { void __iomem *base; + struct device *dev; struct stm32_exti_chip_data *chips_data; const struct stm32_exti_drv_data *drv_data; - struct device_node *node; - enum stm32_exti_hwspinlock hwlock_state; - struct hwspinlock *hwlock; }; -static struct stm32_exti_host_data *stm32_host_data; - static const struct stm32_exti_bank stm32f4xx_exti_b1 = { .imr_ofst = 0x00, .emr_ofst = 0x04, @@ -83,7 +56,6 @@ static const struct stm32_exti_bank stm32f4xx_exti_b1 = { .ftsr_ofst = 0x0C, 
.swier_ofst = 0x10, .rpr_ofst = 0x14, - .fpr_ofst = UNDEF_REG, }; static const struct stm32_exti_bank *stm32f4xx_exti_banks[] = { @@ -102,7 +74,6 @@ static const struct stm32_exti_bank stm32h7xx_exti_b1 = { .ftsr_ofst = 0x04, .swier_ofst = 0x08, .rpr_ofst = 0x88, - .fpr_ofst = UNDEF_REG, }; static const struct stm32_exti_bank stm32h7xx_exti_b2 = { @@ -112,7 +83,6 @@ static const struct stm32_exti_bank stm32h7xx_exti_b2 = { .ftsr_ofst = 0x24, .swier_ofst = 0x28, .rpr_ofst = 0x98, - .fpr_ofst = UNDEF_REG, }; static const struct stm32_exti_bank stm32h7xx_exti_b3 = { @@ -122,7 +92,6 @@ static const struct stm32_exti_bank stm32h7xx_exti_b3 = { .ftsr_ofst = 0x44, .swier_ofst = 0x48, .rpr_ofst = 0xA8, - .fpr_ofst = UNDEF_REG, }; static const struct stm32_exti_bank *stm32h7xx_exti_banks[] = { @@ -136,108 +105,19 @@ static const struct stm32_exti_drv_data stm32h7xx_drv_data = { .bank_nr = ARRAY_SIZE(stm32h7xx_exti_banks), }; -static const struct stm32_exti_bank stm32mp1_exti_b1 = { - .imr_ofst = 0x80, - .emr_ofst = 0x84, - .rtsr_ofst = 0x00, - .ftsr_ofst = 0x04, - .swier_ofst = 0x08, - .rpr_ofst = 0x0C, - .fpr_ofst = 0x10, -}; - -static const struct stm32_exti_bank stm32mp1_exti_b2 = { - .imr_ofst = 0x90, - .emr_ofst = 0x94, - .rtsr_ofst = 0x20, - .ftsr_ofst = 0x24, - .swier_ofst = 0x28, - .rpr_ofst = 0x2C, - .fpr_ofst = 0x30, -}; - -static const struct stm32_exti_bank stm32mp1_exti_b3 = { - .imr_ofst = 0xA0, - .emr_ofst = 0xA4, - .rtsr_ofst = 0x40, - .ftsr_ofst = 0x44, - .swier_ofst = 0x48, - .rpr_ofst = 0x4C, - .fpr_ofst = 0x50, -}; - -static const struct stm32_exti_bank *stm32mp1_exti_banks[] = { - &stm32mp1_exti_b1, - &stm32mp1_exti_b2, - &stm32mp1_exti_b3, -}; - -static const struct stm32_desc_irq stm32mp1_desc_irq[] = { - { .exti = 0, .irq_parent = 6 }, - { .exti = 1, .irq_parent = 7 }, - { .exti = 2, .irq_parent = 8 }, - { .exti = 3, .irq_parent = 9 }, - { .exti = 4, .irq_parent = 10 }, - { .exti = 5, .irq_parent = 23 }, - { .exti = 6, .irq_parent = 64 }, - { .exti = 7, .irq_parent = 65 }, - { .exti = 8, .irq_parent = 66 }, - { .exti = 9, .irq_parent = 67 }, - { .exti = 10, .irq_parent = 40 }, - { .exti = 11, .irq_parent = 42 }, - { .exti = 12, .irq_parent = 76 }, - { .exti = 13, .irq_parent = 77 }, - { .exti = 14, .irq_parent = 121 }, - { .exti = 15, .irq_parent = 127 }, - { .exti = 16, .irq_parent = 1 }, - { .exti = 65, .irq_parent = 144 }, - { .exti = 68, .irq_parent = 143 }, - { .exti = 73, .irq_parent = 129 }, -}; - -static const struct stm32_exti_drv_data stm32mp1_drv_data = { - .exti_banks = stm32mp1_exti_banks, - .bank_nr = ARRAY_SIZE(stm32mp1_exti_banks), - .desc_irqs = stm32mp1_desc_irq, - .irq_nr = ARRAY_SIZE(stm32mp1_desc_irq), -}; - -static int stm32_exti_to_irq(const struct stm32_exti_drv_data *drv_data, - irq_hw_number_t hwirq) -{ - const struct stm32_desc_irq *desc_irq; - int i; - - if (!drv_data->desc_irqs) - return -EINVAL; - - for (i = 0; i < drv_data->irq_nr; i++) { - desc_irq = &drv_data->desc_irqs[i]; - if (desc_irq->exti == hwirq) - return desc_irq->irq_parent; - } - - return -EINVAL; -} - static unsigned long stm32_exti_pending(struct irq_chip_generic *gc) { struct stm32_exti_chip_data *chip_data = gc->private; const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank; - unsigned long pending; - pending = irq_reg_readl(gc, stm32_bank->rpr_ofst); - if (stm32_bank->fpr_ofst != UNDEF_REG) - pending |= irq_reg_readl(gc, stm32_bank->fpr_ofst); - - return pending; + return irq_reg_readl(gc, stm32_bank->rpr_ofst); } static void stm32_irq_handler(struct irq_desc *desc) { 
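	/*
	 * The hunk below replaces the two-step dispatch that used to live
	 * in this handler:
	 *
	 *	virq = irq_find_mapping(domain, irq_base + n);
	 *	generic_handle_irq(virq);
	 *
	 * generic_handle_domain_irq(domain, irq_base + n) folds the hwirq
	 * lookup and the dispatch into a single call, which is why the
	 * local virq variable can be dropped as well.
	 */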
struct irq_domain *domain = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); - unsigned int virq, nbanks = domain->gc->num_chips; + unsigned int nbanks = domain->gc->num_chips; struct irq_chip_generic *gc; unsigned long pending; int n, i, irq_base = 0; @@ -248,10 +128,8 @@ static void stm32_irq_handler(struct irq_desc *desc) gc = irq_get_domain_generic_chip(domain, irq_base); while ((pending = stm32_exti_pending(gc))) { - for_each_set_bit(n, &pending, IRQS_PER_BANK) { - virq = irq_find_mapping(domain, irq_base + n); - generic_handle_irq(virq); - } + for_each_set_bit(n, &pending, IRQS_PER_BANK) + generic_handle_domain_irq(domain, irq_base + n); } } @@ -283,64 +161,6 @@ static int stm32_exti_set_type(struct irq_data *d, return 0; } -static int stm32_exti_hwspin_lock(struct stm32_exti_chip_data *chip_data) -{ - struct stm32_exti_host_data *host_data = chip_data->host_data; - struct hwspinlock *hwlock; - int id, ret = 0, timeout = 0; - - /* first time, check for hwspinlock availability */ - if (unlikely(host_data->hwlock_state == HWSPINLOCK_UNKNOWN)) { - id = of_hwspin_lock_get_id(host_data->node, 0); - if (id >= 0) { - hwlock = hwspin_lock_request_specific(id); - if (hwlock) { - /* found valid hwspinlock */ - host_data->hwlock_state = HWSPINLOCK_READY; - host_data->hwlock = hwlock; - pr_debug("%s hwspinlock = %d\n", __func__, id); - } else { - host_data->hwlock_state = HWSPINLOCK_NONE; - } - } else if (id != -EPROBE_DEFER) { - host_data->hwlock_state = HWSPINLOCK_NONE; - } else { - /* hwspinlock driver shall be ready at that stage */ - ret = -EPROBE_DEFER; - } - } - - if (likely(host_data->hwlock_state == HWSPINLOCK_READY)) { - /* - * Use the x_raw API since we are under spin_lock protection. - * Do not use the x_timeout API because we are under irq_disable - * mode (see __setup_irq()) - */ - do { - ret = hwspin_trylock_raw(host_data->hwlock); - if (!ret) - return 0; - - udelay(HWSPNLCK_RETRY_DELAY); - timeout += HWSPNLCK_RETRY_DELAY; - } while (timeout < HWSPNLCK_TIMEOUT); - - if (ret == -EBUSY) - ret = -ETIMEDOUT; - } - - if (ret) - pr_err("%s can't get hwspinlock (%d)\n", __func__, ret); - - return ret; -} - -static void stm32_exti_hwspin_unlock(struct stm32_exti_chip_data *chip_data) -{ - if (likely(chip_data->host_data->hwlock_state == HWSPINLOCK_READY)) - hwspin_unlock_raw(chip_data->host_data->hwlock); -} - static int stm32_irq_set_type(struct irq_data *d, unsigned int type) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); @@ -349,28 +169,18 @@ static int stm32_irq_set_type(struct irq_data *d, unsigned int type) u32 rtsr, ftsr; int err; - irq_gc_lock(gc); - - err = stm32_exti_hwspin_lock(chip_data); - if (err) - goto unlock; + guard(raw_spinlock)(&gc->lock); rtsr = irq_reg_readl(gc, stm32_bank->rtsr_ofst); ftsr = irq_reg_readl(gc, stm32_bank->ftsr_ofst); err = stm32_exti_set_type(d, type, &rtsr, &ftsr); if (err) - goto unspinlock; + return err; irq_reg_writel(gc, rtsr, stm32_bank->rtsr_ofst); irq_reg_writel(gc, ftsr, stm32_bank->ftsr_ofst); - -unspinlock: - stm32_exti_hwspin_unlock(chip_data); -unlock: - irq_gc_unlock(gc); - - return err; + return 0; } static void stm32_chip_suspend(struct stm32_exti_chip_data *chip_data, @@ -403,18 +213,16 @@ static void stm32_irq_suspend(struct irq_chip_generic *gc) { struct stm32_exti_chip_data *chip_data = gc->private; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); stm32_chip_suspend(chip_data, gc->wake_active); - irq_gc_unlock(gc); } static void stm32_irq_resume(struct irq_chip_generic *gc) { 
struct stm32_exti_chip_data *chip_data = gc->private; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); stm32_chip_resume(chip_data, gc->mask_cache); - irq_gc_unlock(gc); } static int stm32_exti_alloc(struct irq_domain *d, unsigned int virq, @@ -442,6 +250,7 @@ static const struct irq_domain_ops irq_exti_domain_ops = { .map = irq_map_generic_chip, .alloc = stm32_exti_alloc, .free = stm32_exti_free, + .xlate = irq_domain_xlate_twocell, }; static void stm32_irq_ack(struct irq_data *d) @@ -450,226 +259,8 @@ static void stm32_irq_ack(struct irq_data *d) struct stm32_exti_chip_data *chip_data = gc->private; const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank; - irq_gc_lock(gc); - + guard(raw_spinlock)(&gc->lock); irq_reg_writel(gc, d->mask, stm32_bank->rpr_ofst); - if (stm32_bank->fpr_ofst != UNDEF_REG) - irq_reg_writel(gc, d->mask, stm32_bank->fpr_ofst); - - irq_gc_unlock(gc); -} - -static inline u32 stm32_exti_set_bit(struct irq_data *d, u32 reg) -{ - struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d); - void __iomem *base = chip_data->host_data->base; - u32 val; - - val = readl_relaxed(base + reg); - val |= BIT(d->hwirq % IRQS_PER_BANK); - writel_relaxed(val, base + reg); - - return val; -} - -static inline u32 stm32_exti_clr_bit(struct irq_data *d, u32 reg) -{ - struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d); - void __iomem *base = chip_data->host_data->base; - u32 val; - - val = readl_relaxed(base + reg); - val &= ~BIT(d->hwirq % IRQS_PER_BANK); - writel_relaxed(val, base + reg); - - return val; -} - -static void stm32_exti_h_eoi(struct irq_data *d) -{ - struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d); - const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank; - - raw_spin_lock(&chip_data->rlock); - - stm32_exti_set_bit(d, stm32_bank->rpr_ofst); - if (stm32_bank->fpr_ofst != UNDEF_REG) - stm32_exti_set_bit(d, stm32_bank->fpr_ofst); - - raw_spin_unlock(&chip_data->rlock); - - if (d->parent_data->chip) - irq_chip_eoi_parent(d); -} - -static void stm32_exti_h_mask(struct irq_data *d) -{ - struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d); - const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank; - - raw_spin_lock(&chip_data->rlock); - chip_data->mask_cache = stm32_exti_clr_bit(d, stm32_bank->imr_ofst); - raw_spin_unlock(&chip_data->rlock); - - if (d->parent_data->chip) - irq_chip_mask_parent(d); -} - -static void stm32_exti_h_unmask(struct irq_data *d) -{ - struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d); - const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank; - - raw_spin_lock(&chip_data->rlock); - chip_data->mask_cache = stm32_exti_set_bit(d, stm32_bank->imr_ofst); - raw_spin_unlock(&chip_data->rlock); - - if (d->parent_data->chip) - irq_chip_unmask_parent(d); -} - -static int stm32_exti_h_set_type(struct irq_data *d, unsigned int type) -{ - struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d); - const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank; - void __iomem *base = chip_data->host_data->base; - u32 rtsr, ftsr; - int err; - - raw_spin_lock(&chip_data->rlock); - - err = stm32_exti_hwspin_lock(chip_data); - if (err) - goto unlock; - - rtsr = readl_relaxed(base + stm32_bank->rtsr_ofst); - ftsr = readl_relaxed(base + stm32_bank->ftsr_ofst); - - err = stm32_exti_set_type(d, type, &rtsr, &ftsr); - if (err) - goto unspinlock; - - writel_relaxed(rtsr, base + stm32_bank->rtsr_ofst); - writel_relaxed(ftsr, base + 
stm32_bank->ftsr_ofst); - -unspinlock: - stm32_exti_hwspin_unlock(chip_data); -unlock: - raw_spin_unlock(&chip_data->rlock); - - return err; -} - -static int stm32_exti_h_set_wake(struct irq_data *d, unsigned int on) -{ - struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d); - u32 mask = BIT(d->hwirq % IRQS_PER_BANK); - - raw_spin_lock(&chip_data->rlock); - - if (on) - chip_data->wake_active |= mask; - else - chip_data->wake_active &= ~mask; - - raw_spin_unlock(&chip_data->rlock); - - return 0; -} - -static int stm32_exti_h_set_affinity(struct irq_data *d, - const struct cpumask *dest, bool force) -{ - if (d->parent_data->chip) - return irq_chip_set_affinity_parent(d, dest, force); - - return -EINVAL; -} - -#ifdef CONFIG_PM -static int stm32_exti_h_suspend(void) -{ - struct stm32_exti_chip_data *chip_data; - int i; - - for (i = 0; i < stm32_host_data->drv_data->bank_nr; i++) { - chip_data = &stm32_host_data->chips_data[i]; - raw_spin_lock(&chip_data->rlock); - stm32_chip_suspend(chip_data, chip_data->wake_active); - raw_spin_unlock(&chip_data->rlock); - } - - return 0; -} - -static void stm32_exti_h_resume(void) -{ - struct stm32_exti_chip_data *chip_data; - int i; - - for (i = 0; i < stm32_host_data->drv_data->bank_nr; i++) { - chip_data = &stm32_host_data->chips_data[i]; - raw_spin_lock(&chip_data->rlock); - stm32_chip_resume(chip_data, chip_data->mask_cache); - raw_spin_unlock(&chip_data->rlock); - } -} - -static struct syscore_ops stm32_exti_h_syscore_ops = { - .suspend = stm32_exti_h_suspend, - .resume = stm32_exti_h_resume, -}; - -static void stm32_exti_h_syscore_init(void) -{ - register_syscore_ops(&stm32_exti_h_syscore_ops); -} -#else -static inline void stm32_exti_h_syscore_init(void) {} -#endif - -static struct irq_chip stm32_exti_h_chip = { - .name = "stm32-exti-h", - .irq_eoi = stm32_exti_h_eoi, - .irq_mask = stm32_exti_h_mask, - .irq_unmask = stm32_exti_h_unmask, - .irq_retrigger = irq_chip_retrigger_hierarchy, - .irq_set_type = stm32_exti_h_set_type, - .irq_set_wake = stm32_exti_h_set_wake, - .flags = IRQCHIP_MASK_ON_SUSPEND, - .irq_set_affinity = IS_ENABLED(CONFIG_SMP) ? 
stm32_exti_h_set_affinity : NULL, -}; - -static int stm32_exti_h_domain_alloc(struct irq_domain *dm, - unsigned int virq, - unsigned int nr_irqs, void *data) -{ - struct stm32_exti_host_data *host_data = dm->host_data; - struct stm32_exti_chip_data *chip_data; - struct irq_fwspec *fwspec = data; - struct irq_fwspec p_fwspec; - irq_hw_number_t hwirq; - int p_irq, bank; - - hwirq = fwspec->param[0]; - bank = hwirq / IRQS_PER_BANK; - chip_data = &host_data->chips_data[bank]; - - irq_domain_set_hwirq_and_chip(dm, virq, hwirq, - &stm32_exti_h_chip, chip_data); - - p_irq = stm32_exti_to_irq(host_data->drv_data, hwirq); - if (p_irq >= 0) { - p_fwspec.fwnode = dm->parent->fwnode; - p_fwspec.param_count = 3; - p_fwspec.param[0] = GIC_SPI; - p_fwspec.param[1] = p_irq; - p_fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH; - - return irq_domain_alloc_irqs_parent(dm, virq, 1, &p_fwspec); - } - - return 0; } static struct @@ -683,8 +274,6 @@ stm32_exti_host_data *stm32_exti_host_init(const struct stm32_exti_drv_data *dd, return NULL; host_data->drv_data = dd; - host_data->node = node; - host_data->hwlock_state = HWSPINLOCK_UNKNOWN; host_data->chips_data = kcalloc(dd->bank_nr, sizeof(struct stm32_exti_chip_data), GFP_KERNEL); @@ -697,8 +286,6 @@ stm32_exti_host_data *stm32_exti_host_init(const struct stm32_exti_drv_data *dd, goto free_chips_data; } - stm32_host_data = host_data; - return host_data; free_chips_data: @@ -711,37 +298,26 @@ free_host_data: static struct stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data, - u32 bank_idx) + u32 bank_idx, + struct device_node *node) { const struct stm32_exti_bank *stm32_bank; struct stm32_exti_chip_data *chip_data; void __iomem *base = h_data->base; - u32 irqs_mask; stm32_bank = h_data->drv_data->exti_banks[bank_idx]; chip_data = &h_data->chips_data[bank_idx]; chip_data->host_data = h_data; chip_data->reg_bank = stm32_bank; - raw_spin_lock_init(&chip_data->rlock); - - /* Determine number of irqs supported */ - writel_relaxed(~0UL, base + stm32_bank->rtsr_ofst); - irqs_mask = readl_relaxed(base + stm32_bank->rtsr_ofst); - /* * This IP has no reset, so after hot reboot we should * clear registers to avoid residue */ writel_relaxed(0, base + stm32_bank->imr_ofst); writel_relaxed(0, base + stm32_bank->emr_ofst); - writel_relaxed(0, base + stm32_bank->rtsr_ofst); - writel_relaxed(0, base + stm32_bank->ftsr_ofst); - writel_relaxed(~0UL, base + stm32_bank->rpr_ofst); - if (stm32_bank->fpr_ofst != UNDEF_REG) - writel_relaxed(~0UL, base + stm32_bank->fpr_ofst); - pr_info("%pOF: bank%d\n", h_data->node, bank_idx); + pr_info("%pOF: bank%d\n", node, bank_idx); return chip_data; } @@ -759,8 +335,8 @@ static int __init stm32_exti_init(const struct stm32_exti_drv_data *drv_data, if (!host_data) return -ENOMEM; - domain = irq_domain_add_linear(node, drv_data->bank_nr * IRQS_PER_BANK, - &irq_exti_domain_ops, NULL); + domain = irq_domain_create_linear(of_fwnode_handle(node), drv_data->bank_nr * IRQS_PER_BANK, + &irq_exti_domain_ops, NULL); if (!domain) { pr_err("%pOFn: Could not register interrupt domain.\n", node); @@ -781,7 +357,7 @@ static int __init stm32_exti_init(const struct stm32_exti_drv_data *drv_data, struct stm32_exti_chip_data *chip_data; stm32_bank = drv_data->exti_banks[i]; - chip_data = stm32_exti_chip_init(host_data, i); + chip_data = stm32_exti_chip_init(host_data, i, node); gc = irq_get_domain_generic_chip(domain, i * IRQS_PER_BANK); @@ -819,55 +395,6 @@ out_unmap: return ret; } -static const struct irq_domain_ops stm32_exti_h_domain_ops = { - 
.alloc = stm32_exti_h_domain_alloc, - .free = irq_domain_free_irqs_common, -}; - -static int -__init stm32_exti_hierarchy_init(const struct stm32_exti_drv_data *drv_data, - struct device_node *node, - struct device_node *parent) -{ - struct irq_domain *parent_domain, *domain; - struct stm32_exti_host_data *host_data; - int ret, i; - - parent_domain = irq_find_host(parent); - if (!parent_domain) { - pr_err("interrupt-parent not found\n"); - return -EINVAL; - } - - host_data = stm32_exti_host_init(drv_data, node); - if (!host_data) - return -ENOMEM; - - for (i = 0; i < drv_data->bank_nr; i++) - stm32_exti_chip_init(host_data, i); - - domain = irq_domain_add_hierarchy(parent_domain, 0, - drv_data->bank_nr * IRQS_PER_BANK, - node, &stm32_exti_h_domain_ops, - host_data); - - if (!domain) { - pr_err("%pOFn: Could not register exti domain.\n", node); - ret = -ENOMEM; - goto out_unmap; - } - - stm32_exti_h_syscore_init(); - - return 0; - -out_unmap: - iounmap(host_data->base); - kfree(host_data->chips_data); - kfree(host_data); - return ret; -} - static int __init stm32f4_exti_of_init(struct device_node *np, struct device_node *parent) { @@ -883,11 +410,3 @@ static int __init stm32h7_exti_of_init(struct device_node *np, } IRQCHIP_DECLARE(stm32h7_exti, "st,stm32h7-exti", stm32h7_exti_of_init); - -static int __init stm32mp1_exti_of_init(struct device_node *np, - struct device_node *parent) -{ - return stm32_exti_hierarchy_init(&stm32mp1_drv_data, np, parent); -} - -IRQCHIP_DECLARE(stm32mp1_exti, "st,stm32mp1-exti", stm32mp1_exti_of_init); diff --git a/drivers/irqchip/irq-stm32mp-exti.c b/drivers/irqchip/irq-stm32mp-exti.c new file mode 100644 index 000000000000..a24f4f1a4f8f --- /dev/null +++ b/drivers/irqchip/irq-stm32mp-exti.c @@ -0,0 +1,725 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) Maxime Coquelin 2015 + * Copyright (C) STMicroelectronics 2017-2024 + * Author: Maxime Coquelin <mcoquelin.stm32@gmail.com> + */ + +#include <linux/bitops.h> +#include <linux/hwspinlock.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqdomain.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/platform_device.h> +#include <linux/pm.h> + +#include <dt-bindings/interrupt-controller/arm-gic.h> + +#define IRQS_PER_BANK 32 + +#define HWSPNLCK_TIMEOUT 1000 /* usec */ + +#define EXTI_EnCIDCFGR(n) (0x180 + (n) * 4) +#define EXTI_HWCFGR1 0x3f0 + +/* Register: EXTI_EnCIDCFGR(n) */ +#define EXTI_CIDCFGR_CFEN_MASK BIT(0) +#define EXTI_CIDCFGR_CID_MASK GENMASK(6, 4) +#define EXTI_CIDCFGR_CID_SHIFT 4 + +/* Register: EXTI_HWCFGR1 */ +#define EXTI_HWCFGR1_CIDWIDTH_MASK GENMASK(27, 24) + +#define EXTI_CID1 1 + +struct stm32mp_exti_bank { + u32 imr_ofst; + u32 rtsr_ofst; + u32 ftsr_ofst; + u32 swier_ofst; + u32 rpr_ofst; + u32 fpr_ofst; + u32 trg_ofst; + u32 seccfgr_ofst; +}; + +struct stm32mp_exti_drv_data { + const struct stm32mp_exti_bank **exti_banks; + const u8 *desc_irqs; + u32 bank_nr; +}; + +struct stm32mp_exti_chip_data { + struct stm32mp_exti_host_data *host_data; + const struct stm32mp_exti_bank *reg_bank; + struct raw_spinlock rlock; + u32 wake_active; + u32 mask_cache; + u32 rtsr_cache; + u32 ftsr_cache; + u32 event_reserved; +}; + +struct stm32mp_exti_host_data { + void __iomem *base; + struct device *dev; + struct stm32mp_exti_chip_data *chips_data; + const struct stm32mp_exti_drv_data *drv_data; + struct hwspinlock *hwlock; + /* skip 
internal desc_irqs array and get it from DT */ + bool dt_has_irqs_desc; +}; + +static const struct stm32mp_exti_bank stm32mp_exti_b1 = { + .imr_ofst = 0x80, + .rtsr_ofst = 0x00, + .ftsr_ofst = 0x04, + .swier_ofst = 0x08, + .rpr_ofst = 0x0C, + .fpr_ofst = 0x10, + .trg_ofst = 0x3EC, + .seccfgr_ofst = 0x14, +}; + +static const struct stm32mp_exti_bank stm32mp_exti_b2 = { + .imr_ofst = 0x90, + .rtsr_ofst = 0x20, + .ftsr_ofst = 0x24, + .swier_ofst = 0x28, + .rpr_ofst = 0x2C, + .fpr_ofst = 0x30, + .trg_ofst = 0x3E8, + .seccfgr_ofst = 0x34, +}; + +static const struct stm32mp_exti_bank stm32mp_exti_b3 = { + .imr_ofst = 0xA0, + .rtsr_ofst = 0x40, + .ftsr_ofst = 0x44, + .swier_ofst = 0x48, + .rpr_ofst = 0x4C, + .fpr_ofst = 0x50, + .trg_ofst = 0x3E4, + .seccfgr_ofst = 0x54, +}; + +static const struct stm32mp_exti_bank *stm32mp_exti_banks[] = { + &stm32mp_exti_b1, + &stm32mp_exti_b2, + &stm32mp_exti_b3, +}; + +static struct irq_chip stm32mp_exti_chip; +static struct irq_chip stm32mp_exti_chip_direct; + +#define EXTI_INVALID_IRQ U8_MAX +#define STM32MP_DESC_IRQ_SIZE (ARRAY_SIZE(stm32mp_exti_banks) * IRQS_PER_BANK) + +/* + * Use some intentionally tricky logic here to initialize the whole array to + * EXTI_INVALID_IRQ, but then override certain fields, requiring us to indicate + * that we "know" that there are overrides in this structure, and we'll need to + * disable that warning from W=1 builds. + */ +__diag_push(); +__diag_ignore_all("-Woverride-init", + "logic to initialize all and then override some is OK"); + +static const u8 stm32mp1_desc_irq[] = { + /* default value */ + [0 ... (STM32MP_DESC_IRQ_SIZE - 1)] = EXTI_INVALID_IRQ, + + [0] = 6, + [1] = 7, + [2] = 8, + [3] = 9, + [4] = 10, + [5] = 23, + [6] = 64, + [7] = 65, + [8] = 66, + [9] = 67, + [10] = 40, + [11] = 42, + [12] = 76, + [13] = 77, + [14] = 121, + [15] = 127, + [16] = 1, + [19] = 3, + [21] = 31, + [22] = 33, + [23] = 72, + [24] = 95, + [25] = 107, + [26] = 37, + [27] = 38, + [28] = 39, + [29] = 71, + [30] = 52, + [31] = 53, + [32] = 82, + [33] = 83, + [46] = 151, + [47] = 93, + [48] = 138, + [50] = 139, + [52] = 140, + [53] = 141, + [54] = 135, + [61] = 100, + [65] = 144, + [68] = 143, + [70] = 62, + [73] = 129, +}; + +static const u8 stm32mp13_desc_irq[] = { + /* default value */ + [0 ... 
(STM32MP_DESC_IRQ_SIZE - 1)] = EXTI_INVALID_IRQ, + + [0] = 6, + [1] = 7, + [2] = 8, + [3] = 9, + [4] = 10, + [5] = 24, + [6] = 65, + [7] = 66, + [8] = 67, + [9] = 68, + [10] = 41, + [11] = 43, + [12] = 77, + [13] = 78, + [14] = 106, + [15] = 109, + [16] = 1, + [19] = 3, + [21] = 32, + [22] = 34, + [23] = 73, + [24] = 93, + [25] = 114, + [26] = 38, + [27] = 39, + [28] = 40, + [29] = 72, + [30] = 53, + [31] = 54, + [32] = 83, + [33] = 84, + [44] = 96, + [47] = 92, + [48] = 116, + [50] = 117, + [52] = 118, + [53] = 119, + [68] = 63, + [70] = 98, +}; + +__diag_pop(); + +static const struct stm32mp_exti_drv_data stm32mp1_drv_data = { + .exti_banks = stm32mp_exti_banks, + .bank_nr = ARRAY_SIZE(stm32mp_exti_banks), + .desc_irqs = stm32mp1_desc_irq, +}; + +static const struct stm32mp_exti_drv_data stm32mp13_drv_data = { + .exti_banks = stm32mp_exti_banks, + .bank_nr = ARRAY_SIZE(stm32mp_exti_banks), + .desc_irqs = stm32mp13_desc_irq, +}; + +static int stm32mp_exti_convert_type(struct irq_data *d, unsigned int type, u32 *rtsr, u32 *ftsr) +{ + u32 mask = BIT(d->hwirq % IRQS_PER_BANK); + + switch (type) { + case IRQ_TYPE_EDGE_RISING: + *rtsr |= mask; + *ftsr &= ~mask; + break; + case IRQ_TYPE_EDGE_FALLING: + *rtsr &= ~mask; + *ftsr |= mask; + break; + case IRQ_TYPE_EDGE_BOTH: + *rtsr |= mask; + *ftsr |= mask; + break; + default: + return -EINVAL; + } + + return 0; +} + +static void stm32mp_chip_suspend(struct stm32mp_exti_chip_data *chip_data, u32 wake_active) +{ + const struct stm32mp_exti_bank *bank = chip_data->reg_bank; + void __iomem *base = chip_data->host_data->base; + + /* save rtsr, ftsr registers */ + chip_data->rtsr_cache = readl_relaxed(base + bank->rtsr_ofst); + chip_data->ftsr_cache = readl_relaxed(base + bank->ftsr_ofst); + + writel_relaxed(wake_active, base + bank->imr_ofst); +} + +static void stm32mp_chip_resume(struct stm32mp_exti_chip_data *chip_data, u32 mask_cache) +{ + const struct stm32mp_exti_bank *bank = chip_data->reg_bank; + void __iomem *base = chip_data->host_data->base; + + /* restore rtsr, ftsr registers */ + writel_relaxed(chip_data->rtsr_cache, base + bank->rtsr_ofst); + writel_relaxed(chip_data->ftsr_cache, base + bank->ftsr_ofst); + + writel_relaxed(mask_cache, base + bank->imr_ofst); +} + +/* directly set the target bit without reading first.
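+ * The pending registers this helper is used on (RPR/FPR) are write-one-to-clear: + * writing only BIT(n) acks event n alone, whereas a read-modify-write could also + * ack events that became pending in between.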
*/ +static inline void stm32mp_exti_write_bit(struct irq_data *d, u32 reg) +{ + struct stm32mp_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d); + void __iomem *base = chip_data->host_data->base; + u32 val = BIT(d->hwirq % IRQS_PER_BANK); + + writel_relaxed(val, base + reg); +} + +static inline u32 stm32mp_exti_set_bit(struct irq_data *d, u32 reg) +{ + struct stm32mp_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d); + void __iomem *base = chip_data->host_data->base; + u32 val; + + val = readl_relaxed(base + reg); + val |= BIT(d->hwirq % IRQS_PER_BANK); + writel_relaxed(val, base + reg); + + return val; +} + +static inline u32 stm32mp_exti_clr_bit(struct irq_data *d, u32 reg) +{ + struct stm32mp_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d); + void __iomem *base = chip_data->host_data->base; + u32 val; + + val = readl_relaxed(base + reg); + val &= ~BIT(d->hwirq % IRQS_PER_BANK); + writel_relaxed(val, base + reg); + + return val; +} + +static void stm32mp_exti_eoi(struct irq_data *d) +{ + struct stm32mp_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d); + const struct stm32mp_exti_bank *bank = chip_data->reg_bank; + + raw_spin_lock(&chip_data->rlock); + + stm32mp_exti_write_bit(d, bank->rpr_ofst); + stm32mp_exti_write_bit(d, bank->fpr_ofst); + + raw_spin_unlock(&chip_data->rlock); + + if (d->parent_data->chip) + irq_chip_eoi_parent(d); +} + +static void stm32mp_exti_mask(struct irq_data *d) +{ + struct stm32mp_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d); + const struct stm32mp_exti_bank *bank = chip_data->reg_bank; + + raw_spin_lock(&chip_data->rlock); + chip_data->mask_cache = stm32mp_exti_clr_bit(d, bank->imr_ofst); + raw_spin_unlock(&chip_data->rlock); + + if (d->parent_data->chip) + irq_chip_mask_parent(d); +} + +static void stm32mp_exti_unmask(struct irq_data *d) +{ + struct stm32mp_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d); + const struct stm32mp_exti_bank *bank = chip_data->reg_bank; + + raw_spin_lock(&chip_data->rlock); + chip_data->mask_cache = stm32mp_exti_set_bit(d, bank->imr_ofst); + raw_spin_unlock(&chip_data->rlock); + + if (d->parent_data->chip) + irq_chip_unmask_parent(d); +} + +static int stm32mp_exti_set_type(struct irq_data *d, unsigned int type) +{ + struct stm32mp_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d); + const struct stm32mp_exti_bank *bank = chip_data->reg_bank; + struct hwspinlock *hwlock = chip_data->host_data->hwlock; + void __iomem *base = chip_data->host_data->base; + u32 rtsr, ftsr; + int err; + + raw_spin_lock(&chip_data->rlock); + + if (hwlock) { + err = hwspin_lock_timeout_in_atomic(hwlock, HWSPNLCK_TIMEOUT); + if (err) { + pr_err("%s can't get hwspinlock (%d)\n", __func__, err); + goto unlock; + } + } + + rtsr = readl_relaxed(base + bank->rtsr_ofst); + ftsr = readl_relaxed(base + bank->ftsr_ofst); + + err = stm32mp_exti_convert_type(d, type, &rtsr, &ftsr); + if (!err) { + writel_relaxed(rtsr, base + bank->rtsr_ofst); + writel_relaxed(ftsr, base + bank->ftsr_ofst); + } + + if (hwlock) + hwspin_unlock_in_atomic(hwlock); +unlock: + raw_spin_unlock(&chip_data->rlock); + return err; +} + +static int stm32mp_exti_set_wake(struct irq_data *d, unsigned int on) +{ + struct stm32mp_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d); + u32 mask = BIT(d->hwirq % IRQS_PER_BANK); + + raw_spin_lock(&chip_data->rlock); + + if (on) + chip_data->wake_active |= mask; + else + chip_data->wake_active &= ~mask; + + raw_spin_unlock(&chip_data->rlock); + + return 0; +} + +static int 
stm32mp_exti_set_affinity(struct irq_data *d, const struct cpumask *dest, bool force) +{ + if (d->parent_data->chip) + return irq_chip_set_affinity_parent(d, dest, force); + + return IRQ_SET_MASK_OK_DONE; +} + +static int stm32mp_exti_suspend(struct device *dev) +{ + struct stm32mp_exti_host_data *host_data = dev_get_drvdata(dev); + struct stm32mp_exti_chip_data *chip_data; + int i; + + for (i = 0; i < host_data->drv_data->bank_nr; i++) { + chip_data = &host_data->chips_data[i]; + stm32mp_chip_suspend(chip_data, chip_data->wake_active); + } + + return 0; +} + +static int stm32mp_exti_resume(struct device *dev) +{ + struct stm32mp_exti_host_data *host_data = dev_get_drvdata(dev); + struct stm32mp_exti_chip_data *chip_data; + int i; + + for (i = 0; i < host_data->drv_data->bank_nr; i++) { + chip_data = &host_data->chips_data[i]; + stm32mp_chip_resume(chip_data, chip_data->mask_cache); + } + + return 0; +} + +static int stm32mp_exti_retrigger(struct irq_data *d) +{ + struct stm32mp_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d); + const struct stm32mp_exti_bank *bank = chip_data->reg_bank; + void __iomem *base = chip_data->host_data->base; + u32 mask = BIT(d->hwirq % IRQS_PER_BANK); + + writel_relaxed(mask, base + bank->swier_ofst); + + return 0; +} + +static struct irq_chip stm32mp_exti_chip = { + .name = "stm32mp-exti", + .irq_eoi = stm32mp_exti_eoi, + .irq_mask = stm32mp_exti_mask, + .irq_unmask = stm32mp_exti_unmask, + .irq_retrigger = stm32mp_exti_retrigger, + .irq_set_type = stm32mp_exti_set_type, + .irq_set_wake = stm32mp_exti_set_wake, + .flags = IRQCHIP_MASK_ON_SUSPEND, + .irq_set_affinity = IS_ENABLED(CONFIG_SMP) ? stm32mp_exti_set_affinity : NULL, +}; + +static struct irq_chip stm32mp_exti_chip_direct = { + .name = "stm32mp-exti-direct", + .irq_eoi = irq_chip_eoi_parent, + .irq_ack = irq_chip_ack_parent, + .irq_mask = stm32mp_exti_mask, + .irq_unmask = stm32mp_exti_unmask, + .irq_retrigger = irq_chip_retrigger_hierarchy, + .irq_set_type = irq_chip_set_type_parent, + .irq_set_wake = stm32mp_exti_set_wake, + .flags = IRQCHIP_MASK_ON_SUSPEND, + .irq_set_affinity = IS_ENABLED(CONFIG_SMP) ? irq_chip_set_affinity_parent : NULL, +}; + +static int stm32mp_exti_domain_alloc(struct irq_domain *dm, + unsigned int virq, + unsigned int nr_irqs, void *data) +{ + struct stm32mp_exti_host_data *host_data = dm->host_data; + struct stm32mp_exti_chip_data *chip_data; + struct irq_fwspec *fwspec = data; + struct irq_fwspec p_fwspec; + irq_hw_number_t hwirq; + struct irq_chip *chip; + u32 event_trg; + u8 desc_irq; + int bank; + + hwirq = fwspec->param[0]; + if (hwirq >= host_data->drv_data->bank_nr * IRQS_PER_BANK) + return -EINVAL; + + bank = hwirq / IRQS_PER_BANK; + chip_data = &host_data->chips_data[bank]; + + /* Check if event is reserved (Secure) */ + if (chip_data->event_reserved & BIT(hwirq % IRQS_PER_BANK)) { + dev_err(host_data->dev, "event %lu is reserved, secure\n", hwirq); + return -EPERM; + } + + event_trg = readl_relaxed(host_data->base + chip_data->reg_bank->trg_ofst); + chip = (event_trg & BIT(hwirq % IRQS_PER_BANK)) ? 
+ &stm32mp_exti_chip : &stm32mp_exti_chip_direct; + + irq_domain_set_hwirq_and_chip(dm, virq, hwirq, chip, chip_data); + + if (host_data->dt_has_irqs_desc) { + struct of_phandle_args out_irq; + int ret; + + ret = of_irq_parse_one(host_data->dev->of_node, hwirq, &out_irq); + if (ret) + return ret; + /* we only support one parent, so far */ + if (of_fwnode_handle(out_irq.np) != dm->parent->fwnode) + return -EINVAL; + + of_phandle_args_to_fwspec(out_irq.np, out_irq.args, + out_irq.args_count, &p_fwspec); + + return irq_domain_alloc_irqs_parent(dm, virq, 1, &p_fwspec); + } + + if (!host_data->drv_data->desc_irqs) + return -EINVAL; + + desc_irq = host_data->drv_data->desc_irqs[hwirq]; + if (desc_irq != EXTI_INVALID_IRQ) { + p_fwspec.fwnode = dm->parent->fwnode; + p_fwspec.param_count = 3; + p_fwspec.param[0] = GIC_SPI; + p_fwspec.param[1] = desc_irq; + p_fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH; + + return irq_domain_alloc_irqs_parent(dm, virq, 1, &p_fwspec); + } + + return 0; +} + +static struct stm32mp_exti_chip_data *stm32mp_exti_chip_init(struct stm32mp_exti_host_data *h_data, + u32 bank_idx, struct device_node *node) +{ + struct stm32mp_exti_chip_data *chip_data; + const struct stm32mp_exti_bank *bank; + void __iomem *base = h_data->base; + + bank = h_data->drv_data->exti_banks[bank_idx]; + chip_data = &h_data->chips_data[bank_idx]; + chip_data->host_data = h_data; + chip_data->reg_bank = bank; + + raw_spin_lock_init(&chip_data->rlock); + + /* + * This IP has no reset, so after hot reboot we should + * clear registers to avoid residue + */ + writel_relaxed(0, base + bank->imr_ofst); + + /* reserve Secure events */ + chip_data->event_reserved = readl_relaxed(base + bank->seccfgr_ofst); + + pr_info("%pOF: bank%d\n", node, bank_idx); + + return chip_data; +} + +static const struct irq_domain_ops stm32mp_exti_domain_ops = { + .alloc = stm32mp_exti_domain_alloc, + .free = irq_domain_free_irqs_common, + .xlate = irq_domain_xlate_twocell, +}; + +static void stm32mp_exti_check_rif(struct stm32mp_exti_host_data *host_data) +{ + unsigned int bank, i, event; + u32 cid, cidcfgr, hwcfgr1; + + /* quit on CID not supported */ + hwcfgr1 = readl_relaxed(host_data->base + EXTI_HWCFGR1); + if ((hwcfgr1 & EXTI_HWCFGR1_CIDWIDTH_MASK) == 0) + return; + + for (bank = 0; bank < host_data->drv_data->bank_nr; bank++) { + for (i = 0; i < IRQS_PER_BANK; i++) { + event = bank * IRQS_PER_BANK + i; + cidcfgr = readl_relaxed(host_data->base + EXTI_EnCIDCFGR(event)); + cid = (cidcfgr & EXTI_CIDCFGR_CID_MASK) >> EXTI_CIDCFGR_CID_SHIFT; + if ((cidcfgr & EXTI_CIDCFGR_CFEN_MASK) && cid != EXTI_CID1) + host_data->chips_data[bank].event_reserved |= BIT(i); + } + } +} + +static void stm32mp_exti_remove_irq(void *data) +{ + struct irq_domain *domain = data; + + irq_domain_remove(domain); +} + +static int stm32mp_exti_probe(struct platform_device *pdev) +{ + const struct stm32mp_exti_drv_data *drv_data; + struct irq_domain *parent_domain, *domain; + struct stm32mp_exti_host_data *host_data; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + int ret, i; + + host_data = devm_kzalloc(dev, sizeof(*host_data), GFP_KERNEL); + if (!host_data) + return -ENOMEM; + + dev_set_drvdata(dev, host_data); + host_data->dev = dev; + + /* check for optional hwspinlock which may be not available yet */ + ret = of_hwspin_lock_get_id(np, 0); + if (ret == -EPROBE_DEFER) + /* hwspinlock framework not yet ready */ + return ret; + + if (ret >= 0) { + host_data->hwlock = devm_hwspin_lock_request_specific(dev, ret); + if 
(!host_data->hwlock) { + dev_err(dev, "Failed to request hwspinlock\n"); + return -EINVAL; + } + } else if (ret != -ENOENT) { + /* note: ENOENT is a valid case (means 'no hwspinlock') */ + dev_err(dev, "Failed to get hwspinlock\n"); + return ret; + } + + /* initialize host_data */ + drv_data = of_device_get_match_data(dev); + if (!drv_data) { + dev_err(dev, "no of match data\n"); + return -ENODEV; + } + host_data->drv_data = drv_data; + + host_data->chips_data = devm_kcalloc(dev, drv_data->bank_nr, + sizeof(*host_data->chips_data), + GFP_KERNEL); + if (!host_data->chips_data) + return -ENOMEM; + + host_data->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(host_data->base)) + return PTR_ERR(host_data->base); + + for (i = 0; i < drv_data->bank_nr; i++) + stm32mp_exti_chip_init(host_data, i, np); + + stm32mp_exti_check_rif(host_data); + + parent_domain = irq_find_host(of_irq_find_parent(np)); + if (!parent_domain) { + dev_err(dev, "GIC interrupt-parent not found\n"); + return -EINVAL; + } + + domain = irq_domain_create_hierarchy(parent_domain, 0, drv_data->bank_nr * IRQS_PER_BANK, + dev_fwnode(dev), &stm32mp_exti_domain_ops, host_data); + if (!domain) { + dev_err(dev, "Could not register exti domain\n"); + return -ENOMEM; + } + + ret = devm_add_action_or_reset(dev, stm32mp_exti_remove_irq, domain); + if (ret) + return ret; + + host_data->dt_has_irqs_desc = of_property_present(np, "interrupts-extended"); + + return 0; +} + +static const struct of_device_id stm32mp_exti_ids[] = { + { .compatible = "st,stm32mp1-exti", .data = &stm32mp1_drv_data}, + { .compatible = "st,stm32mp13-exti", .data = &stm32mp13_drv_data}, + {}, +}; +MODULE_DEVICE_TABLE(of, stm32mp_exti_ids); + +static const struct dev_pm_ops stm32mp_exti_dev_pm_ops = { + NOIRQ_SYSTEM_SLEEP_PM_OPS(stm32mp_exti_suspend, stm32mp_exti_resume) +}; + +static struct platform_driver stm32mp_exti_driver = { + .probe = stm32mp_exti_probe, + .driver = { + .name = "stm32mp_exti", + .of_match_table = stm32mp_exti_ids, + .pm = &stm32mp_exti_dev_pm_ops, + }, +}; + +module_platform_driver(stm32mp_exti_driver); + +MODULE_AUTHOR("Maxime Coquelin <mcoquelin.stm32@gmail.com>"); +MODULE_DESCRIPTION("STM32MP EXTI driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/irqchip/irq-sun4i.c b/drivers/irqchip/irq-sun4i.c index fb78d6623556..9c2c9caeca2a 100644 --- a/drivers/irqchip/irq-sun4i.c +++ b/drivers/irqchip/irq-sun4i.c @@ -133,7 +133,7 @@ static int __init sun4i_of_init(struct device_node *node, /* Configure the external interrupt source type */ writel(0x00, irq_ic_data->irq_base + SUN4I_IRQ_NMI_CTRL_REG); - irq_ic_data->irq_domain = irq_domain_add_linear(node, 3 * 32, + irq_ic_data->irq_domain = irq_domain_create_linear(of_fwnode_handle(node), 3 * 32, &sun4i_irq_ops, NULL); if (!irq_ic_data->irq_domain) panic("%pOF: unable to create IRQ domain\n", node); @@ -147,10 +147,8 @@ static int __init sun4i_ic_of_init(struct device_node *node, struct device_node *parent) { irq_ic_data = kzalloc(sizeof(struct sun4i_irq_chip_data), GFP_KERNEL); - if (!irq_ic_data) { - pr_err("kzalloc failed!\n"); + if (!irq_ic_data) return -ENOMEM; - } irq_ic_data->enable_reg_offset = SUN4I_IRQ_ENABLE_REG_OFFSET; irq_ic_data->mask_reg_offset = SUN4I_IRQ_MASK_REG_OFFSET; @@ -164,10 +162,8 @@ static int __init suniv_ic_of_init(struct device_node *node, struct device_node *parent) { irq_ic_data = kzalloc(sizeof(struct sun4i_irq_chip_data), GFP_KERNEL); - if (!irq_ic_data) { - pr_err("kzalloc failed!\n"); + if (!irq_ic_data) return -ENOMEM; - } 
irq_ic_data->enable_reg_offset = SUNIV_IRQ_ENABLE_REG_OFFSET; irq_ic_data->mask_reg_offset = SUNIV_IRQ_MASK_REG_OFFSET; @@ -189,7 +185,7 @@ static void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs) * 3) spurious irq * So if we immediately get a reading of 0, check the irq-pending reg * to differentiate between 2 and 3. We only do this once to avoid - * the extra check in the common case of 1 hapening after having + * the extra check in the common case of 1 happening after having * read the vector-reg once. */ hwirq = readl(irq_ic_data->irq_base + SUN4I_IRQ_VECTOR_REG) >> 2; @@ -199,7 +195,7 @@ static void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs) return; do { - handle_domain_irq(irq_ic_data->irq_domain, hwirq, regs); + generic_handle_domain_irq(irq_ic_data->irq_domain, hwirq); hwirq = readl(irq_ic_data->irq_base + SUN4I_IRQ_VECTOR_REG) >> 2; } while (hwirq != 0); diff --git a/drivers/irqchip/irq-sun6i-r.c b/drivers/irqchip/irq-sun6i-r.c new file mode 100644 index 000000000000..23251831c06e --- /dev/null +++ b/drivers/irqchip/irq-sun6i-r.c @@ -0,0 +1,385 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * The R_INTC in Allwinner A31 and newer SoCs manages several types of + * interrupts, as shown below: + * + * NMI IRQ DIRECT IRQs MUXED IRQs + * bit 0 bits 1-15^ bits 19-31 + * + * +---------+ +---------+ +---------+ +---------+ + * | NMI Pad | | IRQ d | | IRQ m | | IRQ m+7 | + * +---------+ +---------+ +---------+ +---------+ + * | | | | | | | + * | | | | |......| | + * +------V------+ +------------+ | | | +--V------V--+ | + * | Invert/ | | Write 1 to | | | | | AND with | | + * | Edge Detect | | PENDING[0] | | | | | MUX[m/8] | | + * +-------------+ +------------+ | | | +------------+ | + * | | | | | | | + * +--V-------V--+ +--V--+ | +--V--+ | +--V--+ + * | Set Reset| | GIC | | | GIC | | | GIC | + * | Latch | | SPI | | | SPI |... | ...| SPI | + * +-------------+ | N+d | | | m | | | m+7 | + * | | +-----+ | +-----+ | +-----+ + * | | | | + * +-------V-+ +-V----------+ +---------V--+ +--------V--------+ + * | GIC SPI | | AND with | | AND with | | AND with | + * | N (=32) | | ENABLE[0] | | ENABLE[d] | | ENABLE[19+m/8] | + * +---------+ +------------+ +------------+ +-----------------+ + * | | | + * +------V-----+ +------V-----+ +--------V--------+ + * | Read | | Read | | Read | + * | PENDING[0] | | PENDING[d] | | PENDING[19+m/8] | + * +------------+ +------------+ +-----------------+ + * + * ^ bits 16-18 are direct IRQs for peripherals with banked interrupts, such as + * the MSGBOX. These IRQs do not map to any GIC SPI. + * + * The H6 variant adds two more (banked) direct IRQs and implements the full + * set of 128 mux bits. This requires a second set of top-level registers. 
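+ * + * Hwirq numbers in this driver therefore follow the GIC SPI numbering: the + * NMI's hwirq is taken from the node's first interrupt specifier (per the + * diagram above, the NMI latch feeds GIC SPI 32, i.e. + * <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>), and all other interrupts use plain + * three-cell GIC_SPI specifiers; the old two-cell NMI-only binding is still + * accepted for compatibility.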
+ */ + +#include <linux/bitmap.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqdomain.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/syscore_ops.h> + +#include <dt-bindings/interrupt-controller/arm-gic.h> + +#define SUN6I_NMI_CTRL (0x0c) +#define SUN6I_IRQ_PENDING(n) (0x10 + 4 * (n)) +#define SUN6I_IRQ_ENABLE(n) (0x40 + 4 * (n)) +#define SUN6I_MUX_ENABLE(n) (0xc0 + 4 * (n)) + +#define SUN6I_NMI_SRC_TYPE_LEVEL_LOW 0 +#define SUN6I_NMI_SRC_TYPE_EDGE_FALLING 1 +#define SUN6I_NMI_SRC_TYPE_LEVEL_HIGH 2 +#define SUN6I_NMI_SRC_TYPE_EDGE_RISING 3 + +#define SUN6I_NMI_BIT BIT(0) + +#define SUN6I_NMI_NEEDS_ACK ((void *)1) + +#define SUN6I_NR_TOP_LEVEL_IRQS 64 +#define SUN6I_NR_DIRECT_IRQS 16 +#define SUN6I_NR_MUX_BITS 128 + +struct sun6i_r_intc_variant { + u32 first_mux_irq; + u32 nr_mux_irqs; + u32 mux_valid[BITS_TO_U32(SUN6I_NR_MUX_BITS)]; +}; + +static void __iomem *base; +static irq_hw_number_t nmi_hwirq; +static DECLARE_BITMAP(wake_irq_enabled, SUN6I_NR_TOP_LEVEL_IRQS); +static DECLARE_BITMAP(wake_mux_enabled, SUN6I_NR_MUX_BITS); +static DECLARE_BITMAP(wake_mux_valid, SUN6I_NR_MUX_BITS); + +static void sun6i_r_intc_ack_nmi(void) +{ + writel_relaxed(SUN6I_NMI_BIT, base + SUN6I_IRQ_PENDING(0)); +} + +static void sun6i_r_intc_nmi_ack(struct irq_data *data) +{ + if (irqd_get_trigger_type(data) & IRQ_TYPE_EDGE_BOTH) + sun6i_r_intc_ack_nmi(); + else + data->chip_data = SUN6I_NMI_NEEDS_ACK; +} + +static void sun6i_r_intc_nmi_eoi(struct irq_data *data) +{ + /* For oneshot IRQs, delay the ack until the IRQ is unmasked. */ + if (data->chip_data == SUN6I_NMI_NEEDS_ACK && !irqd_irq_masked(data)) { + data->chip_data = NULL; + sun6i_r_intc_ack_nmi(); + } + + irq_chip_eoi_parent(data); +} + +static void sun6i_r_intc_nmi_unmask(struct irq_data *data) +{ + if (data->chip_data == SUN6I_NMI_NEEDS_ACK) { + data->chip_data = NULL; + sun6i_r_intc_ack_nmi(); + } + + irq_chip_unmask_parent(data); +} + +static int sun6i_r_intc_nmi_set_type(struct irq_data *data, unsigned int type) +{ + u32 nmi_src_type; + + switch (type) { + case IRQ_TYPE_EDGE_RISING: + nmi_src_type = SUN6I_NMI_SRC_TYPE_EDGE_RISING; + break; + case IRQ_TYPE_EDGE_FALLING: + nmi_src_type = SUN6I_NMI_SRC_TYPE_EDGE_FALLING; + break; + case IRQ_TYPE_LEVEL_HIGH: + nmi_src_type = SUN6I_NMI_SRC_TYPE_LEVEL_HIGH; + break; + case IRQ_TYPE_LEVEL_LOW: + nmi_src_type = SUN6I_NMI_SRC_TYPE_LEVEL_LOW; + break; + default: + return -EINVAL; + } + + writel_relaxed(nmi_src_type, base + SUN6I_NMI_CTRL); + + /* + * The "External NMI" GIC input connects to a latch inside R_INTC, not + * directly to the pin. So the GIC trigger type does not depend on the + * NMI pin trigger type. + */ + return irq_chip_set_type_parent(data, IRQ_TYPE_LEVEL_HIGH); +} + +static int sun6i_r_intc_nmi_set_irqchip_state(struct irq_data *data, + enum irqchip_irq_state which, + bool state) +{ + if (which == IRQCHIP_STATE_PENDING && !state) + sun6i_r_intc_ack_nmi(); + + return irq_chip_set_parent_state(data, which, state); +} + +static int sun6i_r_intc_irq_set_wake(struct irq_data *data, unsigned int on) +{ + unsigned long offset_from_nmi = data->hwirq - nmi_hwirq; + + if (offset_from_nmi < SUN6I_NR_DIRECT_IRQS) + assign_bit(offset_from_nmi, wake_irq_enabled, on); + else if (test_bit(data->hwirq, wake_mux_valid)) + assign_bit(data->hwirq, wake_mux_enabled, on); + else + /* Not wakeup capable. 
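+ * Only the NMI, the direct IRQs immediately following it and the mux bits + * marked valid for this variant can be wakeup sources.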
*/ + return -EPERM; + + return 0; +} + +static struct irq_chip sun6i_r_intc_nmi_chip = { + .name = "sun6i-r-intc", + .irq_ack = sun6i_r_intc_nmi_ack, + .irq_mask = irq_chip_mask_parent, + .irq_unmask = sun6i_r_intc_nmi_unmask, + .irq_eoi = sun6i_r_intc_nmi_eoi, + .irq_set_affinity = irq_chip_set_affinity_parent, + .irq_set_type = sun6i_r_intc_nmi_set_type, + .irq_set_irqchip_state = sun6i_r_intc_nmi_set_irqchip_state, + .irq_set_wake = sun6i_r_intc_irq_set_wake, + .flags = IRQCHIP_SET_TYPE_MASKED, +}; + +static struct irq_chip sun6i_r_intc_wakeup_chip = { + .name = "sun6i-r-intc", + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_affinity = irq_chip_set_affinity_parent, + .irq_set_type = irq_chip_set_type_parent, + .irq_set_wake = sun6i_r_intc_irq_set_wake, + .flags = IRQCHIP_SET_TYPE_MASKED, +}; + +static int sun6i_r_intc_domain_translate(struct irq_domain *domain, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int *type) +{ + /* Accept the old two-cell binding for the NMI only. */ + if (fwspec->param_count == 2 && fwspec->param[0] == 0) { + *hwirq = nmi_hwirq; + *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; + return 0; + } + + /* Otherwise this binding should match the GIC SPI binding. */ + if (fwspec->param_count < 3) + return -EINVAL; + if (fwspec->param[0] != GIC_SPI) + return -EINVAL; + + *hwirq = fwspec->param[1]; + *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; + + return 0; +} + +static int sun6i_r_intc_domain_alloc(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + struct irq_fwspec *fwspec = arg; + struct irq_fwspec gic_fwspec; + unsigned long hwirq; + unsigned int type; + int i, ret; + + ret = sun6i_r_intc_domain_translate(domain, fwspec, &hwirq, &type); + if (ret) + return ret; + if (hwirq + nr_irqs > SUN6I_NR_MUX_BITS) + return -EINVAL; + + /* Construct a GIC-compatible fwspec from this fwspec. */ + gic_fwspec = (struct irq_fwspec) { + .fwnode = domain->parent->fwnode, + .param_count = 3, + .param = { GIC_SPI, hwirq, type }, + }; + + ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &gic_fwspec); + if (ret) + return ret; + + for (i = 0; i < nr_irqs; ++i, ++hwirq, ++virq) { + if (hwirq == nmi_hwirq) { + irq_domain_set_hwirq_and_chip(domain, virq, hwirq, + &sun6i_r_intc_nmi_chip, + NULL); + irq_set_handler(virq, handle_fasteoi_ack_irq); + } else { + irq_domain_set_hwirq_and_chip(domain, virq, hwirq, + &sun6i_r_intc_wakeup_chip, + NULL); + } + } + + return 0; +} + +static const struct irq_domain_ops sun6i_r_intc_domain_ops = { + .translate = sun6i_r_intc_domain_translate, + .alloc = sun6i_r_intc_domain_alloc, + .free = irq_domain_free_irqs_common, +}; + +static int sun6i_r_intc_suspend(void *data) +{ + u32 buf[BITS_TO_U32(MAX(SUN6I_NR_TOP_LEVEL_IRQS, SUN6I_NR_MUX_BITS))]; + int i; + + /* Wake IRQs are enabled during system sleep and shutdown. */ + bitmap_to_arr32(buf, wake_irq_enabled, SUN6I_NR_TOP_LEVEL_IRQS); + for (i = 0; i < BITS_TO_U32(SUN6I_NR_TOP_LEVEL_IRQS); ++i) + writel_relaxed(buf[i], base + SUN6I_IRQ_ENABLE(i)); + bitmap_to_arr32(buf, wake_mux_enabled, SUN6I_NR_MUX_BITS); + for (i = 0; i < BITS_TO_U32(SUN6I_NR_MUX_BITS); ++i) + writel_relaxed(buf[i], base + SUN6I_MUX_ENABLE(i)); + + return 0; +} + +static void sun6i_r_intc_resume(void *data) +{ + int i; + + /* Only the NMI is relevant during normal operation. 
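+ * The wake masks programmed by the suspend path are dropped again here; + * only the NMI enable bit in the first top-level register is turned back on.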
*/ + writel_relaxed(SUN6I_NMI_BIT, base + SUN6I_IRQ_ENABLE(0)); + for (i = 1; i < BITS_TO_U32(SUN6I_NR_TOP_LEVEL_IRQS); ++i) + writel_relaxed(0, base + SUN6I_IRQ_ENABLE(i)); +} + +static void sun6i_r_intc_shutdown(void *data) +{ + sun6i_r_intc_suspend(data); +} + +static const struct syscore_ops sun6i_r_intc_syscore_ops = { + .suspend = sun6i_r_intc_suspend, + .resume = sun6i_r_intc_resume, + .shutdown = sun6i_r_intc_shutdown, +}; + +static struct syscore sun6i_r_intc_syscore = { + .ops = &sun6i_r_intc_syscore_ops, +}; + +static int __init sun6i_r_intc_init(struct device_node *node, + struct device_node *parent, + const struct sun6i_r_intc_variant *v) +{ + struct irq_domain *domain, *parent_domain; + struct of_phandle_args nmi_parent; + int ret; + + /* Extract the NMI hwirq number from the OF node. */ + ret = of_irq_parse_one(node, 0, &nmi_parent); + if (ret) + return ret; + if (nmi_parent.args_count < 3 || + nmi_parent.args[0] != GIC_SPI || + nmi_parent.args[2] != IRQ_TYPE_LEVEL_HIGH) + return -EINVAL; + nmi_hwirq = nmi_parent.args[1]; + + bitmap_set(wake_irq_enabled, v->first_mux_irq, v->nr_mux_irqs); + bitmap_from_arr32(wake_mux_valid, v->mux_valid, SUN6I_NR_MUX_BITS); + + parent_domain = irq_find_host(parent); + if (!parent_domain) { + pr_err("%pOF: Failed to obtain parent domain\n", node); + return -ENXIO; + } + + base = of_io_request_and_map(node, 0, NULL); + if (IS_ERR(base)) { + pr_err("%pOF: Failed to map MMIO region\n", node); + return PTR_ERR(base); + } + + domain = irq_domain_create_hierarchy(parent_domain, 0, 0, of_fwnode_handle(node), + &sun6i_r_intc_domain_ops, NULL); + if (!domain) { + pr_err("%pOF: Failed to allocate domain\n", node); + iounmap(base); + return -ENOMEM; + } + + register_syscore(&sun6i_r_intc_syscore); + + sun6i_r_intc_ack_nmi(); + sun6i_r_intc_resume(NULL); + + return 0; +} + +static const struct sun6i_r_intc_variant sun6i_a31_r_intc_variant __initconst = { + .first_mux_irq = 19, + .nr_mux_irqs = 13, + .mux_valid = { 0xffffffff, 0xfff80000, 0xffffffff, 0x0000000f }, +}; + +static int __init sun6i_a31_r_intc_init(struct device_node *node, + struct device_node *parent) +{ + return sun6i_r_intc_init(node, parent, &sun6i_a31_r_intc_variant); +} +IRQCHIP_DECLARE(sun6i_a31_r_intc, "allwinner,sun6i-a31-r-intc", sun6i_a31_r_intc_init); + +static const struct sun6i_r_intc_variant sun50i_h6_r_intc_variant __initconst = { + .first_mux_irq = 21, + .nr_mux_irqs = 16, + .mux_valid = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }, +}; + +static int __init sun50i_h6_r_intc_init(struct device_node *node, + struct device_node *parent) +{ + return sun6i_r_intc_init(node, parent, &sun50i_h6_r_intc_variant); +} +IRQCHIP_DECLARE(sun50i_h6_r_intc, "allwinner,sun50i-h6-r-intc", sun50i_h6_r_intc_init); diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c index a412b5d5d0fa..fe32dfdc2dd0 100644 --- a/drivers/irqchip/irq-sunxi-nmi.c +++ b/drivers/irqchip/irq-sunxi-nmi.c @@ -19,7 +19,6 @@ #include <linux/irqdomain.h> #include <linux/of_irq.h> #include <linux/of_address.h> -#include <linux/of_platform.h> #include <linux/irqchip.h> #include <linux/irqchip/chained_irq.h> @@ -27,18 +26,12 @@ #define SUNXI_NMI_IRQ_BIT BIT(0) -#define SUN6I_R_INTC_CTRL 0x0c -#define SUN6I_R_INTC_PENDING 0x10 -#define SUN6I_R_INTC_ENABLE 0x40 - /* * For deprecated sun6i-a31-sc-nmi compatible. - * Registers are offset by 0x0c. 
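+ * The offsets below are given relative to this block's own base instead.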
*/ -#define SUN6I_R_INTC_NMI_OFFSET 0x0c -#define SUN6I_NMI_CTRL (SUN6I_R_INTC_CTRL - SUN6I_R_INTC_NMI_OFFSET) -#define SUN6I_NMI_PENDING (SUN6I_R_INTC_PENDING - SUN6I_R_INTC_NMI_OFFSET) -#define SUN6I_NMI_ENABLE (SUN6I_R_INTC_ENABLE - SUN6I_R_INTC_NMI_OFFSET) +#define SUN6I_NMI_CTRL 0x00 +#define SUN6I_NMI_PENDING 0x04 +#define SUN6I_NMI_ENABLE 0x34 #define SUN7I_NMI_CTRL 0x00 #define SUN7I_NMI_PENDING 0x04 @@ -55,38 +48,41 @@ enum { SUNXI_SRC_TYPE_EDGE_RISING, }; -struct sunxi_sc_nmi_reg_offs { - u32 ctrl; - u32 pend; - u32 enable; +struct sunxi_sc_nmi_data { + struct { + u32 ctrl; + u32 pend; + u32 enable; + } reg_offs; + u32 enable_val; }; -static const struct sunxi_sc_nmi_reg_offs sun6i_r_intc_reg_offs __initconst = { - .ctrl = SUN6I_R_INTC_CTRL, - .pend = SUN6I_R_INTC_PENDING, - .enable = SUN6I_R_INTC_ENABLE, +static const struct sunxi_sc_nmi_data sun6i_data __initconst = { + .reg_offs.ctrl = SUN6I_NMI_CTRL, + .reg_offs.pend = SUN6I_NMI_PENDING, + .reg_offs.enable = SUN6I_NMI_ENABLE, }; -static const struct sunxi_sc_nmi_reg_offs sun6i_reg_offs __initconst = { - .ctrl = SUN6I_NMI_CTRL, - .pend = SUN6I_NMI_PENDING, - .enable = SUN6I_NMI_ENABLE, +static const struct sunxi_sc_nmi_data sun7i_data __initconst = { + .reg_offs.ctrl = SUN7I_NMI_CTRL, + .reg_offs.pend = SUN7I_NMI_PENDING, + .reg_offs.enable = SUN7I_NMI_ENABLE, }; -static const struct sunxi_sc_nmi_reg_offs sun7i_reg_offs __initconst = { - .ctrl = SUN7I_NMI_CTRL, - .pend = SUN7I_NMI_PENDING, - .enable = SUN7I_NMI_ENABLE, +static const struct sunxi_sc_nmi_data sun9i_data __initconst = { + .reg_offs.ctrl = SUN9I_NMI_CTRL, + .reg_offs.pend = SUN9I_NMI_PENDING, + .reg_offs.enable = SUN9I_NMI_ENABLE, }; -static const struct sunxi_sc_nmi_reg_offs sun9i_reg_offs __initconst = { - .ctrl = SUN9I_NMI_CTRL, - .pend = SUN9I_NMI_PENDING, - .enable = SUN9I_NMI_ENABLE, +static const struct sunxi_sc_nmi_data sun55i_a523_data __initconst = { + .reg_offs.ctrl = SUN9I_NMI_CTRL, + .reg_offs.pend = SUN9I_NMI_PENDING, + .reg_offs.enable = SUN9I_NMI_ENABLE, + .enable_val = BIT(31), }; -static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off, - u32 val) +static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off, u32 val) { irq_reg_writel(gc, val, off); } @@ -100,10 +96,9 @@ static void sunxi_sc_nmi_handle_irq(struct irq_desc *desc) { struct irq_domain *domain = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); - unsigned int virq = irq_find_mapping(domain, 0); chained_irq_enter(chip, desc); - generic_handle_irq(virq); + generic_handle_domain_irq(domain, 0); chained_irq_exit(chip, desc); } @@ -116,7 +111,7 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type) unsigned int src_type; unsigned int i; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); switch (flow_type & IRQF_TRIGGER_MASK) { case IRQ_TYPE_EDGE_FALLING: @@ -133,9 +128,7 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type) src_type = SUNXI_SRC_TYPE_LEVEL_LOW; break; default: - irq_gc_unlock(gc); - pr_err("Cannot assign multiple trigger modes to IRQ %d.\n", - data->irq); + pr_err("Cannot assign multiple trigger modes to IRQ %d.\n", data->irq); return -EBADR; } @@ -150,23 +143,18 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type) src_type_reg &= ~SUNXI_NMI_SRC_TYPE_MASK; src_type_reg |= src_type; sunxi_sc_nmi_write(gc, ctrl_off, src_type_reg); - - irq_gc_unlock(gc); - return IRQ_SET_MASK_OK; } static int __init 
sunxi_sc_nmi_irq_init(struct device_node *node, - const struct sunxi_sc_nmi_reg_offs *reg_offs) + const struct sunxi_sc_nmi_data *data) { - struct irq_domain *domain; + unsigned int irq, clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; struct irq_chip_generic *gc; - unsigned int irq; - unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; + struct irq_domain *domain; int ret; - - domain = irq_domain_add_linear(node, 1, &irq_generic_chip_ops, NULL); + domain = irq_domain_create_linear(of_fwnode_handle(node), 1, &irq_generic_chip_ops, NULL); if (!domain) { pr_err("Could not register interrupt domain.\n"); return -ENOMEM; @@ -200,27 +188,28 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node, gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit; gc->chip_types[0].chip.irq_eoi = irq_gc_ack_set_bit; gc->chip_types[0].chip.irq_set_type = sunxi_sc_nmi_set_type; - gc->chip_types[0].chip.flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED; - gc->chip_types[0].regs.ack = reg_offs->pend; - gc->chip_types[0].regs.mask = reg_offs->enable; - gc->chip_types[0].regs.type = reg_offs->ctrl; + gc->chip_types[0].chip.flags = IRQCHIP_EOI_THREADED | + IRQCHIP_EOI_IF_HANDLED | + IRQCHIP_SKIP_SET_WAKE; + gc->chip_types[0].regs.ack = data->reg_offs.pend; + gc->chip_types[0].regs.mask = data->reg_offs.enable; + gc->chip_types[0].regs.type = data->reg_offs.ctrl; gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH; - gc->chip_types[1].chip.name = gc->chip_types[0].chip.name; gc->chip_types[1].chip.irq_ack = irq_gc_ack_set_bit; gc->chip_types[1].chip.irq_mask = irq_gc_mask_clr_bit; gc->chip_types[1].chip.irq_unmask = irq_gc_mask_set_bit; gc->chip_types[1].chip.irq_set_type = sunxi_sc_nmi_set_type; - gc->chip_types[1].regs.ack = reg_offs->pend; - gc->chip_types[1].regs.mask = reg_offs->enable; - gc->chip_types[1].regs.type = reg_offs->ctrl; + gc->chip_types[1].regs.ack = data->reg_offs.pend; + gc->chip_types[1].regs.mask = data->reg_offs.enable; + gc->chip_types[1].regs.type = data->reg_offs.ctrl; gc->chip_types[1].handler = handle_edge_irq; /* Disable any active interrupts */ - sunxi_sc_nmi_write(gc, reg_offs->enable, 0); + sunxi_sc_nmi_write(gc, data->reg_offs.enable, data->enable_val); /* Clear any pending NMI interrupts */ - sunxi_sc_nmi_write(gc, reg_offs->pend, SUNXI_NMI_IRQ_BIT); + sunxi_sc_nmi_write(gc, data->reg_offs.pend, SUNXI_NMI_IRQ_BIT); irq_set_chained_handler_and_data(irq, sunxi_sc_nmi_handle_irq, domain); @@ -232,31 +221,30 @@ fail_irqd_remove: return ret; } -static int __init sun6i_r_intc_irq_init(struct device_node *node, - struct device_node *parent) -{ - return sunxi_sc_nmi_irq_init(node, &sun6i_r_intc_reg_offs); -} -IRQCHIP_DECLARE(sun6i_r_intc, "allwinner,sun6i-a31-r-intc", - sun6i_r_intc_irq_init); - static int __init sun6i_sc_nmi_irq_init(struct device_node *node, struct device_node *parent) { - return sunxi_sc_nmi_irq_init(node, &sun6i_reg_offs); + return sunxi_sc_nmi_irq_init(node, &sun6i_data); } IRQCHIP_DECLARE(sun6i_sc_nmi, "allwinner,sun6i-a31-sc-nmi", sun6i_sc_nmi_irq_init); static int __init sun7i_sc_nmi_irq_init(struct device_node *node, struct device_node *parent) { - return sunxi_sc_nmi_irq_init(node, &sun7i_reg_offs); + return sunxi_sc_nmi_irq_init(node, &sun7i_data); } IRQCHIP_DECLARE(sun7i_sc_nmi, "allwinner,sun7i-a20-sc-nmi", sun7i_sc_nmi_irq_init); static int __init sun9i_nmi_irq_init(struct device_node *node, struct device_node *parent) { - return sunxi_sc_nmi_irq_init(node, &sun9i_reg_offs); + return sunxi_sc_nmi_irq_init(node, &sun9i_data); } 
IRQCHIP_DECLARE(sun9i_nmi, "allwinner,sun9i-a80-nmi", sun9i_nmi_irq_init); + +static int __init sun55i_nmi_irq_init(struct device_node *node, + struct device_node *parent) +{ + return sunxi_sc_nmi_irq_init(node, &sun55i_a523_data); +} +IRQCHIP_DECLARE(sun55i_nmi, "allwinner,sun55i-a523-nmi", sun55i_nmi_irq_init); diff --git a/drivers/irqchip/irq-tango.c b/drivers/irqchip/irq-tango.c deleted file mode 100644 index ae28d8648679..000000000000 --- a/drivers/irqchip/irq-tango.c +++ /dev/null @@ -1,231 +0,0 @@ -/* - * Copyright (C) 2014 Mans Rullgard <mans@mansr.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#include <linux/init.h> -#include <linux/irq.h> -#include <linux/irqchip.h> -#include <linux/irqchip/chained_irq.h> -#include <linux/ioport.h> -#include <linux/io.h> -#include <linux/of_address.h> -#include <linux/of_irq.h> -#include <linux/slab.h> - -#define IRQ0_CTL_BASE 0x0000 -#define IRQ1_CTL_BASE 0x0100 -#define EDGE_CTL_BASE 0x0200 -#define IRQ2_CTL_BASE 0x0300 - -#define IRQ_CTL_HI 0x18 -#define EDGE_CTL_HI 0x20 - -#define IRQ_STATUS 0x00 -#define IRQ_RAWSTAT 0x04 -#define IRQ_EN_SET 0x08 -#define IRQ_EN_CLR 0x0c -#define IRQ_SOFT_SET 0x10 -#define IRQ_SOFT_CLR 0x14 - -#define EDGE_STATUS 0x00 -#define EDGE_RAWSTAT 0x04 -#define EDGE_CFG_RISE 0x08 -#define EDGE_CFG_FALL 0x0c -#define EDGE_CFG_RISE_SET 0x10 -#define EDGE_CFG_RISE_CLR 0x14 -#define EDGE_CFG_FALL_SET 0x18 -#define EDGE_CFG_FALL_CLR 0x1c - -struct tangox_irq_chip { - void __iomem *base; - unsigned long ctl; -}; - -static inline u32 intc_readl(struct tangox_irq_chip *chip, int reg) -{ - return readl_relaxed(chip->base + reg); -} - -static inline void intc_writel(struct tangox_irq_chip *chip, int reg, u32 val) -{ - writel_relaxed(val, chip->base + reg); -} - -static void tangox_dispatch_irqs(struct irq_domain *dom, unsigned int status, - int base) -{ - unsigned int hwirq; - unsigned int virq; - - while (status) { - hwirq = __ffs(status); - virq = irq_find_mapping(dom, base + hwirq); - if (virq) - generic_handle_irq(virq); - status &= ~BIT(hwirq); - } -} - -static void tangox_irq_handler(struct irq_desc *desc) -{ - struct irq_domain *dom = irq_desc_get_handler_data(desc); - struct irq_chip *host_chip = irq_desc_get_chip(desc); - struct tangox_irq_chip *chip = dom->host_data; - unsigned int status_lo, status_hi; - - chained_irq_enter(host_chip, desc); - - status_lo = intc_readl(chip, chip->ctl + IRQ_STATUS); - status_hi = intc_readl(chip, chip->ctl + IRQ_CTL_HI + IRQ_STATUS); - - tangox_dispatch_irqs(dom, status_lo, 0); - tangox_dispatch_irqs(dom, status_hi, 32); - - chained_irq_exit(host_chip, desc); -} - -static int tangox_irq_set_type(struct irq_data *d, unsigned int flow_type) -{ - struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); - struct tangox_irq_chip *chip = gc->domain->host_data; - struct irq_chip_regs *regs = &gc->chip_types[0].regs; - - switch (flow_type & IRQ_TYPE_SENSE_MASK) { - case IRQ_TYPE_EDGE_RISING: - intc_writel(chip, regs->type + EDGE_CFG_RISE_SET, d->mask); - intc_writel(chip, regs->type + EDGE_CFG_FALL_CLR, d->mask); - break; - - case IRQ_TYPE_EDGE_FALLING: - intc_writel(chip, regs->type + EDGE_CFG_RISE_CLR, d->mask); - intc_writel(chip, regs->type + EDGE_CFG_FALL_SET, d->mask); - break; - - case IRQ_TYPE_LEVEL_HIGH: - intc_writel(chip, regs->type + EDGE_CFG_RISE_CLR, 
d->mask); - intc_writel(chip, regs->type + EDGE_CFG_FALL_CLR, d->mask); - break; - - case IRQ_TYPE_LEVEL_LOW: - intc_writel(chip, regs->type + EDGE_CFG_RISE_SET, d->mask); - intc_writel(chip, regs->type + EDGE_CFG_FALL_SET, d->mask); - break; - - default: - pr_err("Invalid trigger mode %x for IRQ %d\n", - flow_type, d->irq); - return -EINVAL; - } - - return irq_setup_alt_chip(d, flow_type); -} - -static void __init tangox_irq_init_chip(struct irq_chip_generic *gc, - unsigned long ctl_offs, - unsigned long edge_offs) -{ - struct tangox_irq_chip *chip = gc->domain->host_data; - struct irq_chip_type *ct = gc->chip_types; - unsigned long ctl_base = chip->ctl + ctl_offs; - unsigned long edge_base = EDGE_CTL_BASE + edge_offs; - int i; - - gc->reg_base = chip->base; - gc->unused = 0; - - for (i = 0; i < 2; i++) { - ct[i].chip.irq_ack = irq_gc_ack_set_bit; - ct[i].chip.irq_mask = irq_gc_mask_disable_reg; - ct[i].chip.irq_mask_ack = irq_gc_mask_disable_and_ack_set; - ct[i].chip.irq_unmask = irq_gc_unmask_enable_reg; - ct[i].chip.irq_set_type = tangox_irq_set_type; - ct[i].chip.name = gc->domain->name; - - ct[i].regs.enable = ctl_base + IRQ_EN_SET; - ct[i].regs.disable = ctl_base + IRQ_EN_CLR; - ct[i].regs.ack = edge_base + EDGE_RAWSTAT; - ct[i].regs.type = edge_base; - } - - ct[0].type = IRQ_TYPE_LEVEL_MASK; - ct[0].handler = handle_level_irq; - - ct[1].type = IRQ_TYPE_EDGE_BOTH; - ct[1].handler = handle_edge_irq; - - intc_writel(chip, ct->regs.disable, 0xffffffff); - intc_writel(chip, ct->regs.ack, 0xffffffff); -} - -static void __init tangox_irq_domain_init(struct irq_domain *dom) -{ - struct irq_chip_generic *gc; - int i; - - for (i = 0; i < 2; i++) { - gc = irq_get_domain_generic_chip(dom, i * 32); - tangox_irq_init_chip(gc, i * IRQ_CTL_HI, i * EDGE_CTL_HI); - } -} - -static int __init tangox_irq_init(void __iomem *base, struct resource *baseres, - struct device_node *node) -{ - struct tangox_irq_chip *chip; - struct irq_domain *dom; - struct resource res; - int irq; - int err; - - irq = irq_of_parse_and_map(node, 0); - if (!irq) - panic("%pOFn: failed to get IRQ", node); - - err = of_address_to_resource(node, 0, &res); - if (err) - panic("%pOFn: failed to get address", node); - - chip = kzalloc(sizeof(*chip), GFP_KERNEL); - chip->ctl = res.start - baseres->start; - chip->base = base; - - dom = irq_domain_add_linear(node, 64, &irq_generic_chip_ops, chip); - if (!dom) - panic("%pOFn: failed to create irqdomain", node); - - err = irq_alloc_domain_generic_chips(dom, 32, 2, node->name, - handle_level_irq, 0, 0, 0); - if (err) - panic("%pOFn: failed to allocate irqchip", node); - - tangox_irq_domain_init(dom); - - irq_set_chained_handler_and_data(irq, tangox_irq_handler, dom); - - return 0; -} - -static int __init tangox_of_irq_init(struct device_node *node, - struct device_node *parent) -{ - struct device_node *c; - struct resource res; - void __iomem *base; - - base = of_iomap(node, 0); - if (!base) - panic("%pOFn: of_iomap failed", node); - - of_address_to_resource(node, 0, &res); - - for_each_child_of_node(node, c) - tangox_irq_init(base, &res, c); - - return 0; -} -IRQCHIP_DECLARE(tangox_intc, "sigma,smp8642-intc", tangox_of_irq_init); diff --git a/drivers/irqchip/irq-tb10x.c b/drivers/irqchip/irq-tb10x.c index 7e6708099a7b..94cbc5111d7e 100644 --- a/drivers/irqchip/irq-tb10x.c +++ b/drivers/irqchip/irq-tb10x.c @@ -1,22 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Abilis Systems interrupt controller driver * * Copyright (C) Abilis Systems 2012 * * Author: Christian Ruppert 
<christian.ruppert@abilis.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/interrupt.h> @@ -25,7 +13,6 @@ #include <linux/irqchip.h> #include <linux/of_irq.h> #include <linux/of_address.h> -#include <linux/of_platform.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/bitops.h> @@ -54,11 +41,9 @@ static inline u32 ab_irqctl_readreg(struct irq_chip_generic *gc, u32 reg) static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); - uint32_t im, mod, pol; + uint32_t mod, pol, im = data->mask; - im = data->mask; - - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); mod = ab_irqctl_readreg(gc, AB_IRQCTL_SRC_MODE) | im; pol = ab_irqctl_readreg(gc, AB_IRQCTL_SRC_POLARITY) | im; @@ -72,6 +57,7 @@ static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type) break; case IRQ_TYPE_NONE: flow_type = IRQ_TYPE_LEVEL_LOW; + fallthrough; case IRQ_TYPE_LEVEL_LOW: mod ^= im; pol ^= im; @@ -79,9 +65,7 @@ static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type) case IRQ_TYPE_EDGE_RISING: break; default: - irq_gc_unlock(gc); - pr_err("%s: Cannot assign multiple trigger modes to IRQ %d.\n", - __func__, data->irq); + pr_err("%s: Cannot assign multiple trigger modes to IRQ %d.\n", __func__, data->irq); return -EBADR; } @@ -91,9 +75,6 @@ static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type) ab_irqctl_writereg(gc, AB_IRQCTL_SRC_MODE, mod); ab_irqctl_writereg(gc, AB_IRQCTL_SRC_POLARITY, pol); ab_irqctl_writereg(gc, AB_IRQCTL_INT_STATUS, im); - - irq_gc_unlock(gc); - return IRQ_SET_MASK_OK; } @@ -102,7 +83,7 @@ static void tb10x_irq_cascade(struct irq_desc *desc) struct irq_domain *domain = irq_desc_get_handler_data(desc); unsigned int irq = irq_desc_get_irq(desc); - generic_handle_irq(irq_find_mapping(domain, irq)); + generic_handle_domain_irq(domain, irq); } static int __init of_tb10x_init_irq(struct device_node *ictl, @@ -133,13 +114,13 @@ static int __init of_tb10x_init_irq(struct device_node *ictl, goto ioremap_fail; } - domain = irq_domain_add_linear(ictl, AB_IRQCTL_MAXIRQ, - &irq_generic_chip_ops, NULL); + domain = irq_domain_create_linear(of_fwnode_handle(ictl), AB_IRQCTL_MAXIRQ, + &irq_generic_chip_ops, NULL); if (!domain) { ret = -ENOMEM; pr_err("%pOFn: Could not register interrupt domain.\n", ictl); - goto irq_domain_add_fail; + goto irq_domain_create_fail; } ret = irq_alloc_domain_generic_chips(domain, AB_IRQCTL_MAXIRQ, @@ -162,7 +143,6 @@ static int __init of_tb10x_init_irq(struct device_node *ictl, gc->chip_types[0].regs.mask = AB_IRQCTL_INT_ENABLE; gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH; - gc->chip_types[1].chip.name = gc->chip_types[0].chip.name; gc->chip_types[1].chip.irq_ack = irq_gc_ack_set_bit; gc->chip_types[1].chip.irq_mask = irq_gc_mask_clr_bit; gc->chip_types[1].chip.irq_unmask = irq_gc_mask_set_bit; @@ -187,7 
+167,7 @@ static int __init of_tb10x_init_irq(struct device_node *ictl, gc_alloc_fail: irq_domain_remove(domain); -irq_domain_add_fail: +irq_domain_create_fail: iounmap(reg_base); ioremap_fail: release_mem_region(mem.start, resource_size(&mem)); diff --git a/drivers/irqchip/irq-tegra.c b/drivers/irqchip/irq-tegra.c index 0abc0cd1c32e..b6382cf6359a 100644 --- a/drivers/irqchip/irq-tegra.c +++ b/drivers/irqchip/irq-tegra.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Driver code for Tegra's Legacy Interrupt Controller * @@ -10,16 +11,6 @@ * Colin Cross <ccross@android.com> * * Copyright (C) 2010,2013, NVIDIA Corporation - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * */ #include <linux/io.h> @@ -141,7 +132,7 @@ static int tegra_set_wake(struct irq_data *d, unsigned int enable) return 0; } -static int tegra_ictlr_suspend(void) +static int tegra_ictlr_suspend(void *data) { unsigned long flags; unsigned int i; @@ -157,10 +148,10 @@ static int tegra_ictlr_suspend(void) lic->cop_iep[i] = readl_relaxed(ictlr + ICTLR_COP_IEP_CLASS); /* Disable COP interrupts */ - writel_relaxed(~0ul, ictlr + ICTLR_COP_IER_CLR); + writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_COP_IER_CLR); /* Disable CPU interrupts */ - writel_relaxed(~0ul, ictlr + ICTLR_CPU_IER_CLR); + writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_CPU_IER_CLR); /* Enable the wakeup sources of ictlr */ writel_relaxed(lic->ictlr_wake_mask[i], ictlr + ICTLR_CPU_IER_SET); @@ -170,7 +161,7 @@ static int tegra_ictlr_suspend(void) return 0; } -static void tegra_ictlr_resume(void) +static void tegra_ictlr_resume(void *data) { unsigned long flags; unsigned int i; @@ -181,26 +172,30 @@ static void tegra_ictlr_resume(void) writel_relaxed(lic->cpu_iep[i], ictlr + ICTLR_CPU_IEP_CLASS); - writel_relaxed(~0ul, ictlr + ICTLR_CPU_IER_CLR); + writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_CPU_IER_CLR); writel_relaxed(lic->cpu_ier[i], ictlr + ICTLR_CPU_IER_SET); writel_relaxed(lic->cop_iep[i], ictlr + ICTLR_COP_IEP_CLASS); - writel_relaxed(~0ul, ictlr + ICTLR_COP_IER_CLR); + writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_COP_IER_CLR); writel_relaxed(lic->cop_ier[i], ictlr + ICTLR_COP_IER_SET); } local_irq_restore(flags); } -static struct syscore_ops tegra_ictlr_syscore_ops = { +static const struct syscore_ops tegra_ictlr_syscore_ops = { .suspend = tegra_ictlr_suspend, .resume = tegra_ictlr_resume, }; +static struct syscore tegra_ictlr_syscore = { + .ops = &tegra_ictlr_syscore_ops, +}; + static void tegra_ictlr_syscore_init(void) { - register_syscore_ops(&tegra_ictlr_syscore_ops); + register_syscore(&tegra_ictlr_syscore); } #else #define tegra_set_wake NULL @@ -321,7 +316,7 @@ static int __init tegra_ictlr_init(struct device_node *node, lic->base[i] = base; /* Disable all interrupts */ - writel_relaxed(~0UL, base + ICTLR_CPU_IER_CLR); + writel_relaxed(GENMASK(31, 0), base + ICTLR_CPU_IER_CLR); /* All interrupts target IRQ */ writel_relaxed(0, base + ICTLR_CPU_IEP_CLASS); @@ -339,9 +334,8 @@ static int __init tegra_ictlr_init(struct device_node *node, node, num_ictlrs, soc->num_ictlrs); - domain = irq_domain_add_hierarchy(parent_domain, 0, 
num_ictlrs * 32, - node, &tegra_ictlr_domain_ops, - lic); + domain = irq_domain_create_hierarchy(parent_domain, 0, num_ictlrs * 32, + of_fwnode_handle(node), &tegra_ictlr_domain_ops, lic); if (!domain) { pr_err("%pOF: failed to allocated domain\n", node); err = -ENOMEM; diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c new file mode 100644 index 000000000000..01963d36cfaf --- /dev/null +++ b/drivers/irqchip/irq-ti-sci-inta.c @@ -0,0 +1,745 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Texas Instruments' K3 Interrupt Aggregator irqchip driver + * + * Copyright (C) 2018-2019 Texas Instruments Incorporated - https://www.ti.com/ + * Lokesh Vutla <lokeshvutla@ti.com> + */ + +#include <linux/err.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqdomain.h> +#include <linux/interrupt.h> +#include <linux/msi.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/platform_device.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/soc/ti/ti_sci_inta_msi.h> +#include <linux/soc/ti/ti_sci_protocol.h> +#include <asm-generic/msi.h> + +#define TI_SCI_DEV_ID_MASK 0xffff +#define TI_SCI_DEV_ID_SHIFT 16 +#define TI_SCI_IRQ_ID_MASK 0xffff +#define TI_SCI_IRQ_ID_SHIFT 0 +#define HWIRQ_TO_DEVID(hwirq) (((hwirq) >> (TI_SCI_DEV_ID_SHIFT)) & \ + (TI_SCI_DEV_ID_MASK)) +#define HWIRQ_TO_IRQID(hwirq) ((hwirq) & (TI_SCI_IRQ_ID_MASK)) +#define TO_HWIRQ(dev, index) ((((dev) & TI_SCI_DEV_ID_MASK) << \ + TI_SCI_DEV_ID_SHIFT) | \ + ((index) & TI_SCI_IRQ_ID_MASK)) + +#define MAX_EVENTS_PER_VINT 64 +#define VINT_ENABLE_SET_OFFSET 0x0 +#define VINT_ENABLE_CLR_OFFSET 0x8 +#define VINT_STATUS_OFFSET 0x18 +#define VINT_STATUS_MASKED_OFFSET 0x20 + +/** + * struct ti_sci_inta_event_desc - Description of an event coming to + * Interrupt Aggregator. This serves + * as a mapping table for global event, + * hwirq and vint bit. + * @global_event: Global event number corresponding to this event + * @hwirq: Hwirq of the incoming interrupt + * @vint_bit: Corresponding vint bit to which this event is attached. + */ +struct ti_sci_inta_event_desc { + u16 global_event; + u32 hwirq; + u8 vint_bit; +}; + +/** + * struct ti_sci_inta_vint_desc - Description of a virtual interrupt coming out + * of Interrupt Aggregator. + * @domain: Pointer to IRQ domain to which this vint belongs. + * @list: List entry for the vint list + * @event_map: Bitmap to manage the allocation of events to vint. + * @events: Array of event descriptors assigned to this vint. + * @parent_virq: Linux IRQ number that gets attached to parent + * @vint_id: TISCI vint ID + */ +struct ti_sci_inta_vint_desc { + struct irq_domain *domain; + struct list_head list; + DECLARE_BITMAP(event_map, MAX_EVENTS_PER_VINT); + struct ti_sci_inta_event_desc events[MAX_EVENTS_PER_VINT]; + unsigned int parent_virq; + u16 vint_id; +}; + +/** + * struct ti_sci_inta_irq_domain - Structure representing a TISCI based + * Interrupt Aggregator IRQ domain. + * @sci: Pointer to TISCI handle + * @vint: TISCI resource pointer representing IA interrupts. + * @global_event: TISCI resource pointer representing global events. + * @vint_list: List of the vints active in the system + * @vint_mutex: Mutex to protect vint_list + * @base: Base address of the memory mapped IO registers + * @pdev: Pointer to platform device. 
+ * @ti_sci_id: TI-SCI device identifier + * @unmapped_cnt: Number of @unmapped_dev_ids entries + * @unmapped_dev_ids: Pointer to an array of TI-SCI device identifiers of + * unmapped event sources. + * Unmapped Events are not part of the Global Event Map and + * they are converted to Global event within INTA to be + * received by the same INTA to generate an interrupt. + * In case an interrupt request comes for a device which is + * generating Unmapped Event, we must use the INTA's TI-SCI + * device identifier in place of the source device + * identifier to let sysfw know where it has to program the + * Global Event number. + */ +struct ti_sci_inta_irq_domain { + const struct ti_sci_handle *sci; + struct ti_sci_resource *vint; + struct ti_sci_resource *global_event; + struct list_head vint_list; + /* Mutex to protect vint list */ + struct mutex vint_mutex; + void __iomem *base; + struct platform_device *pdev; + u32 ti_sci_id; + + int unmapped_cnt; + u16 *unmapped_dev_ids; +}; + +#define to_vint_desc(e, i) container_of(e, struct ti_sci_inta_vint_desc, \ + events[i]) + +static u16 ti_sci_inta_get_dev_id(struct ti_sci_inta_irq_domain *inta, u32 hwirq) +{ + u16 dev_id = HWIRQ_TO_DEVID(hwirq); + int i; + + if (inta->unmapped_cnt == 0) + return dev_id; + + /* + * For devices sending Unmapped Events we must use the INTA's TI-SCI + * device identifier number to be able to convert it to a Global Event + * and map it to an interrupt. + */ + for (i = 0; i < inta->unmapped_cnt; i++) { + if (dev_id == inta->unmapped_dev_ids[i]) { + dev_id = inta->ti_sci_id; + break; + } + } + + return dev_id; +} + +/** + * ti_sci_inta_irq_handler() - Chained IRQ handler for the vint irqs + * @desc: Pointer to irq_desc corresponding to the irq + */ +static void ti_sci_inta_irq_handler(struct irq_desc *desc) +{ + struct ti_sci_inta_vint_desc *vint_desc; + struct ti_sci_inta_irq_domain *inta; + struct irq_domain *domain; + unsigned int bit; + unsigned long val; + + vint_desc = irq_desc_get_handler_data(desc); + domain = vint_desc->domain; + inta = domain->host_data; + + chained_irq_enter(irq_desc_get_chip(desc), desc); + + val = readq_relaxed(inta->base + vint_desc->vint_id * 0x1000 + + VINT_STATUS_MASKED_OFFSET); + + for_each_set_bit(bit, &val, MAX_EVENTS_PER_VINT) + generic_handle_domain_irq(domain, vint_desc->events[bit].hwirq); + + chained_irq_exit(irq_desc_get_chip(desc), desc); +} + +/** + * ti_sci_inta_xlate_irq() - Translate hwirq to parent's hwirq. + * @inta: IRQ domain corresponding to Interrupt Aggregator + * @vint_id: Hardware irq corresponding to the above irq domain + * + * Return parent irq number if translation is available else -ENOENT. + */ +static int ti_sci_inta_xlate_irq(struct ti_sci_inta_irq_domain *inta, + u16 vint_id) +{ + struct device_node *np = dev_of_node(&inta->pdev->dev); + u32 base, parent_base, size; + const __be32 *range; + int len; + + range = of_get_property(np, "ti,interrupt-ranges", &len); + if (!range) + return vint_id; + + for (len /= sizeof(*range); len >= 3; len -= 3) { + base = be32_to_cpu(*range++); + parent_base = be32_to_cpu(*range++); + size = be32_to_cpu(*range++); + + if (base <= vint_id && vint_id < base + size) + return vint_id - base + parent_base; + } + + return -ENOENT; +} + +/** + * ti_sci_inta_alloc_parent_irq() - Allocate parent irq to Interrupt aggregator + * @domain: IRQ domain corresponding to Interrupt Aggregator + * + * Return 0 if all went well else corresponding error value. 
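+ * (More precisely, it returns a vint_desc pointer on success and an + * ERR_PTR() on failure; the caller already holds vint_mutex.)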
+ */ +static struct ti_sci_inta_vint_desc *ti_sci_inta_alloc_parent_irq(struct irq_domain *domain) +{ + struct ti_sci_inta_irq_domain *inta = domain->host_data; + struct ti_sci_inta_vint_desc *vint_desc; + struct irq_fwspec parent_fwspec; + struct device_node *parent_node; + unsigned int parent_virq; + int p_hwirq, ret; + u16 vint_id; + + vint_id = ti_sci_get_free_resource(inta->vint); + if (vint_id == TI_SCI_RESOURCE_NULL) + return ERR_PTR(-EINVAL); + + p_hwirq = ti_sci_inta_xlate_irq(inta, vint_id); + if (p_hwirq < 0) { + ret = p_hwirq; + goto free_vint; + } + + vint_desc = kzalloc(sizeof(*vint_desc), GFP_KERNEL); + if (!vint_desc) { + ret = -ENOMEM; + goto free_vint; + } + + vint_desc->domain = domain; + vint_desc->vint_id = vint_id; + INIT_LIST_HEAD(&vint_desc->list); + + parent_node = of_irq_find_parent(dev_of_node(&inta->pdev->dev)); + parent_fwspec.fwnode = of_fwnode_handle(parent_node); + + if (of_device_is_compatible(parent_node, "arm,gic-v3")) { + /* Parent is GIC */ + parent_fwspec.param_count = 3; + parent_fwspec.param[0] = 0; + parent_fwspec.param[1] = p_hwirq - 32; + parent_fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH; + } else { + /* Parent is Interrupt Router */ + parent_fwspec.param_count = 1; + parent_fwspec.param[0] = p_hwirq; + } + + parent_virq = irq_create_fwspec_mapping(&parent_fwspec); + if (parent_virq == 0) { + dev_err(&inta->pdev->dev, "Parent IRQ allocation failed\n"); + ret = -EINVAL; + goto free_vint_desc; + + } + vint_desc->parent_virq = parent_virq; + + list_add_tail(&vint_desc->list, &inta->vint_list); + irq_set_chained_handler_and_data(vint_desc->parent_virq, + ti_sci_inta_irq_handler, vint_desc); + + return vint_desc; +free_vint_desc: + kfree(vint_desc); +free_vint: + ti_sci_release_resource(inta->vint, vint_id); + return ERR_PTR(ret); +} + +/** + * ti_sci_inta_alloc_event() - Attach an event to a IA vint. + * @vint_desc: Pointer to vint_desc to which the event gets attached + * @free_bit: Bit inside vint to which event gets attached + * @hwirq: hwirq of the input event + * + * Return event_desc pointer if all went ok else appropriate error value. + */ +static struct ti_sci_inta_event_desc *ti_sci_inta_alloc_event(struct ti_sci_inta_vint_desc *vint_desc, + u16 free_bit, + u32 hwirq) +{ + struct ti_sci_inta_irq_domain *inta = vint_desc->domain->host_data; + struct ti_sci_inta_event_desc *event_desc; + u16 dev_id, dev_index; + int err; + + dev_id = ti_sci_inta_get_dev_id(inta, hwirq); + dev_index = HWIRQ_TO_IRQID(hwirq); + + event_desc = &vint_desc->events[free_bit]; + event_desc->hwirq = hwirq; + event_desc->vint_bit = free_bit; + event_desc->global_event = ti_sci_get_free_resource(inta->global_event); + if (event_desc->global_event == TI_SCI_RESOURCE_NULL) + return ERR_PTR(-EINVAL); + + err = inta->sci->ops.rm_irq_ops.set_event_map(inta->sci, + dev_id, dev_index, + inta->ti_sci_id, + vint_desc->vint_id, + event_desc->global_event, + free_bit); + if (err) + goto free_global_event; + + return event_desc; +free_global_event: + ti_sci_release_resource(inta->global_event, event_desc->global_event); + return ERR_PTR(err); +} + +/** + * ti_sci_inta_alloc_irq() - Allocate an irq within INTA domain + * @domain: irq_domain pointer corresponding to INTA + * @hwirq: hwirq of the input event + * + * Note: Allocation happens in the following manner: + * - Find a free bit available in any of the vints available in the list. + * - If not found, allocate a vint from the vint pool + * - Attach the free bit to input hwirq. 
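+ * For example (illustrative numbers): if vint 12 already carries 63 + * events, the next event takes its last free bit; the event after that + * forces allocation of a fresh vint via ti_sci_inta_alloc_parent_irq(). + *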
+ * Return event_desc if all went ok else appropriate error value. + */ +static struct ti_sci_inta_event_desc *ti_sci_inta_alloc_irq(struct irq_domain *domain, + u32 hwirq) +{ + struct ti_sci_inta_irq_domain *inta = domain->host_data; + struct ti_sci_inta_vint_desc *vint_desc = NULL; + struct ti_sci_inta_event_desc *event_desc; + u16 free_bit; + + mutex_lock(&inta->vint_mutex); + list_for_each_entry(vint_desc, &inta->vint_list, list) { + free_bit = find_first_zero_bit(vint_desc->event_map, + MAX_EVENTS_PER_VINT); + if (free_bit != MAX_EVENTS_PER_VINT) { + set_bit(free_bit, vint_desc->event_map); + goto alloc_event; + } + } + + /* No free bits available. Allocate a new vint */ + vint_desc = ti_sci_inta_alloc_parent_irq(domain); + if (IS_ERR(vint_desc)) { + event_desc = ERR_CAST(vint_desc); + goto unlock; + } + + free_bit = find_first_zero_bit(vint_desc->event_map, + MAX_EVENTS_PER_VINT); + set_bit(free_bit, vint_desc->event_map); + +alloc_event: + event_desc = ti_sci_inta_alloc_event(vint_desc, free_bit, hwirq); + if (IS_ERR(event_desc)) + clear_bit(free_bit, vint_desc->event_map); + +unlock: + mutex_unlock(&inta->vint_mutex); + return event_desc; +} + +/** + * ti_sci_inta_free_parent_irq() - Free a parent irq to INTA + * @inta: Pointer to inta domain. + * @vint_desc: Pointer to vint_desc that needs to be freed. + */ +static void ti_sci_inta_free_parent_irq(struct ti_sci_inta_irq_domain *inta, + struct ti_sci_inta_vint_desc *vint_desc) +{ + if (find_first_bit(vint_desc->event_map, MAX_EVENTS_PER_VINT) == MAX_EVENTS_PER_VINT) { + list_del(&vint_desc->list); + ti_sci_release_resource(inta->vint, vint_desc->vint_id); + irq_dispose_mapping(vint_desc->parent_virq); + kfree(vint_desc); + } +} + +/** + * ti_sci_inta_free_irq() - Free an IRQ within INTA domain + * @event_desc: Pointer to event_desc that needs to be freed. + * @hwirq: Hwirq number within INTA domain that needs to be freed + */ +static void ti_sci_inta_free_irq(struct ti_sci_inta_event_desc *event_desc, + u32 hwirq) +{ + struct ti_sci_inta_vint_desc *vint_desc; + struct ti_sci_inta_irq_domain *inta; + u16 dev_id; + + vint_desc = to_vint_desc(event_desc, event_desc->vint_bit); + inta = vint_desc->domain->host_data; + dev_id = ti_sci_inta_get_dev_id(inta, hwirq); + /* free event irq */ + mutex_lock(&inta->vint_mutex); + inta->sci->ops.rm_irq_ops.free_event_map(inta->sci, + dev_id, HWIRQ_TO_IRQID(hwirq), + inta->ti_sci_id, + vint_desc->vint_id, + event_desc->global_event, + event_desc->vint_bit); + + clear_bit(event_desc->vint_bit, vint_desc->event_map); + ti_sci_release_resource(inta->global_event, event_desc->global_event); + event_desc->global_event = TI_SCI_RESOURCE_NULL; + event_desc->hwirq = 0; + + ti_sci_inta_free_parent_irq(inta, vint_desc); + mutex_unlock(&inta->vint_mutex); +} + +/** + * ti_sci_inta_request_resources() - Allocate resources for input irq + * @data: Pointer to corresponding irq_data + * + * Note: This is the core api where the actual allocation happens for input + * hwirq. This allocation involves creating a parent irq for vint. + * If this is done in irq_domain_ops.alloc() then a deadlock is reached + * for allocation. So this allocation is being done in request_resources() + * + * Return: 0 if all went well else corresponding error. 
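+ * + * (For orientation: the core reaches this via request_irq() -> + * __setup_irq() -> irq_request_resources(), i.e. after the virq has + * already been allocated, so taking vint_mutex and mapping the parent + * vint here cannot recurse into .alloc().)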
+ */ +static int ti_sci_inta_request_resources(struct irq_data *data) +{ + struct ti_sci_inta_event_desc *event_desc; + + event_desc = ti_sci_inta_alloc_irq(data->domain, data->hwirq); + if (IS_ERR(event_desc)) + return PTR_ERR(event_desc); + + data->chip_data = event_desc; + + return 0; +} + +/** + * ti_sci_inta_release_resources - Release resources for input irq + * @data: Pointer to corresponding irq_data + * + * Note: Corresponding to request_resources(), all the unmapping and deletion + * of parent vint irqs happens in this api. + */ +static void ti_sci_inta_release_resources(struct irq_data *data) +{ + struct ti_sci_inta_event_desc *event_desc; + + event_desc = irq_data_get_irq_chip_data(data); + ti_sci_inta_free_irq(event_desc, data->hwirq); +} + +/** + * ti_sci_inta_manage_event() - Control the event based on the offset + * @data: Pointer to corresponding irq_data + * @offset: register offset using which event is controlled. + */ +static void ti_sci_inta_manage_event(struct irq_data *data, u32 offset) +{ + struct ti_sci_inta_event_desc *event_desc; + struct ti_sci_inta_vint_desc *vint_desc; + struct ti_sci_inta_irq_domain *inta; + + event_desc = irq_data_get_irq_chip_data(data); + vint_desc = to_vint_desc(event_desc, event_desc->vint_bit); + inta = data->domain->host_data; + + writeq_relaxed(BIT(event_desc->vint_bit), + inta->base + vint_desc->vint_id * 0x1000 + offset); +} + +/** + * ti_sci_inta_mask_irq() - Mask an event + * @data: Pointer to corresponding irq_data + */ +static void ti_sci_inta_mask_irq(struct irq_data *data) +{ + ti_sci_inta_manage_event(data, VINT_ENABLE_CLR_OFFSET); +} + +/** + * ti_sci_inta_unmask_irq() - Unmask an event + * @data: Pointer to corresponding irq_data + */ +static void ti_sci_inta_unmask_irq(struct irq_data *data) +{ + ti_sci_inta_manage_event(data, VINT_ENABLE_SET_OFFSET); +} + +/** + * ti_sci_inta_ack_irq() - Ack an event + * @data: Pointer to corresponding irq_data + */ +static void ti_sci_inta_ack_irq(struct irq_data *data) +{ + /* + * Do not clear the event if hardware is capable of sending + * a down event. + */ + if (irqd_get_trigger_type(data) != IRQF_TRIGGER_HIGH) + ti_sci_inta_manage_event(data, VINT_STATUS_OFFSET); +} + +static int ti_sci_inta_set_affinity(struct irq_data *d, + const struct cpumask *mask_val, bool force) +{ + return -EINVAL; +} + +/** + * ti_sci_inta_set_type() - Update the trigger type of the irq. + * @data: Pointer to corresponding irq_data + * @type: Trigger type as specified by user + * + * Note: This updates the handle_irq callback for level msi. + * + * Return 0 if all went well else appropriate error. + */ +static int ti_sci_inta_set_type(struct irq_data *data, unsigned int type) +{ + /* + * .alloc default sets handle_edge_irq. 
But if the user specifies + * that IRQ is level MSI, then update the handle to handle_level_irq + */ + switch (type & IRQ_TYPE_SENSE_MASK) { + case IRQF_TRIGGER_HIGH: + irq_set_handler_locked(data, handle_level_irq); + return 0; + case IRQF_TRIGGER_RISING: + return 0; + default: + return -EINVAL; + } +} + +static struct irq_chip ti_sci_inta_irq_chip = { + .name = "INTA", + .irq_ack = ti_sci_inta_ack_irq, + .irq_mask = ti_sci_inta_mask_irq, + .irq_set_type = ti_sci_inta_set_type, + .irq_unmask = ti_sci_inta_unmask_irq, + .irq_set_affinity = ti_sci_inta_set_affinity, + .irq_request_resources = ti_sci_inta_request_resources, + .irq_release_resources = ti_sci_inta_release_resources, +}; + +/** + * ti_sci_inta_irq_domain_free() - Free an IRQ from the IRQ domain + * @domain: Domain to which the irqs belong + * @virq: base linux virtual IRQ to be freed. + * @nr_irqs: Number of continuous irqs to be freed + */ +static void ti_sci_inta_irq_domain_free(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) +{ + struct irq_data *data = irq_domain_get_irq_data(domain, virq); + + irq_domain_reset_irq_data(data); +} + +/** + * ti_sci_inta_irq_domain_alloc() - Allocate Interrupt aggregator IRQs + * @domain: Point to the interrupt aggregator IRQ domain + * @virq: Corresponding Linux virtual IRQ number + * @nr_irqs: Continuous irqs to be allocated + * @data: Pointer to firmware specifier + * + * No actual allocation happens here. + * + * Return 0 if all went well else appropriate error value. + */ +static int ti_sci_inta_irq_domain_alloc(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs, + void *data) +{ + msi_alloc_info_t *arg = data; + + irq_domain_set_info(domain, virq, arg->hwirq, &ti_sci_inta_irq_chip, + NULL, handle_edge_irq, NULL, NULL); + + return 0; +} + +static const struct irq_domain_ops ti_sci_inta_irq_domain_ops = { + .free = ti_sci_inta_irq_domain_free, + .alloc = ti_sci_inta_irq_domain_alloc, +}; + +static struct irq_chip ti_sci_inta_msi_irq_chip = { + .name = "MSI-INTA", + .flags = IRQCHIP_SUPPORTS_LEVEL_MSI, +}; + +static void ti_sci_inta_msi_set_desc(msi_alloc_info_t *arg, + struct msi_desc *desc) +{ + struct platform_device *pdev = to_platform_device(desc->dev); + + arg->desc = desc; + arg->hwirq = TO_HWIRQ(pdev->id, desc->msi_index); +} + +static struct msi_domain_ops ti_sci_inta_msi_ops = { + .set_desc = ti_sci_inta_msi_set_desc, +}; + +static struct msi_domain_info ti_sci_inta_msi_domain_info = { + .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_LEVEL_CAPABLE), + .ops = &ti_sci_inta_msi_ops, + .chip = &ti_sci_inta_msi_irq_chip, +}; + +static int ti_sci_inta_get_unmapped_sources(struct ti_sci_inta_irq_domain *inta) +{ + struct device *dev = &inta->pdev->dev; + struct device_node *node = dev_of_node(dev); + struct of_phandle_iterator it; + int count, err, ret, i; + + count = of_count_phandle_with_args(node, "ti,unmapped-event-sources", NULL); + if (count <= 0) + return 0; + + inta->unmapped_dev_ids = devm_kcalloc(dev, count, + sizeof(*inta->unmapped_dev_ids), + GFP_KERNEL); + if (!inta->unmapped_dev_ids) + return -ENOMEM; + + i = 0; + of_for_each_phandle(&it, err, node, "ti,unmapped-event-sources", NULL, 0) { + u32 dev_id; + + ret = of_property_read_u32(it.node, "ti,sci-dev-id", &dev_id); + if (ret) { + dev_err(dev, "ti,sci-dev-id read failure for %pOFf\n", it.node); + of_node_put(it.node); + return ret; + } + inta->unmapped_dev_ids[i++] = dev_id; + } + + inta->unmapped_cnt = count; + + return 0; +} + +static int 
ti_sci_inta_irq_domain_probe(struct platform_device *pdev) +{ + struct irq_domain *parent_domain, *domain, *msi_domain; + struct device_node *parent_node, *node; + struct ti_sci_inta_irq_domain *inta; + struct device *dev = &pdev->dev; + int ret; + + node = dev_of_node(dev); + parent_node = of_irq_find_parent(node); + if (!parent_node) { + dev_err(dev, "Failed to get IRQ parent node\n"); + return -ENODEV; + } + + parent_domain = irq_find_host(parent_node); + if (!parent_domain) + return -EPROBE_DEFER; + + inta = devm_kzalloc(dev, sizeof(*inta), GFP_KERNEL); + if (!inta) + return -ENOMEM; + + inta->pdev = pdev; + inta->sci = devm_ti_sci_get_by_phandle(dev, "ti,sci"); + if (IS_ERR(inta->sci)) + return dev_err_probe(dev, PTR_ERR(inta->sci), + "ti,sci read fail\n"); + + ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id", &inta->ti_sci_id); + if (ret) { + dev_err(dev, "missing 'ti,sci-dev-id' property\n"); + return -EINVAL; + } + + inta->vint = devm_ti_sci_get_resource(inta->sci, dev, inta->ti_sci_id, + TI_SCI_RESASG_SUBTYPE_IA_VINT); + if (IS_ERR(inta->vint)) { + dev_err(dev, "VINT resource allocation failed\n"); + return PTR_ERR(inta->vint); + } + + inta->global_event = devm_ti_sci_get_resource(inta->sci, dev, inta->ti_sci_id, + TI_SCI_RESASG_SUBTYPE_GLOBAL_EVENT_SEVT); + if (IS_ERR(inta->global_event)) { + dev_err(dev, "Global event resource allocation failed\n"); + return PTR_ERR(inta->global_event); + } + + inta->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(inta->base)) + return PTR_ERR(inta->base); + + ret = ti_sci_inta_get_unmapped_sources(inta); + if (ret) + return ret; + + domain = irq_domain_create_linear(dev_fwnode(dev), ti_sci_get_num_resources(inta->vint), + &ti_sci_inta_irq_domain_ops, inta); + if (!domain) { + dev_err(dev, "Failed to allocate IRQ domain\n"); + return -ENOMEM; + } + + msi_domain = ti_sci_inta_msi_create_irq_domain(of_fwnode_handle(node), + &ti_sci_inta_msi_domain_info, + domain); + if (!msi_domain) { + irq_domain_remove(domain); + dev_err(dev, "Failed to allocate msi domain\n"); + return -ENOMEM; + } + + INIT_LIST_HEAD(&inta->vint_list); + mutex_init(&inta->vint_mutex); + + dev_info(dev, "Interrupt Aggregator domain %d created\n", inta->ti_sci_id); + + return 0; +} + +static const struct of_device_id ti_sci_inta_irq_domain_of_match[] = { + { .compatible = "ti,sci-inta", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, ti_sci_inta_irq_domain_of_match); + +static struct platform_driver ti_sci_inta_irq_domain_driver = { + .probe = ti_sci_inta_irq_domain_probe, + .driver = { + .name = "ti-sci-inta", + .of_match_table = ti_sci_inta_irq_domain_of_match, + }, +}; +module_platform_driver(ti_sci_inta_irq_domain_driver); + +MODULE_AUTHOR("Lokesh Vutla <lokeshvutla@ti.com>"); +MODULE_DESCRIPTION("K3 Interrupt Aggregator driver over TI SCI protocol"); +MODULE_LICENSE("GPL"); diff --git a/drivers/irqchip/irq-ti-sci-intr.c b/drivers/irqchip/irq-ti-sci-intr.c new file mode 100644 index 000000000000..354613e74ad0 --- /dev/null +++ b/drivers/irqchip/irq-ti-sci-intr.c @@ -0,0 +1,306 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Texas Instruments' K3 Interrupt Router irqchip driver + * + * Copyright (C) 2018-2019 Texas Instruments Incorporated - https://www.ti.com/ + * Lokesh Vutla <lokeshvutla@ti.com> + */ + +#include <linux/err.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/io.h> +#include <linux/irqchip.h> +#include <linux/irqdomain.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/platform_device.h> 
+#include <linux/soc/ti/ti_sci_protocol.h> + +/** + * struct ti_sci_intr_irq_domain - Structure representing a TISCI based + * Interrupt Router IRQ domain. + * @sci: Pointer to TISCI handle + * @out_irqs: TISCI resource pointer representing INTR irqs. + * @dev: Struct device pointer. + * @ti_sci_id: TI-SCI device identifier + * @type: Specifies the trigger type supported by this Interrupt Router + */ +struct ti_sci_intr_irq_domain { + const struct ti_sci_handle *sci; + struct ti_sci_resource *out_irqs; + struct device *dev; + u32 ti_sci_id; + u32 type; +}; + +static struct irq_chip ti_sci_intr_irq_chip = { + .name = "INTR", + .irq_eoi = irq_chip_eoi_parent, + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_set_type = irq_chip_set_type_parent, + .irq_retrigger = irq_chip_retrigger_hierarchy, + .irq_set_affinity = irq_chip_set_affinity_parent, +}; + +/** + * ti_sci_intr_irq_domain_translate() - Retrieve hwirq and type from + * IRQ firmware specific handler. + * @domain: Pointer to IRQ domain + * @fwspec: Pointer to IRQ specific firmware structure + * @hwirq: IRQ number identified by hardware + * @type: IRQ type + * + * Return 0 if all went ok else appropriate error. + */ +static int ti_sci_intr_irq_domain_translate(struct irq_domain *domain, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int *type) +{ + struct ti_sci_intr_irq_domain *intr = domain->host_data; + + if (fwspec->param_count != 1) + return -EINVAL; + + *hwirq = fwspec->param[0]; + *type = intr->type; + + return 0; +} + +/** + * ti_sci_intr_xlate_irq() - Translate hwirq to parent's hwirq. + * @intr: IRQ domain corresponding to Interrupt Router + * @irq: Hardware irq corresponding to the above irq domain + * + * Return parent irq number if translation is available else -ENOENT. + */ +static int ti_sci_intr_xlate_irq(struct ti_sci_intr_irq_domain *intr, u32 irq) +{ + struct device_node *np = dev_of_node(intr->dev); + u32 base, pbase, size, len; + const __be32 *range; + + range = of_get_property(np, "ti,interrupt-ranges", &len); + if (!range) + return irq; + + for (len /= sizeof(*range); len >= 3; len -= 3) { + base = be32_to_cpu(*range++); + pbase = be32_to_cpu(*range++); + size = be32_to_cpu(*range++); + + if (base <= irq && irq < base + size) + return irq - base + pbase; + } + + return -ENOENT; +} + +/** + * ti_sci_intr_irq_domain_free() - Free the specified IRQs from the domain. + * @domain: Domain to which the irqs belong + * @virq: Linux virtual IRQ to be freed. + * @nr_irqs: Number of continuous irqs to be freed + */ +static void ti_sci_intr_irq_domain_free(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) +{ + struct ti_sci_intr_irq_domain *intr = domain->host_data; + struct irq_data *data; + int out_irq; + + data = irq_domain_get_irq_data(domain, virq); + out_irq = (uintptr_t)data->chip_data; + + intr->sci->ops.rm_irq_ops.free_irq(intr->sci, + intr->ti_sci_id, data->hwirq, + intr->ti_sci_id, out_irq); + ti_sci_release_resource(intr->out_irqs, out_irq); + irq_domain_free_irqs_parent(domain, virq, 1); + irq_domain_reset_irq_data(data); +} + +/** + * ti_sci_intr_alloc_parent_irq() - Allocate parent IRQ + * @domain: Pointer to the interrupt router IRQ domain + * @virq: Corresponding Linux virtual IRQ number + * @hwirq: Corresponding hwirq for the IRQ within this IRQ domain + * + * Returns intr output irq if all went well else an appropriate negative + * error value.
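+ * + * For example (illustrative numbers): out_irq 8, translated through + * "ti,interrupt-ranges" to parent hwirq 64, yields the GIC fwspec + * { 0 /* SPI */, 32, type }, and sysfw is asked to route + * { ti_sci_id, hwirq } onto { ti_sci_id, 8 }.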
+ */ +static int ti_sci_intr_alloc_parent_irq(struct irq_domain *domain, + unsigned int virq, u32 hwirq) +{ + struct ti_sci_intr_irq_domain *intr = domain->host_data; + struct device_node *parent_node; + struct irq_fwspec fwspec; + int p_hwirq, err = 0; + u16 out_irq; + + out_irq = ti_sci_get_free_resource(intr->out_irqs); + if (out_irq == TI_SCI_RESOURCE_NULL) + return -EINVAL; + + p_hwirq = ti_sci_intr_xlate_irq(intr, out_irq); + if (p_hwirq < 0) + goto err_irqs; + + parent_node = of_irq_find_parent(dev_of_node(intr->dev)); + fwspec.fwnode = of_fwnode_handle(parent_node); + + if (of_device_is_compatible(parent_node, "arm,gic-v3")) { + /* Parent is GIC */ + fwspec.param_count = 3; + fwspec.param[0] = 0; /* SPI */ + fwspec.param[1] = p_hwirq - 32; /* SPI offset */ + fwspec.param[2] = intr->type; + } else { + /* Parent is Interrupt Router */ + fwspec.param_count = 1; + fwspec.param[0] = p_hwirq; + } + + err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); + if (err) + goto err_irqs; + + err = intr->sci->ops.rm_irq_ops.set_irq(intr->sci, + intr->ti_sci_id, hwirq, + intr->ti_sci_id, out_irq); + if (err) + goto err_msg; + + return out_irq; + +err_msg: + irq_domain_free_irqs_parent(domain, virq, 1); +err_irqs: + ti_sci_release_resource(intr->out_irqs, out_irq); + return err; +} + +/** + * ti_sci_intr_irq_domain_alloc() - Allocate Interrupt router IRQs + * @domain: Point to the interrupt router IRQ domain + * @virq: Corresponding Linux virtual IRQ number + * @nr_irqs: Continuous irqs to be allocated + * @data: Pointer to firmware specifier + * + * Return 0 if all went well else appropriate error value. + */ +static int ti_sci_intr_irq_domain_alloc(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs, + void *data) +{ + struct irq_fwspec *fwspec = data; + unsigned long hwirq; + unsigned int flags; + int err, out_irq; + + err = ti_sci_intr_irq_domain_translate(domain, fwspec, &hwirq, &flags); + if (err) + return err; + + out_irq = ti_sci_intr_alloc_parent_irq(domain, virq, hwirq); + if (out_irq < 0) + return out_irq; + + irq_domain_set_hwirq_and_chip(domain, virq, hwirq, + &ti_sci_intr_irq_chip, + (void *)(uintptr_t)out_irq); + + return 0; +} + +static const struct irq_domain_ops ti_sci_intr_irq_domain_ops = { + .free = ti_sci_intr_irq_domain_free, + .alloc = ti_sci_intr_irq_domain_alloc, + .translate = ti_sci_intr_irq_domain_translate, +}; + +static int ti_sci_intr_irq_domain_probe(struct platform_device *pdev) +{ + struct irq_domain *parent_domain, *domain; + struct ti_sci_intr_irq_domain *intr; + struct device_node *parent_node; + struct device *dev = &pdev->dev; + int ret; + + parent_node = of_irq_find_parent(dev_of_node(dev)); + if (!parent_node) { + dev_err(dev, "Failed to get IRQ parent node\n"); + return -ENODEV; + } + + parent_domain = irq_find_host(parent_node); + of_node_put(parent_node); + if (!parent_domain) { + dev_err(dev, "Failed to find IRQ parent domain\n"); + return -ENODEV; + } + + intr = devm_kzalloc(dev, sizeof(*intr), GFP_KERNEL); + if (!intr) + return -ENOMEM; + + intr->dev = dev; + ret = of_property_read_u32(dev_of_node(dev), "ti,intr-trigger-type", + &intr->type); + if (ret) { + dev_err(dev, "missing ti,intr-trigger-type property\n"); + return -EINVAL; + } + + intr->sci = devm_ti_sci_get_by_phandle(dev, "ti,sci"); + if (IS_ERR(intr->sci)) + return dev_err_probe(dev, PTR_ERR(intr->sci), + "ti,sci read fail\n"); + + ret = of_property_read_u32(dev_of_node(dev), "ti,sci-dev-id", + &intr->ti_sci_id); + if (ret) { + dev_err(dev, "missing 'ti,sci-dev-id' 
property\n"); + return -EINVAL; + } + + intr->out_irqs = devm_ti_sci_get_resource(intr->sci, dev, + intr->ti_sci_id, + TI_SCI_RESASG_SUBTYPE_IR_OUTPUT); + if (IS_ERR(intr->out_irqs)) { + dev_err(dev, "Destination irq resource allocation failed\n"); + return PTR_ERR(intr->out_irqs); + } + + domain = irq_domain_create_hierarchy(parent_domain, 0, 0, dev_fwnode(dev), + &ti_sci_intr_irq_domain_ops, intr); + if (!domain) { + dev_err(dev, "Failed to allocate IRQ domain\n"); + return -ENOMEM; + } + + dev_info(dev, "Interrupt Router %d domain created\n", intr->ti_sci_id); + + return 0; +} + +static const struct of_device_id ti_sci_intr_irq_domain_of_match[] = { + { .compatible = "ti,sci-intr", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, ti_sci_intr_irq_domain_of_match); + +static struct platform_driver ti_sci_intr_irq_domain_driver = { + .probe = ti_sci_intr_irq_domain_probe, + .driver = { + .name = "ti-sci-intr", + .of_match_table = ti_sci_intr_irq_domain_of_match, + }, +}; +module_platform_driver(ti_sci_intr_irq_domain_driver); + +MODULE_AUTHOR("Lokesh Vutla <lokeshvutla@ticom>"); +MODULE_DESCRIPTION("K3 Interrupt Router driver over TI SCI protocol"); +MODULE_LICENSE("GPL"); diff --git a/drivers/irqchip/irq-ts4800.c b/drivers/irqchip/irq-ts4800.c index 2325fb3c482b..2e4013c6834d 100644 --- a/drivers/irqchip/irq-ts4800.c +++ b/drivers/irqchip/irq-ts4800.c @@ -19,14 +19,15 @@ #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/platform_device.h> +#include <linux/seq_file.h> #define IRQ_MASK 0x4 #define IRQ_STATUS 0x8 struct ts4800_irq_data { void __iomem *base; + struct platform_device *pdev; struct irq_domain *domain; - struct irq_chip irq_chip; }; static void ts4800_irq_mask(struct irq_data *d) @@ -47,12 +48,25 @@ static void ts4800_irq_unmask(struct irq_data *d) writew(reg & ~mask, data->base + IRQ_MASK); } +static void ts4800_irq_print_chip(struct irq_data *d, struct seq_file *p) +{ + struct ts4800_irq_data *data = irq_data_get_irq_chip_data(d); + + seq_puts(p, dev_name(&data->pdev->dev)); +} + +static const struct irq_chip ts4800_chip = { + .irq_mask = ts4800_irq_mask, + .irq_unmask = ts4800_irq_unmask, + .irq_print_chip = ts4800_irq_print_chip, +}; + static int ts4800_irqdomain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq) { struct ts4800_irq_data *data = d->host_data; - irq_set_chip_and_handler(irq, &data->irq_chip, handle_simple_irq); + irq_set_chip_and_handler(irq, &ts4800_chip, handle_simple_irq); irq_set_chip_data(irq, data); irq_set_noprobe(irq); @@ -79,10 +93,9 @@ static void ts4800_ic_chained_handle_irq(struct irq_desc *desc) do { unsigned int bit = __ffs(status); - int irq = irq_find_mapping(data->domain, bit); + generic_handle_domain_irq(data->domain, bit); status &= ~(1 << bit); - generic_handle_irq(irq); } while (status); out: @@ -93,16 +106,14 @@ static int ts4800_ic_probe(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; struct ts4800_irq_data *data; - struct irq_chip *irq_chip; - struct resource *res; int parent_irq; data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - data->base = devm_ioremap_resource(&pdev->dev, res); + data->pdev = pdev; + data->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(data->base)) return PTR_ERR(data->base); @@ -114,12 +125,7 @@ static int ts4800_ic_probe(struct platform_device *pdev) return -EINVAL; } - irq_chip = &data->irq_chip; - irq_chip->name = 
dev_name(&pdev->dev); - irq_chip->irq_mask = ts4800_irq_mask; - irq_chip->irq_unmask = ts4800_irq_unmask; - - data->domain = irq_domain_add_linear(node, 8, &ts4800_ic_ops, data); + data->domain = irq_domain_create_linear(dev_fwnode(&pdev->dev), 8, &ts4800_ic_ops, data); if (!data->domain) { dev_err(&pdev->dev, "cannot add IRQ domain\n"); return -ENOMEM; @@ -133,13 +139,11 @@ static int ts4800_ic_probe(struct platform_device *pdev) return 0; } -static int ts4800_ic_remove(struct platform_device *pdev) +static void ts4800_ic_remove(struct platform_device *pdev) { struct ts4800_irq_data *data = platform_get_drvdata(pdev); irq_domain_remove(data->domain); - - return 0; } static const struct of_device_id ts4800_ic_of_match[] = { @@ -149,15 +153,15 @@ static const struct of_device_id ts4800_ic_of_match[] = { MODULE_DEVICE_TABLE(of, ts4800_ic_of_match); static struct platform_driver ts4800_ic_driver = { - .probe = ts4800_ic_probe, - .remove = ts4800_ic_remove, + .probe = ts4800_ic_probe, + .remove = ts4800_ic_remove, .driver = { - .name = "ts4800-irqc", - .of_match_table = ts4800_ic_of_match, + .name = "ts4800-irqc", + .of_match_table = ts4800_ic_of_match, }, }; module_platform_driver(ts4800_ic_driver); MODULE_AUTHOR("Damien Riegel <damien.riegel@savoirfairelinux.com>"); +MODULE_DESCRIPTION("Multiplexed-IRQs driver for TS-4800's FPGA"); MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("platform:ts4800_irqc"); diff --git a/drivers/irqchip/irq-uniphier-aidet.c b/drivers/irqchip/irq-uniphier-aidet.c index 7ba7f253470e..6005c2d28dd9 100644 --- a/drivers/irqchip/irq-uniphier-aidet.c +++ b/drivers/irqchip/irq-uniphier-aidet.c @@ -1,17 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Driver for UniPhier AIDET (ARM Interrupt Detector) * * Copyright (C) 2017 Socionext Inc. * Author: Masahiro Yamada <yamada.masahiro@socionext.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
*/ #include <linux/bitops.h> @@ -20,7 +12,6 @@ #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/of_irq.h> #include <linux/platform_device.h> #include <linux/spinlock.h> @@ -174,7 +165,6 @@ static int uniphier_aidet_probe(struct platform_device *pdev) struct device_node *parent_np; struct irq_domain *parent_domain; struct uniphier_aidet_priv *priv; - struct resource *res; parent_np = of_irq_find_parent(dev->of_node); if (!parent_np) @@ -189,8 +179,7 @@ static int uniphier_aidet_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->reg_base = devm_ioremap_resource(dev, res); + priv->reg_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->reg_base)) return PTR_ERR(priv->reg_base); @@ -199,7 +188,7 @@ static int uniphier_aidet_probe(struct platform_device *pdev) priv->domain = irq_domain_create_hierarchy( parent_domain, 0, UNIPHIER_AIDET_NR_IRQS, - of_node_to_fwnode(dev->of_node), + of_fwnode_handle(dev->of_node), &uniphier_aidet_domain_ops, priv); if (!priv->domain) return -ENOMEM; @@ -247,6 +236,7 @@ static const struct of_device_id uniphier_aidet_match[] = { { .compatible = "socionext,uniphier-ld11-aidet" }, { .compatible = "socionext,uniphier-ld20-aidet" }, { .compatible = "socionext,uniphier-pxs3-aidet" }, + { .compatible = "socionext,uniphier-nx1-aidet" }, { /* sentinel */ } }; diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c index 928858dada75..034ce6afe170 100644 --- a/drivers/irqchip/irq-versatile-fpga.c +++ b/drivers/irqchip/irq-versatile-fpga.c @@ -6,12 +6,13 @@ #include <linux/irq.h> #include <linux/io.h> #include <linux/irqchip.h> -#include <linux/irqchip/versatile-fpga.h> +#include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> +#include <linux/seq_file.h> #include <asm/exception.h> #include <asm/mach/irq.h> @@ -33,14 +34,12 @@ /** * struct fpga_irq_data - irq data container for the FPGA IRQ controller * @base: memory offset in virtual memory - * @chip: chip container for this instance * @domain: IRQ domain for this instance * @valid: mask for valid IRQs on this controller * @used_irqs: number of active IRQs on this controller */ struct fpga_irq_data { void __iomem *base; - struct irq_chip chip; u32 valid; struct irq_domain *domain; u8 used_irqs; @@ -66,22 +65,43 @@ static void fpga_irq_unmask(struct irq_data *d) writel(mask, f->base + IRQ_ENABLE_SET); } +static void fpga_irq_print_chip(struct irq_data *d, struct seq_file *p) +{ + struct fpga_irq_data *f = irq_data_get_irq_chip_data(d); + + seq_puts(p, irq_domain_get_of_node(f->domain)->name); +} + +static const struct irq_chip fpga_chip = { + .irq_ack = fpga_irq_mask, + .irq_mask = fpga_irq_mask, + .irq_unmask = fpga_irq_unmask, + .irq_print_chip = fpga_irq_print_chip, +}; + static void fpga_irq_handle(struct irq_desc *desc) { + struct irq_chip *chip = irq_desc_get_chip(desc); struct fpga_irq_data *f = irq_desc_get_handler_data(desc); - u32 status = readl(f->base + IRQ_STATUS); + u32 status; + + chained_irq_enter(chip, desc); + status = readl(f->base + IRQ_STATUS); if (status == 0) { do_bad_IRQ(desc); - return; + goto out; } do { unsigned int irq = ffs(status) - 1; status &= ~(1 << irq); - generic_handle_irq(irq_find_mapping(f->domain, irq)); + generic_handle_domain_irq(f->domain, irq); } while (status); + +out: + 
chained_irq_exit(chip, desc); } /* @@ -97,7 +117,7 @@ static int handle_one_fpga(struct fpga_irq_data *f, struct pt_regs *regs) while ((status = readl(f->base + IRQ_STATUS))) { irq = ffs(status) - 1; - handle_domain_irq(f->domain, irq, regs); + generic_handle_domain_irq(f->domain, irq); handled = 1; } @@ -108,7 +128,7 @@ static int handle_one_fpga(struct fpga_irq_data *f, struct pt_regs *regs) * Keep iterating over all registered FPGA IRQ controllers until there are * no pending interrupts. */ -asmlinkage void __exception_irq_entry fpga_handle_irq(struct pt_regs *regs) +static void __exception_irq_entry fpga_handle_irq(struct pt_regs *regs) { int i, handled; @@ -127,8 +147,7 @@ static int fpga_irqdomain_map(struct irq_domain *d, unsigned int irq, if (!(f->valid & BIT(hwirq))) return -EPERM; irq_set_chip_data(irq, f); - irq_set_chip_and_handler(irq, &f->chip, - handle_level_irq); + irq_set_chip_and_handler(irq, &fpga_chip, handle_level_irq); irq_set_probe(irq); return 0; } @@ -138,8 +157,8 @@ static const struct irq_domain_ops fpga_irqdomain_ops = { .xlate = irq_domain_xlate_onetwocell, }; -void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start, - int parent_irq, u32 valid, struct device_node *node) +static void __init fpga_irq_init(void __iomem *base, int parent_irq, + u32 valid, struct device_node *node) { struct fpga_irq_data *f; int i; @@ -150,10 +169,6 @@ void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start, } f = &fpga_irq_devices[fpga_irq_id]; f->base = base; - f->chip.name = name; - f->chip.irq_ack = fpga_irq_mask; - f->chip.irq_mask = fpga_irq_mask; - f->chip.irq_unmask = fpga_irq_unmask; f->valid = valid; if (parent_irq != -1) { @@ -161,20 +176,19 @@ void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start, f); } - /* This will also allocate irq descriptors */ - f->domain = irq_domain_add_simple(node, fls(valid), irq_start, - &fpga_irqdomain_ops, f); + f->domain = irq_domain_create_linear(of_fwnode_handle(node), fls(valid), + &fpga_irqdomain_ops, f); /* This will allocate all valid descriptors in the linear case */ for (i = 0; i < fls(valid); i++) if (valid & BIT(i)) { - if (!irq_start) - irq_create_mapping(f->domain, i); + /* Is this still required? 
*/ + irq_create_mapping(f->domain, i); f->used_irqs++; } pr_info("FPGA IRQ chip %d \"%s\" @ %p, %u irqs", - fpga_irq_id, name, base, f->used_irqs); + fpga_irq_id, node->name, base, f->used_irqs); if (parent_irq != -1) pr_cont(", parent IRQ: %d\n", parent_irq); else @@ -184,8 +198,8 @@ void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start, } #ifdef CONFIG_OF -int __init fpga_irq_of_init(struct device_node *node, - struct device_node *parent) +static int __init fpga_irq_of_init(struct device_node *node, + struct device_node *parent) { void __iomem *base; u32 clear_mask; @@ -204,6 +218,9 @@ int __init fpga_irq_of_init(struct device_node *node, if (of_property_read_u32(node, "valid-mask", &valid_mask)) valid_mask = 0; + writel(clear_mask, base + IRQ_ENABLE_CLEAR); + writel(clear_mask, base + FIQ_ENABLE_CLEAR); + /* Some chips are cascaded from a parent IRQ */ parent_irq = irq_of_parse_and_map(node, 0); if (!parent_irq) { @@ -211,10 +228,7 @@ int __init fpga_irq_of_init(struct device_node *node, parent_irq = -1; } - fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node); - - writel(clear_mask, base + IRQ_ENABLE_CLEAR); - writel(clear_mask, base + FIQ_ENABLE_CLEAR); + fpga_irq_init(base, parent_irq, valid_mask, node); /* * On Versatile AB/PB, some secondary interrupts have a direct @@ -228,5 +242,4 @@ int __init fpga_irq_of_init(struct device_node *node, } IRQCHIP_DECLARE(arm_fpga, "arm,versatile-fpga-irq", fpga_irq_of_init); IRQCHIP_DECLARE(arm_fpga_sic, "arm,versatile-sic", fpga_irq_of_init); -IRQCHIP_DECLARE(ox810se_rps, "oxsemi,ox810se-rps-irq", fpga_irq_of_init); #endif diff --git a/drivers/irqchip/irq-vf610-mscm-ir.c b/drivers/irqchip/irq-vf610-mscm-ir.c index 56b5e3cb9de2..5d9c7503aa7f 100644 --- a/drivers/irqchip/irq-vf610-mscm-ir.c +++ b/drivers/irqchip/irq-vf610-mscm-ir.c @@ -1,12 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2014-2015 Toradex AG * Author: Stefan Agner <stefan@agner.ch> * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * * IRQ chip driver for MSCM interrupt router available on Vybrid SoC's. * The interrupt router is between the CPU's interrupt controller and the * peripheral. The router allows to route the peripheral interrupts to @@ -213,9 +209,9 @@ static int __init vf610_mscm_ir_of_init(struct device_node *node, regmap_read(mscm_cp_regmap, MSCM_CPxNUM, &cpuid); mscm_ir_data->cpu_mask = 0x1 << cpuid; - domain = irq_domain_add_hierarchy(domain_parent, 0, - MSCM_IRSPRC_NUM, node, - &mscm_irq_domain_ops, mscm_ir_data); + domain = irq_domain_create_hierarchy(domain_parent, 0, MSCM_IRSPRC_NUM, + of_fwnode_handle(node), &mscm_irq_domain_ops, + mscm_ir_data); if (!domain) { ret = -ENOMEM; goto out_unmap; diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c index 74192f62dd67..e38104c5064e 100644 --- a/drivers/irqchip/irq-vic.c +++ b/drivers/irqchip/irq-vic.c @@ -1,22 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * linux/arch/arm/common/vic.c * * Copyright (C) 1999 - 2003 ARM Limited * Copyright (C) 2000 Deep Blue Solutions Ltd - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/export.h> @@ -40,7 +27,10 @@ #define VIC_IRQ_STATUS 0x00 #define VIC_FIQ_STATUS 0x04 +#define VIC_RAW_STATUS 0x08 #define VIC_INT_SELECT 0x0c /* 1 = FIQ, 0 = IRQ */ +#define VIC_INT_ENABLE 0x10 /* 1 = enable, 0 = disable */ +#define VIC_INT_ENABLE_CLEAR 0x14 #define VIC_INT_SOFT 0x18 #define VIC_INT_SOFT_CLEAR 0x1c #define VIC_PROTECT 0x20 @@ -57,9 +47,8 @@ /** * struct vic_device - VIC PM device - * @parent_irq: The parent IRQ number of the VIC if cascaded, or 0. - * @irq: The IRQ number for the base of the VIC. * @base: The register base for the VIC. + * @irq: The IRQ number for the base of the VIC. * @valid_sources: A bitmask of valid interrupts * @resume_sources: A bitmask of interrupts for resume. * @resume_irqs: The IRQs enabled for resume. @@ -167,13 +156,13 @@ static int vic_suspend(void) return 0; } static struct syscore_ops vic_syscore_ops = { .suspend = vic_suspend, .resume = vic_resume, }; /** - * vic_pm_init - initicall to register VIC pm + * vic_pm_init - initcall to register VIC pm * * This is called via late_initcall() to register * the resources for the VICs due to the early @@ -218,7 +211,7 @@ static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs) while ((stat = readl_relaxed(vic->base + VIC_IRQ_STATUS))) { irq = ffs(stat) - 1; - handle_domain_irq(vic->domain, irq, regs); + generic_handle_domain_irq(vic->domain, irq); handled = 1; } @@ -235,7 +228,7 @@ static void vic_handle_irq_cascaded(struct irq_desc *desc) while ((stat = readl_relaxed(vic->base + VIC_IRQ_STATUS))) { hwirq = ffs(stat) - 1; - generic_handle_irq(irq_find_mapping(vic->domain, hwirq)); + generic_handle_domain_irq(vic->domain, hwirq); } chained_irq_exit(host_chip, desc); @@ -300,8 +293,9 @@ static void __init vic_register(void __iomem *base, unsigned int parent_irq, vic_handle_irq_cascaded, v); } - v->domain = irq_domain_add_simple(node, fls(valid_sources), irq, - &vic_irqdomain_ops, v); + v->domain = irq_domain_create_simple(of_fwnode_handle(node), + fls(valid_sources), irq, + &vic_irqdomain_ops, v); /* create an IRQ mapping for each valid IRQ */ for (i = 0; i < fls(valid_sources); i++) if (valid_sources & (1 << i)) @@ -407,7 +401,7 @@ static void __init vic_clear_interrupts(void __iomem *base) /* * The PL190 cell from ARM has been modified by ST to handle 64 interrupts.
* The original cell has 32 interrupts, while the modified one has 64, - * replocating two blocks 0x00..0x1f in 0x20..0x3f. In that case + * replicating two blocks 0x00..0x1f in 0x20..0x3f. In that case * the probe function is called twice, with base set to offset 000 * and 020 within the page. We call this "second block". */ @@ -441,7 +435,7 @@ static void __init vic_init_st(void __iomem *base, unsigned int irq_start, vic_register(base, 0, irq_start, vic_sources, 0, node); } -void __init __vic_init(void __iomem *base, int parent_irq, int irq_start, +static void __init __vic_init(void __iomem *base, int parent_irq, int irq_start, u32 vic_sources, u32 resume_sources, struct device_node *node) { @@ -465,7 +459,7 @@ void __init __vic_init(void __iomem *base, int parent_irq, int irq_start, return; default: printk(KERN_WARNING "VIC: unknown vendor, continuing anyways\n"); - /* fall through */ + fallthrough; case AMBA_VENDOR_ARM: break; } @@ -494,27 +488,6 @@ void __init vic_init(void __iomem *base, unsigned int irq_start, __vic_init(base, 0, irq_start, vic_sources, resume_sources, NULL); } -/** - * vic_init_cascaded() - initialise a cascaded vectored interrupt controller - * @base: iomem base address - * @parent_irq: the parent IRQ we're cascaded off - * @vic_sources: bitmask of interrupt sources to allow - * @resume_sources: bitmask of interrupt sources to allow for resume - * - * This returns the base for the new interrupts or negative on error. - */ -int __init vic_init_cascaded(void __iomem *base, unsigned int parent_irq, - u32 vic_sources, u32 resume_sources) -{ - struct vic_device *v; - - v = &vic_devices[vic_id]; - __vic_init(base, parent_irq, 0, vic_sources, resume_sources, NULL); - /* Return out acquired base */ - return v->irq; -} -EXPORT_SYMBOL_GPL(vic_init_cascaded); - #ifdef CONFIG_OF static int __init vic_of_init(struct device_node *node, struct device_node *parent) @@ -522,9 +495,7 @@ static int __init vic_of_init(struct device_node *node, void __iomem *regs; u32 interrupt_mask = ~0; u32 wakeup_mask = ~0; - - if (WARN(parent, "non-root VICs are not supported")) - return -EINVAL; + int parent_irq; regs = of_iomap(node, 0); if (WARN_ON(!regs)) @@ -532,11 +503,14 @@ static int __init vic_of_init(struct device_node *node, of_property_read_u32(node, "valid-mask", &interrupt_mask); of_property_read_u32(node, "valid-wakeup-mask", &wakeup_mask); + parent_irq = of_irq_get(node, 0); + if (parent_irq < 0) + parent_irq = 0; /* * Passing 0 as first IRQ makes the simple domain allocate descriptors */ - __vic_init(regs, 0, 0, interrupt_mask, wakeup_mask, node); + __vic_init(regs, parent_irq, 0, interrupt_mask, wakeup_mask, node); return 0; } diff --git a/drivers/irqchip/irq-vt8500.c b/drivers/irqchip/irq-vt8500.c index f9af0af21751..3b742590aec8 100644 --- a/drivers/irqchip/irq-vt8500.c +++ b/drivers/irqchip/irq-vt8500.c @@ -1,22 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * arch/arm/mach-vt8500/irq.c * * Copyright (C) 2012 Tony Prisk <linux@prisktech.co.nz> * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* @@ -28,6 +15,7 @@ #include <linux/io.h> #include <linux/irq.h> #include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/interrupt.h> #include <linux/bitops.h> @@ -76,29 +64,28 @@ struct vt8500_irq_data { struct irq_domain *domain; /* Domain for this controller */ }; -/* Global variable for accessing io-mem addresses */ -static struct vt8500_irq_data intc[VT8500_INTC_MAX]; -static u32 active_cnt = 0; +/* Primary interrupt controller data */ +static struct vt8500_irq_data *primary_intc; -static void vt8500_irq_mask(struct irq_data *d) +static void vt8500_irq_ack(struct irq_data *d) { struct vt8500_irq_data *priv = d->domain->host_data; void __iomem *base = priv->base; void __iomem *stat_reg = base + VT8500_ICIS + (d->hwirq < 32 ? 0 : 4); - u8 edge, dctr; - u32 status; + u32 status = (1 << (d->hwirq & 0x1f)); - edge = readb(base + VT8500_ICDC + d->hwirq) & VT8500_EDGE; - if (edge) { - status = readl(stat_reg); + writel(status, stat_reg); +} - status |= (1 << (d->hwirq & 0x1f)); - writel(status, stat_reg); - } else { - dctr = readb(base + VT8500_ICDC + d->hwirq); - dctr &= ~VT8500_INT_ENABLE; - writeb(dctr, base + VT8500_ICDC + d->hwirq); - } +static void vt8500_irq_mask(struct irq_data *d) +{ + struct vt8500_irq_data *priv = d->domain->host_data; + void __iomem *base = priv->base; + u8 dctr; + + dctr = readb(base + VT8500_ICDC + d->hwirq); + dctr &= ~VT8500_INT_ENABLE; + writeb(dctr, base + VT8500_ICDC + d->hwirq); } static void vt8500_irq_unmask(struct irq_data *d) @@ -143,11 +130,11 @@ static int vt8500_irq_set_type(struct irq_data *d, unsigned int flow_type) } static struct irq_chip vt8500_irq_chip = { - .name = "vt8500", - .irq_ack = vt8500_irq_mask, - .irq_mask = vt8500_irq_mask, - .irq_unmask = vt8500_irq_unmask, - .irq_set_type = vt8500_irq_set_type, + .name = "vt8500", + .irq_ack = vt8500_irq_ack, + .irq_mask = vt8500_irq_mask, + .irq_unmask = vt8500_irq_unmask, + .irq_set_type = vt8500_irq_set_type, }; static void __init vt8500_init_irq_hw(void __iomem *base) @@ -176,82 +163,89 @@ static const struct irq_domain_ops vt8500_irq_domain_ops = { .xlate = irq_domain_xlate_onecell, }; +static inline void vt8500_handle_irq_common(struct vt8500_irq_data *intc) +{ + unsigned long irqnr = readl_relaxed(intc->base) & 0x3F; + unsigned long stat; + + /* + * Highest Priority register default = 63, so check that this + * is a real interrupt by checking the status register + */ + if (irqnr == 63) { + stat = readl_relaxed(intc->base + VT8500_ICIS + 4); + if (!(stat & BIT(31))) + return; + } + + generic_handle_domain_irq(intc->domain, irqnr); +} + static void __exception_irq_entry vt8500_handle_irq(struct pt_regs *regs) { - u32 stat, i; - int irqnr; - void __iomem *base; - - /* Loop through each active controller */ - for (i=0; i<active_cnt; i++) { - base = intc[i].base; - irqnr = readl_relaxed(base) & 0x3F; - /* - Highest Priority register default = 63, so check that this - is a real interrupt by checking the status register - */ - if (irqnr == 63) { - stat = readl_relaxed(base + VT8500_ICIS + 4); - if (!(stat & BIT(31))) - continue; - } + vt8500_handle_irq_common(primary_intc); +} - handle_domain_irq(intc[i].domain, irqnr, regs); - } +static void 
vt8500_handle_irq_chained(struct irq_desc *desc) +{ + struct irq_domain *d = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + struct vt8500_irq_data *intc = d->host_data; + + chained_irq_enter(chip, desc); + vt8500_handle_irq_common(intc); + chained_irq_exit(chip, desc); } static int __init vt8500_irq_init(struct device_node *node, struct device_node *parent) { - int irq, i; - struct device_node *np = node; + struct vt8500_irq_data *intc; + int irq, i, ret = 0; - if (active_cnt == VT8500_INTC_MAX) { - pr_err("%s: Interrupt controllers > VT8500_INTC_MAX\n", - __func__); - goto out; - } - - intc[active_cnt].base = of_iomap(np, 0); - intc[active_cnt].domain = irq_domain_add_linear(node, 64, - &vt8500_irq_domain_ops, &intc[active_cnt]); + intc = kzalloc(sizeof(*intc), GFP_KERNEL); + if (!intc) + return -ENOMEM; - if (!intc[active_cnt].base) { + intc->base = of_iomap(node, 0); + if (!intc->base) { pr_err("%s: Unable to map IO memory\n", __func__); - goto out; + ret = -ENOMEM; + goto err_free; } - if (!intc[active_cnt].domain) { + intc->domain = irq_domain_create_linear(of_fwnode_handle(node), 64, + &vt8500_irq_domain_ops, intc); + if (!intc->domain) { pr_err("%s: Unable to add irq domain!\n", __func__); - goto out; + ret = -ENOMEM; + goto err_unmap; } - set_handle_irq(vt8500_handle_irq); - - vt8500_init_irq_hw(intc[active_cnt].base); + vt8500_init_irq_hw(intc->base); pr_info("vt8500-irq: Added interrupt controller\n"); - active_cnt++; - - /* check if this is a slaved controller */ - if (of_irq_count(np) != 0) { - /* check that we have the correct number of interrupts */ - if (of_irq_count(np) != 8) { - pr_err("%s: Incorrect IRQ map for slaved controller\n", - __func__); - return -EINVAL; - } - - for (i = 0; i < 8; i++) { - irq = irq_of_parse_and_map(np, i); - enable_irq(irq); + /* check if this is a chained controller */ + if (of_irq_count(node) != 0) { + for (i = 0; i < of_irq_count(node); i++) { + irq = irq_of_parse_and_map(node, i); + irq_set_chained_handler_and_data(irq, vt8500_handle_irq_chained, + intc->domain); } pr_info("vt8500-irq: Enabled slave->parent interrupts\n"); + } else { + primary_intc = intc; + set_handle_irq(vt8500_handle_irq); } -out: return 0; + +err_unmap: + iounmap(intc->base); +err_free: + kfree(intc); + return ret; } IRQCHIP_DECLARE(vt8500_irq, "via,vt8500-intc", vt8500_irq_init); diff --git a/drivers/irqchip/irq-wpcm450-aic.c b/drivers/irqchip/irq-wpcm450-aic.c new file mode 100644 index 000000000000..a8ed4894d29e --- /dev/null +++ b/drivers/irqchip/irq-wpcm450-aic.c @@ -0,0 +1,162 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright 2021 Jonathan Neuschäfer + +#include <linux/irqchip.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/printk.h> + +#include <asm/exception.h> + +#define AIC_SCR(x) ((x)*4) /* Source control registers */ +#define AIC_GEN 0x84 /* Interrupt group enable control register */ +#define AIC_GRSR 0x88 /* Interrupt group raw status register */ +#define AIC_IRSR 0x100 /* Interrupt raw status register */ +#define AIC_IASR 0x104 /* Interrupt active status register */ +#define AIC_ISR 0x108 /* Interrupt status register */ +#define AIC_IPER 0x10c /* Interrupt priority encoding register */ +#define AIC_ISNR 0x110 /* Interrupt source number register */ +#define AIC_IMR 0x114 /* Interrupt mask register */ +#define AIC_OISR 0x118 /* Output interrupt status register */ +#define AIC_MECR 0x120 /* Mask enable command register */ +#define AIC_MDCR 0x124 /* Mask disable command register */ +#define
AIC_SSCR 0x128 /* Source set command register */ +#define AIC_SCCR 0x12c /* Source clear command register */ +#define AIC_EOSCR 0x130 /* End of service command register */ + +#define AIC_SCR_SRCTYPE_LOW_LEVEL (0 << 6) +#define AIC_SCR_SRCTYPE_HIGH_LEVEL (1 << 6) +#define AIC_SCR_SRCTYPE_NEG_EDGE (2 << 6) +#define AIC_SCR_SRCTYPE_POS_EDGE (3 << 6) +#define AIC_SCR_PRIORITY(x) (x) +#define AIC_SCR_PRIORITY_MASK 0x7 + +#define AIC_NUM_IRQS 32 + +struct wpcm450_aic { + void __iomem *regs; + struct irq_domain *domain; +}; + +static struct wpcm450_aic *aic; + +static void wpcm450_aic_init_hw(void) +{ + int i; + + /* Disable (mask) all interrupts */ + writel(0xffffffff, aic->regs + AIC_MDCR); + + /* + * Make sure the interrupt controller is ready to serve new interrupts. + * Reading from IPER indicates that the nIRQ signal may be deasserted, + * and writing to EOSCR indicates that interrupt handling has finished. + */ + readl(aic->regs + AIC_IPER); + writel(0, aic->regs + AIC_EOSCR); + + /* Initialize trigger mode and priority of each interrupt source */ + for (i = 0; i < AIC_NUM_IRQS; i++) + writel(AIC_SCR_SRCTYPE_HIGH_LEVEL | AIC_SCR_PRIORITY(7), + aic->regs + AIC_SCR(i)); +} + +static void __exception_irq_entry wpcm450_aic_handle_irq(struct pt_regs *regs) +{ + int hwirq; + + /* Determine the interrupt source */ + /* Read IPER to signal that nIRQ can be de-asserted */ + hwirq = readl(aic->regs + AIC_IPER) / 4; + + generic_handle_domain_irq(aic->domain, hwirq); +} + +static void wpcm450_aic_eoi(struct irq_data *d) +{ + /* Signal end-of-service */ + writel(0, aic->regs + AIC_EOSCR); +} + +static void wpcm450_aic_mask(struct irq_data *d) +{ + unsigned int mask = BIT(d->hwirq); + + /* Disable (mask) the interrupt */ + writel(mask, aic->regs + AIC_MDCR); +} + +static void wpcm450_aic_unmask(struct irq_data *d) +{ + unsigned int mask = BIT(d->hwirq); + + /* Enable (unmask) the interrupt */ + writel(mask, aic->regs + AIC_MECR); +} + +static int wpcm450_aic_set_type(struct irq_data *d, unsigned int flow_type) +{ + /* + * The hardware supports high/low level, as well as rising/falling edge + * modes, and the DT binding accommodates for that, but as long as + * other modes than high level mode are not used and can't be tested, + * they are rejected in this driver. 
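+ * (Supporting e.g. IRQ_TYPE_EDGE_RISING would mean programming + * AIC_SCR_SRCTYPE_POS_EDGE into AIC_SCR(d->hwirq) before accepting it; + * that is left out until it can be tested on real hardware.)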
+ */ + if ((flow_type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_LEVEL_HIGH) + return -EINVAL; + + return 0; +} + +static struct irq_chip wpcm450_aic_chip = { + .name = "wpcm450-aic", + .irq_eoi = wpcm450_aic_eoi, + .irq_mask = wpcm450_aic_mask, + .irq_unmask = wpcm450_aic_unmask, + .irq_set_type = wpcm450_aic_set_type, +}; + +static int wpcm450_aic_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq) +{ + if (hwirq >= AIC_NUM_IRQS) + return -EPERM; + + irq_set_chip_and_handler(irq, &wpcm450_aic_chip, handle_fasteoi_irq); + irq_set_chip_data(irq, aic); + irq_set_probe(irq); + + return 0; +} + +static const struct irq_domain_ops wpcm450_aic_ops = { + .map = wpcm450_aic_map, + .xlate = irq_domain_xlate_twocell, +}; + +static int __init wpcm450_aic_of_init(struct device_node *node, + struct device_node *parent) +{ + if (parent) + return -EINVAL; + + aic = kzalloc(sizeof(*aic), GFP_KERNEL); + if (!aic) + return -ENOMEM; + + aic->regs = of_iomap(node, 0); + if (!aic->regs) { + pr_err("Failed to map WPCM450 AIC registers\n"); + kfree(aic); + return -ENOMEM; + } + + wpcm450_aic_init_hw(); + + set_handle_irq(wpcm450_aic_handle_irq); + + aic->domain = irq_domain_create_linear(of_fwnode_handle(node), AIC_NUM_IRQS, &wpcm450_aic_ops, aic); + + return 0; +} + +IRQCHIP_DECLARE(wpcm450_aic, "nuvoton,wpcm450-aic", wpcm450_aic_of_init); diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c index e3043ded8973..92dcb9fdcb25 100644 --- a/drivers/irqchip/irq-xilinx-intc.c +++ b/drivers/irqchip/irq-xilinx-intc.c @@ -32,35 +32,39 @@ #define MER_ME (1<<0) #define MER_HIE (1<<1) +#define SPURIOUS_IRQ (-1U) + static DEFINE_STATIC_KEY_FALSE(xintc_is_be); struct xintc_irq_chip { void __iomem *base; struct irq_domain *root_domain; u32 intr_mask; + u32 nr_irq; }; -static struct xintc_irq_chip *xintc_irqc; +static struct xintc_irq_chip *primary_intc; -static void xintc_write(int reg, u32 data) +static void xintc_write(struct xintc_irq_chip *irqc, int reg, u32 data) { if (static_branch_unlikely(&xintc_is_be)) - iowrite32be(data, xintc_irqc->base + reg); + iowrite32be(data, irqc->base + reg); else - iowrite32(data, xintc_irqc->base + reg); + iowrite32(data, irqc->base + reg); } -static unsigned int xintc_read(int reg) +static u32 xintc_read(struct xintc_irq_chip *irqc, int reg) { if (static_branch_unlikely(&xintc_is_be)) - return ioread32be(xintc_irqc->base + reg); + return ioread32be(irqc->base + reg); else - return ioread32(xintc_irqc->base + reg); + return ioread32(irqc->base + reg); } static void intc_enable_or_unmask(struct irq_data *d) { - unsigned long mask = 1 << d->hwirq; + struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d); + unsigned long mask = BIT(d->hwirq); pr_debug("irq-xilinx: enable_or_unmask: %ld\n", d->hwirq); @@ -69,30 +73,35 @@ static void intc_enable_or_unmask(struct irq_data *d) * acks the irq before calling the interrupt handler */ if (irqd_is_level_type(d)) - xintc_write(IAR, mask); + xintc_write(irqc, IAR, mask); - xintc_write(SIE, mask); + xintc_write(irqc, SIE, mask); } static void intc_disable_or_mask(struct irq_data *d) { + struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d); + pr_debug("irq-xilinx: disable: %ld\n", d->hwirq); - xintc_write(CIE, 1 << d->hwirq); + xintc_write(irqc, CIE, BIT(d->hwirq)); } static void intc_ack(struct irq_data *d) { + struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d); + pr_debug("irq-xilinx: ack: %ld\n", d->hwirq); - xintc_write(IAR, 1 << d->hwirq); + xintc_write(irqc, IAR, BIT(d->hwirq)); } static 
void intc_mask_ack(struct irq_data *d) { - unsigned long mask = 1 << d->hwirq; + struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d); + unsigned long mask = BIT(d->hwirq); pr_debug("irq-xilinx: disable_and_ack: %ld\n", d->hwirq); - xintc_write(CIE, mask); - xintc_write(IAR, mask); + xintc_write(irqc, CIE, mask); + xintc_write(irqc, IAR, mask); } static struct irq_chip intc_dev = { @@ -103,30 +112,20 @@ static struct irq_chip intc_dev = { .irq_mask_ack = intc_mask_ack, }; -unsigned int xintc_get_irq(void) -{ - unsigned int hwirq, irq = -1; - - hwirq = xintc_read(IVR); - if (hwirq != -1U) - irq = irq_find_mapping(xintc_irqc->root_domain, hwirq); - - pr_debug("irq-xilinx: hwirq=%d, irq=%d\n", hwirq, irq); - - return irq; -} - static int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { - if (xintc_irqc->intr_mask & (1 << hw)) { + struct xintc_irq_chip *irqc = d->host_data; + + if (irqc->intr_mask & BIT(hw)) { irq_set_chip_and_handler_name(irq, &intc_dev, - handle_edge_irq, "edge"); + handle_edge_irq, "edge"); irq_clear_status_flags(irq, IRQ_LEVEL); } else { irq_set_chip_and_handler_name(irq, &intc_dev, - handle_level_irq, "level"); + handle_level_irq, "level"); irq_set_status_flags(irq, IRQ_LEVEL); } + irq_set_chip_data(irq, irqc); return 0; } @@ -138,43 +137,50 @@ static const struct irq_domain_ops xintc_irq_domain_ops = { static void xil_intc_irq_handler(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); - u32 pending; + struct xintc_irq_chip *irqc; + irqc = irq_data_get_irq_handler_data(&desc->irq_data); chained_irq_enter(chip, desc); do { - pending = xintc_get_irq(); - if (pending == -1U) + u32 hwirq = xintc_read(irqc, IVR); + + if (hwirq == -1U) break; - generic_handle_irq(pending); + + generic_handle_domain_irq(irqc->root_domain, hwirq); } while (true); chained_irq_exit(chip, desc); } +static void xil_intc_handle_irq(struct pt_regs *regs) +{ + u32 hwirq; + + do { + hwirq = xintc_read(primary_intc, IVR); + if (unlikely(hwirq == SPURIOUS_IRQ)) + break; + + generic_handle_domain_irq(primary_intc->root_domain, hwirq); + } while (true); +} + static int __init xilinx_intc_of_init(struct device_node *intc, struct device_node *parent) { - u32 nr_irq; - int ret, irq; struct xintc_irq_chip *irqc; - - if (xintc_irqc) { - pr_err("irq-xilinx: Multiple instances aren't supported\n"); - return -EINVAL; - } + int ret, irq; irqc = kzalloc(sizeof(*irqc), GFP_KERNEL); if (!irqc) return -ENOMEM; - - xintc_irqc = irqc; - irqc->base = of_iomap(intc, 0); BUG_ON(!irqc->base); - ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &nr_irq); + ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &irqc->nr_irq); if (ret < 0) { pr_err("irq-xilinx: unable to read xlnx,num-intr-inputs\n"); - goto err_alloc; + goto error; } ret = of_property_read_u32(intc, "xlnx,kind-of-intr", &irqc->intr_mask); @@ -183,34 +189,35 @@ static int __init xilinx_intc_of_init(struct device_node *intc, irqc->intr_mask = 0; } - if (irqc->intr_mask >> nr_irq) + if ((u64)irqc->intr_mask >> irqc->nr_irq) pr_warn("irq-xilinx: mismatch in kind-of-intr param\n"); pr_info("irq-xilinx: %pOF: num_irq=%d, edge=0x%x\n", - intc, nr_irq, irqc->intr_mask); + intc, irqc->nr_irq, irqc->intr_mask); /* * Disable all external interrupts until they are - * explicity requested. + * explicitly requested. */ - xintc_write(IER, 0); + xintc_write(irqc, IER, 0); /* Acknowledge any pending interrupts just in case. 
*/ - xintc_write(IAR, 0xffffffff); + xintc_write(irqc, IAR, 0xffffffff); /* Turn on the Master Enable. */ - xintc_write(MER, MER_HIE | MER_ME); - if (!(xintc_read(MER) & (MER_HIE | MER_ME))) { + xintc_write(irqc, MER, MER_HIE | MER_ME); + if (xintc_read(irqc, MER) != (MER_HIE | MER_ME)) { static_branch_enable(&xintc_is_be); - xintc_write(MER, MER_HIE | MER_ME); + xintc_write(irqc, MER, MER_HIE | MER_ME); } - irqc->root_domain = irq_domain_add_linear(intc, nr_irq, - &xintc_irq_domain_ops, irqc); + irqc->root_domain = irq_domain_create_linear(of_fwnode_handle(intc), irqc->nr_irq, + &xintc_irq_domain_ops, irqc); if (!irqc->root_domain) { pr_err("irq-xilinx: Unable to create IRQ domain\n"); - goto err_alloc; + ret = -EINVAL; + goto error; } if (parent) { @@ -222,16 +229,18 @@ static int __init xilinx_intc_of_init(struct device_node *intc, } else { pr_err("irq-xilinx: interrupts property not in DT\n"); ret = -EINVAL; - goto err_alloc; + goto error; } } else { - irq_set_default_host(irqc->root_domain); + primary_intc = irqc; + irq_set_default_domain(primary_intc->root_domain); + set_handle_irq(xil_intc_handle_irq); } return 0; -err_alloc: - xintc_irqc = NULL; +error: + iounmap(irqc->base); kfree(irqc); return ret; diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c index 5385f5768345..9fdacbd89a63 100644 --- a/drivers/irqchip/irq-xtensa-mx.c +++ b/drivers/irqchip/irq-xtensa-mx.c @@ -12,6 +12,7 @@ #include <linux/irqdomain.h> #include <linux/irq.h> #include <linux/irqchip.h> +#include <linux/irqchip/xtensa-mx.h> #include <linux/of.h> #include <asm/mxregs.h> @@ -71,14 +72,17 @@ static void xtensa_mx_irq_mask(struct irq_data *d) unsigned int mask = 1u << d->hwirq; if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE | - XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) { - set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) - - HW_IRQ_MX_BASE), MIENG); - } else { - mask = __this_cpu_read(cached_irq_mask) & ~mask; - __this_cpu_write(cached_irq_mask, mask); - xtensa_set_sr(mask, intenable); + XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) { + unsigned int ext_irq = xtensa_get_ext_irq_no(d->hwirq); + + if (ext_irq >= HW_IRQ_MX_BASE) { + set_er(1u << (ext_irq - HW_IRQ_MX_BASE), MIENG); + return; + } } + mask = __this_cpu_read(cached_irq_mask) & ~mask; + __this_cpu_write(cached_irq_mask, mask); + xtensa_set_sr(mask, intenable); } static void xtensa_mx_irq_unmask(struct irq_data *d) @@ -86,14 +90,17 @@ static void xtensa_mx_irq_unmask(struct irq_data *d) unsigned int mask = 1u << d->hwirq; if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE | - XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) { - set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) - - HW_IRQ_MX_BASE), MIENGSET); - } else { - mask |= __this_cpu_read(cached_irq_mask); - __this_cpu_write(cached_irq_mask, mask); - xtensa_set_sr(mask, intenable); + XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) { + unsigned int ext_irq = xtensa_get_ext_irq_no(d->hwirq); + + if (ext_irq >= HW_IRQ_MX_BASE) { + set_er(1u << (ext_irq - HW_IRQ_MX_BASE), MIENGSET); + return; + } } + mask |= __this_cpu_read(cached_irq_mask); + __this_cpu_write(cached_irq_mask, mask); + xtensa_set_sr(mask, intenable); } static void xtensa_mx_irq_enable(struct irq_data *d) @@ -113,7 +120,11 @@ static void xtensa_mx_irq_ack(struct irq_data *d) static int xtensa_mx_irq_retrigger(struct irq_data *d) { - xtensa_set_sr(1 << d->hwirq, intset); + unsigned int mask = 1u << d->hwirq; + + if (WARN_ON(mask & ~XCHAL_INTTYPE_MASK_SOFTWARE)) + return 0; + xtensa_set_sr(mask, intset); return 1; } @@ -141,14 +152,24 @@ static struct irq_chip xtensa_mx_irq_chip 
= { .irq_set_affinity = xtensa_mx_irq_set_affinity, }; +static void __init xtensa_mx_init_common(struct irq_domain *root_domain) +{ + unsigned int i; + + irq_set_default_domain(root_domain); + secondary_init_irq(); + + /* Initialize default IRQ routing to CPU 0 */ + for (i = 0; i < XCHAL_NUM_EXTINTERRUPTS; ++i) + set_er(1, MIROUT(i)); +} + int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent) { struct irq_domain *root_domain = - irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0, - &xtensa_mx_irq_domain_ops, + irq_domain_create_legacy(NULL, NR_IRQS - 1, 1, 0, &xtensa_mx_irq_domain_ops, &xtensa_mx_irq_chip); - irq_set_default_host(root_domain); - secondary_init_irq(); + xtensa_mx_init_common(root_domain); return 0; } @@ -156,10 +177,9 @@ static int __init xtensa_mx_init(struct device_node *np, struct device_node *interrupt_parent) { struct irq_domain *root_domain = - irq_domain_add_linear(np, NR_IRQS, &xtensa_mx_irq_domain_ops, + irq_domain_create_linear(of_fwnode_handle(np), NR_IRQS, &xtensa_mx_irq_domain_ops, &xtensa_mx_irq_chip); - irq_set_default_host(root_domain); - secondary_init_irq(); + xtensa_mx_init_common(root_domain); return 0; } IRQCHIP_DECLARE(xtensa_mx_irq_chip, "cdns,xtensa-mx", xtensa_mx_init); diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c index c200234dd2c9..44e7be051a2e 100644 --- a/drivers/irqchip/irq-xtensa-pic.c +++ b/drivers/irqchip/irq-xtensa-pic.c @@ -12,14 +12,14 @@ * Kevin Chea */ +#include <linux/bits.h> #include <linux/interrupt.h> #include <linux/irqdomain.h> #include <linux/irq.h> #include <linux/irqchip.h> +#include <linux/irqchip/xtensa-pic.h> #include <linux/of.h> -unsigned int cached_irq_mask; - /* * Device Tree IRQ specifier translation function which works with one or * two cell bindings. First cell value maps directly to the hwirq number. 
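The one-or-two-cell translation convention described in the comment above follows the standard irq_domain_ops .xlate contract: the first specifier cell maps directly to the hwirq number, and an optional second cell carries the trigger type. As a minimal sketch only (the callback name example_xlate_onetwocell below is illustrative and not part of this patch):

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static int example_xlate_onetwocell(struct irq_domain *d,
                                    struct device_node *ctrlr,
                                    const u32 *intspec, unsigned int intsize,
                                    unsigned long *out_hwirq,
                                    unsigned int *out_type)
{
        if (WARN_ON(intsize < 1))
                return -EINVAL;

        /* First cell maps directly to the hwirq number */
        *out_hwirq = intspec[0];

        /* Optional second cell selects the trigger type */
        *out_type = intsize > 1 ? intspec[1] & IRQ_TYPE_SENSE_MASK
                                : IRQ_TYPE_NONE;
        return 0;
}

The core helper irq_domain_xlate_onetwocell() implements exactly this pattern, which is why drivers usually point .xlate at it rather than open-coding the translation.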
@@ -43,41 +43,39 @@ static const struct irq_domain_ops xtensa_irq_domain_ops = { static void xtensa_irq_mask(struct irq_data *d) { - cached_irq_mask &= ~(1 << d->hwirq); - xtensa_set_sr(cached_irq_mask, intenable); -} + u32 irq_mask; -static void xtensa_irq_unmask(struct irq_data *d) -{ - cached_irq_mask |= 1 << d->hwirq; - xtensa_set_sr(cached_irq_mask, intenable); + irq_mask = xtensa_get_sr(intenable); + irq_mask &= ~BIT(d->hwirq); + xtensa_set_sr(irq_mask, intenable); } -static void xtensa_irq_enable(struct irq_data *d) +static void xtensa_irq_unmask(struct irq_data *d) { - xtensa_irq_unmask(d); -} + u32 irq_mask; -static void xtensa_irq_disable(struct irq_data *d) -{ - xtensa_irq_mask(d); + irq_mask = xtensa_get_sr(intenable); + irq_mask |= BIT(d->hwirq); + xtensa_set_sr(irq_mask, intenable); } static void xtensa_irq_ack(struct irq_data *d) { - xtensa_set_sr(1 << d->hwirq, intclear); + xtensa_set_sr(BIT(d->hwirq), intclear); } static int xtensa_irq_retrigger(struct irq_data *d) { - xtensa_set_sr(1 << d->hwirq, intset); + unsigned int mask = BIT(d->hwirq); + + if (WARN_ON(mask & ~XCHAL_INTTYPE_MASK_SOFTWARE)) + return 0; + xtensa_set_sr(mask, intset); return 1; } static struct irq_chip xtensa_irq_chip = { .name = "xtensa", - .irq_enable = xtensa_irq_enable, - .irq_disable = xtensa_irq_disable, .irq_mask = xtensa_irq_mask, .irq_unmask = xtensa_irq_unmask, .irq_ack = xtensa_irq_ack, @@ -87,9 +85,9 @@ static struct irq_chip xtensa_irq_chip = { int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent) { struct irq_domain *root_domain = - irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0, + irq_domain_create_legacy(NULL, NR_IRQS - 1, 1, 0, &xtensa_irq_domain_ops, &xtensa_irq_chip); - irq_set_default_host(root_domain); + irq_set_default_domain(root_domain); return 0; } @@ -97,9 +95,9 @@ static int __init xtensa_pic_init(struct device_node *np, struct device_node *interrupt_parent) { struct irq_domain *root_domain = - irq_domain_add_linear(np, NR_IRQS, &xtensa_irq_domain_ops, + irq_domain_create_linear(of_fwnode_handle(np), NR_IRQS, &xtensa_irq_domain_ops, &xtensa_irq_chip); - irq_set_default_host(root_domain); + irq_set_default_domain(root_domain); return 0; } IRQCHIP_DECLARE(xtensa_irq_chip, "cdns,xtensa-pic", xtensa_pic_init); diff --git a/drivers/irqchip/irq-zevio.c b/drivers/irqchip/irq-zevio.c index cb9d8ec37507..22d46c246594 100644 --- a/drivers/irqchip/irq-zevio.c +++ b/drivers/irqchip/irq-zevio.c @@ -1,12 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * linux/drivers/irqchip/irq-zevio.c * * Copyright (C) 2013 Daniel Tang <tangrs@tangrs.id.au> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2, as - * published by the Free Software Foundation. 
- * */ #include <linux/io.h> @@ -54,8 +50,8 @@ static void __exception_irq_entry zevio_handle_irq(struct pt_regs *regs) while (readl(zevio_irq_io + IO_STATUS)) { irqnr = readl(zevio_irq_io + IO_CURRENT); - handle_domain_irq(zevio_irq_domain, irqnr, regs); - }; + generic_handle_domain_irq(zevio_irq_domain, irqnr); + } } static void __init zevio_init_irq_base(void __iomem *base) @@ -96,8 +92,8 @@ static int __init zevio_of_init(struct device_node *node, zevio_init_irq_base(zevio_irq_io + IO_IRQ_BASE); zevio_init_irq_base(zevio_irq_io + IO_FIQ_BASE); - zevio_irq_domain = irq_domain_add_linear(node, MAX_INTRS, - &irq_generic_chip_ops, NULL); + zevio_irq_domain = irq_domain_create_linear(of_fwnode_handle(node), MAX_INTRS, + &irq_generic_chip_ops, NULL); BUG_ON(!zevio_irq_domain); ret = irq_alloc_domain_generic_chips(zevio_irq_domain, MAX_INTRS, 1, diff --git a/drivers/irqchip/irqchip.c b/drivers/irqchip/irqchip.c index 2b35e68bea82..689c8e448901 100644 --- a/drivers/irqchip/irqchip.c +++ b/drivers/irqchip/irqchip.c @@ -10,8 +10,10 @@ #include <linux/acpi.h> #include <linux/init.h> +#include <linux/of.h> #include <linux/of_irq.h> #include <linux/irqchip.h> +#include <linux/platform_device.h> /* * This special of_device_id is the sentinel at the end of the @@ -20,7 +22,7 @@ * special section. */ static const struct of_device_id -irqchip_of_match_end __used __section(__irqchip_of_table_end); +irqchip_of_match_end __used __section("__irqchip_of_table_end"); extern struct of_device_id __irqchip_of_table[]; @@ -29,3 +31,30 @@ void __init irqchip_init(void) of_irq_init(__irqchip_of_table); acpi_probe_device_table(irqchip); } + +int platform_irqchip_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct device_node *par_np __free(device_node) = of_irq_find_parent(np); + platform_irq_probe_t irq_probe = of_device_get_match_data(&pdev->dev); + + if (!irq_probe) + return -EINVAL; + + if (par_np == np) + par_np = NULL; + + /* + * If there's a parent interrupt controller and none of the parent irq + * domains have been registered, that means the parent interrupt + * controller has not been initialized yet, so it is not yet time for + * this interrupt controller to initialize. Defer its probe; the actual + * initialization callback of this interrupt controller can check for + * specific domains as necessary. + */ + if (par_np && !irq_find_matching_host(par_np, DOMAIN_BUS_ANY)) + return -EPROBE_DEFER; + + return irq_probe(pdev, par_np); +} +EXPORT_SYMBOL_GPL(platform_irqchip_probe); diff --git a/drivers/irqchip/qcom-irq-combiner.c b/drivers/irqchip/qcom-irq-combiner.c index 7f0c0be322e0..09819007d08e 100644 --- a/drivers/irqchip/qcom-irq-combiner.c +++ b/drivers/irqchip/qcom-irq-combiner.c @@ -1,13 +1,5 @@ +// SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
*/ /* @@ -41,7 +33,7 @@ struct combiner { int parent_irq; u32 nirqs; u32 nregs; - struct combiner_reg regs[0]; + struct combiner_reg regs[]; }; static inline int irq_nr(u32 reg, u32 bit) @@ -61,7 +53,6 @@ static void combiner_handle_irq(struct irq_desc *desc) chained_irq_enter(chip, desc); for (reg = 0; reg < combiner->nregs; reg++) { - int virq; int hwirq; u32 bit; u32 status; @@ -78,10 +69,7 @@ static void combiner_handle_irq(struct irq_desc *desc) bit = __ffs(status); status &= ~(1 << bit); hwirq = irq_nr(reg, bit); - virq = irq_find_mapping(combiner->domain, hwirq); - if (virq > 0) - generic_handle_irq(virq); - + generic_handle_domain_irq(combiner->domain, hwirq); } } @@ -234,10 +222,9 @@ static int get_registers(struct platform_device *pdev, struct combiner *comb) return 0; } -static int __init combiner_probe(struct platform_device *pdev) +static int combiner_probe(struct platform_device *pdev) { struct combiner *combiner; - size_t alloc_sz; int nregs; int err; @@ -247,8 +234,8 @@ static int __init combiner_probe(struct platform_device *pdev) return -EINVAL; } - alloc_sz = sizeof(*combiner) + sizeof(struct combiner_reg) * nregs; - combiner = devm_kzalloc(&pdev->dev, alloc_sz, GFP_KERNEL); + combiner = devm_kzalloc(&pdev->dev, struct_size(combiner, regs, nregs), + GFP_KERNEL); if (!combiner) return -ENOMEM; @@ -257,10 +244,8 @@ static int __init combiner_probe(struct platform_device *pdev) return err; combiner->parent_irq = platform_get_irq(pdev, 0); - if (combiner->parent_irq <= 0) { - dev_err(&pdev->dev, "Error getting IRQ resource\n"); + if (combiner->parent_irq <= 0) return -EPROBE_DEFER; - } combiner->domain = irq_domain_create_linear(pdev->dev.fwnode, combiner->nirqs, &domain_ops, combiner); @@ -281,11 +266,11 @@ static const struct acpi_device_id qcom_irq_combiner_ids[] = { { } }; -static struct platform_driver qcom_irq_combiner_probe = { +static struct platform_driver qcom_irq_combiner_driver = { .driver = { .name = "qcom-irq-combiner", .acpi_match_table = ACPI_PTR(qcom_irq_combiner_ids), }, .probe = combiner_probe, }; -builtin_platform_driver(qcom_irq_combiner_probe); +builtin_platform_driver(qcom_irq_combiner_driver); diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c index faa7d61b9d6c..518f7f0f3dab 100644 --- a/drivers/irqchip/qcom-pdc.c +++ b/drivers/irqchip/qcom-pdc.c @@ -1,45 +1,67 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. 
*/ #include <linux/err.h> #include <linux/init.h> +#include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqchip.h> #include <linux/irqdomain.h> #include <linux/io.h> #include <linux/kernel.h> +#include <linux/module.h> #include <linux/of.h> #include <linux/of_address.h> -#include <linux/of_device.h> +#include <linux/of_irq.h> +#include <linux/soc/qcom/irq.h> #include <linux/spinlock.h> -#include <linux/platform_device.h> #include <linux/slab.h> #include <linux/types.h> -#define PDC_MAX_IRQS 126 - -#define CLEAR_INTR(reg, intr) (reg & ~(1 << intr)) -#define ENABLE_INTR(reg, intr) (reg | (1 << intr)) +#define PDC_MAX_GPIO_IRQS 256 +#define PDC_DRV_OFFSET 0x10000 +/* Valid only on HW version < 3.2 */ #define IRQ_ENABLE_BANK 0x10 +#define IRQ_ENABLE_BANK_MAX (IRQ_ENABLE_BANK + BITS_TO_BYTES(PDC_MAX_GPIO_IRQS)) #define IRQ_i_CFG 0x110 +/* Valid only on HW version >= 3.2 */ +#define IRQ_i_CFG_IRQ_ENABLE 3 + +#define IRQ_i_CFG_TYPE_MASK GENMASK(2, 0) + +#define PDC_VERSION_REG 0x1000 + +/* Notable PDC versions */ +#define PDC_VERSION_3_2 0x30200 + struct pdc_pin_region { u32 pin_base; u32 parent_base; u32 cnt; }; +#define pin_to_hwirq(r, p) ((r)->parent_base + (p) - (r)->pin_base) + static DEFINE_RAW_SPINLOCK(pdc_lock); static void __iomem *pdc_base; +static void __iomem *pdc_prev_base; static struct pdc_pin_region *pdc_region; static int pdc_region_cnt; +static unsigned int pdc_version; +static bool pdc_x1e_quirk; + +static void pdc_base_reg_write(void __iomem *base, int reg, u32 i, u32 val) +{ + writel_relaxed(val, base + reg + i * sizeof(u32)); +} static void pdc_reg_write(int reg, u32 i, u32 val) { - writel_relaxed(val, pdc_base + reg + i * sizeof(u32)); + pdc_base_reg_write(pdc_base, reg, i, val); } static u32 pdc_reg_read(int reg, u32 i) @@ -47,32 +69,77 @@ static u32 pdc_reg_read(int reg, u32 i) return readl_relaxed(pdc_base + reg + i * sizeof(u32)); } +static void pdc_x1e_irq_enable_write(u32 bank, u32 enable) +{ + void __iomem *base; + + /* Remap the write access to work around a hardware bug on X1E */ + switch (bank) { + case 0 ... 1: + /* Use previous DRV (client) region and shift to bank 3-4 */ + base = pdc_prev_base; + bank += 3; + break; + case 2 ... 4: + /* Use our own region and shift to bank 0-2 */ + base = pdc_base; + bank -= 2; + break; + case 5: + /* No fixup required for bank 5 */ + base = pdc_base; + break; + default: + WARN_ON(1); + return; + } + + pdc_base_reg_write(base, IRQ_ENABLE_BANK, bank, enable); +} + +static void __pdc_enable_intr(int pin_out, bool on) +{ + unsigned long enable; + + if (pdc_version < PDC_VERSION_3_2) { + u32 index, mask; + + index = pin_out / 32; + mask = pin_out % 32; + + enable = pdc_reg_read(IRQ_ENABLE_BANK, index); + __assign_bit(mask, &enable, on); + + if (pdc_x1e_quirk) + pdc_x1e_irq_enable_write(index, enable); + else + pdc_reg_write(IRQ_ENABLE_BANK, index, enable); + } else { + enable = pdc_reg_read(IRQ_i_CFG, pin_out); + __assign_bit(IRQ_i_CFG_IRQ_ENABLE, &enable, on); + pdc_reg_write(IRQ_i_CFG, pin_out, enable); + } +} + static void pdc_enable_intr(struct irq_data *d, bool on) { - int pin_out = d->hwirq; - u32 index, mask; - u32 enable; - - index = pin_out / 32; - mask = pin_out % 32; - - raw_spin_lock(&pdc_lock); - enable = pdc_reg_read(IRQ_ENABLE_BANK, index); - enable = on ? 
ENABLE_INTR(enable, mask) : CLEAR_INTR(enable, mask); - pdc_reg_write(IRQ_ENABLE_BANK, index, enable); - raw_spin_unlock(&pdc_lock); + unsigned long flags; + + raw_spin_lock_irqsave(&pdc_lock, flags); + __pdc_enable_intr(d->hwirq, on); + raw_spin_unlock_irqrestore(&pdc_lock, flags); } -static void qcom_pdc_gic_mask(struct irq_data *d) +static void qcom_pdc_gic_disable(struct irq_data *d) { pdc_enable_intr(d, false); - irq_chip_mask_parent(d); + irq_chip_disable_parent(d); } -static void qcom_pdc_gic_unmask(struct irq_data *d) +static void qcom_pdc_gic_enable(struct irq_data *d) { pdc_enable_intr(d, true); - irq_chip_unmask_parent(d); + irq_chip_enable_parent(d); } /* @@ -111,8 +178,9 @@ enum pdc_irq_config_bits { */ static int qcom_pdc_gic_set_type(struct irq_data *d, unsigned int type) { - int pin_out = d->hwirq; enum pdc_irq_config_bits pdc_type; + enum pdc_irq_config_bits old_pdc_type; + int ret; switch (type) { case IRQ_TYPE_EDGE_RISING: @@ -138,54 +206,59 @@ static int qcom_pdc_gic_set_type(struct irq_data *d, unsigned int type) return -EINVAL; } - pdc_reg_write(IRQ_i_CFG, pin_out, pdc_type); + old_pdc_type = pdc_reg_read(IRQ_i_CFG, d->hwirq); + pdc_type |= (old_pdc_type & ~IRQ_i_CFG_TYPE_MASK); + pdc_reg_write(IRQ_i_CFG, d->hwirq, pdc_type); + + ret = irq_chip_set_type_parent(d, type); + if (ret) + return ret; - return irq_chip_set_type_parent(d, type); + /* + * When we change types, the PDC can generate a phantom interrupt, + * so clear it. Specifically, the phantom shows up when the polarity + * of an interrupt is reconfigured without changing the state of the + * signal, but let's be consistent and always clear it. + * + * Doing this works because we have IRQCHIP_SET_TYPE_MASKED so the + * interrupt will be cleared before the rest of the system sees it. + */ + if (old_pdc_type != pdc_type) + irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, false); + + return 0; } static struct irq_chip qcom_pdc_gic_chip = { .name = "PDC", .irq_eoi = irq_chip_eoi_parent, - .irq_mask = qcom_pdc_gic_mask, - .irq_unmask = qcom_pdc_gic_unmask, + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_disable = qcom_pdc_gic_disable, + .irq_enable = qcom_pdc_gic_enable, + .irq_get_irqchip_state = irq_chip_get_parent_state, + .irq_set_irqchip_state = irq_chip_set_parent_state, .irq_retrigger = irq_chip_retrigger_hierarchy, .irq_set_type = qcom_pdc_gic_set_type, .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SET_TYPE_MASKED | - IRQCHIP_SKIP_SET_WAKE, + IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND, .irq_set_vcpu_affinity = irq_chip_set_vcpu_affinity_parent, .irq_set_affinity = irq_chip_set_affinity_parent, }; -static irq_hw_number_t get_parent_hwirq(int pin) +static struct pdc_pin_region *get_pin_region(int pin) { int i; - struct pdc_pin_region *region; for (i = 0; i < pdc_region_cnt; i++) { - region = &pdc_region[i]; - if (pin >= region->pin_base && - pin < region->pin_base + region->cnt) - return (region->parent_base + pin - region->pin_base); + if (pin >= pdc_region[i].pin_base && + pin < pdc_region[i].pin_base + pdc_region[i].cnt) + return &pdc_region[i]; } - WARN_ON(1); - return ~0UL; -} - -static int qcom_pdc_translate(struct irq_domain *d, struct irq_fwspec *fwspec, - unsigned long *hwirq, unsigned int *type) -{ - if (is_of_node(fwspec->fwnode)) { - if (fwspec->param_count != 2) - return -EINVAL; - - *hwirq = fwspec->param[0]; - *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; - return 0; - } - - return -EINVAL; + return NULL; } static int qcom_pdc_alloc(struct irq_domain *domain, 
unsigned int virq, @@ -193,23 +266,27 @@ static int qcom_pdc_alloc(struct irq_domain *domain, unsigned int virq, { struct irq_fwspec *fwspec = data; struct irq_fwspec parent_fwspec; - irq_hw_number_t hwirq, parent_hwirq; + struct pdc_pin_region *region; + irq_hw_number_t hwirq; unsigned int type; int ret; - ret = qcom_pdc_translate(domain, fwspec, &hwirq, &type); + ret = irq_domain_translate_twocell(domain, fwspec, &hwirq, &type); if (ret) - return -EINVAL; + return ret; - parent_hwirq = get_parent_hwirq(hwirq); - if (parent_hwirq == ~0UL) - return -EINVAL; + if (hwirq == GPIO_NO_WAKE_IRQ) + return irq_domain_disconnect_hierarchy(domain, virq); - ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, - &qcom_pdc_gic_chip, NULL); + ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, + &qcom_pdc_gic_chip, NULL); if (ret) return ret; + region = get_pin_region(hwirq); + if (!region) + return irq_domain_disconnect_hierarchy(domain->parent, virq); + if (type & IRQ_TYPE_EDGE_BOTH) type = IRQ_TYPE_EDGE_RISING; @@ -219,7 +296,7 @@ static int qcom_pdc_alloc(struct irq_domain *domain, unsigned int virq, parent_fwspec.fwnode = domain->parent->fwnode; parent_fwspec.param_count = 3; parent_fwspec.param[0] = 0; - parent_fwspec.param[1] = parent_hwirq; + parent_fwspec.param[1] = pin_to_hwirq(region, hwirq); parent_fwspec.param[2] = type; return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, @@ -227,14 +304,14 @@ static int qcom_pdc_alloc(struct irq_domain *domain, unsigned int virq, } static const struct irq_domain_ops qcom_pdc_ops = { - .translate = qcom_pdc_translate, + .translate = irq_domain_translate_twocell, .alloc = qcom_pdc_alloc, .free = irq_domain_free_irqs_common, }; static int pdc_setup_pin_mapping(struct device_node *np) { - int ret, n; + int ret, n, i; n = of_property_count_elems_of_size(np, "qcom,pdc-ranges", sizeof(u32)); if (n <= 0 || n % 3) @@ -263,22 +340,59 @@ static int pdc_setup_pin_mapping(struct device_node *np) &pdc_region[n].cnt); if (ret) return ret; + + for (i = 0; i < pdc_region[n].cnt; i++) + __pdc_enable_intr(i + pdc_region[n].pin_base, 0); } return 0; } -static int qcom_pdc_init(struct device_node *node, struct device_node *parent) +#define QCOM_PDC_SIZE 0x30000 + +static int qcom_pdc_probe(struct platform_device *pdev, struct device_node *parent) { struct irq_domain *parent_domain, *pdc_domain; + struct device_node *node = pdev->dev.of_node; + resource_size_t res_size; + struct resource res; int ret; - pdc_base = of_iomap(node, 0); + /* compat with old sm8150 DT which had very small region for PDC */ + if (of_address_to_resource(node, 0, &res)) + return -EINVAL; + + res_size = max_t(resource_size_t, resource_size(&res), QCOM_PDC_SIZE); + if (res_size > resource_size(&res)) + pr_warn("%pOF: invalid reg size, please fix DT\n", node); + + /* + * PDC has multiple DRV regions, each one provides the same set of + * registers for a particular client in the system. Due to a hardware + * bug on X1E, some writes to the IRQ_ENABLE_BANK register must be + * issued inside the previous region. This region belongs to + * a different client and is not described in the device tree. Map the + * region with the expected offset to preserve support for old DTs. 
+ */ + if (of_device_is_compatible(node, "qcom,x1e80100-pdc")) { + pdc_prev_base = ioremap(res.start - PDC_DRV_OFFSET, IRQ_ENABLE_BANK_MAX); + if (!pdc_prev_base) { + pr_err("%pOF: unable to map previous PDC DRV region\n", node); + return -ENXIO; + } + + pdc_x1e_quirk = true; + } + + pdc_base = ioremap(res.start, res_size); if (!pdc_base) { pr_err("%pOF: unable to map PDC registers\n", node); - return -ENXIO; + ret = -ENXIO; + goto fail; } + pdc_version = pdc_reg_read(PDC_VERSION_REG, 0); + parent_domain = irq_find_host(parent); if (!parent_domain) { pr_err("%pOF: unable to find PDC's parent domain\n", node); @@ -292,21 +406,30 @@ static int qcom_pdc_init(struct device_node *node, struct device_node *parent) goto fail; } - pdc_domain = irq_domain_create_hierarchy(parent_domain, 0, PDC_MAX_IRQS, - of_fwnode_handle(node), - &qcom_pdc_ops, NULL); + pdc_domain = irq_domain_create_hierarchy(parent_domain, + IRQ_DOMAIN_FLAG_QCOM_PDC_WAKEUP, + PDC_MAX_GPIO_IRQS, + of_fwnode_handle(node), + &qcom_pdc_ops, NULL); if (!pdc_domain) { - pr_err("%pOF: GIC domain add failed\n", node); + pr_err("%pOF: PDC domain add failed\n", node); ret = -ENOMEM; goto fail; } + irq_domain_update_bus_token(pdc_domain, DOMAIN_BUS_WAKEUP); + return 0; fail: kfree(pdc_region); iounmap(pdc_base); + iounmap(pdc_prev_base); return ret; } -IRQCHIP_DECLARE(pdc_sdm845, "qcom,sdm845-pdc", qcom_pdc_init); +IRQCHIP_PLATFORM_DRIVER_BEGIN(qcom_pdc) +IRQCHIP_MATCH("qcom,pdc", qcom_pdc_probe) +IRQCHIP_PLATFORM_DRIVER_END(qcom_pdc) +MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Power Domain Controller"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c index 1518ba31a80c..576e55569d77 100644 --- a/drivers/irqchip/spear-shirq.c +++ b/drivers/irqchip/spear-shirq.c @@ -149,6 +149,8 @@ static struct spear_shirq spear320_shirq_ras3 = { .offset = 0, .nr_irqs = 7, .mask = ((0x1 << 7) - 1) << 0, + .irq_chip = &dummy_irq_chip, + .status_reg = SPEAR320_INT_STS_MASK_REG, }; static struct spear_shirq spear320_shirq_ras1 = { @@ -237,7 +239,7 @@ static int __init shirq_init(struct spear_shirq **shirq_blocks, int block_nr, goto err_unmap; } - shirq_domain = irq_domain_add_legacy(np, nr_irqs, virq_base, 0, + shirq_domain = irq_domain_create_legacy(of_fwnode_handle(np), nr_irqs, virq_base, 0, &irq_domain_simple_ops, NULL); if (WARN_ON(!shirq_domain)) { pr_warn("%s: irq domain init failed\n", __func__);
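Taken together, the platform_irqchip_probe() helper from the irqchip.c hunk and the IRQCHIP_PLATFORM_DRIVER_BEGIN/IRQCHIP_MATCH/IRQCHIP_PLATFORM_DRIVER_END macros used to convert qcom-pdc above form the standard pattern for modular irqchip drivers: the wrapper returns -EPROBE_DEFER until the parent interrupt domain is registered, then hands the parent node to the driver's callback. A minimal sketch of a chained-controller consumer of this machinery, with hypothetical names ("vendor,example-intc" and example_intc_probe are illustrative, not from this series):

#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

/*
 * Hypothetical probe callback: by the time it runs,
 * platform_irqchip_probe() has already verified that a registered
 * parent domain exists (or returned -EPROBE_DEFER before this point).
 */
static int example_intc_probe(struct platform_device *pdev,
                              struct device_node *parent)
{
        struct irq_domain *parent_domain = irq_find_host(parent);

        if (!parent_domain)
                return -ENXIO;

        /* Create the child domain of the hierarchy here. */
        return 0;
}

IRQCHIP_PLATFORM_DRIVER_BEGIN(example_intc)
IRQCHIP_MATCH("vendor,example-intc", example_intc_probe)
IRQCHIP_PLATFORM_DRIVER_END(example_intc)
MODULE_DESCRIPTION("Example chained irqchip platform driver");
MODULE_LICENSE("GPL");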
