Diffstat (limited to 'drivers/mailbox')
45 files changed, 13173 insertions, 1726 deletions
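For orientation, every controller added below is consumed through the kernel's common mailbox framework (linux/mailbox_client.h): a client driver requests a channel via its 'mboxes' DT property and pushes messages through it. A minimal client sketch, with hypothetical names (my_ipc_*) and an assumed single-entry 'mboxes' binding; it is an illustration, not part of this series:

#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/platform_device.h>

/* Hypothetical consumer of one of the controllers added below. */
static void my_ipc_rx_callback(struct mbox_client *cl, void *msg)
{
	/* Payload layout is controller specific; doorbell modes pass NULL. */
	dev_info(cl->dev, "message received\n");
}

static int my_ipc_probe(struct platform_device *pdev)
{
	struct mbox_client *cl;
	struct mbox_chan *chan;
	u32 msg = 0x1;
	int ret;

	cl = devm_kzalloc(&pdev->dev, sizeof(*cl), GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	cl->dev = &pdev->dev;
	cl->rx_callback = my_ipc_rx_callback;
	cl->tx_block = true;		/* block until the controller reports tx done */
	cl->tx_tout = 500;		/* ms */

	chan = mbox_request_channel(cl, 0);	/* index into the 'mboxes' property */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = mbox_send_message(chan, &msg);	/* returns >= 0 on success */
	mbox_free_channel(chan);

	return ret < 0 ? ret : 0;
}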
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig index 3eeb12e93e98..29f16f220384 100644 --- a/drivers/mailbox/Kconfig +++ b/drivers/mailbox/Kconfig @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only menuconfig MAILBOX bool "Mailbox Hardware Support" help @@ -15,6 +16,56 @@ config ARM_MHU The controller has 3 mailbox channels, the last of which can be used in Secure mode only. +config ARM_MHU_V2 + tristate "ARM MHUv2 Mailbox" + depends on ARM_AMBA + help + Say Y here if you want to build the ARM MHUv2 controller driver, + which provides unidirectional mailboxes between processing elements. + +config ARM_MHU_V3 + tristate "ARM MHUv3 Mailbox" + depends on ARM64 || COMPILE_TEST + depends on HAS_IOMEM || COMPILE_TEST + depends on OF + help + Say Y here if you want to build the ARM MHUv3 controller driver, + which provides unidirectional mailboxes between processing elements. + + ARM MHUv3 controllers can implement a varying number of extensions + that provide different means of transport: supported extensions + will be discovered and possibly managed at probe-time. + +config AST2700_MBOX + tristate "ASPEED AST2700 IPC driver" + depends on ARCH_ASPEED || COMPILE_TEST + help + Mailbox driver implementation for ASPEED AST27XX SoCs. This driver + can be used to send messages between different processors in the SoC. + The driver provides mailbox support for sending interrupts to the + clients. Say Y here if you want to build this driver. + +config CV1800_MBOX + tristate "cv1800 mailbox" + depends on ARCH_SOPHGO || COMPILE_TEST + help + Mailbox driver implementation for Sophgo CV18XX SoCs. This driver + can be used to send messages between different processors in the SoC. Any + processor can write data to a channel and set the corresponding register + to raise an interrupt to notify another processor, and it is allowed to + send data to itself. + +config EXYNOS_MBOX + tristate "Exynos Mailbox" + depends on ARCH_EXYNOS || COMPILE_TEST + help + Say Y here if you want to build the Samsung Exynos Mailbox controller + driver. The controller has 16 flag bits for hardware interrupt + generation and a shared register for passing mailbox messages. + When the controller is used by the ACPM interface the shared register + is ignored and the mailbox controller acts as a doorbell that raises + the interrupt to the ACPM firmware. + config IMX_MBOX tristate "i.MX Mailbox" depends on ARCH_MXC || COMPILE_TEST @@ -41,26 +92,27 @@ config PL320_MBOX Management Engine, primarily for cpufreq. Say Y here if you want to use the PL320 IPCM support. +config ARMADA_37XX_RWTM_MBOX + tristate "Armada 37xx rWTM BIU Mailbox" + depends on ARCH_MVEBU || COMPILE_TEST + depends on OF + help + Mailbox implementation for communication with the firmware + running on the Cortex-M3 rWTM secure processor of the Armada 37xx + SoC. Say Y here if you are building for such a device (for example + the Turris Mox router). + config OMAP2PLUS_MBOX tristate "OMAP2+ Mailbox framework support" - depends on ARCH_OMAP2PLUS + depends on ARCH_OMAP2PLUS || ARCH_K3 || COMPILE_TEST help Mailbox implementation for OMAP family chips with hardware for interprocessor communication involving DSP, IVA1.0 and IVA2 in OMAP2/3; or IPU, IVA HD and DSP in OMAP4/5. Say Y here if you want to use OMAP2+ Mailbox framework support. -config OMAP_MBOX_KFIFO_SIZE - int "Mailbox kfifo default buffer size (bytes)" - depends on OMAP2PLUS_MBOX - default 256 - help - Specify the default size of mailbox's kfifo buffers (bytes).
- This can also be changed at runtime (via the mbox_kfifo_size - module parameter). - config ROCKCHIP_MBOX - bool "Rockchip Soc Intergrated Mailbox Support" + bool "Rockchip Soc Integrated Mailbox Support" depends on ARCH_ROCKCHIP || COMPILE_TEST help This driver provides support for inter-processor communication @@ -105,7 +157,8 @@ config STI_MBOX config TI_MESSAGE_MANAGER tristate "Texas Instruments Message Manager Driver" - depends on ARCH_KEYSTONE || ARCH_K3 + depends on ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST + default ARCH_K3 help An implementation of Message Manager slave driver for Keystone and K3 architecture SoCs from Texas Instruments. Message Manager @@ -142,6 +195,32 @@ config MAILBOX_TEST Test client to help with testing new Controller driver implementations. +config POLARFIRE_SOC_MAILBOX + tristate "PolarFire SoC (MPFS) Mailbox" + depends on HAS_IOMEM + depends on MFD_SYSCON + depends on ARCH_MICROCHIP_POLARFIRE || COMPILE_TEST + help + This driver adds support for the PolarFire SoC (MPFS) mailbox controller. + + To compile this driver as a module, choose M here. The + module will be called mailbox-mpfs. + + If unsure, say N. + +config MCHP_SBI_IPC_MBOX + tristate "Microchip Inter-processor Communication (IPC) SBI driver" + depends on RISCV_SBI + depends on ARCH_MICROCHIP || COMPILE_TEST + help + Mailbox implementation for Microchip devices with an + inter-processor communication (IPC) controller. + + To compile this driver as a module, choose M here. The + module will be called mailbox-mchp-ipc-sbi. + + If unsure, say N. + config QCOM_APCS_IPC tristate "Qualcomm APCS IPC driver" depends on ARCH_QCOM || COMPILE_TEST @@ -181,7 +260,7 @@ config BCM_FLEXRM_MBOX tristate "Broadcom FlexRM Mailbox" depends on ARM64 depends on ARCH_BCM_IPROC || COMPILE_TEST - select GENERIC_MSI_IRQ_DOMAIN + select GENERIC_MSI_IRQ default m if ARCH_BCM_IPROC help Mailbox implementation of the Broadcom FlexRM ring manager, @@ -190,12 +269,21 @@ config BCM_FLEXRM_MBOX config STM32_IPCC tristate "STM32 IPCC Mailbox" - depends on MACH_STM32MP157 + depends on MACH_STM32MP157 || COMPILE_TEST help Mailbox implementation for STMicroelectronics STM32 family chips with hardware for Inter-Processor Communication Controller (IPCC) between processors. Say Y here if you want to have this support. +config MTK_ADSP_MBOX + tristate "MediaTek ADSP Mailbox Controller" + depends on ARCH_MEDIATEK || COMPILE_TEST + help + Say yes here to add support for the MediaTek ADSP Mailbox Controller. + This mailbox driver is used to send notifications or short messages + between processors and the ADSP. It will place the message in the shared + buffer and will access the IPC control. + config MTK_CMDQ_MBOX tristate "MediaTek CMDQ Mailbox Support" depends on ARCH_MEDIATEK || COMPILE_TEST @@ -205,4 +293,101 @@ config MTK_CMDQ_MBOX mailbox driver. The CMDQ is used to help read/write registers with critical time limitation, such as updating display configuration during the vblank. + +config MTK_GPUEB_MBOX + tristate "MediaTek GPUEB Mailbox Support" + depends on ARCH_MEDIATEK || COMPILE_TEST + help + The MediaTek GPUEB mailbox is used to communicate with the embedded + controller in charge of GPU frequency and power management on some + MediaTek SoCs, such as the MT8196. + Say Y or M here if you want to support the MT8196 SoC in your kernel + build. + +config ZYNQMP_IPI_MBOX + tristate "Xilinx ZynqMP IPI Mailbox" + depends on ARCH_ZYNQMP && OF + help + Say yes here to add support for the Xilinx IPI mailbox driver.
+ This mailbox driver is used to send notifications or short messages + between processors with the Xilinx ZynqMP IPI. It will place the + message in the IPI buffer and will access the IPI control + registers to kick the other processor or enquire about its status. + +config SUN6I_MSGBOX + tristate "Allwinner sun6i/sun8i/sun9i/sun50i Message Box" + depends on ARCH_SUNXI || COMPILE_TEST + default ARCH_SUNXI + help + Mailbox implementation for the hardware message box present in + various Allwinner SoCs. This mailbox is used for communication + between the application CPUs and the power management coprocessor. + +config SPRD_MBOX + tristate "Spreadtrum Mailbox" + depends on ARCH_SPRD || COMPILE_TEST + help + Mailbox driver implementation for the Spreadtrum platform. It is used + to send messages between the application processors and the MCU. Say Y here if + you want to build the Spreadtrum mailbox controller driver. + +config QCOM_CPUCP_MBOX + tristate "Qualcomm Technologies, Inc. CPUCP mailbox driver" + depends on (ARCH_QCOM || COMPILE_TEST) && 64BIT + help + The Qualcomm Technologies, Inc. CPUSS Control Processor (CPUCP) mailbox + controller driver enables communication between the AP and the CPUCP. Say + Y here if you want to build this driver. + +config QCOM_IPCC + tristate "Qualcomm Technologies, Inc. IPCC driver" + depends on ARCH_QCOM || COMPILE_TEST + help + Qualcomm Technologies, Inc. Inter-Processor Communication Controller + (IPCC) driver for MSM devices. The driver provides mailbox support for + sending interrupts to the clients. On the other hand, the driver also + acts as an interrupt controller for receiving interrupts from clients. + Say Y here if you want to build this driver. + +config THEAD_TH1520_MBOX + tristate "T-head TH1520 Mailbox" + depends on ARCH_THEAD || COMPILE_TEST + help + Mailbox driver implementation for the T-head TH1520 platform. Enables + two cores within the SoC to communicate and coordinate by passing + messages. It can be used to communicate between the E910 core, on which the + kernel is running, and the E902 core used for power management, among other + things. + +config CIX_MBOX + tristate "CIX Mailbox" + depends on ARCH_CIX || COMPILE_TEST + depends on OF + help + Mailbox implementation for the CIX IPC system. The controller supports + 11 mailbox channels with different operating modes, and every channel + is unidirectional. Say Y here if you want to use the CIX Mailbox + support. + +config BCM74110_MAILBOX + tristate "Brcmstb BCM74110 Mailbox" + depends on ARCH_BRCMSTB || COMPILE_TEST + default ARCH_BRCMSTB + help + Broadcom STB mailbox driver, present starting with the brcmstb BCM74110 + SoCs. The mailbox is a communication channel between the host + processor and the coprocessor that handles various power management tasks + and more. + +config RISCV_SBI_MPXY_MBOX + tristate "RISC-V SBI Message Proxy (MPXY) Mailbox" + depends on RISCV_SBI + default RISCV + help + Mailbox driver implementation for the RISC-V SBI Message Proxy (MPXY) + extension. This mailbox driver is used to send messages to the + remote processor through the SBI implementation (M-mode firmware + or HS-mode hypervisor). Say Y here if you want to have this support. + If unsure say N.
+ endif diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile index c818b5d011ae..81820a4f5528 100644 --- a/drivers/mailbox/Makefile +++ b/drivers/mailbox/Makefile @@ -5,10 +5,22 @@ obj-$(CONFIG_MAILBOX) += mailbox.o obj-$(CONFIG_MAILBOX_TEST) += mailbox-test.o -obj-$(CONFIG_ARM_MHU) += arm_mhu.o +obj-$(CONFIG_ARM_MHU) += arm_mhu.o arm_mhu_db.o + +obj-$(CONFIG_ARM_MHU_V2) += arm_mhuv2.o + +obj-$(CONFIG_ARM_MHU_V3) += arm_mhuv3.o + +obj-$(CONFIG_AST2700_MBOX) += ast2700-mailbox.o + +obj-$(CONFIG_CV1800_MBOX) += cv1800-mailbox.o + +obj-$(CONFIG_EXYNOS_MBOX) += exynos-mailbox.o obj-$(CONFIG_IMX_MBOX) += imx-mailbox.o +obj-$(CONFIG_ARMADA_37XX_RWTM_MBOX) += armada-37xx-rwtm-mailbox.o + obj-$(CONFIG_PLATFORM_MHU) += platform_mhu.o obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o @@ -37,10 +49,36 @@ obj-$(CONFIG_BCM_PDC_MBOX) += bcm-pdc-mailbox.o obj-$(CONFIG_BCM_FLEXRM_MBOX) += bcm-flexrm-mailbox.o +obj-$(CONFIG_POLARFIRE_SOC_MAILBOX) += mailbox-mpfs.o + +obj-$(CONFIG_MCHP_SBI_IPC_MBOX) += mailbox-mchp-ipc-sbi.o + obj-$(CONFIG_QCOM_APCS_IPC) += qcom-apcs-ipc-mailbox.o obj-$(CONFIG_TEGRA_HSP_MBOX) += tegra-hsp.o obj-$(CONFIG_STM32_IPCC) += stm32-ipcc.o +obj-$(CONFIG_MTK_ADSP_MBOX) += mtk-adsp-mailbox.o + obj-$(CONFIG_MTK_CMDQ_MBOX) += mtk-cmdq-mailbox.o + +obj-$(CONFIG_MTK_GPUEB_MBOX) += mtk-gpueb-mailbox.o + +obj-$(CONFIG_ZYNQMP_IPI_MBOX) += zynqmp-ipi-mailbox.o + +obj-$(CONFIG_SUN6I_MSGBOX) += sun6i-msgbox.o + +obj-$(CONFIG_SPRD_MBOX) += sprd-mailbox.o + +obj-$(CONFIG_QCOM_CPUCP_MBOX) += qcom-cpucp-mbox.o + +obj-$(CONFIG_QCOM_IPCC) += qcom-ipcc.o + +obj-$(CONFIG_THEAD_TH1520_MBOX) += mailbox-th1520.o + +obj-$(CONFIG_CIX_MBOX) += cix-mailbox.o + +obj-$(CONFIG_BCM74110_MAILBOX) += bcm74110-mailbox.o + +obj-$(CONFIG_RISCV_SBI_MPXY_MBOX) += riscv-sbi-mpxy-mbox.o diff --git a/drivers/mailbox/arm_mhu.c b/drivers/mailbox/arm_mhu.c index 64d85c6a2bdf..0950b7bce184 100644 --- a/drivers/mailbox/arm_mhu.c +++ b/drivers/mailbox/arm_mhu.c @@ -1,28 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013-2015 Fujitsu Semiconductor Ltd. * Copyright (C) 2015 Linaro Ltd. * Author: Jassi Brar <jaswinder.singh@linaro.org> - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
*/ -#include <linux/interrupt.h> -#include <linux/spinlock.h> -#include <linux/mutex.h> -#include <linux/delay.h> -#include <linux/slab.h> +#include <linux/amba/bus.h> +#include <linux/device.h> #include <linux/err.h> +#include <linux/interrupt.h> #include <linux/io.h> -#include <linux/module.h> -#include <linux/amba/bus.h> #include <linux/mailbox_controller.h> +#include <linux/module.h> +#include <linux/of.h> #define INTR_STAT_OFS 0x0 #define INTR_SET_OFS 0x8 @@ -124,16 +114,17 @@ static int mhu_probe(struct amba_device *adev, const struct amba_id *id) struct device *dev = &adev->dev; int mhu_reg[MHU_CHANS] = {MHU_LP_OFFSET, MHU_HP_OFFSET, MHU_SEC_OFFSET}; + if (!of_device_is_compatible(dev->of_node, "arm,mhu")) + return -ENODEV; + /* Allocate memory for device */ mhu = devm_kzalloc(dev, sizeof(*mhu), GFP_KERNEL); if (!mhu) return -ENOMEM; mhu->base = devm_ioremap_resource(dev, &adev->res); - if (IS_ERR(mhu->base)) { - dev_err(dev, "ioremap failed\n"); + if (IS_ERR(mhu->base)) return PTR_ERR(mhu->base); - } for (i = 0; i < MHU_CHANS; i++) { mhu->chan[i].con_priv = &mhu->mlink[i]; @@ -162,7 +153,7 @@ static int mhu_probe(struct amba_device *adev, const struct amba_id *id) return 0; } -static struct amba_id mhu_ids[] = { +static const struct amba_id mhu_ids[] = { { .id = 0x1bb098, .mask = 0xffffff, diff --git a/drivers/mailbox/arm_mhu_db.c b/drivers/mailbox/arm_mhu_db.c new file mode 100644 index 000000000000..9e937b09c5fb --- /dev/null +++ b/drivers/mailbox/arm_mhu_db.c @@ -0,0 +1,351 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013-2015 Fujitsu Semiconductor Ltd. + * Copyright (C) 2015 Linaro Ltd. + * Based on ARM MHU driver by Jassi Brar <jaswinder.singh@linaro.org> + * Copyright (C) 2020 ARM Ltd. + */ + +#include <linux/amba/bus.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/mailbox_controller.h> +#include <linux/module.h> +#include <linux/of.h> + +#define INTR_STAT_OFS 0x0 +#define INTR_SET_OFS 0x8 +#define INTR_CLR_OFS 0x10 + +#define MHU_LP_OFFSET 0x0 +#define MHU_HP_OFFSET 0x20 +#define MHU_SEC_OFFSET 0x200 +#define TX_REG_OFFSET 0x100 + +#define MHU_CHANS 3 /* Secure, Non-Secure High and Low Priority */ +#define MHU_CHAN_MAX 20 /* Max channels to save on unused RAM */ +#define MHU_NUM_DOORBELLS 32 + +struct mhu_db_link { + unsigned int irq; + void __iomem *tx_reg; + void __iomem *rx_reg; +}; + +struct arm_mhu { + void __iomem *base; + struct mhu_db_link mlink[MHU_CHANS]; + struct mbox_controller mbox; + struct device *dev; +}; + +/** + * struct mhu_db_channel - ARM MHU Mailbox allocated channel information + * + * @mhu: Pointer to parent mailbox device + * @pchan: Physical channel within which this doorbell resides in + * @doorbell: doorbell number pertaining to this channel + */ +struct mhu_db_channel { + struct arm_mhu *mhu; + unsigned int pchan; + unsigned int doorbell; +}; + +static inline struct mbox_chan * +mhu_db_mbox_to_channel(struct mbox_controller *mbox, unsigned int pchan, + unsigned int doorbell) +{ + int i; + struct mhu_db_channel *chan_info; + + for (i = 0; i < mbox->num_chans; i++) { + chan_info = mbox->chans[i].con_priv; + if (chan_info && chan_info->pchan == pchan && + chan_info->doorbell == doorbell) + return &mbox->chans[i]; + } + + return NULL; +} + +static void mhu_db_mbox_clear_irq(struct mbox_chan *chan) +{ + struct mhu_db_channel *chan_info = chan->con_priv; + void __iomem *base = chan_info->mhu->mlink[chan_info->pchan].rx_reg; + + 
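+	/* Writing the doorbell's bit to INTR_CLR acks only this doorbell; other pending doorbells in the window stay set. */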
writel_relaxed(BIT(chan_info->doorbell), base + INTR_CLR_OFS); +} + +static unsigned int mhu_db_mbox_irq_to_pchan_num(struct arm_mhu *mhu, int irq) +{ + unsigned int pchan; + + for (pchan = 0; pchan < MHU_CHANS; pchan++) + if (mhu->mlink[pchan].irq == irq) + break; + return pchan; +} + +static struct mbox_chan * +mhu_db_mbox_irq_to_channel(struct arm_mhu *mhu, unsigned int pchan) +{ + unsigned long bits; + unsigned int doorbell; + struct mbox_chan *chan = NULL; + struct mbox_controller *mbox = &mhu->mbox; + void __iomem *base = mhu->mlink[pchan].rx_reg; + + bits = readl_relaxed(base + INTR_STAT_OFS); + if (!bits) + /* No IRQs fired in specified physical channel */ + return NULL; + + /* An IRQ has fired, find the associated channel */ + for (doorbell = 0; bits; doorbell++) { + if (!test_and_clear_bit(doorbell, &bits)) + continue; + + chan = mhu_db_mbox_to_channel(mbox, pchan, doorbell); + if (chan) + break; + dev_err(mbox->dev, + "Channel not registered: pchan: %d doorbell: %d\n", + pchan, doorbell); + } + + return chan; +} + +static irqreturn_t mhu_db_mbox_rx_handler(int irq, void *data) +{ + struct mbox_chan *chan; + struct arm_mhu *mhu = data; + unsigned int pchan = mhu_db_mbox_irq_to_pchan_num(mhu, irq); + + while (NULL != (chan = mhu_db_mbox_irq_to_channel(mhu, pchan))) { + mbox_chan_received_data(chan, NULL); + mhu_db_mbox_clear_irq(chan); + } + + return IRQ_HANDLED; +} + +static bool mhu_db_last_tx_done(struct mbox_chan *chan) +{ + struct mhu_db_channel *chan_info = chan->con_priv; + void __iomem *base = chan_info->mhu->mlink[chan_info->pchan].tx_reg; + + if (readl_relaxed(base + INTR_STAT_OFS) & BIT(chan_info->doorbell)) + return false; + + return true; +} + +static int mhu_db_send_data(struct mbox_chan *chan, void *data) +{ + struct mhu_db_channel *chan_info = chan->con_priv; + void __iomem *base = chan_info->mhu->mlink[chan_info->pchan].tx_reg; + + /* Send event to co-processor */ + writel_relaxed(BIT(chan_info->doorbell), base + INTR_SET_OFS); + + return 0; +} + +static int mhu_db_startup(struct mbox_chan *chan) +{ + mhu_db_mbox_clear_irq(chan); + return 0; +} + +static void mhu_db_shutdown(struct mbox_chan *chan) +{ + struct mhu_db_channel *chan_info = chan->con_priv; + struct mbox_controller *mbox = &chan_info->mhu->mbox; + int i; + + for (i = 0; i < mbox->num_chans; i++) + if (chan == &mbox->chans[i]) + break; + + if (mbox->num_chans == i) { + dev_warn(mbox->dev, "Request to free non-existent channel\n"); + return; + } + + /* Reset channel */ + mhu_db_mbox_clear_irq(chan); + devm_kfree(mbox->dev, chan->con_priv); + chan->con_priv = NULL; +} + +static struct mbox_chan *mhu_db_mbox_xlate(struct mbox_controller *mbox, + const struct of_phandle_args *spec) +{ + struct arm_mhu *mhu = dev_get_drvdata(mbox->dev); + struct mhu_db_channel *chan_info; + struct mbox_chan *chan; + unsigned int pchan = spec->args[0]; + unsigned int doorbell = spec->args[1]; + int i; + + /* Bounds checking */ + if (pchan >= MHU_CHANS || doorbell >= MHU_NUM_DOORBELLS) { + dev_err(mbox->dev, + "Invalid channel requested pchan: %d doorbell: %d\n", + pchan, doorbell); + return ERR_PTR(-EINVAL); + } + + /* Is requested channel free? 
*/ + chan = mhu_db_mbox_to_channel(mbox, pchan, doorbell); + if (chan) { + dev_err(mbox->dev, "Channel in use: pchan: %d doorbell: %d\n", + pchan, doorbell); + return ERR_PTR(-EBUSY); + } + + /* Find the first free slot */ + for (i = 0; i < mbox->num_chans; i++) + if (!mbox->chans[i].con_priv) + break; + + if (mbox->num_chans == i) { + dev_err(mbox->dev, "No free channels left\n"); + return ERR_PTR(-EBUSY); + } + + chan = &mbox->chans[i]; + + chan_info = devm_kzalloc(mbox->dev, sizeof(*chan_info), GFP_KERNEL); + if (!chan_info) + return ERR_PTR(-ENOMEM); + + chan_info->mhu = mhu; + chan_info->pchan = pchan; + chan_info->doorbell = doorbell; + + chan->con_priv = chan_info; + + dev_dbg(mbox->dev, "mbox: created channel phys: %d doorbell: %d\n", + pchan, doorbell); + + return chan; +} + +static const struct mbox_chan_ops mhu_db_ops = { + .send_data = mhu_db_send_data, + .startup = mhu_db_startup, + .shutdown = mhu_db_shutdown, + .last_tx_done = mhu_db_last_tx_done, +}; + +static int mhu_db_probe(struct amba_device *adev, const struct amba_id *id) +{ + u32 cell_count; + int i, err, max_chans; + struct arm_mhu *mhu; + struct mbox_chan *chans; + struct device *dev = &adev->dev; + struct device_node *np = dev->of_node; + int mhu_reg[MHU_CHANS] = { + MHU_LP_OFFSET, MHU_HP_OFFSET, MHU_SEC_OFFSET, + }; + + if (!of_device_is_compatible(np, "arm,mhu-doorbell")) + return -ENODEV; + + err = of_property_read_u32(np, "#mbox-cells", &cell_count); + if (err) { + dev_err(dev, "failed to read #mbox-cells in '%pOF'\n", np); + return err; + } + + if (cell_count == 2) { + max_chans = MHU_CHAN_MAX; + } else { + dev_err(dev, "incorrect value of #mbox-cells in '%pOF'\n", np); + return -EINVAL; + } + + mhu = devm_kzalloc(dev, sizeof(*mhu), GFP_KERNEL); + if (!mhu) + return -ENOMEM; + + mhu->base = devm_ioremap_resource(dev, &adev->res); + if (IS_ERR(mhu->base)) + return PTR_ERR(mhu->base); + + chans = devm_kcalloc(dev, max_chans, sizeof(*chans), GFP_KERNEL); + if (!chans) + return -ENOMEM; + + mhu->dev = dev; + mhu->mbox.dev = dev; + mhu->mbox.chans = chans; + mhu->mbox.num_chans = max_chans; + mhu->mbox.txdone_irq = false; + mhu->mbox.txdone_poll = true; + mhu->mbox.txpoll_period = 1; + + mhu->mbox.of_xlate = mhu_db_mbox_xlate; + amba_set_drvdata(adev, mhu); + + mhu->mbox.ops = &mhu_db_ops; + + err = devm_mbox_controller_register(dev, &mhu->mbox); + if (err) { + dev_err(dev, "Failed to register mailboxes %d\n", err); + return err; + } + + for (i = 0; i < MHU_CHANS; i++) { + int irq = mhu->mlink[i].irq = adev->irq[i]; + + if (irq <= 0) { + dev_dbg(dev, "No IRQ found for Channel %d\n", i); + continue; + } + + mhu->mlink[i].rx_reg = mhu->base + mhu_reg[i]; + mhu->mlink[i].tx_reg = mhu->mlink[i].rx_reg + TX_REG_OFFSET; + + err = devm_request_threaded_irq(dev, irq, NULL, + mhu_db_mbox_rx_handler, + IRQF_ONESHOT, "mhu_db_link", mhu); + if (err) { + dev_err(dev, "Can't claim IRQ %d\n", irq); + mbox_controller_unregister(&mhu->mbox); + return err; + } + } + + dev_info(dev, "ARM MHU Doorbell mailbox registered\n"); + return 0; +} + +static const struct amba_id mhu_ids[] = { + { + .id = 0x1bb098, + .mask = 0xffffff, + }, + { 0, 0 }, +}; +MODULE_DEVICE_TABLE(amba, mhu_ids); + +static struct amba_driver arm_mhu_db_driver = { + .drv = { + .name = "mhu-doorbell", + }, + .id_table = mhu_ids, + .probe = mhu_db_probe, +}; +module_amba_driver(arm_mhu_db_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("ARM MHU Doorbell Driver"); +MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>"); diff --git a/drivers/mailbox/arm_mhuv2.c 
b/drivers/mailbox/arm_mhuv2.c new file mode 100644 index 000000000000..f035284944c0 --- /dev/null +++ b/drivers/mailbox/arm_mhuv2.c @@ -0,0 +1,1138 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ARM Message Handling Unit Version 2 (MHUv2) driver. + * + * Copyright (C) 2020 ARM Ltd. + * Copyright (C) 2020 Linaro Ltd. + * + * An MHUv2 mailbox controller can provide up to 124 channel windows (each 32 + * bit long) and the driver allows any combination of both the transport + * protocol modes: data-transfer and doorbell, to be used on those channel + * windows. + * + * The transport protocols should be specified in the device tree entry for the + * device. The transport protocols determine how the underlying hardware + * resources of the device are utilized when transmitting data. Refer to the + * device tree bindings of the ARM MHUv2 controller for more details. + * + * The number of registered mailbox channels is dependent on both the underlying + * hardware - mainly the number of channel windows implemented by the platform, + * as well as the selected transport protocols. + * + * The MHUv2 controller can work both as a sender and receiver, but the driver + * and the DT bindings support unidirectional transfers for better allocation of + * the channels. That is, this driver will be probed for two separate devices + * for each mailbox controller, a sender device and a receiver device. + */ + +#include <linux/amba/bus.h> +#include <linux/interrupt.h> +#include <linux/mailbox_controller.h> +#include <linux/mailbox/arm_mhuv2_message.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/spinlock.h> + +/* ====== MHUv2 Registers ====== */ + +/* Maximum number of channel windows */ +#define MHUV2_CH_WN_MAX 124 +/* Number of combined interrupt status registers */ +#define MHUV2_CMB_INT_ST_REG_CNT 4 +#define MHUV2_STAT_BYTES (sizeof(u32)) +#define MHUV2_STAT_BITS (MHUV2_STAT_BYTES * __CHAR_BIT__) + +#define LSB_MASK(n) ((1 << (n * __CHAR_BIT__)) - 1) +#define MHUV2_PROTOCOL_PROP "arm,mhuv2-protocols" + +/* Register Message Handling Unit Configuration fields */ +struct mhu_cfg_t { + u32 num_ch : 7; + u32 pad : 25; +} __packed; + +/* register Interrupt Status fields */ +struct int_st_t { + u32 nr2r : 1; + u32 r2nr : 1; + u32 pad : 30; +} __packed; + +/* Register Interrupt Clear fields */ +struct int_clr_t { + u32 nr2r : 1; + u32 r2nr : 1; + u32 pad : 30; +} __packed; + +/* Register Interrupt Enable fields */ +struct int_en_t { + u32 r2nr : 1; + u32 nr2r : 1; + u32 chcomb : 1; + u32 pad : 29; +} __packed; + +/* Register Implementer Identification fields */ +struct iidr_t { + u32 implementer : 12; + u32 revision : 4; + u32 variant : 4; + u32 product_id : 12; +} __packed; + +/* Register Architecture Identification Register fields */ +struct aidr_t { + u32 arch_minor_rev : 4; + u32 arch_major_rev : 4; + u32 pad : 24; +} __packed; + +/* Sender Channel Window fields */ +struct mhu2_send_ch_wn_reg { + u32 stat; + u8 pad1[0x0C - 0x04]; + u32 stat_set; + u32 int_st; + u32 int_clr; + u32 int_en; + u8 pad2[0x20 - 0x1C]; +} __packed; + +/* Sender frame register fields */ +struct mhu2_send_frame_reg { + struct mhu2_send_ch_wn_reg ch_wn[MHUV2_CH_WN_MAX]; + struct mhu_cfg_t mhu_cfg; + u32 resp_cfg; + u32 access_request; + u32 access_ready; + struct int_st_t int_st; + struct int_clr_t int_clr; + struct int_en_t int_en; + u32 reserved0; + u32 chcomb_int_st[MHUV2_CMB_INT_ST_REG_CNT]; + u8 pad[0xFC8 - 0xFB0]; + struct iidr_t iidr; + struct aidr_t aidr; +} __packed; + +/* Receiver Channel Window 
fields */ +struct mhu2_recv_ch_wn_reg { + u32 stat; + u32 stat_masked; + u32 stat_clear; + u8 reserved0[0x10 - 0x0C]; + u32 mask; + u32 mask_set; + u32 mask_clear; + u8 pad[0x20 - 0x1C]; +} __packed; + +/* Receiver frame register fields */ +struct mhu2_recv_frame_reg { + struct mhu2_recv_ch_wn_reg ch_wn[MHUV2_CH_WN_MAX]; + struct mhu_cfg_t mhu_cfg; + u8 reserved0[0xF90 - 0xF84]; + struct int_st_t int_st; + struct int_clr_t int_clr; + struct int_en_t int_en; + u32 pad; + u32 chcomb_int_st[MHUV2_CMB_INT_ST_REG_CNT]; + u8 reserved2[0xFC8 - 0xFB0]; + struct iidr_t iidr; + struct aidr_t aidr; +} __packed; + + +/* ====== MHUv2 data structures ====== */ + +enum mhuv2_transport_protocol { + DOORBELL = 0, + DATA_TRANSFER = 1 +}; + +enum mhuv2_frame { + RECEIVER_FRAME, + SENDER_FRAME +}; + +/** + * struct mhuv2 - MHUv2 mailbox controller data + * + * @mbox: Mailbox controller belonging to the MHU frame. + * @send: Base address of the register mapping region (sender frame). + * @recv: Base address of the register mapping region (receiver frame). + * @frame: Frame type: RECEIVER_FRAME or SENDER_FRAME. + * @irq: Interrupt. + * @windows: Channel windows implemented by the platform. + * @minor: Minor version of the controller. + * @length: Length of the protocols array in bytes. + * @protocols: Raw protocol information, derived from device tree. + * @doorbell_pending_lock: spinlock required for correct operation of Tx + * interrupt for doorbells. + */ +struct mhuv2 { + struct mbox_controller mbox; + union { + struct mhu2_send_frame_reg __iomem *send; + struct mhu2_recv_frame_reg __iomem *recv; + }; + enum mhuv2_frame frame; + unsigned int irq; + unsigned int windows; + unsigned int minor; + unsigned int length; + u32 *protocols; + + spinlock_t doorbell_pending_lock; +}; + +#define mhu_from_mbox(_mbox) container_of(_mbox, struct mhuv2, mbox) + +/** + * struct mhuv2_protocol_ops - MHUv2 operations + * + * Each transport protocol must provide an implementation of the operations + * provided here. + * + * @rx_startup: Startup callback for receiver. + * @rx_shutdown: Shutdown callback for receiver. + * @read_data: Reads and clears newly available data. + * @tx_startup: Startup callback for transmitter. + * @tx_shutdown: Shutdown callback for transmitter. + * @last_tx_done: Report back if the last tx is completed or not. + * @send_data: Send data to the receiver. + */ +struct mhuv2_protocol_ops { + int (*rx_startup)(struct mhuv2 *mhu, struct mbox_chan *chan); + void (*rx_shutdown)(struct mhuv2 *mhu, struct mbox_chan *chan); + void *(*read_data)(struct mhuv2 *mhu, struct mbox_chan *chan); + + void (*tx_startup)(struct mhuv2 *mhu, struct mbox_chan *chan); + void (*tx_shutdown)(struct mhuv2 *mhu, struct mbox_chan *chan); + int (*last_tx_done)(struct mhuv2 *mhu, struct mbox_chan *chan); + int (*send_data)(struct mhuv2 *mhu, struct mbox_chan *chan, void *arg); +}; + +/* + * MHUv2 mailbox channel's private information + * + * @ops: protocol specific ops for the channel. + * @ch_wn_idx: Channel window index allocated to the channel. + * @windows: Total number of windows consumed by the channel, only relevant + * in DATA_TRANSFER protocol. + * @doorbell: Doorbell bit number within the ch_wn_idx window, only relevant + * in DOORBELL protocol. + * @pending: Flag indicating pending doorbell interrupt, only relevant in + * DOORBELL protocol.
+ */ +struct mhuv2_mbox_chan_priv { + const struct mhuv2_protocol_ops *ops; + u32 ch_wn_idx; + union { + u32 windows; + struct { + u32 doorbell; + u32 pending; + }; + }; +}; + +/* Macro for reading a bitfield within a physically mapped packed struct */ +#define readl_relaxed_bitfield(_regptr, _type, _field) \ + ({ \ + u32 _regval; \ + _regval = readl_relaxed((_regptr)); \ + (*(_type *)(&_regval))._field; \ + }) + +/* Macro for writing a bitfield within a physically mapped packed struct */ +#define writel_relaxed_bitfield(_value, _regptr, _type, _field) \ + ({ \ + u32 _regval; \ + _regval = readl_relaxed(_regptr); \ + (*(_type *)(&_regval))._field = _value; \ + writel_relaxed(_regval, _regptr); \ + }) + + +/* =================== Doorbell transport protocol operations =============== */ + +static int mhuv2_doorbell_rx_startup(struct mhuv2 *mhu, struct mbox_chan *chan) +{ + struct mhuv2_mbox_chan_priv *priv = chan->con_priv; + + writel_relaxed(BIT(priv->doorbell), + &mhu->recv->ch_wn[priv->ch_wn_idx].mask_clear); + return 0; +} + +static void mhuv2_doorbell_rx_shutdown(struct mhuv2 *mhu, + struct mbox_chan *chan) +{ + struct mhuv2_mbox_chan_priv *priv = chan->con_priv; + + writel_relaxed(BIT(priv->doorbell), + &mhu->recv->ch_wn[priv->ch_wn_idx].mask_set); +} + +static void *mhuv2_doorbell_read_data(struct mhuv2 *mhu, struct mbox_chan *chan) +{ + struct mhuv2_mbox_chan_priv *priv = chan->con_priv; + + writel_relaxed(BIT(priv->doorbell), + &mhu->recv->ch_wn[priv->ch_wn_idx].stat_clear); + return NULL; +} + +static int mhuv2_doorbell_last_tx_done(struct mhuv2 *mhu, + struct mbox_chan *chan) +{ + struct mhuv2_mbox_chan_priv *priv = chan->con_priv; + + return !(readl_relaxed(&mhu->send->ch_wn[priv->ch_wn_idx].stat) & + BIT(priv->doorbell)); +} + +static int mhuv2_doorbell_send_data(struct mhuv2 *mhu, struct mbox_chan *chan, + void *arg) +{ + struct mhuv2_mbox_chan_priv *priv = chan->con_priv; + unsigned long flags; + + spin_lock_irqsave(&mhu->doorbell_pending_lock, flags); + + priv->pending = 1; + writel_relaxed(BIT(priv->doorbell), + &mhu->send->ch_wn[priv->ch_wn_idx].stat_set); + + spin_unlock_irqrestore(&mhu->doorbell_pending_lock, flags); + + return 0; +} + +static const struct mhuv2_protocol_ops mhuv2_doorbell_ops = { + .rx_startup = mhuv2_doorbell_rx_startup, + .rx_shutdown = mhuv2_doorbell_rx_shutdown, + .read_data = mhuv2_doorbell_read_data, + .last_tx_done = mhuv2_doorbell_last_tx_done, + .send_data = mhuv2_doorbell_send_data, +}; +#define IS_PROTOCOL_DOORBELL(_priv) (_priv->ops == &mhuv2_doorbell_ops) + +/* ============= Data transfer transport protocol operations ================ */ + +static int mhuv2_data_transfer_rx_startup(struct mhuv2 *mhu, + struct mbox_chan *chan) +{ + struct mhuv2_mbox_chan_priv *priv = chan->con_priv; + int i = priv->ch_wn_idx + priv->windows - 1; + + /* + * The protocol mandates that all but the last status register must be + * masked. 
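+ * Masked windows still latch incoming words in their stat registers; only the unmasked last window raises the interrupt, so a multi-word message is signalled once its final word lands.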
+ */ + writel_relaxed(0xFFFFFFFF, &mhu->recv->ch_wn[i].mask_clear); + return 0; +} + +static void mhuv2_data_transfer_rx_shutdown(struct mhuv2 *mhu, + struct mbox_chan *chan) +{ + struct mhuv2_mbox_chan_priv *priv = chan->con_priv; + int i = priv->ch_wn_idx + priv->windows - 1; + + writel_relaxed(0xFFFFFFFF, &mhu->recv->ch_wn[i].mask_set); +} + +static void *mhuv2_data_transfer_read_data(struct mhuv2 *mhu, + struct mbox_chan *chan) +{ + struct mhuv2_mbox_chan_priv *priv = chan->con_priv; + const int windows = priv->windows; + struct arm_mhuv2_mbox_msg *msg; + u32 *data; + int i, idx; + + msg = kzalloc(sizeof(*msg) + windows * MHUV2_STAT_BYTES, GFP_KERNEL); + if (!msg) + return ERR_PTR(-ENOMEM); + + data = msg->data = msg + 1; + msg->len = windows * MHUV2_STAT_BYTES; + + /* + * Messages are expected in order of most significant word to least + * significant word. Refer mhuv2_data_transfer_send_data() for more + * details. + * + * We also need to read the stat register instead of stat_masked, as we + * masked all but the last window. + * + * Last channel window must be cleared as the final operation. Upon + * clearing the last channel window register, which is unmasked in + * data-transfer protocol, the interrupt is de-asserted. + */ + for (i = 0; i < windows; i++) { + idx = priv->ch_wn_idx + i; + data[windows - 1 - i] = readl_relaxed(&mhu->recv->ch_wn[idx].stat); + writel_relaxed(0xFFFFFFFF, &mhu->recv->ch_wn[idx].stat_clear); + } + + return msg; +} + +static void mhuv2_data_transfer_tx_startup(struct mhuv2 *mhu, + struct mbox_chan *chan) +{ + struct mhuv2_mbox_chan_priv *priv = chan->con_priv; + int i = priv->ch_wn_idx + priv->windows - 1; + + /* Enable interrupts only for the last window */ + if (mhu->minor) { + writel_relaxed(0x1, &mhu->send->ch_wn[i].int_clr); + writel_relaxed(0x1, &mhu->send->ch_wn[i].int_en); + } +} + +static void mhuv2_data_transfer_tx_shutdown(struct mhuv2 *mhu, + struct mbox_chan *chan) +{ + struct mhuv2_mbox_chan_priv *priv = chan->con_priv; + int i = priv->ch_wn_idx + priv->windows - 1; + + if (mhu->minor) + writel_relaxed(0x0, &mhu->send->ch_wn[i].int_en); +} + +static int mhuv2_data_transfer_last_tx_done(struct mhuv2 *mhu, + struct mbox_chan *chan) +{ + struct mhuv2_mbox_chan_priv *priv = chan->con_priv; + int i = priv->ch_wn_idx + priv->windows - 1; + + /* Just checking the last channel window should be enough */ + return !readl_relaxed(&mhu->send->ch_wn[i].stat); +} + +/* + * Message will be transmitted from most significant to least significant word. + * This is to allow for messages shorter than channel windows to still trigger + * the receiver interrupt which gets activated when the last stat register is + * written. As an example, a 6-word message is to be written on a 4-channel MHU + * connection: Registers marked with '*' are masked, and will not generate an + * interrupt on the receiver side once written. 
+ * + * u32 *data = [0x00000001], [0x00000002], [0x00000003], [0x00000004], + * [0x00000005], [0x00000006] + * + * ROUND 1: + * stat reg To write Write sequence + * [ stat 3 ] <- [0x00000001] 4 <- triggers interrupt on receiver + * [ stat 2 ] <- [0x00000002] 3 + * [ stat 1 ] <- [0x00000003] 2 + * [ stat 0 ] <- [0x00000004] 1 + * + * data += 4 // Increment data pointer by number of stat regs + * + * ROUND 2: + * stat reg To write Write sequence + * [ stat 3 ] <- [0x00000005] 2 <- triggers interrupt on receiver + * [ stat 2 ] <- [0x00000006] 1 + * [ stat 1 ] <- [0x00000000] + * [ stat 0 ] <- [0x00000000] + */ +static int mhuv2_data_transfer_send_data(struct mhuv2 *mhu, + struct mbox_chan *chan, void *arg) +{ + const struct arm_mhuv2_mbox_msg *msg = arg; + int bytes_left = msg->len, bytes_to_send, bytes_in_round, i; + struct mhuv2_mbox_chan_priv *priv = chan->con_priv; + int windows = priv->windows; + u32 *data = msg->data, word; + + while (bytes_left) { + if (!data[0]) { + dev_err(mhu->mbox.dev, "Data aligned at first window can't be zero to guarantee interrupt generation at receiver"); + return -EINVAL; + } + + while(!mhuv2_data_transfer_last_tx_done(mhu, chan)) + continue; + + bytes_in_round = min(bytes_left, (int)(windows * MHUV2_STAT_BYTES)); + + for (i = windows - 1; i >= 0; i--) { + /* Data less than windows can transfer ? */ + if (unlikely(bytes_in_round <= i * MHUV2_STAT_BYTES)) + continue; + + word = data[i]; + bytes_to_send = bytes_in_round & (MHUV2_STAT_BYTES - 1); + if (unlikely(bytes_to_send)) + word &= LSB_MASK(bytes_to_send); + else + bytes_to_send = MHUV2_STAT_BYTES; + + writel_relaxed(word, &mhu->send->ch_wn[priv->ch_wn_idx + windows - 1 - i].stat_set); + bytes_left -= bytes_to_send; + bytes_in_round -= bytes_to_send; + } + + data += windows; + } + + return 0; +} + +static const struct mhuv2_protocol_ops mhuv2_data_transfer_ops = { + .rx_startup = mhuv2_data_transfer_rx_startup, + .rx_shutdown = mhuv2_data_transfer_rx_shutdown, + .read_data = mhuv2_data_transfer_read_data, + .tx_startup = mhuv2_data_transfer_tx_startup, + .tx_shutdown = mhuv2_data_transfer_tx_shutdown, + .last_tx_done = mhuv2_data_transfer_last_tx_done, + .send_data = mhuv2_data_transfer_send_data, +}; + +/* Interrupt handlers */ + +static struct mbox_chan *get_irq_chan_comb(struct mhuv2 *mhu, u32 __iomem *reg) +{ + struct mbox_chan *chans = mhu->mbox.chans; + int channel = 0, i, j, offset = 0, windows, protocol, ch_wn; + u32 stat; + + for (i = 0; i < MHUV2_CMB_INT_ST_REG_CNT; i++) { + stat = readl_relaxed(reg + i); + if (!stat) + continue; + + ch_wn = i * MHUV2_STAT_BITS + __builtin_ctz(stat); + + for (j = 0; j < mhu->length; j += 2) { + protocol = mhu->protocols[j]; + windows = mhu->protocols[j + 1]; + + if (ch_wn >= offset + windows) { + if (protocol == DOORBELL) + channel += MHUV2_STAT_BITS * windows; + else + channel++; + + offset += windows; + continue; + } + + /* Return first chan of the window in doorbell mode */ + if (protocol == DOORBELL) + channel += MHUV2_STAT_BITS * (ch_wn - offset); + + return &chans[channel]; + } + } + + return ERR_PTR(-EIO); +} + +static irqreturn_t mhuv2_sender_interrupt(int irq, void *data) +{ + struct mhuv2 *mhu = data; + struct device *dev = mhu->mbox.dev; + struct mhuv2_mbox_chan_priv *priv; + struct mbox_chan *chan; + unsigned long flags; + int i, found = 0; + u32 stat; + + chan = get_irq_chan_comb(mhu, mhu->send->chcomb_int_st); + if (IS_ERR(chan)) { + dev_warn(dev, "Failed to find channel for the Tx interrupt\n"); + return IRQ_NONE; + } + priv = chan->con_priv; + + if 
(!IS_PROTOCOL_DOORBELL(priv)) { + for (i = 0; i < priv->windows; i++) + writel_relaxed(1, &mhu->send->ch_wn[priv->ch_wn_idx + i].int_clr); + + if (chan->cl) { + mbox_chan_txdone(chan, 0); + return IRQ_HANDLED; + } + + dev_warn(dev, "Tx interrupt received on channel (%u) not currently attached to a mailbox client\n", + priv->ch_wn_idx); + return IRQ_NONE; + } + + /* Clear the interrupt first, so we don't miss any doorbell later */ + writel_relaxed(1, &mhu->send->ch_wn[priv->ch_wn_idx].int_clr); + + /* + * In Doorbell mode, make sure no new transitions happen while the + * interrupt handler is trying to find the finished doorbell tx + * operations, else we may think a few of the transfers were complete + * before they actually were. + */ + spin_lock_irqsave(&mhu->doorbell_pending_lock, flags); + + /* + * In case of doorbell mode, the first channel of the window is returned + * by get_irq_chan_comb(). Find all the pending channels here. + */ + stat = readl_relaxed(&mhu->send->ch_wn[priv->ch_wn_idx].stat); + + for (i = 0; i < MHUV2_STAT_BITS; i++) { + priv = chan[i].con_priv; + + /* Find cases where pending was 1, but stat's bit is cleared */ + if (priv->pending ^ ((stat >> i) & 0x1)) { + BUG_ON(!priv->pending); + + if (!chan->cl) { + dev_warn(dev, "Tx interrupt received on doorbell (%u : %u) channel not currently attached to a mailbox client\n", + priv->ch_wn_idx, i); + continue; + } + + mbox_chan_txdone(&chan[i], 0); + priv->pending = 0; + found++; + } + } + + spin_unlock_irqrestore(&mhu->doorbell_pending_lock, flags); + + if (!found) { + /* + * We may have already processed the doorbell in the previous + * iteration if the interrupt came right after we cleared it but + * before we read the stat register. + */ + dev_dbg(dev, "Couldn't find the doorbell (%u) for the Tx interrupt\n", + priv->ch_wn_idx); + return IRQ_NONE; + } + + return IRQ_HANDLED; +} + +static struct mbox_chan *get_irq_chan_comb_rx(struct mhuv2 *mhu) +{ + struct mhuv2_mbox_chan_priv *priv; + struct mbox_chan *chan; + u32 stat; + + chan = get_irq_chan_comb(mhu, mhu->recv->chcomb_int_st); + if (IS_ERR(chan)) + return chan; + + priv = chan->con_priv; + if (!IS_PROTOCOL_DOORBELL(priv)) + return chan; + + /* + * In case of doorbell mode, the first channel of the window is returned + * by the routine. Find the exact channel here. + */ + stat = readl_relaxed(&mhu->recv->ch_wn[priv->ch_wn_idx].stat_masked); + BUG_ON(!stat); + + return chan + __builtin_ctz(stat); +} + +static struct mbox_chan *get_irq_chan_stat_rx(struct mhuv2 *mhu) +{ + struct mbox_chan *chans = mhu->mbox.chans; + struct mhuv2_mbox_chan_priv *priv; + u32 stat; + int i = 0; + + while (i < mhu->mbox.num_chans) { + priv = chans[i].con_priv; + stat = readl_relaxed(&mhu->recv->ch_wn[priv->ch_wn_idx].stat_masked); + + if (stat) { + if (IS_PROTOCOL_DOORBELL(priv)) + i += __builtin_ctz(stat); + return &chans[i]; + } + + i += IS_PROTOCOL_DOORBELL(priv) ?
MHUV2_STAT_BITS : 1; + } + + return ERR_PTR(-EIO); +} + +static struct mbox_chan *get_irq_chan_rx(struct mhuv2 *mhu) +{ + if (!mhu->minor) + return get_irq_chan_stat_rx(mhu); + + return get_irq_chan_comb_rx(mhu); +} + +static irqreturn_t mhuv2_receiver_interrupt(int irq, void *arg) +{ + struct mhuv2 *mhu = arg; + struct mbox_chan *chan = get_irq_chan_rx(mhu); + struct device *dev = mhu->mbox.dev; + struct mhuv2_mbox_chan_priv *priv; + int ret = IRQ_NONE; + void *data; + + if (IS_ERR(chan)) { + dev_warn(dev, "Failed to find channel for the rx interrupt\n"); + return IRQ_NONE; + } + priv = chan->con_priv; + + /* Read and clear the data first */ + data = priv->ops->read_data(mhu, chan); + + if (!chan->cl) { + dev_warn(dev, "Received data on channel (%u) not currently attached to a mailbox client\n", + priv->ch_wn_idx); + } else if (IS_ERR(data)) { + dev_err(dev, "Failed to read data: %lu\n", PTR_ERR(data)); + } else { + mbox_chan_received_data(chan, data); + ret = IRQ_HANDLED; + } + + if (!IS_ERR(data)) + kfree(data); + + return ret; +} + +/* Sender and receiver ops */ +static bool mhuv2_sender_last_tx_done(struct mbox_chan *chan) +{ + struct mhuv2 *mhu = mhu_from_mbox(chan->mbox); + struct mhuv2_mbox_chan_priv *priv = chan->con_priv; + + return priv->ops->last_tx_done(mhu, chan); +} + +static int mhuv2_sender_send_data(struct mbox_chan *chan, void *data) +{ + struct mhuv2 *mhu = mhu_from_mbox(chan->mbox); + struct mhuv2_mbox_chan_priv *priv = chan->con_priv; + + if (!priv->ops->last_tx_done(mhu, chan)) + return -EBUSY; + + return priv->ops->send_data(mhu, chan, data); +} + +static int mhuv2_sender_startup(struct mbox_chan *chan) +{ + struct mhuv2 *mhu = mhu_from_mbox(chan->mbox); + struct mhuv2_mbox_chan_priv *priv = chan->con_priv; + + if (priv->ops->tx_startup) + priv->ops->tx_startup(mhu, chan); + return 0; +} + +static void mhuv2_sender_shutdown(struct mbox_chan *chan) +{ + struct mhuv2 *mhu = mhu_from_mbox(chan->mbox); + struct mhuv2_mbox_chan_priv *priv = chan->con_priv; + + if (priv->ops->tx_shutdown) + priv->ops->tx_shutdown(mhu, chan); +} + +static const struct mbox_chan_ops mhuv2_sender_ops = { + .send_data = mhuv2_sender_send_data, + .startup = mhuv2_sender_startup, + .shutdown = mhuv2_sender_shutdown, + .last_tx_done = mhuv2_sender_last_tx_done, +}; + +static int mhuv2_receiver_startup(struct mbox_chan *chan) +{ + struct mhuv2 *mhu = mhu_from_mbox(chan->mbox); + struct mhuv2_mbox_chan_priv *priv = chan->con_priv; + + return priv->ops->rx_startup(mhu, chan); +} + +static void mhuv2_receiver_shutdown(struct mbox_chan *chan) +{ + struct mhuv2 *mhu = mhu_from_mbox(chan->mbox); + struct mhuv2_mbox_chan_priv *priv = chan->con_priv; + + priv->ops->rx_shutdown(mhu, chan); +} + +static int mhuv2_receiver_send_data(struct mbox_chan *chan, void *data) +{ + dev_err(chan->mbox->dev, + "Trying to transmit on a receiver MHU frame\n"); + return -EIO; +} + +static bool mhuv2_receiver_last_tx_done(struct mbox_chan *chan) +{ + dev_err(chan->mbox->dev, "Trying to Tx poll on a receiver MHU frame\n"); + return true; +} + +static const struct mbox_chan_ops mhuv2_receiver_ops = { + .send_data = mhuv2_receiver_send_data, + .startup = mhuv2_receiver_startup, + .shutdown = mhuv2_receiver_shutdown, + .last_tx_done = mhuv2_receiver_last_tx_done, +}; + +static struct mbox_chan *mhuv2_mbox_of_xlate(struct mbox_controller *mbox, + const struct of_phandle_args *pa) +{ + struct mhuv2 *mhu = mhu_from_mbox(mbox); + struct mbox_chan *chans = mbox->chans; + int channel = 0, i, offset, doorbell, protocol, windows; + + 
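+	/* Two cells from the consumer's 'mboxes' entry: args[0] selects the channel window group (in arm,mhuv2-protocols order), args[1] the doorbell bit within it; the doorbell must be 0 for data-transfer groups. */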
if (pa->args_count != 2) + return ERR_PTR(-EINVAL); + + offset = pa->args[0]; + doorbell = pa->args[1]; + if (doorbell >= MHUV2_STAT_BITS) + goto out; + + for (i = 0; i < mhu->length; i += 2) { + protocol = mhu->protocols[i]; + windows = mhu->protocols[i + 1]; + + if (protocol == DOORBELL) { + if (offset < windows) + return &chans[channel + MHUV2_STAT_BITS * offset + doorbell]; + + channel += MHUV2_STAT_BITS * windows; + offset -= windows; + } else { + if (offset == 0) { + if (doorbell) + goto out; + + return &chans[channel]; + } + + channel++; + offset--; + } + } + +out: + dev_err(mbox->dev, "Couldn't xlate to a valid channel (%d: %d)\n", + pa->args[0], doorbell); + return ERR_PTR(-ENODEV); +} + +static int mhuv2_verify_protocol(struct mhuv2 *mhu) +{ + struct device *dev = mhu->mbox.dev; + int protocol, windows, channels = 0, total_windows = 0, i; + + for (i = 0; i < mhu->length; i += 2) { + protocol = mhu->protocols[i]; + windows = mhu->protocols[i + 1]; + + if (!windows) { + dev_err(dev, "Window size can't be zero (%d)\n", i); + return -EINVAL; + } + total_windows += windows; + + if (protocol == DOORBELL) { + channels += MHUV2_STAT_BITS * windows; + } else if (protocol == DATA_TRANSFER) { + channels++; + } else { + dev_err(dev, "Invalid protocol (%d) present in %s property at index %d\n", + protocol, MHUV2_PROTOCOL_PROP, i); + return -EINVAL; + } + } + + if (total_windows > mhu->windows) { + dev_err(dev, "Channel windows can't be more than what's implemented by the hardware ( %d: %d)\n", + total_windows, mhu->windows); + return -EINVAL; + } + + mhu->mbox.num_chans = channels; + return 0; +} + +static int mhuv2_allocate_channels(struct mhuv2 *mhu) +{ + struct mbox_controller *mbox = &mhu->mbox; + struct mhuv2_mbox_chan_priv *priv; + struct device *dev = mbox->dev; + struct mbox_chan *chans; + int protocol, windows = 0, next_window = 0, i, j, k; + + chans = devm_kcalloc(dev, mbox->num_chans, sizeof(*chans), GFP_KERNEL); + if (!chans) + return -ENOMEM; + + mbox->chans = chans; + + for (i = 0; i < mhu->length; i += 2) { + next_window += windows; + + protocol = mhu->protocols[i]; + windows = mhu->protocols[i + 1]; + + if (protocol == DATA_TRANSFER) { + priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->ch_wn_idx = next_window; + priv->ops = &mhuv2_data_transfer_ops; + priv->windows = windows; + chans++->con_priv = priv; + continue; + } + + for (j = 0; j < windows; j++) { + for (k = 0; k < MHUV2_STAT_BITS; k++) { + priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->ch_wn_idx = next_window + j; + priv->ops = &mhuv2_doorbell_ops; + priv->doorbell = k; + chans++->con_priv = priv; + } + + /* + * Permanently enable interrupt as we can't + * control it per doorbell. 
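+ * A sender window exposes a single int_en bit shared by all 32 doorbells.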
+ */ + if (mhu->frame == SENDER_FRAME && mhu->minor) + writel_relaxed(0x1, &mhu->send->ch_wn[priv->ch_wn_idx].int_en); + } + } + + /* Make sure we have initialized all channels */ + BUG_ON(chans - mbox->chans != mbox->num_chans); + + return 0; +} + +static int mhuv2_parse_channels(struct mhuv2 *mhu) +{ + struct device *dev = mhu->mbox.dev; + const struct device_node *np = dev->of_node; + int ret, count; + u32 *protocols; + + count = of_property_count_u32_elems(np, MHUV2_PROTOCOL_PROP); + if (count <= 0 || count % 2) { + dev_err(dev, "Invalid %s property (%d)\n", MHUV2_PROTOCOL_PROP, + count); + return -EINVAL; + } + + protocols = devm_kmalloc_array(dev, count, sizeof(*protocols), GFP_KERNEL); + if (!protocols) + return -ENOMEM; + + ret = of_property_read_u32_array(np, MHUV2_PROTOCOL_PROP, protocols, count); + if (ret) { + dev_err(dev, "Failed to read %s property: %d\n", + MHUV2_PROTOCOL_PROP, ret); + return ret; + } + + mhu->protocols = protocols; + mhu->length = count; + + ret = mhuv2_verify_protocol(mhu); + if (ret) + return ret; + + return mhuv2_allocate_channels(mhu); +} + +static int mhuv2_tx_init(struct amba_device *adev, struct mhuv2 *mhu, + void __iomem *reg) +{ + struct device *dev = mhu->mbox.dev; + int ret, i; + + mhu->frame = SENDER_FRAME; + mhu->mbox.ops = &mhuv2_sender_ops; + mhu->send = reg; + + mhu->windows = readl_relaxed_bitfield(&mhu->send->mhu_cfg, struct mhu_cfg_t, num_ch); + mhu->minor = readl_relaxed_bitfield(&mhu->send->aidr, struct aidr_t, arch_minor_rev); + + spin_lock_init(&mhu->doorbell_pending_lock); + + /* + * For minor version 1 and forward, tx interrupt is provided by + * the controller. + */ + if (mhu->minor && adev->irq[0]) { + ret = devm_request_threaded_irq(dev, adev->irq[0], NULL, + mhuv2_sender_interrupt, + IRQF_ONESHOT, "mhuv2-tx", mhu); + if (ret) { + dev_err(dev, "Failed to request tx IRQ, fallback to polling mode: %d\n", + ret); + } else { + mhu->mbox.txdone_irq = true; + mhu->mbox.txdone_poll = false; + mhu->irq = adev->irq[0]; + + writel_relaxed_bitfield(1, &mhu->send->int_en, struct int_en_t, chcomb); + + /* Disable all channel interrupts */ + for (i = 0; i < mhu->windows; i++) + writel_relaxed(0x0, &mhu->send->ch_wn[i].int_en); + + goto out; + } + } + + mhu->mbox.txdone_irq = false; + mhu->mbox.txdone_poll = true; + mhu->mbox.txpoll_period = 1; + +out: + /* Wait for receiver to be ready */ + writel_relaxed(0x1, &mhu->send->access_request); + while (!readl_relaxed(&mhu->send->access_ready)) + continue; + + return 0; +} + +static int mhuv2_rx_init(struct amba_device *adev, struct mhuv2 *mhu, + void __iomem *reg) +{ + struct device *dev = mhu->mbox.dev; + int ret, i; + + mhu->frame = RECEIVER_FRAME; + mhu->mbox.ops = &mhuv2_receiver_ops; + mhu->recv = reg; + + mhu->windows = readl_relaxed_bitfield(&mhu->recv->mhu_cfg, struct mhu_cfg_t, num_ch); + mhu->minor = readl_relaxed_bitfield(&mhu->recv->aidr, struct aidr_t, arch_minor_rev); + + mhu->irq = adev->irq[0]; + if (!mhu->irq) { + dev_err(dev, "Missing receiver IRQ\n"); + return -EINVAL; + } + + ret = devm_request_threaded_irq(dev, mhu->irq, NULL, + mhuv2_receiver_interrupt, IRQF_ONESHOT, + "mhuv2-rx", mhu); + if (ret) { + dev_err(dev, "Failed to request rx IRQ\n"); + return ret; + } + + /* Mask all the channel windows */ + for (i = 0; i < mhu->windows; i++) + writel_relaxed(0xFFFFFFFF, &mhu->recv->ch_wn[i].mask_set); + + if (mhu->minor) + writel_relaxed_bitfield(1, &mhu->recv->int_en, struct int_en_t, chcomb); + + return 0; +} + +static int mhuv2_probe(struct amba_device *adev, const struct 
amba_id *id) +{ + struct device *dev = &adev->dev; + const struct device_node *np = dev->of_node; + struct mhuv2 *mhu; + void __iomem *reg; + int ret = -EINVAL; + + reg = devm_of_iomap(dev, dev->of_node, 0, NULL); + if (IS_ERR(reg)) + return PTR_ERR(reg); + + mhu = devm_kzalloc(dev, sizeof(*mhu), GFP_KERNEL); + if (!mhu) + return -ENOMEM; + + mhu->mbox.dev = dev; + mhu->mbox.of_xlate = mhuv2_mbox_of_xlate; + + if (of_device_is_compatible(np, "arm,mhuv2-tx")) + ret = mhuv2_tx_init(adev, mhu, reg); + else if (of_device_is_compatible(np, "arm,mhuv2-rx")) + ret = mhuv2_rx_init(adev, mhu, reg); + else + dev_err(dev, "Invalid compatible property\n"); + + if (ret) + return ret; + + /* Channel windows can't be 0 */ + BUG_ON(!mhu->windows); + + ret = mhuv2_parse_channels(mhu); + if (ret) + return ret; + + amba_set_drvdata(adev, mhu); + + ret = devm_mbox_controller_register(dev, &mhu->mbox); + if (ret) + dev_err(dev, "failed to register ARM MHUv2 driver %d\n", ret); + + return ret; +} + +static void mhuv2_remove(struct amba_device *adev) +{ + struct mhuv2 *mhu = amba_get_drvdata(adev); + + if (mhu->frame == SENDER_FRAME) + writel_relaxed(0x0, &mhu->send->access_request); +} + +static const struct amba_id mhuv2_ids[] = { + { + /* 2.0 */ + .id = 0xbb0d1, + .mask = 0xfffff, + }, + { + /* 2.1 */ + .id = 0xbb076, + .mask = 0xfffff, + }, + { 0, 0 }, +}; +MODULE_DEVICE_TABLE(amba, mhuv2_ids); + +static struct amba_driver mhuv2_driver = { + .drv = { + .name = "arm-mhuv2", + }, + .id_table = mhuv2_ids, + .probe = mhuv2_probe, + .remove = mhuv2_remove, +}; +module_amba_driver(mhuv2_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("ARM MHUv2 Driver"); +MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>"); +MODULE_AUTHOR("Tushar Khandelwal <tushar.khandelwal@arm.com>"); diff --git a/drivers/mailbox/arm_mhuv3.c b/drivers/mailbox/arm_mhuv3.c new file mode 100644 index 000000000000..0910da67f8a1 --- /dev/null +++ b/drivers/mailbox/arm_mhuv3.c @@ -0,0 +1,1103 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ARM Message Handling Unit Version 3 (MHUv3) driver. + * + * Copyright (C) 2024 ARM Ltd. + * + * Based on ARM MHUv2 driver. + */ + +#include <linux/bitfield.h> +#include <linux/bitops.h> +#include <linux/bits.h> +#include <linux/cleanup.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/mailbox_controller.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> +#include <linux/sizes.h> +#include <linux/slab.h> +#include <linux/types.h> + +/* ====== MHUv3 Registers ====== */ + +/* Maximum number of Doorbell channel windows */ +#define MHUV3_DBCW_MAX 128 +/* Number of DBCH combined interrupt status registers */ +#define MHUV3_DBCH_CMB_INT_ST_REG_CNT 4 + +/* Number of FFCH combined interrupt status registers */ +#define MHUV3_FFCH_CMB_INT_ST_REG_CNT 2 + +#define MHUV3_FLAG_BITS 32 + +/* Not a typo ... 
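MHUv3 hardware reports arch_major_rev == 2 in AIDR, so major version 2 identifies an MHUv3 implementation.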
*/ +#define MHUV3_MAJOR_VERSION 2 + +enum { + MHUV3_MBOX_CELL_TYPE, + MHUV3_MBOX_CELL_CHWN, + MHUV3_MBOX_CELL_PARAM, + MHUV3_MBOX_CELLS +}; + +/* Padding bitfields/fields represents hole in the regs MMIO */ + +/* CTRL_Page */ +struct blk_id { +#define id GENMASK(3, 0) + u32 val; +} __packed; + +struct feat_spt0 { +#define dbe_spt GENMASK(3, 0) +#define fe_spt GENMASK(7, 4) +#define fce_spt GENMASK(11, 8) + u32 val; +} __packed; + +struct feat_spt1 { +#define auto_op_spt GENMASK(3, 0) + u32 val; +} __packed; + +struct dbch_cfg0 { +#define num_dbch GENMASK(7, 0) + u32 val; +} __packed; + +struct ffch_cfg0 { +#define num_ffch GENMASK(7, 0) +#define x8ba_spt BIT(8) +#define x16ba_spt BIT(9) +#define x32ba_spt BIT(10) +#define x64ba_spt BIT(11) +#define ffch_depth GENMASK(25, 16) + u32 val; +} __packed; + +struct fch_cfg0 { +#define num_fch GENMASK(9, 0) +#define fcgi_spt BIT(10) // MBX-only +#define num_fcg GENMASK(15, 11) +#define num_fch_per_grp GENMASK(20, 16) +#define fch_ws GENMASK(28, 21) + u32 val; +} __packed; + +struct ctrl { +#define op_req BIT(0) +#define ch_op_mask BIT(1) + u32 val; +} __packed; + +struct fch_ctrl { +#define _int_en BIT(2) + u32 val; +} __packed; + +struct iidr { +#define implementer GENMASK(11, 0) +#define revision GENMASK(15, 12) +#define variant GENMASK(19, 16) +#define product_id GENMASK(31, 20) + u32 val; +} __packed; + +struct aidr { +#define arch_minor_rev GENMASK(3, 0) +#define arch_major_rev GENMASK(7, 4) + u32 val; +} __packed; + +struct ctrl_page { + struct blk_id blk_id; + u8 pad[12]; + struct feat_spt0 feat_spt0; + struct feat_spt1 feat_spt1; + u8 pad1[8]; + struct dbch_cfg0 dbch_cfg0; + u8 pad2[12]; + struct ffch_cfg0 ffch_cfg0; + u8 pad3[12]; + struct fch_cfg0 fch_cfg0; + u8 pad4[188]; + struct ctrl x_ctrl; + /*-- MBX-only registers --*/ + u8 pad5[60]; + struct fch_ctrl fch_ctrl; + u32 fcg_int_en; + u8 pad6[696]; + /*-- End of MBX-only ---- */ + u32 dbch_int_st[MHUV3_DBCH_CMB_INT_ST_REG_CNT]; + u32 ffch_int_st[MHUV3_FFCH_CMB_INT_ST_REG_CNT]; + /*-- MBX-only registers --*/ + u8 pad7[88]; + u32 fcg_int_st; + u8 pad8[12]; + u32 fcg_grp_int_st[32]; + u8 pad9[2760]; + /*-- End of MBX-only ---- */ + struct iidr iidr; + struct aidr aidr; + u32 imp_def_id[12]; +} __packed; + +/* DBCW_Page */ + +struct xbcw_ctrl { +#define comb_en BIT(0) + u32 val; +} __packed; + +struct pdbcw_int { +#define tfr_ack BIT(0) + u32 val; +} __packed; + +struct pdbcw_page { + u32 st; + u8 pad[8]; + u32 set; + struct pdbcw_int int_st; + struct pdbcw_int int_clr; + struct pdbcw_int int_en; + struct xbcw_ctrl ctrl; +} __packed; + +struct mdbcw_page { + u32 st; + u32 st_msk; + u32 clr; + u8 pad[4]; + u32 msk_st; + u32 msk_set; + u32 msk_clr; + struct xbcw_ctrl ctrl; +} __packed; + +struct dummy_page { + u8 pad[SZ_4K]; +} __packed; + +struct mhu3_pbx_frame_reg { + struct ctrl_page ctrl; + struct pdbcw_page dbcw[MHUV3_DBCW_MAX]; + struct dummy_page ffcw; + struct dummy_page fcw; + u8 pad[SZ_4K * 11]; + struct dummy_page impdef; +} __packed; + +struct mhu3_mbx_frame_reg { + struct ctrl_page ctrl; + struct mdbcw_page dbcw[MHUV3_DBCW_MAX]; + struct dummy_page ffcw; + struct dummy_page fcw; + u8 pad[SZ_4K * 11]; + struct dummy_page impdef; +} __packed; + +/* Macro for reading a bitmask within a physically mapped packed struct */ +#define readl_relaxed_bitmask(_regptr, _bitmask) \ + ({ \ + unsigned long _rval; \ + _rval = readl_relaxed(_regptr); \ + FIELD_GET(_bitmask, _rval); \ + }) + +/* Macro for writing a bitmask within a physically mapped packed struct */ +#define 
writel_relaxed_bitmask(_value, _regptr, _bitmask)		\
+	({								\
+		unsigned long _rval;					\
+		typeof(_regptr) _rptr = _regptr;			\
+		typeof(_bitmask) _bmask = _bitmask;			\
+		_rval = readl_relaxed(_rptr);				\
+		_rval &= ~(_bmask);					\
+		_rval |= FIELD_PREP((unsigned long long)_bmask, _value);\
+		writel_relaxed(_rval, _rptr);				\
+	})
+
+/* ====== MHUv3 data structures ====== */
+
+enum mhuv3_frame {
+	PBX_FRAME,
+	MBX_FRAME,
+};
+
+static char *mhuv3_str[] = {
+	"PBX",
+	"MBX"
+};
+
+enum mhuv3_extension_type {
+	DBE_EXT,
+	FCE_EXT,
+	FE_EXT,
+	NUM_EXT
+};
+
+static char *mhuv3_ext_str[] = {
+	"DBE",
+	"FCE",
+	"FE"
+};
+
+struct mhuv3;
+
+/**
+ * struct mhuv3_protocol_ops - MHUv3 operations
+ *
+ * @rx_startup: Receiver startup callback.
+ * @rx_shutdown: Receiver shutdown callback.
+ * @read_data: Read available Sender in-band LE data (if any).
+ * @rx_complete: Acknowledge data reception to the Sender. Any out-of-band data
+ *		 must already have been retrieved before calling this.
+ * @tx_startup: Sender startup callback.
+ * @tx_shutdown: Sender shutdown callback.
+ * @last_tx_done: Report back to the Sender if the last transfer has completed.
+ * @send_data: Send data to the receiver.
+ *
+ * Each supported transport protocol provides its own implementation of
+ * these operations.
+ */
+struct mhuv3_protocol_ops {
+	int (*rx_startup)(struct mhuv3 *mhu, struct mbox_chan *chan);
+	void (*rx_shutdown)(struct mhuv3 *mhu, struct mbox_chan *chan);
+	void *(*read_data)(struct mhuv3 *mhu, struct mbox_chan *chan);
+	void (*rx_complete)(struct mhuv3 *mhu, struct mbox_chan *chan);
+	void (*tx_startup)(struct mhuv3 *mhu, struct mbox_chan *chan);
+	void (*tx_shutdown)(struct mhuv3 *mhu, struct mbox_chan *chan);
+	int (*last_tx_done)(struct mhuv3 *mhu, struct mbox_chan *chan);
+	int (*send_data)(struct mhuv3 *mhu, struct mbox_chan *chan, void *arg);
+};
+
+/**
+ * struct mhuv3_mbox_chan_priv - MHUv3 channel private information
+ *
+ * @ch_idx: Channel window index associated to this mailbox channel.
+ * @doorbell: Doorbell bit number within the @ch_idx window.
+ *	      Only relevant to Doorbell transport.
+ * @ops: Transport protocol specific operations for this channel.
+ *
+ * Transport specific data attached to mailbox channel priv data.
+ */
+struct mhuv3_mbox_chan_priv {
+	u32 ch_idx;
+	u32 doorbell;
+	const struct mhuv3_protocol_ops *ops;
+};
+
+/**
+ * struct mhuv3_extension - MHUv3 extension descriptor
+ *
+ * @type: Type of extension
+ * @num_chans: Max number of channels found for this extension.
+ * @base_ch_idx: First channel number assigned to this extension, picked from
+ *		 the set of all mailbox channels descriptors created.
+ * @mbox_of_xlate: Extension specific helper to parse DT and lookup associated
+ *		   channel from the related 'mboxes' property.
+ * @combined_irq_setup: Extension specific helper to setup the combined irq.
+ * @channels_init: Extension specific helper to initialize channels.
+ * @chan_from_comb_irq_get: Extension specific helper to lookup which channel
+ *			    triggered the combined irq.
+ * @pending_db: Array of per-channel pending doorbells.
+ * @pending_lock: Protect access to pending_db.
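+ *
+ * Note: @pending_db and @pending_lock are used only on the Sender (PBX)
+ * side of the Doorbell extension, to track in-flight transfers; see
+ * mhuv3_doorbell_send_data() and mhuv3_dbe_doorbell_lookup().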
+ */
+struct mhuv3_extension {
+	enum mhuv3_extension_type type;
+	unsigned int num_chans;
+	unsigned int base_ch_idx;
+	struct mbox_chan *(*mbox_of_xlate)(struct mhuv3 *mhu,
+					   unsigned int channel,
+					   unsigned int param);
+	void (*combined_irq_setup)(struct mhuv3 *mhu);
+	int (*channels_init)(struct mhuv3 *mhu);
+	struct mbox_chan *(*chan_from_comb_irq_get)(struct mhuv3 *mhu);
+	u32 pending_db[MHUV3_DBCW_MAX];
+	/* Protect access to pending_db */
+	spinlock_t pending_lock;
+};
+
+/**
+ * struct mhuv3 - MHUv3 mailbox controller data
+ *
+ * @frame: Frame type: MBX_FRAME or PBX_FRAME.
+ * @auto_op_full: Flag to indicate if the MHU supports AutoOp full mode.
+ * @major: MHUv3 controller architectural major version.
+ * @minor: MHUv3 controller architectural minor version.
+ * @implem: MHUv3 controller IIDR implementer.
+ * @rev: MHUv3 controller IIDR revision.
+ * @var: MHUv3 controller IIDR variant.
+ * @prod_id: MHUv3 controller IIDR product_id.
+ * @num_chans: The total number of channels discovered across all extensions.
+ * @cmb_irq: Combined IRQ number, if one was defined.
+ * @ctrl: A reference to the MHUv3 control page for this block.
+ * @pbx: Base address of the PBX register mapping region.
+ * @mbx: Base address of the MBX register mapping region.
+ * @ext: Array holding descriptors for each implemented extension found.
+ * @mbox: Mailbox controller belonging to the MHU frame.
+ */
+struct mhuv3 {
+	enum mhuv3_frame frame;
+	bool auto_op_full;
+	unsigned int major;
+	unsigned int minor;
+	unsigned int implem;
+	unsigned int rev;
+	unsigned int var;
+	unsigned int prod_id;
+	unsigned int num_chans;
+	int cmb_irq;
+	struct ctrl_page __iomem *ctrl;
+	union {
+		struct mhu3_pbx_frame_reg __iomem *pbx;
+		struct mhu3_mbx_frame_reg __iomem *mbx;
+	};
+	struct mhuv3_extension *ext[NUM_EXT];
+	struct mbox_controller mbox;
+};
+
+#define mhu_from_mbox(_mbox) container_of(_mbox, struct mhuv3, mbox)
+
+typedef int (*mhuv3_extension_initializer)(struct mhuv3 *mhu);
+
+/* =================== Doorbell transport protocol operations =============== */
+
+static void mhuv3_doorbell_tx_startup(struct mhuv3 *mhu, struct mbox_chan *chan)
+{
+	struct mhuv3_mbox_chan_priv *priv = chan->con_priv;
+
+	/* Enable Transfer Acknowledgment events */
+	writel_relaxed_bitmask(0x1, &mhu->pbx->dbcw[priv->ch_idx].int_en, tfr_ack);
+}
+
+static void mhuv3_doorbell_tx_shutdown(struct mhuv3 *mhu, struct mbox_chan *chan)
+{
+	struct mhuv3_mbox_chan_priv *priv = chan->con_priv;
+	struct mhuv3_extension *e = mhu->ext[DBE_EXT];
+	unsigned long flags;
+
+	/* Disable Channel Transfer Ack events */
+	writel_relaxed_bitmask(0x0, &mhu->pbx->dbcw[priv->ch_idx].int_en, tfr_ack);
+
+	/* Clear Channel Transfer Ack and pending doorbells */
+	writel_relaxed_bitmask(0x1, &mhu->pbx->dbcw[priv->ch_idx].int_clr, tfr_ack);
+	spin_lock_irqsave(&e->pending_lock, flags);
+	e->pending_db[priv->ch_idx] = 0;
+	spin_unlock_irqrestore(&e->pending_lock, flags);
+}
+
+static int mhuv3_doorbell_rx_startup(struct mhuv3 *mhu, struct mbox_chan *chan)
+{
+	struct mhuv3_mbox_chan_priv *priv = chan->con_priv;
+
+	/* Unmask Channel Transfer events */
+	writel_relaxed(BIT(priv->doorbell), &mhu->mbx->dbcw[priv->ch_idx].msk_clr);
+
+	return 0;
+}
+
+static void mhuv3_doorbell_rx_shutdown(struct mhuv3 *mhu,
+				       struct mbox_chan *chan)
+{
+	struct mhuv3_mbox_chan_priv *priv = chan->con_priv;
+
+	/* Mask Channel Transfer events */
+	writel_relaxed(BIT(priv->doorbell), &mhu->mbx->dbcw[priv->ch_idx].msk_set);
+}
+
+static void mhuv3_doorbell_rx_complete(struct mhuv3
*mhu, struct mbox_chan *chan) +{ + struct mhuv3_mbox_chan_priv *priv = chan->con_priv; + + /* Clearing the pending transfer generates the Channel Transfer Ack */ + writel_relaxed(BIT(priv->doorbell), &mhu->mbx->dbcw[priv->ch_idx].clr); +} + +static int mhuv3_doorbell_last_tx_done(struct mhuv3 *mhu, + struct mbox_chan *chan) +{ + struct mhuv3_mbox_chan_priv *priv = chan->con_priv; + int done; + + done = !(readl_relaxed(&mhu->pbx->dbcw[priv->ch_idx].st) & + BIT(priv->doorbell)); + if (done) { + struct mhuv3_extension *e = mhu->ext[DBE_EXT]; + unsigned long flags; + + /* Take care to clear the pending doorbell also when polling */ + spin_lock_irqsave(&e->pending_lock, flags); + e->pending_db[priv->ch_idx] &= ~BIT(priv->doorbell); + spin_unlock_irqrestore(&e->pending_lock, flags); + } + + return done; +} + +static int mhuv3_doorbell_send_data(struct mhuv3 *mhu, struct mbox_chan *chan, + void *arg) +{ + struct mhuv3_mbox_chan_priv *priv = chan->con_priv; + struct mhuv3_extension *e = mhu->ext[DBE_EXT]; + + scoped_guard(spinlock_irqsave, &e->pending_lock) { + /* Only one in-flight Transfer is allowed per-doorbell */ + if (e->pending_db[priv->ch_idx] & BIT(priv->doorbell)) + return -EBUSY; + + e->pending_db[priv->ch_idx] |= BIT(priv->doorbell); + } + + writel_relaxed(BIT(priv->doorbell), &mhu->pbx->dbcw[priv->ch_idx].set); + + return 0; +} + +static const struct mhuv3_protocol_ops mhuv3_doorbell_ops = { + .tx_startup = mhuv3_doorbell_tx_startup, + .tx_shutdown = mhuv3_doorbell_tx_shutdown, + .rx_startup = mhuv3_doorbell_rx_startup, + .rx_shutdown = mhuv3_doorbell_rx_shutdown, + .rx_complete = mhuv3_doorbell_rx_complete, + .last_tx_done = mhuv3_doorbell_last_tx_done, + .send_data = mhuv3_doorbell_send_data, +}; + +/* Sender and receiver mailbox ops */ +static bool mhuv3_sender_last_tx_done(struct mbox_chan *chan) +{ + struct mhuv3_mbox_chan_priv *priv = chan->con_priv; + struct mhuv3 *mhu = mhu_from_mbox(chan->mbox); + + return priv->ops->last_tx_done(mhu, chan); +} + +static int mhuv3_sender_send_data(struct mbox_chan *chan, void *data) +{ + struct mhuv3_mbox_chan_priv *priv = chan->con_priv; + struct mhuv3 *mhu = mhu_from_mbox(chan->mbox); + + if (!priv->ops->last_tx_done(mhu, chan)) + return -EBUSY; + + return priv->ops->send_data(mhu, chan, data); +} + +static int mhuv3_sender_startup(struct mbox_chan *chan) +{ + struct mhuv3_mbox_chan_priv *priv = chan->con_priv; + struct mhuv3 *mhu = mhu_from_mbox(chan->mbox); + + if (priv->ops->tx_startup) + priv->ops->tx_startup(mhu, chan); + + return 0; +} + +static void mhuv3_sender_shutdown(struct mbox_chan *chan) +{ + struct mhuv3_mbox_chan_priv *priv = chan->con_priv; + struct mhuv3 *mhu = mhu_from_mbox(chan->mbox); + + if (priv->ops->tx_shutdown) + priv->ops->tx_shutdown(mhu, chan); +} + +static const struct mbox_chan_ops mhuv3_sender_ops = { + .send_data = mhuv3_sender_send_data, + .startup = mhuv3_sender_startup, + .shutdown = mhuv3_sender_shutdown, + .last_tx_done = mhuv3_sender_last_tx_done, +}; + +static int mhuv3_receiver_startup(struct mbox_chan *chan) +{ + struct mhuv3_mbox_chan_priv *priv = chan->con_priv; + struct mhuv3 *mhu = mhu_from_mbox(chan->mbox); + + return priv->ops->rx_startup(mhu, chan); +} + +static void mhuv3_receiver_shutdown(struct mbox_chan *chan) +{ + struct mhuv3_mbox_chan_priv *priv = chan->con_priv; + struct mhuv3 *mhu = mhu_from_mbox(chan->mbox); + + priv->ops->rx_shutdown(mhu, chan); +} + +static int mhuv3_receiver_send_data(struct mbox_chan *chan, void *data) +{ + dev_err(chan->mbox->dev, + "Trying to transmit on a MBX 
MHUv3 frame\n"); + return -EIO; +} + +static bool mhuv3_receiver_last_tx_done(struct mbox_chan *chan) +{ + dev_err(chan->mbox->dev, "Trying to Tx poll on a MBX MHUv3 frame\n"); + return true; +} + +static const struct mbox_chan_ops mhuv3_receiver_ops = { + .send_data = mhuv3_receiver_send_data, + .startup = mhuv3_receiver_startup, + .shutdown = mhuv3_receiver_shutdown, + .last_tx_done = mhuv3_receiver_last_tx_done, +}; + +static struct mbox_chan *mhuv3_dbe_mbox_of_xlate(struct mhuv3 *mhu, + unsigned int channel, + unsigned int doorbell) +{ + struct mhuv3_extension *e = mhu->ext[DBE_EXT]; + struct mbox_controller *mbox = &mhu->mbox; + struct mbox_chan *chans = mbox->chans; + + if (channel >= e->num_chans || doorbell >= MHUV3_FLAG_BITS) { + dev_err(mbox->dev, "Couldn't xlate to a valid channel (%d: %d)\n", + channel, doorbell); + return ERR_PTR(-ENODEV); + } + + return &chans[e->base_ch_idx + channel * MHUV3_FLAG_BITS + doorbell]; +} + +static void mhuv3_dbe_combined_irq_setup(struct mhuv3 *mhu) +{ + struct mhuv3_extension *e = mhu->ext[DBE_EXT]; + int i; + + if (mhu->frame == PBX_FRAME) { + struct pdbcw_page __iomem *dbcw = mhu->pbx->dbcw; + + for (i = 0; i < e->num_chans; i++) { + writel_relaxed_bitmask(0x1, &dbcw[i].int_clr, tfr_ack); + writel_relaxed_bitmask(0x0, &dbcw[i].int_en, tfr_ack); + writel_relaxed_bitmask(0x1, &dbcw[i].ctrl, comb_en); + } + } else { + struct mdbcw_page __iomem *dbcw = mhu->mbx->dbcw; + + for (i = 0; i < e->num_chans; i++) { + writel_relaxed(0xFFFFFFFF, &dbcw[i].clr); + writel_relaxed(0xFFFFFFFF, &dbcw[i].msk_set); + writel_relaxed_bitmask(0x1, &dbcw[i].ctrl, comb_en); + } + } +} + +static int mhuv3_dbe_channels_init(struct mhuv3 *mhu) +{ + struct mhuv3_extension *e = mhu->ext[DBE_EXT]; + struct mbox_controller *mbox = &mhu->mbox; + struct mbox_chan *chans; + int i; + + chans = mbox->chans + mbox->num_chans; + e->base_ch_idx = mbox->num_chans; + for (i = 0; i < e->num_chans; i++) { + struct mhuv3_mbox_chan_priv *priv; + int k; + + for (k = 0; k < MHUV3_FLAG_BITS; k++) { + priv = devm_kmalloc(mbox->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->ch_idx = i; + priv->ops = &mhuv3_doorbell_ops; + priv->doorbell = k; + chans++->con_priv = priv; + mbox->num_chans++; + } + } + + spin_lock_init(&e->pending_lock); + + return 0; +} + +static bool mhuv3_dbe_doorbell_lookup(struct mhuv3 *mhu, unsigned int channel, + unsigned int *db) +{ + struct mhuv3_extension *e = mhu->ext[DBE_EXT]; + struct device *dev = mhu->mbox.dev; + u32 st; + + if (mhu->frame == PBX_FRAME) { + u32 active_dbs, fired_dbs; + + st = readl_relaxed_bitmask(&mhu->pbx->dbcw[channel].int_st, + tfr_ack); + if (!st) + goto err_spurious; + + active_dbs = readl_relaxed(&mhu->pbx->dbcw[channel].st); + scoped_guard(spinlock_irqsave, &e->pending_lock) { + fired_dbs = e->pending_db[channel] & ~active_dbs; + if (!fired_dbs) + goto err_spurious; + + *db = __ffs(fired_dbs); + e->pending_db[channel] &= ~BIT(*db); + } + fired_dbs &= ~BIT(*db); + /* Clear TFR Ack if no more doorbells pending */ + if (!fired_dbs) + writel_relaxed_bitmask(0x1, + &mhu->pbx->dbcw[channel].int_clr, + tfr_ack); + } else { + st = readl_relaxed(&mhu->mbx->dbcw[channel].st_msk); + if (!st) + goto err_spurious; + + *db = __ffs(st); + } + + return true; + +err_spurious: + dev_warn(dev, "Spurious IRQ on %s channel:%d\n", + mhuv3_str[mhu->frame], channel); + + return false; +} + +static struct mbox_chan *mhuv3_dbe_chan_from_comb_irq_get(struct mhuv3 *mhu) +{ + struct mhuv3_extension *e = mhu->ext[DBE_EXT]; + struct device *dev 
= mhu->mbox.dev; + int i; + + for (i = 0; i < MHUV3_DBCH_CMB_INT_ST_REG_CNT; i++) { + unsigned int channel, db; + u32 cmb_st; + + cmb_st = readl_relaxed(&mhu->ctrl->dbch_int_st[i]); + if (!cmb_st) + continue; + + channel = i * MHUV3_FLAG_BITS + __ffs(cmb_st); + if (channel >= e->num_chans) { + dev_err(dev, "Invalid %s channel:%d\n", + mhuv3_str[mhu->frame], channel); + return ERR_PTR(-EIO); + } + + if (!mhuv3_dbe_doorbell_lookup(mhu, channel, &db)) + continue; + + dev_dbg(dev, "Found %s ch[%d]/db[%d]\n", + mhuv3_str[mhu->frame], channel, db); + + return &mhu->mbox.chans[channel * MHUV3_FLAG_BITS + db]; + } + + return ERR_PTR(-EIO); +} + +static int mhuv3_dbe_init(struct mhuv3 *mhu) +{ + struct device *dev = mhu->mbox.dev; + struct mhuv3_extension *e; + + if (!readl_relaxed_bitmask(&mhu->ctrl->feat_spt0, dbe_spt)) + return 0; + + dev_dbg(dev, "%s: Initializing DBE Extension.\n", mhuv3_str[mhu->frame]); + + e = devm_kzalloc(dev, sizeof(*e), GFP_KERNEL); + if (!e) + return -ENOMEM; + + e->type = DBE_EXT; + /* Note that, by the spec, the number of channels is (num_dbch + 1) */ + e->num_chans = + readl_relaxed_bitmask(&mhu->ctrl->dbch_cfg0, num_dbch) + 1; + e->mbox_of_xlate = mhuv3_dbe_mbox_of_xlate; + e->combined_irq_setup = mhuv3_dbe_combined_irq_setup; + e->channels_init = mhuv3_dbe_channels_init; + e->chan_from_comb_irq_get = mhuv3_dbe_chan_from_comb_irq_get; + + mhu->num_chans += e->num_chans * MHUV3_FLAG_BITS; + mhu->ext[DBE_EXT] = e; + + dev_dbg(dev, "%s: found %d DBE channels.\n", + mhuv3_str[mhu->frame], e->num_chans); + + return 0; +} + +static int mhuv3_fce_init(struct mhuv3 *mhu) +{ + struct device *dev = mhu->mbox.dev; + + if (!readl_relaxed_bitmask(&mhu->ctrl->feat_spt0, fce_spt)) + return 0; + + dev_dbg(dev, "%s: FCE Extension not supported by driver.\n", + mhuv3_str[mhu->frame]); + + return 0; +} + +static int mhuv3_fe_init(struct mhuv3 *mhu) +{ + struct device *dev = mhu->mbox.dev; + + if (!readl_relaxed_bitmask(&mhu->ctrl->feat_spt0, fe_spt)) + return 0; + + dev_dbg(dev, "%s: FE Extension not supported by driver.\n", + mhuv3_str[mhu->frame]); + + return 0; +} + +static mhuv3_extension_initializer mhuv3_extension_init[NUM_EXT] = { + mhuv3_dbe_init, + mhuv3_fce_init, + mhuv3_fe_init, +}; + +static int mhuv3_initialize_channels(struct device *dev, struct mhuv3 *mhu) +{ + struct mbox_controller *mbox = &mhu->mbox; + int i, ret = 0; + + mbox->chans = devm_kcalloc(dev, mhu->num_chans, + sizeof(*mbox->chans), GFP_KERNEL); + if (!mbox->chans) + return dev_err_probe(dev, -ENOMEM, + "Failed to initialize channels\n"); + + for (i = 0; i < NUM_EXT && !ret; i++) + if (mhu->ext[i]) + ret = mhu->ext[i]->channels_init(mhu); + + return ret; +} + +static struct mbox_chan *mhuv3_mbox_of_xlate(struct mbox_controller *mbox, + const struct of_phandle_args *pa) +{ + struct mhuv3 *mhu = mhu_from_mbox(mbox); + unsigned int type, channel, param; + + if (pa->args_count != MHUV3_MBOX_CELLS) + return ERR_PTR(-EINVAL); + + type = pa->args[MHUV3_MBOX_CELL_TYPE]; + if (type >= NUM_EXT) + return ERR_PTR(-EINVAL); + + channel = pa->args[MHUV3_MBOX_CELL_CHWN]; + param = pa->args[MHUV3_MBOX_CELL_PARAM]; + + return mhu->ext[type]->mbox_of_xlate(mhu, channel, param); +} + +static void mhu_frame_cleanup_actions(void *data) +{ + struct mhuv3 *mhu = data; + + writel_relaxed_bitmask(0x0, &mhu->ctrl->x_ctrl, op_req); +} + +static int mhuv3_frame_init(struct mhuv3 *mhu, void __iomem *regs) +{ + struct device *dev = mhu->mbox.dev; + int i; + + mhu->ctrl = regs; + mhu->frame = readl_relaxed_bitmask(&mhu->ctrl->blk_id, 
id);
+	if (mhu->frame > MBX_FRAME)
+		return dev_err_probe(dev, -EINVAL,
+				     "Invalid Frame type: %d\n", mhu->frame);
+
+	mhu->major = readl_relaxed_bitmask(&mhu->ctrl->aidr, arch_major_rev);
+	mhu->minor = readl_relaxed_bitmask(&mhu->ctrl->aidr, arch_minor_rev);
+	mhu->implem = readl_relaxed_bitmask(&mhu->ctrl->iidr, implementer);
+	mhu->rev = readl_relaxed_bitmask(&mhu->ctrl->iidr, revision);
+	mhu->var = readl_relaxed_bitmask(&mhu->ctrl->iidr, variant);
+	mhu->prod_id = readl_relaxed_bitmask(&mhu->ctrl->iidr, product_id);
+	if (mhu->major != MHUV3_MAJOR_VERSION)
+		return dev_err_probe(dev, -EINVAL,
+				     "Unsupported MHU %s block - major:%d minor:%d\n",
+				     mhuv3_str[mhu->frame], mhu->major,
+				     mhu->minor);
+
+	mhu->auto_op_full =
+		!!readl_relaxed_bitmask(&mhu->ctrl->feat_spt1, auto_op_spt);
+	/* Request the PBX/MBX to remain operational */
+	if (mhu->auto_op_full) {
+		writel_relaxed_bitmask(0x1, &mhu->ctrl->x_ctrl, op_req);
+		devm_add_action_or_reset(dev, mhu_frame_cleanup_actions, mhu);
+	}
+
+	dev_dbg(dev,
+		"Found MHU %s block - major:%d minor:%d implem:0x%X rev:0x%X var:0x%X prod_id:0x%X\n",
+		mhuv3_str[mhu->frame], mhu->major, mhu->minor,
+		mhu->implem, mhu->rev, mhu->var, mhu->prod_id);
+
+	if (mhu->frame == PBX_FRAME)
+		mhu->pbx = regs;
+	else
+		mhu->mbx = regs;
+
+	for (i = 0; i < NUM_EXT; i++) {
+		int ret;
+
+		/*
+		 * Note that an extension's initialization fails only when its
+		 * initialization routine fails and the extension was found to
+		 * be supported in hardware and in software.
+		 */
+		ret = mhuv3_extension_init[i](mhu);
+		if (ret)
+			return dev_err_probe(dev, ret,
+					     "Failed to initialize %s %s\n",
+					     mhuv3_str[mhu->frame],
+					     mhuv3_ext_str[i]);
+	}
+
+	return 0;
+}
+
+static irqreturn_t mhuv3_pbx_comb_interrupt(int irq, void *arg)
+{
+	unsigned int i, found = 0;
+	struct mhuv3 *mhu = arg;
+	struct mbox_chan *chan;
+	struct device *dev;
+	int ret = IRQ_NONE;
+
+	dev = mhu->mbox.dev;
+	for (i = 0; i < NUM_EXT; i++) {
+		struct mhuv3_mbox_chan_priv *priv;
+
+		/* FCE does not participate in the PBX combined interrupt */
+		if (i == FCE_EXT || !mhu->ext[i])
+			continue;
+
+		chan = mhu->ext[i]->chan_from_comb_irq_get(mhu);
+		if (IS_ERR(chan))
+			continue;
+
+		found++;
+		priv = chan->con_priv;
+		if (!chan->cl) {
+			dev_warn(dev, "TX Ack on UNBOUND channel (%u)\n",
+				 priv->ch_idx);
+			continue;
+		}
+
+		mbox_chan_txdone(chan, 0);
+		ret = IRQ_HANDLED;
+	}
+
+	if (found == 0)
+		dev_warn_once(dev, "Failed to find channel for the TX interrupt\n");
+
+	return ret;
+}
+
+static irqreturn_t mhuv3_mbx_comb_interrupt(int irq, void *arg)
+{
+	unsigned int i, found = 0;
+	struct mhuv3 *mhu = arg;
+	struct mbox_chan *chan;
+	struct device *dev;
+	int ret = IRQ_NONE;
+
+	dev = mhu->mbox.dev;
+	for (i = 0; i < NUM_EXT; i++) {
+		struct mhuv3_mbox_chan_priv *priv;
+		void *data __free(kfree) = NULL;
+
+		if (!mhu->ext[i])
+			continue;
+
+		/* Process any extension which could be the source of the IRQ */
+		chan = mhu->ext[i]->chan_from_comb_irq_get(mhu);
+		if (IS_ERR(chan))
+			continue;
+
+		found++;
+		/* From here on we need to call rx_complete even on error */
+		priv = chan->con_priv;
+		if (!chan->cl) {
+			dev_warn(dev, "RX Data on UNBOUND channel (%u)\n",
+				 priv->ch_idx);
+			goto rx_ack;
+		}
+
+		/* Read optional in-band LE data first. */
+		if (priv->ops->read_data) {
+			data = priv->ops->read_data(mhu, chan);
+			if (IS_ERR(data)) {
+				dev_err(dev,
+					"Failed to read in-band data. 
err:%ld\n", + PTR_ERR(data)); + goto rx_ack; + } + } + + mbox_chan_received_data(chan, data); + ret = IRQ_HANDLED; + + /* + * Acknowledge transfer after any possible optional + * out-of-band data has also been retrieved via + * mbox_chan_received_data(). + */ +rx_ack: + if (priv->ops->rx_complete) + priv->ops->rx_complete(mhu, chan); + } + + if (found == 0) + dev_warn_once(dev, "Failed to find channel for the RX interrupt\n"); + + return ret; +} + +static int mhuv3_setup_pbx(struct mhuv3 *mhu) +{ + struct device *dev = mhu->mbox.dev; + + mhu->mbox.ops = &mhuv3_sender_ops; + + if (mhu->cmb_irq > 0) { + int ret, i; + + ret = devm_request_threaded_irq(dev, mhu->cmb_irq, NULL, + mhuv3_pbx_comb_interrupt, + IRQF_ONESHOT, "mhuv3-pbx", mhu); + if (ret) + return dev_err_probe(dev, ret, + "Failed to request PBX IRQ\n"); + + mhu->mbox.txdone_irq = true; + mhu->mbox.txdone_poll = false; + + for (i = 0; i < NUM_EXT; i++) + if (mhu->ext[i]) + mhu->ext[i]->combined_irq_setup(mhu); + + dev_dbg(dev, "MHUv3 PBX IRQs initialized.\n"); + + return 0; + } + + dev_info(dev, "Using PBX in Tx polling mode.\n"); + mhu->mbox.txdone_irq = false; + mhu->mbox.txdone_poll = true; + mhu->mbox.txpoll_period = 1; + + return 0; +} + +static int mhuv3_setup_mbx(struct mhuv3 *mhu) +{ + struct device *dev = mhu->mbox.dev; + int ret, i; + + mhu->mbox.ops = &mhuv3_receiver_ops; + + if (mhu->cmb_irq <= 0) + return dev_err_probe(dev, -EINVAL, + "MBX combined IRQ is missing !\n"); + + ret = devm_request_threaded_irq(dev, mhu->cmb_irq, NULL, + mhuv3_mbx_comb_interrupt, IRQF_ONESHOT, + "mhuv3-mbx", mhu); + if (ret) + return dev_err_probe(dev, ret, "Failed to request MBX IRQ\n"); + + for (i = 0; i < NUM_EXT; i++) + if (mhu->ext[i]) + mhu->ext[i]->combined_irq_setup(mhu); + + dev_dbg(dev, "MHUv3 MBX IRQs initialized.\n"); + + return ret; +} + +static int mhuv3_irqs_init(struct mhuv3 *mhu, struct platform_device *pdev) +{ + dev_dbg(mhu->mbox.dev, "Initializing %s block.\n", + mhuv3_str[mhu->frame]); + + if (mhu->frame == PBX_FRAME) { + mhu->cmb_irq = + platform_get_irq_byname_optional(pdev, "combined"); + return mhuv3_setup_pbx(mhu); + } + + mhu->cmb_irq = platform_get_irq_byname(pdev, "combined"); + return mhuv3_setup_mbx(mhu); +} + +static int mhuv3_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + void __iomem *regs; + struct mhuv3 *mhu; + int ret; + + mhu = devm_kzalloc(dev, sizeof(*mhu), GFP_KERNEL); + if (!mhu) + return -ENOMEM; + + regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(regs)) + return PTR_ERR(regs); + + mhu->mbox.dev = dev; + ret = mhuv3_frame_init(mhu, regs); + if (ret) + return ret; + + ret = mhuv3_irqs_init(mhu, pdev); + if (ret) + return ret; + + mhu->mbox.of_xlate = mhuv3_mbox_of_xlate; + ret = mhuv3_initialize_channels(dev, mhu); + if (ret) + return ret; + + ret = devm_mbox_controller_register(dev, &mhu->mbox); + if (ret) + return dev_err_probe(dev, ret, + "Failed to register ARM MHUv3 driver\n"); + + return ret; +} + +static const struct of_device_id mhuv3_of_match[] = { + { .compatible = "arm,mhuv3", .data = NULL }, + {} +}; +MODULE_DEVICE_TABLE(of, mhuv3_of_match); + +static struct platform_driver mhuv3_driver = { + .driver = { + .name = "arm-mhuv3-mailbox", + .of_match_table = mhuv3_of_match, + }, + .probe = mhuv3_probe, +}; +module_platform_driver(mhuv3_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("ARM MHUv3 Driver"); +MODULE_AUTHOR("Cristian Marussi <cristian.marussi@arm.com>"); diff --git a/drivers/mailbox/armada-37xx-rwtm-mailbox.c 
b/drivers/mailbox/armada-37xx-rwtm-mailbox.c new file mode 100644 index 000000000000..456a117a65fd --- /dev/null +++ b/drivers/mailbox/armada-37xx-rwtm-mailbox.c @@ -0,0 +1,206 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * rWTM BIU Mailbox driver for Armada 37xx + * + * Author: Marek Behún <kabel@kernel.org> + */ + +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/mailbox_controller.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/armada-37xx-rwtm-mailbox.h> + +#define DRIVER_NAME "armada-37xx-rwtm-mailbox" + +/* relative to rWTM BIU Mailbox Registers */ +#define RWTM_MBOX_PARAM(i) (0x0 + ((i) << 2)) +#define RWTM_MBOX_COMMAND 0x40 +#define RWTM_MBOX_RETURN_STATUS 0x80 +#define RWTM_MBOX_STATUS(i) (0x84 + ((i) << 2)) +#define RWTM_MBOX_FIFO_STATUS 0xc4 +#define FIFO_STS_RDY 0x100 +#define FIFO_STS_CNTR_MASK 0x7 +#define FIFO_STS_CNTR_MAX 4 + +#define RWTM_HOST_INT_RESET 0xc8 +#define RWTM_HOST_INT_MASK 0xcc +#define SP_CMD_COMPLETE BIT(0) +#define SP_CMD_QUEUE_FULL_ACCESS BIT(17) +#define SP_CMD_QUEUE_FULL BIT(18) + +struct a37xx_mbox { + struct device *dev; + struct mbox_controller controller; + void __iomem *base; + int irq; +}; + +static void a37xx_mbox_receive(struct mbox_chan *chan) +{ + struct a37xx_mbox *mbox = chan->con_priv; + struct armada_37xx_rwtm_rx_msg rx_msg; + int i; + + rx_msg.retval = readl(mbox->base + RWTM_MBOX_RETURN_STATUS); + for (i = 0; i < 16; ++i) + rx_msg.status[i] = readl(mbox->base + RWTM_MBOX_STATUS(i)); + + mbox_chan_received_data(chan, &rx_msg); +} + +static irqreturn_t a37xx_mbox_irq_handler(int irq, void *data) +{ + struct mbox_chan *chan = data; + struct a37xx_mbox *mbox = chan->con_priv; + u32 reg; + + reg = readl(mbox->base + RWTM_HOST_INT_RESET); + + if (reg & SP_CMD_COMPLETE) + a37xx_mbox_receive(chan); + + if (reg & (SP_CMD_QUEUE_FULL_ACCESS | SP_CMD_QUEUE_FULL)) + dev_err(mbox->dev, "Secure processor command queue full\n"); + + writel(reg, mbox->base + RWTM_HOST_INT_RESET); + if (reg) + mbox_chan_txdone(chan, 0); + + return reg ? 
IRQ_HANDLED : IRQ_NONE;
+}
+
+static int a37xx_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+	struct a37xx_mbox *mbox = chan->con_priv;
+	struct armada_37xx_rwtm_tx_msg *msg = data;
+	int i;
+	u32 reg;
+
+	if (!data)
+		return -EINVAL;
+
+	reg = readl(mbox->base + RWTM_MBOX_FIFO_STATUS);
+	if (!(reg & FIFO_STS_RDY))
+		dev_warn(mbox->dev, "Secure processor not ready\n");
+
+	if ((reg & FIFO_STS_CNTR_MASK) >= FIFO_STS_CNTR_MAX) {
+		dev_err(mbox->dev, "Secure processor command queue full\n");
+		return -EBUSY;
+	}
+
+	for (i = 0; i < 16; ++i)
+		writel(msg->args[i], mbox->base + RWTM_MBOX_PARAM(i));
+	writel(msg->command, mbox->base + RWTM_MBOX_COMMAND);
+
+	return 0;
+}
+
+static int a37xx_mbox_startup(struct mbox_chan *chan)
+{
+	struct a37xx_mbox *mbox = chan->con_priv;
+	u32 reg;
+	int ret;
+
+	ret = devm_request_irq(mbox->dev, mbox->irq, a37xx_mbox_irq_handler, 0,
+			       DRIVER_NAME, chan);
+	if (ret < 0) {
+		dev_err(mbox->dev, "Cannot request IRQ\n");
+		return ret;
+	}
+
+	/* Enable IRQ generation */
+	reg = readl(mbox->base + RWTM_HOST_INT_MASK);
+	reg &= ~(SP_CMD_COMPLETE | SP_CMD_QUEUE_FULL_ACCESS | SP_CMD_QUEUE_FULL);
+	writel(reg, mbox->base + RWTM_HOST_INT_MASK);
+
+	return 0;
+}
+
+static void a37xx_mbox_shutdown(struct mbox_chan *chan)
+{
+	u32 reg;
+	struct a37xx_mbox *mbox = chan->con_priv;
+
+	/* Disable IRQ generation */
+	reg = readl(mbox->base + RWTM_HOST_INT_MASK);
+	reg |= SP_CMD_COMPLETE | SP_CMD_QUEUE_FULL_ACCESS | SP_CMD_QUEUE_FULL;
+	writel(reg, mbox->base + RWTM_HOST_INT_MASK);
+
+	devm_free_irq(mbox->dev, mbox->irq, chan);
+}
+
+static const struct mbox_chan_ops a37xx_mbox_ops = {
+	.send_data = a37xx_mbox_send_data,
+	.startup = a37xx_mbox_startup,
+	.shutdown = a37xx_mbox_shutdown,
+};
+
+static int armada_37xx_mbox_probe(struct platform_device *pdev)
+{
+	struct a37xx_mbox *mbox;
+	struct mbox_chan *chans;
+	int ret;
+
+	mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+
+	/* Allocate the single channel */
+	chans = devm_kzalloc(&pdev->dev, sizeof(*chans), GFP_KERNEL);
+	if (!chans)
+		return -ENOMEM;
+
+	mbox->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(mbox->base))
+		return PTR_ERR(mbox->base);
+
+	mbox->irq = platform_get_irq(pdev, 0);
+	if (mbox->irq < 0)
+		return mbox->irq;
+
+	mbox->dev = &pdev->dev;
+
+	/* Hardware supports only one channel. 
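+	 * Completion is signalled back through the rWTM interrupt handled
+	 * in a37xx_mbox_irq_handler(), which is why txdone_irq is set below.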
*/ + chans[0].con_priv = mbox; + mbox->controller.dev = mbox->dev; + mbox->controller.num_chans = 1; + mbox->controller.chans = chans; + mbox->controller.ops = &a37xx_mbox_ops; + mbox->controller.txdone_irq = true; + + ret = devm_mbox_controller_register(mbox->dev, &mbox->controller); + if (ret) { + dev_err(&pdev->dev, "Could not register mailbox controller\n"); + return ret; + } + + platform_set_drvdata(pdev, mbox); + return ret; +} + + +static const struct of_device_id armada_37xx_mbox_match[] = { + { .compatible = "marvell,armada-3700-rwtm-mailbox" }, + { }, +}; + +MODULE_DEVICE_TABLE(of, armada_37xx_mbox_match); + +static struct platform_driver armada_37xx_mbox_driver = { + .probe = armada_37xx_mbox_probe, + .driver = { + .name = DRIVER_NAME, + .of_match_table = armada_37xx_mbox_match, + }, +}; + +module_platform_driver(armada_37xx_mbox_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("rWTM BIU Mailbox driver for Armada 37xx"); +MODULE_AUTHOR("Marek Behun <kabel@kernel.org>"); diff --git a/drivers/mailbox/ast2700-mailbox.c b/drivers/mailbox/ast2700-mailbox.c new file mode 100644 index 000000000000..83c6afe5411f --- /dev/null +++ b/drivers/mailbox/ast2700-mailbox.c @@ -0,0 +1,235 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright Aspeed Technology Inc. (C) 2025. All rights reserved + */ + +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/mailbox_controller.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +/* Each bit in the register represents an IPC ID */ +#define IPCR_TX_TRIG 0x00 +#define IPCR_ENABLE 0x04 +#define IPCR_STATUS 0x08 +#define RX_IRQ(n) BIT(n) +#define RX_IRQ_MASK 0xf +#define IPCR_DATA 0x10 + +struct ast2700_mbox_data { + u8 num_chans; + u8 msg_size; +}; + +struct ast2700_mbox { + struct mbox_controller mbox; + u8 msg_size; + void __iomem *tx_regs; + void __iomem *rx_regs; + spinlock_t lock; +}; + +static inline int ch_num(struct mbox_chan *chan) +{ + return chan - chan->mbox->chans; +} + +static inline bool ast2700_mbox_tx_done(struct ast2700_mbox *mb, int idx) +{ + return !(readl(mb->tx_regs + IPCR_STATUS) & BIT(idx)); +} + +static irqreturn_t ast2700_mbox_irq(int irq, void *p) +{ + struct ast2700_mbox *mb = p; + void __iomem *data_reg; + int num_words = mb->msg_size / sizeof(u32); + u32 *word_data; + u32 status; + int n, i; + + /* Only examine channels that are currently enabled. */ + status = readl(mb->rx_regs + IPCR_ENABLE) & + readl(mb->rx_regs + IPCR_STATUS); + + if (!(status & RX_IRQ_MASK)) + return IRQ_NONE; + + for (n = 0; n < mb->mbox.num_chans; ++n) { + struct mbox_chan *chan = &mb->mbox.chans[n]; + + if (!(status & RX_IRQ(n))) + continue; + + data_reg = mb->rx_regs + IPCR_DATA + mb->msg_size * n; + word_data = chan->con_priv; + /* Read the message data */ + for (i = 0; i < num_words; i++) + word_data[i] = readl(data_reg + i * sizeof(u32)); + + mbox_chan_received_data(chan, chan->con_priv); + + /* The IRQ can be cleared only once the FIFO is empty. 
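+		 * Draining IPCR_DATA above must therefore precede this write.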
*/ + writel(RX_IRQ(n), mb->rx_regs + IPCR_STATUS); + } + + return IRQ_HANDLED; +} + +static int ast2700_mbox_send_data(struct mbox_chan *chan, void *data) +{ + struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev); + int idx = ch_num(chan); + void __iomem *data_reg = mb->tx_regs + IPCR_DATA + mb->msg_size * idx; + u32 *word_data = data; + int num_words = mb->msg_size / sizeof(u32); + int i; + + if (!(readl(mb->tx_regs + IPCR_ENABLE) & BIT(idx))) { + dev_warn(mb->mbox.dev, "%s: Ch-%d not enabled yet\n", __func__, idx); + return -ENODEV; + } + + if (!(ast2700_mbox_tx_done(mb, idx))) { + dev_warn(mb->mbox.dev, "%s: Ch-%d last data has not finished\n", __func__, idx); + return -EBUSY; + } + + /* Write the message data */ + for (i = 0 ; i < num_words; i++) + writel(word_data[i], data_reg + i * sizeof(u32)); + + writel(BIT(idx), mb->tx_regs + IPCR_TX_TRIG); + dev_dbg(mb->mbox.dev, "%s: Ch-%d sent\n", __func__, idx); + + return 0; +} + +static int ast2700_mbox_startup(struct mbox_chan *chan) +{ + struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev); + int idx = ch_num(chan); + void __iomem *reg = mb->rx_regs + IPCR_ENABLE; + unsigned long flags; + + spin_lock_irqsave(&mb->lock, flags); + writel(readl(reg) | BIT(idx), reg); + spin_unlock_irqrestore(&mb->lock, flags); + + return 0; +} + +static void ast2700_mbox_shutdown(struct mbox_chan *chan) +{ + struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev); + int idx = ch_num(chan); + void __iomem *reg = mb->rx_regs + IPCR_ENABLE; + unsigned long flags; + + spin_lock_irqsave(&mb->lock, flags); + writel(readl(reg) & ~BIT(idx), reg); + spin_unlock_irqrestore(&mb->lock, flags); +} + +static bool ast2700_mbox_last_tx_done(struct mbox_chan *chan) +{ + struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev); + int idx = ch_num(chan); + + return ast2700_mbox_tx_done(mb, idx); +} + +static const struct mbox_chan_ops ast2700_mbox_chan_ops = { + .send_data = ast2700_mbox_send_data, + .startup = ast2700_mbox_startup, + .shutdown = ast2700_mbox_shutdown, + .last_tx_done = ast2700_mbox_last_tx_done, +}; + +static int ast2700_mbox_probe(struct platform_device *pdev) +{ + struct ast2700_mbox *mb; + const struct ast2700_mbox_data *dev_data; + struct device *dev = &pdev->dev; + int irq, ret; + + if (!pdev->dev.of_node) + return -ENODEV; + + dev_data = device_get_match_data(&pdev->dev); + + mb = devm_kzalloc(dev, sizeof(*mb), GFP_KERNEL); + if (!mb) + return -ENOMEM; + + mb->mbox.chans = devm_kcalloc(&pdev->dev, dev_data->num_chans, + sizeof(*mb->mbox.chans), GFP_KERNEL); + if (!mb->mbox.chans) + return -ENOMEM; + + /* con_priv of each channel is used to store the message received */ + for (int i = 0; i < dev_data->num_chans; i++) { + mb->mbox.chans[i].con_priv = devm_kcalloc(dev, dev_data->msg_size, + sizeof(u8), GFP_KERNEL); + if (!mb->mbox.chans[i].con_priv) + return -ENOMEM; + } + + platform_set_drvdata(pdev, mb); + + mb->tx_regs = devm_platform_ioremap_resource_byname(pdev, "tx"); + if (IS_ERR(mb->tx_regs)) + return PTR_ERR(mb->tx_regs); + + mb->rx_regs = devm_platform_ioremap_resource_byname(pdev, "rx"); + if (IS_ERR(mb->rx_regs)) + return PTR_ERR(mb->rx_regs); + + mb->msg_size = dev_data->msg_size; + mb->mbox.dev = dev; + mb->mbox.num_chans = dev_data->num_chans; + mb->mbox.ops = &ast2700_mbox_chan_ops; + mb->mbox.txdone_irq = false; + mb->mbox.txdone_poll = true; + mb->mbox.txpoll_period = 5; + spin_lock_init(&mb->lock); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + ret = devm_request_irq(dev, irq, ast2700_mbox_irq, 0, 
dev_name(dev), mb); + if (ret) + return ret; + + return devm_mbox_controller_register(dev, &mb->mbox); +} + +static const struct ast2700_mbox_data ast2700_dev_data = { + .num_chans = 4, + .msg_size = 0x20, +}; + +static const struct of_device_id ast2700_mbox_of_match[] = { + { .compatible = "aspeed,ast2700-mailbox", .data = &ast2700_dev_data }, + {} +}; +MODULE_DEVICE_TABLE(of, ast2700_mbox_of_match); + +static struct platform_driver ast2700_mbox_driver = { + .driver = { + .name = "ast2700-mailbox", + .of_match_table = ast2700_mbox_of_match, + }, + .probe = ast2700_mbox_probe, +}; +module_platform_driver(ast2700_mbox_driver); + +MODULE_AUTHOR("Jammy Huang <jammy_huang@aspeedtech.com>"); +MODULE_DESCRIPTION("ASPEED AST2700 IPC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c index d713271ebf7c..41f79e51d9e5 100644 --- a/drivers/mailbox/bcm-flexrm-mailbox.c +++ b/drivers/mailbox/bcm-flexrm-mailbox.c @@ -1,15 +1,5 @@ -/* - * Copyright (C) 2017 Broadcom - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright (C) 2017 Broadcom /* * Broadcom FlexRM Mailbox Driver @@ -296,8 +286,6 @@ struct flexrm_mbox { struct dma_pool *bd_pool; struct dma_pool *cmpl_pool; struct dentry *root; - struct dentry *config; - struct dentry *stats; struct mbox_controller controller; }; @@ -425,7 +413,7 @@ static void flexrm_enqueue_desc(u32 nhpos, u32 nhcnt, u32 reqid, * * In general use, number of non-HEADER descriptors can easily go * beyond 31. To tackle this situation, we have packet (or request) - * extenstion bits (STARTPKT and ENDPKT) in the HEADER descriptor. + * extension bits (STARTPKT and ENDPKT) in the HEADER descriptor. * * To use packet extension, the first HEADER descriptor of request * (or packet) will have STARTPKT=1 and ENDPKT=0. The intermediate @@ -634,15 +622,15 @@ static int flexrm_spu_dma_map(struct device *dev, struct brcm_message *msg) rc = dma_map_sg(dev, msg->spu.src, sg_nents(msg->spu.src), DMA_TO_DEVICE); - if (rc < 0) - return rc; + if (!rc) + return -EIO; rc = dma_map_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst), DMA_FROM_DEVICE); - if (rc < 0) { + if (!rc) { dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src), DMA_TO_DEVICE); - return rc; + return -EIO; } return 0; @@ -1097,7 +1085,7 @@ static int flexrm_process_completions(struct flexrm_ring *ring) /* * Get current completion read and write offset * - * Note: We should read completion write pointer atleast once + * Note: We should read completion write pointer at least once * after we get a MSI interrupt because HW maintains internal * MSI status which will allow next MSI interrupt only after * completion write pointer is read. 
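+	 * (i.e. skipping this read would prevent the hardware from raising
+	 * any further MSI for this ring)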
@@ -1165,8 +1153,7 @@ static int flexrm_process_completions(struct flexrm_ring *ring) static int flexrm_debugfs_conf_show(struct seq_file *file, void *offset) { - struct platform_device *pdev = to_platform_device(file->private); - struct flexrm_mbox *mbox = platform_get_drvdata(pdev); + struct flexrm_mbox *mbox = dev_get_drvdata(file->private); /* Write config in file */ flexrm_write_config_in_seqfile(mbox, file); @@ -1176,8 +1163,7 @@ static int flexrm_debugfs_conf_show(struct seq_file *file, void *offset) static int flexrm_debugfs_stats_show(struct seq_file *file, void *offset) { - struct platform_device *pdev = to_platform_device(file->private); - struct flexrm_mbox *mbox = platform_get_drvdata(pdev); + struct flexrm_mbox *mbox = dev_get_drvdata(file->private); /* Write stats in file */ flexrm_write_stats_in_seqfile(mbox, file); @@ -1302,7 +1288,7 @@ static int flexrm_startup(struct mbox_chan *chan) val = (num_online_cpus() < val) ? val / num_online_cpus() : 1; cpumask_set_cpu((ring->num / val) % num_online_cpus(), &ring->irq_aff_hint); - ret = irq_set_affinity_hint(ring->irq, &ring->irq_aff_hint); + ret = irq_update_affinity_hint(ring->irq, &ring->irq_aff_hint); if (ret) { dev_err(ring->mbox->dev, "failed to set IRQ affinity hint for ring%d\n", @@ -1396,9 +1382,9 @@ static void flexrm_shutdown(struct mbox_chan *chan) /* Clear ring flush state */ timeout = 1000; /* timeout of 1s */ - writel_relaxed(0x0, ring + RING_CONTROL); + writel_relaxed(0x0, ring->regs + RING_CONTROL); do { - if (!(readl_relaxed(ring + RING_FLUSH_DONE) & + if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) & FLUSH_DONE_MASK)) break; mdelay(1); @@ -1429,7 +1415,7 @@ static void flexrm_shutdown(struct mbox_chan *chan) /* Release IRQ */ if (ring->irq_requested) { - irq_set_affinity_hint(ring->irq, NULL); + irq_update_affinity_hint(ring->irq, NULL); free_irq(ring->irq, ring); ring->irq_requested = false; } @@ -1488,7 +1474,7 @@ static void flexrm_mbox_msi_write(struct msi_desc *desc, struct msi_msg *msg) { struct device *dev = msi_desc_to_dev(desc); struct flexrm_mbox *mbox = dev_get_drvdata(dev); - struct flexrm_ring *ring = &mbox->rings[desc->platform.msi_index]; + struct flexrm_ring *ring = &mbox->rings[desc->msi_index]; /* Configure per-Ring MSI registers */ writel_relaxed(msg->address_lo, ring->regs + RING_MSI_ADDR_LS); @@ -1501,7 +1487,6 @@ static int flexrm_mbox_probe(struct platform_device *pdev) int index, ret = 0; void __iomem *regs; void __iomem *regs_end; - struct msi_desc *desc; struct resource *iomem; struct flexrm_ring *ring; struct flexrm_mbox *mbox; @@ -1516,18 +1501,13 @@ static int flexrm_mbox_probe(struct platform_device *pdev) mbox->dev = dev; platform_set_drvdata(pdev, mbox); - /* Get resource for registers */ - iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + /* Get resource for registers and map registers of all rings */ + mbox->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &iomem); if (!iomem || (resource_size(iomem) < RING_REGS_SIZE)) { ret = -ENODEV; goto fail; - } - - /* Map registers of all rings */ - mbox->regs = devm_ioremap_resource(&pdev->dev, iomem); - if (IS_ERR(mbox->regs)) { + } else if (IS_ERR(mbox->regs)) { ret = PTR_ERR(mbox->regs); - dev_err(&pdev->dev, "Failed to remap mailbox regs: %d\n", ret); goto fail; } regs_end = mbox->regs + resource_size(iomem); @@ -1607,16 +1587,14 @@ static int flexrm_mbox_probe(struct platform_device *pdev) } /* Allocate platform MSIs for each ring */ - ret = platform_msi_domain_alloc_irqs(dev, mbox->num_rings, - 
flexrm_mbox_msi_write); + ret = platform_device_msi_init_and_alloc_irqs(dev, mbox->num_rings, + flexrm_mbox_msi_write); if (ret) goto fail_destroy_cmpl_pool; /* Save alloced IRQ numbers for each ring */ - for_each_msi_entry(desc, dev) { - ring = &mbox->rings[desc->platform.msi_index]; - ring->irq = desc->irq; - } + for (index = 0; index < mbox->num_rings; index++) + mbox->rings[index].irq = msi_get_virq(dev, index); /* Check availability of debugfs */ if (!debugfs_initialized()) @@ -1624,28 +1602,15 @@ static int flexrm_mbox_probe(struct platform_device *pdev) /* Create debugfs root entry */ mbox->root = debugfs_create_dir(dev_name(mbox->dev), NULL); - if (IS_ERR_OR_NULL(mbox->root)) { - ret = PTR_ERR_OR_ZERO(mbox->root); - goto fail_free_msis; - } /* Create debugfs config entry */ - mbox->config = debugfs_create_devm_seqfile(mbox->dev, - "config", mbox->root, - flexrm_debugfs_conf_show); - if (IS_ERR_OR_NULL(mbox->config)) { - ret = PTR_ERR_OR_ZERO(mbox->config); - goto fail_free_debugfs_root; - } + debugfs_create_devm_seqfile(mbox->dev, "config", mbox->root, + flexrm_debugfs_conf_show); /* Create debugfs stats entry */ - mbox->stats = debugfs_create_devm_seqfile(mbox->dev, - "stats", mbox->root, - flexrm_debugfs_stats_show); - if (IS_ERR_OR_NULL(mbox->stats)) { - ret = PTR_ERR_OR_ZERO(mbox->stats); - goto fail_free_debugfs_root; - } + debugfs_create_devm_seqfile(mbox->dev, "stats", mbox->root, + flexrm_debugfs_stats_show); + skip_debugfs: /* Initialize mailbox controller */ @@ -1676,8 +1641,7 @@ skip_debugfs: fail_free_debugfs_root: debugfs_remove_recursive(mbox->root); -fail_free_msis: - platform_msi_domain_free_irqs(dev); + platform_device_msi_free_irqs_all(dev); fail_destroy_cmpl_pool: dma_pool_destroy(mbox->cmpl_pool); fail_destroy_bd_pool: @@ -1686,19 +1650,17 @@ fail: return ret; } -static int flexrm_mbox_remove(struct platform_device *pdev) +static void flexrm_mbox_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct flexrm_mbox *mbox = platform_get_drvdata(pdev); debugfs_remove_recursive(mbox->root); - platform_msi_domain_free_irqs(dev); + platform_device_msi_free_irqs_all(dev); dma_pool_destroy(mbox->cmpl_pool); dma_pool_destroy(mbox->bd_pool); - - return 0; } static const struct of_device_id flexrm_mbox_of_match[] = { diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c index ccf3d62af7e7..406bc41cba60 100644 --- a/drivers/mailbox/bcm-pdc-mailbox.c +++ b/drivers/mailbox/bcm-pdc-mailbox.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2016 Broadcom - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation (the "GPL"). - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 (GPLv2) for more details. - * - * You should have received a copy of the GNU General Public License - * version 2 (GPLv2) along with this source code. 
*/ /* @@ -44,10 +33,9 @@ #include <linux/interrupt.h> #include <linux/wait.h> #include <linux/platform_device.h> +#include <linux/property.h> #include <linux/io.h> #include <linux/of.h> -#include <linux/of_device.h> -#include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/mailbox_controller.h> #include <linux/mailbox/brcm-message.h> @@ -55,6 +43,7 @@ #include <linux/dma-direction.h> #include <linux/dma-mapping.h> #include <linux/dmapool.h> +#include <linux/workqueue.h> #define PDC_SUCCESS 0 @@ -169,10 +158,6 @@ enum pdc_hw { PDC_HW /* PDC/MDE hardware (i.e. Northstar 2, Pegasus) */ }; -struct pdc_dma_map { - void *ctx; /* opaque context associated with frame */ -}; - /* dma descriptor */ struct dma64dd { u32 ctrl1; /* misc control bits */ @@ -305,8 +290,8 @@ struct pdc_state { unsigned int pdc_irq; - /* tasklet for deferred processing after DMA rx interrupt */ - struct tasklet_struct rx_tasklet; + /* work for deferred processing after DMA rx interrupt */ + struct work_struct rx_work; /* Number of bytes of receive status prior to each rx frame */ u32 rx_status_len; @@ -406,8 +391,6 @@ struct pdc_state { */ struct scatterlist *src_sg[PDC_RING_ENTRIES]; - struct dentry *debugfs_stats; /* debug FS stats file for this PDC */ - /* counters */ u32 pdc_requests; /* number of request messages submitted */ u32 pdc_replies; /* number of reply messages received */ @@ -449,33 +432,33 @@ static ssize_t pdc_debugfs_read(struct file *filp, char __user *ubuf, pdcs = filp->private_data; out_offset = 0; - out_offset += snprintf(buf + out_offset, out_count - out_offset, + out_offset += scnprintf(buf + out_offset, out_count - out_offset, "SPU %u stats:\n", pdcs->pdc_idx); - out_offset += snprintf(buf + out_offset, out_count - out_offset, + out_offset += scnprintf(buf + out_offset, out_count - out_offset, "PDC requests....................%u\n", pdcs->pdc_requests); - out_offset += snprintf(buf + out_offset, out_count - out_offset, + out_offset += scnprintf(buf + out_offset, out_count - out_offset, "PDC responses...................%u\n", pdcs->pdc_replies); - out_offset += snprintf(buf + out_offset, out_count - out_offset, + out_offset += scnprintf(buf + out_offset, out_count - out_offset, "Tx not done.....................%u\n", pdcs->last_tx_not_done); - out_offset += snprintf(buf + out_offset, out_count - out_offset, + out_offset += scnprintf(buf + out_offset, out_count - out_offset, "Tx ring full....................%u\n", pdcs->tx_ring_full); - out_offset += snprintf(buf + out_offset, out_count - out_offset, + out_offset += scnprintf(buf + out_offset, out_count - out_offset, "Rx ring full....................%u\n", pdcs->rx_ring_full); - out_offset += snprintf(buf + out_offset, out_count - out_offset, + out_offset += scnprintf(buf + out_offset, out_count - out_offset, "Tx desc write fail. Ring full...%u\n", pdcs->txnobuf); - out_offset += snprintf(buf + out_offset, out_count - out_offset, + out_offset += scnprintf(buf + out_offset, out_count - out_offset, "Rx desc write fail. 
Ring full...%u\n", pdcs->rxnobuf); - out_offset += snprintf(buf + out_offset, out_count - out_offset, + out_offset += scnprintf(buf + out_offset, out_count - out_offset, "Receive overflow................%u\n", pdcs->rx_oflow); - out_offset += snprintf(buf + out_offset, out_count - out_offset, + out_offset += scnprintf(buf + out_offset, out_count - out_offset, "Num frags in rx ring............%u\n", NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr, pdcs->nrxpost)); @@ -512,9 +495,8 @@ static void pdc_setup_debugfs(struct pdc_state *pdcs) debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL); /* S_IRUSR == 0400 */ - pdcs->debugfs_stats = debugfs_create_file(spu_stats_name, 0400, - debugfs_dir, pdcs, - &pdc_debugfs_stats); + debugfs_create_file(spu_stats_name, 0400, debugfs_dir, pdcs, + &pdc_debugfs_stats); } static void pdc_free_debugfs(void) @@ -693,7 +675,7 @@ pdc_receive(struct pdc_state *pdcs) /* read last_rx_curr from register once */ pdcs->last_rx_curr = - (ioread32(&pdcs->rxregs_64->status0) & + (ioread32((const void __iomem *)&pdcs->rxregs_64->status0) & CRYPTO_D64_RS0_CD_MASK) / RING_ENTRY_SIZE; do { @@ -708,7 +690,7 @@ pdc_receive(struct pdc_state *pdcs) * pdc_tx_list_sg_add() - Add the buffers in a scatterlist to the transmit * descriptors for a given SPU. The scatterlist buffers contain the data for a * SPU request message. - * @spu_idx: The index of the SPU to submit the request to, [0, max_spu) + * @pdcs: PDC state for the SPU that will process this request * @sg: Scatterlist whose buffers contain part of the SPU request * * If a scatterlist buffer is larger than PDC_DMA_BUF_MAX, multiple descriptors @@ -875,7 +857,7 @@ static int pdc_rx_list_init(struct pdc_state *pdcs, struct scatterlist *dst_sg, * pdc_rx_list_sg_add() - Add the buffers in a scatterlist to the receive * descriptors for a given SPU. The caller must have already DMA mapped the * scatterlist. - * @spu_idx: Indicates which SPU the buffers are for + * @pdcs: PDC state for the SPU that will process this request * @sg: Scatterlist whose buffers are added to the receive ring * * If a receive buffer in the scatterlist is larger than PDC_DMA_BUF_MAX, @@ -967,18 +949,18 @@ static irqreturn_t pdc_irq_handler(int irq, void *data) iowrite32(intstatus, pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET); /* Wakeup IRQ thread */ - tasklet_schedule(&pdcs->rx_tasklet); + queue_work(system_bh_wq, &pdcs->rx_work); return IRQ_HANDLED; } /** - * pdc_tasklet_cb() - Tasklet callback that runs the deferred processing after + * pdc_work_cb() - Work callback that runs the deferred processing after * a DMA receive interrupt. Reenables the receive interrupt. 
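+ * (the PDC state is recovered from the embedded work_struct via from_work())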
- * @data: PDC state structure
+ * @t: Pointer to the rx_work member of the PDC state structure
  */
-static void pdc_tasklet_cb(unsigned long data)
+static void pdc_work_cb(struct work_struct *t)
 {
-	struct pdc_state *pdcs = (struct pdc_state *)data;
+	struct pdc_state *pdcs = from_work(pdcs, t, rx_work);
 
 	pdc_receive(pdcs);
 
@@ -1508,7 +1490,6 @@ static int pdc_dt_read(struct platform_device *pdev, struct pdc_state *pdcs)
 {
 	struct device *dev = &pdev->dev;
 	struct device_node *dn = pdev->dev.of_node;
-	const struct of_device_id *match;
 	const int *hw_type;
 	int err;
 
@@ -1523,11 +1504,9 @@ static int pdc_dt_read(struct platform_device *pdev, struct pdc_state *pdcs)
 
 	pdcs->hw_type = PDC_HW;
 
-	match = of_match_device(of_match_ptr(pdc_mbox_of_match), dev);
-	if (match != NULL) {
-		hw_type = match->data;
+	hw_type = device_get_match_data(dev);
+	if (hw_type)
 		pdcs->hw_type = *hw_type;
-	}
 
 	return 0;
 }
@@ -1580,20 +1559,13 @@ static int pdc_probe(struct platform_device *pdev)
 	if (err)
 		goto cleanup_ring_pool;
 
-	pdc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!pdc_regs) {
-		err = -ENODEV;
-		goto cleanup_ring_pool;
-	}
-	dev_dbg(dev, "PDC register region res.start = %pa, res.end = %pa",
-		&pdc_regs->start, &pdc_regs->end);
-
-	pdcs->pdc_reg_vbase = devm_ioremap_resource(&pdev->dev, pdc_regs);
+	pdcs->pdc_reg_vbase = devm_platform_get_and_ioremap_resource(pdev, 0, &pdc_regs);
 	if (IS_ERR(pdcs->pdc_reg_vbase)) {
 		err = PTR_ERR(pdcs->pdc_reg_vbase);
-		dev_err(&pdev->dev, "Failed to map registers: %d\n", err);
 		goto cleanup_ring_pool;
 	}
+	dev_dbg(dev, "PDC register region res.start = %pa, res.end = %pa",
+		&pdc_regs->start, &pdc_regs->end);
 
 	/* create rx buffer pool after dt read to know how big buffers are */
 	err = pdc_rx_buf_pool_create(pdcs);
@@ -1602,8 +1574,8 @@
 
 	pdc_hw_init(pdcs);
 
-	/* Init tasklet for deferred DMA rx processing */
-	tasklet_init(&pdcs->rx_tasklet, pdc_tasklet_cb, (unsigned long)pdcs);
+	/* Init work for deferred DMA rx processing */
+	INIT_WORK(&pdcs->rx_work, pdc_work_cb);
 
 	err = pdc_interrupts_init(pdcs);
 	if (err)
@@ -1614,14 +1586,13 @@
 	if (err)
 		goto cleanup_buf_pool;
 
-	pdcs->debugfs_stats = NULL;
 	pdc_setup_debugfs(pdcs);
 
 	dev_dbg(dev, "pdc_probe() successful");
 	return PDC_SUCCESS;
 
 cleanup_buf_pool:
-	tasklet_kill(&pdcs->rx_tasklet);
+	cancel_work_sync(&pdcs->rx_work);
 	dma_pool_destroy(pdcs->rx_buf_pool);
 
 cleanup_ring_pool:
@@ -1631,19 +1602,18 @@ cleanup:
 	return err;
 }
 
-static int pdc_remove(struct platform_device *pdev)
+static void pdc_remove(struct platform_device *pdev)
 {
 	struct pdc_state *pdcs = platform_get_drvdata(pdev);
 
 	pdc_free_debugfs();
 
-	tasklet_kill(&pdcs->rx_tasklet);
+	cancel_work_sync(&pdcs->rx_work);
 
 	pdc_hw_disable(pdcs);
 
 	dma_pool_destroy(pdcs->rx_buf_pool);
 	dma_pool_destroy(pdcs->ring_pool);
-
-	return 0;
 }
 
 static struct platform_driver pdc_mbox_driver = {
@@ -1651,7 +1621,7 @@ static struct platform_driver pdc_mbox_driver = {
 	.remove = pdc_remove,
 	.driver = {
 		.name = "brcm-iproc-pdc-mbox",
-		.of_match_table = of_match_ptr(pdc_mbox_of_match),
+		.of_match_table = pdc_mbox_of_match,
 	},
 };
 module_platform_driver(pdc_mbox_driver);
diff --git a/drivers/mailbox/bcm2835-mailbox.c b/drivers/mailbox/bcm2835-mailbox.c
index 39761d190545..ea12fb8d2401 100644
--- a/drivers/mailbox/bcm2835-mailbox.c
+++ b/drivers/mailbox/bcm2835-mailbox.c
@@ -137,7 +137,6 @@ static int bcm2835_mbox_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	int ret = 0;
-	struct
resource *iomem; struct bcm2835_mbox *mbox; mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL); @@ -146,18 +145,17 @@ static int bcm2835_mbox_probe(struct platform_device *pdev) spin_lock_init(&mbox->lock); ret = devm_request_irq(dev, irq_of_parse_and_map(dev->of_node, 0), - bcm2835_mbox_irq, 0, dev_name(dev), mbox); + bcm2835_mbox_irq, IRQF_NO_SUSPEND, dev_name(dev), + mbox); if (ret) { dev_err(dev, "Failed to register a mailbox IRQ handler: %d\n", ret); return -ENODEV; } - iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - mbox->regs = devm_ioremap_resource(&pdev->dev, iomem); + mbox->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(mbox->regs)) { ret = PTR_ERR(mbox->regs); - dev_err(&pdev->dev, "Failed to remap mailbox regs: %d\n", ret); return ret; } diff --git a/drivers/mailbox/bcm74110-mailbox.c b/drivers/mailbox/bcm74110-mailbox.c new file mode 100644 index 000000000000..2e7e86f3e6a4 --- /dev/null +++ b/drivers/mailbox/bcm74110-mailbox.c @@ -0,0 +1,656 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Broadcom BCM74110 Mailbox Driver + * + * Copyright (c) 2025 Broadcom + */ +#include <linux/list.h> +#include <linux/types.h> +#include <linux/workqueue.h> +#include <linux/io-64-nonatomic-hi-lo.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/of.h> +#include <linux/delay.h> +#include <linux/mailbox_controller.h> +#include <linux/bitfield.h> +#include <linux/slab.h> + +#define BCM_MBOX_BASE(sel) ((sel) * 0x40) +#define BCM_MBOX_IRQ_BASE(sel) (((sel) * 0x20) + 0x800) + +#define BCM_MBOX_CFGA 0x0 +#define BCM_MBOX_CFGB 0x4 +#define BCM_MBOX_CFGC 0x8 +#define BCM_MBOX_CFGD 0xc +#define BCM_MBOX_CTRL 0x10 +#define BCM_MBOX_CTRL_EN BIT(0) +#define BCM_MBOX_CTRL_CLR BIT(1) +#define BCM_MBOX_STATUS0 0x14 +#define BCM_MBOX_STATUS0_NOT_EMPTY BIT(28) +#define BCM_MBOX_STATUS0_FULL BIT(29) +#define BCM_MBOX_STATUS1 0x18 +#define BCM_MBOX_STATUS2 0x1c +#define BCM_MBOX_WDATA 0x20 +#define BCM_MBOX_RDATA 0x28 + +#define BCM_MBOX_IRQ_STATUS 0x0 +#define BCM_MBOX_IRQ_SET 0x4 +#define BCM_MBOX_IRQ_CLEAR 0x8 +#define BCM_MBOX_IRQ_MASK_STATUS 0xc +#define BCM_MBOX_IRQ_MASK_SET 0x10 +#define BCM_MBOX_IRQ_MASK_CLEAR 0x14 +#define BCM_MBOX_IRQ_TIMEOUT BIT(0) +#define BCM_MBOX_IRQ_NOT_EMPTY BIT(1) +#define BCM_MBOX_IRQ_FULL BIT(2) +#define BCM_MBOX_IRQ_LOW_WM BIT(3) +#define BCM_MBOX_IRQ_HIGH_WM BIT(4) + +#define BCM_LINK_CODE0 0xbe0 +#define BCM_LINK_CODE1 0xbe1 +#define BCM_LINK_CODE2 0xbe2 + +enum { + BCM_MSG_FUNC_LINK_START = 0, + BCM_MSG_FUNC_LINK_STOP, + BCM_MSG_FUNC_SHMEM_TX, + BCM_MSG_FUNC_SHMEM_RX, + BCM_MSG_FUNC_SHMEM_STOP, + BCM_MSG_FUNC_MAX, +}; + +enum { + BCM_MSG_SVC_INIT = 0, + BCM_MSG_SVC_PMC, + BCM_MSG_SVC_SCMI, + BCM_MSG_SVC_DPFE, + BCM_MSG_SVC_MAX, +}; + +struct bcm74110_mbox_msg { + struct list_head list_entry; +#define BCM_MSG_VERSION_MASK GENMASK(31, 29) +#define BCM_MSG_VERSION 0x1 +#define BCM_MSG_REQ_MASK BIT(28) +#define BCM_MSG_RPLY_MASK BIT(27) +#define BCM_MSG_SVC_MASK GENMASK(26, 24) +#define BCM_MSG_FUNC_MASK GENMASK(23, 16) +#define BCM_MSG_LENGTH_MASK GENMASK(15, 4) +#define BCM_MSG_SLOT_MASK GENMASK(3, 0) + +#define BCM_MSG_SET_FIELD(hdr, field, val) \ + do { \ + hdr &= ~BCM_MSG_##field##_MASK; \ + hdr |= FIELD_PREP(BCM_MSG_##field##_MASK, val); \ + } while (0) + +#define BCM_MSG_GET_FIELD(hdr, field) \ + FIELD_GET(BCM_MSG_##field##_MASK, hdr) + u32 msg; +}; + +struct bcm74110_mbox_chan { + struct bcm74110_mbox *mbox; + bool en; + int slot; + int type; +}; + +struct bcm74110_mbox { + struct 
platform_device *pdev; + void __iomem *base; + + int tx_chan; + int rx_chan; + int rx_irq; + struct list_head rx_svc_init_list; + spinlock_t rx_svc_list_lock; + + struct mbox_controller controller; + struct bcm74110_mbox_chan *mbox_chan; +}; + +#define BCM74110_OFFSET_IO_WRITEL_MACRO(name, offset_base) \ +static void bcm74110_##name##_writel(struct bcm74110_mbox *mbox,\ + u32 val, u32 off) \ +{ \ + writel_relaxed(val, mbox->base + offset_base + off); \ +} +BCM74110_OFFSET_IO_WRITEL_MACRO(tx, BCM_MBOX_BASE(mbox->tx_chan)); +BCM74110_OFFSET_IO_WRITEL_MACRO(irq, BCM_MBOX_IRQ_BASE(mbox->rx_chan)); + +#define BCM74110_OFFSET_IO_READL_MACRO(name, offset_base) \ +static u32 bcm74110_##name##_readl(struct bcm74110_mbox *mbox, \ + u32 off) \ +{ \ + return readl_relaxed(mbox->base + offset_base + off); \ +} +BCM74110_OFFSET_IO_READL_MACRO(tx, BCM_MBOX_BASE(mbox->tx_chan)); +BCM74110_OFFSET_IO_READL_MACRO(rx, BCM_MBOX_BASE(mbox->rx_chan)); +BCM74110_OFFSET_IO_READL_MACRO(irq, BCM_MBOX_IRQ_BASE(mbox->rx_chan)); + +static inline struct bcm74110_mbox *bcm74110_mbox_from_cntrl( + struct mbox_controller *cntrl) +{ + return container_of(cntrl, struct bcm74110_mbox, controller); +} + +static void bcm74110_rx_push_init_msg(struct bcm74110_mbox *mbox, u32 val) +{ + struct bcm74110_mbox_msg *msg; + + msg = kzalloc(sizeof(*msg), GFP_ATOMIC); + if (!msg) + return; + + INIT_LIST_HEAD(&msg->list_entry); + msg->msg = val; + + spin_lock(&mbox->rx_svc_list_lock); + list_add_tail(&msg->list_entry, &mbox->rx_svc_init_list); + spin_unlock(&mbox->rx_svc_list_lock); +} + +static void bcm74110_rx_process_msg(struct bcm74110_mbox *mbox) +{ + struct device *dev = &mbox->pdev->dev; + struct bcm74110_mbox_chan *chan_priv; + struct mbox_chan *chan; + u32 msg, status; + int type; + + do { + msg = bcm74110_rx_readl(mbox, BCM_MBOX_RDATA); + status = bcm74110_rx_readl(mbox, BCM_MBOX_STATUS0); + + dev_dbg(dev, "rx: [{req=%lu|rply=%lu|srv=%lu|fn=%lu|length=%lu|slot=%lu]\n", + BCM_MSG_GET_FIELD(msg, REQ), BCM_MSG_GET_FIELD(msg, RPLY), + BCM_MSG_GET_FIELD(msg, SVC), BCM_MSG_GET_FIELD(msg, FUNC), + BCM_MSG_GET_FIELD(msg, LENGTH), BCM_MSG_GET_FIELD(msg, SLOT)); + + type = BCM_MSG_GET_FIELD(msg, SVC); + switch (type) { + case BCM_MSG_SVC_INIT: + bcm74110_rx_push_init_msg(mbox, msg); + break; + case BCM_MSG_SVC_PMC: + case BCM_MSG_SVC_SCMI: + case BCM_MSG_SVC_DPFE: + chan = &mbox->controller.chans[type]; + chan_priv = chan->con_priv; + if (chan_priv->en) + mbox_chan_received_data(chan, NULL); + else + dev_warn(dev, "Channel not enabled\n"); + break; + default: + dev_warn(dev, "Unsupported msg received\n"); + } + } while (status & BCM_MBOX_STATUS0_NOT_EMPTY); +} + +static irqreturn_t bcm74110_mbox_isr(int irq, void *data) +{ + struct bcm74110_mbox *mbox = data; + u32 status; + + status = bcm74110_irq_readl(mbox, BCM_MBOX_IRQ_STATUS); + + bcm74110_irq_writel(mbox, 0xffffffff, BCM_MBOX_IRQ_CLEAR); + + if (status & BCM_MBOX_IRQ_NOT_EMPTY) + bcm74110_rx_process_msg(mbox); + else + dev_warn(&mbox->pdev->dev, "Spurious interrupt\n"); + + return IRQ_HANDLED; +} + +static void bcm74110_mbox_mask_and_clear(struct bcm74110_mbox *mbox) +{ + bcm74110_irq_writel(mbox, 0xffffffff, BCM_MBOX_IRQ_MASK_SET); + bcm74110_irq_writel(mbox, 0xffffffff, BCM_MBOX_IRQ_CLEAR); +} + +static int bcm74110_rx_pop_init_msg(struct bcm74110_mbox *mbox, u32 func_type, + u32 *val) +{ + struct bcm74110_mbox_msg *msg, *msg_tmp; + unsigned long flags; + bool found = false; + + spin_lock_irqsave(&mbox->rx_svc_list_lock, flags); + list_for_each_entry_safe(msg, msg_tmp, 
&mbox->rx_svc_init_list, + list_entry) { + if (BCM_MSG_GET_FIELD(msg->msg, FUNC) == func_type) { + list_del(&msg->list_entry); + found = true; + break; + } + } + spin_unlock_irqrestore(&mbox->rx_svc_list_lock, flags); + + if (!found) + return -EINVAL; + + *val = msg->msg; + kfree(msg); + + return 0; +} + +static void bcm74110_rx_flush_msg(struct bcm74110_mbox *mbox) +{ + struct bcm74110_mbox_msg *msg, *msg_tmp; + LIST_HEAD(list_temp); + unsigned long flags; + + spin_lock_irqsave(&mbox->rx_svc_list_lock, flags); + list_splice_init(&mbox->rx_svc_init_list, &list_temp); + spin_unlock_irqrestore(&mbox->rx_svc_list_lock, flags); + + list_for_each_entry_safe(msg, msg_tmp, &list_temp, list_entry) { + list_del(&msg->list_entry); + kfree(msg); + } +} + +#define BCM_DEQUEUE_TIMEOUT_MS 30 +static int bcm74110_rx_pop_init_msg_block(struct bcm74110_mbox *mbox, u32 func_type, + u32 *val) +{ + int ret, timeout = 0; + + do { + ret = bcm74110_rx_pop_init_msg(mbox, func_type, val); + + if (!ret) + return 0; + + /* TODO: Figure out what is a good sleep here. */ + usleep_range(1000, 2000); + timeout++; + } while (timeout < BCM_DEQUEUE_TIMEOUT_MS); + + dev_warn(&mbox->pdev->dev, "Timeout waiting for service init response\n"); + return -ETIMEDOUT; +} + +static int bcm74110_mbox_create_msg(int req, int rply, int svc, int func, + int length, int slot) +{ + u32 msg = 0; + + BCM_MSG_SET_FIELD(msg, REQ, req); + BCM_MSG_SET_FIELD(msg, RPLY, rply); + BCM_MSG_SET_FIELD(msg, SVC, svc); + BCM_MSG_SET_FIELD(msg, FUNC, func); + BCM_MSG_SET_FIELD(msg, LENGTH, length); + BCM_MSG_SET_FIELD(msg, SLOT, slot); + + return msg; +} + +static int bcm74110_mbox_tx_msg(struct bcm74110_mbox *mbox, u32 msg) +{ + int val; + + /* We can potentially poll with timeout here instead */ + val = bcm74110_tx_readl(mbox, BCM_MBOX_STATUS0); + if (val & BCM_MBOX_STATUS0_FULL) { + dev_err(&mbox->pdev->dev, "Mailbox full\n"); + return -EINVAL; + } + + dev_dbg(&mbox->pdev->dev, "tx: [{req=%lu|rply=%lu|srv=%lu|fn=%lu|length=%lu|slot=%lu]\n", + BCM_MSG_GET_FIELD(msg, REQ), BCM_MSG_GET_FIELD(msg, RPLY), + BCM_MSG_GET_FIELD(msg, SVC), BCM_MSG_GET_FIELD(msg, FUNC), + BCM_MSG_GET_FIELD(msg, LENGTH), BCM_MSG_GET_FIELD(msg, SLOT)); + + bcm74110_tx_writel(mbox, msg, BCM_MBOX_WDATA); + + return 0; +} + +#define BCM_MBOX_LINK_TRAINING_RETRIES 5 +static int bcm74110_mbox_link_training(struct bcm74110_mbox *mbox) +{ + int ret, retries = 0; + u32 msg = 0, orig_len = 0, len = BCM_LINK_CODE0; + + do { + switch (len) { + case 0: + retries++; + dev_warn(&mbox->pdev->dev, + "Link train failed, trying again... 
%d\n", + retries); + if (retries > BCM_MBOX_LINK_TRAINING_RETRIES) + return -EINVAL; + len = BCM_LINK_CODE0; + fallthrough; + case BCM_LINK_CODE0: + case BCM_LINK_CODE1: + case BCM_LINK_CODE2: + msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT, + BCM_MSG_FUNC_LINK_START, + len, BCM_MSG_SVC_INIT); + break; + default: + break; + } + + bcm74110_mbox_tx_msg(mbox, msg); + + /* No response expected for LINK_CODE2 */ + if (len == BCM_LINK_CODE2) + return 0; + + orig_len = len; + + ret = bcm74110_rx_pop_init_msg_block(mbox, + BCM_MSG_GET_FIELD(msg, FUNC), + &msg); + if (ret) { + len = 0; + continue; + } + + if ((BCM_MSG_GET_FIELD(msg, SVC) != BCM_MSG_SVC_INIT) || + (BCM_MSG_GET_FIELD(msg, FUNC) != BCM_MSG_FUNC_LINK_START) || + (BCM_MSG_GET_FIELD(msg, SLOT) != 0) || + (BCM_MSG_GET_FIELD(msg, RPLY) != 1) || + (BCM_MSG_GET_FIELD(msg, REQ) != 0)) { + len = 0; + continue; + } + + len = BCM_MSG_GET_FIELD(msg, LENGTH); + + /* Make sure sequence is good */ + if (len != (orig_len + 1)) { + len = 0; + continue; + } + } while (1); + + return -EINVAL; +} + +static int bcm74110_mbox_tx_msg_and_wait_ack(struct bcm74110_mbox *mbox, u32 msg) +{ + int ret; + u32 recv_msg; + + ret = bcm74110_mbox_tx_msg(mbox, msg); + if (ret) + return ret; + + ret = bcm74110_rx_pop_init_msg_block(mbox, BCM_MSG_GET_FIELD(msg, FUNC), + &recv_msg); + if (ret) + return ret; + + /* + * Modify tx message to verify rx ack. + * Flip RPLY/REQ for synchronous messages + */ + if (BCM_MSG_GET_FIELD(msg, REQ) == 1) { + BCM_MSG_SET_FIELD(msg, RPLY, 1); + BCM_MSG_SET_FIELD(msg, REQ, 0); + } + + if (msg != recv_msg) { + dev_err(&mbox->pdev->dev, "Found ack, but ack is invalid\n"); + return -EINVAL; + } + + return 0; +} + +/* Each index points to 0x100 of HAB MEM. IDX size counts from 0 */ +#define BCM_MBOX_HAB_MEM_IDX_START 0x30 +#define BCM_MBOX_HAB_MEM_IDX_SIZE 0x0 +static int bcm74110_mbox_shmem_init(struct bcm74110_mbox *mbox) +{ + u32 msg = 0; + int ret; + + msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT, + BCM_MSG_FUNC_SHMEM_STOP, + 0, BCM_MSG_SVC_INIT); + ret = bcm74110_mbox_tx_msg_and_wait_ack(mbox, msg); + if (ret) + return -EINVAL; + + msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT, + BCM_MSG_FUNC_SHMEM_TX, + BCM_MBOX_HAB_MEM_IDX_START, + BCM_MBOX_HAB_MEM_IDX_SIZE); + ret = bcm74110_mbox_tx_msg_and_wait_ack(mbox, msg); + if (ret) + return -EINVAL; + + msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT, + BCM_MSG_FUNC_SHMEM_RX, + BCM_MBOX_HAB_MEM_IDX_START, + BCM_MBOX_HAB_MEM_IDX_SIZE); + ret = bcm74110_mbox_tx_msg_and_wait_ack(mbox, msg); + if (ret) + return -EINVAL; + + return 0; +} + +static int bcm74110_mbox_init(struct bcm74110_mbox *mbox) +{ + int ret = 0; + + /* Disable queues tx/rx */ + bcm74110_tx_writel(mbox, 0x0, BCM_MBOX_CTRL); + + /* Clear status & restart tx/rx*/ + bcm74110_tx_writel(mbox, BCM_MBOX_CTRL_EN | BCM_MBOX_CTRL_CLR, + BCM_MBOX_CTRL); + + /* Unmask irq */ + bcm74110_irq_writel(mbox, BCM_MBOX_IRQ_NOT_EMPTY, BCM_MBOX_IRQ_MASK_CLEAR); + + ret = bcm74110_mbox_link_training(mbox); + if (ret) { + dev_err(&mbox->pdev->dev, "Training failed\n"); + return ret; + } + + return bcm74110_mbox_shmem_init(mbox); +} + +static int bcm74110_mbox_send_data(struct mbox_chan *chan, void *data) +{ + struct bcm74110_mbox_chan *chan_priv = chan->con_priv; + u32 msg; + + switch (chan_priv->type) { + case BCM_MSG_SVC_PMC: + case BCM_MSG_SVC_SCMI: + case BCM_MSG_SVC_DPFE: + msg = bcm74110_mbox_create_msg(1, 0, chan_priv->type, 0, + 128 + 28, chan_priv->slot); + break; + default: + return -EINVAL; + } + + return 
bcm74110_mbox_tx_msg(chan_priv->mbox, msg);
+}
+
+static int bcm74110_mbox_chan_startup(struct mbox_chan *chan)
+{
+	struct bcm74110_mbox_chan *chan_priv = chan->con_priv;
+
+	chan_priv->en = true;
+
+	return 0;
+}
+
+static void bcm74110_mbox_chan_shutdown(struct mbox_chan *chan)
+{
+	struct bcm74110_mbox_chan *chan_priv = chan->con_priv;
+
+	chan_priv->en = false;
+}
+
+static const struct mbox_chan_ops bcm74110_mbox_chan_ops = {
+	.send_data = bcm74110_mbox_send_data,
+	.startup = bcm74110_mbox_chan_startup,
+	.shutdown = bcm74110_mbox_chan_shutdown,
+};
+
+static void bcm74110_mbox_shutdown(struct platform_device *pdev)
+{
+	struct bcm74110_mbox *mbox = dev_get_drvdata(&pdev->dev);
+	u32 msg;
+
+	msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT,
+				       BCM_MSG_FUNC_LINK_STOP,
+				       0, 0);
+
+	bcm74110_mbox_tx_msg_and_wait_ack(mbox, msg);
+
+	/* Even if we don't receive an ACK, let's shut it down */
+
+	bcm74110_mbox_mask_and_clear(mbox);
+
+	/* Disable queues tx/rx */
+	bcm74110_tx_writel(mbox, 0x0, BCM_MBOX_CTRL);
+
+	/* Flush queues */
+	bcm74110_rx_flush_msg(mbox);
+}
+
+static struct mbox_chan *bcm74110_mbox_of_xlate(struct mbox_controller *cntrl,
+						const struct of_phandle_args *p)
+{
+	struct bcm74110_mbox *mbox = bcm74110_mbox_from_cntrl(cntrl);
+	struct device *dev = &mbox->pdev->dev;
+	struct bcm74110_mbox_chan *chan_priv;
+	int slot, type;
+
+	if (p->args_count != 2) {
+		dev_err(dev, "Invalid arguments\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	type = p->args[0];
+	slot = p->args[1];
+
+	switch (type) {
+	case BCM_MSG_SVC_PMC:
+	case BCM_MSG_SVC_SCMI:
+	case BCM_MSG_SVC_DPFE:
+		if (slot > BCM_MBOX_HAB_MEM_IDX_SIZE) {
+			dev_err(dev, "Not enough shared memory\n");
+			return ERR_PTR(-EINVAL);
+		}
+		chan_priv = cntrl->chans[type].con_priv;
+		chan_priv->slot = slot;
+		chan_priv->type = type;
+		break;
+	default:
+		dev_err(dev, "Invalid channel type: %d\n", type);
+		return ERR_PTR(-EINVAL);
+	}
+
+	return &cntrl->chans[type];
+}
+
+static int bcm74110_mbox_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct bcm74110_mbox *mbox;
+	int i, ret;
+
+	mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+
+	mbox->pdev = pdev;
+	platform_set_drvdata(pdev, mbox);
+
+	mbox->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(mbox->base))
+		return dev_err_probe(dev, PTR_ERR(mbox->base), "Failed to iomap\n");
+
+	ret = of_property_read_u32(dev->of_node, "brcm,tx", &mbox->tx_chan);
+	if (ret)
+		return dev_err_probe(dev, ret, "Failed to find tx channel\n");
+
+	ret = of_property_read_u32(dev->of_node, "brcm,rx", &mbox->rx_chan);
+	if (ret)
+		return dev_err_probe(dev, ret, "Failed to find rx channel\n");
+
+	mbox->rx_irq = platform_get_irq(pdev, 0);
+	if (mbox->rx_irq < 0)
+		return mbox->rx_irq;
+
+	INIT_LIST_HEAD(&mbox->rx_svc_init_list);
+	spin_lock_init(&mbox->rx_svc_list_lock);
+	bcm74110_mbox_mask_and_clear(mbox);
+
+	ret = devm_request_irq(dev, mbox->rx_irq, bcm74110_mbox_isr,
+			       IRQF_NO_SUSPEND, pdev->name, mbox);
+	if (ret)
+		return dev_err_probe(dev, ret, "Failed to request irq\n");
+
+	mbox->controller.ops = &bcm74110_mbox_chan_ops;
+	mbox->controller.dev = dev;
+	mbox->controller.num_chans = BCM_MSG_SVC_MAX;
+	mbox->controller.of_xlate = &bcm74110_mbox_of_xlate;
+	mbox->controller.chans = devm_kcalloc(dev, BCM_MSG_SVC_MAX,
+					      sizeof(*mbox->controller.chans),
+					      GFP_KERNEL);
+	if (!mbox->controller.chans)
+		return -ENOMEM;
+
+	mbox->mbox_chan = devm_kcalloc(dev, BCM_MSG_SVC_MAX,
+				       sizeof(*mbox->mbox_chan),
+				       GFP_KERNEL);
+	if (!mbox->mbox_chan)
+		return -ENOMEM;
+
+	for (i = 0; i < BCM_MSG_SVC_MAX; i++) {
+		mbox->mbox_chan[i].mbox = mbox;
+		mbox->controller.chans[i].con_priv = &mbox->mbox_chan[i];
+	}
+
+	ret = devm_mbox_controller_register(dev, &mbox->controller);
+	if (ret)
+		return ret;
+
+	ret = bcm74110_mbox_init(mbox);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static const struct of_device_id bcm74110_mbox_of_match[] = {
+	{ .compatible = "brcm,bcm74110-mbox", },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, bcm74110_mbox_of_match);
+
+static struct platform_driver bcm74110_mbox_driver = {
+	.driver = {
+		.name = "bcm74110-mbox",
+		.of_match_table = bcm74110_mbox_of_match,
+	},
+	.probe = bcm74110_mbox_probe,
+	.shutdown = bcm74110_mbox_shutdown,
+};
+module_platform_driver(bcm74110_mbox_driver);
+
+MODULE_AUTHOR("Justin Chen <justin.chen@broadcom.com>");
+MODULE_DESCRIPTION("BCM74110 mailbox driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mailbox/cix-mailbox.c b/drivers/mailbox/cix-mailbox.c
new file mode 100644
index 000000000000..5bb1416c26a5
--- /dev/null
+++ b/drivers/mailbox/cix-mailbox.c
@@ -0,0 +1,645 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025 Cix Technology Group Co., Ltd.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "mailbox.h"
+
+/*
+ * The maximum transmission size is 32 words or 128 bytes.
+ */
+#define CIX_MBOX_MSG_WORDS	32	/* Max length = 32 words */
+#define CIX_MBOX_MSG_LEN_MASK	0x7fL	/* Max length = 128 bytes */
+
+/*
+ * [0~7] Fast channel
+ * [8] doorbell base channel
+ * [9] fifo base channel
+ * [10] register base channel
+ */
+#define CIX_MBOX_FAST_IDX	7
+#define CIX_MBOX_DB_IDX		8
+#define CIX_MBOX_FIFO_IDX	9
+#define CIX_MBOX_REG_IDX	10
+#define CIX_MBOX_CHANS		11
+
+/* Register define */
+#define CIX_REG_MSG(n)		(0x0 + 0x4*(n))	/* 0x0~0x7c */
+#define CIX_REG_DB_ACK		CIX_REG_MSG(CIX_MBOX_MSG_WORDS)	/* 0x80 */
+#define CIX_ERR_COMP		(CIX_REG_DB_ACK + 0x4)	/* 0x84 */
+#define CIX_ERR_COMP_CLR	(CIX_REG_DB_ACK + 0x8)	/* 0x88 */
+#define CIX_REG_F_INT(IDX)	(CIX_ERR_COMP_CLR + 0x4*(IDX+1))	/* 0x8c~0xa8 */
+#define CIX_FIFO_WR		(CIX_REG_F_INT(CIX_MBOX_FAST_IDX+1))	/* 0xac */
+#define CIX_FIFO_RD		(CIX_FIFO_WR + 0x4)	/* 0xb0 */
+#define CIX_FIFO_STAS		(CIX_FIFO_WR + 0x8)	/* 0xb4 */
+#define CIX_FIFO_WM		(CIX_FIFO_WR + 0xc)	/* 0xb8 */
+#define CIX_INT_ENABLE		(CIX_FIFO_WR + 0x10)	/* 0xbc */
+#define CIX_INT_ENABLE_SIDE_B	(CIX_FIFO_WR + 0x14)	/* 0xc0 */
+#define CIX_INT_CLEAR		(CIX_FIFO_WR + 0x18)	/* 0xc4 */
+#define CIX_INT_STATUS		(CIX_FIFO_WR + 0x1c)	/* 0xc8 */
+#define CIX_FIFO_RST		(CIX_FIFO_WR + 0x20)	/* 0xcc */
+
+#define CIX_MBOX_TX	0
+#define CIX_MBOX_RX	1
+
+#define CIX_DB_INT_BIT		BIT(0)
+#define CIX_DB_ACK_INT_BIT	BIT(1)
+
+#define CIX_FIFO_WM_DEFAULT	CIX_MBOX_MSG_WORDS
+#define CIX_FIFO_STAS_WMK	BIT(0)
+#define CIX_FIFO_STAS_FULL	BIT(1)
+#define CIX_FIFO_STAS_EMPTY	BIT(2)
+#define CIX_FIFO_STAS_UFLOW	BIT(3)
+#define CIX_FIFO_STAS_OFLOW	BIT(4)
+
+#define CIX_FIFO_RST_BIT	BIT(0)
+
+#define CIX_DB_INT		BIT(0)
+#define CIX_ACK_INT		BIT(1)
+#define CIX_FIFO_FULL_INT	BIT(2)
+#define CIX_FIFO_EMPTY_INT	BIT(3)
+#define CIX_FIFO_WM01_INT	BIT(4)
+#define CIX_FIFO_WM10_INT	BIT(5)
+#define CIX_FIFO_OFLOW_INT	BIT(6)
+#define CIX_FIFO_UFLOW_INT	BIT(7)
+#define CIX_FIFO_N_EMPTY_INT	BIT(8)
+#define CIX_FAST_CH_INT(IDX)	BIT((IDX)+9)
+
+#define CIX_SHMEM_OFFSET	0x80
+
+enum cix_mbox_chan_type {
+	CIX_MBOX_TYPE_DB,
+	CIX_MBOX_TYPE_REG,
+	CIX_MBOX_TYPE_FIFO,
+	CIX_MBOX_TYPE_FAST,
+};
+
+struct cix_mbox_con_priv {
+	enum cix_mbox_chan_type type;
+	struct mbox_chan *chan;
+	int index;
+};
+
+struct cix_mbox_priv {
+	struct device *dev;
+	int irq;
+	int dir;
+	void __iomem *base; /* region for mailbox */
+	struct cix_mbox_con_priv con_priv[CIX_MBOX_CHANS];
+	struct mbox_chan mbox_chans[CIX_MBOX_CHANS];
+	struct mbox_controller mbox;
+	bool use_shmem;
+};
+
+/*
+ * The CIX mailbox supports four types of transfers:
+ * CIX_MBOX_TYPE_DB, CIX_MBOX_TYPE_FAST, CIX_MBOX_TYPE_REG, and CIX_MBOX_TYPE_FIFO.
+ * For the REG and FIFO types of transfers, the message format is as follows:
+ */
+union cix_mbox_msg_reg_fifo {
+	u32 length; /* unit is byte */
+	u32 buf[CIX_MBOX_MSG_WORDS]; /* buf[0] must be the byte length of this array */
+};
+
+static struct cix_mbox_priv *to_cix_mbox_priv(struct mbox_controller *mbox)
+{
+	return container_of(mbox, struct cix_mbox_priv, mbox);
+}
+
+static void cix_mbox_write(struct cix_mbox_priv *priv, u32 val, u32 offset)
+{
+	if (priv->use_shmem)
+		iowrite32(val, priv->base + offset - CIX_SHMEM_OFFSET);
+	else
+		iowrite32(val, priv->base + offset);
+}
+
+static u32 cix_mbox_read(struct cix_mbox_priv *priv, u32 offset)
+{
+	if (priv->use_shmem)
+		return ioread32(priv->base + offset - CIX_SHMEM_OFFSET);
+	else
+		return ioread32(priv->base + offset);
+}
+
+static bool mbox_fifo_empty(struct mbox_chan *chan)
+{
+	struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+
+	return cix_mbox_read(priv, CIX_FIFO_STAS) & CIX_FIFO_STAS_EMPTY;
+}
+
+/*
+ * The transmission unit of the CIX mailbox is the word.
+ * The byte length must be converted into a word count.
+ */
+static inline u32 mbox_get_msg_size(void *msg)
+{
+	u32 len;
+
+	len = ((u32 *)msg)[0] & CIX_MBOX_MSG_LEN_MASK;
+	return DIV_ROUND_UP(len, 4);
+}
+
+static int cix_mbox_send_data_db(struct mbox_chan *chan, void *data)
+{
+	struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+
+	/* trigger doorbell irq */
+	cix_mbox_write(priv, CIX_DB_INT_BIT, CIX_REG_DB_ACK);
+
+	return 0;
+}
+
+static int cix_mbox_send_data_reg(struct mbox_chan *chan, void *data)
+{
+	struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+	union cix_mbox_msg_reg_fifo *msg = data;
+	u32 len, i;
+
+	if (!data)
+		return -EINVAL;
+
+	len = mbox_get_msg_size(data);
+	for (i = 0; i < len; i++)
+		cix_mbox_write(priv, msg->buf[i], CIX_REG_MSG(i));
+
+	/* trigger doorbell irq */
+	cix_mbox_write(priv, CIX_DB_INT_BIT, CIX_REG_DB_ACK);
+
+	return 0;
+}
+
+static int cix_mbox_send_data_fifo(struct mbox_chan *chan, void *data)
+{
+	struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+	union cix_mbox_msg_reg_fifo *msg = data;
+	u32 len, val, i;
+
+	if (!data)
+		return -EINVAL;
+
+	len = mbox_get_msg_size(data);
+	cix_mbox_write(priv, len, CIX_FIFO_WM);
+	for (i = 0; i < len; i++)
+		cix_mbox_write(priv, msg->buf[i], CIX_FIFO_WR);
+
+	/* Enable fifo empty interrupt */
+	val = cix_mbox_read(priv, CIX_INT_ENABLE);
+	val |= CIX_FIFO_EMPTY_INT;
+	cix_mbox_write(priv, val, CIX_INT_ENABLE);
+
+	return 0;
+}
+
+static int cix_mbox_send_data_fast(struct mbox_chan *chan, void *data)
+{
+	struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+	struct cix_mbox_con_priv *cp = chan->con_priv;
+	u32 *arg = (u32 *)data;
+	int index = cp->index;
+
+	if (!data)
+		return -EINVAL;
+
+	if (index < 0 || index > CIX_MBOX_FAST_IDX) {
+		dev_err(priv->dev, "Invalid Mbox index %d\n", index);
+		return -EINVAL;
+	}
+
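+	/*
+	 * A fast channel is a one-word doorbell: the value written to
+	 * CIX_REG_F_INT(index) is latched there and raises the matching
+	 * fast-channel interrupt on the remote side, which reads the word
+	 * back in cix_mbox_isr_fast().
+	 */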
+	cix_mbox_write(priv, arg[0], CIX_REG_F_INT(index));
+
+	return 0;
+}
+
+static int cix_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+	struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+	struct cix_mbox_con_priv *cp = chan->con_priv;
+
+	if (priv->dir != CIX_MBOX_TX) {
+		dev_err(priv->dev, "Invalid Mbox dir %d\n", priv->dir);
+		return -EINVAL;
+	}
+
+	switch (cp->type) {
+	case CIX_MBOX_TYPE_DB:
+		cix_mbox_send_data_db(chan, data);
+		break;
+	case CIX_MBOX_TYPE_REG:
+		cix_mbox_send_data_reg(chan, data);
+		break;
+	case CIX_MBOX_TYPE_FIFO:
+		cix_mbox_send_data_fifo(chan, data);
+		break;
+	case CIX_MBOX_TYPE_FAST:
+		cix_mbox_send_data_fast(chan, data);
+		break;
+	default:
+		dev_err(priv->dev, "Invalid channel type: %d\n", cp->type);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void cix_mbox_isr_db(struct mbox_chan *chan)
+{
+	struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+	u32 int_status;
+
+	int_status = cix_mbox_read(priv, CIX_INT_STATUS);
+
+	if (priv->dir == CIX_MBOX_RX) {
+		/* rx interrupt is triggered */
+		if (int_status & CIX_DB_INT) {
+			cix_mbox_write(priv, CIX_DB_INT, CIX_INT_CLEAR);
+			mbox_chan_received_data(chan, NULL);
+			/* trigger ack interrupt */
+			cix_mbox_write(priv, CIX_DB_ACK_INT_BIT, CIX_REG_DB_ACK);
+		}
+	} else {
+		/* tx ack interrupt is triggered */
+		if (int_status & CIX_ACK_INT) {
+			cix_mbox_write(priv, CIX_ACK_INT, CIX_INT_CLEAR);
+			mbox_chan_received_data(chan, NULL);
+		}
+	}
+}
+
+static void cix_mbox_isr_reg(struct mbox_chan *chan)
+{
+	struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+	u32 int_status;
+
+	int_status = cix_mbox_read(priv, CIX_INT_STATUS);
+
+	if (priv->dir == CIX_MBOX_RX) {
+		/* rx interrupt is triggered */
+		if (int_status & CIX_DB_INT) {
+			u32 data[CIX_MBOX_MSG_WORDS], len, i;
+
+			cix_mbox_write(priv, CIX_DB_INT, CIX_INT_CLEAR);
+			data[0] = cix_mbox_read(priv, CIX_REG_MSG(0));
+			len = mbox_get_msg_size(data);
+			for (i = 1; i < len; i++)
+				data[i] = cix_mbox_read(priv, CIX_REG_MSG(i));
+
+			/* trigger ack interrupt */
+			cix_mbox_write(priv, CIX_DB_ACK_INT_BIT, CIX_REG_DB_ACK);
+			mbox_chan_received_data(chan, data);
+		}
+	} else {
+		/* tx ack interrupt is triggered */
+		if (int_status & CIX_ACK_INT) {
+			cix_mbox_write(priv, CIX_ACK_INT, CIX_INT_CLEAR);
+			mbox_chan_txdone(chan, 0);
+		}
+	}
+}
+
+static void cix_mbox_isr_fifo(struct mbox_chan *chan)
+{
+	struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+	u32 int_status, status;
+
+	int_status = cix_mbox_read(priv, CIX_INT_STATUS);
+
+	if (priv->dir == CIX_MBOX_RX) {
+		/* FIFO full/watermark interrupt is generated */
+		if (int_status & (CIX_FIFO_FULL_INT | CIX_FIFO_WM01_INT)) {
+			u32 data[CIX_MBOX_MSG_WORDS] = { 0 }, i = 0;
+
+			cix_mbox_write(priv, (CIX_FIFO_FULL_INT | CIX_FIFO_WM01_INT),
+				       CIX_INT_CLEAR);
+			do {
+				data[i++] = cix_mbox_read(priv, CIX_FIFO_RD);
+			} while (!mbox_fifo_empty(chan) && i < CIX_MBOX_MSG_WORDS);
+
+			mbox_chan_received_data(chan, data);
+		}
+		/* FIFO underflow is generated */
+		if (int_status & CIX_FIFO_UFLOW_INT) {
+			status = cix_mbox_read(priv, CIX_FIFO_STAS);
+			dev_err(priv->dev, "fifo underflow: int_stats %d\n", status);
+			cix_mbox_write(priv, CIX_FIFO_UFLOW_INT, CIX_INT_CLEAR);
+		}
+	} else {
+		/* FIFO empty interrupt is generated */
+		if (int_status & CIX_FIFO_EMPTY_INT) {
+			u32 val;
+
+			cix_mbox_write(priv, CIX_FIFO_EMPTY_INT, CIX_INT_CLEAR);
+			/* Disable empty irq */
+			val = cix_mbox_read(priv, CIX_INT_ENABLE);
+			val &= ~CIX_FIFO_EMPTY_INT;
+			cix_mbox_write(priv, val, CIX_INT_ENABLE);
+			mbox_chan_txdone(chan, 0);
+		}
+		/* FIFO overflow is generated */
+		if (int_status & CIX_FIFO_OFLOW_INT) {
+			status = cix_mbox_read(priv, CIX_FIFO_STAS);
+			dev_err(priv->dev, "fifo overflow: int_stats %d\n", status);
+			cix_mbox_write(priv, CIX_FIFO_OFLOW_INT, CIX_INT_CLEAR);
+		}
+	}
+}
+
+static void cix_mbox_isr_fast(struct mbox_chan *chan)
+{
+	struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+	struct cix_mbox_con_priv *cp = chan->con_priv;
+	u32 int_status, data;
+
+	/* no irq will be triggered for a TX dir mbox */
+	if (priv->dir != CIX_MBOX_RX)
+		return;
+
+	int_status = cix_mbox_read(priv, CIX_INT_STATUS);
+
+	if (int_status & CIX_FAST_CH_INT(cp->index)) {
+		cix_mbox_write(priv, CIX_FAST_CH_INT(cp->index), CIX_INT_CLEAR);
+		data = cix_mbox_read(priv, CIX_REG_F_INT(cp->index));
+		mbox_chan_received_data(chan, &data);
+	}
+}
+
+static irqreturn_t cix_mbox_isr(int irq, void *arg)
+{
+	struct mbox_chan *chan = arg;
+	struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+	struct cix_mbox_con_priv *cp = chan->con_priv;
+
+	switch (cp->type) {
+	case CIX_MBOX_TYPE_DB:
+		cix_mbox_isr_db(chan);
+		break;
+	case CIX_MBOX_TYPE_REG:
+		cix_mbox_isr_reg(chan);
+		break;
+	case CIX_MBOX_TYPE_FIFO:
+		cix_mbox_isr_fifo(chan);
+		break;
+	case CIX_MBOX_TYPE_FAST:
+		cix_mbox_isr_fast(chan);
+		break;
+	default:
+		dev_err(priv->dev, "Invalid channel type: %d\n", cp->type);
+		return IRQ_NONE;
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int cix_mbox_startup(struct mbox_chan *chan)
+{
+	struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+	struct cix_mbox_con_priv *cp = chan->con_priv;
+	int index = cp->index, ret;
+	u32 val;
+
+	ret = request_irq(priv->irq, cix_mbox_isr, 0,
+			  dev_name(priv->dev), chan);
+	if (ret) {
+		dev_err(priv->dev, "Unable to acquire IRQ %d\n", priv->irq);
+		return ret;
+	}
+
+	switch (cp->type) {
+	case CIX_MBOX_TYPE_DB:
+		/* Overwrite txdone_method for DB channel */
+		chan->txdone_method = TXDONE_BY_ACK;
+		fallthrough;
+	case CIX_MBOX_TYPE_REG:
+		if (priv->dir == CIX_MBOX_TX) {
+			/* Enable ACK interrupt */
+			val = cix_mbox_read(priv, CIX_INT_ENABLE);
+			val |= CIX_ACK_INT;
+			cix_mbox_write(priv, val, CIX_INT_ENABLE);
+		} else {
+			/* Enable Doorbell interrupt */
+			val = cix_mbox_read(priv, CIX_INT_ENABLE_SIDE_B);
+			val |= CIX_DB_INT;
+			cix_mbox_write(priv, val, CIX_INT_ENABLE_SIDE_B);
+		}
+		break;
+	case CIX_MBOX_TYPE_FIFO:
+		/* reset fifo */
+		cix_mbox_write(priv, CIX_FIFO_RST_BIT, CIX_FIFO_RST);
+		/* set default watermark */
+		cix_mbox_write(priv, CIX_FIFO_WM_DEFAULT, CIX_FIFO_WM);
+		if (priv->dir == CIX_MBOX_TX) {
+			/* Enable fifo overflow interrupt */
+			val = cix_mbox_read(priv, CIX_INT_ENABLE);
+			val |= CIX_FIFO_OFLOW_INT;
+			cix_mbox_write(priv, val, CIX_INT_ENABLE);
+		} else {
+			/* Enable fifo watermark/underflow interrupts */
+			val = cix_mbox_read(priv, CIX_INT_ENABLE_SIDE_B);
+			val |= CIX_FIFO_UFLOW_INT | CIX_FIFO_WM01_INT;
+			cix_mbox_write(priv, val, CIX_INT_ENABLE_SIDE_B);
+		}
+		break;
+	case CIX_MBOX_TYPE_FAST:
+		/* Only the RX channel has an interrupt */
+		if (priv->dir == CIX_MBOX_RX) {
+			if (index < 0 || index > CIX_MBOX_FAST_IDX) {
+				dev_err(priv->dev, "Invalid index %d\n", index);
+				ret = -EINVAL;
+				goto failed;
+			}
+			/* enable fast channel interrupt */
+			val = cix_mbox_read(priv, CIX_INT_ENABLE_SIDE_B);
+			val |= CIX_FAST_CH_INT(index);
+			cix_mbox_write(priv, val, CIX_INT_ENABLE_SIDE_B);
+		}
+		break;
+	default:
+		dev_err(priv->dev, "Invalid channel type: %d\n", cp->type);
+		ret = -EINVAL;
+		goto failed;
+	}
+	return 0;
+
+failed:
+	free_irq(priv->irq, chan);
+	return ret;
+}
+
+static void cix_mbox_shutdown(struct mbox_chan *chan)
+{
+	struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+	struct cix_mbox_con_priv *cp = chan->con_priv;
+	int index = cp->index;
+	u32 val;
+
+	switch (cp->type) {
+	case CIX_MBOX_TYPE_DB:
+	case CIX_MBOX_TYPE_REG:
+		if (priv->dir == CIX_MBOX_TX) {
+			/* Disable ACK interrupt */
+			val = cix_mbox_read(priv, CIX_INT_ENABLE);
+			val &= ~CIX_ACK_INT;
+			cix_mbox_write(priv, val, CIX_INT_ENABLE);
+		} else if (priv->dir == CIX_MBOX_RX) {
+			/* Disable Doorbell interrupt */
+			val = cix_mbox_read(priv, CIX_INT_ENABLE_SIDE_B);
+			val &= ~CIX_DB_INT;
+			cix_mbox_write(priv, val, CIX_INT_ENABLE_SIDE_B);
+		}
+		break;
+	case CIX_MBOX_TYPE_FIFO:
+		if (priv->dir == CIX_MBOX_TX) {
+			/* Disable empty/fifo overflow irq */
+			val = cix_mbox_read(priv, CIX_INT_ENABLE);
+			val &= ~(CIX_FIFO_EMPTY_INT | CIX_FIFO_OFLOW_INT);
+			cix_mbox_write(priv, val, CIX_INT_ENABLE);
+		} else if (priv->dir == CIX_MBOX_RX) {
+			/* Disable fifo WM01/underflow interrupt */
+			val = cix_mbox_read(priv, CIX_INT_ENABLE_SIDE_B);
+			val &= ~(CIX_FIFO_UFLOW_INT | CIX_FIFO_WM01_INT);
+			cix_mbox_write(priv, val, CIX_INT_ENABLE_SIDE_B);
+		}
+		break;
+	case CIX_MBOX_TYPE_FAST:
+		if (priv->dir == CIX_MBOX_RX) {
+			if (index < 0 || index > CIX_MBOX_FAST_IDX) {
+				dev_err(priv->dev, "Invalid index %d\n", index);
+				break;
+			}
+			/* Disable fast channel interrupt */
+			val = cix_mbox_read(priv, CIX_INT_ENABLE_SIDE_B);
+			val &= ~CIX_FAST_CH_INT(index);
+			cix_mbox_write(priv, val, CIX_INT_ENABLE_SIDE_B);
+		}
+		break;
+
+	default:
+		dev_err(priv->dev, "Invalid channel type: %d\n", cp->type);
+		break;
+	}
+
+	free_irq(priv->irq, chan);
+}
+
+static const struct mbox_chan_ops cix_mbox_chan_ops = {
+	.send_data = cix_mbox_send_data,
+	.startup = cix_mbox_startup,
+	.shutdown = cix_mbox_shutdown,
+};
+
+static void cix_mbox_init(struct cix_mbox_priv *priv)
+{
+	struct cix_mbox_con_priv *cp;
+	int i;
+
+	for (i = 0; i < CIX_MBOX_CHANS; i++) {
+		cp = &priv->con_priv[i];
+		cp->index = i;
+		cp->chan = &priv->mbox_chans[i];
+		priv->mbox_chans[i].con_priv = cp;
+		if (cp->index <= CIX_MBOX_FAST_IDX)
+			cp->type = CIX_MBOX_TYPE_FAST;
+		if (cp->index == CIX_MBOX_DB_IDX)
+			cp->type = CIX_MBOX_TYPE_DB;
+		if (cp->index == CIX_MBOX_FIFO_IDX)
+			cp->type = CIX_MBOX_TYPE_FIFO;
+		if (cp->index == CIX_MBOX_REG_IDX)
+			cp->type = CIX_MBOX_TYPE_REG;
+	}
+}
+
+static int cix_mbox_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct cix_mbox_priv *priv;
+	struct resource *res;
+	const char *dir_str;
+	int ret;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->dev = dev;
+	priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+	if (IS_ERR(priv->base))
+		return PTR_ERR(priv->base);
+
+	/*
+	 * The first 0x80 bytes of the register space of the cix mailbox controller
+	 * can be used as shared memory for clients. When this shared memory is in
+	 * use, the base address of the mailbox is offset by 0x80. Therefore, when
+	 * performing subsequent read/write operations, it is necessary to subtract
+	 * the offset CIX_SHMEM_OFFSET.
+	 *
+	 * When the base address of the mailbox is offset by 0x80, it indicates
+	 * that shmem is in use.
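+	 *
+	 * An illustrative example (addresses are hypothetical): with
+	 * reg = <0x08100080 0x10000>, bit 0x80 is set in res->start,
+	 * so use_shmem is true and cix_mbox_read()/cix_mbox_write()
+	 * subtract CIX_SHMEM_OFFSET to reach the real register block.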
+	 */
+	priv->use_shmem = !!(res->start & CIX_SHMEM_OFFSET);
+
+	priv->irq = platform_get_irq(pdev, 0);
+	if (priv->irq < 0)
+		return priv->irq;
+
+	if (device_property_read_string(dev, "cix,mbox-dir", &dir_str)) {
+		dev_err(priv->dev, "cix,mbox-dir property not found\n");
+		return -EINVAL;
+	}
+
+	if (!strcmp(dir_str, "tx")) {
+		priv->dir = CIX_MBOX_TX;
+	} else if (!strcmp(dir_str, "rx")) {
+		priv->dir = CIX_MBOX_RX;
+	} else {
+		dev_err(priv->dev, "cix,mbox-dir=%s is not expected\n", dir_str);
+		return -EINVAL;
+	}
+
+	cix_mbox_init(priv);
+
+	priv->mbox.dev = dev;
+	priv->mbox.ops = &cix_mbox_chan_ops;
+	priv->mbox.chans = priv->mbox_chans;
+	priv->mbox.txdone_irq = true;
+	priv->mbox.num_chans = CIX_MBOX_CHANS;
+	priv->mbox.of_xlate = NULL;
+
+	platform_set_drvdata(pdev, priv);
+	ret = devm_mbox_controller_register(dev, &priv->mbox);
+	if (ret)
+		dev_err(dev, "Failed to register mailbox: %d\n", ret);
+
+	return ret;
+}
+
+static const struct of_device_id cix_mbox_dt_ids[] = {
+	{ .compatible = "cix,sky1-mbox" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, cix_mbox_dt_ids);
+
+static struct platform_driver cix_mbox_driver = {
+	.probe = cix_mbox_probe,
+	.driver = {
+		.name = "cix_mbox",
+		.of_match_table = cix_mbox_dt_ids,
+	},
+};
+
+static int __init cix_mailbox_init(void)
+{
+	return platform_driver_register(&cix_mbox_driver);
+}
+arch_initcall(cix_mailbox_init);
+
+MODULE_AUTHOR("Cix Technology Group Co., Ltd.");
+MODULE_DESCRIPTION("CIX mailbox driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mailbox/cv1800-mailbox.c b/drivers/mailbox/cv1800-mailbox.c
new file mode 100644
index 000000000000..4761191acf78
--- /dev/null
+++ b/drivers/mailbox/cv1800-mailbox.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2024 Sophgo Technology Inc.
+ * Copyright (C) 2024 Yuntao Dai <d1581209858@live.com>
+ * Copyright (C) 2025 Junhui Liu <junhui.liu@pigmoral.tech>
+ */
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kfifo.h>
+#include <linux/mailbox_client.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define RECV_CPU	1
+
+#define MAILBOX_MAX_CHAN	8
+#define MAILBOX_MSG_LEN		8
+
+#define MBOX_EN_REG(cpu)	(cpu << 2)
+#define MBOX_DONE_REG(cpu)	((cpu << 2) + 2)
+#define MBOX_SET_CLR_REG(cpu)	(0x10 + (cpu << 4))
+#define MBOX_SET_INT_REG(cpu)	(0x18 + (cpu << 4))
+#define MBOX_SET_REG		0x60
+
+#define MAILBOX_CONTEXT_OFFSET	0x0400
+#define MAILBOX_CONTEXT_SIZE	0x0040
+
+#define MBOX_CONTEXT_BASE_INDEX(base, index) \
+	((u64 __iomem *)(base + MAILBOX_CONTEXT_OFFSET) + index)
+
+/**
+ * struct cv1800_mbox_chan_priv - cv1800 mailbox channel private data
+ * @idx: index of channel
+ * @cpu: processor the channel sends to
+ */
+struct cv1800_mbox_chan_priv {
+	int idx;
+	int cpu;
+};
+
+struct cv1800_mbox {
+	struct mbox_controller mbox;
+	struct cv1800_mbox_chan_priv priv[MAILBOX_MAX_CHAN];
+	struct mbox_chan chans[MAILBOX_MAX_CHAN];
+	u64 __iomem *content[MAILBOX_MAX_CHAN];
+	void __iomem *mbox_base;
+	int recvid;
+};
+
+static irqreturn_t cv1800_mbox_isr(int irq, void *dev_id)
+{
+	struct cv1800_mbox *mbox = (struct cv1800_mbox *)dev_id;
+	size_t i;
+	u64 msg;
+	int ret = IRQ_NONE;
+
+	for (i = 0; i < MAILBOX_MAX_CHAN; i++) {
+		if (mbox->content[i] && mbox->chans[i].cl) {
+			memcpy_fromio(&msg, mbox->content[i], MAILBOX_MSG_LEN);
+			mbox->content[i] = NULL;
+			mbox_chan_received_data(&mbox->chans[i], (void *)&msg);
+			ret = 
IRQ_HANDLED; + } + } + + return ret; +} + +static irqreturn_t cv1800_mbox_irq(int irq, void *dev_id) +{ + struct cv1800_mbox *mbox = (struct cv1800_mbox *)dev_id; + u8 set, valid; + size_t i; + int ret = IRQ_NONE; + + set = readb(mbox->mbox_base + MBOX_SET_INT_REG(RECV_CPU)); + + if (!set) + return ret; + + for (i = 0; i < MAILBOX_MAX_CHAN; i++) { + valid = set & BIT(i); + if (valid) { + mbox->content[i] = + MBOX_CONTEXT_BASE_INDEX(mbox->mbox_base, i); + writeb(valid, mbox->mbox_base + + MBOX_SET_CLR_REG(RECV_CPU)); + writeb(~valid, mbox->mbox_base + MBOX_EN_REG(RECV_CPU)); + ret = IRQ_WAKE_THREAD; + } + } + + return ret; +} + +static int cv1800_mbox_send_data(struct mbox_chan *chan, void *data) +{ + struct cv1800_mbox_chan_priv *priv = + (struct cv1800_mbox_chan_priv *)chan->con_priv; + struct cv1800_mbox *mbox = dev_get_drvdata(chan->mbox->dev); + int idx = priv->idx; + int cpu = priv->cpu; + u8 en, valid; + + memcpy_toio(MBOX_CONTEXT_BASE_INDEX(mbox->mbox_base, idx), + data, MAILBOX_MSG_LEN); + + valid = BIT(idx); + writeb(valid, mbox->mbox_base + MBOX_SET_CLR_REG(cpu)); + en = readb(mbox->mbox_base + MBOX_EN_REG(cpu)); + writeb(en | valid, mbox->mbox_base + MBOX_EN_REG(cpu)); + writeb(valid, mbox->mbox_base + MBOX_SET_REG); + + return 0; +} + +static bool cv1800_last_tx_done(struct mbox_chan *chan) +{ + struct cv1800_mbox_chan_priv *priv = + (struct cv1800_mbox_chan_priv *)chan->con_priv; + struct cv1800_mbox *mbox = dev_get_drvdata(chan->mbox->dev); + u8 en; + + en = readb(mbox->mbox_base + MBOX_EN_REG(priv->cpu)); + + return !(en & BIT(priv->idx)); +} + +static const struct mbox_chan_ops cv1800_mbox_chan_ops = { + .send_data = cv1800_mbox_send_data, + .last_tx_done = cv1800_last_tx_done, +}; + +static struct mbox_chan *cv1800_mbox_xlate(struct mbox_controller *mbox, + const struct of_phandle_args *spec) +{ + struct cv1800_mbox_chan_priv *priv; + + int idx = spec->args[0]; + int cpu = spec->args[1]; + + if (idx >= mbox->num_chans) + return ERR_PTR(-EINVAL); + + priv = mbox->chans[idx].con_priv; + priv->cpu = cpu; + + return &mbox->chans[idx]; +} + +static const struct of_device_id cv1800_mbox_of_match[] = { + { .compatible = "sophgo,cv1800b-mailbox", }, + {}, +}; +MODULE_DEVICE_TABLE(of, cv1800_mbox_of_match); + +static int cv1800_mbox_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct cv1800_mbox *mb; + int irq, idx, err; + + mb = devm_kzalloc(dev, sizeof(*mb), GFP_KERNEL); + if (!mb) + return -ENOMEM; + + mb->mbox_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(mb->mbox_base)) + return dev_err_probe(dev, PTR_ERR(mb->mbox_base), + "Failed to map resource\n"); + + mb->mbox.dev = dev; + mb->mbox.chans = mb->chans; + mb->mbox.txdone_poll = true; + mb->mbox.ops = &cv1800_mbox_chan_ops; + mb->mbox.num_chans = MAILBOX_MAX_CHAN; + mb->mbox.of_xlate = cv1800_mbox_xlate; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + err = devm_request_threaded_irq(dev, irq, cv1800_mbox_irq, + cv1800_mbox_isr, IRQF_ONESHOT, + dev_name(&pdev->dev), mb); + if (err < 0) + return dev_err_probe(dev, err, "Failed to register irq\n"); + + for (idx = 0; idx < MAILBOX_MAX_CHAN; idx++) { + mb->priv[idx].idx = idx; + mb->mbox.chans[idx].con_priv = &mb->priv[idx]; + } + + platform_set_drvdata(pdev, mb); + + err = devm_mbox_controller_register(dev, &mb->mbox); + if (err) + return dev_err_probe(dev, err, "Failed to register mailbox\n"); + + return 0; +} + +static struct platform_driver cv1800_mbox_driver = { + .driver = { + .name = "cv1800-mbox", + 
.of_match_table = cv1800_mbox_of_match, + }, + .probe = cv1800_mbox_probe, +}; + +module_platform_driver(cv1800_mbox_driver); + +MODULE_DESCRIPTION("cv1800 mailbox driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mailbox/exynos-mailbox.c b/drivers/mailbox/exynos-mailbox.c new file mode 100644 index 000000000000..2320649bf60c --- /dev/null +++ b/drivers/mailbox/exynos-mailbox.c @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2020 Samsung Electronics Co., Ltd. + * Copyright 2020 Google LLC. + * Copyright 2024 Linaro Ltd. + */ + +#include <linux/bitops.h> +#include <linux/bits.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/mailbox_controller.h> +#include <linux/mailbox/exynos-message.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#define EXYNOS_MBOX_MCUCTRL 0x0 /* Mailbox Control Register */ +#define EXYNOS_MBOX_INTCR0 0x24 /* Interrupt Clear Register 0 */ +#define EXYNOS_MBOX_INTMR0 0x28 /* Interrupt Mask Register 0 */ +#define EXYNOS_MBOX_INTSR0 0x2c /* Interrupt Status Register 0 */ +#define EXYNOS_MBOX_INTMSR0 0x30 /* Interrupt Mask Status Register 0 */ +#define EXYNOS_MBOX_INTGR1 0x40 /* Interrupt Generation Register 1 */ +#define EXYNOS_MBOX_INTMR1 0x48 /* Interrupt Mask Register 1 */ +#define EXYNOS_MBOX_INTSR1 0x4c /* Interrupt Status Register 1 */ +#define EXYNOS_MBOX_INTMSR1 0x50 /* Interrupt Mask Status Register 1 */ + +#define EXYNOS_MBOX_INTMR0_MASK GENMASK(15, 0) +#define EXYNOS_MBOX_INTGR1_MASK GENMASK(15, 0) + +#define EXYNOS_MBOX_CHAN_COUNT HWEIGHT32(EXYNOS_MBOX_INTGR1_MASK) + +/** + * struct exynos_mbox - driver's private data. + * @regs: mailbox registers base address. + * @mbox: pointer to the mailbox controller. + * @pclk: pointer to the mailbox peripheral clock. + */ +struct exynos_mbox { + void __iomem *regs; + struct mbox_controller *mbox; + struct clk *pclk; +}; + +static int exynos_mbox_send_data(struct mbox_chan *chan, void *data) +{ + struct device *dev = chan->mbox->dev; + struct exynos_mbox *exynos_mbox = dev_get_drvdata(dev); + struct exynos_mbox_msg *msg = data; + + if (msg->chan_id >= exynos_mbox->mbox->num_chans) { + dev_err(dev, "Invalid channel ID %d\n", msg->chan_id); + return -EINVAL; + } + + if (msg->chan_type != EXYNOS_MBOX_CHAN_TYPE_DOORBELL) { + dev_err(dev, "Unsupported channel type [%d]\n", msg->chan_type); + return -EINVAL; + } + + writel(BIT(msg->chan_id), exynos_mbox->regs + EXYNOS_MBOX_INTGR1); + + return 0; +} + +static const struct mbox_chan_ops exynos_mbox_chan_ops = { + .send_data = exynos_mbox_send_data, +}; + +static struct mbox_chan *exynos_mbox_of_xlate(struct mbox_controller *mbox, + const struct of_phandle_args *sp) +{ + int i; + + if (sp->args_count != 0) + return ERR_PTR(-EINVAL); + + /* + * Return the first available channel. When we don't pass the + * channel ID from device tree, each channel populated by the driver is + * just a software construct or a virtual channel. We use 'void *data' + * in send_data() to pass the channel identifiers. 
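+ *
+ * For example (illustrative, client side): a client fills in
+ * struct exynos_mbox_msg msg = { .chan_id = 4,
+ *	.chan_type = EXYNOS_MBOX_CHAN_TYPE_DOORBELL };
+ * and passes &msg to mbox_send_message(); exynos_mbox_send_data()
+ * then rings doorbell bit 4 in INTGR1, whichever virtual channel
+ * was returned here.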
+ */ + for (i = 0; i < mbox->num_chans; i++) + if (mbox->chans[i].cl == NULL) + return &mbox->chans[i]; + return ERR_PTR(-EINVAL); +} + +static const struct of_device_id exynos_mbox_match[] = { + { .compatible = "google,gs101-mbox" }, + {}, +}; +MODULE_DEVICE_TABLE(of, exynos_mbox_match); + +static int exynos_mbox_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct exynos_mbox *exynos_mbox; + struct mbox_controller *mbox; + struct mbox_chan *chans; + int i; + + exynos_mbox = devm_kzalloc(dev, sizeof(*exynos_mbox), GFP_KERNEL); + if (!exynos_mbox) + return -ENOMEM; + + mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL); + if (!mbox) + return -ENOMEM; + + chans = devm_kcalloc(dev, EXYNOS_MBOX_CHAN_COUNT, sizeof(*chans), + GFP_KERNEL); + if (!chans) + return -ENOMEM; + + exynos_mbox->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(exynos_mbox->regs)) + return PTR_ERR(exynos_mbox->regs); + + exynos_mbox->pclk = devm_clk_get_enabled(dev, "pclk"); + if (IS_ERR(exynos_mbox->pclk)) + return dev_err_probe(dev, PTR_ERR(exynos_mbox->pclk), + "Failed to enable clock.\n"); + + mbox->num_chans = EXYNOS_MBOX_CHAN_COUNT; + mbox->chans = chans; + mbox->dev = dev; + mbox->ops = &exynos_mbox_chan_ops; + mbox->of_xlate = exynos_mbox_of_xlate; + + for (i = 0; i < EXYNOS_MBOX_CHAN_COUNT; i++) + chans[i].mbox = mbox; + + exynos_mbox->mbox = mbox; + + platform_set_drvdata(pdev, exynos_mbox); + + /* Mask out all interrupts. We support just polling channels for now. */ + writel(EXYNOS_MBOX_INTMR0_MASK, exynos_mbox->regs + EXYNOS_MBOX_INTMR0); + + return devm_mbox_controller_register(dev, mbox); +} + +static struct platform_driver exynos_mbox_driver = { + .probe = exynos_mbox_probe, + .driver = { + .name = "exynos-acpm-mbox", + .of_match_table = exynos_mbox_match, + }, +}; +module_platform_driver(exynos_mbox_driver); + +MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@linaro.org>"); +MODULE_DESCRIPTION("Samsung Exynos mailbox driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mailbox/hi3660-mailbox.c b/drivers/mailbox/hi3660-mailbox.c index 53f4bc2488c5..17c29e960fbf 100644 --- a/drivers/mailbox/hi3660-mailbox.c +++ b/drivers/mailbox/hi3660-mailbox.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -// Copyright (c) 2017-2018 Hisilicon Limited. +// Copyright (c) 2017-2018 HiSilicon Limited. // Copyright (c) 2017-2018 Linaro Limited. #include <linux/bitops.h> @@ -11,6 +11,7 @@ #include <linux/iopoll.h> #include <linux/mailbox_controller.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/platform_device.h> #include <linux/slab.h> @@ -44,14 +45,13 @@ #define MBOX_MSG_LEN 8 /** - * Hi3660 mailbox channel information + * struct hi3660_chan_info - Hi3660 mailbox channel information + * @dst_irq: Interrupt vector for remote processor + * @ack_irq: Interrupt vector for local processor * * A channel can be used for TX or RX, it can trigger remote * processor interrupt to notify remote processor and can receive - * interrupt if has incoming message. - * - * @dst_irq: Interrupt vector for remote processor - * @ack_irq: Interrupt vector for local processor + * interrupt if it has an incoming message. */ struct hi3660_chan_info { unsigned int dst_irq; @@ -59,16 +59,15 @@ struct hi3660_chan_info { }; /** - * Hi3660 mailbox controller data - * - * Mailbox controller includes 32 channels and can allocate - * channel for message transferring. 
- * + * struct hi3660_mbox - Hi3660 mailbox controller data * @dev: Device to which it is attached * @base: Base address of the register mapping region * @chan: Representation of channels in mailbox controller * @mchan: Representation of channel info * @controller: Representation of a communication channel controller + * + * Mailbox controller includes 32 channels and can allocate + * channel for message transferring. */ struct hi3660_mbox { struct device *dev; @@ -240,7 +239,6 @@ static int hi3660_mbox_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct hi3660_mbox *mbox; struct mbox_chan *chan; - struct resource *res; unsigned long ch; int err; @@ -248,8 +246,7 @@ static int hi3660_mbox_probe(struct platform_device *pdev) if (!mbox) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - mbox->base = devm_ioremap_resource(dev, res); + mbox->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(mbox->base)) return PTR_ERR(mbox->base); diff --git a/drivers/mailbox/hi6220-mailbox.c b/drivers/mailbox/hi6220-mailbox.c index c32cbfaf223a..f77741ce42e7 100644 --- a/drivers/mailbox/hi6220-mailbox.c +++ b/drivers/mailbox/hi6220-mailbox.c @@ -1,20 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Hisilicon's Hi6220 mailbox driver * - * Copyright (c) 2015 Hisilicon Limited. + * Copyright (c) 2015 HiSilicon Limited. * Copyright (c) 2015 Linaro Limited. * * Author: Leo Yan <leo.yan@linaro.org> - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * */ #include <linux/device.h> @@ -24,6 +15,7 @@ #include <linux/kfifo.h> #include <linux/mailbox_controller.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/platform_device.h> #include <linux/slab.h> @@ -273,7 +265,6 @@ static int hi6220_mbox_probe(struct platform_device *pdev) struct device_node *node = pdev->dev.of_node; struct device *dev = &pdev->dev; struct hi6220_mbox *mbox; - struct resource *res; int i, err; mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL); @@ -296,15 +287,13 @@ static int hi6220_mbox_probe(struct platform_device *pdev) if (mbox->irq < 0) return mbox->irq; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - mbox->ipc = devm_ioremap_resource(dev, res); + mbox->ipc = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(mbox->ipc)) { dev_err(dev, "ioremap ipc failed\n"); return PTR_ERR(mbox->ipc); } - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - mbox->base = devm_ioremap_resource(dev, res); + mbox->base = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(mbox->base)) { dev_err(dev, "ioremap buffer failed\n"); return PTR_ERR(mbox->base); @@ -337,10 +326,7 @@ static int hi6220_mbox_probe(struct platform_device *pdev) writel(~0x0, ACK_INT_CLR_REG(mbox->ipc)); /* use interrupt for tx's ack */ - if (of_find_property(node, "hi6220,mbox-tx-noirq", NULL)) - mbox->tx_irq_mode = false; - else - mbox->tx_irq_mode = true; + mbox->tx_irq_mode = !of_property_read_bool(node, "hi6220,mbox-tx-noirq"); if (mbox->tx_irq_mode) mbox->controller.txdone_irq = true; @@ -363,7 +349,6 @@ static int hi6220_mbox_probe(struct platform_device *pdev) static struct platform_driver hi6220_mbox_driver = { .driver = { .name = "hi6220-mbox", - .owner = THIS_MODULE, .of_match_table = hi6220_mbox_of_match, }, .probe = hi6220_mbox_probe, diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c index 774362a05159..6778afc64a04 100644 --- a/drivers/mailbox/imx-mailbox.c +++ b/drivers/mailbox/imx-mailbox.c @@ -1,47 +1,79 @@ // SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2018 Pengutronix, Oleksij Rempel <o.rempel@pengutronix.de> + * Copyright 2022 NXP, Peng Fan <peng.fan@nxp.com> */ +#include <linux/bitfield.h> #include <linux/clk.h> +#include <linux/firmware/imx/ipc.h> +#include <linux/firmware/imx/s4.h> #include <linux/interrupt.h> #include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/mailbox_controller.h> #include <linux/module.h> -#include <linux/of_device.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/suspend.h> #include <linux/slab.h> +#include <linux/workqueue.h> -/* Transmit Register */ -#define IMX_MU_xTRn(x) (0x00 + 4 * (x)) -/* Receive Register */ -#define IMX_MU_xRRn(x) (0x10 + 4 * (x)) -/* Status Register */ -#define IMX_MU_xSR 0x20 -#define IMX_MU_xSR_GIPn(x) BIT(28 + (3 - (x))) -#define IMX_MU_xSR_RFn(x) BIT(24 + (3 - (x))) -#define IMX_MU_xSR_TEn(x) BIT(20 + (3 - (x))) -#define IMX_MU_xSR_BRDIP BIT(9) - -/* Control Register */ -#define IMX_MU_xCR 0x24 -/* General Purpose Interrupt Enable */ -#define IMX_MU_xCR_GIEn(x) BIT(28 + (3 - (x))) -/* Receive Interrupt Enable */ -#define IMX_MU_xCR_RIEn(x) BIT(24 + (3 - (x))) -/* Transmit Interrupt Enable */ -#define IMX_MU_xCR_TIEn(x) BIT(20 + (3 - (x))) -/* General Purpose Interrupt Request */ -#define IMX_MU_xCR_GIRn(x) BIT(16 + (3 - (x))) +#include "mailbox.h" + +#define IMX_MU_CHANS 24 +/* TX0/RX0/RXDB[0-3] */ +#define 
IMX_MU_SCU_CHANS	6
+/* TX0/RX0 */
+#define IMX_MU_S4_CHANS		2
+#define IMX_MU_CHAN_NAME_SIZE	32
+
+#define IMX_MU_V2_PAR_OFF	0x4
+#define IMX_MU_V2_TR_MASK	GENMASK(7, 0)
+#define IMX_MU_V2_RR_MASK	GENMASK(15, 8)
 
-#define IMX_MU_CHANS		16
-#define IMX_MU_CHAN_NAME_SIZE	20
+#define IMX_MU_SECO_TX_TOUT	(msecs_to_jiffies(3000))
+#define IMX_MU_SECO_RX_TOUT	(msecs_to_jiffies(3000))
 
+/* Please do not change the order of TX & RX */
 enum imx_mu_chan_type {
-	IMX_MU_TYPE_TX,		/* Tx */
-	IMX_MU_TYPE_RX,		/* Rx */
-	IMX_MU_TYPE_TXDB,	/* Tx doorbell */
-	IMX_MU_TYPE_RXDB,	/* Rx doorbell */
+	IMX_MU_TYPE_TX		= 0, /* Tx */
+	IMX_MU_TYPE_RX		= 1, /* Rx */
+	IMX_MU_TYPE_TXDB	= 2, /* Tx doorbell */
+	IMX_MU_TYPE_RXDB	= 3, /* Rx doorbell */
+	IMX_MU_TYPE_RST		= 4, /* Reset */
+	IMX_MU_TYPE_TXDB_V2	= 5, /* Tx doorbell with S/W ACK */
+};
+
+enum imx_mu_xcr {
+	IMX_MU_CR,
+	IMX_MU_GIER,
+	IMX_MU_GCR,
+	IMX_MU_TCR,
+	IMX_MU_RCR,
+	IMX_MU_xCR_MAX,
+};
+
+enum imx_mu_xsr {
+	IMX_MU_SR,
+	IMX_MU_GSR,
+	IMX_MU_TSR,
+	IMX_MU_RSR,
+	IMX_MU_xSR_MAX,
+};
+
+struct imx_sc_rpc_msg_max {
+	struct imx_sc_rpc_msg hdr;
+	u32 data[30];
+};
+
+struct imx_s4_rpc_msg_max {
+	struct imx_s4_rpc_msg hdr;
+	u32 data[254];
+};
 
 struct imx_mu_con_priv {
@@ -49,24 +81,66 @@ struct imx_mu_con_priv {
 	char			irq_desc[IMX_MU_CHAN_NAME_SIZE];
 	enum imx_mu_chan_type	type;
 	struct mbox_chan	*chan;
-	struct tasklet_struct	txdb_tasklet;
+	struct work_struct	txdb_work;
 };
 
 struct imx_mu_priv {
 	struct device		*dev;
 	void __iomem		*base;
+	void			*msg;
 	spinlock_t		xcr_lock; /* control register lock */
 
 	struct mbox_controller	mbox;
 	struct mbox_chan	mbox_chans[IMX_MU_CHANS];
 
 	struct imx_mu_con_priv  con_priv[IMX_MU_CHANS];
+	const struct imx_mu_dcfg	*dcfg;
 	struct clk		*clk;
-	int			irq;
-
+	int			irq[IMX_MU_CHANS];
+	bool			suspend;
 	bool			side_b;
+
+	u32			xcr[IMX_MU_xCR_MAX];
+	u32			num_tr;
+	u32			num_rr;
+};
+
+enum imx_mu_type {
+	IMX_MU_V1,
+	IMX_MU_V2 = BIT(1),
+	IMX_MU_V2_S4 = BIT(15),
+	IMX_MU_V2_IRQ = BIT(16),
 };
 
+struct imx_mu_dcfg {
+	int (*tx)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp, void *data);
+	int (*rx)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp);
+	int (*rxdb)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp);
+	int (*init)(struct imx_mu_priv *priv);
+	enum imx_mu_type type;
+	u32	xTR;		/* Transmit Register0 */
+	u32	xRR;		/* Receive Register0 */
+	u32	xSR[IMX_MU_xSR_MAX];	/* Status Registers */
+	u32	xCR[IMX_MU_xCR_MAX];	/* Control Registers */
+};
+
+#define IMX_MU_xSR_GIPn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(28 + (3 - (x))))
+#define IMX_MU_xSR_RFn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))
+#define IMX_MU_xSR_TEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(20 + (3 - (x))))
+
+/* General Purpose Interrupt Enable */
+#define IMX_MU_xCR_GIEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(28 + (3 - (x))))
+/* Receive Interrupt Enable */
+#define IMX_MU_xCR_RIEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))
+/* Transmit Interrupt Enable */
+#define IMX_MU_xCR_TIEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(20 + (3 - (x))))
+/* General Purpose Interrupt Request */
+#define IMX_MU_xCR_GIRn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(16 + (3 - (x))))
+/* MU reset */
+#define IMX_MU_xCR_RST(type) (type & IMX_MU_V2 ? BIT(0) : BIT(5))
+#define IMX_MU_xSR_RST(type) (type & IMX_MU_V2 ? 
BIT(0) : BIT(7)) + + static struct imx_mu_priv *to_imx_mu_priv(struct mbox_controller *mbox) { return container_of(mbox, struct imx_mu_priv, mbox); @@ -82,24 +156,355 @@ static u32 imx_mu_read(struct imx_mu_priv *priv, u32 offs) return ioread32(priv->base + offs); } -static u32 imx_mu_xcr_rmw(struct imx_mu_priv *priv, u32 set, u32 clr) +static int imx_mu_tx_waiting_write(struct imx_mu_priv *priv, u32 val, u32 idx) +{ + u64 timeout_time = get_jiffies_64() + IMX_MU_SECO_TX_TOUT; + u32 status; + u32 can_write; + + dev_dbg(priv->dev, "Trying to write %.8x to idx %d\n", val, idx); + + do { + status = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_TSR]); + can_write = status & IMX_MU_xSR_TEn(priv->dcfg->type, idx % 4); + } while (!can_write && time_is_after_jiffies64(timeout_time)); + + if (!can_write) { + dev_err(priv->dev, "timeout trying to write %.8x at %d(%.8x)\n", + val, idx, status); + return -ETIME; + } + + imx_mu_write(priv, val, priv->dcfg->xTR + (idx % 4) * 4); + + return 0; +} + +static int imx_mu_rx_waiting_read(struct imx_mu_priv *priv, u32 *val, u32 idx) +{ + u64 timeout_time = get_jiffies_64() + IMX_MU_SECO_RX_TOUT; + u32 status; + u32 can_read; + + dev_dbg(priv->dev, "Trying to read from idx %d\n", idx); + + do { + status = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_RSR]); + can_read = status & IMX_MU_xSR_RFn(priv->dcfg->type, idx % 4); + } while (!can_read && time_is_after_jiffies64(timeout_time)); + + if (!can_read) { + dev_err(priv->dev, "timeout trying to read idx %d (%.8x)\n", + idx, status); + return -ETIME; + } + + *val = imx_mu_read(priv, priv->dcfg->xRR + (idx % 4) * 4); + dev_dbg(priv->dev, "Read %.8x\n", *val); + + return 0; +} + +static u32 imx_mu_xcr_rmw(struct imx_mu_priv *priv, enum imx_mu_xcr type, u32 set, u32 clr) { unsigned long flags; u32 val; spin_lock_irqsave(&priv->xcr_lock, flags); - val = imx_mu_read(priv, IMX_MU_xCR); + val = imx_mu_read(priv, priv->dcfg->xCR[type]); val &= ~clr; val |= set; - imx_mu_write(priv, val, IMX_MU_xCR); + imx_mu_write(priv, val, priv->dcfg->xCR[type]); spin_unlock_irqrestore(&priv->xcr_lock, flags); return val; } -static void imx_mu_txdb_tasklet(unsigned long data) +static int imx_mu_generic_tx(struct imx_mu_priv *priv, + struct imx_mu_con_priv *cp, + void *data) { - struct imx_mu_con_priv *cp = (struct imx_mu_con_priv *)data; + u32 *arg = data; + u32 val; + int ret, count; + + switch (cp->type) { + case IMX_MU_TYPE_TX: + imx_mu_write(priv, *arg, priv->dcfg->xTR + cp->idx * 4); + imx_mu_xcr_rmw(priv, IMX_MU_TCR, IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx), 0); + break; + case IMX_MU_TYPE_TXDB: + imx_mu_xcr_rmw(priv, IMX_MU_GCR, IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx), 0); + queue_work(system_bh_wq, &cp->txdb_work); + break; + case IMX_MU_TYPE_TXDB_V2: + imx_mu_write(priv, IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx), + priv->dcfg->xCR[IMX_MU_GCR]); + ret = -ETIMEDOUT; + count = 0; + while (ret && (count < 10)) { + ret = + readl_poll_timeout(priv->base + priv->dcfg->xCR[IMX_MU_GCR], val, + !(val & IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx)), + 0, 10000); + + if (ret) { + dev_warn_ratelimited(priv->dev, + "channel type: %d timeout, %d times, retry\n", + cp->type, ++count); + } + } + break; + default: + dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type); + return -EINVAL; + } + + return 0; +} + +static int imx_mu_generic_rx(struct imx_mu_priv *priv, + struct imx_mu_con_priv *cp) +{ + u32 dat; + + dat = imx_mu_read(priv, priv->dcfg->xRR + (cp->idx) * 4); + mbox_chan_received_data(cp->chan, (void *)&dat); + 
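+	/*
+	 * Reading xRR above also acknowledges the request: the hardware
+	 * clears the matching RFn status bit once the receive register
+	 * has been read, so no explicit status write is needed here.
+	 */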
+ return 0; +} + +static int imx_mu_generic_rxdb(struct imx_mu_priv *priv, + struct imx_mu_con_priv *cp) +{ + imx_mu_write(priv, IMX_MU_xSR_GIPn(priv->dcfg->type, cp->idx), + priv->dcfg->xSR[IMX_MU_GSR]); + mbox_chan_received_data(cp->chan, NULL); + + return 0; +} + +static int imx_mu_specific_tx(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp, void *data) +{ + u32 *arg = data; + u32 num_tr = priv->num_tr; + int i, ret; + u32 xsr; + u32 size, max_size; + + if (priv->dcfg->type & IMX_MU_V2_S4) { + size = ((struct imx_s4_rpc_msg_max *)data)->hdr.size; + max_size = sizeof(struct imx_s4_rpc_msg_max); + } else { + size = ((struct imx_sc_rpc_msg_max *)data)->hdr.size; + max_size = sizeof(struct imx_sc_rpc_msg_max); + } + + switch (cp->type) { + case IMX_MU_TYPE_TX: + /* + * msg->hdr.size specifies the number of u32 words while + * sizeof yields bytes. + */ + + if (size > max_size / 4) { + /* + * The real message size can be different to + * struct imx_sc_rpc_msg_max/imx_s4_rpc_msg_max size + */ + dev_err(priv->dev, "Maximal message size (%u bytes) exceeded on TX; got: %i bytes\n", max_size, size << 2); + return -EINVAL; + } + + for (i = 0; i < num_tr && i < size; i++) + imx_mu_write(priv, *arg++, priv->dcfg->xTR + (i % num_tr) * 4); + for (; i < size; i++) { + ret = readl_poll_timeout(priv->base + priv->dcfg->xSR[IMX_MU_TSR], + xsr, + xsr & IMX_MU_xSR_TEn(priv->dcfg->type, i % num_tr), + 0, 5 * USEC_PER_SEC); + if (ret) { + dev_err(priv->dev, "Send data index: %d timeout\n", i); + return ret; + } + imx_mu_write(priv, *arg++, priv->dcfg->xTR + (i % num_tr) * 4); + } + + imx_mu_xcr_rmw(priv, IMX_MU_TCR, IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx), 0); + break; + default: + dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type); + return -EINVAL; + } + + return 0; +} + +static int imx_mu_specific_rx(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp) +{ + u32 *data; + int i, ret; + u32 xsr; + u32 size, max_size; + u32 num_rr = priv->num_rr; + + data = (u32 *)priv->msg; + + imx_mu_xcr_rmw(priv, IMX_MU_RCR, 0, IMX_MU_xCR_RIEn(priv->dcfg->type, 0)); + *data++ = imx_mu_read(priv, priv->dcfg->xRR); + + if (priv->dcfg->type & IMX_MU_V2_S4) { + size = ((struct imx_s4_rpc_msg_max *)priv->msg)->hdr.size; + max_size = sizeof(struct imx_s4_rpc_msg_max); + } else { + size = ((struct imx_sc_rpc_msg_max *)priv->msg)->hdr.size; + max_size = sizeof(struct imx_sc_rpc_msg_max); + } + + if (size > max_size / 4) { + dev_err(priv->dev, "Maximal message size (%u bytes) exceeded on RX; got: %i bytes\n", max_size, size << 2); + return -EINVAL; + } + + for (i = 1; i < size; i++) { + ret = readl_poll_timeout(priv->base + priv->dcfg->xSR[IMX_MU_RSR], xsr, + xsr & IMX_MU_xSR_RFn(priv->dcfg->type, i % num_rr), 0, + 5 * USEC_PER_SEC); + if (ret) { + dev_err(priv->dev, "timeout read idx %d\n", i); + return ret; + } + *data++ = imx_mu_read(priv, priv->dcfg->xRR + (i % num_rr) * 4); + } + + imx_mu_xcr_rmw(priv, IMX_MU_RCR, IMX_MU_xCR_RIEn(priv->dcfg->type, 0), 0); + mbox_chan_received_data(cp->chan, (void *)priv->msg); + + return 0; +} + +static int imx_mu_seco_tx(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp, + void *data) +{ + struct imx_sc_rpc_msg_max *msg = data; + u32 *arg = data; + u32 byte_size; + int err; + int i; + + dev_dbg(priv->dev, "Sending message\n"); + + switch (cp->type) { + case IMX_MU_TYPE_TXDB: + byte_size = msg->hdr.size * sizeof(u32); + if (byte_size > sizeof(*msg)) { + /* + * The real message size can be different to + * struct imx_sc_rpc_msg_max size + */ + 
dev_err(priv->dev, + "Exceed max msg size (%zu) on TX, got: %i\n", + sizeof(*msg), byte_size); + return -EINVAL; + } + + print_hex_dump_debug("from client ", DUMP_PREFIX_OFFSET, 4, 4, + data, byte_size, false); + + /* Send first word */ + dev_dbg(priv->dev, "Sending header\n"); + imx_mu_write(priv, *arg++, priv->dcfg->xTR); + + /* Send signaling */ + dev_dbg(priv->dev, "Sending signaling\n"); + imx_mu_xcr_rmw(priv, IMX_MU_GCR, + IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx), 0); + + /* Send words to fill the mailbox */ + for (i = 1; i < 4 && i < msg->hdr.size; i++) { + dev_dbg(priv->dev, "Sending word %d\n", i); + imx_mu_write(priv, *arg++, + priv->dcfg->xTR + (i % 4) * 4); + } + + /* Send rest of message waiting for remote read */ + for (; i < msg->hdr.size; i++) { + dev_dbg(priv->dev, "Sending word %d\n", i); + err = imx_mu_tx_waiting_write(priv, *arg++, i); + if (err) { + dev_err(priv->dev, "Timeout tx %d\n", i); + return err; + } + } + + /* Simulate hack for mbox framework */ + queue_work(system_bh_wq, &cp->txdb_work); + + break; + default: + dev_warn_ratelimited(priv->dev, + "Send data on wrong channel type: %d\n", + cp->type); + return -EINVAL; + } + + return 0; +} + +static int imx_mu_seco_rxdb(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp) +{ + struct imx_sc_rpc_msg_max msg; + u32 *data = (u32 *)&msg; + u32 byte_size; + int err = 0; + int i; + + dev_dbg(priv->dev, "Receiving message\n"); + + /* Read header */ + dev_dbg(priv->dev, "Receiving header\n"); + *data++ = imx_mu_read(priv, priv->dcfg->xRR); + byte_size = msg.hdr.size * sizeof(u32); + if (byte_size > sizeof(msg)) { + dev_err(priv->dev, "Exceed max msg size (%zu) on RX, got: %i\n", + sizeof(msg), byte_size); + err = -EINVAL; + goto error; + } + + /* Read message waiting they are written */ + for (i = 1; i < msg.hdr.size; i++) { + dev_dbg(priv->dev, "Receiving word %d\n", i); + err = imx_mu_rx_waiting_read(priv, data++, i); + if (err) { + dev_err(priv->dev, "Timeout rx %d\n", i); + goto error; + } + } + + /* Clear GIP */ + imx_mu_write(priv, IMX_MU_xSR_GIPn(priv->dcfg->type, cp->idx), + priv->dcfg->xSR[IMX_MU_GSR]); + + print_hex_dump_debug("to client ", DUMP_PREFIX_OFFSET, 4, 4, + &msg, byte_size, false); + + /* send data to client */ + dev_dbg(priv->dev, "Sending message to client\n"); + mbox_chan_received_data(cp->chan, (void *)&msg); + + goto exit; + +error: + mbox_chan_received_data(cp->chan, ERR_PTR(err)); + +exit: + return err; +} + +static void imx_mu_txdb_work(struct work_struct *t) +{ + struct imx_mu_con_priv *cp = from_work(cp, t, txdb_work); mbox_chan_txdone(cp->chan, 0); } @@ -109,45 +514,56 @@ static irqreturn_t imx_mu_isr(int irq, void *p) struct mbox_chan *chan = p; struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox); struct imx_mu_con_priv *cp = chan->con_priv; - u32 val, ctrl, dat; - - ctrl = imx_mu_read(priv, IMX_MU_xCR); - val = imx_mu_read(priv, IMX_MU_xSR); + u32 val, ctrl; switch (cp->type) { case IMX_MU_TYPE_TX: - val &= IMX_MU_xSR_TEn(cp->idx) & - (ctrl & IMX_MU_xCR_TIEn(cp->idx)); + ctrl = imx_mu_read(priv, priv->dcfg->xCR[IMX_MU_TCR]); + val = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_TSR]); + val &= IMX_MU_xSR_TEn(priv->dcfg->type, cp->idx) & + (ctrl & IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx)); break; case IMX_MU_TYPE_RX: - val &= IMX_MU_xSR_RFn(cp->idx) & - (ctrl & IMX_MU_xCR_RIEn(cp->idx)); + ctrl = imx_mu_read(priv, priv->dcfg->xCR[IMX_MU_RCR]); + val = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_RSR]); + val &= IMX_MU_xSR_RFn(priv->dcfg->type, cp->idx) & + (ctrl & 
IMX_MU_xCR_RIEn(priv->dcfg->type, cp->idx)); break; case IMX_MU_TYPE_RXDB: - val &= IMX_MU_xSR_GIPn(cp->idx) & - (ctrl & IMX_MU_xCR_GIEn(cp->idx)); + ctrl = imx_mu_read(priv, priv->dcfg->xCR[IMX_MU_GIER]); + val = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_GSR]); + val &= IMX_MU_xSR_GIPn(priv->dcfg->type, cp->idx) & + (ctrl & IMX_MU_xCR_GIEn(priv->dcfg->type, cp->idx)); break; + case IMX_MU_TYPE_RST: + return IRQ_NONE; default: - break; + dev_warn_ratelimited(priv->dev, "Unhandled channel type %d\n", + cp->type); + return IRQ_NONE; } if (!val) return IRQ_NONE; - if (val == IMX_MU_xSR_TEn(cp->idx)) { - imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_TIEn(cp->idx)); + if ((val == IMX_MU_xSR_TEn(priv->dcfg->type, cp->idx)) && + (cp->type == IMX_MU_TYPE_TX)) { + imx_mu_xcr_rmw(priv, IMX_MU_TCR, 0, IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx)); mbox_chan_txdone(chan, 0); - } else if (val == IMX_MU_xSR_RFn(cp->idx)) { - dat = imx_mu_read(priv, IMX_MU_xRRn(cp->idx)); - mbox_chan_received_data(chan, (void *)&dat); - } else if (val == IMX_MU_xSR_GIPn(cp->idx)) { - imx_mu_write(priv, IMX_MU_xSR_GIPn(cp->idx), IMX_MU_xSR); - mbox_chan_received_data(chan, NULL); + } else if ((val == IMX_MU_xSR_RFn(priv->dcfg->type, cp->idx)) && + (cp->type == IMX_MU_TYPE_RX)) { + priv->dcfg->rx(priv, cp); + } else if ((val == IMX_MU_xSR_GIPn(priv->dcfg->type, cp->idx)) && + (cp->type == IMX_MU_TYPE_RXDB)) { + priv->dcfg->rxdb(priv, cp); } else { dev_warn_ratelimited(priv->dev, "Not handled interrupt\n"); return IRQ_NONE; } + if (priv->suspend) + pm_system_wakeup(); + return IRQ_HANDLED; } @@ -155,52 +571,46 @@ static int imx_mu_send_data(struct mbox_chan *chan, void *data) { struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox); struct imx_mu_con_priv *cp = chan->con_priv; - u32 *arg = data; - - switch (cp->type) { - case IMX_MU_TYPE_TX: - imx_mu_write(priv, *arg, IMX_MU_xTRn(cp->idx)); - imx_mu_xcr_rmw(priv, IMX_MU_xCR_TIEn(cp->idx), 0); - break; - case IMX_MU_TYPE_TXDB: - imx_mu_xcr_rmw(priv, IMX_MU_xCR_GIRn(cp->idx), 0); - tasklet_schedule(&cp->txdb_tasklet); - break; - default: - dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type); - return -EINVAL; - } - return 0; + return priv->dcfg->tx(priv, cp, data); } static int imx_mu_startup(struct mbox_chan *chan) { struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox); struct imx_mu_con_priv *cp = chan->con_priv; + unsigned long irq_flag = 0; int ret; + pm_runtime_get_sync(priv->dev); + if (cp->type == IMX_MU_TYPE_TXDB_V2) + return 0; + if (cp->type == IMX_MU_TYPE_TXDB) { /* Tx doorbell don't have ACK support */ - tasklet_init(&cp->txdb_tasklet, imx_mu_txdb_tasklet, - (unsigned long)cp); + INIT_WORK(&cp->txdb_work, imx_mu_txdb_work); return 0; } - ret = request_irq(priv->irq, imx_mu_isr, IRQF_SHARED, cp->irq_desc, - chan); + /* IPC MU should be with IRQF_NO_SUSPEND set */ + if (!priv->dev->pm_domain) + irq_flag |= IRQF_NO_SUSPEND; + + if (!(priv->dcfg->type & IMX_MU_V2_IRQ)) + irq_flag |= IRQF_SHARED; + + ret = request_irq(priv->irq[cp->type], imx_mu_isr, irq_flag, cp->irq_desc, chan); if (ret) { - dev_err(priv->dev, - "Unable to acquire IRQ %d\n", priv->irq); + dev_err(priv->dev, "Unable to acquire IRQ %d\n", priv->irq[cp->type]); return ret; } switch (cp->type) { case IMX_MU_TYPE_RX: - imx_mu_xcr_rmw(priv, IMX_MU_xCR_RIEn(cp->idx), 0); + imx_mu_xcr_rmw(priv, IMX_MU_RCR, IMX_MU_xCR_RIEn(priv->dcfg->type, cp->idx), 0); break; case IMX_MU_TYPE_RXDB: - imx_mu_xcr_rmw(priv, IMX_MU_xCR_GIEn(cp->idx), 0); + imx_mu_xcr_rmw(priv, IMX_MU_GIER, 
IMX_MU_xCR_GIEn(priv->dcfg->type, cp->idx), 0); break; default: break; @@ -213,14 +623,43 @@ static void imx_mu_shutdown(struct mbox_chan *chan) { struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox); struct imx_mu_con_priv *cp = chan->con_priv; + int ret; + u32 sr; - if (cp->type == IMX_MU_TYPE_TXDB) - tasklet_kill(&cp->txdb_tasklet); + if (cp->type == IMX_MU_TYPE_TXDB_V2) { + pm_runtime_put_sync(priv->dev); + return; + } + + if (cp->type == IMX_MU_TYPE_TXDB) { + cancel_work_sync(&cp->txdb_work); + pm_runtime_put_sync(priv->dev); + return; + } - imx_mu_xcr_rmw(priv, 0, - IMX_MU_xCR_TIEn(cp->idx) | IMX_MU_xCR_RIEn(cp->idx)); + switch (cp->type) { + case IMX_MU_TYPE_TX: + imx_mu_xcr_rmw(priv, IMX_MU_TCR, 0, IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx)); + break; + case IMX_MU_TYPE_RX: + imx_mu_xcr_rmw(priv, IMX_MU_RCR, 0, IMX_MU_xCR_RIEn(priv->dcfg->type, cp->idx)); + break; + case IMX_MU_TYPE_RXDB: + imx_mu_xcr_rmw(priv, IMX_MU_GIER, 0, IMX_MU_xCR_GIEn(priv->dcfg->type, cp->idx)); + break; + case IMX_MU_TYPE_RST: + imx_mu_xcr_rmw(priv, IMX_MU_CR, IMX_MU_xCR_RST(priv->dcfg->type), 0); + ret = readl_poll_timeout(priv->base + priv->dcfg->xSR[IMX_MU_SR], sr, + !(sr & IMX_MU_xSR_RST(priv->dcfg->type)), 1, 5); + if (ret) + dev_warn(priv->dev, "RST channel timeout\n"); + break; + default: + break; + } - free_irq(priv->irq, chan); + free_irq(priv->irq[cp->type], chan); + pm_runtime_put_sync(priv->dev); } static const struct mbox_chan_ops imx_mu_ops = { @@ -229,9 +668,46 @@ static const struct mbox_chan_ops imx_mu_ops = { .shutdown = imx_mu_shutdown, }; +static struct mbox_chan *imx_mu_specific_xlate(struct mbox_controller *mbox, + const struct of_phandle_args *sp) +{ + u32 type, idx, chan; + + if (sp->args_count != 2) { + dev_err(mbox->dev, "Invalid argument count %d\n", sp->args_count); + return ERR_PTR(-EINVAL); + } + + type = sp->args[0]; /* channel type */ + idx = sp->args[1]; /* index */ + + switch (type) { + case IMX_MU_TYPE_TX: + case IMX_MU_TYPE_RX: + if (idx != 0) + dev_err(mbox->dev, "Invalid chan idx: %d\n", idx); + chan = type; + break; + case IMX_MU_TYPE_RXDB: + chan = 2 + idx; + break; + default: + dev_err(mbox->dev, "Invalid chan type: %d\n", type); + return ERR_PTR(-EINVAL); + } + + if (chan >= mbox->num_chans) { + dev_err(mbox->dev, "Not supported channel number: %d. (type: %d, idx: %d)\n", chan, type, idx); + return ERR_PTR(-EINVAL); + } + + return &mbox->chans[chan]; +} + static struct mbox_chan * imx_mu_xlate(struct mbox_controller *mbox, const struct of_phandle_args *sp) { + struct mbox_chan *p_chan; u32 type, idx, chan; if (sp->args_count != 2) { @@ -241,33 +717,150 @@ static struct mbox_chan * imx_mu_xlate(struct mbox_controller *mbox, type = sp->args[0]; /* channel type */ idx = sp->args[1]; /* index */ - chan = type * 4 + idx; + /* RST only supports 1 channel */ + if ((type == IMX_MU_TYPE_RST) && idx) { + dev_err(mbox->dev, "Invalid RST channel %d\n", idx); + return ERR_PTR(-EINVAL); + } + + chan = type * 4 + idx; if (chan >= mbox->num_chans) { dev_err(mbox->dev, "Not supported channel number: %d. 
(type: %d, idx: %d)\n", chan, type, idx); return ERR_PTR(-EINVAL); } - return &mbox->chans[chan]; + p_chan = &mbox->chans[chan]; + + if (type == IMX_MU_TYPE_TXDB_V2) + p_chan->txdone_method = TXDONE_BY_ACK; + + return p_chan; +} + +static struct mbox_chan *imx_mu_seco_xlate(struct mbox_controller *mbox, + const struct of_phandle_args *sp) +{ + u32 type; + + if (sp->args_count < 1) { + dev_err(mbox->dev, "Invalid argument count %d\n", sp->args_count); + return ERR_PTR(-EINVAL); + } + + type = sp->args[0]; /* channel type */ + + /* Only supports TXDB and RXDB */ + if (type == IMX_MU_TYPE_TX || type == IMX_MU_TYPE_RX) { + dev_err(mbox->dev, "Invalid type: %d\n", type); + return ERR_PTR(-EINVAL); + } + + return imx_mu_xlate(mbox, sp); } -static void imx_mu_init_generic(struct imx_mu_priv *priv) +static void imx_mu_get_tr_rr(struct imx_mu_priv *priv) { + u32 val; + + if (priv->dcfg->type & IMX_MU_V2) { + val = imx_mu_read(priv, IMX_MU_V2_PAR_OFF); + priv->num_tr = FIELD_GET(IMX_MU_V2_TR_MASK, val); + priv->num_rr = FIELD_GET(IMX_MU_V2_RR_MASK, val); + } else { + priv->num_tr = 4; + priv->num_rr = 4; + } +} + +static int imx_mu_init_generic(struct imx_mu_priv *priv) +{ + unsigned int i; + unsigned int val; + + if (priv->num_rr > 4 || priv->num_tr > 4) { + WARN_ONCE(true, "%s not support TR/RR larger than 4\n", __func__); + return -EOPNOTSUPP; + } + + for (i = 0; i < IMX_MU_CHANS; i++) { + struct imx_mu_con_priv *cp = &priv->con_priv[i]; + + cp->idx = i % 4; + cp->type = i >> 2; + cp->chan = &priv->mbox_chans[i]; + priv->mbox_chans[i].con_priv = cp; + snprintf(cp->irq_desc, sizeof(cp->irq_desc), + "%s[%i-%u]", dev_name(priv->dev), cp->type, cp->idx); + } + + priv->mbox.num_chans = IMX_MU_CHANS; + priv->mbox.of_xlate = imx_mu_xlate; + if (priv->side_b) - return; + return 0; + + /* Set default MU configuration */ + for (i = 0; i < IMX_MU_xCR_MAX; i++) + imx_mu_write(priv, 0, priv->dcfg->xCR[i]); + + /* Clear any pending GIP */ + val = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_GSR]); + imx_mu_write(priv, val, priv->dcfg->xSR[IMX_MU_GSR]); + + /* Clear any pending RSR */ + for (i = 0; i < priv->num_rr; i++) + imx_mu_read(priv, priv->dcfg->xRR + i * 4); + + return 0; +} + +static int imx_mu_init_specific(struct imx_mu_priv *priv) +{ + unsigned int i; + int num_chans = priv->dcfg->type & IMX_MU_V2_S4 ? IMX_MU_S4_CHANS : IMX_MU_SCU_CHANS; + + for (i = 0; i < num_chans; i++) { + struct imx_mu_con_priv *cp = &priv->con_priv[i]; + + cp->idx = i < 2 ? 0 : i - 2; + cp->type = i < 2 ? 
i : IMX_MU_TYPE_RXDB; + cp->chan = &priv->mbox_chans[i]; + priv->mbox_chans[i].con_priv = cp; + snprintf(cp->irq_desc, sizeof(cp->irq_desc), + "%s[%i-%u]", dev_name(priv->dev), cp->type, cp->idx); + } + + priv->mbox.num_chans = num_chans; + priv->mbox.of_xlate = imx_mu_specific_xlate; /* Set default MU configuration */ - imx_mu_write(priv, 0, IMX_MU_xCR); + for (i = 0; i < IMX_MU_xCR_MAX; i++) + imx_mu_write(priv, 0, priv->dcfg->xCR[i]); + + return 0; +} + +static int imx_mu_init_seco(struct imx_mu_priv *priv) +{ + int ret; + + ret = imx_mu_init_generic(priv); + if (ret) + return ret; + priv->mbox.of_xlate = imx_mu_seco_xlate; + + return 0; } static int imx_mu_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; - struct resource *iomem; struct imx_mu_priv *priv; - unsigned int i; - int ret; + const struct imx_mu_dcfg *dcfg; + int i, ret; + u32 size; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) @@ -275,14 +868,38 @@ static int imx_mu_probe(struct platform_device *pdev) priv->dev = dev; - iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->base = devm_ioremap_resource(&pdev->dev, iomem); + priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); - priv->irq = platform_get_irq(pdev, 0); - if (priv->irq < 0) - return priv->irq; + dcfg = of_device_get_match_data(dev); + if (!dcfg) + return -EINVAL; + priv->dcfg = dcfg; + if (priv->dcfg->type & IMX_MU_V2_IRQ) { + priv->irq[IMX_MU_TYPE_TX] = platform_get_irq_byname(pdev, "tx"); + if (priv->irq[IMX_MU_TYPE_TX] < 0) + return priv->irq[IMX_MU_TYPE_TX]; + priv->irq[IMX_MU_TYPE_RX] = platform_get_irq_byname(pdev, "rx"); + if (priv->irq[IMX_MU_TYPE_RX] < 0) + return priv->irq[IMX_MU_TYPE_RX]; + } else { + ret = platform_get_irq(pdev, 0); + if (ret < 0) + return ret; + + for (i = 0; i < IMX_MU_CHANS; i++) + priv->irq[i] = ret; + } + + if (priv->dcfg->type & IMX_MU_V2_S4) + size = sizeof(struct imx_s4_rpc_msg_max); + else + size = sizeof(struct imx_sc_rpc_msg_max); + + priv->msg = devm_kzalloc(dev, size, GFP_KERNEL); + if (!priv->msg) + return -ENOMEM; priv->clk = devm_clk_get(dev, NULL); if (IS_ERR(priv->clk)) { @@ -298,56 +915,225 @@ static int imx_mu_probe(struct platform_device *pdev) return ret; } - for (i = 0; i < IMX_MU_CHANS; i++) { - struct imx_mu_con_priv *cp = &priv->con_priv[i]; - - cp->idx = i % 4; - cp->type = i >> 2; - cp->chan = &priv->mbox_chans[i]; - priv->mbox_chans[i].con_priv = cp; - snprintf(cp->irq_desc, sizeof(cp->irq_desc), - "imx_mu_chan[%i-%i]", cp->type, cp->idx); - } + imx_mu_get_tr_rr(priv); priv->side_b = of_property_read_bool(np, "fsl,mu-side-b"); + ret = priv->dcfg->init(priv); + if (ret) { + dev_err(dev, "Failed to init MU\n"); + goto disable_clk; + } + spin_lock_init(&priv->xcr_lock); priv->mbox.dev = dev; priv->mbox.ops = &imx_mu_ops; priv->mbox.chans = priv->mbox_chans; - priv->mbox.num_chans = IMX_MU_CHANS; - priv->mbox.of_xlate = imx_mu_xlate; priv->mbox.txdone_irq = true; platform_set_drvdata(pdev, priv); - imx_mu_init_generic(priv); + ret = devm_mbox_controller_register(dev, &priv->mbox); + if (ret) + goto disable_clk; - return devm_mbox_controller_register(dev, &priv->mbox); -} + of_platform_populate(dev->of_node, NULL, NULL, dev); -static int imx_mu_remove(struct platform_device *pdev) -{ - struct imx_mu_priv *priv = platform_get_drvdata(pdev); + pm_runtime_enable(dev); + + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) + goto disable_runtime_pm; + + ret = 
pm_runtime_put_sync(dev); + if (ret < 0) + goto disable_runtime_pm; clk_disable_unprepare(priv->clk); return 0; + +disable_runtime_pm: + pm_runtime_disable(dev); +disable_clk: + clk_disable_unprepare(priv->clk); + return ret; } +static void imx_mu_remove(struct platform_device *pdev) +{ + struct imx_mu_priv *priv = platform_get_drvdata(pdev); + + pm_runtime_disable(priv->dev); +} + +static const struct imx_mu_dcfg imx_mu_cfg_imx6sx = { + .tx = imx_mu_generic_tx, + .rx = imx_mu_generic_rx, + .rxdb = imx_mu_generic_rxdb, + .init = imx_mu_init_generic, + .xTR = 0x0, + .xRR = 0x10, + .xSR = {0x20, 0x20, 0x20, 0x20}, + .xCR = {0x24, 0x24, 0x24, 0x24, 0x24}, +}; + +static const struct imx_mu_dcfg imx_mu_cfg_imx7ulp = { + .tx = imx_mu_generic_tx, + .rx = imx_mu_generic_rx, + .rxdb = imx_mu_generic_rxdb, + .init = imx_mu_init_generic, + .xTR = 0x20, + .xRR = 0x40, + .xSR = {0x60, 0x60, 0x60, 0x60}, + .xCR = {0x64, 0x64, 0x64, 0x64, 0x64}, +}; + +static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp = { + .tx = imx_mu_generic_tx, + .rx = imx_mu_generic_rx, + .rxdb = imx_mu_generic_rxdb, + .init = imx_mu_init_generic, + .type = IMX_MU_V2, + .xTR = 0x200, + .xRR = 0x280, + .xSR = {0xC, 0x118, 0x124, 0x12C}, + .xCR = {0x8, 0x110, 0x114, 0x120, 0x128}, +}; + +static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp_s4 = { + .tx = imx_mu_specific_tx, + .rx = imx_mu_specific_rx, + .init = imx_mu_init_specific, + .type = IMX_MU_V2 | IMX_MU_V2_S4, + .xTR = 0x200, + .xRR = 0x280, + .xSR = {0xC, 0x118, 0x124, 0x12C}, + .xCR = {0x8, 0x110, 0x114, 0x120, 0x128}, +}; + +static const struct imx_mu_dcfg imx_mu_cfg_imx93_s4 = { + .tx = imx_mu_specific_tx, + .rx = imx_mu_specific_rx, + .init = imx_mu_init_specific, + .type = IMX_MU_V2 | IMX_MU_V2_S4 | IMX_MU_V2_IRQ, + .xTR = 0x200, + .xRR = 0x280, + .xSR = {0xC, 0x118, 0x124, 0x12C}, + .xCR = {0x8, 0x110, 0x114, 0x120, 0x128}, +}; + +static const struct imx_mu_dcfg imx_mu_cfg_imx8_scu = { + .tx = imx_mu_specific_tx, + .rx = imx_mu_specific_rx, + .init = imx_mu_init_specific, + .rxdb = imx_mu_generic_rxdb, + .xTR = 0x0, + .xRR = 0x10, + .xSR = {0x20, 0x20, 0x20, 0x20}, + .xCR = {0x24, 0x24, 0x24, 0x24, 0x24}, +}; + +static const struct imx_mu_dcfg imx_mu_cfg_imx8_seco = { + .tx = imx_mu_seco_tx, + .rx = imx_mu_generic_rx, + .rxdb = imx_mu_seco_rxdb, + .init = imx_mu_init_seco, + .xTR = 0x0, + .xRR = 0x10, + .xSR = {0x20, 0x20, 0x20, 0x20}, + .xCR = {0x24, 0x24, 0x24, 0x24, 0x24}, +}; + static const struct of_device_id imx_mu_dt_ids[] = { - { .compatible = "fsl,imx6sx-mu" }, + { .compatible = "fsl,imx7ulp-mu", .data = &imx_mu_cfg_imx7ulp }, + { .compatible = "fsl,imx6sx-mu", .data = &imx_mu_cfg_imx6sx }, + { .compatible = "fsl,imx8ulp-mu", .data = &imx_mu_cfg_imx8ulp }, + { .compatible = "fsl,imx8ulp-mu-s4", .data = &imx_mu_cfg_imx8ulp_s4 }, + { .compatible = "fsl,imx93-mu-s4", .data = &imx_mu_cfg_imx93_s4 }, + { .compatible = "fsl,imx95-mu", .data = &imx_mu_cfg_imx8ulp }, + { .compatible = "fsl,imx95-mu-ele", .data = &imx_mu_cfg_imx8ulp_s4 }, + { .compatible = "fsl,imx95-mu-v2x", .data = &imx_mu_cfg_imx8ulp_s4 }, + { .compatible = "fsl,imx8-mu-scu", .data = &imx_mu_cfg_imx8_scu }, + { .compatible = "fsl,imx8-mu-seco", .data = &imx_mu_cfg_imx8_seco }, { }, }; MODULE_DEVICE_TABLE(of, imx_mu_dt_ids); +static int __maybe_unused imx_mu_suspend_noirq(struct device *dev) +{ + struct imx_mu_priv *priv = dev_get_drvdata(dev); + int i; + + if (!priv->clk) { + for (i = 0; i < IMX_MU_xCR_MAX; i++) + priv->xcr[i] = imx_mu_read(priv, priv->dcfg->xCR[i]); + } + + priv->suspend = true; + + 
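+	/*
+	 * Descriptive note: the xCR snapshot above is only taken when the
+	 * MU has no clock to manage; the resume path restores it only if
+	 * the registers were actually reset (xCR[0] reading back as zero).
+	 */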
return 0;
+}
+
+static int __maybe_unused imx_mu_resume_noirq(struct device *dev)
+{
+	struct imx_mu_priv *priv = dev_get_drvdata(dev);
+	int i;
+
+	/*
+	 * ONLY restore the MU when context was lost: TIE could be set
+	 * during noirq resume while MU data communication is still going
+	 * on, and restoring the saved value would overwrite TIE and make
+	 * the MU data send fail, which may lead to a system freeze. This
+	 * issue was observed when testing freeze-mode suspend.
+	 */
+	if (!priv->clk && !imx_mu_read(priv, priv->dcfg->xCR[0])) {
+		for (i = 0; i < IMX_MU_xCR_MAX; i++)
+			imx_mu_write(priv, priv->xcr[i], priv->dcfg->xCR[i]);
+	}
+
+	priv->suspend = false;
+
+	return 0;
+}
+
+static int __maybe_unused imx_mu_runtime_suspend(struct device *dev)
+{
+	struct imx_mu_priv *priv = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(priv->clk);
+
+	return 0;
+}
+
+static int __maybe_unused imx_mu_runtime_resume(struct device *dev)
+{
+	struct imx_mu_priv *priv = dev_get_drvdata(dev);
+	int ret;
+
+	ret = clk_prepare_enable(priv->clk);
+	if (ret)
+		dev_err(dev, "failed to enable clock\n");
+
+	return ret;
+}
+
+static const struct dev_pm_ops imx_mu_pm_ops = {
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_mu_suspend_noirq,
+				      imx_mu_resume_noirq)
+	SET_RUNTIME_PM_OPS(imx_mu_runtime_suspend,
+			   imx_mu_runtime_resume, NULL)
+};
+
 static struct platform_driver imx_mu_driver = {
 	.probe		= imx_mu_probe,
 	.remove		= imx_mu_remove,
 	.driver = {
 		.name	= "imx_mu",
 		.of_match_table = imx_mu_dt_ids,
+		.pm = &imx_mu_pm_ops,
 	},
 };
 module_platform_driver(imx_mu_driver);
diff --git a/drivers/mailbox/mailbox-altera.c b/drivers/mailbox/mailbox-altera.c
index 397e25ddae29..17278c2571d3 100644
--- a/drivers/mailbox/mailbox-altera.c
+++ b/drivers/mailbox/mailbox-altera.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright Altera Corporation (C) 2013-2014. All rights reserved
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
 */
 
 #include <linux/device.h>
@@ -141,7 +130,7 @@ static void altera_mbox_rx_data(struct mbox_chan *chan)
 
 static void altera_mbox_poll_rx(struct timer_list *t)
 {
-	struct altera_mbox *mbox = from_timer(mbox, t, rxpoll_timer);
+	struct altera_mbox *mbox = timer_container_of(mbox, t, rxpoll_timer);
 
 	altera_mbox_rx_data(mbox->chan);
 
@@ -281,7 +270,7 @@ static void altera_mbox_shutdown(struct mbox_chan *chan)
 		writel_relaxed(~0, mbox->mbox_base + MAILBOX_INTMASK_REG);
 		free_irq(mbox->irq, chan);
 	} else if (!mbox->is_sender) {
-		del_timer_sync(&mbox->rxpoll_timer);
+		timer_delete_sync(&mbox->rxpoll_timer);
 	}
 }
 
@@ -296,7 +285,6 @@ static const struct mbox_chan_ops altera_mbox_ops = {
 static int altera_mbox_probe(struct platform_device *pdev)
 {
 	struct altera_mbox *mbox;
-	struct resource	*regs;
 	struct mbox_chan *chans;
 	int ret;
 
@@ -310,9 +298,7 @@ static int altera_mbox_probe(struct platform_device *pdev)
 	if (!chans)
 		return -ENOMEM;
 
-	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
-	mbox->mbox_base = devm_ioremap_resource(&pdev->dev, regs);
+	mbox->mbox_base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(mbox->mbox_base))
 		return PTR_ERR(mbox->mbox_base);
 
diff --git a/drivers/mailbox/mailbox-mchp-ipc-sbi.c b/drivers/mailbox/mailbox-mchp-ipc-sbi.c
new file mode 100644
index 000000000000..a6e52009a424
--- /dev/null
+++ b/drivers/mailbox/mailbox-mchp-ipc-sbi.c
@@ -0,0 +1,504 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip Inter-Processor communication (IPC) driver
+ *
+ * Copyright (c) 2021 - 2024 Microchip Technology Inc. All rights reserved.
+ *
+ * Author: Valentina Fernandez <valentina.fernandezalanis@microchip.com>
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/of_device.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/mailbox/mchp-ipc.h>
+#include <asm/sbi.h>
+#include <asm/vendorid_list.h>
+
+#define IRQ_STATUS_BITS			12
+#define NUM_CHANS_PER_CLUSTER		5
+#define IPC_DMA_BIT_MASK		32
+#define SBI_EXT_MICROCHIP_TECHNOLOGY	(SBI_EXT_VENDOR_START | \
+					 MICROCHIP_VENDOR_ID)
+
+enum {
+	SBI_EXT_IPC_PROBE = 0x100,
+	SBI_EXT_IPC_CH_INIT,
+	SBI_EXT_IPC_SEND,
+	SBI_EXT_IPC_RECEIVE,
+	SBI_EXT_IPC_STATUS,
+};
+
+enum ipc_hw {
+	MIV_IHC,
+};
+
+/**
+ * struct mchp_ipc_mbox_info - IPC probe message format
+ *
+ * @hw_type: IPC implementation available in the hardware
+ * @num_channels: number of IPC channels available in the hardware
+ *
+ * Used to retrieve information on the IPC implementation
+ * using the SBI_EXT_IPC_PROBE SBI function id.
+ */
+struct mchp_ipc_mbox_info {
+	enum ipc_hw hw_type;
+	u8 num_channels;
+};
+
+/**
+ * struct mchp_ipc_init - IPC channel init message format
+ *
+ * @max_msg_size: maximum message size in bytes of a given channel
+ *
+ * struct used by the SBI_EXT_IPC_CH_INIT SBI function id to get
+ * the max message size in bytes of the initialized channel.
+ */
+struct mchp_ipc_init {
+	u16 max_msg_size;
+};
+
+/**
+ * struct mchp_ipc_status - IPC status message format
+ *
+ * @status: interrupt status for all channels associated to a cluster
+ * @cluster: specifies the cluster instance that originated an irq
+ *
+ * struct used by the SBI_EXT_IPC_STATUS SBI function id to get
+ * the message present and message clear interrupt status for all the
+ * channels associated to a cluster.
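+ * Only the low IRQ_STATUS_BITS (12) bits of @status are examined by
+ * the interrupt handler.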
+ */ +struct mchp_ipc_status { + u32 status; + u8 cluster; +}; + +/** + * struct mchp_ipc_sbi_msg - IPC SBI payload message + * + * @buf_addr: physical address where the received data should be copied to + * @size: maximum size(in bytes) that can be stored in the buffer pointed to by `buf` + * @irq_type: mask representing the irq types that triggered an irq + * + * struct used by the SBI_EXT_IPC_SEND/SBI_EXT_IPC_RECEIVE SBI function + * ids to send/receive a message from an associated processor using + * the IPC. + */ +struct mchp_ipc_sbi_msg { + u64 buf_addr; + u16 size; + u8 irq_type; +}; + +struct mchp_ipc_cluster_cfg { + void *buf_base; + phys_addr_t buf_base_addr; + int irq; +}; + +struct mchp_ipc_sbi_mbox { + struct device *dev; + struct mbox_chan *chans; + struct mchp_ipc_cluster_cfg *cluster_cfg; + void *buf_base; + unsigned long buf_base_addr; + struct mbox_controller controller; + enum ipc_hw hw_type; +}; + +static int mchp_ipc_sbi_chan_send(u32 command, u32 channel, unsigned long address) +{ + struct sbiret ret; + + ret = sbi_ecall(SBI_EXT_MICROCHIP_TECHNOLOGY, command, channel, + address, 0, 0, 0, 0); + + if (ret.error) + return sbi_err_map_linux_errno(ret.error); + else + return ret.value; +} + +static int mchp_ipc_sbi_send(u32 command, unsigned long address) +{ + struct sbiret ret; + + ret = sbi_ecall(SBI_EXT_MICROCHIP_TECHNOLOGY, command, address, + 0, 0, 0, 0, 0); + + if (ret.error) + return sbi_err_map_linux_errno(ret.error); + else + return ret.value; +} + +static struct mchp_ipc_sbi_mbox *to_mchp_ipc_mbox(struct mbox_controller *mbox) +{ + return container_of(mbox, struct mchp_ipc_sbi_mbox, controller); +} + +static inline void mchp_ipc_prepare_receive_req(struct mbox_chan *chan) +{ + struct mchp_ipc_sbi_chan *chan_info = (struct mchp_ipc_sbi_chan *)chan->con_priv; + struct mchp_ipc_sbi_msg request; + + request.buf_addr = chan_info->msg_buf_rx_addr; + request.size = chan_info->max_msg_size; + memcpy(chan_info->buf_base_rx, &request, sizeof(struct mchp_ipc_sbi_msg)); +} + +static inline void mchp_ipc_process_received_data(struct mbox_chan *chan, + struct mchp_ipc_msg *ipc_msg) +{ + struct mchp_ipc_sbi_chan *chan_info = (struct mchp_ipc_sbi_chan *)chan->con_priv; + struct mchp_ipc_sbi_msg sbi_msg; + + memcpy(&sbi_msg, chan_info->buf_base_rx, sizeof(struct mchp_ipc_sbi_msg)); + ipc_msg->buf = (u32 *)chan_info->msg_buf_rx; + ipc_msg->size = sbi_msg.size; +} + +static irqreturn_t mchp_ipc_cluster_aggr_isr(int irq, void *data) +{ + struct mbox_chan *chan; + struct mchp_ipc_sbi_chan *chan_info; + struct mchp_ipc_sbi_mbox *ipc = (struct mchp_ipc_sbi_mbox *)data; + struct mchp_ipc_msg ipc_msg; + struct mchp_ipc_status status_msg; + int ret; + unsigned long hartid; + u32 i, chan_index, chan_id; + + /* Find out the hart that originated the irq */ + for_each_online_cpu(i) { + hartid = cpuid_to_hartid_map(i); + if (irq == ipc->cluster_cfg[hartid].irq) + break; + } + + status_msg.cluster = hartid; + memcpy(ipc->cluster_cfg[hartid].buf_base, &status_msg, sizeof(struct mchp_ipc_status)); + + ret = mchp_ipc_sbi_send(SBI_EXT_IPC_STATUS, ipc->cluster_cfg[hartid].buf_base_addr); + if (ret < 0) { + dev_err_ratelimited(ipc->dev, "could not get IHC irq status ret=%d\n", ret); + return IRQ_HANDLED; + } + + memcpy(&status_msg, ipc->cluster_cfg[hartid].buf_base, sizeof(struct mchp_ipc_status)); + + /* + * Iterate over each bit set in the IHC interrupt status register (IRQ_STATUS) to identify + * the channel(s) that have a message to be processed/acknowledged. 
+ * The bits are organized in alternating format, where each pair of bits represents + * the status of the message present and message clear interrupts for each cluster/hart + * (from hart 0 to hart 5). Each cluster can have up to 5 fixed channels associated. + */ + + for_each_set_bit(i, (unsigned long *)&status_msg.status, IRQ_STATUS_BITS) { + /* Find out the destination hart that triggered the interrupt */ + chan_index = i / 2; + + /* + * The IP has no loopback channels, so we need to decrement the index when + * the target hart has a greater index than our own + */ + if (chan_index >= status_msg.cluster) + chan_index--; + + /* + * Calculate the channel id given the hart and channel index. Channel IDs + * are unique across all clusters of an IPC, and iterate contiguously + * across all clusters. + */ + chan_id = status_msg.cluster * (NUM_CHANS_PER_CLUSTER + chan_index); + + chan = &ipc->chans[chan_id]; + chan_info = (struct mchp_ipc_sbi_chan *)chan->con_priv; + + if (i % 2 == 0) { + mchp_ipc_prepare_receive_req(chan); + ret = mchp_ipc_sbi_chan_send(SBI_EXT_IPC_RECEIVE, chan_id, + chan_info->buf_base_rx_addr); + if (ret < 0) + continue; + + mchp_ipc_process_received_data(chan, &ipc_msg); + mbox_chan_received_data(&ipc->chans[chan_id], (void *)&ipc_msg); + + } else { + ret = mchp_ipc_sbi_chan_send(SBI_EXT_IPC_RECEIVE, chan_id, + chan_info->buf_base_rx_addr); + mbox_chan_txdone(&ipc->chans[chan_id], ret); + } + } + return IRQ_HANDLED; +} + +static int mchp_ipc_send_data(struct mbox_chan *chan, void *data) +{ + struct mchp_ipc_sbi_chan *chan_info = (struct mchp_ipc_sbi_chan *)chan->con_priv; + const struct mchp_ipc_msg *msg = data; + struct mchp_ipc_sbi_msg sbi_payload; + + memcpy(chan_info->msg_buf_tx, msg->buf, msg->size); + sbi_payload.buf_addr = chan_info->msg_buf_tx_addr; + sbi_payload.size = msg->size; + memcpy(chan_info->buf_base_tx, &sbi_payload, sizeof(sbi_payload)); + + return mchp_ipc_sbi_chan_send(SBI_EXT_IPC_SEND, chan_info->id, chan_info->buf_base_tx_addr); +} + +static int mchp_ipc_startup(struct mbox_chan *chan) +{ + struct mchp_ipc_sbi_chan *chan_info = (struct mchp_ipc_sbi_chan *)chan->con_priv; + struct mchp_ipc_sbi_mbox *ipc = to_mchp_ipc_mbox(chan->mbox); + struct mchp_ipc_init ch_init_msg; + int ret; + + /* + * The TX base buffer is used to transmit two types of messages: + * - struct mchp_ipc_init to initialize the channel + * - struct mchp_ipc_sbi_msg to transmit user data/payload + * Ensure the TX buffer size is large enough to accommodate either message type. 
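+	 * For example, with the structures above, sizeof(struct mchp_ipc_init)
+	 * is 2 bytes while sizeof(struct mchp_ipc_sbi_msg) is typically 16 on
+	 * a 64-bit build (exact values depend on alignment and padding), so
+	 * max_size below covers both.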
+ */ + size_t max_size = max(sizeof(struct mchp_ipc_init), sizeof(struct mchp_ipc_sbi_msg)); + + chan_info->buf_base_tx = kmalloc(max_size, GFP_KERNEL); + if (!chan_info->buf_base_tx) { + ret = -ENOMEM; + goto fail; + } + + chan_info->buf_base_tx_addr = __pa(chan_info->buf_base_tx); + + chan_info->buf_base_rx = kmalloc(max_size, GFP_KERNEL); + if (!chan_info->buf_base_rx) { + ret = -ENOMEM; + goto fail_free_buf_base_tx; + } + + chan_info->buf_base_rx_addr = __pa(chan_info->buf_base_rx); + + ret = mchp_ipc_sbi_chan_send(SBI_EXT_IPC_CH_INIT, chan_info->id, + chan_info->buf_base_tx_addr); + if (ret < 0) { + dev_err(ipc->dev, "channel %u init failed\n", chan_info->id); + goto fail_free_buf_base_rx; + } + + memcpy(&ch_init_msg, chan_info->buf_base_tx, sizeof(struct mchp_ipc_init)); + chan_info->max_msg_size = ch_init_msg.max_msg_size; + + chan_info->msg_buf_tx = kmalloc(chan_info->max_msg_size, GFP_KERNEL); + if (!chan_info->msg_buf_tx) { + ret = -ENOMEM; + goto fail_free_buf_base_rx; + } + + chan_info->msg_buf_tx_addr = __pa(chan_info->msg_buf_tx); + + chan_info->msg_buf_rx = kmalloc(chan_info->max_msg_size, GFP_KERNEL); + if (!chan_info->msg_buf_rx) { + ret = -ENOMEM; + goto fail_free_buf_msg_tx; + } + + chan_info->msg_buf_rx_addr = __pa(chan_info->msg_buf_rx); + + switch (ipc->hw_type) { + case MIV_IHC: + return 0; + default: + goto fail_free_buf_msg_rx; + } + + if (ret) { + dev_err(ipc->dev, "failed to register interrupt(s)\n"); + goto fail_free_buf_msg_rx; + } + + return ret; + +fail_free_buf_msg_rx: + kfree(chan_info->msg_buf_rx); +fail_free_buf_msg_tx: + kfree(chan_info->msg_buf_tx); +fail_free_buf_base_rx: + kfree(chan_info->buf_base_rx); +fail_free_buf_base_tx: + kfree(chan_info->buf_base_tx); +fail: + return ret; +} + +static void mchp_ipc_shutdown(struct mbox_chan *chan) +{ + struct mchp_ipc_sbi_chan *chan_info = (struct mchp_ipc_sbi_chan *)chan->con_priv; + + kfree(chan_info->buf_base_tx); + kfree(chan_info->buf_base_rx); + kfree(chan_info->msg_buf_tx); + kfree(chan_info->msg_buf_rx); +} + +static const struct mbox_chan_ops mchp_ipc_ops = { + .startup = mchp_ipc_startup, + .send_data = mchp_ipc_send_data, + .shutdown = mchp_ipc_shutdown, +}; + +static struct mbox_chan *mchp_ipc_mbox_xlate(struct mbox_controller *controller, + const struct of_phandle_args *spec) +{ + struct mchp_ipc_sbi_mbox *ipc = to_mchp_ipc_mbox(controller); + unsigned int chan_id = spec->args[0]; + + if (chan_id >= ipc->controller.num_chans) { + dev_err(ipc->dev, "invalid channel id %d\n", chan_id); + return ERR_PTR(-EINVAL); + } + + return &ipc->chans[chan_id]; +} + +static int mchp_ipc_get_cluster_aggr_irq(struct mchp_ipc_sbi_mbox *ipc) +{ + struct platform_device *pdev = to_platform_device(ipc->dev); + char *irq_name; + int cpuid, ret; + unsigned long hartid; + bool irq_found = false; + + for_each_online_cpu(cpuid) { + hartid = cpuid_to_hartid_map(cpuid); + irq_name = devm_kasprintf(ipc->dev, GFP_KERNEL, "hart-%lu", hartid); + ret = platform_get_irq_byname_optional(pdev, irq_name); + if (ret <= 0) + continue; + + ipc->cluster_cfg[hartid].irq = ret; + ret = devm_request_irq(ipc->dev, ipc->cluster_cfg[hartid].irq, + mchp_ipc_cluster_aggr_isr, IRQF_SHARED, + "miv-ihc-irq", ipc); + if (ret) + return ret; + + ipc->cluster_cfg[hartid].buf_base = devm_kmalloc(ipc->dev, + sizeof(struct mchp_ipc_status), + GFP_KERNEL); + + if (!ipc->cluster_cfg[hartid].buf_base) + return -ENOMEM; + + ipc->cluster_cfg[hartid].buf_base_addr = __pa(ipc->cluster_cfg[hartid].buf_base); + + irq_found = true; + } + + return irq_found; +} + 
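+/*
+ * Illustrative client usage (not part of this driver): after obtaining a
+ * channel with mbox_request_channel(), a consumer would send a payload
+ * roughly as follows:
+ *
+ *	struct mchp_ipc_msg msg = {
+ *		.buf  = payload_words,
+ *		.size = payload_bytes,
+ *	};
+ *
+ *	ret = mbox_send_message(chan, &msg);
+ *
+ * TX completion is then signalled through txdone_irq once the message-clear
+ * interrupt is handled in mchp_ipc_cluster_aggr_isr().
+ */
+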
+static int mchp_ipc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mchp_ipc_mbox_info ipc_info; + struct mchp_ipc_sbi_mbox *ipc; + struct mchp_ipc_sbi_chan *priv; + bool irq_avail = false; + int ret; + u32 chan_id; + + ret = sbi_probe_extension(SBI_EXT_MICROCHIP_TECHNOLOGY); + if (ret <= 0) + return dev_err_probe(dev, ret, "Microchip SBI extension not detected\n"); + + ipc = devm_kzalloc(dev, sizeof(*ipc), GFP_KERNEL); + if (!ipc) + return -ENOMEM; + + platform_set_drvdata(pdev, ipc); + + ipc->buf_base = devm_kmalloc(dev, sizeof(struct mchp_ipc_mbox_info), GFP_KERNEL); + if (!ipc->buf_base) + return -ENOMEM; + + ipc->buf_base_addr = __pa(ipc->buf_base); + + ret = mchp_ipc_sbi_send(SBI_EXT_IPC_PROBE, ipc->buf_base_addr); + if (ret < 0) + return dev_err_probe(dev, ret, "could not probe IPC SBI service\n"); + + memcpy(&ipc_info, ipc->buf_base, sizeof(struct mchp_ipc_mbox_info)); + ipc->controller.num_chans = ipc_info.num_channels; + ipc->hw_type = ipc_info.hw_type; + + ipc->chans = devm_kcalloc(dev, ipc->controller.num_chans, sizeof(*ipc->chans), GFP_KERNEL); + if (!ipc->chans) + return -ENOMEM; + + ipc->dev = dev; + ipc->controller.txdone_irq = true; + ipc->controller.dev = ipc->dev; + ipc->controller.ops = &mchp_ipc_ops; + ipc->controller.chans = ipc->chans; + ipc->controller.of_xlate = mchp_ipc_mbox_xlate; + + for (chan_id = 0; chan_id < ipc->controller.num_chans; chan_id++) { + priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + ipc->chans[chan_id].con_priv = priv; + priv->id = chan_id; + } + + if (ipc->hw_type == MIV_IHC) { + ipc->cluster_cfg = devm_kcalloc(dev, num_online_cpus(), + sizeof(struct mchp_ipc_cluster_cfg), + GFP_KERNEL); + if (!ipc->cluster_cfg) + return -ENOMEM; + + if (mchp_ipc_get_cluster_aggr_irq(ipc)) + irq_avail = true; + } + + if (!irq_avail) + return dev_err_probe(dev, -ENODEV, "missing interrupt property\n"); + + ret = devm_mbox_controller_register(dev, &ipc->controller); + if (ret) + return dev_err_probe(dev, ret, + "Inter-Processor communication (IPC) registration failed\n"); + + return 0; +} + +static const struct of_device_id mchp_ipc_of_match[] = { + {.compatible = "microchip,sbi-ipc", }, + {} +}; +MODULE_DEVICE_TABLE(of, mchp_ipc_of_match); + +static struct platform_driver mchp_ipc_driver = { + .driver = { + .name = "microchip_ipc", + .of_match_table = mchp_ipc_of_match, + }, + .probe = mchp_ipc_probe, +}; + +module_platform_driver(mchp_ipc_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Valentina Fernandez <valentina.fernandezalanis@microchip.com>"); +MODULE_DESCRIPTION("Microchip Inter-Processor Communication (IPC) driver"); diff --git a/drivers/mailbox/mailbox-mpfs.c b/drivers/mailbox/mailbox-mpfs.c new file mode 100644 index 000000000000..d5d9effece97 --- /dev/null +++ b/drivers/mailbox/mailbox-mpfs.c @@ -0,0 +1,340 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Microchip PolarFire SoC (MPFS) system controller/mailbox controller driver + * + * Copyright (c) 2020-2022 Microchip Corporation. All rights reserved. 
+ *
+ * Author: Conor Dooley <conor.dooley@microchip.com>
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/mailbox_controller.h>
+#include <soc/microchip/mpfs.h>
+
+#define MESSAGE_INT_OFFSET	0x18cu
+#define SERVICES_CR_OFFSET	0x50u
+#define SERVICES_SR_OFFSET	0x54u
+#define MAILBOX_REG_OFFSET	0x800u
+#define MSS_SYS_MAILBOX_DATA_OFFSET	0u
+#define SCB_MASK_WIDTH	16u
+
+/* SCBCTRL service control register */
+
+#define SCB_CTRL_REQ (0)
+#define SCB_CTRL_REQ_MASK BIT(SCB_CTRL_REQ)
+
+#define SCB_CTRL_BUSY (1)
+#define SCB_CTRL_BUSY_MASK BIT(SCB_CTRL_BUSY)
+
+#define SCB_CTRL_ABORT (2)
+#define SCB_CTRL_ABORT_MASK BIT(SCB_CTRL_ABORT)
+
+#define SCB_CTRL_NOTIFY (3)
+#define SCB_CTRL_NOTIFY_MASK BIT(SCB_CTRL_NOTIFY)
+
+#define SCB_CTRL_POS (16)
+#define SCB_CTRL_MASK GENMASK(SCB_CTRL_POS + SCB_MASK_WIDTH - 1, SCB_CTRL_POS)
+
+/* SCBCTRL service status register */
+
+#define SCB_STATUS_REQ (0)
+#define SCB_STATUS_REQ_MASK BIT(SCB_STATUS_REQ)
+
+#define SCB_STATUS_BUSY (1)
+#define SCB_STATUS_BUSY_MASK BIT(SCB_STATUS_BUSY)
+
+#define SCB_STATUS_ABORT (2)
+#define SCB_STATUS_ABORT_MASK BIT(SCB_STATUS_ABORT)
+
+#define SCB_STATUS_NOTIFY (3)
+#define SCB_STATUS_NOTIFY_MASK BIT(SCB_STATUS_NOTIFY)
+
+#define SCB_STATUS_POS (16)
+#define SCB_STATUS_MASK GENMASK(SCB_STATUS_POS + SCB_MASK_WIDTH - 1, SCB_STATUS_POS)
+
+struct mpfs_mbox {
+	struct mbox_controller controller;
+	struct device *dev;
+	int irq;
+	void __iomem *ctrl_base;
+	void __iomem *mbox_base;
+	void __iomem *int_reg;
+	struct mbox_chan chans[1];
+	struct mpfs_mss_response *response;
+	struct regmap *sysreg_scb, *control_scb;
+	u16 resp_offset;
+};
+
+static bool mpfs_mbox_busy(struct mpfs_mbox *mbox)
+{
+	u32 status;
+
+	if (mbox->control_scb)
+		regmap_read(mbox->control_scb, SERVICES_SR_OFFSET, &status);
+	else
+		status = readl_relaxed(mbox->ctrl_base + SERVICES_SR_OFFSET);
+
+	return status & SCB_STATUS_BUSY_MASK;
+}
+
+static bool mpfs_mbox_last_tx_done(struct mbox_chan *chan)
+{
+	struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv;
+	struct mpfs_mss_response *response = mbox->response;
+	u32 val;
+
+	if (mpfs_mbox_busy(mbox))
+		return false;
+
+	/*
+	 * The service status is stored in bits 31:16 of the SERVICES_SR
+	 * register & is only valid when the system controller is not busy.
+	 * Failed services are intended to generate interrupts, but in reality
+	 * this does not happen, so the status must be checked here.
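+	 * A status of zero is the usual success indication; non-zero values
+	 * are propagated to the client through response->resp_status below.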
+ */ + if (mbox->control_scb) + regmap_read(mbox->control_scb, SERVICES_SR_OFFSET, &val); + else + val = readl_relaxed(mbox->ctrl_base + SERVICES_SR_OFFSET); + + response->resp_status = (val & SCB_STATUS_MASK) >> SCB_STATUS_POS; + + return true; +} + +static int mpfs_mbox_send_data(struct mbox_chan *chan, void *data) +{ + struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv; + struct mpfs_mss_msg *msg = data; + u32 tx_trigger; + u16 opt_sel; + u32 val = 0u; + + mbox->response = msg->response; + mbox->resp_offset = msg->resp_offset; + + if (mpfs_mbox_busy(mbox)) + return -EBUSY; + + if (msg->cmd_data_size) { + u32 index; + u8 extra_bits = msg->cmd_data_size & 3; + u32 *word_buf = (u32 *)msg->cmd_data; + + for (index = 0; index < (msg->cmd_data_size / 4); index++) + writel_relaxed(word_buf[index], + mbox->mbox_base + msg->mbox_offset + index * 0x4); + if (extra_bits) { + u8 i; + u8 byte_off = ALIGN_DOWN(msg->cmd_data_size, 4); + u8 *byte_buf = msg->cmd_data + byte_off; + + val = readl_relaxed(mbox->mbox_base + msg->mbox_offset + index * 0x4); + + for (i = 0u; i < extra_bits; i++) { + val &= ~(0xffu << (i * 8u)); + val |= (byte_buf[i] << (i * 8u)); + } + + writel_relaxed(val, mbox->mbox_base + msg->mbox_offset + index * 0x4); + } + } + + opt_sel = ((msg->mbox_offset << 7u) | (msg->cmd_opcode & 0x7fu)); + + tx_trigger = (opt_sel << SCB_CTRL_POS) & SCB_CTRL_MASK; + tx_trigger |= SCB_CTRL_REQ_MASK | SCB_STATUS_NOTIFY_MASK; + + if (mbox->control_scb) + regmap_write(mbox->control_scb, SERVICES_CR_OFFSET, tx_trigger); + else + writel_relaxed(tx_trigger, mbox->ctrl_base + SERVICES_CR_OFFSET); + + + return 0; +} + +static void mpfs_mbox_rx_data(struct mbox_chan *chan) +{ + struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv; + struct mpfs_mss_response *response = mbox->response; + u16 num_words = ALIGN((response->resp_size), (4)) / 4U; + u32 i; + + if (!response->resp_msg) { + dev_err(mbox->dev, "failed to assign memory for response %d\n", -ENOMEM); + return; + } + + /* + * We should *never* get an interrupt while the controller is + * still in the busy state. If we do, something has gone badly + * wrong & the content of the mailbox would not be valid. 
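+	 * In that case a 0xDEAD sentinel is stored in resp_status below, so
+	 * the client can tell such a response apart from a real status code.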
+ */ + if (mpfs_mbox_busy(mbox)) { + dev_err(mbox->dev, "got an interrupt but system controller is busy\n"); + response->resp_status = 0xDEAD; + return; + } + + for (i = 0; i < num_words; i++) { + response->resp_msg[i] = + readl_relaxed(mbox->mbox_base + + mbox->resp_offset + i * 0x4); + } + + mbox_chan_received_data(chan, response); +} + +static irqreturn_t mpfs_mbox_inbox_isr(int irq, void *data) +{ + struct mbox_chan *chan = data; + struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv; + + if (mbox->control_scb) + regmap_write(mbox->sysreg_scb, MESSAGE_INT_OFFSET, 0); + else + writel_relaxed(0, mbox->int_reg); + + mpfs_mbox_rx_data(chan); + + return IRQ_HANDLED; +} + +static int mpfs_mbox_startup(struct mbox_chan *chan) +{ + struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv; + int ret = 0; + + if (!mbox) + return -EINVAL; + + ret = devm_request_irq(mbox->dev, mbox->irq, mpfs_mbox_inbox_isr, 0, "mpfs-mailbox", chan); + if (ret) + dev_err(mbox->dev, "failed to register mailbox interrupt:%d\n", ret); + + return ret; +} + +static void mpfs_mbox_shutdown(struct mbox_chan *chan) +{ + struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv; + + devm_free_irq(mbox->dev, mbox->irq, chan); +} + +static const struct mbox_chan_ops mpfs_mbox_ops = { + .send_data = mpfs_mbox_send_data, + .startup = mpfs_mbox_startup, + .shutdown = mpfs_mbox_shutdown, + .last_tx_done = mpfs_mbox_last_tx_done, +}; + +static inline int mpfs_mbox_syscon_probe(struct mpfs_mbox *mbox, struct platform_device *pdev) +{ + mbox->control_scb = syscon_regmap_lookup_by_compatible("microchip,mpfs-control-scb"); + if (IS_ERR(mbox->control_scb)) + return PTR_ERR(mbox->control_scb); + + mbox->sysreg_scb = syscon_regmap_lookup_by_compatible("microchip,mpfs-sysreg-scb"); + if (IS_ERR(mbox->sysreg_scb)) + return PTR_ERR(mbox->sysreg_scb); + + mbox->mbox_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(mbox->mbox_base)) + return PTR_ERR(mbox->mbox_base); + + return 0; +} + +static inline int mpfs_mbox_old_format_probe(struct mpfs_mbox *mbox, struct platform_device *pdev) +{ + dev_warn(&pdev->dev, "falling back to old devicetree format"); + + mbox->ctrl_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(mbox->ctrl_base)) + return PTR_ERR(mbox->ctrl_base); + + mbox->int_reg = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(mbox->int_reg)) + return PTR_ERR(mbox->int_reg); + + mbox->mbox_base = devm_platform_ioremap_resource(pdev, 2); + if (IS_ERR(mbox->mbox_base)) // account for the old dt-binding w/ 2 regs + mbox->mbox_base = mbox->ctrl_base + MAILBOX_REG_OFFSET; + + return 0; +} + +static int mpfs_mbox_probe(struct platform_device *pdev) +{ + struct mpfs_mbox *mbox; + int ret; + + mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL); + if (!mbox) + return -ENOMEM; + + ret = mpfs_mbox_syscon_probe(mbox, pdev); + if (ret) { + /* + * set this to null, so it can be used as the decision for to + * regmap or not to regmap + */ + mbox->control_scb = NULL; + ret = mpfs_mbox_old_format_probe(mbox, pdev); + if (ret) + return ret; + } + mbox->irq = platform_get_irq(pdev, 0); + if (mbox->irq < 0) + return mbox->irq; + + mbox->dev = &pdev->dev; + + mbox->chans[0].con_priv = mbox; + mbox->controller.dev = mbox->dev; + mbox->controller.num_chans = 1; + mbox->controller.chans = mbox->chans; + mbox->controller.ops = &mpfs_mbox_ops; + mbox->controller.txdone_poll = true; + mbox->controller.txpoll_period = 10u; + + ret = devm_mbox_controller_register(&pdev->dev, &mbox->controller); + if (ret) { + 
dev_err(&pdev->dev, "Registering MPFS mailbox controller failed\n");
+		return ret;
+	}
+	dev_info(&pdev->dev, "Registered MPFS mailbox controller driver\n");
+
+	return 0;
+}
+
+static const struct of_device_id mpfs_mbox_of_match[] = {
+	{.compatible = "microchip,mpfs-mailbox", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, mpfs_mbox_of_match);
+
+static struct platform_driver mpfs_mbox_driver = {
+	.driver = {
+		.name = "mpfs-mailbox",
+		.of_match_table = mpfs_mbox_of_match,
+	},
+	.probe = mpfs_mbox_probe,
+};
+module_platform_driver(mpfs_mbox_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
+MODULE_DESCRIPTION("MPFS mailbox controller driver");
diff --git a/drivers/mailbox/mailbox-sti.c b/drivers/mailbox/mailbox-sti.c
index adf82b85dbb2..b4b5bdd503cf 100644
--- a/drivers/mailbox/mailbox-sti.c
+++ b/drivers/mailbox/mailbox-sti.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * STi Mailbox
  *
@@ -7,11 +8,6 @@
  *
  * Based on the original driver written by;
  * Alexandre Torgue, Olivier Lebreton and Loic Pallardy
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
 */
 
 #include <linux/err.h>
@@ -21,8 +17,8 @@
 #include <linux/mailbox_controller.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/property.h>
 #include <linux/slab.h>
 
 #include "mailbox.h"
@@ -40,12 +36,7 @@
 #define MBOX_BASE(mdev, inst)   ((mdev)->base + ((inst) * 4))
 
 /**
- * STi Mailbox device data
- *
- * An IP Mailbox is currently composed of 4 instances
- * Each instance is currently composed of 32 channels
- * This means that we have 128 channels per Mailbox
- * A channel an be used for TX or RX
+ * struct sti_mbox_device - STi Mailbox device data
 *
 * @dev:	Device to which it is attached
 * @mbox:	Representation of a communication channel controller
@@ -53,6 +44,11 @@
 * @name:	Name of the mailbox
 * @enabled:	Local copy of enabled channels
 * @lock:	Mutex protecting enabled status
+ *
+ * An IP Mailbox is currently composed of 4 instances
+ * Each instance is currently composed of 32 channels
+ * This means that we have 128 channels per Mailbox
+ * A channel can be used for TX or RX
 */
 struct sti_mbox_device {
 	struct device		*dev;
@@ -64,7 +60,7 @@ struct sti_mbox_device {
 };
 
 /**
- * STi Mailbox platform specific configuration
+ * struct sti_mbox_pdata - STi Mailbox platform specific configuration
 *
 * @num_inst:	Maximum number of instances in one HW Mailbox
 * @num_chan:	Maximum number of channel per instance
@@ -75,7 +71,7 @@ struct sti_mbox_pdata {
 };
 
 /**
- * STi Mailbox allocated channel information
+ * struct sti_channel - STi Mailbox allocated channel information
 *
 * @mdev:	Pointer to parent Mailbox device
 * @instance:	Instance number channel resides in
@@ -407,21 +403,18 @@ MODULE_DEVICE_TABLE(of, sti_mailbox_match);
 
 static int sti_mbox_probe(struct platform_device *pdev)
 {
-	const struct of_device_id *match;
 	struct mbox_controller *mbox;
 	struct sti_mbox_device *mdev;
 	struct device_node *np = pdev->dev.of_node;
 	struct mbox_chan *chans;
-	struct resource *res;
 	int irq;
 	int ret;
 
-	match = of_match_device(sti_mailbox_match, &pdev->dev);
-	if (!match) {
+	pdev->dev.platform_data = (struct sti_mbox_pdata *)device_get_match_data(&pdev->dev);
+	if (!pdev->dev.platform_data) {
 		dev_err(&pdev->dev,
"No configuration found\n"); return -ENODEV; } - pdev->dev.platform_data = (struct sti_mbox_pdata *) match->data; mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL); if (!mdev) @@ -429,8 +422,7 @@ static int sti_mbox_probe(struct platform_device *pdev) platform_set_drvdata(pdev, mdev); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - mdev->base = devm_ioremap_resource(&pdev->dev, res); + mdev->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(mdev->base)) return PTR_ERR(mdev->base); diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c index 58bfafc34bc4..3a28ab5c42e5 100644 --- a/drivers/mailbox/mailbox-test.c +++ b/drivers/mailbox/mailbox-test.c @@ -1,12 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2015 ST Microelectronics * * Author: Lee Jones <lee.jones@linaro.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #include <linux/debugfs.h> @@ -16,10 +12,12 @@ #include <linux/kernel.h> #include <linux/mailbox_client.h> #include <linux/module.h> +#include <linux/mutex.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/poll.h> #include <linux/slab.h> +#include <linux/spinlock.h> #include <linux/uaccess.h> #include <linux/sched/signal.h> @@ -31,7 +29,6 @@ (MBOX_MAX_MSG_LEN / MBOX_BYTES_PER_LINE)) static bool mbox_data_ready; -static struct dentry *root_debugfs_dir; struct mbox_test_device { struct device *dev; @@ -43,8 +40,10 @@ struct mbox_test_device { char *signal; char *message; spinlock_t lock; + struct mutex mutex; wait_queue_head_t waitq; struct fasync_struct *async_queue; + struct dentry *root_debugfs_dir; }; static ssize_t mbox_test_signal_write(struct file *filp, @@ -99,6 +98,7 @@ static ssize_t mbox_test_message_write(struct file *filp, size_t count, loff_t *ppos) { struct mbox_test_device *tdev = filp->private_data; + char *message; void *data; int ret; @@ -114,10 +114,13 @@ static ssize_t mbox_test_message_write(struct file *filp, return -EINVAL; } - tdev->message = kzalloc(MBOX_MAX_MSG_LEN, GFP_KERNEL); - if (!tdev->message) + message = kzalloc(MBOX_MAX_MSG_LEN, GFP_KERNEL); + if (!message) return -ENOMEM; + mutex_lock(&tdev->mutex); + + tdev->message = message; ret = copy_from_user(tdev->message, userbuf, count); if (ret) { ret = -EFAULT; @@ -148,6 +151,8 @@ out: kfree(tdev->message); tdev->signal = NULL; + mutex_unlock(&tdev->mutex); + return ret < 0 ? 
ret : count; } @@ -262,16 +267,16 @@ static int mbox_test_add_debugfs(struct platform_device *pdev, if (!debugfs_initialized()) return 0; - root_debugfs_dir = debugfs_create_dir("mailbox", NULL); - if (!root_debugfs_dir) { + tdev->root_debugfs_dir = debugfs_create_dir(dev_name(&pdev->dev), NULL); + if (IS_ERR(tdev->root_debugfs_dir)) { dev_err(&pdev->dev, "Failed to create Mailbox debugfs\n"); return -EINVAL; } - debugfs_create_file("message", 0600, root_debugfs_dir, + debugfs_create_file("message", 0600, tdev->root_debugfs_dir, tdev, &mbox_test_message_ops); - debugfs_create_file("signal", 0200, root_debugfs_dir, + debugfs_create_file("signal", 0200, tdev->root_debugfs_dir, tdev, &mbox_test_signal_ops); return 0; @@ -362,28 +367,28 @@ static int mbox_test_probe(struct platform_device *pdev) return -ENOMEM; /* It's okay for MMIO to be NULL */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - size = resource_size(res); - tdev->tx_mmio = devm_ioremap_resource(&pdev->dev, res); - if (PTR_ERR(tdev->tx_mmio) == -EBUSY) + tdev->tx_mmio = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (PTR_ERR(tdev->tx_mmio) == -EBUSY) { /* if reserved area in SRAM, try just ioremap */ + size = resource_size(res); tdev->tx_mmio = devm_ioremap(&pdev->dev, res->start, size); - else if (IS_ERR(tdev->tx_mmio)) + } else if (IS_ERR(tdev->tx_mmio)) { tdev->tx_mmio = NULL; + } /* If specified, second reg entry is Rx MMIO */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - size = resource_size(res); - tdev->rx_mmio = devm_ioremap_resource(&pdev->dev, res); - if (PTR_ERR(tdev->rx_mmio) == -EBUSY) + tdev->rx_mmio = devm_platform_get_and_ioremap_resource(pdev, 1, &res); + if (PTR_ERR(tdev->rx_mmio) == -EBUSY) { + size = resource_size(res); tdev->rx_mmio = devm_ioremap(&pdev->dev, res->start, size); - else if (IS_ERR(tdev->rx_mmio)) + } else if (IS_ERR(tdev->rx_mmio)) { tdev->rx_mmio = tdev->tx_mmio; + } tdev->tx_channel = mbox_test_request_channel(pdev, "tx"); tdev->rx_channel = mbox_test_request_channel(pdev, "rx"); - if (!tdev->tx_channel && !tdev->rx_channel) + if (IS_ERR_OR_NULL(tdev->tx_channel) && IS_ERR_OR_NULL(tdev->rx_channel)) return -EPROBE_DEFER; /* If Rx is not specified but has Rx MMIO, then Rx = Tx */ @@ -394,6 +399,7 @@ static int mbox_test_probe(struct platform_device *pdev) platform_set_drvdata(pdev, tdev); spin_lock_init(&tdev->lock); + mutex_init(&tdev->mutex); if (tdev->rx_channel) { tdev->rx_buffer = devm_kzalloc(&pdev->dev, @@ -412,18 +418,16 @@ static int mbox_test_probe(struct platform_device *pdev) return 0; } -static int mbox_test_remove(struct platform_device *pdev) +static void mbox_test_remove(struct platform_device *pdev) { struct mbox_test_device *tdev = platform_get_drvdata(pdev); - debugfs_remove_recursive(root_debugfs_dir); + debugfs_remove_recursive(tdev->root_debugfs_dir); if (tdev->tx_channel) mbox_free_channel(tdev->tx_channel); if (tdev->rx_channel) mbox_free_channel(tdev->rx_channel); - - return 0; } static const struct of_device_id mbox_test_match[] = { @@ -437,7 +441,7 @@ static struct platform_driver mbox_test_driver = { .name = "mailbox_test", .of_match_table = mbox_test_match, }, - .probe = mbox_test_probe, + .probe = mbox_test_probe, .remove = mbox_test_remove, }; module_platform_driver(mbox_test_driver); diff --git a/drivers/mailbox/mailbox-th1520.c b/drivers/mailbox/mailbox-th1520.c new file mode 100644 index 000000000000..626957c2e435 --- /dev/null +++ b/drivers/mailbox/mailbox-th1520.c @@ -0,0 +1,597 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + 
* Copyright (C) 2021 Alibaba Group Holding Limited. + */ + +#include <linux/clk.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/mailbox_controller.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +/* Status Register */ +#define TH_1520_MBOX_STA 0x0 +#define TH_1520_MBOX_CLR 0x4 +#define TH_1520_MBOX_MASK 0xc + +/* Transmit/receive data register: + * INFO0 ~ INFO6 + */ +#define TH_1520_MBOX_INFO_NUM 8 +#define TH_1520_MBOX_DATA_INFO_NUM 7 +#define TH_1520_MBOX_INFO0 0x14 +/* Transmit ack register: INFO7 */ +#define TH_1520_MBOX_INFO7 0x30 + +/* Generate remote icu IRQ Register */ +#define TH_1520_MBOX_GEN 0x10 +#define TH_1520_MBOX_GEN_RX_DATA BIT(6) +#define TH_1520_MBOX_GEN_TX_ACK BIT(7) + +#define TH_1520_MBOX_CHAN_RES_SIZE 0x1000 +#define TH_1520_MBOX_CHANS 4 +#define TH_1520_MBOX_CHAN_NAME_SIZE 20 + +#define TH_1520_MBOX_ACK_MAGIC 0xdeadbeaf + +#ifdef CONFIG_PM_SLEEP +/* store MBOX context across system-wide suspend/resume transitions */ +struct th1520_mbox_context { + u32 intr_mask[TH_1520_MBOX_CHANS]; +}; +#endif + +enum th1520_mbox_icu_cpu_id { + TH_1520_MBOX_ICU_KERNEL_CPU0, /* 910T */ + TH_1520_MBOX_ICU_CPU1, /* 902 */ + TH_1520_MBOX_ICU_CPU2, /* 906 */ + TH_1520_MBOX_ICU_CPU3, /* 910R */ +}; + +struct th1520_mbox_con_priv { + enum th1520_mbox_icu_cpu_id idx; + void __iomem *comm_local_base; + void __iomem *comm_remote_base; + char irq_desc[TH_1520_MBOX_CHAN_NAME_SIZE]; + struct mbox_chan *chan; +}; + +struct th1520_mbox_priv { + struct device *dev; + void __iomem *local_icu[TH_1520_MBOX_CHANS]; + void __iomem *remote_icu[TH_1520_MBOX_CHANS - 1]; + void __iomem *cur_cpu_ch_base; + spinlock_t mbox_lock; /* control register lock */ + + struct mbox_controller mbox; + struct mbox_chan mbox_chans[TH_1520_MBOX_CHANS]; + struct clk_bulk_data clocks[TH_1520_MBOX_CHANS]; + struct th1520_mbox_con_priv con_priv[TH_1520_MBOX_CHANS]; + int irq; +#ifdef CONFIG_PM_SLEEP + struct th1520_mbox_context *ctx; +#endif +}; + +static struct th1520_mbox_priv * +to_th1520_mbox_priv(struct mbox_controller *mbox) +{ + return container_of(mbox, struct th1520_mbox_priv, mbox); +} + +static void th1520_mbox_write(struct th1520_mbox_priv *priv, u32 val, u32 offs) +{ + iowrite32(val, priv->cur_cpu_ch_base + offs); +} + +static u32 th1520_mbox_read(struct th1520_mbox_priv *priv, u32 offs) +{ + return ioread32(priv->cur_cpu_ch_base + offs); +} + +static u32 th1520_mbox_rmw(struct th1520_mbox_priv *priv, u32 off, u32 set, + u32 clr) +{ + unsigned long flags; + u32 val; + + spin_lock_irqsave(&priv->mbox_lock, flags); + val = th1520_mbox_read(priv, off); + val &= ~clr; + val |= set; + th1520_mbox_write(priv, val, off); + spin_unlock_irqrestore(&priv->mbox_lock, flags); + + return val; +} + +static void th1520_mbox_chan_write(struct th1520_mbox_con_priv *cp, u32 val, + u32 offs, bool is_remote) +{ + if (is_remote) + iowrite32(val, cp->comm_remote_base + offs); + else + iowrite32(val, cp->comm_local_base + offs); +} + +static u32 th1520_mbox_chan_read(struct th1520_mbox_con_priv *cp, u32 offs, + bool is_remote) +{ + if (is_remote) + return ioread32(cp->comm_remote_base + offs); + else + return ioread32(cp->comm_local_base + offs); +} + +static void th1520_mbox_chan_rmw(struct th1520_mbox_con_priv *cp, u32 off, + u32 set, u32 clr, bool is_remote) +{ + struct th1520_mbox_priv *priv = to_th1520_mbox_priv(cp->chan->mbox); + unsigned long flags; + u32 val; + + spin_lock_irqsave(&priv->mbox_lock, 
flags); + val = th1520_mbox_chan_read(cp, off, is_remote); + val &= ~clr; + val |= set; + th1520_mbox_chan_write(cp, val, off, is_remote); + spin_unlock_irqrestore(&priv->mbox_lock, flags); +} + +static void th1520_mbox_chan_rd_data(struct th1520_mbox_con_priv *cp, + void *data, bool is_remote) +{ + u32 off = TH_1520_MBOX_INFO0; + u32 *arg = data; + u32 i; + + /* read info0 ~ info6, totally 28 bytes + * requires data memory size is 28 bytes + */ + for (i = 0; i < TH_1520_MBOX_DATA_INFO_NUM; i++) { + *arg = th1520_mbox_chan_read(cp, off, is_remote); + off += 4; + arg++; + } +} + +static void th1520_mbox_chan_wr_data(struct th1520_mbox_con_priv *cp, + void *data, bool is_remote) +{ + u32 off = TH_1520_MBOX_INFO0; + u32 *arg = data; + u32 i; + + /* write info0 ~ info6, totally 28 bytes + * requires data memory is 28 bytes valid data + */ + for (i = 0; i < TH_1520_MBOX_DATA_INFO_NUM; i++) { + th1520_mbox_chan_write(cp, *arg, off, is_remote); + off += 4; + arg++; + } +} + +static void th1520_mbox_chan_wr_ack(struct th1520_mbox_con_priv *cp, void *data, + bool is_remote) +{ + u32 off = TH_1520_MBOX_INFO7; + u32 *arg = data; + + th1520_mbox_chan_write(cp, *arg, off, is_remote); +} + +static int th1520_mbox_chan_id_to_mapbit(struct th1520_mbox_con_priv *cp) +{ + int mapbit = 0; + int i; + + for (i = 0; i < TH_1520_MBOX_CHANS; i++) { + if (i == cp->idx) + return mapbit; + + if (i != TH_1520_MBOX_ICU_KERNEL_CPU0) + mapbit++; + } + + if (i == TH_1520_MBOX_CHANS) + dev_err(cp->chan->mbox->dev, "convert to mapbit failed\n"); + + return 0; +} + +static irqreturn_t th1520_mbox_isr(int irq, void *p) +{ + struct mbox_chan *chan = p; + struct th1520_mbox_priv *priv = to_th1520_mbox_priv(chan->mbox); + struct th1520_mbox_con_priv *cp = chan->con_priv; + int mapbit = th1520_mbox_chan_id_to_mapbit(cp); + u32 sta, dat[TH_1520_MBOX_DATA_INFO_NUM]; + u32 ack_magic = TH_1520_MBOX_ACK_MAGIC; + u32 info0_data, info7_data; + + sta = th1520_mbox_read(priv, TH_1520_MBOX_STA); + if (!(sta & BIT(mapbit))) + return IRQ_NONE; + + /* clear chan irq bit in STA register */ + th1520_mbox_rmw(priv, TH_1520_MBOX_CLR, BIT(mapbit), 0); + + /* info0 is the protocol word, should not be zero! 
*/ + info0_data = th1520_mbox_chan_read(cp, TH_1520_MBOX_INFO0, false); + if (info0_data) { + /* read info0~info6 data */ + th1520_mbox_chan_rd_data(cp, dat, false); + + /* clear local info0 */ + th1520_mbox_chan_write(cp, 0x0, TH_1520_MBOX_INFO0, false); + + /* notify remote cpu */ + th1520_mbox_chan_wr_ack(cp, &ack_magic, true); + /* CPU1/CPU2 (902/906) use polling mode to monitor info7 */ + if (cp->idx != TH_1520_MBOX_ICU_CPU1 && + cp->idx != TH_1520_MBOX_ICU_CPU2) + th1520_mbox_chan_rmw(cp, TH_1520_MBOX_GEN, + TH_1520_MBOX_GEN_TX_ACK, 0, true); + + /* transfer the data to client */ + mbox_chan_received_data(chan, (void *)dat); + } + + /* the info7 magic value means a real ack signal, not the generated bit7 */ + info7_data = th1520_mbox_chan_read(cp, TH_1520_MBOX_INFO7, false); + if (info7_data == TH_1520_MBOX_ACK_MAGIC) { + /* clear local info7 */ + th1520_mbox_chan_write(cp, 0x0, TH_1520_MBOX_INFO7, false); + + /* notify framework the last TX has completed */ + mbox_chan_txdone(chan, 0); + } + + if (!info0_data && !info7_data) + return IRQ_NONE; + + return IRQ_HANDLED; +} + +static int th1520_mbox_send_data(struct mbox_chan *chan, void *data) +{ + struct th1520_mbox_con_priv *cp = chan->con_priv; + + th1520_mbox_chan_wr_data(cp, data, true); + th1520_mbox_chan_rmw(cp, TH_1520_MBOX_GEN, TH_1520_MBOX_GEN_RX_DATA, 0, + true); + return 0; +} + +static int th1520_mbox_startup(struct mbox_chan *chan) +{ + struct th1520_mbox_priv *priv = to_th1520_mbox_priv(chan->mbox); + struct th1520_mbox_con_priv *cp = chan->con_priv; + u32 data[8] = {}; + int mask_bit; + int ret; + + /* clear local and remote generate and info0~info7 */ + th1520_mbox_chan_rmw(cp, TH_1520_MBOX_GEN, 0x0, 0xff, true); + th1520_mbox_chan_rmw(cp, TH_1520_MBOX_GEN, 0x0, 0xff, false); + th1520_mbox_chan_wr_ack(cp, &data[7], true); + th1520_mbox_chan_wr_ack(cp, &data[7], false); + th1520_mbox_chan_wr_data(cp, &data[0], true); + th1520_mbox_chan_wr_data(cp, &data[0], false); + + /* enable the chan mask */ + mask_bit = th1520_mbox_chan_id_to_mapbit(cp); + th1520_mbox_rmw(priv, TH_1520_MBOX_MASK, BIT(mask_bit), 0); + + /* + * Mixing devm_ managed resources with manual IRQ handling is generally + * discouraged due to potential complexities with resource management, + * especially when dealing with shared interrupts. However, in this case, + * the approach is safe and effective because: + * + * 1. Each mailbox channel requests its IRQ within the .startup() callback + * and frees it within the .shutdown() callback. + * 2. During device unbinding, the devm_ managed mailbox controller first + * iterates through all channels, ensuring that their IRQs are freed before + * any other devm_ resources are released. + * + * This ordering guarantees that no interrupts can be triggered from the device + * while it is being unbound, preventing race conditions and ensuring system + * stability.
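Note: taken together, th1520_mbox_isr() and th1520_mbox_send_data() above implement the handshake: a message is the seven 32-bit words INFO0..INFO6 with a non-zero protocol word in INFO0, and TX completion is reported via txdone_irq once the peer writes TH_1520_MBOX_ACK_MAGIC into INFO7. A hypothetical client could look like this (all example_* names are assumptions, not part of the driver):

	#include <linux/mailbox_client.h>

	static void example_rx(struct mbox_client *cl, void *msg)
	{
		u32 *words = msg;	/* INFO0..INFO6, 28 bytes */

		dev_info(cl->dev, "protocol word 0x%08x\n", words[0]);
	}

	static int example_send(struct device *dev)
	{
		static u32 msg[7] = { 0x1 };	/* INFO0 must be non-zero */
		struct mbox_client cl = {
			.dev = dev,
			.rx_callback = example_rx,
			.tx_block = true,
			.tx_tout = 100,	/* ms */
		};
		struct mbox_chan *chan;
		int ret;

		chan = mbox_request_channel(&cl, 0);
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		ret = mbox_send_message(chan, msg); /* completes on the INFO7 ack */
		mbox_free_channel(chan);

		return ret < 0 ? ret : 0;
	}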
+ */ + ret = request_irq(priv->irq, th1520_mbox_isr, + IRQF_SHARED | IRQF_NO_SUSPEND, cp->irq_desc, chan); + if (ret) { + dev_err(priv->dev, "Unable to acquire IRQ %d\n", priv->irq); + return ret; + } + + return 0; +} + +static void th1520_mbox_shutdown(struct mbox_chan *chan) +{ + struct th1520_mbox_priv *priv = to_th1520_mbox_priv(chan->mbox); + struct th1520_mbox_con_priv *cp = chan->con_priv; + int mask_bit; + + free_irq(priv->irq, chan); + + /* clear the chan mask */ + mask_bit = th1520_mbox_chan_id_to_mapbit(cp); + th1520_mbox_rmw(priv, TH_1520_MBOX_MASK, 0, BIT(mask_bit)); +} + +static const struct mbox_chan_ops th1520_mbox_ops = { + .send_data = th1520_mbox_send_data, + .startup = th1520_mbox_startup, + .shutdown = th1520_mbox_shutdown, +}; + +static int th1520_mbox_init_generic(struct th1520_mbox_priv *priv) +{ +#ifdef CONFIG_PM_SLEEP + priv->ctx = devm_kzalloc(priv->dev, sizeof(*priv->ctx), GFP_KERNEL); + if (!priv->ctx) + return -ENOMEM; +#endif + /* Set default configuration */ + th1520_mbox_write(priv, 0xff, TH_1520_MBOX_CLR); + th1520_mbox_write(priv, 0x0, TH_1520_MBOX_MASK); + return 0; +} + +static struct mbox_chan *th1520_mbox_xlate(struct mbox_controller *mbox, + const struct of_phandle_args *sp) +{ + u32 chan; + + if (sp->args_count != 1) { + dev_err(mbox->dev, "Invalid argument count %d\n", + sp->args_count); + return ERR_PTR(-EINVAL); + } + + chan = sp->args[0]; /* comm remote channel */ + + if (chan >= mbox->num_chans) { + dev_err(mbox->dev, "Not supported channel number: %d\n", chan); + return ERR_PTR(-EINVAL); + } + + if (chan == TH_1520_MBOX_ICU_KERNEL_CPU0) { + dev_err(mbox->dev, "Cannot communicate with yourself\n"); + return ERR_PTR(-EINVAL); + } + + return &mbox->chans[chan]; +} + +static void __iomem *th1520_map_mmio(struct platform_device *pdev, + char *res_name, size_t offset) +{ + void __iomem *mapped; + struct resource *res; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name); + + if (!res) { + dev_err(&pdev->dev, "Failed to get resource: %s\n", res_name); + return ERR_PTR(-EINVAL); + } + + mapped = devm_ioremap(&pdev->dev, res->start + offset, + resource_size(res) - offset); + if (!mapped) { + dev_err(&pdev->dev, "Failed to map resource: %s\n", res_name); + return ERR_PTR(-ENOMEM); + } + + return mapped; +} + +static void th1520_disable_clk(void *data) +{ + struct th1520_mbox_priv *priv = data; + + clk_bulk_disable_unprepare(ARRAY_SIZE(priv->clocks), priv->clocks); +} + +static int th1520_mbox_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct th1520_mbox_priv *priv; + unsigned int remote_idx = 0; + unsigned int i; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->dev = dev; + + priv->clocks[0].id = "clk-local"; + priv->clocks[1].id = "clk-remote-icu0"; + priv->clocks[2].id = "clk-remote-icu1"; + priv->clocks[3].id = "clk-remote-icu2"; + + ret = devm_clk_bulk_get(dev, ARRAY_SIZE(priv->clocks), + priv->clocks); + if (ret) { + dev_err(dev, "Failed to get clocks\n"); + return ret; + } + + ret = clk_bulk_prepare_enable(ARRAY_SIZE(priv->clocks), priv->clocks); + if (ret) { + dev_err(dev, "Failed to enable clocks\n"); + return ret; + } + + ret = devm_add_action_or_reset(dev, th1520_disable_clk, priv); + if (ret) + return ret; + + /* + * The address mappings in the device tree align precisely with those + * outlined in the manual. However, register offsets within these + * mapped regions are irregular, particularly for remote-icu0. 
+ * Consequently, th1520_map_mmio() requires an additional parameter to + * handle this quirk. + */ + priv->local_icu[TH_1520_MBOX_ICU_KERNEL_CPU0] = + th1520_map_mmio(pdev, "local", 0x0); + if (IS_ERR(priv->local_icu[TH_1520_MBOX_ICU_KERNEL_CPU0])) + return PTR_ERR(priv->local_icu[TH_1520_MBOX_ICU_KERNEL_CPU0]); + + priv->remote_icu[0] = th1520_map_mmio(pdev, "remote-icu0", 0x4000); + if (IS_ERR(priv->remote_icu[0])) + return PTR_ERR(priv->remote_icu[0]); + + priv->remote_icu[1] = th1520_map_mmio(pdev, "remote-icu1", 0x0); + if (IS_ERR(priv->remote_icu[1])) + return PTR_ERR(priv->remote_icu[1]); + + priv->remote_icu[2] = th1520_map_mmio(pdev, "remote-icu2", 0x0); + if (IS_ERR(priv->remote_icu[2])) + return PTR_ERR(priv->remote_icu[2]); + + priv->local_icu[TH_1520_MBOX_ICU_CPU1] = + priv->local_icu[TH_1520_MBOX_ICU_KERNEL_CPU0] + + TH_1520_MBOX_CHAN_RES_SIZE; + priv->local_icu[TH_1520_MBOX_ICU_CPU2] = + priv->local_icu[TH_1520_MBOX_ICU_CPU1] + + TH_1520_MBOX_CHAN_RES_SIZE; + priv->local_icu[TH_1520_MBOX_ICU_CPU3] = + priv->local_icu[TH_1520_MBOX_ICU_CPU2] + + TH_1520_MBOX_CHAN_RES_SIZE; + + priv->cur_cpu_ch_base = priv->local_icu[TH_1520_MBOX_ICU_KERNEL_CPU0]; + + priv->irq = platform_get_irq(pdev, 0); + if (priv->irq < 0) + return priv->irq; + + /* init the chans */ + for (i = 0; i < TH_1520_MBOX_CHANS; i++) { + struct th1520_mbox_con_priv *cp = &priv->con_priv[i]; + + cp->idx = i; + cp->chan = &priv->mbox_chans[i]; + priv->mbox_chans[i].con_priv = cp; + snprintf(cp->irq_desc, sizeof(cp->irq_desc), + "th1520_mbox_chan[%i]", cp->idx); + + cp->comm_local_base = priv->local_icu[i]; + if (i != TH_1520_MBOX_ICU_KERNEL_CPU0) { + cp->comm_remote_base = priv->remote_icu[remote_idx]; + remote_idx++; + } + } + + spin_lock_init(&priv->mbox_lock); + + priv->mbox.dev = dev; + priv->mbox.ops = &th1520_mbox_ops; + priv->mbox.chans = priv->mbox_chans; + priv->mbox.num_chans = TH_1520_MBOX_CHANS; + priv->mbox.of_xlate = th1520_mbox_xlate; + priv->mbox.txdone_irq = true; + + platform_set_drvdata(pdev, priv); + + ret = th1520_mbox_init_generic(priv); + if (ret) { + dev_err(dev, "Failed to init mailbox context\n"); + return ret; + } + + return devm_mbox_controller_register(dev, &priv->mbox); +} + +static const struct of_device_id th1520_mbox_dt_ids[] = { + { .compatible = "thead,th1520-mbox" }, + {} +}; +MODULE_DEVICE_TABLE(of, th1520_mbox_dt_ids); + +#ifdef CONFIG_PM_SLEEP +static int __maybe_unused th1520_mbox_suspend_noirq(struct device *dev) +{ + struct th1520_mbox_priv *priv = dev_get_drvdata(dev); + struct th1520_mbox_context *ctx = priv->ctx; + u32 i; + /* + * ONLY interrupt mask bit should be stored and restores. + * INFO data all assumed to be lost. 
+ */ + for (i = 0; i < TH_1520_MBOX_CHANS; i++) { + ctx->intr_mask[i] = + ioread32(priv->local_icu[i] + TH_1520_MBOX_MASK); + } + return 0; +} + +static int __maybe_unused th1520_mbox_resume_noirq(struct device *dev) +{ + struct th1520_mbox_priv *priv = dev_get_drvdata(dev); + struct th1520_mbox_context *ctx = priv->ctx; + u32 i; + + for (i = 0; i < TH_1520_MBOX_CHANS; i++) { + iowrite32(ctx->intr_mask[i], + priv->local_icu[i] + TH_1520_MBOX_MASK); + } + + return 0; +} +#endif + +static int __maybe_unused th1520_mbox_runtime_suspend(struct device *dev) +{ + struct th1520_mbox_priv *priv = dev_get_drvdata(dev); + + clk_bulk_disable_unprepare(ARRAY_SIZE(priv->clocks), priv->clocks); + + return 0; +} + +static int __maybe_unused th1520_mbox_runtime_resume(struct device *dev) +{ + struct th1520_mbox_priv *priv = dev_get_drvdata(dev); + int ret; + + ret = clk_bulk_prepare_enable(ARRAY_SIZE(priv->clocks), priv->clocks); + if (ret) + dev_err(dev, "Failed to enable clocks in runtime resume\n"); + + return ret; +} + +static const struct dev_pm_ops th1520_mbox_pm_ops = { +#ifdef CONFIG_PM_SLEEP + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(th1520_mbox_suspend_noirq, + th1520_mbox_resume_noirq) +#endif + SET_RUNTIME_PM_OPS(th1520_mbox_runtime_suspend, + th1520_mbox_runtime_resume, NULL) +}; + +static struct platform_driver th1520_mbox_driver = { + .probe = th1520_mbox_probe, + .driver = { + .name = "th1520-mbox", + .of_match_table = th1520_mbox_dt_ids, + .pm = &th1520_mbox_pm_ops, + }, +}; +module_platform_driver(th1520_mbox_driver); + +MODULE_DESCRIPTION("Thead TH-1520 mailbox IPC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mailbox/mailbox-xgene-slimpro.c b/drivers/mailbox/mailbox-xgene-slimpro.c index 8f397da1150b..946ea773ec33 100644 --- a/drivers/mailbox/mailbox-xgene-slimpro.c +++ b/drivers/mailbox/mailbox-xgene-slimpro.c @@ -1,22 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * APM X-Gene SLIMpro MailBox Driver * * Copyright (c) 2015, Applied Micro Circuits Corporation * Author: Feng Kan fkan@apm.com - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 of - * the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. - * */ #include <linux/acpi.h> #include <linux/delay.h> @@ -64,10 +51,10 @@ struct slimpro_mbox_chan { /** * X-Gene SlimPRO Mailbox controller data * - * X-Gene SlimPRO Mailbox controller has 8 commnunication channels. - * Each channel has a separate IRQ number assgined to it. + * X-Gene SlimPRO Mailbox controller has 8 communication channels. + * Each channel has a separate IRQ number assigned to it. 
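Note: in the th1520 dev_pm_ops above, the two macro families compose: SET_NOIRQ_SYSTEM_SLEEP_PM_OPS() fills the .suspend_noirq/.resume_noirq slots, which run after device interrupts are disabled (a safe point to save and restore the per-channel MASK registers), while SET_RUNTIME_PM_OPS() fills the runtime callbacks that merely gate the bulk clocks. A stripped-down skeleton of the same composition, with hypothetical example_* callbacks:

	#include <linux/pm.h>

	static int example_suspend_noirq(struct device *dev) { return 0; }
	static int example_resume_noirq(struct device *dev) { return 0; }
	static int example_rt_suspend(struct device *dev) { return 0; }
	static int example_rt_resume(struct device *dev) { return 0; }

	static const struct dev_pm_ops example_pm_ops = {
		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(example_suspend_noirq,
					      example_resume_noirq)
		SET_RUNTIME_PM_OPS(example_rt_suspend, example_rt_resume, NULL)
	};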
* - * @mb_ctrl: Representation of the commnunication channel controller + * @mb_ctrl: Representation of the communication channel controller * @mc: Array of SlimPRO mailbox channels of the controller * @chans: Array of mailbox communication channels * @@ -183,7 +170,6 @@ static const struct mbox_chan_ops slimpro_mbox_ops = { static int slimpro_mbox_probe(struct platform_device *pdev) { struct slimpro_mbox *ctx; - struct resource *regs; void __iomem *mb_base; int rc; int i; @@ -194,8 +180,7 @@ static int slimpro_mbox_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ctx); - regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - mb_base = devm_ioremap_resource(&pdev->dev, regs); + mb_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(mb_base)) return PTR_ERR(mb_base); diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c index c6a7d4582dc6..2acc6ec229a4 100644 --- a/drivers/mailbox/mailbox.c +++ b/drivers/mailbox/mailbox.c @@ -1,25 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Mailbox: Common code for Mailbox controllers and users * * Copyright (C) 2013-2014 Linaro Ltd. * Author: Jassi Brar <jassisinghbrar@gmail.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ -#include <linux/interrupt.h> -#include <linux/spinlock.h> -#include <linux/mutex.h> +#include <linux/cleanup.h> #include <linux/delay.h> -#include <linux/slab.h> -#include <linux/err.h> -#include <linux/module.h> #include <linux/device.h> -#include <linux/bitops.h> +#include <linux/err.h> #include <linux/mailbox_client.h> #include <linux/mailbox_controller.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/property.h> +#include <linux/spinlock.h> #include "mailbox.h" @@ -29,15 +26,12 @@ static DEFINE_MUTEX(con_mutex); static int add_to_rbuf(struct mbox_chan *chan, void *mssg) { int idx; - unsigned long flags; - spin_lock_irqsave(&chan->lock, flags); + guard(spinlock_irqsave)(&chan->lock); /* See if there is any space left */ - if (chan->msg_count == MBOX_TX_QUEUE_LEN) { - spin_unlock_irqrestore(&chan->lock, flags); + if (chan->msg_count == MBOX_TX_QUEUE_LEN) return -ENOBUFS; - } idx = chan->msg_free; chan->msg_data[idx] = mssg; @@ -48,57 +42,53 @@ static int add_to_rbuf(struct mbox_chan *chan, void *mssg) else chan->msg_free++; - spin_unlock_irqrestore(&chan->lock, flags); - return idx; } static void msg_submit(struct mbox_chan *chan) { unsigned count, idx; - unsigned long flags; void *data; int err = -EBUSY; - spin_lock_irqsave(&chan->lock, flags); - - if (!chan->msg_count || chan->active_req) - goto exit; + scoped_guard(spinlock_irqsave, &chan->lock) { + if (!chan->msg_count || chan->active_req) + break; - count = chan->msg_count; - idx = chan->msg_free; - if (idx >= count) - idx -= count; - else - idx += MBOX_TX_QUEUE_LEN - count; + count = chan->msg_count; + idx = chan->msg_free; + if (idx >= count) + idx -= count; + else + idx += MBOX_TX_QUEUE_LEN - count; - data = chan->msg_data[idx]; + data = chan->msg_data[idx]; - if (chan->cl->tx_prepare) - chan->cl->tx_prepare(chan->cl, data); - /* Try to submit a message to the MBOX controller */ - err = chan->mbox->ops->send_data(chan, data); - if (!err) { - chan->active_req = data; - chan->msg_count--; + if (chan->cl->tx_prepare) + chan->cl->tx_prepare(chan->cl, data); + /* Try to submit a message to the MBOX controller */ + err = 
chan->mbox->ops->send_data(chan, data); + if (!err) { + chan->active_req = data; + chan->msg_count--; + } } -exit: - spin_unlock_irqrestore(&chan->lock, flags); - if (!err && (chan->txdone_method & TXDONE_BY_POLL)) + if (!err && (chan->txdone_method & TXDONE_BY_POLL)) { /* kick start the timer immediately to avoid delays */ - hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL); + scoped_guard(spinlock_irqsave, &chan->mbox->poll_hrt_lock) + hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL); + } } static void tx_tick(struct mbox_chan *chan, int r) { - unsigned long flags; void *mssg; - spin_lock_irqsave(&chan->lock, flags); - mssg = chan->active_req; - chan->active_req = NULL; - spin_unlock_irqrestore(&chan->lock, flags); + scoped_guard(spinlock_irqsave, &chan->lock) { + mssg = chan->active_req; + chan->active_req = NULL; + } /* Submit next message */ msg_submit(chan); @@ -134,7 +124,11 @@ static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer) } if (resched) { - hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period)); + scoped_guard(spinlock_irqsave, &mbox->poll_hrt_lock) { + if (!hrtimer_is_queued(hrtimer)) + hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period)); + } + return HRTIMER_RESTART; } return HRTIMER_NORESTART; @@ -310,6 +304,66 @@ int mbox_flush(struct mbox_chan *chan, unsigned long timeout) return ret; } +EXPORT_SYMBOL_GPL(mbox_flush); + +static int __mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl) +{ + struct device *dev = cl->dev; + int ret; + + if (chan->cl || !try_module_get(chan->mbox->dev->driver->owner)) { + dev_err(dev, "%s: mailbox not free\n", __func__); + return -EBUSY; + } + + scoped_guard(spinlock_irqsave, &chan->lock) { + chan->msg_free = 0; + chan->msg_count = 0; + chan->active_req = NULL; + chan->cl = cl; + init_completion(&chan->tx_complete); + + if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone) + chan->txdone_method = TXDONE_BY_ACK; + } + + if (chan->mbox->ops->startup) { + ret = chan->mbox->ops->startup(chan); + + if (ret) { + dev_err(dev, "Unable to startup the chan (%d)\n", ret); + mbox_free_channel(chan); + return ret; + } + } + + return 0; +} + +/** + * mbox_bind_client - Request a mailbox channel. + * @chan: The mailbox channel to bind the client to. + * @cl: Identity of the client requesting the channel. + * + * The Client specifies its requirements and capabilities while asking for + * a mailbox channel. It can't be called from atomic context. + * The channel is exclusively allocated and can't be used by another + * client before the owner calls mbox_free_channel. + * After assignment, any packet received on this channel will be + * handed over to the client via the 'rx_callback'. + * The framework holds reference to the client, so the mbox_client + * structure shouldn't be modified until the mbox_free_channel returns. + * + * Return: 0 if the channel was assigned to the client successfully. + * <0 for request failure. + */ +int mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl) +{ + guard(mutex)(&con_mutex); + + return __mbox_bind_client(chan, cl); +} +EXPORT_SYMBOL_GPL(mbox_bind_client); /** * mbox_request_channel - Request a mailbox channel. 
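Note: the mailbox core hunks above convert to the <linux/cleanup.h> guards: guard(spinlock_irqsave)(&lock) acquires the lock and drops it automatically when the enclosing scope ends, and scoped_guard(spinlock_irqsave, &lock) { ... } confines the critical section to the braced block. Because scoped_guard() is built on a one-iteration for loop, `break` exits the guarded block, which is how msg_submit() bails out early. Equivalent manual and guarded forms (a minimal sketch, not from the diff):

	#include <linux/cleanup.h>
	#include <linux/spinlock.h>

	static int counter_inc_manual(spinlock_t *lock, int *count)
	{
		unsigned long flags;
		int ret;

		spin_lock_irqsave(lock, flags);
		ret = ++(*count);
		spin_unlock_irqrestore(lock, flags);

		return ret;
	}

	static int counter_inc_guarded(spinlock_t *lock, int *count)
	{
		guard(spinlock_irqsave)(lock);	/* released at any return */

		return ++(*count);
	}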
@@ -330,71 +384,65 @@ int mbox_flush(struct mbox_chan *chan, unsigned long timeout) */ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index) { - struct device *dev = cl->dev; + struct fwnode_reference_args fwspec; + struct fwnode_handle *fwnode; struct mbox_controller *mbox; struct of_phandle_args spec; struct mbox_chan *chan; - unsigned long flags; + struct device *dev; + unsigned int i; int ret; - if (!dev || !dev->of_node) { - pr_debug("%s: No owner device node\n", __func__); + dev = cl->dev; + if (!dev) { + pr_debug("No owner device\n"); return ERR_PTR(-ENODEV); } - mutex_lock(&con_mutex); - - if (of_parse_phandle_with_args(dev->of_node, "mboxes", - "#mbox-cells", index, &spec)) { - dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__); - mutex_unlock(&con_mutex); + fwnode = dev_fwnode(dev); + if (!fwnode) { + dev_dbg(dev, "No owner fwnode\n"); return ERR_PTR(-ENODEV); } - chan = ERR_PTR(-EPROBE_DEFER); - list_for_each_entry(mbox, &mbox_cons, node) - if (mbox->dev->of_node == spec.np) { - chan = mbox->of_xlate(mbox, &spec); - if (!IS_ERR(chan)) - break; - } - - of_node_put(spec.np); - - if (IS_ERR(chan)) { - mutex_unlock(&con_mutex); - return chan; - } - - if (chan->cl || !try_module_get(mbox->dev->driver->owner)) { - dev_dbg(dev, "%s: mailbox not free\n", __func__); - mutex_unlock(&con_mutex); - return ERR_PTR(-EBUSY); + ret = fwnode_property_get_reference_args(fwnode, "mboxes", "#mbox-cells", + 0, index, &fwspec); + if (ret) { + dev_err(dev, "%s: can't parse \"%s\" property\n", __func__, "mboxes"); + return ERR_PTR(ret); } - spin_lock_irqsave(&chan->lock, flags); - chan->msg_free = 0; - chan->msg_count = 0; - chan->active_req = NULL; - chan->cl = cl; - init_completion(&chan->tx_complete); - - if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone) - chan->txdone_method = TXDONE_BY_ACK; + spec.np = to_of_node(fwspec.fwnode); + spec.args_count = fwspec.nargs; + for (i = 0; i < spec.args_count; i++) + spec.args[i] = fwspec.args[i]; + + scoped_guard(mutex, &con_mutex) { + chan = ERR_PTR(-EPROBE_DEFER); + list_for_each_entry(mbox, &mbox_cons, node) { + if (device_match_fwnode(mbox->dev, fwspec.fwnode)) { + if (mbox->fw_xlate) { + chan = mbox->fw_xlate(mbox, &fwspec); + if (!IS_ERR(chan)) + break; + } else if (mbox->of_xlate) { + chan = mbox->of_xlate(mbox, &spec); + if (!IS_ERR(chan)) + break; + } + } + } - spin_unlock_irqrestore(&chan->lock, flags); + fwnode_handle_put(fwspec.fwnode); - if (chan->mbox->ops->startup) { - ret = chan->mbox->ops->startup(chan); + if (IS_ERR(chan)) + return chan; - if (ret) { - dev_err(dev, "Unable to startup the chan (%d)\n", ret); - mbox_free_channel(chan); + ret = __mbox_bind_client(chan, cl); + if (ret) chan = ERR_PTR(ret); - } } - mutex_unlock(&con_mutex); return chan; } EXPORT_SYMBOL_GPL(mbox_request_channel); @@ -402,28 +450,13 @@ EXPORT_SYMBOL_GPL(mbox_request_channel); struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl, const char *name) { - struct device_node *np = cl->dev->of_node; - struct property *prop; - const char *mbox_name; - int index = 0; - - if (!np) { - dev_err(cl->dev, "%s() currently only supports DT\n", __func__); - return ERR_PTR(-EINVAL); - } + int index = device_property_match_string(cl->dev, "mbox-names", name); - if (!of_get_property(np, "mbox-names", NULL)) { - dev_err(cl->dev, - "%s() requires an \"mbox-names\" property\n", __func__); - return ERR_PTR(-EINVAL); + if (index < 0) { + dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n", + __func__, name); + return 
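/*
 * [Editor's note] fwnode_property_get_reference_args() above is the
 * firmware-node counterpart of of_parse_phandle_with_args(), so the lookup
 * now works for both DT and ACPI-described consumers. On success the caller
 * owns a reference on fwspec.fwnode, which is why the controller loop is
 * followed by fwnode_handle_put(); the of_phandle_args copy is kept only to
 * service controllers that still provide of_xlate instead of fw_xlate.
 */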
ERR_PTR(index); } - - of_property_for_each_string(np, "mbox-names", prop, mbox_name) { - if (!strncmp(name, mbox_name, strlen(name))) - break; - index++; - } - return mbox_request_channel(cl, index); } EXPORT_SYMBOL_GPL(mbox_request_channel_byname); @@ -435,8 +468,6 @@ EXPORT_SYMBOL_GPL(mbox_request_channel_byname); */ void mbox_free_channel(struct mbox_chan *chan) { - unsigned long flags; - if (!chan || !chan->cl) return; @@ -444,20 +475,19 @@ void mbox_free_channel(struct mbox_chan *chan) chan->mbox->ops->shutdown(chan); /* The queued TX requests are simply aborted, no callbacks are made */ - spin_lock_irqsave(&chan->lock, flags); - chan->cl = NULL; - chan->active_req = NULL; - if (chan->txdone_method == TXDONE_BY_ACK) - chan->txdone_method = TXDONE_BY_POLL; + scoped_guard(spinlock_irqsave, &chan->lock) { + chan->cl = NULL; + chan->active_req = NULL; + if (chan->txdone_method == TXDONE_BY_ACK) + chan->txdone_method = TXDONE_BY_POLL; + } module_put(chan->mbox->dev->driver->owner); - spin_unlock_irqrestore(&chan->lock, flags); } EXPORT_SYMBOL_GPL(mbox_free_channel); -static struct mbox_chan * -of_mbox_index_xlate(struct mbox_controller *mbox, - const struct of_phandle_args *sp) +static struct mbox_chan *fw_mbox_index_xlate(struct mbox_controller *mbox, + const struct fwnode_reference_args *sp) { int ind = sp->args[0]; @@ -495,9 +525,8 @@ int mbox_controller_register(struct mbox_controller *mbox) return -EINVAL; } - hrtimer_init(&mbox->poll_hrt, CLOCK_MONOTONIC, - HRTIMER_MODE_REL); - mbox->poll_hrt.function = txdone_hrtimer; + hrtimer_setup(&mbox->poll_hrt, txdone_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + spin_lock_init(&mbox->poll_hrt_lock); } for (i = 0; i < mbox->num_chans; i++) { @@ -509,12 +538,11 @@ int mbox_controller_register(struct mbox_controller *mbox) spin_lock_init(&chan->lock); } - if (!mbox->of_xlate) - mbox->of_xlate = of_mbox_index_xlate; + if (!mbox->fw_xlate && !mbox->of_xlate) + mbox->fw_xlate = fw_mbox_index_xlate; - mutex_lock(&con_mutex); - list_add_tail(&mbox->node, &mbox_cons); - mutex_unlock(&con_mutex); + scoped_guard(mutex, &con_mutex) + list_add_tail(&mbox->node, &mbox_cons); return 0; } @@ -531,17 +559,15 @@ void mbox_controller_unregister(struct mbox_controller *mbox) if (!mbox) return; - mutex_lock(&con_mutex); + scoped_guard(mutex, &con_mutex) { + list_del(&mbox->node); - list_del(&mbox->node); + for (i = 0; i < mbox->num_chans; i++) + mbox_free_channel(&mbox->chans[i]); - for (i = 0; i < mbox->num_chans; i++) - mbox_free_channel(&mbox->chans[i]); - - if (mbox->txdone_poll) - hrtimer_cancel(&mbox->poll_hrt); - - mutex_unlock(&con_mutex); + if (mbox->txdone_poll) + hrtimer_cancel(&mbox->poll_hrt); + } } EXPORT_SYMBOL_GPL(mbox_controller_unregister); @@ -552,16 +578,6 @@ static void __devm_mbox_controller_unregister(struct device *dev, void *res) mbox_controller_unregister(*mbox); } -static int devm_mbox_controller_match(struct device *dev, void *res, void *data) -{ - struct mbox_controller **mbox = res; - - if (WARN_ON(!mbox || !*mbox)) - return 0; - - return *mbox == data; -} - /** * devm_mbox_controller_register() - managed mbox_controller_register() * @dev: device owning the mailbox controller being registered @@ -597,20 +613,3 @@ int devm_mbox_controller_register(struct device *dev, return 0; } EXPORT_SYMBOL_GPL(devm_mbox_controller_register); - -/** - * devm_mbox_controller_unregister() - managed mbox_controller_unregister() - * @dev: device owning the mailbox controller being unregistered - * @mbox: mailbox controller being unregistered - * - * 
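Note: in mbox_controller_register() above, hrtimer_setup() supersedes the old two-step initialization by installing the callback together with the clock and mode, and the new poll_hrt_lock closes the race between hrtimer_start() in msg_submit() and hrtimer_forward_now() in the poll callback. Old versus new initialization (sketch with a hypothetical example_poll()):

	#include <linux/hrtimer.h>

	static enum hrtimer_restart example_poll(struct hrtimer *t)
	{
		return HRTIMER_NORESTART;
	}

	static void example_init(struct hrtimer *t)
	{
		/* old style:
		 *	hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		 *	t->function = example_poll;
		 */
		hrtimer_setup(t, example_poll, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	}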
This function unregisters the mailbox controller and removes the device- - * managed resource that was set up to automatically unregister the mailbox - * controller on driver probe failure or driver removal. It's typically not - * necessary to call this function. - */ -void devm_mbox_controller_unregister(struct device *dev, struct mbox_controller *mbox) -{ - WARN_ON(devres_release(dev, __devm_mbox_controller_unregister, - devm_mbox_controller_match, mbox)); -} -EXPORT_SYMBOL_GPL(devm_mbox_controller_unregister); diff --git a/drivers/mailbox/mailbox.h b/drivers/mailbox/mailbox.h index 456ba68513bb..e1ec4efab693 100644 --- a/drivers/mailbox/mailbox.h +++ b/drivers/mailbox/mailbox.h @@ -1,14 +1,12 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ +/* SPDX-License-Identifier: GPL-2.0-only */ #ifndef __MAILBOX_H #define __MAILBOX_H +#include <linux/bits.h> + #define TXDONE_BY_IRQ BIT(0) /* controller has remote RTR irq */ #define TXDONE_BY_POLL BIT(1) /* controller can read status of last TX */ -#define TXDONE_BY_ACK BIT(2) /* S/W ACK recevied by Client ticks the TX */ +#define TXDONE_BY_ACK BIT(2) /* S/W ACK received by Client ticks the TX */ #endif /* __MAILBOX_H */ diff --git a/drivers/mailbox/mtk-adsp-mailbox.c b/drivers/mailbox/mtk-adsp-mailbox.c new file mode 100644 index 000000000000..91487aa4d7da --- /dev/null +++ b/drivers/mailbox/mtk-adsp-mailbox.c @@ -0,0 +1,185 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 MediaTek Corporation. All rights reserved. + * Author: Allen-KH Cheng <allen-kh.cheng@mediatek.com> + */ + +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/mailbox_controller.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +struct mtk_adsp_mbox_priv { + struct device *dev; + struct mbox_controller mbox; + void __iomem *va_mboxreg; + const struct mtk_adsp_mbox_cfg *cfg; +}; + +struct mtk_adsp_mbox_cfg { + u32 set_in; + u32 set_out; + u32 clr_in; + u32 clr_out; +}; + +static inline struct mtk_adsp_mbox_priv *get_mtk_adsp_mbox_priv(struct mbox_controller *mbox) +{ + return container_of(mbox, struct mtk_adsp_mbox_priv, mbox); +} + +static irqreturn_t mtk_adsp_mbox_irq(int irq, void *data) +{ + struct mbox_chan *chan = data; + struct mtk_adsp_mbox_priv *priv = get_mtk_adsp_mbox_priv(chan->mbox); + u32 op = readl(priv->va_mboxreg + priv->cfg->set_out); + + writel(op, priv->va_mboxreg + priv->cfg->clr_out); + + return IRQ_WAKE_THREAD; +} + +static irqreturn_t mtk_adsp_mbox_isr(int irq, void *data) +{ + struct mbox_chan *chan = data; + + mbox_chan_received_data(chan, NULL); + + return IRQ_HANDLED; +} + +static struct mbox_chan *mtk_adsp_mbox_xlate(struct mbox_controller *mbox, + const struct of_phandle_args *sp) +{ + return mbox->chans; +} + +static int mtk_adsp_mbox_startup(struct mbox_chan *chan) +{ + struct mtk_adsp_mbox_priv *priv = get_mtk_adsp_mbox_priv(chan->mbox); + + /* Clear ADSP mbox command */ + writel(0xFFFFFFFF, priv->va_mboxreg + priv->cfg->clr_in); + writel(0xFFFFFFFF, priv->va_mboxreg + priv->cfg->clr_out); + + return 0; +} + +static void mtk_adsp_mbox_shutdown(struct mbox_chan *chan) +{ + struct mtk_adsp_mbox_priv *priv = get_mtk_adsp_mbox_priv(chan->mbox); + + /* Clear ADSP mbox command */ + writel(0xFFFFFFFF, priv->va_mboxreg + priv->cfg->clr_in); + 
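/*
 * [Editor's note] The handler pair above is the usual two-stage pattern:
 * mtk_adsp_mbox_irq() runs in hard-IRQ context, latches the doorbell from
 * the set_out register, acknowledges it via clr_out and returns
 * IRQ_WAKE_THREAD, so that mtk_adsp_mbox_isr() can hand the event to the
 * client through mbox_chan_received_data() from the threaded handler.
 */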
writel(0xFFFFFFFF, priv->va_mboxreg + priv->cfg->clr_out); +} + +static int mtk_adsp_mbox_send_data(struct mbox_chan *chan, void *data) +{ + struct mtk_adsp_mbox_priv *priv = get_mtk_adsp_mbox_priv(chan->mbox); + u32 *msg = data; + + writel(*msg, priv->va_mboxreg + priv->cfg->set_in); + + return 0; +} + +static bool mtk_adsp_mbox_last_tx_done(struct mbox_chan *chan) +{ + struct mtk_adsp_mbox_priv *priv = get_mtk_adsp_mbox_priv(chan->mbox); + + return readl(priv->va_mboxreg + priv->cfg->set_in) == 0; +} + +static const struct mbox_chan_ops mtk_adsp_mbox_chan_ops = { + .send_data = mtk_adsp_mbox_send_data, + .startup = mtk_adsp_mbox_startup, + .shutdown = mtk_adsp_mbox_shutdown, + .last_tx_done = mtk_adsp_mbox_last_tx_done, +}; + +static int mtk_adsp_mbox_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mtk_adsp_mbox_priv *priv; + const struct mtk_adsp_mbox_cfg *cfg; + struct mbox_controller *mbox; + int ret, irq; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + mbox = &priv->mbox; + mbox->dev = dev; + mbox->ops = &mtk_adsp_mbox_chan_ops; + mbox->txdone_irq = false; + mbox->txdone_poll = true; + mbox->of_xlate = mtk_adsp_mbox_xlate; + mbox->num_chans = 1; + mbox->chans = devm_kzalloc(dev, sizeof(*mbox->chans), GFP_KERNEL); + if (!mbox->chans) + return -ENOMEM; + + priv->va_mboxreg = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->va_mboxreg)) + return PTR_ERR(priv->va_mboxreg); + + cfg = of_device_get_match_data(dev); + if (!cfg) + return -EINVAL; + priv->cfg = cfg; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + ret = devm_request_threaded_irq(dev, irq, mtk_adsp_mbox_irq, + mtk_adsp_mbox_isr, IRQF_TRIGGER_NONE, + dev_name(dev), mbox->chans); + if (ret < 0) + return ret; + + platform_set_drvdata(pdev, priv); + + return devm_mbox_controller_register(dev, &priv->mbox); +} + +static const struct mtk_adsp_mbox_cfg mt8186_adsp_mbox_cfg = { + .set_in = 0x00, + .set_out = 0x04, + .clr_in = 0x08, + .clr_out = 0x0C, +}; + +static const struct mtk_adsp_mbox_cfg mt8195_adsp_mbox_cfg = { + .set_in = 0x00, + .set_out = 0x1c, + .clr_in = 0x04, + .clr_out = 0x20, +}; + +static const struct of_device_id mtk_adsp_mbox_of_match[] = { + { .compatible = "mediatek,mt8186-adsp-mbox", .data = &mt8186_adsp_mbox_cfg }, + { .compatible = "mediatek,mt8195-adsp-mbox", .data = &mt8195_adsp_mbox_cfg }, + {}, +}; +MODULE_DEVICE_TABLE(of, mtk_adsp_mbox_of_match); + +static struct platform_driver mtk_adsp_mbox_driver = { + .probe = mtk_adsp_mbox_probe, + .driver = { + .name = "mtk_adsp_mbox", + .of_match_table = mtk_adsp_mbox_of_match, + }, +}; +module_platform_driver(mtk_adsp_mbox_driver); + +MODULE_AUTHOR("Allen-KH Cheng <Allen-KH.Cheng@mediatek.com>"); +MODULE_DESCRIPTION("MTK ADSP Mailbox Controller"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c index 22811784dc7d..5791f80f995a 100644 --- a/drivers/mailbox/mtk-cmdq-mailbox.c +++ b/drivers/mailbox/mtk-cmdq-mailbox.c @@ -8,19 +8,23 @@ #include <linux/dma-mapping.h> #include <linux/errno.h> #include <linux/interrupt.h> +#include <linux/io.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/mailbox_controller.h> #include <linux/mailbox/mtk-cmdq-mailbox.h> -#include <linux/of_device.h> +#include <linux/of.h> + +#define CMDQ_MBOX_AUTOSUSPEND_DELAY_MS 100 #define CMDQ_OP_CODE_MASK (0xff << 
CMDQ_OP_CODE_SHIFT) -#define CMDQ_IRQ_MASK 0xffff #define CMDQ_NUM_CMD(t) (t->cmd_buf_size / CMDQ_INST_SIZE) #define CMDQ_CURR_IRQ_STATUS 0x10 +#define CMDQ_SYNC_TOKEN_UPDATE 0x68 #define CMDQ_THR_SLOT_CYCLES 0x30 #define CMDQ_THR_BASE 0x100 #define CMDQ_THR_SIZE 0x80 @@ -35,6 +39,10 @@ #define CMDQ_THR_WAIT_TOKEN 0x30 #define CMDQ_THR_PRIORITY 0x40 +#define GCE_GCTL_VALUE 0x48 +#define GCE_CTRL_BY_SW GENMASK(2, 0) +#define GCE_DDR_EN GENMASK(18, 16) + #define CMDQ_THR_ACTIVE_SLOT_CYCLES 0x3200 #define CMDQ_THR_ENABLED 0x1 #define CMDQ_THR_DISABLED 0x0 @@ -55,7 +63,6 @@ struct cmdq_thread { void __iomem *base; struct list_head task_busy_list; u32 priority; - bool atomic_exec; }; struct cmdq_task { @@ -69,13 +76,55 @@ struct cmdq_task { struct cmdq { struct mbox_controller mbox; void __iomem *base; - u32 irq; - u32 thread_nr; + int irq; + u32 irq_mask; + const struct gce_plat *pdata; struct cmdq_thread *thread; - struct clk *clock; + struct clk_bulk_data *clocks; bool suspended; }; +struct gce_plat { + u32 thread_nr; + u8 shift; + bool control_by_sw; + bool sw_ddr_en; + u32 gce_num; +}; + +static inline u32 cmdq_convert_gce_addr(dma_addr_t addr, const struct gce_plat *pdata) +{ + /* Convert DMA addr (PA or IOVA) to GCE readable addr */ + return addr >> pdata->shift; +} + +static inline dma_addr_t cmdq_revert_gce_addr(u32 addr, const struct gce_plat *pdata) +{ + /* Revert GCE readable addr to DMA addr (PA or IOVA) */ + return (dma_addr_t)addr << pdata->shift; +} + +u8 cmdq_get_shift_pa(struct mbox_chan *chan) +{ + struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox); + + return cmdq->pdata->shift; +} +EXPORT_SYMBOL(cmdq_get_shift_pa); + +static void cmdq_gctl_value_toggle(struct cmdq *cmdq, bool ddr_enable) +{ + u32 val = cmdq->pdata->control_by_sw ? 
GCE_CTRL_BY_SW : 0; + + if (!cmdq->pdata->control_by_sw && !cmdq->pdata->sw_ddr_en) + return; + + if (cmdq->pdata->sw_ddr_en && ddr_enable) + val |= GCE_DDR_EN; + + writel(val, cmdq->base + GCE_GCTL_VALUE); +} + static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread) { u32 status; @@ -103,9 +152,16 @@ static void cmdq_thread_resume(struct cmdq_thread *thread) static void cmdq_init(struct cmdq *cmdq) { - WARN_ON(clk_enable(cmdq->clock) < 0); + int i; + + WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks)); + + cmdq_gctl_value_toggle(cmdq, true); + writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES); - clk_disable(cmdq->clock); + for (i = 0; i <= CMDQ_MAX_EVENT; i++) + writel(i, cmdq->base + CMDQ_SYNC_TOKEN_UPDATE); + clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks); } static int cmdq_thread_reset(struct cmdq *cmdq, struct cmdq_thread *thread) @@ -144,69 +200,30 @@ static void cmdq_task_insert_into_thread(struct cmdq_task *task) struct cmdq_task *prev_task = list_last_entry( &thread->task_busy_list, typeof(*task), list_entry); u64 *prev_task_base = prev_task->pkt->va_base; + u32 gce_addr = cmdq_convert_gce_addr(task->pa_base, task->cmdq->pdata); /* let previous task jump to this task */ dma_sync_single_for_cpu(dev, prev_task->pa_base, prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE); - prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] = - (u64)CMDQ_JUMP_BY_PA << 32 | task->pa_base; + prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] = (u64)CMDQ_JUMP_BY_PA << 32 | gce_addr; dma_sync_single_for_device(dev, prev_task->pa_base, prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE); cmdq_thread_invalidate_fetched_data(thread); } -static bool cmdq_command_is_wfe(u64 cmd) -{ - u64 wfe_option = CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE; - u64 wfe_op = (u64)(CMDQ_CODE_WFE << CMDQ_OP_CODE_SHIFT) << 32; - u64 wfe_mask = (u64)CMDQ_OP_CODE_MASK << 32 | 0xffffffff; - - return ((cmd & wfe_mask) == (wfe_op | wfe_option)); -} - -/* we assume tasks in the same display GCE thread are waiting the same event. 
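Note: the cmdq_convert_gce_addr()/cmdq_revert_gce_addr() helpers above exist because the GCE thread registers are only 32 bits wide while newer SoCs use wider DMA addresses; command buffers are aligned so the low pdata->shift bits are always zero and nothing is lost. A worked example for shift == 3 (illustrative values, assumes a 64-bit dma_addr_t):

	static void example_addr_conv(void)
	{
		dma_addr_t pa = 0x3f0001000ULL;		/* buffer above 4 GiB */
		u32 gce_addr = pa >> 3;			/* 0x7e000200, fits in 32 bits */
		dma_addr_t back = (dma_addr_t)gce_addr << 3;	/* == pa again */
	}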
*/ -static void cmdq_task_remove_wfe(struct cmdq_task *task) -{ - struct device *dev = task->cmdq->mbox.dev; - u64 *base = task->pkt->va_base; - int i; - - dma_sync_single_for_cpu(dev, task->pa_base, task->pkt->cmd_buf_size, - DMA_TO_DEVICE); - for (i = 0; i < CMDQ_NUM_CMD(task->pkt); i++) - if (cmdq_command_is_wfe(base[i])) - base[i] = (u64)CMDQ_JUMP_BY_OFFSET << 32 | - CMDQ_JUMP_PASS; - dma_sync_single_for_device(dev, task->pa_base, task->pkt->cmd_buf_size, - DMA_TO_DEVICE); -} - static bool cmdq_thread_is_in_wfe(struct cmdq_thread *thread) { return readl(thread->base + CMDQ_THR_WAIT_TOKEN) & CMDQ_THR_IS_WAITING; } -static void cmdq_thread_wait_end(struct cmdq_thread *thread, - unsigned long end_pa) -{ - struct device *dev = thread->chan->mbox->dev; - unsigned long curr_pa; - - if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_ADDR, - curr_pa, curr_pa == end_pa, 1, 20)) - dev_err(dev, "GCE thread cannot run to end.\n"); -} - -static void cmdq_task_exec_done(struct cmdq_task *task, enum cmdq_cb_status sta) +static void cmdq_task_exec_done(struct cmdq_task *task, int sta) { - struct cmdq_task_cb *cb = &task->pkt->async_cb; struct cmdq_cb_data data; - WARN_ON(cb->cb == (cmdq_async_flush_cb)NULL); data.sta = sta; - data.data = cb->data; - cb->cb(data); + data.pkt = task->pkt; + mbox_chan_received_data(task->thread->chan, &data); list_del(&task->list_entry); } @@ -215,13 +232,15 @@ static void cmdq_task_handle_error(struct cmdq_task *task) { struct cmdq_thread *thread = task->thread; struct cmdq_task *next_task; + struct cmdq *cmdq = task->cmdq; - dev_err(task->cmdq->mbox.dev, "task 0x%p error\n", task); - WARN_ON(cmdq_thread_suspend(task->cmdq, thread) < 0); + dev_err(cmdq->mbox.dev, "task 0x%p error\n", task); + WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0); next_task = list_first_entry_or_null(&thread->task_busy_list, struct cmdq_task, list_entry); if (next_task) - writel(next_task->pa_base, thread->base + CMDQ_THR_CURR_ADDR); + writel(next_task->pa_base >> cmdq->pdata->shift, + thread->base + CMDQ_THR_CURR_ADDR); cmdq_thread_resume(thread); } @@ -229,7 +248,8 @@ static void cmdq_thread_irq_handler(struct cmdq *cmdq, struct cmdq_thread *thread) { struct cmdq_task *task, *tmp, *curr_task = NULL; - u32 curr_pa, irq_flag, task_end_pa; + u32 irq_flag, gce_addr; + dma_addr_t curr_pa, task_end_pa; bool err; irq_flag = readl(thread->base + CMDQ_THR_IRQ_STATUS); @@ -251,7 +271,8 @@ static void cmdq_thread_irq_handler(struct cmdq *cmdq, else return; - curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR); + gce_addr = readl(thread->base + CMDQ_THR_CURR_ADDR); + curr_pa = cmdq_revert_gce_addr(gce_addr, cmdq->pdata); list_for_each_entry_safe(task, tmp, &thread->task_busy_list, list_entry) { @@ -260,10 +281,10 @@ static void cmdq_thread_irq_handler(struct cmdq *cmdq, curr_task = task; if (!curr_task || curr_pa == task_end_pa - CMDQ_INST_SIZE) { - cmdq_task_exec_done(task, CMDQ_CB_NORMAL); + cmdq_task_exec_done(task, 0); kfree(task); } else if (err) { - cmdq_task_exec_done(task, CMDQ_CB_ERROR); + cmdq_task_exec_done(task, -ENOEXEC); cmdq_task_handle_error(curr_task); kfree(task); } @@ -272,10 +293,8 @@ static void cmdq_thread_irq_handler(struct cmdq *cmdq, break; } - if (list_empty(&thread->task_busy_list)) { + if (list_empty(&thread->task_busy_list)) cmdq_thread_disable(cmdq, thread); - clk_disable(cmdq->clock); - } } static irqreturn_t cmdq_irq_handler(int irq, void *dev) @@ -284,11 +303,11 @@ static irqreturn_t cmdq_irq_handler(int irq, void *dev) unsigned long irq_status, flags = 0L; int 
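/*
 * [Editor's note] With the cmdq_task_exec_done() rework above, completion
 * reaches the client through mbox_chan_received_data() carrying a
 * struct cmdq_cb_data (.sta = 0 on success, -ENOEXEC when the thread raised
 * an error IRQ, -ECONNABORTED on shutdown/flush abort; .pkt = the finished
 * packet), replacing the removed per-packet cmdq_async_flush_cb callbacks.
 */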
bit; - irq_status = readl(cmdq->base + CMDQ_CURR_IRQ_STATUS) & CMDQ_IRQ_MASK; - if (!(irq_status ^ CMDQ_IRQ_MASK)) + irq_status = readl(cmdq->base + CMDQ_CURR_IRQ_STATUS) & cmdq->irq_mask; + if (!(irq_status ^ cmdq->irq_mask)) return IRQ_NONE; - for_each_clear_bit(bit, &irq_status, fls(CMDQ_IRQ_MASK)) { + for_each_clear_bit(bit, &irq_status, cmdq->pdata->thread_nr) { struct cmdq_thread *thread = &cmdq->thread[bit]; spin_lock_irqsave(&thread->chan->lock, flags); @@ -296,9 +315,33 @@ static irqreturn_t cmdq_irq_handler(int irq, void *dev) spin_unlock_irqrestore(&thread->chan->lock, flags); } + pm_runtime_mark_last_busy(cmdq->mbox.dev); + return IRQ_HANDLED; } +static int cmdq_runtime_resume(struct device *dev) +{ + struct cmdq *cmdq = dev_get_drvdata(dev); + int ret; + + ret = clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks); + if (ret) + return ret; + + cmdq_gctl_value_toggle(cmdq, true); + return 0; +} + +static int cmdq_runtime_suspend(struct device *dev) +{ + struct cmdq *cmdq = dev_get_drvdata(dev); + + cmdq_gctl_value_toggle(cmdq, false); + clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks); + return 0; +} + static int cmdq_suspend(struct device *dev) { struct cmdq *cmdq = dev_get_drvdata(dev); @@ -308,7 +351,7 @@ static int cmdq_suspend(struct device *dev) cmdq->suspended = true; - for (i = 0; i < cmdq->thread_nr; i++) { + for (i = 0; i < cmdq->pdata->thread_nr; i++) { thread = &cmdq->thread[i]; if (!list_empty(&thread->task_busy_list)) { task_running = true; @@ -319,27 +362,27 @@ static int cmdq_suspend(struct device *dev) if (task_running) dev_warn(dev, "exist running task(s) in suspend\n"); - clk_unprepare(cmdq->clock); - - return 0; + return pm_runtime_force_suspend(dev); } static int cmdq_resume(struct device *dev) { struct cmdq *cmdq = dev_get_drvdata(dev); - WARN_ON(clk_prepare(cmdq->clock) < 0); + WARN_ON(pm_runtime_force_resume(dev)); cmdq->suspended = false; + return 0; } -static int cmdq_remove(struct platform_device *pdev) +static void cmdq_remove(struct platform_device *pdev) { struct cmdq *cmdq = platform_get_drvdata(pdev); - clk_unprepare(cmdq->clock); + if (!IS_ENABLED(CONFIG_PM)) + cmdq_runtime_suspend(&pdev->dev); - return 0; + clk_bulk_unprepare(cmdq->pdata->gce_num, cmdq->clocks); } static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data) @@ -348,7 +391,8 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data) struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv; struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev); struct cmdq_task *task; - unsigned long curr_pa, end_pa; + u32 gce_addr; + dma_addr_t curr_pa, end_pa; /* Client should not flush new tasks if suspended. */ WARN_ON(cmdq->suspended); @@ -364,51 +408,39 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data) task->pkt = pkt; if (list_empty(&thread->task_busy_list)) { - WARN_ON(clk_enable(cmdq->clock) < 0); + /* + * The thread reset will clear thread related register to 0, + * including pc, end, priority, irq, suspend and enable. Thus + * set CMDQ_THR_ENABLED to CMDQ_THR_ENABLE_TASK will enable + * thread and make it running. 
+ */ WARN_ON(cmdq_thread_reset(cmdq, thread) < 0); - writel(task->pa_base, thread->base + CMDQ_THR_CURR_ADDR); - writel(task->pa_base + pkt->cmd_buf_size, - thread->base + CMDQ_THR_END_ADDR); + gce_addr = cmdq_convert_gce_addr(task->pa_base, cmdq->pdata); + writel(gce_addr, thread->base + CMDQ_THR_CURR_ADDR); + gce_addr = cmdq_convert_gce_addr(task->pa_base + pkt->cmd_buf_size, cmdq->pdata); + writel(gce_addr, thread->base + CMDQ_THR_END_ADDR); + writel(thread->priority, thread->base + CMDQ_THR_PRIORITY); writel(CMDQ_THR_IRQ_EN, thread->base + CMDQ_THR_IRQ_ENABLE); writel(CMDQ_THR_ENABLED, thread->base + CMDQ_THR_ENABLE_TASK); } else { WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0); - curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR); - end_pa = readl(thread->base + CMDQ_THR_END_ADDR); - - /* - * Atomic execution should remove the following wfe, i.e. only - * wait event at first task, and prevent to pause when running. - */ - if (thread->atomic_exec) { - /* GCE is executing if command is not WFE */ - if (!cmdq_thread_is_in_wfe(thread)) { - cmdq_thread_resume(thread); - cmdq_thread_wait_end(thread, end_pa); - WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0); - /* set to this task directly */ - writel(task->pa_base, - thread->base + CMDQ_THR_CURR_ADDR); - } else { - cmdq_task_insert_into_thread(task); - cmdq_task_remove_wfe(task); - smp_mb(); /* modify jump before enable thread */ - } + gce_addr = readl(thread->base + CMDQ_THR_CURR_ADDR); + curr_pa = cmdq_revert_gce_addr(gce_addr, cmdq->pdata); + gce_addr = readl(thread->base + CMDQ_THR_END_ADDR); + end_pa = cmdq_revert_gce_addr(gce_addr, cmdq->pdata); + /* check boundary */ + if (curr_pa == end_pa - CMDQ_INST_SIZE || + curr_pa == end_pa) { + /* set to this task directly */ + writel(task->pa_base >> cmdq->pdata->shift, + thread->base + CMDQ_THR_CURR_ADDR); } else { - /* check boundary */ - if (curr_pa == end_pa - CMDQ_INST_SIZE || - curr_pa == end_pa) { - /* set to this task directly */ - writel(task->pa_base, - thread->base + CMDQ_THR_CURR_ADDR); - } else { - cmdq_task_insert_into_thread(task); - smp_mb(); /* modify jump before enable thread */ - } + cmdq_task_insert_into_thread(task); + smp_mb(); /* modify jump before enable thread */ } - writel(task->pa_base + pkt->cmd_buf_size, + writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->pdata->shift, thread->base + CMDQ_THR_END_ADDR); cmdq_thread_resume(thread); } @@ -424,12 +456,106 @@ static int cmdq_mbox_startup(struct mbox_chan *chan) static void cmdq_mbox_shutdown(struct mbox_chan *chan) { + struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv; + struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev); + struct cmdq_task *task, *tmp; + unsigned long flags; + + WARN_ON(pm_runtime_get_sync(cmdq->mbox.dev) < 0); + + spin_lock_irqsave(&thread->chan->lock, flags); + if (list_empty(&thread->task_busy_list)) + goto done; + + WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0); + + /* make sure executed tasks have success callback */ + cmdq_thread_irq_handler(cmdq, thread); + if (list_empty(&thread->task_busy_list)) + goto done; + + list_for_each_entry_safe(task, tmp, &thread->task_busy_list, + list_entry) { + cmdq_task_exec_done(task, -ECONNABORTED); + kfree(task); + } + + cmdq_thread_disable(cmdq, thread); + +done: + /* + * An empty thread->task_busy_list means the thread is already disabled. + * cmdq_mbox_send_data() always resets the thread, clearing the disable + * and suspend status when the first packet is sent to the channel, so + * there is no need to do any operation here; only unlock and leave.
+ */ + spin_unlock_irqrestore(&thread->chan->lock, flags); + + pm_runtime_mark_last_busy(cmdq->mbox.dev); + pm_runtime_put_autosuspend(cmdq->mbox.dev); +} + +static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout) +{ + struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv; + struct cmdq_cb_data data; + struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev); + struct cmdq_task *task, *tmp; + unsigned long flags; + u32 enable; + int ret; + + ret = pm_runtime_get_sync(cmdq->mbox.dev); + if (ret < 0) + return ret; + + spin_lock_irqsave(&thread->chan->lock, flags); + if (list_empty(&thread->task_busy_list)) + goto out; + + WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0); + if (!cmdq_thread_is_in_wfe(thread)) + goto wait; + + list_for_each_entry_safe(task, tmp, &thread->task_busy_list, + list_entry) { + data.sta = -ECONNABORTED; + data.pkt = task->pkt; + mbox_chan_received_data(task->thread->chan, &data); + list_del(&task->list_entry); + kfree(task); + } + + cmdq_thread_resume(thread); + cmdq_thread_disable(cmdq, thread); + +out: + spin_unlock_irqrestore(&thread->chan->lock, flags); + pm_runtime_mark_last_busy(cmdq->mbox.dev); + pm_runtime_put_autosuspend(cmdq->mbox.dev); + + return 0; + +wait: + cmdq_thread_resume(thread); + spin_unlock_irqrestore(&thread->chan->lock, flags); + if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_ENABLE_TASK, + enable, enable == 0, 1, timeout)) { + dev_err(cmdq->mbox.dev, "Fail to wait GCE thread 0x%x done\n", + (u32)(thread->base - cmdq->base)); + + return -EFAULT; + } + pm_runtime_mark_last_busy(cmdq->mbox.dev); + pm_runtime_put_autosuspend(cmdq->mbox.dev); + return 0; } static const struct mbox_chan_ops cmdq_mbox_chan_ops = { .send_data = cmdq_mbox_send_data, .startup = cmdq_mbox_startup, .shutdown = cmdq_mbox_shutdown, + .flush = cmdq_mbox_flush, }; static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox, @@ -443,16 +569,67 @@ static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox, thread = (struct cmdq_thread *)mbox->chans[ind].con_priv; thread->priority = sp->args[1]; - thread->atomic_exec = (sp->args[2] != 0); thread->chan = &mbox->chans[ind]; return &mbox->chans[ind]; } +static int cmdq_get_clocks(struct device *dev, struct cmdq *cmdq) +{ + static const char * const gce_name = "gce"; + struct device_node *node, *parent = dev->of_node->parent; + struct clk_bulk_data *clks; + + cmdq->clocks = devm_kcalloc(dev, cmdq->pdata->gce_num, + sizeof(*cmdq->clocks), GFP_KERNEL); + if (!cmdq->clocks) + return -ENOMEM; + + if (cmdq->pdata->gce_num == 1) { + clks = &cmdq->clocks[0]; + + clks->id = gce_name; + clks->clk = devm_clk_get(dev, NULL); + if (IS_ERR(clks->clk)) + return dev_err_probe(dev, PTR_ERR(clks->clk), + "failed to get gce clock\n"); + + return 0; + } + + /* + * If there is more than one GCE, get the clocks for the others too, + * as the clock of the main GCE must be enabled for additional IPs + * to be reachable. 
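Note: the new .flush callback above gives clients a synchronous drain: tasks still parked behind a wait-for-event are aborted with -ECONNABORTED, while a thread that is already executing is polled via readl_poll_timeout_atomic() on CMDQ_THR_ENABLE_TASK until it disables itself or the timeout expires. On the client side this is reached through mbox_flush() (sketch, hypothetical helper and timeout value):

	#include <linux/mailbox_client.h>
	#include <linux/mailbox/mtk-cmdq-mailbox.h>

	static int example_commit(struct mbox_chan *chan, struct cmdq_pkt *pkt)
	{
		int ret = mbox_send_message(chan, pkt);

		if (ret < 0)
			return ret;

		/* 2000 is an arbitrary example timeout */
		return mbox_flush(chan, 2000);
	}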
+ */ + for_each_child_of_node(parent, node) { + int alias_id = of_alias_get_id(node, gce_name); + + if (alias_id < 0 || alias_id >= cmdq->pdata->gce_num) + continue; + + clks = &cmdq->clocks[alias_id]; + + clks->id = devm_kasprintf(dev, GFP_KERNEL, "gce%d", alias_id); + if (!clks->id) { + of_node_put(node); + return -ENOMEM; + } + + clks->clk = of_clk_get(node, 0); + if (IS_ERR(clks->clk)) { + of_node_put(node); + return dev_err_probe(dev, PTR_ERR(clks->clk), + "failed to get gce%d clock\n", alias_id); + } + } + + return 0; +} + static int cmdq_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct resource *res; struct cmdq *cmdq; int err, i; @@ -460,42 +637,39 @@ static int cmdq_probe(struct platform_device *pdev) if (!cmdq) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - cmdq->base = devm_ioremap_resource(dev, res); - if (IS_ERR(cmdq->base)) { - dev_err(dev, "failed to ioremap gce\n"); + cmdq->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(cmdq->base)) return PTR_ERR(cmdq->base); - } cmdq->irq = platform_get_irq(pdev, 0); - if (!cmdq->irq) { - dev_err(dev, "failed to get irq\n"); + if (cmdq->irq < 0) + return cmdq->irq; + + cmdq->pdata = device_get_match_data(dev); + if (!cmdq->pdata) { + dev_err(dev, "failed to get match data\n"); return -EINVAL; } - err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED, - "mtk_cmdq", cmdq); - if (err < 0) { - dev_err(dev, "failed to register ISR (%d)\n", err); - return err; - } + + cmdq->irq_mask = GENMASK(cmdq->pdata->thread_nr - 1, 0); dev_dbg(dev, "cmdq device: addr:0x%p, va:0x%p, irq:%d\n", dev, cmdq->base, cmdq->irq); - cmdq->clock = devm_clk_get(dev, "gce"); - if (IS_ERR(cmdq->clock)) { - dev_err(dev, "failed to get gce clk\n"); - return PTR_ERR(cmdq->clock); - } + err = cmdq_get_clocks(dev, cmdq); + if (err) + return err; + + dma_set_coherent_mask(dev, + DMA_BIT_MASK(sizeof(u32) * BITS_PER_BYTE + cmdq->pdata->shift)); - cmdq->thread_nr = (u32)(unsigned long)of_device_get_match_data(dev); cmdq->mbox.dev = dev; - cmdq->mbox.chans = devm_kcalloc(dev, cmdq->thread_nr, + cmdq->mbox.chans = devm_kcalloc(dev, cmdq->pdata->thread_nr, sizeof(*cmdq->mbox.chans), GFP_KERNEL); if (!cmdq->mbox.chans) return -ENOMEM; - cmdq->mbox.num_chans = cmdq->thread_nr; + cmdq->mbox.num_chans = cmdq->pdata->thread_nr; cmdq->mbox.ops = &cmdq_mbox_chan_ops; cmdq->mbox.of_xlate = cmdq_xlate; @@ -503,28 +677,50 @@ static int cmdq_probe(struct platform_device *pdev) cmdq->mbox.txdone_irq = false; cmdq->mbox.txdone_poll = false; - cmdq->thread = devm_kcalloc(dev, cmdq->thread_nr, + cmdq->thread = devm_kcalloc(dev, cmdq->pdata->thread_nr, sizeof(*cmdq->thread), GFP_KERNEL); if (!cmdq->thread) return -ENOMEM; - for (i = 0; i < cmdq->thread_nr; i++) { + for (i = 0; i < cmdq->pdata->thread_nr; i++) { cmdq->thread[i].base = cmdq->base + CMDQ_THR_BASE + CMDQ_THR_SIZE * i; INIT_LIST_HEAD(&cmdq->thread[i].task_busy_list); cmdq->mbox.chans[i].con_priv = (void *)&cmdq->thread[i]; } - err = devm_mbox_controller_register(dev, &cmdq->mbox); + platform_set_drvdata(pdev, cmdq); + + WARN_ON(clk_bulk_prepare(cmdq->pdata->gce_num, cmdq->clocks)); + + cmdq_init(cmdq); + + err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED, + "mtk_cmdq", cmdq); if (err < 0) { - dev_err(dev, "failed to register mailbox: %d\n", err); + dev_err(dev, "failed to register ISR (%d)\n", err); return err; } - platform_set_drvdata(pdev, cmdq); - WARN_ON(clk_prepare(cmdq->clock) < 0); + /* If Runtime PM is not available enable 
the clocks now. */ + if (!IS_ENABLED(CONFIG_PM)) { + err = cmdq_runtime_resume(dev); + if (err) + return err; + } - cmdq_init(cmdq); + err = devm_pm_runtime_enable(dev); + if (err) + return err; + + pm_runtime_set_autosuspend_delay(dev, CMDQ_MBOX_AUTOSUSPEND_DELAY_MS); + pm_runtime_use_autosuspend(dev); + + err = devm_mbox_controller_register(dev, &cmdq->mbox); + if (err < 0) { + dev_err(dev, "failed to register mailbox: %d\n", err); + return err; + } return 0; } @@ -532,12 +728,71 @@ static int cmdq_probe(struct platform_device *pdev) static const struct dev_pm_ops cmdq_pm_ops = { .suspend = cmdq_suspend, .resume = cmdq_resume, + SET_RUNTIME_PM_OPS(cmdq_runtime_suspend, + cmdq_runtime_resume, NULL) +}; + +static const struct gce_plat gce_plat_mt6779 = { + .thread_nr = 24, + .shift = 3, + .control_by_sw = false, + .gce_num = 1 +}; + +static const struct gce_plat gce_plat_mt8173 = { + .thread_nr = 16, + .shift = 0, + .control_by_sw = false, + .gce_num = 1 +}; + +static const struct gce_plat gce_plat_mt8183 = { + .thread_nr = 24, + .shift = 0, + .control_by_sw = false, + .gce_num = 1 +}; + +static const struct gce_plat gce_plat_mt8186 = { + .thread_nr = 24, + .shift = 3, + .control_by_sw = true, + .sw_ddr_en = true, + .gce_num = 1 +}; + +static const struct gce_plat gce_plat_mt8188 = { + .thread_nr = 32, + .shift = 3, + .control_by_sw = true, + .gce_num = 2 +}; + +static const struct gce_plat gce_plat_mt8192 = { + .thread_nr = 24, + .shift = 3, + .control_by_sw = true, + .gce_num = 1 +}; + +static const struct gce_plat gce_plat_mt8195 = { + .thread_nr = 24, + .shift = 3, + .control_by_sw = true, + .gce_num = 2 }; static const struct of_device_id cmdq_of_ids[] = { - {.compatible = "mediatek,mt8173-gce", .data = (void *)16}, + {.compatible = "mediatek,mt6779-gce", .data = (void *)&gce_plat_mt6779}, + {.compatible = "mediatek,mt8173-gce", .data = (void *)&gce_plat_mt8173}, + {.compatible = "mediatek,mt8183-gce", .data = (void *)&gce_plat_mt8183}, + {.compatible = "mediatek,mt8186-gce", .data = (void *)&gce_plat_mt8186}, + {.compatible = "mediatek,mt8188-gce", .data = (void *)&gce_plat_mt8188}, + {.compatible = "mediatek,mt8192-gce", .data = (void *)&gce_plat_mt8192}, + {.compatible = "mediatek,mt8195-gce", .data = (void *)&gce_plat_mt8195}, {} }; +MODULE_DEVICE_TABLE(of, cmdq_of_ids); static struct platform_driver cmdq_drv = { .probe = cmdq_probe, @@ -562,4 +817,5 @@ static void __exit cmdq_drv_exit(void) subsys_initcall(cmdq_drv_init); module_exit(cmdq_drv_exit); +MODULE_DESCRIPTION("Mediatek Command Queue(CMDQ) Mailbox driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/mailbox/mtk-gpueb-mailbox.c b/drivers/mailbox/mtk-gpueb-mailbox.c new file mode 100644 index 000000000000..f6d2beccd91b --- /dev/null +++ b/drivers/mailbox/mtk-gpueb-mailbox.c @@ -0,0 +1,319 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * MediaTek GPUEB mailbox driver for SoCs such as the MT8196 + * + * Copyright (C) 2025, Collabora Ltd. 
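Note: the gce_plat tables above replace the old trick of casting of_device_id.data to a bare thread count; probe now fetches the whole per-SoC description with device_get_match_data(). With .shift = 3 the coherent DMA mask set in probe works out to DMA_BIT_MASK(32 + 3), i.e. 35 bits. The lookup pattern, reduced to a sketch with a hypothetical compatible string:

	static const struct gce_plat gce_plat_example = {
		.thread_nr = 24,
		.shift = 3,
		.control_by_sw = true,
		.gce_num = 1,
	};

	static const struct of_device_id example_ids[] = {
		{ .compatible = "vendor,example-gce", .data = &gce_plat_example },
		{ /* sentinel */ }
	};

	/* in probe(), with dev = &pdev->dev: */
	const struct gce_plat *pdata = device_get_match_data(dev);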
+ * + * Developers harmed in the making of this driver: + * - Nicolas Frattaroli <nicolas.frattaroli@collabora.com> + */ + +#include <linux/atomic.h> +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/mailbox_controller.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> + +#define GPUEB_MBOX_CTL_TX_STS 0x00 +#define GPUEB_MBOX_CTL_IRQ_SET 0x04 +#define GPUEB_MBOX_CTL_IRQ_CLR 0x74 +#define GPUEB_MBOX_CTL_RX_STS 0x78 + +#define GPUEB_MBOX_FULL BIT(0) /* i.e. we've received data */ +#define GPUEB_MBOX_BLOCKED BIT(1) /* i.e. the channel is shutdown */ + +#define GPUEB_MBOX_MAX_RX_SIZE 32 /* in bytes */ + +struct mtk_gpueb_mbox { + struct device *dev; + struct clk *clk; + void __iomem *mbox_mmio; + void __iomem *mbox_ctl; + struct mbox_controller mbox; + struct mtk_gpueb_mbox_chan *ch; + int irq; + const struct mtk_gpueb_mbox_variant *v; +}; + +/** + * struct mtk_gpueb_mbox_chan - per-channel runtime data + * @ebm: pointer to the parent &struct mtk_gpueb_mbox mailbox + * @full_name: descriptive name of channel for IRQ subsystem + * @num: channel number, starting at 0 + * @rx_status: signifies whether channel reception is turned off, or full + * @c: pointer to the constant &struct mtk_gpueb_mbox_chan_desc channel data + */ +struct mtk_gpueb_mbox_chan { + struct mtk_gpueb_mbox *ebm; + char *full_name; + u8 num; + atomic_t rx_status; + const struct mtk_gpueb_mbox_chan_desc *c; +}; + +/** + * struct mtk_gpueb_mbox_chan_desc - per-channel constant data + * @name: name of this channel + * @num: index of this channel, starting at 0 + * @tx_offset: byte offset measured from mmio base for outgoing data + * @tx_len: size, in bytes, of the outgoing data on this channel + * @rx_offset: bytes offset measured from mmio base for incoming data + * @rx_len: size, in bytes, of the incoming data on this channel + */ +struct mtk_gpueb_mbox_chan_desc { + const char *name; + const u8 num; + const u16 tx_offset; + const u8 tx_len; + const u16 rx_offset; + const u8 rx_len; +}; + +struct mtk_gpueb_mbox_variant { + const u8 num_channels; + const struct mtk_gpueb_mbox_chan_desc channels[] __counted_by(num_channels); +}; + +/** + * mtk_gpueb_mbox_read_rx - read RX buffer from MMIO into channel's RX buffer + * @buf: buffer to read into + * @chan: pointer to the channel to read + */ +static void mtk_gpueb_mbox_read_rx(void *buf, struct mtk_gpueb_mbox_chan *chan) +{ + memcpy_fromio(buf, chan->ebm->mbox_mmio + chan->c->rx_offset, chan->c->rx_len); +} + +static irqreturn_t mtk_gpueb_mbox_isr(int irq, void *data) +{ + struct mtk_gpueb_mbox_chan *ch = data; + u32 rx_sts; + + rx_sts = readl(ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_RX_STS); + + if (rx_sts & BIT(ch->num)) { + if (!atomic_cmpxchg(&ch->rx_status, 0, GPUEB_MBOX_FULL | GPUEB_MBOX_BLOCKED)) + return IRQ_WAKE_THREAD; + } + + return IRQ_NONE; +} + +static irqreturn_t mtk_gpueb_mbox_thread(int irq, void *data) +{ + struct mtk_gpueb_mbox_chan *ch = data; + int status; + + status = atomic_cmpxchg(&ch->rx_status, GPUEB_MBOX_FULL | GPUEB_MBOX_BLOCKED, + GPUEB_MBOX_FULL); + if (status == (GPUEB_MBOX_FULL | GPUEB_MBOX_BLOCKED)) { + u8 buf[GPUEB_MBOX_MAX_RX_SIZE] = {}; + + mtk_gpueb_mbox_read_rx(buf, ch); + writel(BIT(ch->num), ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_IRQ_CLR); + mbox_chan_received_data(&ch->ebm->mbox.chans[ch->num], buf); + atomic_set(&ch->rx_status, 0); + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static int 
mtk_gpueb_mbox_send_data(struct mbox_chan *chan, void *data) +{ + struct mtk_gpueb_mbox_chan *ch = chan->con_priv; + u32 *values = data; + int i; + + if (atomic_read(&ch->rx_status)) + return -EBUSY; + + /* + * We don't want any fancy nonsense, just write the 32-bit values in + * order. memcpy_toio/__iowrite32_copy don't work here, as they may use + * writes of different sizes or memory ordering characteristics depending + * on the architecture, alignment and the current phase of the moon. + */ + for (i = 0; i < ch->c->tx_len; i += 4) + writel(values[i / 4], ch->ebm->mbox_mmio + ch->c->tx_offset + i); + + writel(BIT(ch->num), ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_IRQ_SET); + + return 0; +} + +static int mtk_gpueb_mbox_startup(struct mbox_chan *chan) +{ + struct mtk_gpueb_mbox_chan *ch = chan->con_priv; + int ret; + + atomic_set(&ch->rx_status, 0); + + ret = clk_enable(ch->ebm->clk); + if (ret) { + dev_err(ch->ebm->dev, "Failed to enable EB clock: %pe\n", + ERR_PTR(ret)); + goto err_block; + } + + writel(BIT(ch->num), ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_IRQ_CLR); + + ret = devm_request_threaded_irq(ch->ebm->dev, ch->ebm->irq, mtk_gpueb_mbox_isr, + mtk_gpueb_mbox_thread, IRQF_SHARED | IRQF_ONESHOT, + ch->full_name, ch); + if (ret) { + dev_err(ch->ebm->dev, "Failed to request IRQ: %pe\n", + ERR_PTR(ret)); + goto err_unclk; + } + + return 0; + +err_unclk: + clk_disable(ch->ebm->clk); +err_block: + atomic_set(&ch->rx_status, GPUEB_MBOX_BLOCKED); + + return ret; +} + +static void mtk_gpueb_mbox_shutdown(struct mbox_chan *chan) +{ + struct mtk_gpueb_mbox_chan *ch = chan->con_priv; + + atomic_set(&ch->rx_status, GPUEB_MBOX_BLOCKED); + + devm_free_irq(ch->ebm->dev, ch->ebm->irq, ch); + + clk_disable(ch->ebm->clk); +} + +static bool mtk_gpueb_mbox_last_tx_done(struct mbox_chan *chan) +{ + struct mtk_gpueb_mbox_chan *ch = chan->con_priv; + + return !(readl(ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_TX_STS) & BIT(ch->num)); +} + +static const struct mbox_chan_ops mtk_gpueb_mbox_ops = { + .send_data = mtk_gpueb_mbox_send_data, + .startup = mtk_gpueb_mbox_startup, + .shutdown = mtk_gpueb_mbox_shutdown, + .last_tx_done = mtk_gpueb_mbox_last_tx_done, +}; + +static int mtk_gpueb_mbox_probe(struct platform_device *pdev) +{ + struct mtk_gpueb_mbox_chan *ch; + struct mtk_gpueb_mbox *ebm; + unsigned int i; + + ebm = devm_kzalloc(&pdev->dev, sizeof(*ebm), GFP_KERNEL); + if (!ebm) + return -ENOMEM; + + ebm->dev = &pdev->dev; + ebm->v = of_device_get_match_data(ebm->dev); + + ebm->irq = platform_get_irq(pdev, 0); + if (ebm->irq < 0) + return ebm->irq; + + ebm->clk = devm_clk_get_prepared(ebm->dev, NULL); + if (IS_ERR(ebm->clk)) + return dev_err_probe(ebm->dev, PTR_ERR(ebm->clk), + "Failed to get 'eb' clock\n"); + + ebm->mbox_mmio = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(ebm->mbox_mmio)) + return dev_err_probe(ebm->dev, PTR_ERR(ebm->mbox_mmio), + "Couldn't map mailbox data registers\n"); + + ebm->mbox_ctl = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(ebm->mbox_ctl)) + return dev_err_probe( + ebm->dev, PTR_ERR(ebm->mbox_ctl), + "Couldn't map mailbox control registers\n"); + + ebm->ch = devm_kmalloc_array(ebm->dev, ebm->v->num_channels, + sizeof(*ebm->ch), GFP_KERNEL); + if (!ebm->ch) + return -ENOMEM; + + ebm->mbox.chans = devm_kcalloc(ebm->dev, ebm->v->num_channels, + sizeof(struct mbox_chan), GFP_KERNEL); + if (!ebm->mbox.chans) + return -ENOMEM; + + for (i = 0; i < ebm->v->num_channels; i++) { + ch = &ebm->ch[i]; + ch->c = &ebm->v->channels[i]; + if (ch->c->rx_len > GPUEB_MBOX_MAX_RX_SIZE) { + 
dev_err(ebm->dev, "Channel %s RX size (%d) too large\n", + ch->c->name, ch->c->rx_len); + return -EINVAL; + } + ch->full_name = devm_kasprintf(ebm->dev, GFP_KERNEL, "%s:%s", + dev_name(ebm->dev), ch->c->name); + if (!ch->full_name) + return -ENOMEM; + + ch->ebm = ebm; + ch->num = i; + spin_lock_init(&ebm->mbox.chans[i].lock); + ebm->mbox.chans[i].con_priv = ch; + atomic_set(&ch->rx_status, GPUEB_MBOX_BLOCKED); + } + + ebm->mbox.dev = ebm->dev; + ebm->mbox.num_chans = ebm->v->num_channels; + ebm->mbox.txdone_poll = true; + ebm->mbox.txpoll_period = 0; /* minimum hrtimer interval */ + ebm->mbox.ops = &mtk_gpueb_mbox_ops; + + dev_set_drvdata(ebm->dev, ebm); + + return devm_mbox_controller_register(ebm->dev, &ebm->mbox); +} + +static const struct mtk_gpueb_mbox_variant mtk_gpueb_mbox_mt8196 = { + .num_channels = 12, + .channels = { + { "fast-dvfs-event", 0, 0x0000, 16, 0x00e0, 16 }, + { "gpufreq", 1, 0x0010, 32, 0x00f0, 32 }, + { "sleep", 2, 0x0030, 12, 0x0110, 4 }, + { "timer", 3, 0x003c, 24, 0x0114, 4 }, + { "fhctl", 4, 0x0054, 36, 0x0118, 4 }, + { "ccf", 5, 0x0078, 16, 0x011c, 16 }, + { "gpumpu", 6, 0x0088, 24, 0x012c, 4 }, + { "fast-dvfs", 7, 0x00a0, 24, 0x0130, 24 }, + { "ipir-c-met", 8, 0x00b8, 4, 0x0148, 16 }, + { "ipis-c-met", 9, 0x00bc, 16, 0x0158, 4 }, + { "brisket", 10, 0x00cc, 16, 0x015c, 16 }, + { "ppb", 11, 0x00dc, 4, 0x016c, 4 }, + }, +}; + +static const struct of_device_id mtk_gpueb_mbox_of_ids[] = { + { .compatible = "mediatek,mt8196-gpueb-mbox", .data = &mtk_gpueb_mbox_mt8196 }, + { /* Sentinel */ } +}; +MODULE_DEVICE_TABLE(of, mtk_gpueb_mbox_of_ids); + +static struct platform_driver mtk_gpueb_mbox_drv = { + .probe = mtk_gpueb_mbox_probe, + .driver = { + .name = "mtk-gpueb-mbox", + .of_match_table = mtk_gpueb_mbox_of_ids, + } +}; +module_platform_driver(mtk_gpueb_mbox_drv); + +MODULE_AUTHOR("Nicolas Frattaroli <nicolas.frattaroli@collabora.com>"); +MODULE_DESCRIPTION("MediaTek GPUEB mailbox driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c index ca50177a33f2..17fe6545875d 100644 --- a/drivers/mailbox/omap-mailbox.c +++ b/drivers/mailbox/omap-mailbox.c @@ -3,7 +3,7 @@ * OMAP mailbox driver * * Copyright (C) 2006-2009 Nokia Corporation. All rights reserved. 
- * Copyright (C) 2013-2016 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2013-2021 Texas Instruments Incorporated - https://www.ti.com * * Contact: Hiroshi DOYU <Hiroshi.DOYU@nokia.com> * Suman Anna <s-anna@ti.com> @@ -15,11 +15,11 @@ #include <linux/slab.h> #include <linux/kfifo.h> #include <linux/err.h> +#include <linux/io.h> #include <linux/module.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> -#include <linux/omap-mailbox.h> #include <linux/mailbox_controller.h> #include <linux/mailbox_client.h> @@ -51,6 +51,11 @@ #define MBOX_INTR_CFG_TYPE1 0 #define MBOX_INTR_CFG_TYPE2 1 +typedef enum { + IRQ_TX = 1, + IRQ_RX = 2, +} omap_mbox_irq_t; + struct omap_mbox_fifo { unsigned long msg; unsigned long fifo_stat; @@ -61,16 +66,9 @@ struct omap_mbox_fifo { u32 intr_bit; }; -struct omap_mbox_queue { - spinlock_t lock; - struct kfifo fifo; - struct work_struct work; - struct omap_mbox *mbox; - bool full; -}; - struct omap_mbox_match_data { u32 intr_type; + bool is_exclusive; }; struct omap_mbox_device { @@ -81,29 +79,12 @@ struct omap_mbox_device { u32 num_users; u32 num_fifos; u32 intr_type; - struct omap_mbox **mboxes; - struct mbox_controller controller; - struct list_head elem; -}; - -struct omap_mbox_fifo_info { - int tx_id; - int tx_usr; - int tx_irq; - - int rx_id; - int rx_usr; - int rx_irq; - - const char *name; - bool send_no_irq; + const struct omap_mbox_match_data *mbox_data; }; struct omap_mbox { const char *name; int irq; - struct omap_mbox_queue *rxq; - struct device *dev; struct omap_mbox_device *parent; struct omap_mbox_fifo tx_fifo; struct omap_mbox_fifo rx_fifo; @@ -112,22 +93,6 @@ struct omap_mbox { bool send_no_irq; }; -/* global variables for the mailbox devices */ -static DEFINE_MUTEX(omap_mbox_devices_lock); -static LIST_HEAD(omap_mbox_devices); - -static unsigned int mbox_kfifo_size = CONFIG_OMAP_MBOX_KFIFO_SIZE; -module_param(mbox_kfifo_size, uint, S_IRUGO); -MODULE_PARM_DESC(mbox_kfifo_size, "Size of omap's mailbox kfifo (bytes)"); - -static struct omap_mbox *mbox_chan_to_omap_mbox(struct mbox_chan *chan) -{ - if (!chan || !chan->con_priv) - return NULL; - - return (struct omap_mbox *)chan->con_priv; -} - static inline unsigned int mbox_read_reg(struct omap_mbox_device *mdev, size_t ofs) { @@ -141,14 +106,14 @@ void mbox_write_reg(struct omap_mbox_device *mdev, u32 val, size_t ofs) } /* Mailbox FIFO handle functions */ -static mbox_msg_t mbox_fifo_read(struct omap_mbox *mbox) +static u32 mbox_fifo_read(struct omap_mbox *mbox) { struct omap_mbox_fifo *fifo = &mbox->rx_fifo; - return (mbox_msg_t)mbox_read_reg(mbox->parent, fifo->msg); + return mbox_read_reg(mbox->parent, fifo->msg); } -static void mbox_fifo_write(struct omap_mbox *mbox, mbox_msg_t msg) +static void mbox_fifo_write(struct omap_mbox *mbox, u32 msg) { struct omap_mbox_fifo *fifo = &mbox->tx_fifo; @@ -197,7 +162,7 @@ static int is_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) return (int)(enable & status & bit); } -static void _omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) +static void omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) { u32 l; struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ? 
@@ -210,7 +175,7 @@ static void _omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) mbox_write_reg(mbox->parent, l, irqenable); } -static void _omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) +static void omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) { struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ? &mbox->tx_fifo : &mbox->rx_fifo; @@ -227,85 +192,27 @@ static void _omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) mbox_write_reg(mbox->parent, bit, irqdisable); } -void omap_mbox_enable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq) -{ - struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan); - - if (WARN_ON(!mbox)) - return; - - _omap_mbox_enable_irq(mbox, irq); -} -EXPORT_SYMBOL(omap_mbox_enable_irq); - -void omap_mbox_disable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq) -{ - struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan); - - if (WARN_ON(!mbox)) - return; - - _omap_mbox_disable_irq(mbox, irq); -} -EXPORT_SYMBOL(omap_mbox_disable_irq); - -/* - * Message receiver(workqueue) - */ -static void mbox_rx_work(struct work_struct *work) -{ - struct omap_mbox_queue *mq = - container_of(work, struct omap_mbox_queue, work); - mbox_msg_t msg; - int len; - - while (kfifo_len(&mq->fifo) >= sizeof(msg)) { - len = kfifo_out(&mq->fifo, (unsigned char *)&msg, sizeof(msg)); - WARN_ON(len != sizeof(msg)); - - mbox_chan_received_data(mq->mbox->chan, (void *)msg); - spin_lock_irq(&mq->lock); - if (mq->full) { - mq->full = false; - _omap_mbox_enable_irq(mq->mbox, IRQ_RX); - } - spin_unlock_irq(&mq->lock); - } -} - /* * Mailbox interrupt handler */ static void __mbox_tx_interrupt(struct omap_mbox *mbox) { - _omap_mbox_disable_irq(mbox, IRQ_TX); + omap_mbox_disable_irq(mbox, IRQ_TX); ack_mbox_irq(mbox, IRQ_TX); mbox_chan_txdone(mbox->chan, 0); } static void __mbox_rx_interrupt(struct omap_mbox *mbox) { - struct omap_mbox_queue *mq = mbox->rxq; - mbox_msg_t msg; - int len; + u32 msg; while (!mbox_fifo_empty(mbox)) { - if (unlikely(kfifo_avail(&mq->fifo) < sizeof(msg))) { - _omap_mbox_disable_irq(mbox, IRQ_RX); - mq->full = true; - goto nomem; - } - msg = mbox_fifo_read(mbox); - - len = kfifo_in(&mq->fifo, (unsigned char *)&msg, sizeof(msg)); - WARN_ON(len != sizeof(msg)); + mbox_chan_received_data(mbox->chan, (void *)(uintptr_t)msg); } - /* no more messages in the fifo. clear IRQ source. */ + /* clear IRQ source. 
*/ ack_mbox_irq(mbox, IRQ_RX); -nomem: - schedule_work(&mbox->rxq->work); } static irqreturn_t mbox_interrupt(int irq, void *p) @@ -321,202 +228,35 @@ static irqreturn_t mbox_interrupt(int irq, void *p) return IRQ_HANDLED; } -static struct omap_mbox_queue *mbox_queue_alloc(struct omap_mbox *mbox, - void (*work)(struct work_struct *)) -{ - struct omap_mbox_queue *mq; - - if (!work) - return NULL; - - mq = kzalloc(sizeof(*mq), GFP_KERNEL); - if (!mq) - return NULL; - - spin_lock_init(&mq->lock); - - if (kfifo_alloc(&mq->fifo, mbox_kfifo_size, GFP_KERNEL)) - goto error; - - INIT_WORK(&mq->work, work); - return mq; - -error: - kfree(mq); - return NULL; -} - -static void mbox_queue_free(struct omap_mbox_queue *q) -{ - kfifo_free(&q->fifo); - kfree(q); -} - static int omap_mbox_startup(struct omap_mbox *mbox) { int ret = 0; - struct omap_mbox_queue *mq; - - mq = mbox_queue_alloc(mbox, mbox_rx_work); - if (!mq) - return -ENOMEM; - mbox->rxq = mq; - mq->mbox = mbox; - ret = request_irq(mbox->irq, mbox_interrupt, IRQF_SHARED, - mbox->name, mbox); + ret = request_threaded_irq(mbox->irq, NULL, mbox_interrupt, + IRQF_SHARED | IRQF_ONESHOT, mbox->name, + mbox); if (unlikely(ret)) { pr_err("failed to register mailbox interrupt:%d\n", ret); - goto fail_request_irq; + return ret; } if (mbox->send_no_irq) mbox->chan->txdone_method = TXDONE_BY_ACK; - _omap_mbox_enable_irq(mbox, IRQ_RX); + omap_mbox_enable_irq(mbox, IRQ_RX); return 0; - -fail_request_irq: - mbox_queue_free(mbox->rxq); - return ret; } static void omap_mbox_fini(struct omap_mbox *mbox) { - _omap_mbox_disable_irq(mbox, IRQ_RX); + omap_mbox_disable_irq(mbox, IRQ_RX); free_irq(mbox->irq, mbox); - flush_work(&mbox->rxq->work); - mbox_queue_free(mbox->rxq); -} - -static struct omap_mbox *omap_mbox_device_find(struct omap_mbox_device *mdev, - const char *mbox_name) -{ - struct omap_mbox *_mbox, *mbox = NULL; - struct omap_mbox **mboxes = mdev->mboxes; - int i; - - if (!mboxes) - return NULL; - - for (i = 0; (_mbox = mboxes[i]); i++) { - if (!strcmp(_mbox->name, mbox_name)) { - mbox = _mbox; - break; - } - } - return mbox; -} - -struct mbox_chan *omap_mbox_request_channel(struct mbox_client *cl, - const char *chan_name) -{ - struct device *dev = cl->dev; - struct omap_mbox *mbox = NULL; - struct omap_mbox_device *mdev; - struct mbox_chan *chan; - unsigned long flags; - int ret; - - if (!dev) - return ERR_PTR(-ENODEV); - - if (dev->of_node) { - pr_err("%s: please use mbox_request_channel(), this API is supported only for OMAP non-DT usage\n", - __func__); - return ERR_PTR(-ENODEV); - } - - mutex_lock(&omap_mbox_devices_lock); - list_for_each_entry(mdev, &omap_mbox_devices, elem) { - mbox = omap_mbox_device_find(mdev, chan_name); - if (mbox) - break; - } - mutex_unlock(&omap_mbox_devices_lock); - - if (!mbox || !mbox->chan) - return ERR_PTR(-ENOENT); - - chan = mbox->chan; - spin_lock_irqsave(&chan->lock, flags); - chan->msg_free = 0; - chan->msg_count = 0; - chan->active_req = NULL; - chan->cl = cl; - init_completion(&chan->tx_complete); - spin_unlock_irqrestore(&chan->lock, flags); - - ret = chan->mbox->ops->startup(chan); - if (ret) { - pr_err("Unable to startup the chan (%d)\n", ret); - mbox_free_channel(chan); - chan = ERR_PTR(ret); - } - - return chan; -} -EXPORT_SYMBOL(omap_mbox_request_channel); - -static struct class omap_mbox_class = { .name = "mbox", }; - -static int omap_mbox_register(struct omap_mbox_device *mdev) -{ - int ret; - int i; - struct omap_mbox **mboxes; - - if (!mdev || !mdev->mboxes) - return -EINVAL; - - mboxes = 
mdev->mboxes; - for (i = 0; mboxes[i]; i++) { - struct omap_mbox *mbox = mboxes[i]; - - mbox->dev = device_create(&omap_mbox_class, mdev->dev, - 0, mbox, "%s", mbox->name); - if (IS_ERR(mbox->dev)) { - ret = PTR_ERR(mbox->dev); - goto err_out; - } - } - - mutex_lock(&omap_mbox_devices_lock); - list_add(&mdev->elem, &omap_mbox_devices); - mutex_unlock(&omap_mbox_devices_lock); - - ret = devm_mbox_controller_register(mdev->dev, &mdev->controller); - -err_out: - if (ret) { - while (i--) - device_unregister(mboxes[i]->dev); - } - return ret; -} - -static int omap_mbox_unregister(struct omap_mbox_device *mdev) -{ - int i; - struct omap_mbox **mboxes; - - if (!mdev || !mdev->mboxes) - return -EINVAL; - - mutex_lock(&omap_mbox_devices_lock); - list_del(&mdev->elem); - mutex_unlock(&omap_mbox_devices_lock); - - mboxes = mdev->mboxes; - for (i = 0; mboxes[i]; i++) - device_unregister(mboxes[i]->dev); - return 0; } static int omap_mbox_chan_startup(struct mbox_chan *chan) { - struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan); + struct omap_mbox *mbox = chan->con_priv; struct omap_mbox_device *mdev = mbox->parent; int ret = 0; @@ -531,7 +271,7 @@ static int omap_mbox_chan_startup(struct mbox_chan *chan) static void omap_mbox_chan_shutdown(struct mbox_chan *chan) { - struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan); + struct omap_mbox *mbox = chan->con_priv; struct omap_mbox_device *mdev = mbox->parent; mutex_lock(&mdev->cfg_lock); @@ -540,50 +280,50 @@ static void omap_mbox_chan_shutdown(struct mbox_chan *chan) mutex_unlock(&mdev->cfg_lock); } -static int omap_mbox_chan_send_noirq(struct omap_mbox *mbox, void *data) +static int omap_mbox_chan_send_noirq(struct omap_mbox *mbox, u32 msg) { - int ret = -EBUSY; + if (mbox_fifo_full(mbox)) + return -EBUSY; - if (!mbox_fifo_full(mbox)) { - _omap_mbox_enable_irq(mbox, IRQ_RX); - mbox_fifo_write(mbox, (mbox_msg_t)data); - ret = 0; - _omap_mbox_disable_irq(mbox, IRQ_RX); + omap_mbox_enable_irq(mbox, IRQ_RX); + mbox_fifo_write(mbox, msg); + omap_mbox_disable_irq(mbox, IRQ_RX); - /* we must read and ack the interrupt directly from here */ - mbox_fifo_read(mbox); - ack_mbox_irq(mbox, IRQ_RX); - } + /* we must read and ack the interrupt directly from here */ + mbox_fifo_read(mbox); + ack_mbox_irq(mbox, IRQ_RX); - return ret; + return 0; } -static int omap_mbox_chan_send(struct omap_mbox *mbox, void *data) +static int omap_mbox_chan_send(struct omap_mbox *mbox, u32 msg) { - int ret = -EBUSY; - - if (!mbox_fifo_full(mbox)) { - mbox_fifo_write(mbox, (mbox_msg_t)data); - ret = 0; + if (mbox_fifo_full(mbox)) { + /* always enable the interrupt */ + omap_mbox_enable_irq(mbox, IRQ_TX); + return -EBUSY; } + mbox_fifo_write(mbox, msg); + /* always enable the interrupt */ - _omap_mbox_enable_irq(mbox, IRQ_TX); - return ret; + omap_mbox_enable_irq(mbox, IRQ_TX); + return 0; } static int omap_mbox_chan_send_data(struct mbox_chan *chan, void *data) { - struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan); + struct omap_mbox *mbox = chan->con_priv; int ret; + u32 msg = (u32)(uintptr_t)(data); if (!mbox) return -EINVAL; if (mbox->send_no_irq) - ret = omap_mbox_chan_send_noirq(mbox, data); + ret = omap_mbox_chan_send_noirq(mbox, msg); else - ret = omap_mbox_chan_send(mbox, data); + ret = omap_mbox_chan_send(mbox, msg); return ret; } @@ -603,11 +343,13 @@ static int omap_mbox_suspend(struct device *dev) if (pm_runtime_status_suspended(dev)) return 0; - for (fifo = 0; fifo < mdev->num_fifos; fifo++) { - if (mbox_read_reg(mdev, MAILBOX_MSGSTATUS(fifo))) { - 
dev_err(mdev->dev, "fifo %d has unexpected unread messages\n", - fifo); - return -EBUSY; + if (mdev->mbox_data->is_exclusive) { + for (fifo = 0; fifo < mdev->num_fifos; fifo++) { + if (mbox_read_reg(mdev, MAILBOX_MSGSTATUS(fifo))) { + dev_err(mdev->dev, "fifo %d has unexpected unread messages\n", + fifo); + return -EBUSY; + } } } @@ -640,8 +382,9 @@ static const struct dev_pm_ops omap_mbox_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(omap_mbox_suspend, omap_mbox_resume) }; -static const struct omap_mbox_match_data omap2_data = { MBOX_INTR_CFG_TYPE1 }; -static const struct omap_mbox_match_data omap4_data = { MBOX_INTR_CFG_TYPE2 }; +static const struct omap_mbox_match_data omap2_data = { MBOX_INTR_CFG_TYPE1, true }; +static const struct omap_mbox_match_data omap4_data = { MBOX_INTR_CFG_TYPE2, true }; +static const struct omap_mbox_match_data am654_data = { MBOX_INTR_CFG_TYPE2, false }; static const struct of_device_id omap_mailbox_of_match[] = { { @@ -657,6 +400,14 @@ static const struct of_device_id omap_mailbox_of_match[] = { .data = &omap4_data, }, { + .compatible = "ti,am654-mailbox", + .data = &am654_data, + }, + { + .compatible = "ti,am64-mailbox", + .data = &am654_data, + }, + { /* end */ }, }; @@ -669,8 +420,9 @@ static struct mbox_chan *omap_mbox_of_xlate(struct mbox_controller *controller, struct device_node *node; struct omap_mbox_device *mdev; struct omap_mbox *mbox; + int i; - mdev = container_of(controller, struct omap_mbox_device, controller); + mdev = dev_get_drvdata(controller->dev); if (WARN_ON(!mdev)) return ERR_PTR(-EINVAL); @@ -681,23 +433,28 @@ static struct mbox_chan *omap_mbox_of_xlate(struct mbox_controller *controller, return ERR_PTR(-ENODEV); } - mbox = omap_mbox_device_find(mdev, node->name); + for (i = 0; i < controller->num_chans; i++) { + mbox = controller->chans[i].con_priv; + if (!strcmp(mbox->name, node->name)) { + of_node_put(node); + return &controller->chans[i]; + } + } + of_node_put(node); - return mbox ? 
mbox->chan : ERR_PTR(-ENOENT); + return ERR_PTR(-ENOENT); } static int omap_mbox_probe(struct platform_device *pdev) { - struct resource *mem; int ret; struct mbox_chan *chnls; - struct omap_mbox **list, *mbox, *mboxblk; - struct omap_mbox_fifo_info *finfo, *finfoblk; + struct omap_mbox *mbox; struct omap_mbox_device *mdev; struct omap_mbox_fifo *fifo; struct device_node *node = pdev->dev.of_node; struct device_node *child; - const struct omap_mbox_match_data *match_data; + struct mbox_controller *controller; u32 intr_type, info_count; u32 num_users, num_fifos; u32 tmp[3]; @@ -709,11 +466,6 @@ static int omap_mbox_probe(struct platform_device *pdev) return -ENODEV; } - match_data = of_device_get_match_data(&pdev->dev); - if (!match_data) - return -ENODEV; - intr_type = match_data->intr_type; - if (of_property_read_u32(node, "ti,mbox-num-users", &num_users)) return -ENODEV; @@ -726,47 +478,17 @@ static int omap_mbox_probe(struct platform_device *pdev) return -ENODEV; } - finfoblk = devm_kcalloc(&pdev->dev, info_count, sizeof(*finfoblk), - GFP_KERNEL); - if (!finfoblk) - return -ENOMEM; - - finfo = finfoblk; - child = NULL; - for (i = 0; i < info_count; i++, finfo++) { - child = of_get_next_available_child(node, child); - ret = of_property_read_u32_array(child, "ti,mbox-tx", tmp, - ARRAY_SIZE(tmp)); - if (ret) - return ret; - finfo->tx_id = tmp[0]; - finfo->tx_irq = tmp[1]; - finfo->tx_usr = tmp[2]; - - ret = of_property_read_u32_array(child, "ti,mbox-rx", tmp, - ARRAY_SIZE(tmp)); - if (ret) - return ret; - finfo->rx_id = tmp[0]; - finfo->rx_irq = tmp[1]; - finfo->rx_usr = tmp[2]; - - finfo->name = child->name; - - if (of_find_property(child, "ti,mbox-send-noirq", NULL)) - finfo->send_no_irq = true; - - if (finfo->tx_id >= num_fifos || finfo->rx_id >= num_fifos || - finfo->tx_usr >= num_users || finfo->rx_usr >= num_users) - return -EINVAL; - } - mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL); if (!mdev) return -ENOMEM; - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - mdev->mbox_base = devm_ioremap_resource(&pdev->dev, mem); + mdev->mbox_data = device_get_match_data(&pdev->dev); + if (!mdev->mbox_data) + return -ENODEV; + + intr_type = mdev->mbox_data->intr_type; + + mdev->mbox_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(mdev->mbox_base)) return PTR_ERR(mdev->mbox_base); @@ -775,52 +497,67 @@ static int omap_mbox_probe(struct platform_device *pdev) if (!mdev->irq_ctx) return -ENOMEM; - /* allocate one extra for marking end of list */ - list = devm_kcalloc(&pdev->dev, info_count + 1, sizeof(*list), - GFP_KERNEL); - if (!list) - return -ENOMEM; - chnls = devm_kcalloc(&pdev->dev, info_count + 1, sizeof(*chnls), GFP_KERNEL); if (!chnls) return -ENOMEM; - mboxblk = devm_kcalloc(&pdev->dev, info_count, sizeof(*mbox), - GFP_KERNEL); - if (!mboxblk) - return -ENOMEM; + child = NULL; + for (i = 0; i < info_count; i++) { + int tx_id, tx_irq, tx_usr; + int rx_id, rx_usr; + + mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL); + if (!mbox) + return -ENOMEM; + + child = of_get_next_available_child(node, child); + ret = of_property_read_u32_array(child, "ti,mbox-tx", tmp, + ARRAY_SIZE(tmp)); + if (ret) + return ret; + tx_id = tmp[0]; + tx_irq = tmp[1]; + tx_usr = tmp[2]; + + ret = of_property_read_u32_array(child, "ti,mbox-rx", tmp, + ARRAY_SIZE(tmp)); + if (ret) + return ret; + rx_id = tmp[0]; + /* rx_irq = tmp[1]; */ + rx_usr = tmp[2]; + + if (tx_id >= num_fifos || rx_id >= num_fifos || + tx_usr >= num_users || rx_usr >= num_users) + return -EINVAL; - mbox = 
mboxblk; - finfo = finfoblk; - for (i = 0; i < info_count; i++, finfo++) { fifo = &mbox->tx_fifo; - fifo->msg = MAILBOX_MESSAGE(finfo->tx_id); - fifo->fifo_stat = MAILBOX_FIFOSTATUS(finfo->tx_id); - fifo->intr_bit = MAILBOX_IRQ_NOTFULL(finfo->tx_id); - fifo->irqenable = MAILBOX_IRQENABLE(intr_type, finfo->tx_usr); - fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, finfo->tx_usr); - fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, finfo->tx_usr); + fifo->msg = MAILBOX_MESSAGE(tx_id); + fifo->fifo_stat = MAILBOX_FIFOSTATUS(tx_id); + fifo->intr_bit = MAILBOX_IRQ_NOTFULL(tx_id); + fifo->irqenable = MAILBOX_IRQENABLE(intr_type, tx_usr); + fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, tx_usr); + fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, tx_usr); fifo = &mbox->rx_fifo; - fifo->msg = MAILBOX_MESSAGE(finfo->rx_id); - fifo->msg_stat = MAILBOX_MSGSTATUS(finfo->rx_id); - fifo->intr_bit = MAILBOX_IRQ_NEWMSG(finfo->rx_id); - fifo->irqenable = MAILBOX_IRQENABLE(intr_type, finfo->rx_usr); - fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, finfo->rx_usr); - fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, finfo->rx_usr); - - mbox->send_no_irq = finfo->send_no_irq; + fifo->msg = MAILBOX_MESSAGE(rx_id); + fifo->msg_stat = MAILBOX_MSGSTATUS(rx_id); + fifo->intr_bit = MAILBOX_IRQ_NEWMSG(rx_id); + fifo->irqenable = MAILBOX_IRQENABLE(intr_type, rx_usr); + fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, rx_usr); + fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, rx_usr); + + mbox->send_no_irq = of_property_read_bool(child, "ti,mbox-send-noirq"); mbox->intr_type = intr_type; mbox->parent = mdev; - mbox->name = finfo->name; - mbox->irq = platform_get_irq(pdev, finfo->tx_irq); + mbox->name = child->name; + mbox->irq = platform_get_irq(pdev, tx_irq); if (mbox->irq < 0) return mbox->irq; mbox->chan = &chnls[i]; chnls[i].con_priv = mbox; - list[i] = mbox++; } mutex_init(&mdev->cfg_lock); @@ -828,27 +565,30 @@ static int omap_mbox_probe(struct platform_device *pdev) mdev->num_users = num_users; mdev->num_fifos = num_fifos; mdev->intr_type = intr_type; - mdev->mboxes = list; - - /* OMAP does not have a Tx-Done IRQ, but rather a Tx-Ready IRQ */ - mdev->controller.txdone_irq = true; - mdev->controller.dev = mdev->dev; - mdev->controller.ops = &omap_mbox_chan_ops; - mdev->controller.chans = chnls; - mdev->controller.num_chans = info_count; - mdev->controller.of_xlate = omap_mbox_of_xlate; - ret = omap_mbox_register(mdev); + + controller = devm_kzalloc(&pdev->dev, sizeof(*controller), GFP_KERNEL); + if (!controller) + return -ENOMEM; + /* + * OMAP/K3 Mailbox IP does not have a Tx-Done IRQ, but rather a Tx-Ready + * IRQ and is needed to run the Tx state machine + */ + controller->txdone_irq = true; + controller->dev = mdev->dev; + controller->ops = &omap_mbox_chan_ops; + controller->chans = chnls; + controller->num_chans = info_count; + controller->of_xlate = omap_mbox_of_xlate; + ret = devm_mbox_controller_register(mdev->dev, controller); if (ret) return ret; platform_set_drvdata(pdev, mdev); - pm_runtime_enable(mdev->dev); + devm_pm_runtime_enable(mdev->dev); - ret = pm_runtime_get_sync(mdev->dev); - if (ret < 0) { - pm_runtime_put_noidle(mdev->dev); - goto unregister; - } + ret = pm_runtime_resume_and_get(mdev->dev); + if (ret < 0) + return ret; /* * just print the raw revision register, the format is not @@ -858,65 +598,21 @@ static int omap_mbox_probe(struct platform_device *pdev) dev_info(mdev->dev, "omap mailbox rev 0x%x\n", l); ret = pm_runtime_put_sync(mdev->dev); - if (ret < 0) - goto unregister; - - 
devm_kfree(&pdev->dev, finfoblk); - return 0; - -unregister: - pm_runtime_disable(mdev->dev); - omap_mbox_unregister(mdev); - return ret; -} - -static int omap_mbox_remove(struct platform_device *pdev) -{ - struct omap_mbox_device *mdev = platform_get_drvdata(pdev); - - pm_runtime_disable(mdev->dev); - omap_mbox_unregister(mdev); + if (ret < 0 && ret != -ENOSYS) + return ret; return 0; } static struct platform_driver omap_mbox_driver = { .probe = omap_mbox_probe, - .remove = omap_mbox_remove, .driver = { .name = "omap-mailbox", .pm = &omap_mbox_pm_ops, - .of_match_table = of_match_ptr(omap_mailbox_of_match), + .of_match_table = omap_mailbox_of_match, }, }; - -static int __init omap_mbox_init(void) -{ - int err; - - err = class_register(&omap_mbox_class); - if (err) - return err; - - /* kfifo size sanity check: alignment and minimal size */ - mbox_kfifo_size = ALIGN(mbox_kfifo_size, sizeof(mbox_msg_t)); - mbox_kfifo_size = max_t(unsigned int, mbox_kfifo_size, - sizeof(mbox_msg_t)); - - err = platform_driver_register(&omap_mbox_driver); - if (err) - class_unregister(&omap_mbox_class); - - return err; -} -subsys_initcall(omap_mbox_init); - -static void __exit omap_mbox_exit(void) -{ - platform_driver_unregister(&omap_mbox_driver); - class_unregister(&omap_mbox_class); -} -module_exit(omap_mbox_exit); +module_platform_driver(omap_mbox_driver); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("omap mailbox: interrupt driven messaging"); diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c index 256f18b67e8a..ff292b9e0be9 100644 --- a/drivers/mailbox/pcc.c +++ b/drivers/mailbox/pcc.c @@ -1,17 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2014 Linaro Ltd. * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org> * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * PCC (Platform Communication Channel) is defined in the ACPI 5.0+ * specification. It is a mailbox like mechanism to allow clients * such as CPPC (Collaborative Processor Performance Control), RAS @@ -41,7 +32,7 @@ * * Client writes WRITE cmd in communication region cmd address. * * Client issues mbox_send_message() which rings the PCC doorbell * for its PCC channel. - * * If command completes, then writes have succeded and it can release + * * If command completes, then writes have succeeded and it can release * the channel lock. 
 *
 * There is a Nominal latency defined for each channel which indicates
@@ -61,6 +52,7 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/list.h>
+#include <linux/log2.h>
 #include <linux/platform_device.h>
 #include <linux/mailbox_controller.h>
 #include <linux/mailbox_client.h>
@@ -71,31 +63,59 @@
 
 #define MBOX_IRQ_NAME		"pcc-mbox"
 
-static struct mbox_chan *pcc_mbox_channels;
-
-/* Array of cached virtual address for doorbell registers */
-static void __iomem **pcc_doorbell_vaddr;
-/* Array of cached virtual address for doorbell ack registers */
-static void __iomem **pcc_doorbell_ack_vaddr;
-/* Array of doorbell interrupts */
-static int *pcc_doorbell_irq;
+/**
+ * struct pcc_chan_reg - PCC register bundle
+ *
+ * @vaddr: cached virtual address for this register
+ * @gas: pointer to the generic address structure for this register
+ * @preserve_mask: bitmask to preserve when writing to this register
+ * @set_mask: bitmask to set when writing to this register
+ * @status_mask: bitmask to determine and/or update the status for this register
+ */
+struct pcc_chan_reg {
+	void __iomem *vaddr;
+	struct acpi_generic_address *gas;
+	u64 preserve_mask;
+	u64 set_mask;
+	u64 status_mask;
+};
 
-static struct mbox_controller pcc_mbox_ctrl = {};
 /**
- * get_pcc_channel - Given a PCC subspace idx, get
- *	the respective mbox_channel.
- * @id: PCC subspace index.
+ * struct pcc_chan_info - PCC channel specific information
 *
- * Return: ERR_PTR(errno) if error, else pointer
- *	to mbox channel.
+ * @chan: PCC channel information with Shared Memory Region info
+ * @db: PCC register bundle for the doorbell register
+ * @plat_irq_ack: PCC register bundle for the platform interrupt acknowledge
+ *	register
+ * @cmd_complete: PCC register bundle for the command complete check register
+ * @cmd_update: PCC register bundle for the command complete update register
+ * @error: PCC register bundle for the error status register
+ * @plat_irq: platform interrupt
+ * @type: PCC subspace type
+ * @plat_irq_flags: platform interrupt flags
+ * @chan_in_use: this flag is used just to check if the interrupt needs
+ *	handling when it is shared. Since only one transfer can occur
+ *	at a time and mailbox takes care of locking, this flag can be
+ *	accessed without a lock. Note: only the subspace types that
+ *	support communication from OSPM to the Platform (such as type 3)
+ *	use it; all other types ignore it completely.
 */
-static struct mbox_chan *get_pcc_channel(int id)
-{
-	if (id < 0 || id >= pcc_mbox_ctrl.num_chans)
-		return ERR_PTR(-ENOENT);
+struct pcc_chan_info {
+	struct pcc_mbox_chan chan;
+	struct pcc_chan_reg db;
+	struct pcc_chan_reg plat_irq_ack;
+	struct pcc_chan_reg cmd_complete;
+	struct pcc_chan_reg cmd_update;
+	struct pcc_chan_reg error;
+	int plat_irq;
+	u8 type;
+	unsigned int plat_irq_flags;
+	bool chan_in_use;
+};
 
-	return &pcc_mbox_channels[id];
-}
+#define to_pcc_chan_info(c) container_of(c, struct pcc_chan_info, chan)
+static struct pcc_chan_info *chan_info;
+static int pcc_chan_count;
 
 /*
  * PCC can be used with perf critical drivers such as CPPC
@@ -105,10 +125,8 @@ static struct mbox_chan *get_pcc_channel(int id)
 * The below read_register and write_registers are used to read and
 * write from perf critical registers such as PCC doorbell register
 */
-static int read_register(void __iomem *vaddr, u64 *val, unsigned int bit_width)
+static void read_register(void __iomem *vaddr, u64 *val, unsigned int bit_width)
 {
-	int ret_val = 0;
-
 	switch (bit_width) {
 	case 8:
 		*val = readb(vaddr);
 		break;
@@ -122,19 +140,11 @@ static int read_register(void __iomem *vaddr, u64 *val, unsigned int bit_width)
 	case 64:
 		*val = readq(vaddr);
 		break;
-	default:
-		pr_debug("Error: Cannot read register of %u bit width",
-			 bit_width);
-		ret_val = -EFAULT;
-		break;
 	}
-	return ret_val;
 }
 
-static int write_register(void __iomem *vaddr, u64 val, unsigned int bit_width)
+static void write_register(void __iomem *vaddr, u64 val, unsigned int bit_width)
 {
-	int ret_val = 0;
-
 	switch (bit_width) {
 	case 8:
 		writeb(val, vaddr);
@@ -148,13 +158,54 @@ static int write_register(void __iomem *vaddr, u64 val, unsigned int bit_width)
 	case 64:
 		writeq(val, vaddr);
 		break;
-	default:
-		pr_debug("Error: Cannot write register of %u bit width",
-			 bit_width);
-		ret_val = -EFAULT;
-		break;
 	}
-	return ret_val;
+}
+
+static int pcc_chan_reg_read(struct pcc_chan_reg *reg, u64 *val)
+{
+	int ret = 0;
+
+	if (!reg->gas) {
+		*val = 0;
+		return 0;
+	}
+
+	if (reg->vaddr)
+		read_register(reg->vaddr, val, reg->gas->bit_width);
+	else
+		ret = acpi_read(val, reg->gas);
+
+	return ret;
+}
+
+static int pcc_chan_reg_write(struct pcc_chan_reg *reg, u64 val)
+{
+	int ret = 0;
+
+	if (!reg->gas)
+		return 0;
+
+	if (reg->vaddr)
+		write_register(reg->vaddr, val, reg->gas->bit_width);
+	else
+		ret = acpi_write(val, reg->gas);
+
+	return ret;
+}
+
+static int pcc_chan_reg_read_modify_write(struct pcc_chan_reg *reg)
+{
+	int ret = 0;
+	u64 val;
+
+	ret = pcc_chan_reg_read(reg, &val);
+	if (ret)
+		return ret;
+
+	val &= reg->preserve_mask;
+	val |= reg->set_mask;
+
+	return pcc_chan_reg_write(reg, val);
 }
 
 /**
@@ -181,45 +232,145 @@ static int pcc_map_interrupt(u32 interrupt, u32 flags)
 	return acpi_register_gsi(NULL, interrupt, trigger, polarity);
 }
 
+static bool pcc_chan_plat_irq_can_be_shared(struct pcc_chan_info *pchan)
+{
+	return (pchan->plat_irq_flags & ACPI_PCCT_INTERRUPT_MODE) ==
+		ACPI_LEVEL_SENSITIVE;
+}
+
+static bool pcc_mbox_cmd_complete_check(struct pcc_chan_info *pchan)
+{
+	u64 val;
+	int ret;
+
+	if (!pchan->cmd_complete.gas)
+		return true;
+
+	ret = pcc_chan_reg_read(&pchan->cmd_complete, &val);
+	if (ret)
+		return false;
+
+	/*
+	 * Determine whether the channel should respond to the interrupt,
+	 * based on the value of the command complete register.
+	 */
+	val &= pchan->cmd_complete.status_mask;
+
+	/*
+	 * If this is a PCC slave subspace channel, a command complete bit
+	 * of 0 indicates that the Platform is sending a notification and
+	 * OSPM needs to respond to this interrupt to process the command.
+	 */
+	if (pchan->type == ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE)
+		return !val;
+
+	return !!val;
+}
+
+static int pcc_mbox_error_check_and_clear(struct pcc_chan_info *pchan)
+{
+	u64 val;
+	int ret;
+
+	ret = pcc_chan_reg_read(&pchan->error, &val);
+	if (ret)
+		return ret;
+
+	if (val & pchan->error.status_mask) {
+		val &= pchan->error.preserve_mask;
+		pcc_chan_reg_write(&pchan->error, val);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static void pcc_chan_acknowledge(struct pcc_chan_info *pchan)
+{
+	struct acpi_pcct_ext_pcc_shared_memory __iomem *pcc_hdr;
+
+	if (pchan->type != ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE)
+		return;
+
+	pcc_chan_reg_read_modify_write(&pchan->cmd_update);
+
+	pcc_hdr = pchan->chan.shmem;
+
+	/*
+	 * The PCC slave subspace channel needs to set the command complete
+	 * bit after processing the message. If the PCC_CMD_COMPLETION_NOTIFY
+	 * flag is set, it should also ring the doorbell.
+	 */
+	if (ioread32(&pcc_hdr->flags) & PCC_CMD_COMPLETION_NOTIFY)
+		pcc_chan_reg_read_modify_write(&pchan->db);
+}
+
+static void *write_response(struct pcc_chan_info *pchan)
+{
+	struct pcc_header pcc_header;
+	void *buffer;
+	int data_len;
+
+	memcpy_fromio(&pcc_header, pchan->chan.shmem,
+		      sizeof(pcc_header));
+	data_len = pcc_header.length - sizeof(u32) + sizeof(struct pcc_header);
+
+	buffer = pchan->chan.rx_alloc(pchan->chan.mchan->cl, data_len);
+	if (buffer != NULL)
+		memcpy_fromio(buffer, pchan->chan.shmem, data_len);
+	return buffer;
+}
+
 /**
  * pcc_mbox_irq - PCC mailbox interrupt handler
+ * @irq: interrupt number
+ * @p: data/cookie passed from the caller to identify the channel
+ *
+ * Returns: IRQ_HANDLED if interrupt is handled or IRQ_NONE if not
  */
 static irqreturn_t pcc_mbox_irq(int irq, void *p)
 {
-	struct acpi_generic_address *doorbell_ack;
-	struct acpi_pcct_hw_reduced *pcct_ss;
+	struct pcc_chan_info *pchan;
 	struct mbox_chan *chan = p;
-	u64 doorbell_ack_preserve;
-	u64 doorbell_ack_write;
-	u64 doorbell_ack_val;
-	int ret;
+	struct pcc_header *pcc_header = chan->active_req;
+	void *handle = NULL;
 
-	pcct_ss = chan->con_priv;
+	pchan = chan->con_priv;
 
-	mbox_chan_received_data(chan, NULL);
+	if (pcc_chan_reg_read_modify_write(&pchan->plat_irq_ack))
+		return IRQ_NONE;
 
-	if (pcct_ss->header.type == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) {
-		struct acpi_pcct_hw_reduced_type2 *pcct2_ss = chan->con_priv;
-		u32 id = chan - pcc_mbox_channels;
+	if (pchan->type == ACPI_PCCT_TYPE_EXT_PCC_MASTER_SUBSPACE &&
+	    !pchan->chan_in_use)
+		return IRQ_NONE;
 
-		doorbell_ack = &pcct2_ss->platform_ack_register;
-		doorbell_ack_preserve = pcct2_ss->ack_preserve_mask;
-		doorbell_ack_write = pcct2_ss->ack_write_mask;
+	if (!pcc_mbox_cmd_complete_check(pchan))
+		return IRQ_NONE;
 
-		ret = read_register(pcc_doorbell_ack_vaddr[id],
-				    &doorbell_ack_val,
-				    doorbell_ack->bit_width);
-		if (ret)
-			return IRQ_NONE;
+	if (pcc_mbox_error_check_and_clear(pchan))
+		return IRQ_NONE;
 
-		ret = write_register(pcc_doorbell_ack_vaddr[id],
-				     (doorbell_ack_val & doorbell_ack_preserve)
-				     | doorbell_ack_write,
-				     doorbell_ack->bit_width);
-		if (ret)
-			return IRQ_NONE;
+	/*
+	 * Clear this flag after updating the interrupt ack register and just
+	 * before mbox_chan_received_data(), which might call pcc_send_data()
+	 * where the flag is set again to start a new transfer. This is
+	 * required to avoid any possible race when updating this flag.
+	 */
+	pchan->chan_in_use = false;
+
+	if (pchan->chan.rx_alloc)
+		handle = write_response(pchan);
+
+	if (chan->active_req) {
+		pcc_header = chan->active_req;
+		if (pcc_header->flags & PCC_CMD_COMPLETION_NOTIFY)
+			mbox_chan_txdone(chan, 0);
 	}
+	mbox_chan_received_data(chan, handle);
+
+	pcc_chan_acknowledge(pchan);
+
 	return IRQ_HANDLED;
 }
 
@@ -233,93 +384,113 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
  * ACPI package. This is used to lookup the array of PCC
  * subspaces as parsed by the PCC Mailbox controller.
  *
- * Return: Pointer to the Mailbox Channel if successful or
- *		ERR_PTR.
+ * Return: Pointer to the PCC Mailbox Channel if successful or ERR_PTR.
 */
-struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
-					   int subspace_id)
+struct pcc_mbox_chan *
+pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id)
 {
-	struct device *dev = pcc_mbox_ctrl.dev;
+	struct pcc_mbox_chan *pcc_mchan;
+	struct pcc_chan_info *pchan;
 	struct mbox_chan *chan;
-	unsigned long flags;
+	int rc;
 
-	/*
-	 * Each PCC Subspace is a Mailbox Channel.
-	 * The PCC Clients get their PCC Subspace ID
-	 * from their own tables and pass it here.
-	 * This returns a pointer to the PCC subspace
-	 * for the Client to operate on.
-	 */
-	chan = get_pcc_channel(subspace_id);
+	if (subspace_id < 0 || subspace_id >= pcc_chan_count)
+		return ERR_PTR(-ENOENT);
+	pchan = chan_info + subspace_id;
+	chan = pchan->chan.mchan;
 	if (IS_ERR(chan) || chan->cl) {
-		dev_err(dev, "Channel not found for idx: %d\n", subspace_id);
+		pr_err("Channel not found for idx: %d\n", subspace_id);
 		return ERR_PTR(-EBUSY);
 	}
-	spin_lock_irqsave(&chan->lock, flags);
-	chan->msg_free = 0;
-	chan->msg_count = 0;
-	chan->active_req = NULL;
-	chan->cl = cl;
-	init_completion(&chan->tx_complete);
+	rc = mbox_bind_client(chan, cl);
+	if (rc)
+		return ERR_PTR(rc);
 
-	if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
-		chan->txdone_method = TXDONE_BY_ACK;
+	pcc_mchan = &pchan->chan;
+	pcc_mchan->shmem = acpi_os_ioremap(pcc_mchan->shmem_base_addr,
+					   pcc_mchan->shmem_size);
+	if (!pcc_mchan->shmem)
+		goto err;
 
-	spin_unlock_irqrestore(&chan->lock, flags);
+	pcc_mchan->manage_writes = false;
 
-	if (pcc_doorbell_irq[subspace_id] > 0) {
-		int rc;
+	/* This indicates that the channel is ready to accept messages.
+	 * This needs to happen after the channel has registered
+	 * its callback. There is no access point to do that in
+	 * the mailbox API. That implies that the mailbox client must
+	 * have set the allocate callback function prior to
+	 * sending any messages.
+	 */
+	if (pchan->type == ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE)
+		pcc_chan_reg_read_modify_write(&pchan->cmd_update);
 
-		rc = devm_request_irq(dev, pcc_doorbell_irq[subspace_id],
-				      pcc_mbox_irq, 0, MBOX_IRQ_NAME, chan);
-		if (unlikely(rc)) {
-			dev_err(dev, "failed to register PCC interrupt %d\n",
-				pcc_doorbell_irq[subspace_id]);
-			pcc_mbox_free_channel(chan);
-			chan = ERR_PTR(rc);
-		}
-	}
+	return pcc_mchan;
 
-	return chan;
+err:
+	mbox_free_channel(chan);
+	return ERR_PTR(-ENXIO);
 }
 EXPORT_SYMBOL_GPL(pcc_mbox_request_channel);
 
 /**
  * pcc_mbox_free_channel - Clients call this to free their Channel.
* - * @chan: Pointer to the mailbox channel as returned by - * pcc_mbox_request_channel() + * @pchan: Pointer to the PCC mailbox channel as returned by + * pcc_mbox_request_channel() */ -void pcc_mbox_free_channel(struct mbox_chan *chan) +void pcc_mbox_free_channel(struct pcc_mbox_chan *pchan) { - u32 id = chan - pcc_mbox_channels; - unsigned long flags; + struct mbox_chan *chan = pchan->mchan; + struct pcc_chan_info *pchan_info; + struct pcc_mbox_chan *pcc_mbox_chan; if (!chan || !chan->cl) return; - - if (id >= pcc_mbox_ctrl.num_chans) { - pr_debug("pcc_mbox_free_channel: Invalid mbox_chan passed\n"); - return; + pchan_info = chan->con_priv; + pcc_mbox_chan = &pchan_info->chan; + if (pcc_mbox_chan->shmem) { + iounmap(pcc_mbox_chan->shmem); + pcc_mbox_chan->shmem = NULL; } - if (pcc_doorbell_irq[id] > 0) - devm_free_irq(chan->mbox->dev, pcc_doorbell_irq[id], chan); + mbox_free_channel(chan); +} +EXPORT_SYMBOL_GPL(pcc_mbox_free_channel); - spin_lock_irqsave(&chan->lock, flags); - chan->cl = NULL; - chan->active_req = NULL; - if (chan->txdone_method == TXDONE_BY_ACK) - chan->txdone_method = TXDONE_BY_POLL; +static int pcc_write_to_buffer(struct mbox_chan *chan, void *data) +{ + struct pcc_chan_info *pchan = chan->con_priv; + struct pcc_mbox_chan *pcc_mbox_chan = &pchan->chan; + struct pcc_header *pcc_header = data; - spin_unlock_irqrestore(&chan->lock, flags); + if (!pchan->chan.manage_writes) + return 0; + + /* The PCC header length includes the command field + * but not the other values from the header. + */ + int len = pcc_header->length - sizeof(u32) + sizeof(struct pcc_header); + u64 val; + + pcc_chan_reg_read(&pchan->cmd_complete, &val); + if (!val) { + pr_info("%s pchan->cmd_complete not set", __func__); + return -1; + } + memcpy_toio(pcc_mbox_chan->shmem, data, len); + return 0; } -EXPORT_SYMBOL_GPL(pcc_mbox_free_channel); + /** - * pcc_send_data - Called from Mailbox Controller code. Used + * pcc_send_data - Called from Mailbox Controller code. If + * pchan->chan.rx_alloc is set, then the command complete + * flag is checked and the data is written to the shared + * buffer io memory. + * + * If pchan->chan.rx_alloc is not set, then it is used * here only to ring the channel doorbell. The PCC client * specific read/write is done in the client driver in * order to maintain atomicity over PCC channel once @@ -332,48 +503,90 @@ EXPORT_SYMBOL_GPL(pcc_mbox_free_channel); */ static int pcc_send_data(struct mbox_chan *chan, void *data) { - struct acpi_pcct_hw_reduced *pcct_ss = chan->con_priv; - struct acpi_generic_address *doorbell; - u64 doorbell_preserve; - u64 doorbell_val; - u64 doorbell_write; - u32 id = chan - pcc_mbox_channels; - int ret = 0; + int ret; + struct pcc_chan_info *pchan = chan->con_priv; - if (id >= pcc_mbox_ctrl.num_chans) { - pr_debug("pcc_send_data: Invalid mbox_chan passed\n"); - return -ENOENT; - } + ret = pcc_write_to_buffer(chan, data); + if (ret) + return ret; - doorbell = &pcct_ss->doorbell_register; - doorbell_preserve = pcct_ss->preserve_mask; - doorbell_write = pcct_ss->write_mask; + ret = pcc_chan_reg_read_modify_write(&pchan->cmd_update); + if (ret) + return ret; + + ret = pcc_chan_reg_read_modify_write(&pchan->db); + + if (!ret && pchan->plat_irq > 0) + pchan->chan_in_use = true; - /* Sync notification from OS to Platform. 
*/ - if (pcc_doorbell_vaddr[id]) { - ret = read_register(pcc_doorbell_vaddr[id], &doorbell_val, - doorbell->bit_width); - if (ret) - return ret; - ret = write_register(pcc_doorbell_vaddr[id], - (doorbell_val & doorbell_preserve) | doorbell_write, - doorbell->bit_width); - } else { - ret = acpi_read(&doorbell_val, doorbell); - if (ret) - return ret; - ret = acpi_write((doorbell_val & doorbell_preserve) | doorbell_write, - doorbell); - } return ret; } + +static bool pcc_last_tx_done(struct mbox_chan *chan) +{ + struct pcc_chan_info *pchan = chan->con_priv; + u64 val; + + pcc_chan_reg_read(&pchan->cmd_complete, &val); + if (!val) + return false; + else + return true; +} + + + +/** + * pcc_startup - Called from Mailbox Controller code. Used here + * to request the interrupt. + * @chan: Pointer to Mailbox channel to startup. + * + * Return: Err if something failed else 0 for success. + */ +static int pcc_startup(struct mbox_chan *chan) +{ + struct pcc_chan_info *pchan = chan->con_priv; + unsigned long irqflags; + int rc; + + if (pchan->plat_irq > 0) { + irqflags = pcc_chan_plat_irq_can_be_shared(pchan) ? + IRQF_SHARED | IRQF_ONESHOT : 0; + rc = devm_request_irq(chan->mbox->dev, pchan->plat_irq, pcc_mbox_irq, + irqflags, MBOX_IRQ_NAME, chan); + if (unlikely(rc)) { + dev_err(chan->mbox->dev, "failed to register PCC interrupt %d\n", + pchan->plat_irq); + return rc; + } + } + + return 0; +} + +/** + * pcc_shutdown - Called from Mailbox Controller code. Used here + * to free the interrupt. + * @chan: Pointer to Mailbox channel to shutdown. + */ +static void pcc_shutdown(struct mbox_chan *chan) +{ + struct pcc_chan_info *pchan = chan->con_priv; + + if (pchan->plat_irq > 0) + devm_free_irq(chan->mbox->dev, pchan->plat_irq, chan); +} + static const struct mbox_chan_ops pcc_chan_ops = { .send_data = pcc_send_data, + .startup = pcc_startup, + .shutdown = pcc_shutdown, + .last_tx_done = pcc_last_tx_done, }; /** - * parse_pcc_subspaces -- Count PCC subspaces defined + * parse_pcc_subspace - Count PCC subspaces defined * @header: Pointer to the ACPI subtable header under the PCCT. * @end: End of subtable entry. * @@ -382,7 +595,7 @@ static const struct mbox_chan_ops pcc_chan_ops = { * * This gets called for each entry in the PCC table. */ -static int parse_pcc_subspace(struct acpi_subtable_header *header, +static int parse_pcc_subspace(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_pcct_subspace *ss = (struct acpi_pcct_subspace *) header; @@ -393,41 +606,180 @@ static int parse_pcc_subspace(struct acpi_subtable_header *header, return -EINVAL; } +static int +pcc_chan_reg_init(struct pcc_chan_reg *reg, struct acpi_generic_address *gas, + u64 preserve_mask, u64 set_mask, u64 status_mask, char *name) +{ + if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { + if (!(gas->bit_width >= 8 && gas->bit_width <= 64 && + is_power_of_2(gas->bit_width))) { + pr_err("Error: Cannot access register of %u bit width", + gas->bit_width); + return -EFAULT; + } + + reg->vaddr = acpi_os_ioremap(gas->address, gas->bit_width / 8); + if (!reg->vaddr) { + pr_err("Failed to ioremap PCC %s register\n", name); + return -ENOMEM; + } + } + reg->gas = gas; + reg->preserve_mask = preserve_mask; + reg->set_mask = set_mask; + reg->status_mask = status_mask; + return 0; +} + /** * pcc_parse_subspace_irq - Parse the PCC IRQ and PCC ACK register - * There should be one entry per PCC client. - * @id: PCC subspace index. - * @pcct_ss: Pointer to the ACPI subtable header under the PCCT. 
+ *
+ * @pchan: Pointer to the PCC channel info structure.
+ * @pcct_entry: Pointer to the ACPI subtable header.
 *
 * Return: 0 for Success, else errno.
 *
- * This gets called for each entry in the PCC table.
+ * There should be one entry per PCC channel. This gets called for each
+ * entry in the PCC table. This uses the PCCT Type1 structure for all
+ * applicable types (Type 1-4) to fetch the IRQ.
 */
-static int pcc_parse_subspace_irq(int id,
-				  struct acpi_pcct_hw_reduced *pcct_ss)
+static int pcc_parse_subspace_irq(struct pcc_chan_info *pchan,
+				  struct acpi_subtable_header *pcct_entry)
 {
-	pcc_doorbell_irq[id] = pcc_map_interrupt(pcct_ss->platform_interrupt,
-						 (u32)pcct_ss->flags);
-	if (pcc_doorbell_irq[id] <= 0) {
+	int ret = 0;
+	struct acpi_pcct_hw_reduced *pcct_ss;
+
+	if (pcct_entry->type < ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE ||
+	    pcct_entry->type > ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE)
+		return 0;
+
+	pcct_ss = (struct acpi_pcct_hw_reduced *)pcct_entry;
+	pchan->plat_irq = pcc_map_interrupt(pcct_ss->platform_interrupt,
+					    (u32)pcct_ss->flags);
+	if (pchan->plat_irq <= 0) {
 		pr_err("PCC GSI %d not registered\n",
 		       pcct_ss->platform_interrupt);
 		return -EINVAL;
 	}
+	pchan->plat_irq_flags = pcct_ss->flags;
 
-	if (pcct_ss->header.type
-	    == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) {
+	if (pcct_ss->header.type == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) {
 		struct acpi_pcct_hw_reduced_type2 *pcct2_ss = (void *)pcct_ss;
 
-		pcc_doorbell_ack_vaddr[id] = acpi_os_ioremap(
-				pcct2_ss->platform_ack_register.address,
-				pcct2_ss->platform_ack_register.bit_width / 8);
-		if (!pcc_doorbell_ack_vaddr[id]) {
-			pr_err("Failed to ioremap PCC ACK register\n");
-			return -ENOMEM;
-		}
+		ret = pcc_chan_reg_init(&pchan->plat_irq_ack,
+					&pcct2_ss->platform_ack_register,
+					pcct2_ss->ack_preserve_mask,
+					pcct2_ss->ack_write_mask, 0,
+					"PLAT IRQ ACK");
+
+	} else if (pcct_ss->header.type == ACPI_PCCT_TYPE_EXT_PCC_MASTER_SUBSPACE ||
+		   pcct_ss->header.type == ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE) {
+		struct acpi_pcct_ext_pcc_master *pcct_ext = (void *)pcct_ss;
+
+		ret = pcc_chan_reg_init(&pchan->plat_irq_ack,
+					&pcct_ext->platform_ack_register,
+					pcct_ext->ack_preserve_mask,
+					pcct_ext->ack_set_mask, 0,
+					"PLAT IRQ ACK");
 	}
 
-	return 0;
+	if (pcc_chan_plat_irq_can_be_shared(pchan) &&
+	    !pchan->plat_irq_ack.gas) {
+		pr_err("PCC subspace has level IRQ with no ACK register\n");
+		return -EINVAL;
	}
+
+	return ret;
 }
+
+/**
+ * pcc_parse_subspace_db_reg - Parse the PCC doorbell register
+ *
+ * @pchan: Pointer to the PCC channel info structure.
+ * @pcct_entry: Pointer to the ACPI subtable header.
+ *
+ * Return: 0 for Success, else errno.
+ */ +static int pcc_parse_subspace_db_reg(struct pcc_chan_info *pchan, + struct acpi_subtable_header *pcct_entry) +{ + int ret = 0; + + if (pcct_entry->type <= ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) { + struct acpi_pcct_subspace *pcct_ss; + + pcct_ss = (struct acpi_pcct_subspace *)pcct_entry; + + ret = pcc_chan_reg_init(&pchan->db, + &pcct_ss->doorbell_register, + pcct_ss->preserve_mask, + pcct_ss->write_mask, 0, "Doorbell"); + + } else { + struct acpi_pcct_ext_pcc_master *pcct_ext; + + pcct_ext = (struct acpi_pcct_ext_pcc_master *)pcct_entry; + + ret = pcc_chan_reg_init(&pchan->db, + &pcct_ext->doorbell_register, + pcct_ext->preserve_mask, + pcct_ext->write_mask, 0, "Doorbell"); + if (ret) + return ret; + + ret = pcc_chan_reg_init(&pchan->cmd_complete, + &pcct_ext->cmd_complete_register, + 0, 0, pcct_ext->cmd_complete_mask, + "Command Complete Check"); + if (ret) + return ret; + + ret = pcc_chan_reg_init(&pchan->cmd_update, + &pcct_ext->cmd_update_register, + pcct_ext->cmd_update_preserve_mask, + pcct_ext->cmd_update_set_mask, 0, + "Command Complete Update"); + if (ret) + return ret; + + ret = pcc_chan_reg_init(&pchan->error, + &pcct_ext->error_status_register, + ~pcct_ext->error_status_mask, 0, + pcct_ext->error_status_mask, + "Error Status"); + } + return ret; +} + +/** + * pcc_parse_subspace_shmem - Parse the PCC Shared Memory Region information + * + * @pchan: Pointer to the PCC channel info structure. + * @pcct_entry: Pointer to the ACPI subtable header. + * + */ +static void pcc_parse_subspace_shmem(struct pcc_chan_info *pchan, + struct acpi_subtable_header *pcct_entry) +{ + if (pcct_entry->type <= ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) { + struct acpi_pcct_subspace *pcct_ss = + (struct acpi_pcct_subspace *)pcct_entry; + + pchan->chan.shmem_base_addr = pcct_ss->base_address; + pchan->chan.shmem_size = pcct_ss->length; + pchan->chan.latency = pcct_ss->latency; + pchan->chan.max_access_rate = pcct_ss->max_access_rate; + pchan->chan.min_turnaround_time = pcct_ss->min_turnaround_time; + } else { + struct acpi_pcct_ext_pcc_master *pcct_ext = + (struct acpi_pcct_ext_pcc_master *)pcct_entry; + + pchan->chan.shmem_base_addr = pcct_ext->base_address; + pchan->chan.shmem_size = pcct_ext->length; + pchan->chan.latency = pcct_ext->latency; + pchan->chan.max_access_rate = pcct_ext->max_access_rate; + pchan->chan.min_turnaround_time = pcct_ext->min_turnaround_time; + } } /** @@ -437,16 +789,12 @@ static int pcc_parse_subspace_irq(int id, */ static int __init acpi_pcc_probe(void) { + int count, i, rc = 0; + acpi_status status; struct acpi_table_header *pcct_tbl; - struct acpi_subtable_header *pcct_entry; - struct acpi_table_pcct *acpi_pcct_tbl; struct acpi_subtable_proc proc[ACPI_PCCT_TYPE_RESERVED]; - int count, i, rc; - acpi_status status = AE_OK; - /* Search for PCCT */ status = acpi_get_table(ACPI_SIG_PCCT, 0, &pcct_tbl); - if (ACPI_FAILURE(status) || !pcct_tbl) return -ENODEV; @@ -466,32 +814,62 @@ static int __init acpi_pcc_probe(void) pr_warn("Error parsing PCC subspaces from PCCT\n"); else pr_warn("Invalid PCCT: %d PCC subspaces\n", count); - return -EINVAL; - } - pcc_mbox_channels = kcalloc(count, sizeof(struct mbox_chan), - GFP_KERNEL); - if (!pcc_mbox_channels) { - pr_err("Could not allocate space for PCC mbox channels\n"); - return -ENOMEM; + rc = -EINVAL; + } else { + pcc_chan_count = count; } - pcc_doorbell_vaddr = kcalloc(count, sizeof(void *), GFP_KERNEL); - if (!pcc_doorbell_vaddr) { + acpi_put_table(pcct_tbl); + + return rc; +} + +/** + * pcc_mbox_probe - Called when we 
find a match for the + * PCCT platform device. This is purely used to represent + * the PCCT as a virtual device for registering with the + * generic Mailbox framework. + * + * @pdev: Pointer to platform device returned when a match + * is found. + * + * Return: 0 for Success, else errno. + */ +static int pcc_mbox_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mbox_controller *pcc_mbox_ctrl; + struct mbox_chan *pcc_mbox_channels; + struct acpi_table_header *pcct_tbl; + struct acpi_subtable_header *pcct_entry; + struct acpi_table_pcct *acpi_pcct_tbl; + acpi_status status = AE_OK; + int i, rc, count = pcc_chan_count; + + /* Search for PCCT */ + status = acpi_get_table(ACPI_SIG_PCCT, 0, &pcct_tbl); + + if (ACPI_FAILURE(status) || !pcct_tbl) + return -ENODEV; + + pcc_mbox_channels = devm_kcalloc(dev, count, sizeof(*pcc_mbox_channels), + GFP_KERNEL); + if (!pcc_mbox_channels) { rc = -ENOMEM; - goto err_free_mbox; + goto err; } - pcc_doorbell_ack_vaddr = kcalloc(count, sizeof(void *), GFP_KERNEL); - if (!pcc_doorbell_ack_vaddr) { + chan_info = devm_kcalloc(dev, count, sizeof(*chan_info), GFP_KERNEL); + if (!chan_info) { rc = -ENOMEM; - goto err_free_db_vaddr; + goto err; } - pcc_doorbell_irq = kcalloc(count, sizeof(int), GFP_KERNEL); - if (!pcc_doorbell_irq) { + pcc_mbox_ctrl = devm_kzalloc(dev, sizeof(*pcc_mbox_ctrl), GFP_KERNEL); + if (!pcc_mbox_ctrl) { rc = -ENOMEM; - goto err_free_db_ack_vaddr; + goto err; } /* Point to the first PCC subspace entry */ @@ -500,88 +878,60 @@ static int __init acpi_pcc_probe(void) acpi_pcct_tbl = (struct acpi_table_pcct *) pcct_tbl; if (acpi_pcct_tbl->flags & ACPI_PCCT_DOORBELL) - pcc_mbox_ctrl.txdone_irq = true; + pcc_mbox_ctrl->txdone_irq = true; for (i = 0; i < count; i++) { - struct acpi_generic_address *db_reg; - struct acpi_pcct_subspace *pcct_ss; - pcc_mbox_channels[i].con_priv = pcct_entry; + struct pcc_chan_info *pchan = chan_info + i; - if (pcct_entry->type == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE || - pcct_entry->type == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) { - struct acpi_pcct_hw_reduced *pcct_hrss; + pcc_mbox_channels[i].con_priv = pchan; + pchan->chan.mchan = &pcc_mbox_channels[i]; - pcct_hrss = (struct acpi_pcct_hw_reduced *) pcct_entry; + if (pcct_entry->type == ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE && + !pcc_mbox_ctrl->txdone_irq) { + pr_err("Platform Interrupt flag must be set to 1"); + rc = -EINVAL; + goto err; + } - if (pcc_mbox_ctrl.txdone_irq) { - rc = pcc_parse_subspace_irq(i, pcct_hrss); - if (rc < 0) - goto err; - } + if (pcc_mbox_ctrl->txdone_irq) { + rc = pcc_parse_subspace_irq(pchan, pcct_entry); + if (rc < 0) + goto err; } - pcct_ss = (struct acpi_pcct_subspace *) pcct_entry; + rc = pcc_parse_subspace_db_reg(pchan, pcct_entry); + if (rc < 0) + goto err; + + pcc_parse_subspace_shmem(pchan, pcct_entry); - /* If doorbell is in system memory cache the virt address */ - db_reg = &pcct_ss->doorbell_register; - if (db_reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) - pcc_doorbell_vaddr[i] = acpi_os_ioremap(db_reg->address, - db_reg->bit_width/8); + pchan->type = pcct_entry->type; pcct_entry = (struct acpi_subtable_header *) ((unsigned long) pcct_entry + pcct_entry->length); } - pcc_mbox_ctrl.num_chans = count; + pcc_mbox_ctrl->num_chans = count; - pr_info("Detected %d PCC Subspaces\n", pcc_mbox_ctrl.num_chans); + pr_info("Detected %d PCC Subspaces\n", pcc_mbox_ctrl->num_chans); - return 0; + pcc_mbox_ctrl->chans = pcc_mbox_channels; + pcc_mbox_ctrl->ops = &pcc_chan_ops; + pcc_mbox_ctrl->dev = dev; + 
pr_info("Registering PCC driver as Mailbox controller\n"); + rc = mbox_controller_register(pcc_mbox_ctrl); + if (rc) + pr_err("Err registering PCC as Mailbox controller: %d\n", rc); + else + return 0; err: - kfree(pcc_doorbell_irq); -err_free_db_ack_vaddr: - kfree(pcc_doorbell_ack_vaddr); -err_free_db_vaddr: - kfree(pcc_doorbell_vaddr); -err_free_mbox: - kfree(pcc_mbox_channels); + acpi_put_table(pcct_tbl); return rc; } -/** - * pcc_mbox_probe - Called when we find a match for the - * PCCT platform device. This is purely used to represent - * the PCCT as a virtual device for registering with the - * generic Mailbox framework. - * - * @pdev: Pointer to platform device returned when a match - * is found. - * - * Return: 0 for Success, else errno. - */ -static int pcc_mbox_probe(struct platform_device *pdev) -{ - int ret = 0; - - pcc_mbox_ctrl.chans = pcc_mbox_channels; - pcc_mbox_ctrl.ops = &pcc_chan_ops; - pcc_mbox_ctrl.dev = &pdev->dev; - - pr_info("Registering PCC driver as Mailbox controller\n"); - ret = mbox_controller_register(&pcc_mbox_ctrl); - - if (ret) { - pr_err("Err registering PCC as Mailbox controller: %d\n", ret); - ret = -ENODEV; - } - - return ret; -} - -struct platform_driver pcc_mbox_driver = { +static struct platform_driver pcc_mbox_driver = { .probe = pcc_mbox_probe, .driver = { .name = "PCCT", - .owner = THIS_MODULE, }, }; @@ -606,6 +956,7 @@ static int __init pcc_init(void) if (IS_ERR(pcc_pdev)) { pr_debug("Err creating PCC platform bundle\n"); + pcc_chan_count = 0; return PTR_ERR(pcc_pdev); } diff --git a/drivers/mailbox/pl320-ipc.c b/drivers/mailbox/pl320-ipc.c index 2dbed87094d7..606f26a2a6fd 100644 --- a/drivers/mailbox/pl320-ipc.c +++ b/drivers/mailbox/pl320-ipc.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2012 Calxeda, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/types.h> #include <linux/err.h> @@ -56,18 +45,6 @@ static DEFINE_MUTEX(ipc_m1_lock); static DECLARE_COMPLETION(ipc_completion); static ATOMIC_NOTIFIER_HEAD(ipc_notifier); -static inline void set_destination(int source, int mbox) -{ - writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxDSET(mbox)); - writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxMSET(mbox)); -} - -static inline void clear_destination(int source, int mbox) -{ - writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxDCLEAR(mbox)); - writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxMCLEAR(mbox)); -} - static void __ipc_send(int mbox, u32 *data) { int i; @@ -84,7 +61,7 @@ static u32 __ipc_rcv(int mbox, u32 *data) return data[1]; } -/* blocking implmentation from the A9 side, not usuable in interrupts! */ +/* blocking implementation from the A9 side, not usable in interrupts! 
*/ int pl320_ipc_transmit(u32 *data) { int ret; @@ -175,7 +152,7 @@ err: return ret; } -static struct amba_id pl320_ids[] = { +static const struct amba_id pl320_ids[] = { { .id = 0x00041320, .mask = 0x000fffff, diff --git a/drivers/mailbox/platform_mhu.c b/drivers/mailbox/platform_mhu.c index d2502c5be130..834aecd720ac 100644 --- a/drivers/mailbox/platform_mhu.c +++ b/drivers/mailbox/platform_mhu.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2016 BayLibre SAS. * Author: Neil Armstrong <narmstrong@baylibre.com> @@ -5,15 +6,6 @@ * Copyright (C) 2013-2015 Fujitsu Semiconductor Ltd. * Copyright (C) 2015 Linaro Ltd. * Author: Jassi Brar <jaswinder.singh@linaro.org> - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include <linux/interrupt.h> @@ -23,6 +15,7 @@ #include <linux/slab.h> #include <linux/err.h> #include <linux/io.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/mailbox_controller.h> @@ -125,7 +118,6 @@ static int platform_mhu_probe(struct platform_device *pdev) int i, err; struct platform_mhu *mhu; struct device *dev = &pdev->dev; - struct resource *res; int platform_mhu_reg[MHU_CHANS] = { MHU_SEC_OFFSET, MHU_LP_OFFSET, MHU_HP_OFFSET }; @@ -135,8 +127,7 @@ static int platform_mhu_probe(struct platform_device *pdev) if (!mhu) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - mhu->base = devm_ioremap_resource(dev, res); + mhu->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(mhu->base)) { dev_err(dev, "ioremap failed\n"); return PTR_ERR(mhu->base); @@ -145,10 +136,8 @@ static int platform_mhu_probe(struct platform_device *pdev) for (i = 0; i < MHU_CHANS; i++) { mhu->chan[i].con_priv = &mhu->mlink[i]; mhu->mlink[i].irq = platform_get_irq(pdev, i); - if (mhu->mlink[i].irq < 0) { - dev_err(dev, "failed to get irq%d\n", i); + if (mhu->mlink[i].irq < 0) return mhu->mlink[i].irq; - } mhu->mlink[i].rx_reg = mhu->base + platform_mhu_reg[i]; mhu->mlink[i].tx_reg = mhu->mlink[i].rx_reg + TX_REG_OFFSET; } diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c index 3cf2937be149..d3a8f6b4a03b 100644 --- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c +++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c @@ -1,14 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2017, Linaro Ltd - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
*/ #include <linux/kernel.h> @@ -32,12 +24,40 @@ struct qcom_apcs_ipc { struct platform_device *clk; }; +struct qcom_apcs_ipc_data { + int offset; + char *clk_name; +}; + +static const struct qcom_apcs_ipc_data ipq6018_apcs_data = { + .offset = 8, .clk_name = "qcom,apss-ipq6018-clk" +}; + +static const struct qcom_apcs_ipc_data msm8916_apcs_data = { + .offset = 8, .clk_name = "qcom-apcs-msm8916-clk" +}; + +static const struct qcom_apcs_ipc_data msm8994_apcs_data = { + .offset = 8, .clk_name = NULL +}; + +static const struct qcom_apcs_ipc_data msm8996_apcs_data = { + .offset = 16, .clk_name = "qcom-apcs-msm8996-clk" +}; + +static const struct qcom_apcs_ipc_data apps_shared_apcs_data = { + .offset = 12, .clk_name = NULL +}; + +static const struct qcom_apcs_ipc_data sdx55_apcs_data = { + .offset = 0x1008, .clk_name = "qcom-sdx55-acps-clk" +}; + static const struct regmap_config apcs_regmap_config = { .reg_bits = 32, .reg_stride = 4, .val_bits = 32, - .max_register = 0x1000, - .fast_io = true, + .max_register = 0x1008, }; static int qcom_apcs_ipc_send_data(struct mbox_chan *chan, void *data) @@ -55,11 +75,9 @@ static const struct mbox_chan_ops qcom_apcs_ipc_ops = { static int qcom_apcs_ipc_probe(struct platform_device *pdev) { - struct device_node *np = pdev->dev.of_node; struct qcom_apcs_ipc *apcs; + const struct qcom_apcs_ipc_data *apcs_data; struct regmap *regmap; - struct resource *res; - unsigned long offset; void __iomem *base; unsigned long i; int ret; @@ -68,8 +86,7 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev) if (!apcs) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(&pdev->dev, res); + base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); @@ -77,10 +94,10 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev) if (IS_ERR(regmap)) return PTR_ERR(regmap); - offset = (unsigned long)of_device_get_match_data(&pdev->dev); + apcs_data = of_device_get_match_data(&pdev->dev); apcs->regmap = regmap; - apcs->offset = offset; + apcs->offset = apcs_data->offset; /* Initialize channel identifiers */ for (i = 0; i < ARRAY_SIZE(apcs->mbox_chans); i++) @@ -97,10 +114,19 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev) return ret; } - if (of_device_is_compatible(np, "qcom,msm8916-apcs-kpss-global")) { - apcs->clk = platform_device_register_data(&pdev->dev, - "qcom-apcs-msm8916-clk", - -1, NULL, 0); + if (apcs_data->clk_name) { + struct device_node *np = of_get_child_by_name(pdev->dev.of_node, + "clock-controller"); + struct platform_device_info pdevinfo = { + .parent = &pdev->dev, + .name = apcs_data->clk_name, + .id = PLATFORM_DEVID_AUTO, + .fwnode = of_fwnode_handle(np) ?: pdev->dev.fwnode, + .of_node_reused = !np, + }; + + apcs->clk = platform_device_register_full(&pdevinfo); + of_node_put(np); if (IS_ERR(apcs->clk)) dev_err(&pdev->dev, "failed to register APCS clk\n"); } @@ -110,23 +136,39 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev) return 0; } -static int qcom_apcs_ipc_remove(struct platform_device *pdev) +static void qcom_apcs_ipc_remove(struct platform_device *pdev) { struct qcom_apcs_ipc *apcs = platform_get_drvdata(pdev); struct platform_device *clk = apcs->clk; platform_device_unregister(clk); - - return 0; } /* .data is the offset of the ipc register within the global block */ static const struct of_device_id qcom_apcs_ipc_of_match[] = { - { .compatible = "qcom,msm8916-apcs-kpss-global", .data = (void *)8 }, - { .compatible = 
"qcom,msm8996-apcs-hmss-global", .data = (void *)16 }, - { .compatible = "qcom,msm8998-apcs-hmss-global", .data = (void *)8 }, - { .compatible = "qcom,qcs404-apcs-apps-global", .data = (void *)8 }, - { .compatible = "qcom,sdm845-apss-shared", .data = (void *)12 }, + { .compatible = "qcom,ipq6018-apcs-apps-global", .data = &ipq6018_apcs_data }, + { .compatible = "qcom,msm8916-apcs-kpss-global", .data = &msm8916_apcs_data }, + { .compatible = "qcom,msm8939-apcs-kpss-global", .data = &msm8916_apcs_data }, + { .compatible = "qcom,msm8953-apcs-kpss-global", .data = &msm8994_apcs_data }, + { .compatible = "qcom,msm8994-apcs-kpss-global", .data = &msm8994_apcs_data }, + { .compatible = "qcom,msm8996-apcs-hmss-global", .data = &msm8996_apcs_data }, + { .compatible = "qcom,qcm2290-apcs-hmss-global", .data = &msm8994_apcs_data }, + { .compatible = "qcom,sdm845-apss-shared", .data = &apps_shared_apcs_data }, + { .compatible = "qcom,sdx55-apcs-gcc", .data = &sdx55_apcs_data }, + /* Do not add any more entries using existing driver data */ + { .compatible = "qcom,msm8976-apcs-kpss-global", .data = &msm8994_apcs_data }, + { .compatible = "qcom,msm8998-apcs-hmss-global", .data = &msm8994_apcs_data }, + { .compatible = "qcom,qcs404-apcs-apps-global", .data = &msm8916_apcs_data }, + { .compatible = "qcom,sdm660-apcs-hmss-global", .data = &msm8994_apcs_data }, + { .compatible = "qcom,sm4250-apcs-hmss-global", .data = &msm8994_apcs_data }, + { .compatible = "qcom,sm6125-apcs-hmss-global", .data = &msm8994_apcs_data }, + { .compatible = "qcom,sm6115-apcs-hmss-global", .data = &msm8994_apcs_data }, + { .compatible = "qcom,ipq5332-apcs-apps-global", .data = &ipq6018_apcs_data }, + { .compatible = "qcom,ipq5424-apcs-apps-global", .data = &msm8994_apcs_data }, + { .compatible = "qcom,ipq8074-apcs-apps-global", .data = &ipq6018_apcs_data }, + { .compatible = "qcom,sc7180-apss-shared", .data = &apps_shared_apcs_data }, + { .compatible = "qcom,sc8180x-apss-shared", .data = &apps_shared_apcs_data }, + { .compatible = "qcom,sm8150-apss-shared", .data = &apps_shared_apcs_data }, {} }; MODULE_DEVICE_TABLE(of, qcom_apcs_ipc_of_match); diff --git a/drivers/mailbox/qcom-cpucp-mbox.c b/drivers/mailbox/qcom-cpucp-mbox.c new file mode 100644 index 000000000000..44f4ed15f818 --- /dev/null +++ b/drivers/mailbox/qcom-cpucp-mbox.c @@ -0,0 +1,187 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include <linux/bitops.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/mailbox_controller.h> +#include <linux/module.h> +#include <linux/platform_device.h> + +#define APSS_CPUCP_IPC_CHAN_SUPPORTED 3 +#define APSS_CPUCP_MBOX_CMD_OFF 0x4 + +/* Tx Registers */ +#define APSS_CPUCP_TX_MBOX_CMD(i) (0x100 + ((i) * 8)) + +/* Rx Registers */ +#define APSS_CPUCP_RX_MBOX_CMD(i) (0x100 + ((i) * 8)) +#define APSS_CPUCP_RX_MBOX_MAP 0x4000 +#define APSS_CPUCP_RX_MBOX_STAT 0x4400 +#define APSS_CPUCP_RX_MBOX_CLEAR 0x4800 +#define APSS_CPUCP_RX_MBOX_EN 0x4c00 +#define APSS_CPUCP_RX_MBOX_CMD_MASK GENMASK_ULL(63, 0) + +/** + * struct qcom_cpucp_mbox - Holder for the mailbox driver + * @chans: The mailbox channel + * @mbox: The mailbox controller + * @tx_base: Base address of the CPUCP tx registers + * @rx_base: Base address of the CPUCP rx registers + */ +struct qcom_cpucp_mbox { + struct mbox_chan chans[APSS_CPUCP_IPC_CHAN_SUPPORTED]; + struct mbox_controller mbox; + void __iomem *tx_base; + void __iomem *rx_base; +}; + +static inline int channel_number(struct mbox_chan *chan) +{ + return chan - chan->mbox->chans; +} + +static irqreturn_t qcom_cpucp_mbox_irq_fn(int irq, void *data) +{ + struct qcom_cpucp_mbox *cpucp = data; + u64 status; + int i; + + status = readq(cpucp->rx_base + APSS_CPUCP_RX_MBOX_STAT); + + for_each_set_bit(i, (unsigned long *)&status, APSS_CPUCP_IPC_CHAN_SUPPORTED) { + u32 val = readl(cpucp->rx_base + APSS_CPUCP_RX_MBOX_CMD(i) + APSS_CPUCP_MBOX_CMD_OFF); + struct mbox_chan *chan = &cpucp->chans[i]; + unsigned long flags; + + /* Provide mutual exclusion with changes to chan->cl */ + spin_lock_irqsave(&chan->lock, flags); + if (chan->cl) + mbox_chan_received_data(chan, &val); + writeq(BIT(i), cpucp->rx_base + APSS_CPUCP_RX_MBOX_CLEAR); + spin_unlock_irqrestore(&chan->lock, flags); + } + + return IRQ_HANDLED; +} + +static int qcom_cpucp_mbox_startup(struct mbox_chan *chan) +{ + struct qcom_cpucp_mbox *cpucp = container_of(chan->mbox, struct qcom_cpucp_mbox, mbox); + unsigned long chan_id = channel_number(chan); + u64 val; + + val = readq(cpucp->rx_base + APSS_CPUCP_RX_MBOX_EN); + val |= BIT(chan_id); + writeq(val, cpucp->rx_base + APSS_CPUCP_RX_MBOX_EN); + + return 0; +} + +static void qcom_cpucp_mbox_shutdown(struct mbox_chan *chan) +{ + struct qcom_cpucp_mbox *cpucp = container_of(chan->mbox, struct qcom_cpucp_mbox, mbox); + unsigned long chan_id = channel_number(chan); + u64 val; + + val = readq(cpucp->rx_base + APSS_CPUCP_RX_MBOX_EN); + val &= ~BIT(chan_id); + writeq(val, cpucp->rx_base + APSS_CPUCP_RX_MBOX_EN); +} + +static int qcom_cpucp_mbox_send_data(struct mbox_chan *chan, void *data) +{ + struct qcom_cpucp_mbox *cpucp = container_of(chan->mbox, struct qcom_cpucp_mbox, mbox); + unsigned long chan_id = channel_number(chan); + u32 *val = data; + + writel(*val, cpucp->tx_base + APSS_CPUCP_TX_MBOX_CMD(chan_id) + APSS_CPUCP_MBOX_CMD_OFF); + + return 0; +} + +static const struct mbox_chan_ops qcom_cpucp_mbox_chan_ops = { + .startup = qcom_cpucp_mbox_startup, + .send_data = qcom_cpucp_mbox_send_data, + .shutdown = qcom_cpucp_mbox_shutdown +}; + +static int qcom_cpucp_mbox_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct qcom_cpucp_mbox *cpucp; + struct mbox_controller *mbox; + int irq, ret; + + cpucp = devm_kzalloc(dev, sizeof(*cpucp), GFP_KERNEL); + if (!cpucp) + return -ENOMEM; + + cpucp->rx_base = devm_of_iomap(dev, dev->of_node, 0, NULL); + if 
(IS_ERR(cpucp->rx_base)) + return PTR_ERR(cpucp->rx_base); + + cpucp->tx_base = devm_of_iomap(dev, dev->of_node, 1, NULL); + if (IS_ERR(cpucp->tx_base)) + return PTR_ERR(cpucp->tx_base); + + writeq(0, cpucp->rx_base + APSS_CPUCP_RX_MBOX_EN); + writeq(0, cpucp->rx_base + APSS_CPUCP_RX_MBOX_CLEAR); + writeq(0, cpucp->rx_base + APSS_CPUCP_RX_MBOX_MAP); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + ret = devm_request_irq(dev, irq, qcom_cpucp_mbox_irq_fn, + IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND, "apss_cpucp_mbox", cpucp); + if (ret < 0) + return dev_err_probe(dev, ret, "Failed to register irq: %d\n", irq); + + writeq(APSS_CPUCP_RX_MBOX_CMD_MASK, cpucp->rx_base + APSS_CPUCP_RX_MBOX_MAP); + + mbox = &cpucp->mbox; + mbox->dev = dev; + mbox->num_chans = APSS_CPUCP_IPC_CHAN_SUPPORTED; + mbox->chans = cpucp->chans; + mbox->ops = &qcom_cpucp_mbox_chan_ops; + + ret = devm_mbox_controller_register(dev, mbox); + if (ret) + return dev_err_probe(dev, ret, "Failed to create mailbox\n"); + + return 0; +} + +static const struct of_device_id qcom_cpucp_mbox_of_match[] = { + { .compatible = "qcom,x1e80100-cpucp-mbox" }, + {} +}; +MODULE_DEVICE_TABLE(of, qcom_cpucp_mbox_of_match); + +static struct platform_driver qcom_cpucp_mbox_driver = { + .probe = qcom_cpucp_mbox_probe, + .driver = { + .name = "qcom_cpucp_mbox", + .of_match_table = qcom_cpucp_mbox_of_match, + }, +}; + +static int __init qcom_cpucp_mbox_init(void) +{ + return platform_driver_register(&qcom_cpucp_mbox_driver); +} +core_initcall(qcom_cpucp_mbox_init); + +static void __exit qcom_cpucp_mbox_exit(void) +{ + platform_driver_unregister(&qcom_cpucp_mbox_driver); +} +module_exit(qcom_cpucp_mbox_exit); + +MODULE_DESCRIPTION("QTI CPUCP MBOX Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c new file mode 100644 index 000000000000..d957d989c0ce --- /dev/null +++ b/drivers/mailbox/qcom-ipcc.c @@ -0,0 +1,382 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. 
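The CPUCP interrupt handler earlier in this file reads a 64-bit status word, walks its set bits with for_each_set_bit(), delivers each channel's payload under the channel lock, and acknowledges by writing the channel's bit to the CLEAR register. The scan itself reduces to lowest-set-bit iteration; a standalone model (plain C, with the register I/O replaced by an ordinary value):

	#include <stdint.h>
	#include <stdio.h>

	#define NUM_CHANNELS 3

	static void scan_status(uint64_t status)
	{
		while (status) {
			int i = __builtin_ctzll(status); /* lowest pending channel */

			if (i >= NUM_CHANNELS)
				break;
			printf("channel %d pending\n", i);
			status &= status - 1; /* clear the bit just handled */
		}
	}

	int main(void)
	{
		scan_status(0x5); /* channels 0 and 2 pending */
		return 0;
	}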
+ */ + +#include <linux/bitfield.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/mailbox_controller.h> +#include <linux/module.h> +#include <linux/platform_device.h> + +#include <dt-bindings/mailbox/qcom-ipcc.h> + +/* IPCC Register offsets */ +#define IPCC_REG_CONFIG 0x08 +#define IPCC_REG_SEND_ID 0x0c +#define IPCC_REG_RECV_ID 0x10 +#define IPCC_REG_RECV_SIGNAL_ENABLE 0x14 +#define IPCC_REG_RECV_SIGNAL_DISABLE 0x18 +#define IPCC_REG_RECV_SIGNAL_CLEAR 0x1c +#define IPCC_REG_CLIENT_CLEAR 0x38 + +#define IPCC_CLEAR_ON_RECV_RD BIT(0) +#define IPCC_SIGNAL_ID_MASK GENMASK(15, 0) +#define IPCC_CLIENT_ID_MASK GENMASK(31, 16) + +#define IPCC_NO_PENDING_IRQ GENMASK(31, 0) + +/** + * struct qcom_ipcc_chan_info - Per-mailbox-channel info + * @client_id: The client-id to which the interrupt has to be triggered + * @signal_id: The signal-id to which the interrupt has to be triggered + */ +struct qcom_ipcc_chan_info { + u16 client_id; + u16 signal_id; +}; + +/** + * struct qcom_ipcc - Holder for the mailbox driver + * @dev: Device associated with this instance + * @base: Base address of the IPCC frame associated to APSS + * @irq_domain: The irq_domain associated with this instance + * @chans: The mailbox channels array + * @mchan: The per-mailbox channel info array + * @mbox: The mailbox controller + * @num_chans: Number of @chans elements + * @irq: Summary irq + */ +struct qcom_ipcc { + struct device *dev; + void __iomem *base; + struct irq_domain *irq_domain; + struct mbox_chan *chans; + struct qcom_ipcc_chan_info *mchan; + struct mbox_controller mbox; + int num_chans; + int irq; +}; + +static inline struct qcom_ipcc *to_qcom_ipcc(struct mbox_controller *mbox) +{ + return container_of(mbox, struct qcom_ipcc, mbox); +} + +static inline u32 qcom_ipcc_get_hwirq(u16 client_id, u16 signal_id) +{ + return FIELD_PREP(IPCC_CLIENT_ID_MASK, client_id) | + FIELD_PREP(IPCC_SIGNAL_ID_MASK, signal_id); +} + +static irqreturn_t qcom_ipcc_irq_fn(int irq, void *data) +{ + struct qcom_ipcc *ipcc = data; + u32 hwirq; + int virq; + + for (;;) { + hwirq = readl(ipcc->base + IPCC_REG_RECV_ID); + if (hwirq == IPCC_NO_PENDING_IRQ) + break; + + virq = irq_find_mapping(ipcc->irq_domain, hwirq); + writel(hwirq, ipcc->base + IPCC_REG_RECV_SIGNAL_CLEAR); + generic_handle_irq(virq); + } + + return IRQ_HANDLED; +} + +static void qcom_ipcc_mask_irq(struct irq_data *irqd) +{ + struct qcom_ipcc *ipcc = irq_data_get_irq_chip_data(irqd); + irq_hw_number_t hwirq = irqd_to_hwirq(irqd); + + writel(hwirq, ipcc->base + IPCC_REG_RECV_SIGNAL_DISABLE); +} + +static void qcom_ipcc_unmask_irq(struct irq_data *irqd) +{ + struct qcom_ipcc *ipcc = irq_data_get_irq_chip_data(irqd); + irq_hw_number_t hwirq = irqd_to_hwirq(irqd); + + writel(hwirq, ipcc->base + IPCC_REG_RECV_SIGNAL_ENABLE); +} + +static struct irq_chip qcom_ipcc_irq_chip = { + .name = "ipcc", + .irq_mask = qcom_ipcc_mask_irq, + .irq_unmask = qcom_ipcc_unmask_irq, + .flags = IRQCHIP_SKIP_SET_WAKE, +}; + +static int qcom_ipcc_domain_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hw) +{ + struct qcom_ipcc *ipcc = d->host_data; + + irq_set_chip_and_handler(irq, &qcom_ipcc_irq_chip, handle_level_irq); + irq_set_chip_data(irq, ipcc); + irq_set_noprobe(irq); + + return 0; +} + +static int qcom_ipcc_domain_xlate(struct irq_domain *d, + struct device_node *node, const u32 *intspec, + unsigned int intsize, + unsigned long *out_hwirq, + unsigned int *out_type) +{ + if (intsize != 3) + return -EINVAL; + + *out_hwirq = 
qcom_ipcc_get_hwirq(intspec[0], intspec[1]); + *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK; + + return 0; +} + +static const struct irq_domain_ops qcom_ipcc_irq_ops = { + .map = qcom_ipcc_domain_map, + .xlate = qcom_ipcc_domain_xlate, +}; + +static int qcom_ipcc_mbox_send_data(struct mbox_chan *chan, void *data) +{ + struct qcom_ipcc *ipcc = to_qcom_ipcc(chan->mbox); + struct qcom_ipcc_chan_info *mchan = chan->con_priv; + u32 hwirq; + + hwirq = qcom_ipcc_get_hwirq(mchan->client_id, mchan->signal_id); + writel(hwirq, ipcc->base + IPCC_REG_SEND_ID); + + return 0; +} + +static void qcom_ipcc_mbox_shutdown(struct mbox_chan *chan) +{ + chan->con_priv = NULL; +} + +static struct mbox_chan *qcom_ipcc_mbox_xlate(struct mbox_controller *mbox, + const struct of_phandle_args *ph) +{ + struct qcom_ipcc *ipcc = to_qcom_ipcc(mbox); + struct qcom_ipcc_chan_info *mchan; + struct mbox_chan *chan; + struct device *dev; + int chan_id; + + dev = ipcc->dev; + + if (ph->args_count != 2) + return ERR_PTR(-EINVAL); + + for (chan_id = 0; chan_id < mbox->num_chans; chan_id++) { + chan = &ipcc->chans[chan_id]; + mchan = chan->con_priv; + + if (!mchan) + break; + else if (mchan->client_id == ph->args[0] && + mchan->signal_id == ph->args[1]) + return ERR_PTR(-EBUSY); + } + + if (chan_id >= mbox->num_chans) + return ERR_PTR(-EBUSY); + + mchan = devm_kzalloc(dev, sizeof(*mchan), GFP_KERNEL); + if (!mchan) + return ERR_PTR(-ENOMEM); + + mchan->client_id = ph->args[0]; + mchan->signal_id = ph->args[1]; + chan->con_priv = mchan; + + return chan; +} + +static const struct mbox_chan_ops ipcc_mbox_chan_ops = { + .send_data = qcom_ipcc_mbox_send_data, + .shutdown = qcom_ipcc_mbox_shutdown, +}; + +static int qcom_ipcc_setup_mbox(struct qcom_ipcc *ipcc, + struct device_node *controller_dn) +{ + struct of_phandle_args curr_ph; + struct device_node *client_dn; + struct mbox_controller *mbox; + struct device *dev = ipcc->dev; + int i, j, ret; + + /* + * Find out the number of clients interested in this mailbox + * and create channels accordingly. 
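Throughout the IPCC driver a signal is identified by a single 32-bit token, with the client ID in bits 31:16 and the signal ID in bits 15:0; qcom_ipcc_get_hwirq() builds it with FIELD_PREP, and both the irq domain and qcom_ipcc_mbox_send_data() reuse it unchanged. The pack/unpack arithmetic as a standalone check:

	#include <assert.h>
	#include <stdint.h>

	#define SIGNAL_MASK  0xffffU /* bits 15:0 */
	#define CLIENT_SHIFT 16      /* client ID lives in bits 31:16 */

	static uint32_t ipcc_pack(uint16_t client, uint16_t signal)
	{
		return ((uint32_t)client << CLIENT_SHIFT) | signal;
	}

	int main(void)
	{
		uint32_t hwirq = ipcc_pack(3, 42);

		assert(hwirq >> CLIENT_SHIFT == 3);
		assert((hwirq & SIGNAL_MASK) == 42);
		return 0;
	}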
+ */ + ipcc->num_chans = 0; + for_each_node_with_property(client_dn, "mboxes") { + if (!of_device_is_available(client_dn)) + continue; + i = of_count_phandle_with_args(client_dn, + "mboxes", "#mbox-cells"); + for (j = 0; j < i; j++) { + ret = of_parse_phandle_with_args(client_dn, "mboxes", + "#mbox-cells", j, &curr_ph); + of_node_put(curr_ph.np); + if (!ret && curr_ph.np == controller_dn) + ipcc->num_chans++; + } + } + + /* If no clients are found, skip registering as a mbox controller */ + if (!ipcc->num_chans) + return 0; + + ipcc->chans = devm_kcalloc(dev, ipcc->num_chans, + sizeof(struct mbox_chan), GFP_KERNEL); + if (!ipcc->chans) + return -ENOMEM; + + mbox = &ipcc->mbox; + mbox->dev = dev; + mbox->num_chans = ipcc->num_chans; + mbox->chans = ipcc->chans; + mbox->ops = &ipcc_mbox_chan_ops; + mbox->of_xlate = qcom_ipcc_mbox_xlate; + mbox->txdone_irq = false; + mbox->txdone_poll = false; + + return devm_mbox_controller_register(dev, mbox); +} + +static int qcom_ipcc_pm_resume(struct device *dev) +{ + struct qcom_ipcc *ipcc = dev_get_drvdata(dev); + u32 hwirq; + int virq; + + hwirq = readl(ipcc->base + IPCC_REG_RECV_ID); + if (hwirq == IPCC_NO_PENDING_IRQ) + return 0; + + virq = irq_find_mapping(ipcc->irq_domain, hwirq); + + dev_dbg(dev, "virq: %d triggered client-id: %ld; signal-id: %ld\n", virq, + FIELD_GET(IPCC_CLIENT_ID_MASK, hwirq), FIELD_GET(IPCC_SIGNAL_ID_MASK, hwirq)); + + return 0; +} + +static int qcom_ipcc_probe(struct platform_device *pdev) +{ + struct qcom_ipcc *ipcc; + u32 config_value; + static int id; + char *name; + int ret; + + ipcc = devm_kzalloc(&pdev->dev, sizeof(*ipcc), GFP_KERNEL); + if (!ipcc) + return -ENOMEM; + + ipcc->dev = &pdev->dev; + + ipcc->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(ipcc->base)) + return PTR_ERR(ipcc->base); + + /* + * It is possible that boot firmware is using the same IPCC instance + * as of the HLOS and it has kept CLEAR_ON_RECV_RD set which basically + * means Interrupt pending registers are cleared when RECV_ID is read. + * The register automatically updates to the next pending interrupt/client + * status based on priority. 
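The fixup that follows this comment is a plain read-modify-write: if firmware left IPCC_CLEAR_ON_RECV_RD set, merely reading RECV_ID would silently acknowledge pending interrupts and race with the driver's explicit write to RECV_SIGNAL_CLEAR, so the bit is cleared before the handler can run. Modelled standalone:

	#include <stdint.h>

	#define CLEAR_ON_RECV_RD (1U << 0)

	/* Return the config value with the auto-ack-on-read bit cleared */
	static uint32_t fixup_config(uint32_t config)
	{
		if (config & CLEAR_ON_RECV_RD)
			config &= ~CLEAR_ON_RECV_RD;
		return config;
	}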
+ */ + config_value = readl(ipcc->base + IPCC_REG_CONFIG); + if (config_value & IPCC_CLEAR_ON_RECV_RD) { + config_value &= ~(IPCC_CLEAR_ON_RECV_RD); + writel(config_value, ipcc->base + IPCC_REG_CONFIG); + } + + ipcc->irq = platform_get_irq(pdev, 0); + if (ipcc->irq < 0) + return ipcc->irq; + + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "ipcc_%d", id++); + if (!name) + return -ENOMEM; + + ipcc->irq_domain = irq_domain_create_tree(dev_fwnode(&pdev->dev), &qcom_ipcc_irq_ops, ipcc); + if (!ipcc->irq_domain) + return -ENOMEM; + + ret = qcom_ipcc_setup_mbox(ipcc, pdev->dev.of_node); + if (ret) + goto err_mbox; + + ret = devm_request_irq(&pdev->dev, ipcc->irq, qcom_ipcc_irq_fn, + IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND | + IRQF_NO_THREAD, name, ipcc); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to register the irq: %d\n", ret); + goto err_req_irq; + } + + platform_set_drvdata(pdev, ipcc); + + return 0; + +err_req_irq: + if (ipcc->num_chans) + mbox_controller_unregister(&ipcc->mbox); +err_mbox: + irq_domain_remove(ipcc->irq_domain); + + return ret; +} + +static void qcom_ipcc_remove(struct platform_device *pdev) +{ + struct qcom_ipcc *ipcc = platform_get_drvdata(pdev); + + disable_irq_wake(ipcc->irq); + irq_domain_remove(ipcc->irq_domain); +} + +static const struct of_device_id qcom_ipcc_of_match[] = { + { .compatible = "qcom,ipcc"}, + {} +}; +MODULE_DEVICE_TABLE(of, qcom_ipcc_of_match); + +static const struct dev_pm_ops qcom_ipcc_dev_pm_ops = { + NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, qcom_ipcc_pm_resume) +}; + +static struct platform_driver qcom_ipcc_driver = { + .probe = qcom_ipcc_probe, + .remove = qcom_ipcc_remove, + .driver = { + .name = "qcom-ipcc", + .of_match_table = qcom_ipcc_of_match, + .suppress_bind_attrs = true, + .pm = pm_sleep_ptr(&qcom_ipcc_dev_pm_ops), + }, +}; + +static int __init qcom_ipcc_init(void) +{ + return platform_driver_register(&qcom_ipcc_driver); +} +arch_initcall(qcom_ipcc_init); + +MODULE_AUTHOR("Venkata Narendra Kumar Gutta <vnkgutta@codeaurora.org>"); +MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>"); +MODULE_DESCRIPTION("Qualcomm Technologies, Inc. IPCC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mailbox/riscv-sbi-mpxy-mbox.c b/drivers/mailbox/riscv-sbi-mpxy-mbox.c new file mode 100644 index 000000000000..7c9c006b7244 --- /dev/null +++ b/drivers/mailbox/riscv-sbi-mpxy-mbox.c @@ -0,0 +1,1019 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * RISC-V SBI Message Proxy (MPXY) mailbox controller driver + * + * Copyright (C) 2025 Ventana Micro Systems Inc. + */ + +#include <linux/acpi.h> +#include <linux/cpu.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/irqchip/riscv-imsic.h> +#include <linux/mailbox_controller.h> +#include <linux/mailbox/riscv-rpmi-message.h> +#include <linux/minmax.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/msi.h> +#include <linux/of_irq.h> +#include <linux/percpu.h> +#include <linux/platform_device.h> +#include <linux/smp.h> +#include <linux/string.h> +#include <linux/types.h> +#include <asm/byteorder.h> +#include <asm/sbi.h> + +/* ====== SBI MPXY extension data structures ====== */ + +/* SBI MPXY MSI related channel attributes */ +struct sbi_mpxy_msi_info { + /* Lower 32-bits of the MSI target address */ + u32 msi_addr_lo; + /* Upper 32-bits of the MSI target address */ + u32 msi_addr_hi; + /* MSI data value */ + u32 msi_data; +}; + +/* + * SBI MPXY standard channel attributes. 
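The attribute structures defined below are overlaid word-for-word on the shared-memory buffer: mpxy_read_attrs() copies sizeof(attrs)/sizeof(u32) consecutive 32-bit words, so each struct must list exactly one u32 per attribute, in the spec's order, with no padding. A compile-time guard in that spirit (a sketch, not taken from the driver):

	#include <stdint.h>

	struct channel_attrs_sketch {
		uint32_t msg_proto_id;
		uint32_t msg_proto_version;
		uint32_t msg_max_len;
		/* ... remaining attributes, one u32 each, in spec order */
	};

	/* Any padding would desynchronize the overlay copy */
	_Static_assert(sizeof(struct channel_attrs_sketch) ==
		       3 * sizeof(uint32_t),
		       "one u32 per attribute, no padding");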
+ * + * NOTE: The sequence of attribute fields are as-per the + * defined sequence in the attribute table in spec (or + * as-per the enum sbi_mpxy_attribute_id). + */ +struct sbi_mpxy_channel_attrs { + /* Message protocol ID */ + u32 msg_proto_id; + /* Message protocol version */ + u32 msg_proto_version; + /* Message protocol maximum message length */ + u32 msg_max_len; + /* Message protocol message send timeout in microseconds */ + u32 msg_send_timeout; + /* Message protocol message completion timeout in microseconds */ + u32 msg_completion_timeout; + /* Bit array for channel capabilities */ + u32 capability; + /* SSE event ID */ + u32 sse_event_id; + /* MSI enable/disable control knob */ + u32 msi_control; + /* Channel MSI info */ + struct sbi_mpxy_msi_info msi_info; + /* Events state control */ + u32 events_state_ctrl; +}; + +/* + * RPMI specific SBI MPXY channel attributes. + * + * NOTE: The sequence of attribute fields are as-per the + * defined sequence in the attribute table in spec (or + * as-per the enum sbi_mpxy_rpmi_attribute_id). + */ +struct sbi_mpxy_rpmi_channel_attrs { + /* RPMI service group ID */ + u32 servicegroup_id; + /* RPMI service group version */ + u32 servicegroup_version; + /* RPMI implementation ID */ + u32 impl_id; + /* RPMI implementation version */ + u32 impl_version; +}; + +/* SBI MPXY channel IDs data in shared memory */ +struct sbi_mpxy_channel_ids_data { + /* Remaining number of channel ids */ + __le32 remaining; + /* Returned channel ids in current function call */ + __le32 returned; + /* Returned channel id array */ + __le32 channel_array[]; +}; + +/* SBI MPXY notification data in shared memory */ +struct sbi_mpxy_notification_data { + /* Remaining number of notification events */ + __le32 remaining; + /* Number of notification events returned */ + __le32 returned; + /* Number of notification events lost */ + __le32 lost; + /* Reserved for future use */ + __le32 reserved; + /* Returned channel id array */ + u8 events_data[]; +}; + +/* ====== MPXY data structures & helper routines ====== */ + +/* MPXY Per-CPU or local context */ +struct mpxy_local { + /* Shared memory base address */ + void *shmem; + /* Shared memory physical address */ + phys_addr_t shmem_phys_addr; + /* Flag representing whether shared memory is active or not */ + bool shmem_active; +}; + +static DEFINE_PER_CPU(struct mpxy_local, mpxy_local); +static unsigned long mpxy_shmem_size; +static bool mpxy_shmem_init_done; + +static int mpxy_get_channel_count(u32 *channel_count) +{ + struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local); + struct sbi_mpxy_channel_ids_data *sdata = mpxy->shmem; + u32 remaining, returned; + struct sbiret sret; + + if (!mpxy->shmem_active) + return -ENODEV; + if (!channel_count) + return -EINVAL; + + get_cpu(); + + /* Get the remaining and returned fields to calculate total */ + sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_GET_CHANNEL_IDS, + 0, 0, 0, 0, 0, 0); + if (sret.error) + goto err_put_cpu; + + remaining = le32_to_cpu(sdata->remaining); + returned = le32_to_cpu(sdata->returned); + *channel_count = remaining + returned; + +err_put_cpu: + put_cpu(); + return sbi_err_map_linux_errno(sret.error); +} + +static int mpxy_get_channel_ids(u32 channel_count, u32 *channel_ids) +{ + struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local); + struct sbi_mpxy_channel_ids_data *sdata = mpxy->shmem; + u32 remaining, returned, count, start_index = 0; + struct sbiret sret; + + if (!mpxy->shmem_active) + return -ENODEV; + if (!channel_count || !channel_ids) + return -EINVAL; + + 
get_cpu(); + + do { + sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_GET_CHANNEL_IDS, + start_index, 0, 0, 0, 0, 0); + if (sret.error) + goto err_put_cpu; + + remaining = le32_to_cpu(sdata->remaining); + returned = le32_to_cpu(sdata->returned); + + count = returned < (channel_count - start_index) ? + returned : (channel_count - start_index); + memcpy_from_le32(&channel_ids[start_index], sdata->channel_array, count); + start_index += count; + } while (remaining && start_index < channel_count); + +err_put_cpu: + put_cpu(); + return sbi_err_map_linux_errno(sret.error); +} + +static int mpxy_read_attrs(u32 channel_id, u32 base_attrid, u32 attr_count, + u32 *attrs_buf) +{ + struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local); + struct sbiret sret; + + if (!mpxy->shmem_active) + return -ENODEV; + if (!attr_count || !attrs_buf) + return -EINVAL; + + get_cpu(); + + sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_READ_ATTRS, + channel_id, base_attrid, attr_count, 0, 0, 0); + if (sret.error) + goto err_put_cpu; + + memcpy_from_le32(attrs_buf, (__le32 *)mpxy->shmem, attr_count); + +err_put_cpu: + put_cpu(); + return sbi_err_map_linux_errno(sret.error); +} + +static int mpxy_write_attrs(u32 channel_id, u32 base_attrid, u32 attr_count, + u32 *attrs_buf) +{ + struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local); + struct sbiret sret; + + if (!mpxy->shmem_active) + return -ENODEV; + if (!attr_count || !attrs_buf) + return -EINVAL; + + get_cpu(); + + memcpy_to_le32((__le32 *)mpxy->shmem, attrs_buf, attr_count); + sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_WRITE_ATTRS, + channel_id, base_attrid, attr_count, 0, 0, 0); + + put_cpu(); + return sbi_err_map_linux_errno(sret.error); +} + +static int mpxy_send_message_with_resp(u32 channel_id, u32 msg_id, + void *tx, unsigned long tx_len, + void *rx, unsigned long max_rx_len, + unsigned long *rx_len) +{ + struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local); + unsigned long rx_bytes; + struct sbiret sret; + + if (!mpxy->shmem_active) + return -ENODEV; + if (!tx && tx_len) + return -EINVAL; + + get_cpu(); + + /* Message protocols allowed to have no data in messages */ + if (tx_len) + memcpy(mpxy->shmem, tx, tx_len); + + sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_SEND_MSG_WITH_RESP, + channel_id, msg_id, tx_len, 0, 0, 0); + if (rx && !sret.error) { + rx_bytes = sret.value; + if (rx_bytes > max_rx_len) { + put_cpu(); + return -ENOSPC; + } + + memcpy(rx, mpxy->shmem, rx_bytes); + if (rx_len) + *rx_len = rx_bytes; + } + + put_cpu(); + return sbi_err_map_linux_errno(sret.error); +} + +static int mpxy_send_message_without_resp(u32 channel_id, u32 msg_id, + void *tx, unsigned long tx_len) +{ + struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local); + struct sbiret sret; + + if (!mpxy->shmem_active) + return -ENODEV; + if (!tx && tx_len) + return -EINVAL; + + get_cpu(); + + /* Message protocols allowed to have no data in messages */ + if (tx_len) + memcpy(mpxy->shmem, tx, tx_len); + + sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_SEND_MSG_WITHOUT_RESP, + channel_id, msg_id, tx_len, 0, 0, 0); + + put_cpu(); + return sbi_err_map_linux_errno(sret.error); +} + +static int mpxy_get_notifications(u32 channel_id, + struct sbi_mpxy_notification_data *notif_data, + unsigned long *events_data_len) +{ + struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local); + struct sbiret sret; + + if (!mpxy->shmem_active) + return -ENODEV; + if (!notif_data || !events_data_len) + return -EINVAL; + + get_cpu(); + + sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_GET_NOTIFICATION_EVENTS, + channel_id, 0, 0, 0, 0, 0); 
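+	/*
+	 * On success, sret.value is the size of the events payload in
+	 * bytes; the four-word header (remaining, returned, lost,
+	 * reserved) is not counted, hence the extra 16 bytes in the
+	 * copy below.
+	 */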
+ if (sret.error) + goto err_put_cpu; + + memcpy(notif_data, mpxy->shmem, sret.value + 16); + *events_data_len = sret.value; + +err_put_cpu: + put_cpu(); + return sbi_err_map_linux_errno(sret.error); +} + +static int mpxy_get_shmem_size(unsigned long *shmem_size) +{ + struct sbiret sret; + + sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_GET_SHMEM_SIZE, + 0, 0, 0, 0, 0, 0); + if (sret.error) + return sbi_err_map_linux_errno(sret.error); + if (shmem_size) + *shmem_size = sret.value; + return 0; +} + +static int mpxy_setup_shmem(unsigned int cpu) +{ + struct page *shmem_page; + struct mpxy_local *mpxy; + struct sbiret sret; + + mpxy = per_cpu_ptr(&mpxy_local, cpu); + if (mpxy->shmem_active) + return 0; + + shmem_page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(mpxy_shmem_size)); + if (!shmem_page) + return -ENOMEM; + + /* + * Linux setup of shmem is done in mpxy OVERWRITE mode. + * flags[1:0] = 00b + */ + sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_SET_SHMEM, + page_to_phys(shmem_page), 0, 0, 0, 0, 0); + if (sret.error) { + free_pages((unsigned long)page_to_virt(shmem_page), + get_order(mpxy_shmem_size)); + return sbi_err_map_linux_errno(sret.error); + } + + mpxy->shmem = page_to_virt(shmem_page); + mpxy->shmem_phys_addr = page_to_phys(shmem_page); + mpxy->shmem_active = true; + + return 0; +} + +/* ====== MPXY mailbox data structures ====== */ + +/* MPXY mailbox channel */ +struct mpxy_mbox_channel { + struct mpxy_mbox *mbox; + u32 channel_id; + struct sbi_mpxy_channel_attrs attrs; + struct sbi_mpxy_rpmi_channel_attrs rpmi_attrs; + struct sbi_mpxy_notification_data *notif; + u32 max_xfer_len; + bool have_events_state; + u32 msi_index; + u32 msi_irq; + bool started; +}; + +/* MPXY mailbox */ +struct mpxy_mbox { + struct device *dev; + u32 channel_count; + struct mpxy_mbox_channel *channels; + u32 msi_count; + struct mpxy_mbox_channel **msi_index_to_channel; + struct mbox_controller controller; +}; + +/* ====== MPXY RPMI processing ====== */ + +static void mpxy_mbox_send_rpmi_data(struct mpxy_mbox_channel *mchan, + struct rpmi_mbox_message *msg) +{ + msg->error = 0; + switch (msg->type) { + case RPMI_MBOX_MSG_TYPE_GET_ATTRIBUTE: + switch (msg->attr.id) { + case RPMI_MBOX_ATTR_SPEC_VERSION: + msg->attr.value = mchan->attrs.msg_proto_version; + break; + case RPMI_MBOX_ATTR_MAX_MSG_DATA_SIZE: + msg->attr.value = mchan->max_xfer_len; + break; + case RPMI_MBOX_ATTR_SERVICEGROUP_ID: + msg->attr.value = mchan->rpmi_attrs.servicegroup_id; + break; + case RPMI_MBOX_ATTR_SERVICEGROUP_VERSION: + msg->attr.value = mchan->rpmi_attrs.servicegroup_version; + break; + case RPMI_MBOX_ATTR_IMPL_ID: + msg->attr.value = mchan->rpmi_attrs.impl_id; + break; + case RPMI_MBOX_ATTR_IMPL_VERSION: + msg->attr.value = mchan->rpmi_attrs.impl_version; + break; + default: + msg->error = -EOPNOTSUPP; + break; + } + break; + case RPMI_MBOX_MSG_TYPE_SET_ATTRIBUTE: + /* None of the RPMI linux mailbox attributes are writeable */ + msg->error = -EOPNOTSUPP; + break; + case RPMI_MBOX_MSG_TYPE_SEND_WITH_RESPONSE: + if ((!msg->data.request && msg->data.request_len) || + (msg->data.request && msg->data.request_len > mchan->max_xfer_len) || + (!msg->data.response && msg->data.max_response_len)) { + msg->error = -EINVAL; + break; + } + if (!(mchan->attrs.capability & SBI_MPXY_CHAN_CAP_SEND_WITH_RESP)) { + msg->error = -EIO; + break; + } + msg->error = mpxy_send_message_with_resp(mchan->channel_id, + msg->data.service_id, + msg->data.request, + msg->data.request_len, + msg->data.response, + msg->data.max_response_len, + 
&msg->data.out_response_len); + break; + case RPMI_MBOX_MSG_TYPE_SEND_WITHOUT_RESPONSE: + if ((!msg->data.request && msg->data.request_len) || + (msg->data.request && msg->data.request_len > mchan->max_xfer_len)) { + msg->error = -EINVAL; + break; + } + if (!(mchan->attrs.capability & SBI_MPXY_CHAN_CAP_SEND_WITHOUT_RESP)) { + msg->error = -EIO; + break; + } + msg->error = mpxy_send_message_without_resp(mchan->channel_id, + msg->data.service_id, + msg->data.request, + msg->data.request_len); + break; + default: + msg->error = -EOPNOTSUPP; + break; + } +} + +static void mpxy_mbox_peek_rpmi_data(struct mbox_chan *chan, + struct mpxy_mbox_channel *mchan, + struct sbi_mpxy_notification_data *notif, + unsigned long events_data_len) +{ + struct rpmi_notification_event *event; + struct rpmi_mbox_message msg; + unsigned long pos = 0; + + while (pos < events_data_len && (events_data_len - pos) <= sizeof(*event)) { + event = (struct rpmi_notification_event *)(notif->events_data + pos); + + msg.type = RPMI_MBOX_MSG_TYPE_NOTIFICATION_EVENT; + msg.notif.event_datalen = le16_to_cpu(event->event_datalen); + msg.notif.event_id = event->event_id; + msg.notif.event_data = event->event_data; + msg.error = 0; + + mbox_chan_received_data(chan, &msg); + pos += sizeof(*event) + msg.notif.event_datalen; + } +} + +static int mpxy_mbox_read_rpmi_attrs(struct mpxy_mbox_channel *mchan) +{ + return mpxy_read_attrs(mchan->channel_id, + SBI_MPXY_ATTR_MSGPROTO_ATTR_START, + sizeof(mchan->rpmi_attrs) / sizeof(u32), + (u32 *)&mchan->rpmi_attrs); +} + +/* ====== MPXY mailbox callbacks ====== */ + +static int mpxy_mbox_send_data(struct mbox_chan *chan, void *data) +{ + struct mpxy_mbox_channel *mchan = chan->con_priv; + + if (mchan->attrs.msg_proto_id == SBI_MPXY_MSGPROTO_RPMI_ID) { + mpxy_mbox_send_rpmi_data(mchan, data); + return 0; + } + + return -EOPNOTSUPP; +} + +static bool mpxy_mbox_peek_data(struct mbox_chan *chan) +{ + struct mpxy_mbox_channel *mchan = chan->con_priv; + struct sbi_mpxy_notification_data *notif = mchan->notif; + bool have_notifications = false; + unsigned long data_len; + int rc; + + if (!(mchan->attrs.capability & SBI_MPXY_CHAN_CAP_GET_NOTIFICATIONS)) + return false; + + do { + rc = mpxy_get_notifications(mchan->channel_id, notif, &data_len); + if (rc || !data_len) + break; + + if (mchan->attrs.msg_proto_id == SBI_MPXY_MSGPROTO_RPMI_ID) + mpxy_mbox_peek_rpmi_data(chan, mchan, notif, data_len); + + have_notifications = true; + } while (1); + + return have_notifications; +} + +static irqreturn_t mpxy_mbox_irq_thread(int irq, void *dev_id) +{ + mpxy_mbox_peek_data(dev_id); + return IRQ_HANDLED; +} + +static int mpxy_mbox_setup_msi(struct mbox_chan *chan, + struct mpxy_mbox_channel *mchan) +{ + struct device *dev = mchan->mbox->dev; + int rc; + + /* Do nothing if MSI not supported */ + if (mchan->msi_irq == U32_MAX) + return 0; + + /* Fail if MSI already enabled */ + if (mchan->attrs.msi_control) + return -EALREADY; + + /* Request channel MSI handler */ + rc = request_threaded_irq(mchan->msi_irq, NULL, mpxy_mbox_irq_thread, + 0, dev_name(dev), chan); + if (rc) { + dev_err(dev, "failed to request MPXY channel 0x%x IRQ\n", + mchan->channel_id); + return rc; + } + + /* Enable channel MSI control */ + mchan->attrs.msi_control = 1; + rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_MSI_CONTROL, + 1, &mchan->attrs.msi_control); + if (rc) { + dev_err(dev, "enable MSI control failed for MPXY channel 0x%x\n", + mchan->channel_id); + mchan->attrs.msi_control = 0; + free_irq(mchan->msi_irq, chan); + return 
rc; + } + + return 0; +} + +static void mpxy_mbox_cleanup_msi(struct mbox_chan *chan, + struct mpxy_mbox_channel *mchan) +{ + struct device *dev = mchan->mbox->dev; + int rc; + + /* Do nothing if MSI not supported */ + if (mchan->msi_irq == U32_MAX) + return; + + /* Do nothing if MSI already disabled */ + if (!mchan->attrs.msi_control) + return; + + /* Disable channel MSI control */ + mchan->attrs.msi_control = 0; + rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_MSI_CONTROL, + 1, &mchan->attrs.msi_control); + if (rc) { + dev_err(dev, "disable MSI control failed for MPXY channel 0x%x\n", + mchan->channel_id); + } + + /* Free channel MSI handler */ + free_irq(mchan->msi_irq, chan); +} + +static int mpxy_mbox_setup_events(struct mpxy_mbox_channel *mchan) +{ + struct device *dev = mchan->mbox->dev; + int rc; + + /* Do nothing if events state not supported */ + if (!mchan->have_events_state) + return 0; + + /* Fail if events state already enabled */ + if (mchan->attrs.events_state_ctrl) + return -EALREADY; + + /* Enable channel events state */ + mchan->attrs.events_state_ctrl = 1; + rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_EVENTS_STATE_CONTROL, + 1, &mchan->attrs.events_state_ctrl); + if (rc) { + dev_err(dev, "enable events state failed for MPXY channel 0x%x\n", + mchan->channel_id); + mchan->attrs.events_state_ctrl = 0; + return rc; + } + + return 0; +} + +static void mpxy_mbox_cleanup_events(struct mpxy_mbox_channel *mchan) +{ + struct device *dev = mchan->mbox->dev; + int rc; + + /* Do nothing if events state not supported */ + if (!mchan->have_events_state) + return; + + /* Do nothing if events state already disabled */ + if (!mchan->attrs.events_state_ctrl) + return; + + /* Disable channel events state */ + mchan->attrs.events_state_ctrl = 0; + rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_EVENTS_STATE_CONTROL, + 1, &mchan->attrs.events_state_ctrl); + if (rc) + dev_err(dev, "disable events state failed for MPXY channel 0x%x\n", + mchan->channel_id); +} + +static int mpxy_mbox_startup(struct mbox_chan *chan) +{ + struct mpxy_mbox_channel *mchan = chan->con_priv; + int rc; + + if (mchan->started) + return -EALREADY; + + /* Setup channel MSI */ + rc = mpxy_mbox_setup_msi(chan, mchan); + if (rc) + return rc; + + /* Setup channel notification events */ + rc = mpxy_mbox_setup_events(mchan); + if (rc) { + mpxy_mbox_cleanup_msi(chan, mchan); + return rc; + } + + /* Mark the channel as started */ + mchan->started = true; + + return 0; +} + +static void mpxy_mbox_shutdown(struct mbox_chan *chan) +{ + struct mpxy_mbox_channel *mchan = chan->con_priv; + + if (!mchan->started) + return; + + /* Mark the channel as stopped */ + mchan->started = false; + + /* Cleanup channel notification events */ + mpxy_mbox_cleanup_events(mchan); + + /* Cleanup channel MSI */ + mpxy_mbox_cleanup_msi(chan, mchan); +} + +static const struct mbox_chan_ops mpxy_mbox_ops = { + .send_data = mpxy_mbox_send_data, + .peek_data = mpxy_mbox_peek_data, + .startup = mpxy_mbox_startup, + .shutdown = mpxy_mbox_shutdown, +}; + +/* ====== MPXY platform driver ===== */ + +static void mpxy_mbox_msi_write(struct msi_desc *desc, struct msi_msg *msg) +{ + struct device *dev = msi_desc_to_dev(desc); + struct mpxy_mbox *mbox = dev_get_drvdata(dev); + struct mpxy_mbox_channel *mchan; + struct sbi_mpxy_msi_info *minfo; + int rc; + + mchan = mbox->msi_index_to_channel[desc->msi_index]; + if (!mchan) { + dev_warn(dev, "MPXY channel not available for MSI index %d\n", + desc->msi_index); + return; + } + + minfo = 
&mchan->attrs.msi_info; + minfo->msi_addr_lo = msg->address_lo; + minfo->msi_addr_hi = msg->address_hi; + minfo->msi_data = msg->data; + + rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_MSI_ADDR_LO, + sizeof(*minfo) / sizeof(u32), (u32 *)minfo); + if (rc) { + dev_warn(dev, "failed to write MSI info for MPXY channel 0x%x\n", + mchan->channel_id); + } +} + +static struct mbox_chan *mpxy_mbox_fw_xlate(struct mbox_controller *ctlr, + const struct fwnode_reference_args *pa) +{ + struct mpxy_mbox *mbox = container_of(ctlr, struct mpxy_mbox, controller); + struct mpxy_mbox_channel *mchan; + u32 i; + + if (pa->nargs != 2) + return ERR_PTR(-EINVAL); + + for (i = 0; i < mbox->channel_count; i++) { + mchan = &mbox->channels[i]; + if (mchan->channel_id == pa->args[0] && + mchan->attrs.msg_proto_id == pa->args[1]) + return &mbox->controller.chans[i]; + } + + return ERR_PTR(-ENOENT); +} + +static int mpxy_mbox_populate_channels(struct mpxy_mbox *mbox) +{ + u32 i, *channel_ids __free(kfree) = NULL; + struct mpxy_mbox_channel *mchan; + int rc; + + /* Find-out of number of channels */ + rc = mpxy_get_channel_count(&mbox->channel_count); + if (rc) + return dev_err_probe(mbox->dev, rc, "failed to get number of MPXY channels\n"); + if (!mbox->channel_count) + return dev_err_probe(mbox->dev, -ENODEV, "no MPXY channels available\n"); + + /* Allocate and fetch all channel IDs */ + channel_ids = kcalloc(mbox->channel_count, sizeof(*channel_ids), GFP_KERNEL); + if (!channel_ids) + return -ENOMEM; + rc = mpxy_get_channel_ids(mbox->channel_count, channel_ids); + if (rc) + return dev_err_probe(mbox->dev, rc, "failed to get MPXY channel IDs\n"); + + /* Populate all channels */ + mbox->channels = devm_kcalloc(mbox->dev, mbox->channel_count, + sizeof(*mbox->channels), GFP_KERNEL); + if (!mbox->channels) + return -ENOMEM; + for (i = 0; i < mbox->channel_count; i++) { + mchan = &mbox->channels[i]; + mchan->mbox = mbox; + mchan->channel_id = channel_ids[i]; + + rc = mpxy_read_attrs(mchan->channel_id, SBI_MPXY_ATTR_MSG_PROT_ID, + sizeof(mchan->attrs) / sizeof(u32), + (u32 *)&mchan->attrs); + if (rc) { + return dev_err_probe(mbox->dev, rc, + "MPXY channel 0x%x read attrs failed\n", + mchan->channel_id); + } + + if (mchan->attrs.msg_proto_id == SBI_MPXY_MSGPROTO_RPMI_ID) { + rc = mpxy_mbox_read_rpmi_attrs(mchan); + if (rc) { + return dev_err_probe(mbox->dev, rc, + "MPXY channel 0x%x read RPMI attrs failed\n", + mchan->channel_id); + } + } + + mchan->notif = devm_kzalloc(mbox->dev, mpxy_shmem_size, GFP_KERNEL); + if (!mchan->notif) + return -ENOMEM; + + mchan->max_xfer_len = min(mpxy_shmem_size, mchan->attrs.msg_max_len); + + if ((mchan->attrs.capability & SBI_MPXY_CHAN_CAP_GET_NOTIFICATIONS) && + (mchan->attrs.capability & SBI_MPXY_CHAN_CAP_EVENTS_STATE)) + mchan->have_events_state = true; + + if ((mchan->attrs.capability & SBI_MPXY_CHAN_CAP_GET_NOTIFICATIONS) && + (mchan->attrs.capability & SBI_MPXY_CHAN_CAP_MSI)) + mchan->msi_index = mbox->msi_count++; + else + mchan->msi_index = U32_MAX; + mchan->msi_irq = U32_MAX; + } + + return 0; +} + +static int mpxy_mbox_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mpxy_mbox_channel *mchan; + struct mpxy_mbox *mbox; + int msi_idx, rc; + u32 i; + + /* + * Initialize MPXY shared memory only once. This also ensures + * that SBI MPXY mailbox is probed only once. 
+ */ + if (mpxy_shmem_init_done) { + dev_err(dev, "SBI MPXY mailbox already initialized\n"); + return -EALREADY; + } + + /* Probe for SBI MPXY extension */ + if (sbi_spec_version < sbi_mk_version(1, 0) || + sbi_probe_extension(SBI_EXT_MPXY) <= 0) { + dev_info(dev, "SBI MPXY extension not available\n"); + return -ENODEV; + } + + /* Find-out shared memory size */ + rc = mpxy_get_shmem_size(&mpxy_shmem_size); + if (rc) + return dev_err_probe(dev, rc, "failed to get MPXY shared memory size\n"); + + /* + * Setup MPXY shared memory on each CPU + * + * Note: Don't cleanup MPXY shared memory upon CPU power-down + * because the RPMI System MSI irqchip driver needs it to be + * available when migrating IRQs in CPU power-down path. + */ + cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "riscv/sbi-mpxy-shmem", + mpxy_setup_shmem, NULL); + + /* Mark as MPXY shared memory initialization done */ + mpxy_shmem_init_done = true; + + /* Allocate mailbox instance */ + mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL); + if (!mbox) + return -ENOMEM; + mbox->dev = dev; + platform_set_drvdata(pdev, mbox); + + /* Populate mailbox channels */ + rc = mpxy_mbox_populate_channels(mbox); + if (rc) + return rc; + + /* Initialize mailbox controller */ + mbox->controller.txdone_irq = false; + mbox->controller.txdone_poll = false; + mbox->controller.ops = &mpxy_mbox_ops; + mbox->controller.dev = dev; + mbox->controller.num_chans = mbox->channel_count; + mbox->controller.fw_xlate = mpxy_mbox_fw_xlate; + mbox->controller.chans = devm_kcalloc(dev, mbox->channel_count, + sizeof(*mbox->controller.chans), + GFP_KERNEL); + if (!mbox->controller.chans) + return -ENOMEM; + for (i = 0; i < mbox->channel_count; i++) + mbox->controller.chans[i].con_priv = &mbox->channels[i]; + + /* Setup MSIs for mailbox (if required) */ + if (mbox->msi_count) { + /* + * The device MSI domain for platform devices on RISC-V architecture + * is only available after the MSI controller driver is probed so, + * explicitly configure here. + */ + if (!dev_get_msi_domain(dev)) { + struct fwnode_handle *fwnode = dev_fwnode(dev); + + /* + * The device MSI domain for OF devices is only set at the + * time of populating/creating OF device. If the device MSI + * domain is discovered later after the OF device is created + * then we need to set it explicitly before using any platform + * MSI functions. 
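Just below, the probe builds msi_index_to_channel so that mpxy_mbox_msi_write() can map an MSI descriptor straight to its channel. The in-tree code fills the map with a nested scan; the equivalent single-pass construction, assuming msi_index is U32_MAX for channels without an MSI (as assigned in mpxy_mbox_populate_channels()):

	#include <stddef.h>
	#include <stdint.h>

	#define NO_MSI UINT32_MAX

	struct mchan_sketch {
		uint32_t msi_index;
	};

	/* Fill map[msi_index] = channel for each channel that owns an MSI */
	static void build_msi_map(struct mchan_sketch *chans, size_t nchans,
				  struct mchan_sketch **map, size_t nmsi)
	{
		for (size_t i = 0; i < nchans; i++) {
			if (chans[i].msi_index != NO_MSI &&
			    chans[i].msi_index < nmsi)
				map[chans[i].msi_index] = &chans[i];
		}
	}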
+ */ + if (is_of_node(fwnode)) { + of_msi_configure(dev, dev_of_node(dev)); + } else if (is_acpi_device_node(fwnode)) { + struct irq_domain *msi_domain; + + msi_domain = irq_find_matching_fwnode(imsic_acpi_get_fwnode(dev), + DOMAIN_BUS_PLATFORM_MSI); + dev_set_msi_domain(dev, msi_domain); + } + + if (!dev_get_msi_domain(dev)) + return -EPROBE_DEFER; + } + + mbox->msi_index_to_channel = devm_kcalloc(dev, mbox->msi_count, + sizeof(*mbox->msi_index_to_channel), + GFP_KERNEL); + if (!mbox->msi_index_to_channel) + return -ENOMEM; + + for (msi_idx = 0; msi_idx < mbox->msi_count; msi_idx++) { + for (i = 0; i < mbox->channel_count; i++) { + mchan = &mbox->channels[i]; + if (mchan->msi_index == msi_idx) { + mbox->msi_index_to_channel[msi_idx] = mchan; + break; + } + } + } + + rc = platform_device_msi_init_and_alloc_irqs(dev, mbox->msi_count, + mpxy_mbox_msi_write); + if (rc) { + return dev_err_probe(dev, rc, "Failed to allocate %d MSIs\n", + mbox->msi_count); + } + + for (i = 0; i < mbox->channel_count; i++) { + mchan = &mbox->channels[i]; + if (mchan->msi_index == U32_MAX) + continue; + mchan->msi_irq = msi_get_virq(dev, mchan->msi_index); + } + } + + /* Register mailbox controller */ + rc = devm_mbox_controller_register(dev, &mbox->controller); + if (rc) { + dev_err_probe(dev, rc, "Registering SBI MPXY mailbox failed\n"); + if (mbox->msi_count) + platform_device_msi_free_irqs_all(dev); + return rc; + } + +#ifdef CONFIG_ACPI + struct acpi_device *adev = ACPI_COMPANION(dev); + + if (adev) + acpi_dev_clear_dependencies(adev); +#endif + + dev_info(dev, "mailbox registered with %d channels\n", + mbox->channel_count); + return 0; +} + +static void mpxy_mbox_remove(struct platform_device *pdev) +{ + struct mpxy_mbox *mbox = platform_get_drvdata(pdev); + + if (mbox->msi_count) + platform_device_msi_free_irqs_all(mbox->dev); +} + +static const struct of_device_id mpxy_mbox_of_match[] = { + { .compatible = "riscv,sbi-mpxy-mbox" }, + {} +}; +MODULE_DEVICE_TABLE(of, mpxy_mbox_of_match); + +static const struct acpi_device_id mpxy_mbox_acpi_match[] = { + { "RSCV0005" }, + {} +}; +MODULE_DEVICE_TABLE(acpi, mpxy_mbox_acpi_match); + +static struct platform_driver mpxy_mbox_driver = { + .driver = { + .name = "riscv-sbi-mpxy-mbox", + .of_match_table = mpxy_mbox_of_match, + .acpi_match_table = mpxy_mbox_acpi_match, + }, + .probe = mpxy_mbox_probe, + .remove = mpxy_mbox_remove, +}; +module_platform_driver(mpxy_mbox_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Anup Patel <apatel@ventanamicro.com>"); +MODULE_DESCRIPTION("RISC-V SBI MPXY mailbox controller driver"); diff --git a/drivers/mailbox/rockchip-mailbox.c b/drivers/mailbox/rockchip-mailbox.c index f24a77b1a0ff..4d966cb2ed03 100644 --- a/drivers/mailbox/rockchip-mailbox.c +++ b/drivers/mailbox/rockchip-mailbox.c @@ -1,14 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
*/ #include <linux/clk.h> @@ -16,8 +8,8 @@ #include <linux/io.h> #include <linux/kernel.h> #include <linux/mailbox_controller.h> +#include <linux/of.h> #include <linux/module.h> -#include <linux/of_device.h> #include <linux/platform_device.h> #define MAILBOX_A2B_INTEN 0x00 @@ -167,12 +159,11 @@ static const struct of_device_id rockchip_mbox_of_match[] = { { .compatible = "rockchip,rk3368-mailbox", .data = &rk3368_drv_data}, { }, }; -MODULE_DEVICE_TABLE(of, rockchp_mbox_of_match); +MODULE_DEVICE_TABLE(of, rockchip_mbox_of_match); static int rockchip_mbox_probe(struct platform_device *pdev) { struct rockchip_mbox *mb; - const struct of_device_id *match; const struct rockchip_mbox_data *drv_data; struct resource *res; int ret, irq, i; @@ -180,8 +171,7 @@ static int rockchip_mbox_probe(struct platform_device *pdev) if (!pdev->dev.of_node) return -ENODEV; - match = of_match_node(rockchip_mbox_of_match, pdev->dev.of_node); - drv_data = (const struct rockchip_mbox_data *)match->data; + drv_data = (const struct rockchip_mbox_data *) device_get_match_data(&pdev->dev); mb = devm_kzalloc(&pdev->dev, sizeof(*mb), GFP_KERNEL); if (!mb) @@ -204,11 +194,7 @@ static int rockchip_mbox_probe(struct platform_device *pdev) mb->mbox.ops = &rockchip_mbox_chan_ops; mb->mbox.txdone_irq = true; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) - return -ENODEV; - - mb->mbox_base = devm_ioremap_resource(&pdev->dev, res); + mb->mbox_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(mb->mbox_base)) return PTR_ERR(mb->mbox_base); @@ -258,13 +244,12 @@ static struct platform_driver rockchip_mbox_driver = { .probe = rockchip_mbox_probe, .driver = { .name = "rockchip-mailbox", - .of_match_table = of_match_ptr(rockchip_mbox_of_match), + .of_match_table = rockchip_mbox_of_match, }, }; module_platform_driver(rockchip_mbox_driver); -MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Rockchip mailbox: communicate between CPU cores and MCU"); MODULE_AUTHOR("Addy Ke <addy.ke@rock-chips.com>"); MODULE_AUTHOR("Caesar Wang <wxt@rock-chips.com>"); diff --git a/drivers/mailbox/sprd-mailbox.c b/drivers/mailbox/sprd-mailbox.c new file mode 100644 index 000000000000..ee8539dfcef5 --- /dev/null +++ b/drivers/mailbox/sprd-mailbox.c @@ -0,0 +1,413 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Spreadtrum mailbox driver + * + * Copyright (c) 2020 Spreadtrum Communications Inc. 
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#define SPRD_MBOX_ID		0x0
+#define SPRD_MBOX_MSG_LOW	0x4
+#define SPRD_MBOX_MSG_HIGH	0x8
+#define SPRD_MBOX_TRIGGER	0xc
+#define SPRD_MBOX_FIFO_RST	0x10
+#define SPRD_MBOX_FIFO_STS	0x14
+#define SPRD_MBOX_IRQ_STS	0x18
+#define SPRD_MBOX_IRQ_MSK	0x1c
+#define SPRD_MBOX_LOCK		0x20
+#define SPRD_MBOX_FIFO_DEPTH	0x24
+
+/* Bit and mask definition for inbox's SPRD_MBOX_FIFO_STS register */
+#define SPRD_INBOX_FIFO_DELIVER_MASK		GENMASK(23, 16)
+#define SPRD_INBOX_FIFO_OVERFLOW_MASK		GENMASK(15, 8)
+#define SPRD_INBOX_FIFO_DELIVER_SHIFT		16
+#define SPRD_INBOX_FIFO_BUSY_MASK		GENMASK(7, 0)
+
+/* Bit and mask definition for SPRD_MBOX_IRQ_STS register */
+#define SPRD_MBOX_IRQ_CLR			BIT(0)
+
+/* Bit and mask definition for outbox's SPRD_MBOX_FIFO_STS register */
+#define SPRD_OUTBOX_FIFO_FULL			BIT(2)
+#define SPRD_OUTBOX_FIFO_WR_SHIFT		16
+#define SPRD_OUTBOX_FIFO_RD_SHIFT		24
+#define SPRD_OUTBOX_FIFO_POS_MASK		GENMASK(7, 0)
+
+/* Bit and mask definition for inbox's SPRD_MBOX_IRQ_MSK register */
+#define SPRD_INBOX_FIFO_BLOCK_IRQ		BIT(0)
+#define SPRD_INBOX_FIFO_OVERFLOW_IRQ		BIT(1)
+#define SPRD_INBOX_FIFO_DELIVER_IRQ		BIT(2)
+#define SPRD_INBOX_FIFO_IRQ_MASK		GENMASK(2, 0)
+
+/* Bit and mask definition for outbox's SPRD_MBOX_IRQ_MSK register */
+#define SPRD_OUTBOX_FIFO_NOT_EMPTY_IRQ		BIT(0)
+#define SPRD_OUTBOX_FIFO_IRQ_MASK		GENMASK(4, 0)
+
+#define SPRD_OUTBOX_BASE_SPAN			0x1000
+#define SPRD_MBOX_CHAN_MAX			8
+#define SPRD_SUPP_INBOX_ID_SC9863A		7
+
+struct sprd_mbox_priv {
+	struct mbox_controller	mbox;
+	struct device		*dev;
+	void __iomem		*inbox_base;
+	void __iomem		*outbox_base;
+	/*  Base register address for supplementary outbox */
+	void __iomem		*supp_base;
+	u32			outbox_fifo_depth;
+
+	struct mutex		lock;
+	u32			refcnt;
+	struct mbox_chan	chan[SPRD_MBOX_CHAN_MAX];
+};
+
+static struct sprd_mbox_priv *to_sprd_mbox_priv(struct mbox_controller *mbox)
+{
+	return container_of(mbox, struct sprd_mbox_priv, mbox);
+}
+
+static u32 sprd_mbox_get_fifo_len(struct sprd_mbox_priv *priv, u32 fifo_sts)
+{
+	u32 wr_pos = (fifo_sts >> SPRD_OUTBOX_FIFO_WR_SHIFT) &
+		SPRD_OUTBOX_FIFO_POS_MASK;
+	u32 rd_pos = (fifo_sts >> SPRD_OUTBOX_FIFO_RD_SHIFT) &
+		SPRD_OUTBOX_FIFO_POS_MASK;
+	u32 fifo_len;
+
+	/*
+	 * If the read pointer equals the write pointer, the FIFO is
+	 * either full or empty.
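+	 *
+	 * A worked example with an outbox FIFO depth of 8: wr_pos = 2 and
+	 * rd_pos = 6 gives a wrapped length of 8 - 6 + 2 = 4 pending
+	 * messages, while the ambiguous wr_pos == rd_pos case is resolved
+	 * by checking the SPRD_OUTBOX_FIFO_FULL status bit below.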
+	 */
+	if (wr_pos == rd_pos) {
+		if (fifo_sts & SPRD_OUTBOX_FIFO_FULL)
+			fifo_len = priv->outbox_fifo_depth;
+		else
+			fifo_len = 0;
+	} else if (wr_pos > rd_pos) {
+		fifo_len = wr_pos - rd_pos;
+	} else {
+		fifo_len = priv->outbox_fifo_depth - rd_pos + wr_pos;
+	}
+
+	return fifo_len;
+}
+
+static irqreturn_t do_outbox_isr(void __iomem *base, struct sprd_mbox_priv *priv)
+{
+	struct mbox_chan *chan;
+	u32 fifo_sts, fifo_len, msg[2];
+	int i, id;
+
+	fifo_sts = readl(base + SPRD_MBOX_FIFO_STS);
+
+	fifo_len = sprd_mbox_get_fifo_len(priv, fifo_sts);
+	if (!fifo_len) {
+		dev_warn_ratelimited(priv->dev, "spurious outbox interrupt\n");
+		return IRQ_NONE;
+	}
+
+	for (i = 0; i < fifo_len; i++) {
+		msg[0] = readl(base + SPRD_MBOX_MSG_LOW);
+		msg[1] = readl(base + SPRD_MBOX_MSG_HIGH);
+		id = readl(base + SPRD_MBOX_ID);
+
+		chan = &priv->chan[id];
+		if (chan->cl)
+			mbox_chan_received_data(chan, (void *)msg);
+		else
+			dev_warn_ratelimited(priv->dev,
+					     "message has been dropped at ch[%d]\n", id);
+
+		/* Trigger to update outbox FIFO pointer */
+		writel(0x1, base + SPRD_MBOX_TRIGGER);
+	}
+
+	/* Clear irq status after reading all messages. */
+	writel(SPRD_MBOX_IRQ_CLR, base + SPRD_MBOX_IRQ_STS);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t sprd_mbox_outbox_isr(int irq, void *data)
+{
+	struct sprd_mbox_priv *priv = data;
+
+	return do_outbox_isr(priv->outbox_base, priv);
+}
+
+static irqreturn_t sprd_mbox_supp_isr(int irq, void *data)
+{
+	struct sprd_mbox_priv *priv = data;
+
+	return do_outbox_isr(priv->supp_base, priv);
+}
+
+static irqreturn_t sprd_mbox_inbox_isr(int irq, void *data)
+{
+	struct sprd_mbox_priv *priv = data;
+	struct mbox_chan *chan;
+	u32 fifo_sts, send_sts, busy, id;
+
+	fifo_sts = readl(priv->inbox_base + SPRD_MBOX_FIFO_STS);
+
+	/* Get the inbox data delivery status */
+	send_sts = (fifo_sts & SPRD_INBOX_FIFO_DELIVER_MASK) >>
+		SPRD_INBOX_FIFO_DELIVER_SHIFT;
+	if (!send_sts) {
+		dev_warn_ratelimited(priv->dev, "spurious inbox interrupt\n");
+		return IRQ_NONE;
+	}
+
+	while (send_sts) {
+		id = __ffs(send_sts);
+		send_sts &= (send_sts - 1);
+
+		chan = &priv->chan[id];
+
+		/*
+		 * Check if the message was fetched by the remote target;
+		 * if so, the transmission has been completed.
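+		 * The per-channel busy bit below is the authoritative
+		 * indicator: it stays set until the remote has drained
+		 * the message from the inbox FIFO.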
+		 */
+		busy = fifo_sts & SPRD_INBOX_FIFO_BUSY_MASK;
+		if (!(busy & BIT(id)))
+			mbox_chan_txdone(chan, 0);
+	}
+
+	/* Clear FIFO delivery and overflow status */
+	writel(fifo_sts &
+	       (SPRD_INBOX_FIFO_DELIVER_MASK | SPRD_INBOX_FIFO_OVERFLOW_MASK),
+	       priv->inbox_base + SPRD_MBOX_FIFO_RST);
+
+	/* Clear irq status */
+	writel(SPRD_MBOX_IRQ_CLR, priv->inbox_base + SPRD_MBOX_IRQ_STS);
+
+	return IRQ_HANDLED;
+}
+
+static int sprd_mbox_send_data(struct mbox_chan *chan, void *msg)
+{
+	struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
+	unsigned long id = (unsigned long)chan->con_priv;
+	u32 *data = msg;
+
+	/* Write data into the inbox FIFO; only 8 bytes are supported each time */
+	writel(data[0], priv->inbox_base + SPRD_MBOX_MSG_LOW);
+	writel(data[1], priv->inbox_base + SPRD_MBOX_MSG_HIGH);
+
+	/* Set target core id */
+	writel(id, priv->inbox_base + SPRD_MBOX_ID);
+
+	/* Trigger remote request */
+	writel(0x1, priv->inbox_base + SPRD_MBOX_TRIGGER);
+
+	return 0;
+}
+
+static int sprd_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
+{
+	struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
+	unsigned long id = (unsigned long)chan->con_priv;
+	u32 busy;
+
+	timeout = jiffies + msecs_to_jiffies(timeout);
+
+	while (time_before(jiffies, timeout)) {
+		busy = readl(priv->inbox_base + SPRD_MBOX_FIFO_STS) &
+			SPRD_INBOX_FIFO_BUSY_MASK;
+		if (!(busy & BIT(id))) {
+			mbox_chan_txdone(chan, 0);
+			return 0;
+		}
+
+		udelay(1);
+	}
+
+	return -ETIME;
+}
+
+static int sprd_mbox_startup(struct mbox_chan *chan)
+{
+	struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
+	u32 val;
+
+	mutex_lock(&priv->lock);
+	if (priv->refcnt++ == 0) {
+		/* Select outbox FIFO mode and reset the outbox FIFO status */
+		writel(0x0, priv->outbox_base + SPRD_MBOX_FIFO_RST);
+
+		/* Enable inbox FIFO overflow and delivery interrupt */
+		val = readl(priv->inbox_base + SPRD_MBOX_IRQ_MSK);
+		val &= ~(SPRD_INBOX_FIFO_OVERFLOW_IRQ | SPRD_INBOX_FIFO_DELIVER_IRQ);
+		writel(val, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
+
+		/* Enable outbox FIFO not empty interrupt */
+		val = readl(priv->outbox_base + SPRD_MBOX_IRQ_MSK);
+		val &= ~SPRD_OUTBOX_FIFO_NOT_EMPTY_IRQ;
+		writel(val, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
+
+		/* Enable the supplementary outbox in the same way as the fundamental one */
+		if (priv->supp_base) {
+			writel(0x0, priv->supp_base + SPRD_MBOX_FIFO_RST);
+			val = readl(priv->supp_base + SPRD_MBOX_IRQ_MSK);
+			val &= ~SPRD_OUTBOX_FIFO_NOT_EMPTY_IRQ;
+			writel(val, priv->supp_base + SPRD_MBOX_IRQ_MSK);
+		}
+	}
+	mutex_unlock(&priv->lock);
+
+	return 0;
+}
+
+static void sprd_mbox_shutdown(struct mbox_chan *chan)
+{
+	struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
+
+	mutex_lock(&priv->lock);
+	if (--priv->refcnt == 0) {
+		/* Disable inbox & outbox interrupt */
+		writel(SPRD_INBOX_FIFO_IRQ_MASK, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
+		writel(SPRD_OUTBOX_FIFO_IRQ_MASK, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
+
+		if (priv->supp_base)
+			writel(SPRD_OUTBOX_FIFO_IRQ_MASK,
+			       priv->supp_base + SPRD_MBOX_IRQ_MSK);
+	}
+	mutex_unlock(&priv->lock);
+}
+
+static const struct mbox_chan_ops sprd_mbox_ops = {
+	.send_data	= sprd_mbox_send_data,
+	.flush		= sprd_mbox_flush,
+	.startup	= sprd_mbox_startup,
+	.shutdown	= sprd_mbox_shutdown,
+};
+
+static int sprd_mbox_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct sprd_mbox_priv *priv;
+	int ret, inbox_irq, outbox_irq, supp_irq;
+	unsigned long id, supp;
+	struct clk *clk;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if
(!priv)
+		return -ENOMEM;
+
+	priv->dev = dev;
+	mutex_init(&priv->lock);
+
+	/*
+	 * Unisoc mailbox uses an inbox to send messages to the target
+	 * core, and uses (an) outbox(es) to receive messages from other
+	 * cores.
+	 *
+	 * Thus in general the mailbox controller supplies 2 different
+	 * register addresses and IRQ numbers for inbox and outbox.
+	 *
+	 * If necessary, a supplementary inbox could be enabled optionally
+	 * with an independent FIFO and an extra interrupt.
+	 */
+	priv->inbox_base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(priv->inbox_base))
+		return PTR_ERR(priv->inbox_base);
+
+	priv->outbox_base = devm_platform_ioremap_resource(pdev, 1);
+	if (IS_ERR(priv->outbox_base))
+		return PTR_ERR(priv->outbox_base);
+
+	clk = devm_clk_get_enabled(dev, "enable");
+	if (IS_ERR(clk)) {
+		dev_err(dev, "failed to get mailbox clock\n");
+		return PTR_ERR(clk);
+	}
+
+	inbox_irq = platform_get_irq_byname(pdev, "inbox");
+	if (inbox_irq < 0)
+		return inbox_irq;
+
+	ret = devm_request_irq(dev, inbox_irq, sprd_mbox_inbox_isr,
+			       IRQF_NO_SUSPEND, dev_name(dev), priv);
+	if (ret) {
+		dev_err(dev, "failed to request inbox IRQ: %d\n", ret);
+		return ret;
+	}
+
+	outbox_irq = platform_get_irq_byname(pdev, "outbox");
+	if (outbox_irq < 0)
+		return outbox_irq;
+
+	ret = devm_request_irq(dev, outbox_irq, sprd_mbox_outbox_isr,
+			       IRQF_NO_SUSPEND, dev_name(dev), priv);
+	if (ret) {
+		dev_err(dev, "failed to request outbox IRQ: %d\n", ret);
+		return ret;
+	}
+
+	/* Supplementary outbox IRQ is optional */
+	supp_irq = platform_get_irq_byname(pdev, "supp-outbox");
+	if (supp_irq > 0) {
+		ret = devm_request_irq(dev, supp_irq, sprd_mbox_supp_isr,
+				       IRQF_NO_SUSPEND, dev_name(dev), priv);
+		if (ret) {
+			dev_err(dev, "failed to request supplementary outbox IRQ: %d\n", ret);
+			return ret;
+		}
+
+		supp = (unsigned long)of_device_get_match_data(dev);
+		if (!supp) {
+			dev_err(dev, "no supplementary outbox specified\n");
+			return -ENODEV;
+		}
+		priv->supp_base = priv->outbox_base + (SPRD_OUTBOX_BASE_SPAN * supp);
+	}
+
+	/* Get the default outbox FIFO depth */
+	priv->outbox_fifo_depth =
+		readl(priv->outbox_base + SPRD_MBOX_FIFO_DEPTH) + 1;
+	priv->mbox.dev = dev;
+	priv->mbox.chans = &priv->chan[0];
+	priv->mbox.num_chans = SPRD_MBOX_CHAN_MAX;
+	priv->mbox.ops = &sprd_mbox_ops;
+	priv->mbox.txdone_irq = true;
+
+	for (id = 0; id < SPRD_MBOX_CHAN_MAX; id++)
+		priv->chan[id].con_priv = (void *)id;
+
+	ret = devm_mbox_controller_register(dev, &priv->mbox);
+	if (ret) {
+		dev_err(dev, "failed to register mailbox: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct of_device_id sprd_mbox_of_match[] = {
+	{ .compatible = "sprd,sc9860-mailbox" },
+	{ .compatible = "sprd,sc9863a-mailbox",
+	  .data = (void *)SPRD_SUPP_INBOX_ID_SC9863A },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, sprd_mbox_of_match);
+
+static struct platform_driver sprd_mbox_driver = {
+	.driver = {
+		.name = "sprd-mailbox",
+		.of_match_table = sprd_mbox_of_match,
+	},
+	.probe	= sprd_mbox_probe,
+};
+module_platform_driver(sprd_mbox_driver);
+
+MODULE_AUTHOR("Baolin Wang <baolin.wang@unisoc.com>");
+MODULE_DESCRIPTION("Spreadtrum mailbox driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/stm32-ipcc.c b/drivers/mailbox/stm32-ipcc.c
index a338bd4cd7db..4f63f1a14ca6 100644
--- a/drivers/mailbox/stm32-ipcc.c
+++ b/drivers/mailbox/stm32-ipcc.c
@@ -8,9 +8,10 @@
 #include <linux/bitfield.h>
 #include <linux/clk.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/mailbox_controller.h>
 #include <linux/module.h>
-#include
<linux/of_irq.h> +#include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_wakeirq.h> @@ -50,22 +51,32 @@ struct stm32_ipcc { void __iomem *reg_base; void __iomem *reg_proc; struct clk *clk; + spinlock_t lock; /* protect access to IPCC registers */ int irqs[IPCC_IRQ_NUM]; - int wkp; u32 proc_id; u32 n_chans; u32 xcr; u32 xmr; }; -static inline void stm32_ipcc_set_bits(void __iomem *reg, u32 mask) +static inline void stm32_ipcc_set_bits(spinlock_t *lock, void __iomem *reg, + u32 mask) { + unsigned long flags; + + spin_lock_irqsave(lock, flags); writel_relaxed(readl_relaxed(reg) | mask, reg); + spin_unlock_irqrestore(lock, flags); } -static inline void stm32_ipcc_clr_bits(void __iomem *reg, u32 mask) +static inline void stm32_ipcc_clr_bits(spinlock_t *lock, void __iomem *reg, + u32 mask) { + unsigned long flags; + + spin_lock_irqsave(lock, flags); writel_relaxed(readl_relaxed(reg) & ~mask, reg); + spin_unlock_irqrestore(lock, flags); } static irqreturn_t stm32_ipcc_rx_irq(int irq, void *data) @@ -92,7 +103,7 @@ static irqreturn_t stm32_ipcc_rx_irq(int irq, void *data) mbox_chan_received_data(&ipcc->controller.chans[chan], NULL); - stm32_ipcc_set_bits(ipcc->reg_proc + IPCC_XSCR, + stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XSCR, RX_BIT_CHAN(chan)); ret = IRQ_HANDLED; @@ -121,7 +132,7 @@ static irqreturn_t stm32_ipcc_tx_irq(int irq, void *data) dev_dbg(dev, "%s: chan:%d tx\n", __func__, chan); /* mask 'tx channel free' interrupt */ - stm32_ipcc_set_bits(ipcc->reg_proc + IPCC_XMR, + stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR, TX_BIT_CHAN(chan)); mbox_chan_txdone(&ipcc->controller.chans[chan], 0); @@ -134,24 +145,26 @@ static irqreturn_t stm32_ipcc_tx_irq(int irq, void *data) static int stm32_ipcc_send_data(struct mbox_chan *link, void *data) { - unsigned int chan = (unsigned int)link->con_priv; + unsigned long chan = (unsigned long)link->con_priv; struct stm32_ipcc *ipcc = container_of(link->mbox, struct stm32_ipcc, controller); - dev_dbg(ipcc->controller.dev, "%s: chan:%d\n", __func__, chan); + dev_dbg(ipcc->controller.dev, "%s: chan:%lu\n", __func__, chan); /* set channel n occupied */ - stm32_ipcc_set_bits(ipcc->reg_proc + IPCC_XSCR, TX_BIT_CHAN(chan)); + stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XSCR, + TX_BIT_CHAN(chan)); /* unmask 'tx channel free' interrupt */ - stm32_ipcc_clr_bits(ipcc->reg_proc + IPCC_XMR, TX_BIT_CHAN(chan)); + stm32_ipcc_clr_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR, + TX_BIT_CHAN(chan)); return 0; } static int stm32_ipcc_startup(struct mbox_chan *link) { - unsigned int chan = (unsigned int)link->con_priv; + unsigned long chan = (unsigned long)link->con_priv; struct stm32_ipcc *ipcc = container_of(link->mbox, struct stm32_ipcc, controller); int ret; @@ -163,19 +176,20 @@ static int stm32_ipcc_startup(struct mbox_chan *link) } /* unmask 'rx channel occupied' interrupt */ - stm32_ipcc_clr_bits(ipcc->reg_proc + IPCC_XMR, RX_BIT_CHAN(chan)); + stm32_ipcc_clr_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR, + RX_BIT_CHAN(chan)); return 0; } static void stm32_ipcc_shutdown(struct mbox_chan *link) { - unsigned int chan = (unsigned int)link->con_priv; + unsigned long chan = (unsigned long)link->con_priv; struct stm32_ipcc *ipcc = container_of(link->mbox, struct stm32_ipcc, controller); /* mask rx/tx interrupt */ - stm32_ipcc_set_bits(ipcc->reg_proc + IPCC_XMR, + stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR, RX_BIT_CHAN(chan) | TX_BIT_CHAN(chan)); clk_disable_unprepare(ipcc->clk); @@ -192,8 +206,7 @@ 
static int stm32_ipcc_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct stm32_ipcc *ipcc; - struct resource *res; - unsigned int i; + unsigned long i; int ret; u32 ip_ver; static const char * const irq_name[] = {"rx", "tx"}; @@ -208,6 +221,8 @@ static int stm32_ipcc_probe(struct platform_device *pdev) if (!ipcc) return -ENOMEM; + spin_lock_init(&ipcc->lock); + /* proc_id */ if (of_property_read_u32(np, "st,proc-id", &ipcc->proc_id)) { dev_err(dev, "Missing st,proc-id\n"); @@ -220,8 +235,7 @@ static int stm32_ipcc_probe(struct platform_device *pdev) } /* regs */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ipcc->reg_base = devm_ioremap_resource(dev, res); + ipcc->reg_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ipcc->reg_base)) return PTR_ERR(ipcc->reg_base); @@ -240,9 +254,8 @@ static int stm32_ipcc_probe(struct platform_device *pdev) /* irq */ for (i = 0; i < IPCC_IRQ_NUM; i++) { - ipcc->irqs[i] = of_irq_get_byname(dev->of_node, irq_name[i]); + ipcc->irqs[i] = platform_get_irq_byname(pdev, irq_name[i]); if (ipcc->irqs[i] < 0) { - dev_err(dev, "no IRQ specified %s\n", irq_name[i]); ret = ipcc->irqs[i]; goto err_clk; } @@ -251,33 +264,26 @@ static int stm32_ipcc_probe(struct platform_device *pdev) irq_thread[i], IRQF_ONESHOT, dev_name(dev), ipcc); if (ret) { - dev_err(dev, "failed to request irq %d (%d)\n", i, ret); + dev_err(dev, "failed to request irq %lu (%d)\n", i, ret); goto err_clk; } } /* mask and enable rx/tx irq */ - stm32_ipcc_set_bits(ipcc->reg_proc + IPCC_XMR, + stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR, RX_BIT_MASK | TX_BIT_MASK); - stm32_ipcc_set_bits(ipcc->reg_proc + IPCC_XCR, XCR_RXOIE | XCR_TXOIE); + stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XCR, + XCR_RXOIE | XCR_TXOIE); /* wakeup */ if (of_property_read_bool(np, "wakeup-source")) { - ipcc->wkp = of_irq_get_byname(dev->of_node, "wakeup"); - if (ipcc->wkp < 0) { - dev_err(dev, "could not get wakeup IRQ\n"); - ret = ipcc->wkp; - goto err_clk; - } + device_set_wakeup_capable(dev, true); - device_init_wakeup(dev, true); - ret = dev_pm_set_dedicated_wake_irq(dev, ipcc->wkp); + ret = dev_pm_set_wake_irq(dev, ipcc->irqs[IPCC_IRQ_RX]); if (ret) { dev_err(dev, "Failed to set wake up irq\n"); goto err_init_wkp; } - } else { - device_init_wakeup(dev, false); } /* mailbox controller */ @@ -316,38 +322,26 @@ static int stm32_ipcc_probe(struct platform_device *pdev) return 0; err_irq_wkp: - if (ipcc->wkp) + if (of_property_read_bool(np, "wakeup-source")) dev_pm_clear_wake_irq(dev); err_init_wkp: - device_init_wakeup(dev, false); + device_set_wakeup_capable(dev, false); err_clk: clk_disable_unprepare(ipcc->clk); return ret; } -static int stm32_ipcc_remove(struct platform_device *pdev) +static void stm32_ipcc_remove(struct platform_device *pdev) { - struct stm32_ipcc *ipcc = platform_get_drvdata(pdev); + struct device *dev = &pdev->dev; - if (ipcc->wkp) + if (of_property_read_bool(dev->of_node, "wakeup-source")) dev_pm_clear_wake_irq(&pdev->dev); - device_init_wakeup(&pdev->dev, false); - - return 0; + device_set_wakeup_capable(dev, false); } #ifdef CONFIG_PM_SLEEP -static void stm32_ipcc_set_irq_wake(struct device *dev, bool enable) -{ - struct stm32_ipcc *ipcc = dev_get_drvdata(dev); - unsigned int i; - - if (device_may_wakeup(dev)) - for (i = 0; i < IPCC_IRQ_NUM; i++) - irq_set_irq_wake(ipcc->irqs[i], enable); -} - static int stm32_ipcc_suspend(struct device *dev) { struct stm32_ipcc *ipcc = dev_get_drvdata(dev); @@ 
-355,8 +349,6 @@ static int stm32_ipcc_suspend(struct device *dev) ipcc->xmr = readl_relaxed(ipcc->reg_proc + IPCC_XMR); ipcc->xcr = readl_relaxed(ipcc->reg_proc + IPCC_XCR); - stm32_ipcc_set_irq_wake(dev, true); - return 0; } @@ -364,8 +356,6 @@ static int stm32_ipcc_resume(struct device *dev) { struct stm32_ipcc *ipcc = dev_get_drvdata(dev); - stm32_ipcc_set_irq_wake(dev, false); - writel_relaxed(ipcc->xmr, ipcc->reg_proc + IPCC_XMR); writel_relaxed(ipcc->xcr, ipcc->reg_proc + IPCC_XCR); diff --git a/drivers/mailbox/sun6i-msgbox.c b/drivers/mailbox/sun6i-msgbox.c new file mode 100644 index 000000000000..6ba6920f4645 --- /dev/null +++ b/drivers/mailbox/sun6i-msgbox.c @@ -0,0 +1,317 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (c) 2017-2019 Samuel Holland <samuel@sholland.org> + +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/mailbox_controller.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/platform_device.h> +#include <linux/reset.h> +#include <linux/spinlock.h> + +#define NUM_CHANS 8 + +#define CTRL_REG(n) (0x0000 + 0x4 * ((n) / 4)) +#define CTRL_RX(n) BIT(0 + 8 * ((n) % 4)) +#define CTRL_TX(n) BIT(4 + 8 * ((n) % 4)) + +#define REMOTE_IRQ_EN_REG 0x0040 +#define REMOTE_IRQ_STAT_REG 0x0050 +#define LOCAL_IRQ_EN_REG 0x0060 +#define LOCAL_IRQ_STAT_REG 0x0070 + +#define RX_IRQ(n) BIT(0 + 2 * (n)) +#define RX_IRQ_MASK 0x5555 +#define TX_IRQ(n) BIT(1 + 2 * (n)) +#define TX_IRQ_MASK 0xaaaa + +#define FIFO_STAT_REG(n) (0x0100 + 0x4 * (n)) +#define FIFO_STAT_MASK GENMASK(0, 0) + +#define MSG_STAT_REG(n) (0x0140 + 0x4 * (n)) +#define MSG_STAT_MASK GENMASK(2, 0) + +#define MSG_DATA_REG(n) (0x0180 + 0x4 * (n)) + +#define mbox_dbg(mbox, ...) dev_dbg((mbox)->controller.dev, __VA_ARGS__) + +struct sun6i_msgbox { + struct mbox_controller controller; + struct clk *clk; + spinlock_t lock; + void __iomem *regs; +}; + +static bool sun6i_msgbox_last_tx_done(struct mbox_chan *chan); +static bool sun6i_msgbox_peek_data(struct mbox_chan *chan); + +static inline int channel_number(struct mbox_chan *chan) +{ + return chan - chan->mbox->chans; +} + +static inline struct sun6i_msgbox *to_sun6i_msgbox(struct mbox_chan *chan) +{ + return chan->con_priv; +} + +static irqreturn_t sun6i_msgbox_irq(int irq, void *dev_id) +{ + struct sun6i_msgbox *mbox = dev_id; + uint32_t status; + int n; + + /* Only examine channels that are currently enabled. */ + status = readl(mbox->regs + LOCAL_IRQ_EN_REG) & + readl(mbox->regs + LOCAL_IRQ_STAT_REG); + + if (!(status & RX_IRQ_MASK)) + return IRQ_NONE; + + for (n = 0; n < NUM_CHANS; ++n) { + struct mbox_chan *chan = &mbox->controller.chans[n]; + + if (!(status & RX_IRQ(n))) + continue; + + while (sun6i_msgbox_peek_data(chan)) { + uint32_t msg = readl(mbox->regs + MSG_DATA_REG(n)); + + mbox_dbg(mbox, "Channel %d received 0x%08x\n", n, msg); + mbox_chan_received_data(chan, &msg); + } + + /* The IRQ can be cleared only once the FIFO is empty. */ + writel(RX_IRQ(n), mbox->regs + LOCAL_IRQ_STAT_REG); + } + + return IRQ_HANDLED; +} + +static int sun6i_msgbox_send_data(struct mbox_chan *chan, void *data) +{ + struct sun6i_msgbox *mbox = to_sun6i_msgbox(chan); + int n = channel_number(chan); + uint32_t msg = *(uint32_t *)data; + + /* Using a channel backwards gets the hardware into a bad state. 
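Writing to a channel that the coprocessor has configured for reception is therefore skipped here, with a one-time warning, rather than attempted.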
*/ + if (WARN_ON_ONCE(!(readl(mbox->regs + CTRL_REG(n)) & CTRL_TX(n)))) + return 0; + + writel(msg, mbox->regs + MSG_DATA_REG(n)); + mbox_dbg(mbox, "Channel %d sent 0x%08x\n", n, msg); + + return 0; +} + +static int sun6i_msgbox_startup(struct mbox_chan *chan) +{ + struct sun6i_msgbox *mbox = to_sun6i_msgbox(chan); + int n = channel_number(chan); + + /* The coprocessor is responsible for setting channel directions. */ + if (readl(mbox->regs + CTRL_REG(n)) & CTRL_RX(n)) { + /* Flush the receive FIFO. */ + while (sun6i_msgbox_peek_data(chan)) + readl(mbox->regs + MSG_DATA_REG(n)); + writel(RX_IRQ(n), mbox->regs + LOCAL_IRQ_STAT_REG); + + /* Enable the receive IRQ. */ + spin_lock(&mbox->lock); + writel(readl(mbox->regs + LOCAL_IRQ_EN_REG) | RX_IRQ(n), + mbox->regs + LOCAL_IRQ_EN_REG); + spin_unlock(&mbox->lock); + } + + mbox_dbg(mbox, "Channel %d startup complete\n", n); + + return 0; +} + +static void sun6i_msgbox_shutdown(struct mbox_chan *chan) +{ + struct sun6i_msgbox *mbox = to_sun6i_msgbox(chan); + int n = channel_number(chan); + + if (readl(mbox->regs + CTRL_REG(n)) & CTRL_RX(n)) { + /* Disable the receive IRQ. */ + spin_lock(&mbox->lock); + writel(readl(mbox->regs + LOCAL_IRQ_EN_REG) & ~RX_IRQ(n), + mbox->regs + LOCAL_IRQ_EN_REG); + spin_unlock(&mbox->lock); + + /* Attempt to flush the FIFO until the IRQ is cleared. */ + do { + while (sun6i_msgbox_peek_data(chan)) + readl(mbox->regs + MSG_DATA_REG(n)); + writel(RX_IRQ(n), mbox->regs + LOCAL_IRQ_STAT_REG); + } while (readl(mbox->regs + LOCAL_IRQ_STAT_REG) & RX_IRQ(n)); + } + + mbox_dbg(mbox, "Channel %d shutdown complete\n", n); +} + +static bool sun6i_msgbox_last_tx_done(struct mbox_chan *chan) +{ + struct sun6i_msgbox *mbox = to_sun6i_msgbox(chan); + int n = channel_number(chan); + + /* + * The hardware allows snooping on the remote user's IRQ statuses. + * We consider a message to be acknowledged only once the receive IRQ + * for that channel is cleared. Since the receive IRQ for a channel + * cannot be cleared until the FIFO for that channel is empty, this + * ensures that the message has actually been read. It also gives the + * recipient an opportunity to perform minimal processing before + * acknowledging the message. 
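+	 * In other words, "TX done" here means consumed by the remote,
+	 * not merely written into the FIFO.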
+ */ + return !(readl(mbox->regs + REMOTE_IRQ_STAT_REG) & RX_IRQ(n)); +} + +static bool sun6i_msgbox_peek_data(struct mbox_chan *chan) +{ + struct sun6i_msgbox *mbox = to_sun6i_msgbox(chan); + int n = channel_number(chan); + + return readl(mbox->regs + MSG_STAT_REG(n)) & MSG_STAT_MASK; +} + +static const struct mbox_chan_ops sun6i_msgbox_chan_ops = { + .send_data = sun6i_msgbox_send_data, + .startup = sun6i_msgbox_startup, + .shutdown = sun6i_msgbox_shutdown, + .last_tx_done = sun6i_msgbox_last_tx_done, + .peek_data = sun6i_msgbox_peek_data, +}; + +static int sun6i_msgbox_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mbox_chan *chans; + struct reset_control *reset; + struct sun6i_msgbox *mbox; + int i, ret; + + mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL); + if (!mbox) + return -ENOMEM; + + chans = devm_kcalloc(dev, NUM_CHANS, sizeof(*chans), GFP_KERNEL); + if (!chans) + return -ENOMEM; + + for (i = 0; i < NUM_CHANS; ++i) + chans[i].con_priv = mbox; + + mbox->clk = devm_clk_get(dev, NULL); + if (IS_ERR(mbox->clk)) { + ret = PTR_ERR(mbox->clk); + dev_err(dev, "Failed to get clock: %d\n", ret); + return ret; + } + + ret = clk_prepare_enable(mbox->clk); + if (ret) { + dev_err(dev, "Failed to enable clock: %d\n", ret); + return ret; + } + + reset = devm_reset_control_get_exclusive(dev, NULL); + if (IS_ERR(reset)) { + ret = PTR_ERR(reset); + dev_err(dev, "Failed to get reset control: %d\n", ret); + goto err_disable_unprepare; + } + + /* + * NOTE: We rely on platform firmware to preconfigure the channel + * directions, and we share this hardware block with other firmware + * that runs concurrently with Linux (e.g. a trusted monitor). + * + * Therefore, we do *not* assert the reset line if probing fails or + * when removing the device. + */ + ret = reset_control_deassert(reset); + if (ret) { + dev_err(dev, "Failed to deassert reset: %d\n", ret); + goto err_disable_unprepare; + } + + mbox->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(mbox->regs)) { + ret = PTR_ERR(mbox->regs); + dev_err(dev, "Failed to map MMIO resource: %d\n", ret); + goto err_disable_unprepare; + } + + /* Disable all IRQs for this end of the msgbox. */ + writel(0, mbox->regs + LOCAL_IRQ_EN_REG); + + ret = devm_request_irq(dev, irq_of_parse_and_map(dev->of_node, 0), + sun6i_msgbox_irq, 0, dev_name(dev), mbox); + if (ret) { + dev_err(dev, "Failed to register IRQ handler: %d\n", ret); + goto err_disable_unprepare; + } + + mbox->controller.dev = dev; + mbox->controller.ops = &sun6i_msgbox_chan_ops; + mbox->controller.chans = chans; + mbox->controller.num_chans = NUM_CHANS; + mbox->controller.txdone_irq = false; + mbox->controller.txdone_poll = true; + mbox->controller.txpoll_period = 5; + + spin_lock_init(&mbox->lock); + platform_set_drvdata(pdev, mbox); + + ret = mbox_controller_register(&mbox->controller); + if (ret) { + dev_err(dev, "Failed to register controller: %d\n", ret); + goto err_disable_unprepare; + } + + return 0; + +err_disable_unprepare: + clk_disable_unprepare(mbox->clk); + + return ret; +} + +static void sun6i_msgbox_remove(struct platform_device *pdev) +{ + struct sun6i_msgbox *mbox = platform_get_drvdata(pdev); + + mbox_controller_unregister(&mbox->controller); + /* See the comment in sun6i_msgbox_probe about the reset line. 
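Only the clock is released here; the reset line is deliberately left deasserted.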
*/ + clk_disable_unprepare(mbox->clk); +} + +static const struct of_device_id sun6i_msgbox_of_match[] = { + { .compatible = "allwinner,sun6i-a31-msgbox", }, + {}, +}; +MODULE_DEVICE_TABLE(of, sun6i_msgbox_of_match); + +static struct platform_driver sun6i_msgbox_driver = { + .driver = { + .name = "sun6i-msgbox", + .of_match_table = sun6i_msgbox_of_match, + }, + .probe = sun6i_msgbox_probe, + .remove = sun6i_msgbox_remove, +}; +module_platform_driver(sun6i_msgbox_driver); + +MODULE_AUTHOR("Samuel Holland <samuel@sholland.org>"); +MODULE_DESCRIPTION("Allwinner sun6i/sun8i/sun9i/sun50i Message Box"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c index e443f6a2ec4b..ed9a0bb2bcd8 100644 --- a/drivers/mailbox/tegra-hsp.c +++ b/drivers/mailbox/tegra-hsp.c @@ -1,14 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. + * Copyright (c) 2016-2025, NVIDIA CORPORATION. All rights reserved. */ #include <linux/delay.h> @@ -16,11 +8,12 @@ #include <linux/io.h> #include <linux/mailbox_controller.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/pm.h> #include <linux/slab.h> +#include <soc/tegra/fuse.h> + #include <dt-bindings/mailbox/tegra186-hsp.h> #include "mailbox.h" @@ -35,12 +28,6 @@ #define HSP_INT_FULL_MASK 0xff #define HSP_INT_DIMENSIONING 0x380 -#define HSP_nSM_SHIFT 0 -#define HSP_nSS_SHIFT 4 -#define HSP_nAS_SHIFT 8 -#define HSP_nDB_SHIFT 12 -#define HSP_nSI_SHIFT 16 -#define HSP_nINT_MASK 0xf #define HSP_DB_TRIGGER 0x0 #define HSP_DB_ENABLE 0x4 @@ -52,10 +39,18 @@ #define HSP_SM_SHRD_MBOX_FULL_INT_IE 0x04 #define HSP_SM_SHRD_MBOX_EMPTY_INT_IE 0x08 +#define HSP_SHRD_MBOX_TYPE1_TAG 0x40 +#define HSP_SHRD_MBOX_TYPE1_DATA0 0x48 +#define HSP_SHRD_MBOX_TYPE1_DATA1 0x4c +#define HSP_SHRD_MBOX_TYPE1_DATA2 0x50 +#define HSP_SHRD_MBOX_TYPE1_DATA3 0x54 + #define HSP_DB_CCPLEX 1 #define HSP_DB_BPMP 3 #define HSP_DB_MAX 7 +#define HSP_MBOX_TYPE_MASK 0xff + struct tegra_hsp_channel; struct tegra_hsp; @@ -73,8 +68,14 @@ struct tegra_hsp_doorbell { unsigned int index; }; +struct tegra_hsp_sm_ops { + void (*send)(struct tegra_hsp_channel *channel, void *data); + void (*recv)(struct tegra_hsp_channel *channel); +}; + struct tegra_hsp_mailbox { struct tegra_hsp_channel channel; + const struct tegra_hsp_sm_ops *ops; unsigned int index; bool producer; }; @@ -88,6 +89,22 @@ struct tegra_hsp_db_map { struct tegra_hsp_soc { const struct tegra_hsp_db_map *map; bool has_per_mb_ie; + bool has_128_bit_mb; + unsigned int reg_stride; + + /* Shifts for dimensioning register. */ + unsigned int si_shift; + unsigned int db_shift; + unsigned int as_shift; + unsigned int ss_shift; + unsigned int sm_shift; + + /* Masks for dimensioning register. 
*/ + unsigned int si_mask; + unsigned int db_mask; + unsigned int as_mask; + unsigned int ss_mask; + unsigned int sm_mask; }; struct tegra_hsp { @@ -104,7 +121,9 @@ struct tegra_hsp { unsigned int num_ss; unsigned int num_db; unsigned int num_si; + spinlock_t lock; + struct lock_class_key lock_key; struct list_head doorbells; struct tegra_hsp_mailbox *mailboxes; @@ -212,8 +231,7 @@ static irqreturn_t tegra_hsp_shared_irq(int irq, void *data) { struct tegra_hsp *hsp = data; unsigned long bit, mask; - u32 status, value; - void *msg; + u32 status; status = tegra_hsp_readl(hsp, HSP_INT_IR) & hsp->mask; @@ -249,25 +267,8 @@ static irqreturn_t tegra_hsp_shared_irq(int irq, void *data) for_each_set_bit(bit, &mask, hsp->num_sm) { struct tegra_hsp_mailbox *mb = &hsp->mailboxes[bit]; - if (!mb->producer) { - value = tegra_hsp_channel_readl(&mb->channel, - HSP_SM_SHRD_MBOX); - value &= ~HSP_SM_SHRD_MBOX_FULL; - msg = (void *)(unsigned long)value; - mbox_chan_received_data(mb->channel.chan, msg); - - /* - * Need to clear all bits here since some producers, - * such as TCU, depend on fields in the register - * getting cleared by the consumer. - * - * The mailbox API doesn't give the consumers a way - * of doing that explicitly, so we have to make sure - * we cover all possible cases. - */ - tegra_hsp_channel_writel(&mb->channel, 0x0, - HSP_SM_SHRD_MBOX); - } + if (!mb->producer) + mb->ops->recv(&mb->channel); } return IRQ_HANDLED; @@ -286,7 +287,7 @@ tegra_hsp_doorbell_create(struct tegra_hsp *hsp, const char *name, return ERR_PTR(-ENOMEM); offset = (1 + (hsp->num_sm / 2) + hsp->num_ss + hsp->num_as) * SZ_64K; - offset += index * 0x100; + offset += index * hsp->soc->reg_stride; db->channel.regs = hsp->regs + offset; db->channel.hsp = hsp; @@ -330,7 +331,12 @@ static int tegra_hsp_doorbell_startup(struct mbox_chan *chan) if (!ccplex) return -ENODEV; - if (!tegra_hsp_doorbell_can_ring(db)) + /* + * On simulation platforms the BPMP hasn't had a chance yet to mark + * the doorbell as ringable by the CCPLEX, so we want to skip extra + * checks here. + */ + if (tegra_is_silicon() && !tegra_hsp_doorbell_can_ring(db)) return -ENODEV; spin_lock_irqsave(&hsp->lock, flags); @@ -371,21 +377,99 @@ static const struct mbox_chan_ops tegra_hsp_db_ops = { .shutdown = tegra_hsp_doorbell_shutdown, }; +static void tegra_hsp_sm_send32(struct tegra_hsp_channel *channel, void *data) +{ + u32 value; + + /* copy data and mark mailbox full */ + value = (u32)(unsigned long)data; + value |= HSP_SM_SHRD_MBOX_FULL; + + tegra_hsp_channel_writel(channel, value, HSP_SM_SHRD_MBOX); +} + +static void tegra_hsp_sm_recv32(struct tegra_hsp_channel *channel) +{ + u32 value; + void *msg; + + value = tegra_hsp_channel_readl(channel, HSP_SM_SHRD_MBOX); + value &= ~HSP_SM_SHRD_MBOX_FULL; + msg = (void *)(unsigned long)value; + + /* + * Need to clear all bits here since some producers, such as TCU, depend + * on fields in the register getting cleared by the consumer. + * + * The mailbox API doesn't give the consumers a way of doing that + * explicitly, so we have to make sure we cover all possible cases. 
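+	 * Clearing the register also drops HSP_SM_SHRD_MBOX_FULL, which
+	 * signals the producer that the mailbox is free again.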
+ */ + tegra_hsp_channel_writel(channel, 0x0, HSP_SM_SHRD_MBOX); + + mbox_chan_received_data(channel->chan, msg); +} + +static const struct tegra_hsp_sm_ops tegra_hsp_sm_32bit_ops = { + .send = tegra_hsp_sm_send32, + .recv = tegra_hsp_sm_recv32, +}; + +static void tegra_hsp_sm_send128(struct tegra_hsp_channel *channel, void *data) +{ + u32 value[4]; + + memcpy(value, data, sizeof(value)); + + /* Copy data */ + tegra_hsp_channel_writel(channel, value[0], HSP_SHRD_MBOX_TYPE1_DATA0); + tegra_hsp_channel_writel(channel, value[1], HSP_SHRD_MBOX_TYPE1_DATA1); + tegra_hsp_channel_writel(channel, value[2], HSP_SHRD_MBOX_TYPE1_DATA2); + tegra_hsp_channel_writel(channel, value[3], HSP_SHRD_MBOX_TYPE1_DATA3); + + /* Update tag to mark mailbox full */ + tegra_hsp_channel_writel(channel, HSP_SM_SHRD_MBOX_FULL, + HSP_SHRD_MBOX_TYPE1_TAG); +} + +static void tegra_hsp_sm_recv128(struct tegra_hsp_channel *channel) +{ + u32 value[4]; + void *msg; + + value[0] = tegra_hsp_channel_readl(channel, HSP_SHRD_MBOX_TYPE1_DATA0); + value[1] = tegra_hsp_channel_readl(channel, HSP_SHRD_MBOX_TYPE1_DATA1); + value[2] = tegra_hsp_channel_readl(channel, HSP_SHRD_MBOX_TYPE1_DATA2); + value[3] = tegra_hsp_channel_readl(channel, HSP_SHRD_MBOX_TYPE1_DATA3); + + msg = (void *)(unsigned long)value; + + /* + * Clear data registers and tag. + */ + tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_DATA0); + tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_DATA1); + tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_DATA2); + tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_DATA3); + tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_TAG); + + mbox_chan_received_data(channel->chan, msg); +} + +static const struct tegra_hsp_sm_ops tegra_hsp_sm_128bit_ops = { + .send = tegra_hsp_sm_send128, + .recv = tegra_hsp_sm_recv128, +}; + static int tegra_hsp_mailbox_send_data(struct mbox_chan *chan, void *data) { struct tegra_hsp_mailbox *mb = chan->con_priv; struct tegra_hsp *hsp = mb->channel.hsp; unsigned long flags; - u32 value; if (WARN_ON(!mb->producer)) return -EPERM; - /* copy data and mark mailbox full */ - value = (u32)(unsigned long)data; - value |= HSP_SM_SHRD_MBOX_FULL; - - tegra_hsp_channel_writel(&mb->channel, value, HSP_SM_SHRD_MBOX); + mb->ops->send(&mb->channel, data); /* enable EMPTY interrupt for the shared mailbox */ spin_lock_irqsave(&hsp->lock, flags); @@ -411,6 +495,11 @@ static int tegra_hsp_mailbox_flush(struct mbox_chan *chan, value = tegra_hsp_channel_readl(ch, HSP_SM_SHRD_MBOX); if ((value & HSP_SM_SHRD_MBOX_FULL) == 0) { mbox_chan_txdone(chan, 0); + + /* Wait until channel is empty */ + if (chan->active_req != NULL) + continue; + return 0; } @@ -546,12 +635,21 @@ static struct mbox_chan *tegra_hsp_sm_xlate(struct mbox_controller *mbox, index = args->args[1] & TEGRA_HSP_SM_MASK; - if (type != TEGRA_HSP_MBOX_TYPE_SM || !hsp->shared_irqs || - index >= hsp->num_sm) + if ((type & HSP_MBOX_TYPE_MASK) != TEGRA_HSP_MBOX_TYPE_SM || + !hsp->shared_irqs || index >= hsp->num_sm) return ERR_PTR(-ENODEV); mb = &hsp->mailboxes[index]; + if (type & TEGRA_HSP_MBOX_TYPE_SM_128BIT) { + if (!hsp->soc->has_128_bit_mb) + return ERR_PTR(-ENODEV); + + mb->ops = &tegra_hsp_sm_128bit_ops; + } else { + mb->ops = &tegra_hsp_sm_32bit_ops; + } + if ((args->args[1] & TEGRA_HSP_SM_FLAG_TX) == 0) mb->producer = false; else @@ -639,7 +737,6 @@ static int tegra_hsp_request_shared_irq(struct tegra_hsp *hsp) static int tegra_hsp_probe(struct platform_device *pdev) { struct tegra_hsp *hsp; - struct resource 
*res; unsigned int i; u32 value; int err; @@ -653,19 +750,18 @@ static int tegra_hsp_probe(struct platform_device *pdev) INIT_LIST_HEAD(&hsp->doorbells); spin_lock_init(&hsp->lock); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - hsp->regs = devm_ioremap_resource(&pdev->dev, res); + hsp->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(hsp->regs)) return PTR_ERR(hsp->regs); value = tegra_hsp_readl(hsp, HSP_INT_DIMENSIONING); - hsp->num_sm = (value >> HSP_nSM_SHIFT) & HSP_nINT_MASK; - hsp->num_ss = (value >> HSP_nSS_SHIFT) & HSP_nINT_MASK; - hsp->num_as = (value >> HSP_nAS_SHIFT) & HSP_nINT_MASK; - hsp->num_db = (value >> HSP_nDB_SHIFT) & HSP_nINT_MASK; - hsp->num_si = (value >> HSP_nSI_SHIFT) & HSP_nINT_MASK; + hsp->num_sm = (value >> hsp->soc->sm_shift) & hsp->soc->sm_mask; + hsp->num_ss = (value >> hsp->soc->ss_shift) & hsp->soc->ss_mask; + hsp->num_as = (value >> hsp->soc->as_shift) & hsp->soc->as_mask; + hsp->num_db = (value >> hsp->soc->db_shift) & hsp->soc->db_mask; + hsp->num_si = (value >> hsp->soc->si_shift) & hsp->soc->si_mask; - err = platform_get_irq_byname(pdev, "doorbell"); + err = platform_get_irq_byname_optional(pdev, "doorbell"); if (err >= 0) hsp->doorbell_irq = err; @@ -685,7 +781,7 @@ static int tegra_hsp_probe(struct platform_device *pdev) if (!name) return -ENOMEM; - err = platform_get_irq_byname(pdev, name); + err = platform_get_irq_byname_optional(pdev, name); if (err >= 0) { hsp->shared_irqs[i] = err; count++; @@ -776,25 +872,45 @@ static int tegra_hsp_probe(struct platform_device *pdev) return err; } + lockdep_register_key(&hsp->lock_key); + lockdep_set_class(&hsp->lock, &hsp->lock_key); + return 0; } -static int tegra_hsp_resume(struct device *dev) +static void tegra_hsp_remove(struct platform_device *pdev) +{ + struct tegra_hsp *hsp = platform_get_drvdata(pdev); + + lockdep_unregister_key(&hsp->lock_key); +} + +static int __maybe_unused tegra_hsp_resume(struct device *dev) { struct tegra_hsp *hsp = dev_get_drvdata(dev); unsigned int i; + struct tegra_hsp_doorbell *db; - for (i = 0; i < hsp->num_sm; i++) { - struct tegra_hsp_mailbox *mb = &hsp->mailboxes[i]; + list_for_each_entry(db, &hsp->doorbells, list) { + if (db->channel.chan) + tegra_hsp_doorbell_startup(db->channel.chan); + } + + if (hsp->mailboxes) { + for (i = 0; i < hsp->num_sm; i++) { + struct tegra_hsp_mailbox *mb = &hsp->mailboxes[i]; - if (mb->channel.chan->cl) - tegra_hsp_mailbox_startup(mb->channel.chan); + if (mb->channel.chan->cl) + tegra_hsp_mailbox_startup(mb->channel.chan); + } } return 0; } -static SIMPLE_DEV_PM_OPS(tegra_hsp_pm_ops, NULL, tegra_hsp_resume); +static const struct dev_pm_ops tegra_hsp_pm_ops = { + .resume_noirq = tegra_hsp_resume, +}; static const struct tegra_hsp_db_map tegra186_hsp_db_map[] = { { "ccplex", TEGRA_HSP_DB_MASTER_CCPLEX, HSP_DB_CCPLEX, }, @@ -805,16 +921,76 @@ static const struct tegra_hsp_db_map tegra186_hsp_db_map[] = { static const struct tegra_hsp_soc tegra186_hsp_soc = { .map = tegra186_hsp_db_map, .has_per_mb_ie = false, + .has_128_bit_mb = false, + .reg_stride = 0x100, + .si_shift = 16, + .db_shift = 12, + .as_shift = 8, + .ss_shift = 4, + .sm_shift = 0, + .si_mask = 0xf, + .db_mask = 0xf, + .as_mask = 0xf, + .ss_mask = 0xf, + .sm_mask = 0xf, }; static const struct tegra_hsp_soc tegra194_hsp_soc = { .map = tegra186_hsp_db_map, .has_per_mb_ie = true, + .has_128_bit_mb = false, + .reg_stride = 0x100, + .si_shift = 16, + .db_shift = 12, + .as_shift = 8, + .ss_shift = 4, + .sm_shift = 0, + .si_mask = 0xf, + .db_mask = 0xf, + .as_mask = 
0xf, + .ss_mask = 0xf, + .sm_mask = 0xf, +}; + +static const struct tegra_hsp_soc tegra234_hsp_soc = { + .map = tegra186_hsp_db_map, + .has_per_mb_ie = false, + .has_128_bit_mb = true, + .reg_stride = 0x100, + .si_shift = 16, + .db_shift = 12, + .as_shift = 8, + .ss_shift = 4, + .sm_shift = 0, + .si_mask = 0xf, + .db_mask = 0xf, + .as_mask = 0xf, + .ss_mask = 0xf, + .sm_mask = 0xf, +}; + +static const struct tegra_hsp_soc tegra264_hsp_soc = { + .map = tegra186_hsp_db_map, + .has_per_mb_ie = false, + .has_128_bit_mb = true, + .reg_stride = 0x1000, + .si_shift = 17, + .db_shift = 12, + .as_shift = 8, + .ss_shift = 4, + .sm_shift = 0, + .si_mask = 0x1f, + .db_mask = 0x1f, + .as_mask = 0xf, + .ss_mask = 0xf, + .sm_mask = 0xf, }; static const struct of_device_id tegra_hsp_match[] = { { .compatible = "nvidia,tegra186-hsp", .data = &tegra186_hsp_soc }, { .compatible = "nvidia,tegra194-hsp", .data = &tegra194_hsp_soc }, + { .compatible = "nvidia,tegra234-hsp", .data = &tegra234_hsp_soc }, + { .compatible = "nvidia,tegra264-hsp", .data = &tegra264_hsp_soc }, { } }; @@ -825,6 +1001,7 @@ static struct platform_driver tegra_hsp_driver = { .pm = &tegra_hsp_pm_ops, }, .probe = tegra_hsp_probe, + .remove = tegra_hsp_remove, }; static int __init tegra_hsp_init(void) diff --git a/drivers/mailbox/ti-msgmgr.c b/drivers/mailbox/ti-msgmgr.c index 88047d835211..8eb8df8d95a4 100644 --- a/drivers/mailbox/ti-msgmgr.c +++ b/drivers/mailbox/ti-msgmgr.c @@ -2,7 +2,7 @@ /* * Texas Instruments' Message Manager Driver * - * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015-2022 Texas Instruments Incorporated - https://www.ti.com/ * Nishanth Menon */ @@ -11,13 +11,14 @@ #include <linux/device.h> #include <linux/interrupt.h> #include <linux/io.h> +#include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/mailbox_controller.h> #include <linux/module.h> -#include <linux/of_device.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/platform_device.h> +#include <linux/property.h> #include <linux/soc/ti/ti-msgmgr.h> #define Q_DATA_OFFSET(proxy, queue, reg) \ @@ -100,6 +101,7 @@ struct ti_msgmgr_desc { * @queue_ctrl: Queue Control register * @chan: Mailbox channel * @rx_buff: Receive buffer pointer allocated at probe, max_message_size + * @polled_rx_mode: Use polling for rx instead of interrupts */ struct ti_queue_inst { char name[30]; @@ -113,6 +115,7 @@ struct ti_queue_inst { void __iomem *queue_ctrl; struct mbox_chan *chan; u32 *rx_buff; + bool polled_rx_mode; }; /** @@ -190,6 +193,73 @@ static inline bool ti_msgmgr_queue_is_error(const struct ti_msgmgr_desc *d, return val ? true : false; } +static int ti_msgmgr_queue_rx_data(struct mbox_chan *chan, struct ti_queue_inst *qinst, + const struct ti_msgmgr_desc *desc) +{ + int num_words; + struct ti_msgmgr_message message; + void __iomem *data_reg; + u32 *word_data; + + /* + * I have no idea about the protocol being used to communicate with the + * remote producer - 0 could be valid data, so I wont make a judgement + * of how many bytes I should be reading. Let the client figure this + * out.. I just read the full message and pass it on.. + */ + message.len = desc->max_message_size; + message.buf = (u8 *)qinst->rx_buff; + + /* + * NOTE about register access involved here: + * the hardware block is implemented with 32bit access operations and no + * support for data splitting. 
We don't want the hardware to misbehave + * with sub 32bit access - For example: if the last register read is + * split into byte wise access, it can result in the queue getting + * stuck or indeterminate behavior. An out of order read operation may + * result in weird data results as well. + * Hence, we do not use memcpy_fromio or __ioread32_copy here, instead + * we depend on readl for the purpose. + * + * Also note that the final register read automatically marks the + * queue message as read. + */ + for (data_reg = qinst->queue_buff_start, word_data = qinst->rx_buff, + num_words = (desc->max_message_size / sizeof(u32)); + num_words; num_words--, data_reg += sizeof(u32), word_data++) + *word_data = readl(data_reg); + + /* + * Last register read automatically clears the IRQ if only 1 message + * is pending - so send the data up the stack.. + * NOTE: Client is expected to be as optimal as possible, since + * we invoke the handler in IRQ context. + */ + mbox_chan_received_data(chan, (void *)&message); + + return 0; +} + +static int ti_msgmgr_queue_rx_poll_timeout(struct mbox_chan *chan, int timeout_us) +{ + struct device *dev = chan->mbox->dev; + struct ti_msgmgr_inst *inst = dev_get_drvdata(dev); + struct ti_queue_inst *qinst = chan->con_priv; + const struct ti_msgmgr_desc *desc = inst->desc; + int msg_count; + int ret; + + ret = readl_poll_timeout_atomic(qinst->queue_state, msg_count, + (msg_count & desc->status_cnt_mask), + 10, timeout_us); + if (ret != 0) + return ret; + + ti_msgmgr_queue_rx_data(chan, qinst, desc); + + return 0; +} + /** * ti_msgmgr_queue_rx_interrupt() - Interrupt handler for receive Queue * @irq: Interrupt number @@ -206,10 +276,7 @@ static irqreturn_t ti_msgmgr_queue_rx_interrupt(int irq, void *p) struct ti_msgmgr_inst *inst = dev_get_drvdata(dev); struct ti_queue_inst *qinst = chan->con_priv; const struct ti_msgmgr_desc *desc; - int msg_count, num_words; - struct ti_msgmgr_message message; - void __iomem *data_reg; - u32 *word_data; + int msg_count; if (WARN_ON(!inst)) { dev_err(dev, "no platform drv data??\n"); @@ -237,41 +304,7 @@ static irqreturn_t ti_msgmgr_queue_rx_interrupt(int irq, void *p) return IRQ_NONE; } - /* - * I have no idea about the protocol being used to communicate with the - * remote producer - 0 could be valid data, so I wont make a judgement - * of how many bytes I should be reading. Let the client figure this - * out.. I just read the full message and pass it on.. - */ - message.len = desc->max_message_size; - message.buf = (u8 *)qinst->rx_buff; - - /* - * NOTE about register access involved here: - * the hardware block is implemented with 32bit access operations and no - * support for data splitting. We don't want the hardware to misbehave - * with sub 32bit access - For example: if the last register read is - * split into byte wise access, it can result in the queue getting - * stuck or indeterminate behavior. An out of order read operation may - * result in weird data results as well. - * Hence, we do not use memcpy_fromio or __ioread32_copy here, instead - * we depend on readl for the purpose. - * - * Also note that the final register read automatically marks the - * queue message as read. - */ - for (data_reg = qinst->queue_buff_start, word_data = qinst->rx_buff, - num_words = (desc->max_message_size / sizeof(u32)); - num_words; num_words--, data_reg += sizeof(u32), word_data++) - *word_data = readl(data_reg); - - /* - * Last register read automatically clears the IRQ if only 1 message - * is pending - so send the data up the stack.. 
- * NOTE: Client is expected to be as optimal as possible, since - * we invoke the handler in IRQ context. - */ - mbox_chan_received_data(chan, (void *)&message); + ti_msgmgr_queue_rx_data(chan, qinst, desc); return IRQ_HANDLED; } @@ -336,6 +369,17 @@ static bool ti_msgmgr_last_tx_done(struct mbox_chan *chan) return msg_count ? false : true; } +static bool ti_msgmgr_chan_has_polled_queue_rx(struct mbox_chan *chan) +{ + struct ti_queue_inst *qinst; + + if (!chan) + return false; + + qinst = chan->con_priv; + return qinst->polled_rx_mode; +} + /** * ti_msgmgr_send_data() - Send data * @chan: Channel Pointer @@ -353,6 +397,7 @@ static int ti_msgmgr_send_data(struct mbox_chan *chan, void *data) struct ti_msgmgr_message *message = data; void __iomem *data_reg; u32 *word_data; + int ret = 0; if (WARN_ON(!inst)) { dev_err(dev, "no platform drv data??\n"); @@ -385,16 +430,27 @@ static int ti_msgmgr_send_data(struct mbox_chan *chan, void *data) /* Ensure all unused data is 0 */ data_trail &= 0xFFFFFFFF >> (8 * (sizeof(u32) - trail_bytes)); writel(data_trail, data_reg); - data_reg++; + data_reg += sizeof(u32); } + /* * 'data_reg' indicates next register to write. If we did not already * write on tx complete reg(last reg), we must do so for transmit + * In addition, we also need to make sure all intermediate data + * registers(if any required), are reset to 0 for TISCI backward + * compatibility to be maintained. */ - if (data_reg <= qinst->queue_buff_end) - writel(0, qinst->queue_buff_end); + while (data_reg <= qinst->queue_buff_end) { + writel(0, data_reg); + data_reg += sizeof(u32); + } - return 0; + /* If we are in polled mode, wait for a response before proceeding */ + if (ti_msgmgr_chan_has_polled_queue_rx(message->chan_rx)) + ret = ti_msgmgr_queue_rx_poll_timeout(message->chan_rx, + message->timeout_rx_ms * 1000); + + return ret; } /** @@ -642,6 +698,54 @@ static int ti_msgmgr_queue_setup(int idx, struct device *dev, return 0; } +static int ti_msgmgr_queue_rx_set_polled_mode(struct ti_queue_inst *qinst, bool enable) +{ + if (enable) { + disable_irq(qinst->irq); + qinst->polled_rx_mode = true; + } else { + enable_irq(qinst->irq); + qinst->polled_rx_mode = false; + } + + return 0; +} + +static int ti_msgmgr_suspend(struct device *dev) +{ + struct ti_msgmgr_inst *inst = dev_get_drvdata(dev); + struct ti_queue_inst *qinst; + int i; + + /* + * We must switch operation to polled mode now as drivers and the genpd + * layer may make late TI SCI calls to change clock and device states + * from the noirq phase of suspend. 
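+	 *
+	 * Interrupts are not serviced at that stage, so the response to
+	 * such a late call can only be retrieved by polling the queue
+	 * state register, which is what the polled RX mode does.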
+ */ + for (qinst = inst->qinsts, i = 0; i < inst->num_valid_queues; qinst++, i++) { + if (!qinst->is_tx) + ti_msgmgr_queue_rx_set_polled_mode(qinst, true); + } + + return 0; +} + +static int ti_msgmgr_resume(struct device *dev) +{ + struct ti_msgmgr_inst *inst = dev_get_drvdata(dev); + struct ti_queue_inst *qinst; + int i; + + for (qinst = inst->qinsts, i = 0; i < inst->num_valid_queues; qinst++, i++) { + if (!qinst->is_tx) + ti_msgmgr_queue_rx_set_polled_mode(qinst, false); + } + + return 0; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(ti_msgmgr_pm_ops, ti_msgmgr_suspend, ti_msgmgr_resume); + /* Queue operations */ static const struct mbox_chan_ops ti_msgmgr_chan_ops = { .startup = ti_msgmgr_queue_startup, @@ -706,9 +810,7 @@ MODULE_DEVICE_TABLE(of, ti_msgmgr_of_match); static int ti_msgmgr_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - const struct of_device_id *of_id; struct device_node *np; - struct resource *res; const struct ti_msgmgr_desc *desc; struct ti_msgmgr_inst *inst; struct ti_queue_inst *qinst; @@ -725,36 +827,26 @@ static int ti_msgmgr_probe(struct platform_device *pdev) } np = dev->of_node; - of_id = of_match_device(ti_msgmgr_of_match, dev); - if (!of_id) { - dev_err(dev, "OF data missing\n"); - return -EINVAL; - } - desc = of_id->data; - inst = devm_kzalloc(dev, sizeof(*inst), GFP_KERNEL); if (!inst) return -ENOMEM; inst->dev = dev; - inst->desc = desc; + inst->desc = desc = device_get_match_data(dev); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - desc->data_region_name); - inst->queue_proxy_region = devm_ioremap_resource(dev, res); + inst->queue_proxy_region = + devm_platform_ioremap_resource_byname(pdev, desc->data_region_name); if (IS_ERR(inst->queue_proxy_region)) return PTR_ERR(inst->queue_proxy_region); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - desc->status_region_name); - inst->queue_state_debug_region = devm_ioremap_resource(dev, res); + inst->queue_state_debug_region = + devm_platform_ioremap_resource_byname(pdev, desc->status_region_name); if (IS_ERR(inst->queue_state_debug_region)) return PTR_ERR(inst->queue_state_debug_region); if (desc->is_sproxy) { - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - desc->ctrl_region_name); - inst->queue_ctrl_region = devm_ioremap_resource(dev, res); + inst->queue_ctrl_region = + devm_platform_ioremap_resource_byname(pdev, desc->ctrl_region_name); if (IS_ERR(inst->queue_ctrl_region)) return PTR_ERR(inst->queue_ctrl_region); } @@ -828,7 +920,8 @@ static struct platform_driver ti_msgmgr_driver = { .probe = ti_msgmgr_probe, .driver = { .name = "ti-msgmgr", - .of_match_table = of_match_ptr(ti_msgmgr_of_match), + .of_match_table = ti_msgmgr_of_match, + .pm = &ti_msgmgr_pm_ops, }, }; module_platform_driver(ti_msgmgr_driver); diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c new file mode 100644 index 000000000000..967967b2b8a9 --- /dev/null +++ b/drivers/mailbox/zynqmp-ipi-mailbox.c @@ -0,0 +1,1041 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Xilinx Inter Processor Interrupt(IPI) Mailbox Driver + * + * Copyright (C) 2018 Xilinx, Inc. 
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/cpuhotplug.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox/zynqmp-ipi-message.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+
+/* IPI agent ID any */
+#define IPI_ID_ANY 0xFFUL
+
+/* indicate whether the ZynqMP IPI mailbox driver uses SMC or HVC calls */
+#define USE_SMC 0
+#define USE_HVC 1
+
+/* Default IPI SMC function IDs */
+#define SMC_IPI_MAILBOX_OPEN		0x82001000U
+#define SMC_IPI_MAILBOX_RELEASE		0x82001001U
+#define SMC_IPI_MAILBOX_STATUS_ENQUIRY	0x82001002U
+#define SMC_IPI_MAILBOX_NOTIFY		0x82001003U
+#define SMC_IPI_MAILBOX_ACK		0x82001004U
+#define SMC_IPI_MAILBOX_ENABLE_IRQ	0x82001005U
+#define SMC_IPI_MAILBOX_DISABLE_IRQ	0x82001006U
+
+/* IPI SMC Macros */
+#define IPI_SMC_ENQUIRY_DIRQ_MASK	0x00000001UL /* Flag to indicate if the
+						      * notification interrupt
+						      * is to be disabled.
+						      */
+#define IPI_SMC_ACK_EIRQ_MASK		0x00000001UL /* Flag to indicate if the
+						      * notification interrupt
+						      * is to be enabled.
+						      */
+
+/* IPI mailbox status */
+#define IPI_MB_STATUS_IDLE		0
+#define IPI_MB_STATUS_SEND_PENDING	1
+#define IPI_MB_STATUS_RECV_PENDING	2
+
+#define IPI_MB_CHNL_TX	0 /* IPI mailbox TX channel */
+#define IPI_MB_CHNL_RX	1 /* IPI mailbox RX channel */
+
+/* IPI Message Buffer Information */
+#define RESP_OFFSET	0x20U
+#define DEST_OFFSET	0x40U
+#define IPI_BUF_SIZE	0x20U
+#define DST_BIT_POS	9U
+#define SRC_BITMASK	GENMASK(11, 8)
+
+/* Macro to represent SGI type for IPI IRQs */
+#define IPI_IRQ_TYPE_SGI	2
+
+/*
+ * Module parameters
+ */
+static int tx_poll_period = 5;
+module_param_named(tx_poll_period, tx_poll_period, int, 0644);
+MODULE_PARM_DESC(tx_poll_period, "Poll period waiting for ack after send.");
+
+/**
+ * struct zynqmp_ipi_mchan - Description of a Xilinx ZynqMP IPI mailbox channel
+ * @is_opened: indicate if the IPI channel is opened
+ * @req_buf: local to remote request buffer start address
+ * @resp_buf: local to remote response buffer start address
+ * @req_buf_size: request buffer size
+ * @resp_buf_size: response buffer size
+ * @rx_buf: receive buffer to pass received message to client
+ * @chan_type: channel type
+ */
+struct zynqmp_ipi_mchan {
+	int is_opened;
+	void __iomem *req_buf;
+	void __iomem *resp_buf;
+	void *rx_buf;
+	size_t req_buf_size;
+	size_t resp_buf_size;
+	unsigned int chan_type;
+};
+
+struct zynqmp_ipi_mbox;
+
+typedef int (*setup_ipi_fn)(struct zynqmp_ipi_mbox *ipi_mbox, struct device_node *node);
+
+/**
+ * struct zynqmp_ipi_mbox - Description of a ZynqMP IPI mailbox
+ *                          platform data.
+ * @pdata:		pointer to the IPI private data
+ * @dev:		device pointer corresponding to the Xilinx ZynqMP
+ *			IPI mailbox
+ * @remote_id:		remote IPI agent ID
+ * @mbox:		mailbox controller
+ * @mchans:		array for channels, tx channel and rx channel.
+ * @setup_ipi_fn:	Function Pointer to set up IPI Channels
+ */
+struct zynqmp_ipi_mbox {
+	struct zynqmp_ipi_pdata *pdata;
+	struct device dev;
+	u32 remote_id;
+	struct mbox_controller mbox;
+	struct zynqmp_ipi_mchan mchans[2];
+	setup_ipi_fn setup_ipi_fn;
+};
+
+/**
+ * struct zynqmp_ipi_pdata - Description of a ZynqMP IPI agent platform data.
+
+/**
+ * struct zynqmp_ipi_pdata - Description of a ZynqMP IPI agent platform data.
+ *
+ * @dev: device pointer corresponding to the Xilinx ZynqMP
+ *       IPI agent
+ * @irq: IPI agent interrupt ID
+ * @irq_type: IPI SGI or SPI IRQ type
+ * @method: whether SMC or HVC is used for the IPI calls
+ * @local_id: local IPI agent ID
+ * @virq_sgi: IRQ number mapped to SGI
+ * @num_mboxes: number of mailboxes of this IPI agent
+ * @ipi_mboxes: IPI mailboxes of this IPI agent
+ */
+struct zynqmp_ipi_pdata {
+	struct device *dev;
+	int irq;
+	unsigned int irq_type;
+	unsigned int method;
+	u32 local_id;
+	int virq_sgi;
+	int num_mboxes;
+	struct zynqmp_ipi_mbox ipi_mboxes[] __counted_by(num_mboxes);
+};
+
+static DEFINE_PER_CPU(struct zynqmp_ipi_pdata *, per_cpu_pdata);
+
+static struct device_driver zynqmp_ipi_mbox_driver = {
+	.owner = THIS_MODULE,
+	.name = "zynqmp-ipi-mbox",
+};
+
+static void zynqmp_ipi_fw_call(struct zynqmp_ipi_mbox *ipi_mbox,
+			       unsigned long a0, unsigned long a3,
+			       struct arm_smccc_res *res)
+{
+	struct zynqmp_ipi_pdata *pdata = ipi_mbox->pdata;
+	unsigned long a1, a2;
+
+	a1 = pdata->local_id;
+	a2 = ipi_mbox->remote_id;
+	if (pdata->method == USE_SMC)
+		arm_smccc_smc(a0, a1, a2, a3, 0, 0, 0, 0, res);
+	else
+		arm_smccc_hvc(a0, a1, a2, a3, 0, 0, 0, 0, res);
+}
+
+/**
+ * zynqmp_ipi_interrupt - Interrupt handler for IPI notification
+ *
+ * @irq: Interrupt number
+ * @data: ZynqMP IPI mailbox platform data.
+ *
+ * Return: IRQ_NONE if the interrupt is not ours,
+ *         IRQ_HANDLED if the RX interrupt was successfully handled.
+ */
+static irqreturn_t zynqmp_ipi_interrupt(int irq, void *data)
+{
+	struct zynqmp_ipi_pdata *pdata = data;
+	struct mbox_chan *chan;
+	struct zynqmp_ipi_mbox *ipi_mbox;
+	struct zynqmp_ipi_mchan *mchan;
+	struct zynqmp_ipi_message *msg;
+	u64 arg0, arg3;
+	struct arm_smccc_res res;
+	int ret, i, status = IRQ_NONE;
+
+	(void)irq;
+	arg0 = SMC_IPI_MAILBOX_STATUS_ENQUIRY;
+	arg3 = IPI_SMC_ENQUIRY_DIRQ_MASK;
+	for (i = 0; i < pdata->num_mboxes; i++) {
+		ipi_mbox = &pdata->ipi_mboxes[i];
+		mchan = &ipi_mbox->mchans[IPI_MB_CHNL_RX];
+		chan = &ipi_mbox->mbox.chans[IPI_MB_CHNL_RX];
+		zynqmp_ipi_fw_call(ipi_mbox, arg0, arg3, &res);
+		ret = (int)(res.a0 & 0xFFFFFFFF);
+		if (ret > 0 && ret & IPI_MB_STATUS_RECV_PENDING) {
+			if (mchan->is_opened) {
+				msg = mchan->rx_buf;
+				if (msg) {
+					msg->len = mchan->req_buf_size;
+					memcpy_fromio(msg->data, mchan->req_buf,
+						      msg->len);
+				}
+				mbox_chan_received_data(chan, (void *)msg);
+				status = IRQ_HANDLED;
+			}
+		}
+	}
+	return status;
+}
+
+static irqreturn_t zynqmp_sgi_interrupt(int irq, void *data)
+{
+	struct zynqmp_ipi_pdata **pdata_ptr = data;
+	struct zynqmp_ipi_pdata *pdata = *pdata_ptr;
+
+	return zynqmp_ipi_interrupt(irq, pdata);
+}
+
+/**
+ * zynqmp_ipi_peek_data - Peek to see if there are any RX messages.
+ *
+ * @chan: Channel Pointer
+ *
+ * Return: 'true' if there is pending RX data, 'false' if there is none.
+ */
+static bool zynqmp_ipi_peek_data(struct mbox_chan *chan)
+{
+	struct device *dev = chan->mbox->dev;
+	struct zynqmp_ipi_mbox *ipi_mbox = dev_get_drvdata(dev);
+	struct zynqmp_ipi_mchan *mchan = chan->con_priv;
+	int ret;
+	u64 arg0;
+	struct arm_smccc_res res;
+
+	if (WARN_ON(!ipi_mbox)) {
+		dev_err(dev, "no platform drv data??\n");
+		return false;
+	}
+
+	arg0 = SMC_IPI_MAILBOX_STATUS_ENQUIRY;
+	zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
+	ret = (int)(res.a0 & 0xFFFFFFFF);
+
+	if (mchan->chan_type == IPI_MB_CHNL_TX) {
+		/* TX channel: check if the message has been acked
+		 * by the remote; if yes, a response is available.
+		 */
+		if (ret < 0 || ret & IPI_MB_STATUS_SEND_PENDING)
+			return false;
+		else
+			return true;
+	} else if (ret > 0 && ret & IPI_MB_STATUS_RECV_PENDING) {
+		/* RX channel: check if a message has arrived. */
+		return true;
+	}
+	return false;
+}
+
+/**
+ * zynqmp_ipi_last_tx_done - See if the last TX message has been sent
+ *
+ * @chan: Channel pointer
+ *
+ * Return: 'true' if there is no pending TX data, 'false' otherwise.
+ */
+static bool zynqmp_ipi_last_tx_done(struct mbox_chan *chan)
+{
+	struct device *dev = chan->mbox->dev;
+	struct zynqmp_ipi_mbox *ipi_mbox = dev_get_drvdata(dev);
+	struct zynqmp_ipi_mchan *mchan = chan->con_priv;
+	int ret;
+	u64 arg0;
+	struct arm_smccc_res res;
+
+	if (WARN_ON(!ipi_mbox)) {
+		dev_err(dev, "no platform drv data??\n");
+		return false;
+	}
+
+	if (mchan->chan_type == IPI_MB_CHNL_TX) {
+		/* We only need to check if the message has been taken
+		 * by the remote in the TX channel
+		 */
+		arg0 = SMC_IPI_MAILBOX_STATUS_ENQUIRY;
+		zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
+		/* Check the SMC call status, a0 of the result */
+		ret = (int)(res.a0 & 0xFFFFFFFF);
+		if (ret < 0 || ret & IPI_MB_STATUS_SEND_PENDING)
+			return false;
+		return true;
+	}
+	/* Always true for the response message in RX channel */
+	return true;
+}
+
+/**
+ * zynqmp_ipi_send_data - Send data
+ *
+ * @chan: Channel Pointer
+ * @data: Message Pointer
+ *
+ * Return: 0 on success, else an appropriate error code.
+ */
+static int zynqmp_ipi_send_data(struct mbox_chan *chan, void *data)
+{
+	struct device *dev = chan->mbox->dev;
+	struct zynqmp_ipi_mbox *ipi_mbox = dev_get_drvdata(dev);
+	struct zynqmp_ipi_mchan *mchan = chan->con_priv;
+	struct zynqmp_ipi_message *msg = data;
+	u64 arg0;
+	struct arm_smccc_res res;
+
+	if (WARN_ON(!ipi_mbox)) {
+		dev_err(dev, "no platform drv data??\n");
+		return -EINVAL;
+	}
+
+	if (mchan->chan_type == IPI_MB_CHNL_TX) {
+		/* Send request message */
+		if (msg && msg->len > mchan->req_buf_size && mchan->req_buf) {
+			dev_err(dev, "channel %d message length %u > max %lu\n",
+				mchan->chan_type, (unsigned int)msg->len,
+				mchan->req_buf_size);
+			return -EINVAL;
+		}
+		if (msg && msg->len && mchan->req_buf)
+			memcpy_toio(mchan->req_buf, msg->data, msg->len);
+		/* Kick IPI mailbox to send message */
+		arg0 = SMC_IPI_MAILBOX_NOTIFY;
+		zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
+	} else {
+		/* Send response message */
+		if (msg && msg->len > mchan->resp_buf_size && mchan->resp_buf) {
+			dev_err(dev, "channel %d message length %u > max %lu\n",
+				mchan->chan_type, (unsigned int)msg->len,
+				mchan->resp_buf_size);
+			return -EINVAL;
+		}
+		if (msg && msg->len && mchan->resp_buf)
+			memcpy_toio(mchan->resp_buf, msg->data, msg->len);
+		arg0 = SMC_IPI_MAILBOX_ACK;
+		zynqmp_ipi_fw_call(ipi_mbox, arg0, IPI_SMC_ACK_EIRQ_MASK,
+				   &res);
+	}
+	return 0;
+}
+
+/**
+ * zynqmp_ipi_startup - Startup the IPI channel
+ *
+ * @chan: Channel pointer
+ *
+ * Return: 0 on success, else a corresponding error code.
+ */
+static int zynqmp_ipi_startup(struct mbox_chan *chan)
+{
+	struct device *dev = chan->mbox->dev;
+	struct zynqmp_ipi_mbox *ipi_mbox = dev_get_drvdata(dev);
+	struct zynqmp_ipi_mchan *mchan = chan->con_priv;
+	u64 arg0;
+	struct arm_smccc_res res;
+	int ret = 0;
+	unsigned int nchan_type;
+
+	if (mchan->is_opened)
+		return 0;
+
+	/* If no channel has been opened, open the IPI mailbox */
+	nchan_type = (mchan->chan_type + 1) % 2;
+	if (!ipi_mbox->mchans[nchan_type].is_opened) {
+		arg0 = SMC_IPI_MAILBOX_OPEN;
+		zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
+		/* Check the SMC call status, a0 of the result */
+		ret = (int)(res.a0 & 0xFFFFFFFF);
+		if (ret < 0) {
+			dev_err(dev, "SMC to open the IPI channel failed.\n");
+			return ret;
+		}
+		ret = 0;
+	}
+
+	/* If it is RX channel, enable the IPI notification interrupt */
+	if (mchan->chan_type == IPI_MB_CHNL_RX) {
+		arg0 = SMC_IPI_MAILBOX_ENABLE_IRQ;
+		zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
+	}
+	mchan->is_opened = 1;
+
+	return ret;
+}
+
+/**
+ * zynqmp_ipi_shutdown - Shutdown the IPI channel
+ *
+ * @chan: Channel pointer
+ */
+static void zynqmp_ipi_shutdown(struct mbox_chan *chan)
+{
+	struct device *dev = chan->mbox->dev;
+	struct zynqmp_ipi_mbox *ipi_mbox = dev_get_drvdata(dev);
+	struct zynqmp_ipi_mchan *mchan = chan->con_priv;
+	u64 arg0;
+	struct arm_smccc_res res;
+	unsigned int chan_type;
+
+	if (!mchan->is_opened)
+		return;
+
+	/* If it is RX channel, disable notification interrupt */
+	chan_type = mchan->chan_type;
+	if (chan_type == IPI_MB_CHNL_RX) {
+		arg0 = SMC_IPI_MAILBOX_DISABLE_IRQ;
+		zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
+	}
+	/* Release IPI mailbox if no other channel is opened */
+	chan_type = (chan_type + 1) % 2;
+	if (!ipi_mbox->mchans[chan_type].is_opened) {
+		arg0 = SMC_IPI_MAILBOX_RELEASE;
+		zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
+	}
+
+	mchan->is_opened = 0;
+}
+
+/* ZynqMP IPI mailbox operations */
+static const struct mbox_chan_ops zynqmp_ipi_chan_ops = {
+	.startup = zynqmp_ipi_startup,
+	.shutdown = zynqmp_ipi_shutdown,
+	.peek_data = zynqmp_ipi_peek_data,
+	.last_tx_done = zynqmp_ipi_last_tx_done,
+	.send_data = zynqmp_ipi_send_data,
+};
+
+/**
+ * zynqmp_ipi_of_xlate - Translate an OF phandle to an IPI mailbox channel
+ *
+ * @mbox: mailbox controller pointer
+ * @p: phandle pointer
+ *
+ * Return: Mailbox channel on success, else an error pointer.
+ */
+static struct mbox_chan *zynqmp_ipi_of_xlate(struct mbox_controller *mbox,
+					     const struct of_phandle_args *p)
+{
+	struct mbox_chan *chan;
+	struct device *dev = mbox->dev;
+	unsigned int chan_type;
+
+	/* Only supports TX and RX channels */
+	chan_type = p->args[0];
+	if (chan_type != IPI_MB_CHNL_TX && chan_type != IPI_MB_CHNL_RX) {
+		dev_err(dev, "req chnl failure: invalid chnl type %u.\n",
+			chan_type);
+		return ERR_PTR(-EINVAL);
+	}
+	chan = &mbox->chans[chan_type];
+	return chan;
+}
+
+/**
+ * zynqmp_ipi_mbox_get_buf_res - Get buffer resource from the IPI dev node
+ *
+ * @node: IPI mbox device child node
+ * @name: name of the IPI buffer
+ * @res: pointer to where the resource information will be stored.
+ *
+ * Return: 0 for success, negative value for failure
+ */
+static int zynqmp_ipi_mbox_get_buf_res(struct device_node *node,
+				       const char *name,
+				       struct resource *res)
+{
+	int ret, index;
+
+	index = of_property_match_string(node, "reg-names", name);
+	if (index >= 0) {
+		ret = of_address_to_resource(node, index, res);
+		if (ret < 0)
+			return -EINVAL;
+		return 0;
+	}
+	return -ENODEV;
+}
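+
+/*
+ * Example: how a mailbox client addresses the two channels above (an
+ * illustrative sketch, assuming <linux/mailbox_client.h>; "dev" is a
+ * placeholder client device). zynqmp_ipi_of_xlate() decodes the cell
+ * value in the client's "mboxes" specifier, so a specifier cell of 0
+ * selects the TX channel and 1 the RX channel:
+ *
+ *	struct mbox_client cl = {
+ *		.dev = dev,
+ *		.tx_block = true,
+ *	};
+ *	struct mbox_chan *tx_chan, *rx_chan;
+ *
+ *	tx_chan = mbox_request_channel(&cl, 0);
+ *	rx_chan = mbox_request_channel(&cl, 1);
+ *
+ * The indexes passed to mbox_request_channel() pick entries of the
+ * client's "mboxes" property, which are expected to carry the cell
+ * values IPI_MB_CHNL_TX and IPI_MB_CHNL_RX respectively.
+ */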
+
+/**
+ * zynqmp_ipi_mbox_dev_release() - release an IPI mbox device
+ *
+ * @dev: the IPI mailbox device
+ *
+ * An empty release() callback to avoid the kernel warning about a
+ * missing device release() function.
+ */
+static void zynqmp_ipi_mbox_dev_release(struct device *dev)
+{
+	(void)dev;
+}
+
+/**
+ * zynqmp_ipi_mbox_probe - probe IPI mailbox resource from device node
+ *
+ * @ipi_mbox: pointer to IPI mailbox private data structure
+ * @node: IPI mailbox device node
+ *
+ * Return: 0 for success, negative value for failure
+ */
+static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox,
+				 struct device_node *node)
+{
+	struct mbox_chan *chans;
+	struct mbox_controller *mbox;
+	struct device *dev, *mdev;
+	int ret;
+
+	dev = ipi_mbox->pdata->dev;
+	/* Initialize dev for IPI mailbox */
+	ipi_mbox->dev.parent = dev;
+	ipi_mbox->dev.release = NULL;
+	ipi_mbox->dev.of_node = node;
+	dev_set_name(&ipi_mbox->dev, "%s", of_node_full_name(node));
+	dev_set_drvdata(&ipi_mbox->dev, ipi_mbox);
+	ipi_mbox->dev.release = zynqmp_ipi_mbox_dev_release;
+	ipi_mbox->dev.driver = &zynqmp_ipi_mbox_driver;
+	ret = device_register(&ipi_mbox->dev);
+	if (ret) {
+		dev_err(dev, "Failed to register ipi mbox dev.\n");
+		put_device(&ipi_mbox->dev);
+		return ret;
+	}
+	mdev = &ipi_mbox->dev;
+
+	/* Get the IPI remote agent ID */
+	ret = of_property_read_u32(node, "xlnx,ipi-id", &ipi_mbox->remote_id);
+	if (ret < 0) {
+		dev_err(dev, "No IPI remote ID is specified.\n");
+		return ret;
+	}
+
+	ret = ipi_mbox->setup_ipi_fn(ipi_mbox, node);
+	if (ret) {
+		dev_err(dev, "Failed to set up IPI Buffers.\n");
+		return ret;
+	}
+
+	mbox = &ipi_mbox->mbox;
+	mbox->dev = mdev;
+	mbox->ops = &zynqmp_ipi_chan_ops;
+	mbox->num_chans = 2;
+	mbox->txdone_irq = false;
+	mbox->txdone_poll = true;
+	mbox->txpoll_period = tx_poll_period;
+	mbox->of_xlate = zynqmp_ipi_of_xlate;
+	chans = devm_kzalloc(mdev, 2 * sizeof(*chans), GFP_KERNEL);
+	if (!chans)
+		return -ENOMEM;
+	mbox->chans = chans;
+	chans[IPI_MB_CHNL_TX].con_priv = &ipi_mbox->mchans[IPI_MB_CHNL_TX];
+	chans[IPI_MB_CHNL_RX].con_priv = &ipi_mbox->mchans[IPI_MB_CHNL_RX];
+	ipi_mbox->mchans[IPI_MB_CHNL_TX].chan_type = IPI_MB_CHNL_TX;
+	ipi_mbox->mchans[IPI_MB_CHNL_RX].chan_type = IPI_MB_CHNL_RX;
+	ret = devm_mbox_controller_register(mdev, mbox);
+	if (ret)
+		dev_err(mdev,
+			"Failed to register mbox_controller(%d)\n", ret);
+	else
+		dev_info(mdev,
+			 "Registered ZynqMP IPI mbox with TX/RX channels.\n");
+	return ret;
+}
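+
+/*
+ * Example: because txdone_poll is set above, a blocking client send
+ * completes only once zynqmp_ipi_last_tx_done() reports that the remote
+ * has taken the message, polled every tx_poll_period ms (a sketch;
+ * "tx_chan", "msg" and "dev" as in the previous example):
+ *
+ *	cl.tx_block = true;
+ *	cl.tx_tout = 100;
+ *	ret = mbox_send_message(tx_chan, msg);
+ *	if (ret < 0)
+ *		dev_err(dev, "send failed or timed out\n");
+ */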
+
+/**
+ * zynqmp_ipi_setup - set up IPI buffers for the classic flow
+ *
+ * @ipi_mbox: pointer to IPI mailbox private data structure
+ * @node: IPI mailbox device node
+ *
+ * This is used to set up the IPI buffers for the ZynqMP SoC, or when the
+ * user wishes to use the classic driver usage model on newer SoCs that
+ * have only buffered IPIs.
+ *
+ * Note that bufferless IPIs and mixed usage of buffered and bufferless
+ * IPIs are not supported with this flow.
+ *
+ * This is invoked for the compatible string "xlnx,zynqmp-ipi-mailbox".
+ *
+ * Return: 0 for success, negative value for failure
+ */
+static int zynqmp_ipi_setup(struct zynqmp_ipi_mbox *ipi_mbox,
+			    struct device_node *node)
+{
+	struct zynqmp_ipi_mchan *mchan;
+	struct device *mdev;
+	struct resource res;
+	const char *name;
+	int ret;
+
+	mdev = &ipi_mbox->dev;
+
+	mchan = &ipi_mbox->mchans[IPI_MB_CHNL_TX];
+	name = "local_request_region";
+	ret = zynqmp_ipi_mbox_get_buf_res(node, name, &res);
+	if (!ret) {
+		mchan->req_buf_size = resource_size(&res);
+		mchan->req_buf = devm_ioremap(mdev, res.start,
+					      mchan->req_buf_size);
+		if (!mchan->req_buf) {
+			dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
+			return -ENOMEM;
+		}
+	} else if (ret != -ENODEV) {
+		dev_err(mdev, "Unmatched resource %s, %d.\n", name, ret);
+		return ret;
+	}
+
+	name = "remote_response_region";
+	ret = zynqmp_ipi_mbox_get_buf_res(node, name, &res);
+	if (!ret) {
+		mchan->resp_buf_size = resource_size(&res);
+		mchan->resp_buf = devm_ioremap(mdev, res.start,
+					       mchan->resp_buf_size);
+		if (!mchan->resp_buf) {
+			dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
+			return -ENOMEM;
+		}
+	} else if (ret != -ENODEV) {
+		dev_err(mdev, "Unmatched resource %s.\n", name);
+		return ret;
+	}
+	mchan->rx_buf = devm_kzalloc(mdev,
+				     mchan->resp_buf_size +
+				     sizeof(struct zynqmp_ipi_message),
+				     GFP_KERNEL);
+	if (!mchan->rx_buf)
+		return -ENOMEM;
+
+	mchan = &ipi_mbox->mchans[IPI_MB_CHNL_RX];
+	name = "remote_request_region";
+	ret = zynqmp_ipi_mbox_get_buf_res(node, name, &res);
+	if (!ret) {
+		mchan->req_buf_size = resource_size(&res);
+		mchan->req_buf = devm_ioremap(mdev, res.start,
+					      mchan->req_buf_size);
+		if (!mchan->req_buf) {
+			dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
+			return -ENOMEM;
+		}
+	} else if (ret != -ENODEV) {
+		dev_err(mdev, "Unmatched resource %s.\n", name);
+		return ret;
+	}
+
+	name = "local_response_region";
+	ret = zynqmp_ipi_mbox_get_buf_res(node, name, &res);
+	if (!ret) {
+		mchan->resp_buf_size = resource_size(&res);
+		mchan->resp_buf = devm_ioremap(mdev, res.start,
+					       mchan->resp_buf_size);
+		if (!mchan->resp_buf) {
+			dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
+			return -ENOMEM;
+		}
+	} else if (ret != -ENODEV) {
+		dev_err(mdev, "Unmatched resource %s.\n", name);
+		return ret;
+	}
+	mchan->rx_buf = devm_kzalloc(mdev,
+				     mchan->resp_buf_size +
+				     sizeof(struct zynqmp_ipi_message),
+				     GFP_KERNEL);
+	if (!mchan->rx_buf)
+		return -ENOMEM;
+
+	return 0;
+}
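+
+/*
+ * Summary of the classic-flow buffer layout set up above: the child
+ * node's "reg-names" may provide up to four regions, which map to the
+ * channels as follows:
+ *
+ *	local_request_region   -> TX channel request buffer
+ *	remote_response_region -> TX channel response buffer
+ *	remote_request_region  -> RX channel request buffer
+ *	local_response_region  -> RX channel response buffer
+ *
+ * Each region is looked up by name and silently skipped if absent
+ * (-ENODEV from zynqmp_ipi_mbox_get_buf_res()).
+ */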
+
+/**
+ * versal_ipi_setup - Set up IPIs to support mixed usage of
+ *                    buffered and bufferless IPIs.
+ *
+ * @ipi_mbox: pointer to IPI mailbox private data structure
+ * @node: IPI mailbox device node
+ *
+ * Return: 0 for success, negative value for failure
+ */
+static int versal_ipi_setup(struct zynqmp_ipi_mbox *ipi_mbox,
+			    struct device_node *node)
+{
+	struct zynqmp_ipi_mchan *tx_mchan, *rx_mchan;
+	struct resource host_res, remote_res;
+	struct device_node *parent_node;
+	int host_idx, remote_idx;
+	struct device *mdev;
+
+	tx_mchan = &ipi_mbox->mchans[IPI_MB_CHNL_TX];
+	rx_mchan = &ipi_mbox->mchans[IPI_MB_CHNL_RX];
+	parent_node = of_get_parent(node);
+	mdev = &ipi_mbox->dev;
+
+	host_idx = zynqmp_ipi_mbox_get_buf_res(parent_node, "msg", &host_res);
+	remote_idx = zynqmp_ipi_mbox_get_buf_res(node, "msg", &remote_res);
+
+	/*
+	 * Only set up buffers if both sides claim to have msg buffers.
+	 * This is because each buffered IPI's corresponding msg buffers
+	 * are reserved for use by other buffered IPIs.
+	 */
+	if (!host_idx && !remote_idx) {
+		u32 host_src, host_dst, remote_src, remote_dst;
+		u32 buff_sz;
+
+		buff_sz = resource_size(&host_res);
+
+		host_src = host_res.start & SRC_BITMASK;
+		remote_src = remote_res.start & SRC_BITMASK;
+
+		host_dst = (host_src >> DST_BIT_POS) * DEST_OFFSET;
+		remote_dst = (remote_src >> DST_BIT_POS) * DEST_OFFSET;
+
+		/* Validate that the IPI IDs are within the IPI message buffer space. */
+		if (host_dst >= buff_sz || remote_dst >= buff_sz) {
+			dev_err(mdev,
+				"Invalid IPI Message buffer values: %x %x\n",
+				host_dst, remote_dst);
+			return -EINVAL;
+		}
+
+		tx_mchan->req_buf = devm_ioremap(mdev,
+						 host_res.start | remote_dst,
+						 IPI_BUF_SIZE);
+		if (!tx_mchan->req_buf) {
+			dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
+			return -ENOMEM;
+		}
+
+		tx_mchan->resp_buf = devm_ioremap(mdev,
+						  (remote_res.start | host_dst) +
+						  RESP_OFFSET, IPI_BUF_SIZE);
+		if (!tx_mchan->resp_buf) {
+			dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
+			return -ENOMEM;
+		}
+
+		rx_mchan->req_buf = devm_ioremap(mdev,
+						 remote_res.start | host_dst,
+						 IPI_BUF_SIZE);
+		if (!rx_mchan->req_buf) {
+			dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
+			return -ENOMEM;
+		}
+
+		rx_mchan->resp_buf = devm_ioremap(mdev,
+						  (host_res.start | remote_dst) +
+						  RESP_OFFSET, IPI_BUF_SIZE);
+		if (!rx_mchan->resp_buf) {
+			dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
+			return -ENOMEM;
+		}
+
+		tx_mchan->resp_buf_size = IPI_BUF_SIZE;
+		tx_mchan->req_buf_size = IPI_BUF_SIZE;
+		tx_mchan->rx_buf = devm_kzalloc(mdev, IPI_BUF_SIZE +
+						sizeof(struct zynqmp_ipi_message),
+						GFP_KERNEL);
+		if (!tx_mchan->rx_buf)
+			return -ENOMEM;
+
+		rx_mchan->resp_buf_size = IPI_BUF_SIZE;
+		rx_mchan->req_buf_size = IPI_BUF_SIZE;
+		rx_mchan->rx_buf = devm_kzalloc(mdev, IPI_BUF_SIZE +
+						sizeof(struct zynqmp_ipi_message),
+						GFP_KERNEL);
+		if (!rx_mchan->rx_buf)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
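+
+/*
+ * Worked example of the address math above, with hypothetical resource
+ * addresses (purely illustrative): with SRC_BITMASK = GENMASK(11, 8),
+ * DST_BIT_POS = 9 and DEST_OFFSET = 0x40, a host "msg" region at
+ * 0xff3f0400 and a remote "msg" region at 0xff3f0600 give:
+ *
+ *	host_src   = 0xff3f0400 & SRC_BITMASK = 0x400
+ *	remote_src = 0xff3f0600 & SRC_BITMASK = 0x600
+ *	host_dst   = (0x400 >> 9) * 0x40      = 0x80
+ *	remote_dst = (0x600 >> 9) * 0x40      = 0xc0
+ *
+ * so the TX request buffer is ioremapped at host_res.start | remote_dst
+ * (0xff3f04c0) and the TX response buffer at (remote_res.start |
+ * host_dst) + RESP_OFFSET (0xff3f06a0), each IPI_BUF_SIZE (0x20) bytes.
+ */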
+
+static int xlnx_mbox_cpuhp_start(unsigned int cpu)
+{
+	struct zynqmp_ipi_pdata *pdata;
+
+	pdata = get_cpu_var(per_cpu_pdata);
+	put_cpu_var(per_cpu_pdata);
+	enable_percpu_irq(pdata->virq_sgi, IRQ_TYPE_NONE);
+
+	return 0;
+}
+
+static int xlnx_mbox_cpuhp_down(unsigned int cpu)
+{
+	struct zynqmp_ipi_pdata *pdata;
+
+	pdata = get_cpu_var(per_cpu_pdata);
+	put_cpu_var(per_cpu_pdata);
+	disable_percpu_irq(pdata->virq_sgi);
+
+	return 0;
+}
+
+static void xlnx_disable_percpu_irq(void *data)
+{
+	struct zynqmp_ipi_pdata *pdata;
+
+	pdata = *this_cpu_ptr(&per_cpu_pdata);
+
+	disable_percpu_irq(pdata->virq_sgi);
+}
+
+static int xlnx_mbox_init_sgi(struct platform_device *pdev,
+			      int sgi_num,
+			      struct zynqmp_ipi_pdata *pdata)
+{
+	int ret = 0;
+	int cpu;
+
+	/*
+	 * IRQ related structures are used for the following:
+	 * for each SGI interrupt ensure it is mapped by the GIC IRQ domain
+	 * and that each corresponding Linux IRQ for the HW IRQ has
+	 * a handler for when receiving an interrupt from the remote
+	 * processor.
+	 */
+	struct irq_domain *domain;
+	struct irq_fwspec sgi_fwspec;
+	struct device_node *interrupt_parent = NULL;
+	struct device *dev = &pdev->dev;
+
+	/* Find the GIC controller to map SGIs. */
+	interrupt_parent = of_irq_find_parent(dev->of_node);
+	if (!interrupt_parent) {
+		dev_err(&pdev->dev, "Failed to find property for Interrupt parent\n");
+		return -EINVAL;
+	}
+
+	/* Each SGI needs to be associated with GIC's IRQ domain. */
+	domain = irq_find_host(interrupt_parent);
+	of_node_put(interrupt_parent);
+
+	/* Each mapping needs the GIC domain when finding the IRQ mapping. */
+	sgi_fwspec.fwnode = domain->fwnode;
+
+	/*
+	 * The fwspec for an SGI carries a single cell: the SGI number
+	 * (the interrupt type is implied by the one-cell format).
+	 */
+	sgi_fwspec.param_count = 1;
+
+	/* Set SGI's hwirq */
+	sgi_fwspec.param[0] = sgi_num;
+	pdata->virq_sgi = irq_create_fwspec_mapping(&sgi_fwspec);
+
+	for_each_possible_cpu(cpu)
+		per_cpu(per_cpu_pdata, cpu) = pdata;
+
+	ret = request_percpu_irq(pdata->virq_sgi, zynqmp_sgi_interrupt, pdev->name,
+				 &per_cpu_pdata);
+	WARN_ON(ret);
+	if (ret) {
+		irq_dispose_mapping(pdata->virq_sgi);
+		return ret;
+	}
+
+	irq_set_status_flags(pdata->virq_sgi, IRQ_PER_CPU);
+
+	/* Set up the CPU hot-plug callbacks */
+	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mailbox/sgi:starting",
+			  xlnx_mbox_cpuhp_start, xlnx_mbox_cpuhp_down);
+
+	return ret;
+}
+
+static void xlnx_mbox_cleanup_sgi(struct zynqmp_ipi_pdata *pdata)
+{
+	cpuhp_remove_state(CPUHP_AP_ONLINE_DYN);
+
+	on_each_cpu(xlnx_disable_percpu_irq, NULL, 1);
+
+	irq_clear_status_flags(pdata->virq_sgi, IRQ_PER_CPU);
+	free_percpu_irq(pdata->virq_sgi, &per_cpu_pdata);
+	irq_dispose_mapping(pdata->virq_sgi);
+}
+
+/**
+ * zynqmp_ipi_free_mboxes - Free IPI mailboxes devices
+ *
+ * @pdata: IPI private data
+ */
+static void zynqmp_ipi_free_mboxes(struct zynqmp_ipi_pdata *pdata)
+{
+	struct zynqmp_ipi_mbox *ipi_mbox;
+	int i;
+
+	if (pdata->irq_type == IPI_IRQ_TYPE_SGI)
+		xlnx_mbox_cleanup_sgi(pdata);
+
+	i = pdata->num_mboxes - 1;
+	for (; i >= 0; i--) {
+		ipi_mbox = &pdata->ipi_mboxes[i];
+		if (device_is_registered(&ipi_mbox->dev))
+			device_unregister(&ipi_mbox->dev);
+	}
+}
+
+static int zynqmp_ipi_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *nc, *np = pdev->dev.of_node;
+	struct zynqmp_ipi_pdata *pdata;
+	struct of_phandle_args out_irq;
+	struct zynqmp_ipi_mbox *mbox;
+	int num_mboxes, ret = -EINVAL;
+	setup_ipi_fn ipi_fn;
+
+	num_mboxes = of_get_available_child_count(np);
+	if (num_mboxes == 0) {
+		dev_err(dev, "mailbox nodes not available\n");
+		return -EINVAL;
+	}
+
+	pdata = devm_kzalloc(dev, struct_size(pdata, ipi_mboxes, num_mboxes),
+			     GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+	pdata->dev = dev;
+
+	/* Get the IPI local agent ID */
+	ret = of_property_read_u32(np, "xlnx,ipi-id", &pdata->local_id);
+	if (ret < 0) {
+		dev_err(dev, "No IPI local ID is specified.\n");
+		return ret;
+	}
+
+	ipi_fn = (setup_ipi_fn)device_get_match_data(&pdev->dev);
+	if (!ipi_fn) {
+		dev_err(dev,
+			"Mbox Compatible String is missing IPI Setup fn.\n");
+		return -ENODEV;
+	}
+
+	pdata->num_mboxes = num_mboxes;
+
+	mbox = pdata->ipi_mboxes;
+	for_each_available_child_of_node(np, nc) {
+		mbox->pdata = pdata;
+		mbox->setup_ipi_fn = ipi_fn;
+
+		ret = zynqmp_ipi_mbox_probe(mbox, nc);
+		if (ret) {
+			of_node_put(nc);
+			dev_err(dev, "failed to probe subdev.\n");
+			ret = -EINVAL;
+			goto free_mbox_dev;
+		}
+		mbox++;
+	}
+
+	ret = of_irq_parse_one(dev_of_node(dev), 0, &out_irq);
+	if (ret < 0) {
+		dev_err(dev, "failed to parse interrupts\n");
+		goto free_mbox_dev;
+	}
+
+	/* Use the interrupt type to distinguish SGI and SPI interrupts */
+	pdata->irq_type = out_irq.args[0];
+
+	/*
+	 * If the interrupt number is in the SGI range, request an SGI;
+	 * otherwise request a platform IRQ.
+	 */
+	if (pdata->irq_type == IPI_IRQ_TYPE_SGI) {
+		pdata->irq = out_irq.args[1];
+		ret = xlnx_mbox_init_sgi(pdev, pdata->irq, pdata);
+		if (ret)
+			goto free_mbox_dev;
+	} else {
+		ret = platform_get_irq(pdev, 0);
+		if (ret < 0)
+			goto free_mbox_dev;
+
+		pdata->irq = ret;
+		ret = devm_request_irq(dev, pdata->irq, zynqmp_ipi_interrupt,
+				       IRQF_SHARED, dev_name(dev), pdata);
+	}
+
+	if (ret) {
+		dev_err(dev, "IRQ %d is not requested successfully.\n",
+			pdata->irq);
+		goto free_mbox_dev;
+	}
+
+	platform_set_drvdata(pdev, pdata);
+	return ret;
+
+free_mbox_dev:
+	zynqmp_ipi_free_mboxes(pdata);
+	return ret;
+}
+
+static void zynqmp_ipi_remove(struct platform_device *pdev)
+{
+	struct zynqmp_ipi_pdata *pdata;
+
+	pdata = platform_get_drvdata(pdev);
+	zynqmp_ipi_free_mboxes(pdata);
+}
+
+static const struct of_device_id zynqmp_ipi_of_match[] = {
+	{ .compatible = "xlnx,zynqmp-ipi-mailbox",
+	  .data = &zynqmp_ipi_setup,
+	},
+	{ .compatible = "xlnx,versal-ipi-mailbox",
+	  .data = &versal_ipi_setup,
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, zynqmp_ipi_of_match);
+
+static struct platform_driver zynqmp_ipi_driver = {
+	.probe = zynqmp_ipi_probe,
+	.remove = zynqmp_ipi_remove,
+	.driver = {
+		   .name = "zynqmp-ipi",
+		   .of_match_table = of_match_ptr(zynqmp_ipi_of_match),
+	},
+};
+
+static int __init zynqmp_ipi_init(void)
+{
+	return platform_driver_register(&zynqmp_ipi_driver);
+}
+subsys_initcall(zynqmp_ipi_init);
+
+static void __exit zynqmp_ipi_exit(void)
+{
+	platform_driver_unregister(&zynqmp_ipi_driver);
+}
+module_exit(zynqmp_ipi_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Xilinx ZynqMP IPI Mailbox driver");
+MODULE_AUTHOR("Xilinx Inc.");
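+
+/*
+ * Example: end-to-end client usage of this controller (an illustrative
+ * sketch, not a tested client; assumes <linux/mailbox_client.h> and a
+ * client device "dev" whose "mboxes" property references the TX (cell 0)
+ * and RX (cell 1) channels of one of the mailboxes registered above):
+ *
+ *	static void demo_rx(struct mbox_client *cl, void *data)
+ *	{
+ *		struct zynqmp_ipi_message *msg = data;
+ *
+ *		print_hex_dump_bytes("ipi rx: ", DUMP_PREFIX_NONE,
+ *				     msg->data, msg->len);
+ *	}
+ *
+ *	struct mbox_client cl = {
+ *		.dev = dev,
+ *		.rx_callback = demo_rx,
+ *		.tx_block = true,
+ *		.tx_tout = 100,
+ *	};
+ *	struct mbox_chan *tx = mbox_request_channel(&cl, 0);
+ *	u8 buf[sizeof(struct zynqmp_ipi_message) + 4];
+ *	struct zynqmp_ipi_message *msg = (struct zynqmp_ipi_message *)buf;
+ *
+ *	msg->len = 4;
+ *	memcpy(msg->data, "ping", 4);
+ *	if (!IS_ERR(tx))
+ *		mbox_send_message(tx, msg);
+ *
+ * The RX callback receives the struct zynqmp_ipi_message that
+ * zynqmp_ipi_interrupt() copies out of the shared request buffer.
+ */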
