Diffstat (limited to 'drivers/mailbox')
-rw-r--r--  drivers/mailbox/Kconfig  145
-rw-r--r--  drivers/mailbox/Makefile  22
-rw-r--r--  drivers/mailbox/apple-mailbox.c  441
-rw-r--r--  drivers/mailbox/arm_mhu.c  3
-rw-r--r--  drivers/mailbox/arm_mhu_db.c  3
-rw-r--r--  drivers/mailbox/arm_mhuv2.c  13
-rw-r--r--  drivers/mailbox/arm_mhuv3.c  1103
-rw-r--r--  drivers/mailbox/ast2700-mailbox.c  235
-rw-r--r--  drivers/mailbox/bcm-flexrm-mailbox.c  22
-rw-r--r--  drivers/mailbox/bcm-pdc-mailbox.c  58
-rw-r--r--  drivers/mailbox/bcm2835-mailbox.c  3
-rw-r--r--  drivers/mailbox/bcm74110-mailbox.c  656
-rw-r--r--  drivers/mailbox/cix-mailbox.c  645
-rw-r--r--  drivers/mailbox/cv1800-mailbox.c  220
-rw-r--r--  drivers/mailbox/exynos-mailbox.c  157
-rw-r--r--  drivers/mailbox/hi3660-mailbox.c  1
-rw-r--r--  drivers/mailbox/hi6220-mailbox.c  6
-rw-r--r--  drivers/mailbox/imx-mailbox.c  166
-rw-r--r--  drivers/mailbox/mailbox-altera.c  4
-rw-r--r--  drivers/mailbox/mailbox-mchp-ipc-sbi.c  504
-rw-r--r--  drivers/mailbox/mailbox-mpfs.c  135
-rw-r--r--  drivers/mailbox/mailbox-sti.c  8
-rw-r--r--  drivers/mailbox/mailbox-test.c  30
-rw-r--r--  drivers/mailbox/mailbox-th1520.c  597
-rw-r--r--  drivers/mailbox/mailbox.c  334
-rw-r--r--  drivers/mailbox/mailbox.h  2
-rw-r--r--  drivers/mailbox/mtk-adsp-mailbox.c  3
-rw-r--r--  drivers/mailbox/mtk-cmdq-mailbox.c  302
-rw-r--r--  drivers/mailbox/mtk-gpueb-mailbox.c  319
-rw-r--r--  drivers/mailbox/omap-mailbox.c  577
-rw-r--r--  drivers/mailbox/pcc.c  325
-rw-r--r--  drivers/mailbox/pl320-ipc.c  14
-rw-r--r--  drivers/mailbox/platform_mhu.c  5
-rw-r--r--  drivers/mailbox/qcom-apcs-ipc-mailbox.c  42
-rw-r--r--  drivers/mailbox/qcom-cpucp-mbox.c  187
-rw-r--r--  drivers/mailbox/qcom-ipcc.c  27
-rw-r--r--  drivers/mailbox/riscv-sbi-mpxy-mbox.c  1019
-rw-r--r--  drivers/mailbox/rockchip-mailbox.c  13
-rw-r--r--  drivers/mailbox/sprd-mailbox.c  27
-rw-r--r--  drivers/mailbox/stm32-ipcc.c  5
-rw-r--r--  drivers/mailbox/sun6i-msgbox.c  6
-rw-r--r--  drivers/mailbox/tegra-hsp.c  101
-rw-r--r--  drivers/mailbox/ti-msgmgr.c  42
-rw-r--r--  drivers/mailbox/zynqmp-ipi-mailbox.c  454
44 files changed, 7382 insertions, 1599 deletions
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 1495965bc394..29f16f220384 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -8,18 +8,6 @@ menuconfig MAILBOX
if MAILBOX
-config APPLE_MAILBOX
- tristate "Apple Mailbox driver"
- depends on ARCH_APPLE || (ARM64 && COMPILE_TEST)
- default ARCH_APPLE
- help
- Apple SoCs have various co-processors required for certain
- peripherals to work (NVMe, display controller, etc.). This
- driver adds support for the mailbox controller used to
- communicate with those.
-
- Say Y here if you have an Apple SoC.
-
config ARM_MHU
tristate "ARM MHU Mailbox"
depends on ARM_AMBA
@@ -35,6 +23,49 @@ config ARM_MHU_V2
Say Y here if you want to build the ARM MHUv2 controller driver,
which provides unidirectional mailboxes between processing elements.
+config ARM_MHU_V3
+ tristate "ARM MHUv3 Mailbox"
+ depends on ARM64 || COMPILE_TEST
+ depends on HAS_IOMEM || COMPILE_TEST
+ depends on OF
+ help
+ Say Y here if you want to build the ARM MHUv3 controller driver,
+ which provides unidirectional mailboxes between processing elements.
+
+ ARM MHUv3 controllers can implement a varying number of extensions
+ that provide different means of transport: supported extensions
+ will be discovered and, where implemented, managed at probe time.
+
+config AST2700_MBOX
+ tristate "ASPEED AST2700 IPC driver"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ help
+ Mailbox driver implementation for ASPEED AST27XX SoCs. This driver
+ can be used to send messages between different processors in the SoC.
+ The driver provides mailbox support for sending interrupts to the
+ clients. Say Y here if you want to build this driver.
+
+config CV1800_MBOX
+ tristate "cv1800 mailbox"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ help
+ Mailbox driver implementation for Sophgo CV18XX SoCs. This driver
+ can be used to send messages between different processors in the
+ SoC. Any processor can write data to a channel and set the
+ corresponding register to raise an interrupt to notify another
+ processor; a processor is also allowed to send data to itself.
+
+config EXYNOS_MBOX
+ tristate "Exynos Mailbox"
+ depends on ARCH_EXYNOS || COMPILE_TEST
+ help
+ Say Y here if you want to build the Samsung Exynos Mailbox controller
+ driver. The controller has 16 flag bits for hardware interrupt
+ generation and a shared register for passing mailbox messages.
+ When the controller is used by the ACPM interface, the shared register
+ is ignored and the mailbox controller acts as a doorbell that raises
+ the interrupt to the ACPM firmware.
+
config IMX_MBOX
tristate "i.MX Mailbox"
depends on ARCH_MXC || COMPILE_TEST
@@ -73,22 +104,13 @@ config ARMADA_37XX_RWTM_MBOX
config OMAP2PLUS_MBOX
tristate "OMAP2+ Mailbox framework support"
- depends on ARCH_OMAP2PLUS || ARCH_K3
+ depends on ARCH_OMAP2PLUS || ARCH_K3 || COMPILE_TEST
help
Mailbox implementation for OMAP family chips with hardware for
interprocessor communication involving DSP, IVA1.0 and IVA2 in
OMAP2/3; or IPU, IVA HD and DSP in OMAP4/5. Say Y here if you
want to use OMAP2+ Mailbox framework support.
-config OMAP_MBOX_KFIFO_SIZE
- int "Mailbox kfifo default buffer size (bytes)"
- depends on OMAP2PLUS_MBOX
- default 256
- help
- Specify the default size of mailbox's kfifo buffers (bytes).
- This can also be changed at runtime (via the mbox_kfifo_size
- module parameter).
-
config ROCKCHIP_MBOX
bool "Rockchip Soc Integrated Mailbox Support"
depends on ARCH_ROCKCHIP || COMPILE_TEST
@@ -135,7 +157,7 @@ config STI_MBOX
config TI_MESSAGE_MANAGER
tristate "Texas Instruments Message Manager Driver"
- depends on ARCH_KEYSTONE || ARCH_K3
+ depends on ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
default ARCH_K3
help
An implementation of Message Manager slave driver for Keystone
@@ -176,7 +198,8 @@ config MAILBOX_TEST
config POLARFIRE_SOC_MAILBOX
tristate "PolarFire SoC (MPFS) Mailbox"
depends on HAS_IOMEM
- depends on SOC_MICROCHIP_POLARFIRE || COMPILE_TEST
+ depends on MFD_SYSCON
+ depends on ARCH_MICROCHIP_POLARFIRE || COMPILE_TEST
help
This driver adds support for the PolarFire SoC (MPFS) mailbox controller.
@@ -185,6 +208,19 @@ config POLARFIRE_SOC_MAILBOX
If unsure, say N.
+config MCHP_SBI_IPC_MBOX
+ tristate "Microchip Inter-processor Communication (IPC) SBI driver"
+ depends on RISCV_SBI
+ depends on ARCH_MICROCHIP || COMPILE_TEST
+ help
+ Mailbox implementation for Microchip devices with an
+ inter-processor communication (IPC) controller.
+
+ To compile this driver as a module, choose M here. The
+ module will be called mailbox-mchp-ipc-sbi.
+
+ If unsure, say N.
+
config QCOM_APCS_IPC
tristate "Qualcomm APCS IPC driver"
depends on ARCH_QCOM || COMPILE_TEST
@@ -258,8 +294,18 @@ config MTK_CMDQ_MBOX
critical time limitation, such as updating display configuration
during the vblank.
+config MTK_GPUEB_MBOX
+ tristate "MediaTek GPUEB Mailbox Support"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ help
+ The MediaTek GPUEB mailbox is used to communicate with the embedded
+ controller in charge of GPU frequency and power management on some
+ MediaTek SoCs, such as the MT8196.
+ Say Y or m here if you want to support the MT8196 SoC in your kernel
+ build.
+
config ZYNQMP_IPI_MBOX
- bool "Xilinx ZynqMP IPI Mailbox"
+ tristate "Xilinx ZynqMP IPI Mailbox"
depends on ARCH_ZYNQMP && OF
help
Say yes here to add support for Xilinx IPI mailbox driver.
@@ -285,6 +331,14 @@ config SPRD_MBOX
to send messages between application processors and MCU. Say Y here if
you want to build the Spreadtrum mailbox controller driver.
+config QCOM_CPUCP_MBOX
+ tristate "Qualcomm Technologies, Inc. CPUCP mailbox driver"
+ depends on (ARCH_QCOM || COMPILE_TEST) && 64BIT
+ help
+ Qualcomm Technologies, Inc. CPUSS Control Processor (CPUCP) mailbox
+ controller driver enables communication between the AP and the CPUCP.
+ Say Y here if you want to build this driver.
+
config QCOM_IPCC
tristate "Qualcomm Technologies, Inc. IPCC driver"
depends on ARCH_QCOM || COMPILE_TEST
@@ -295,4 +349,45 @@ config QCOM_IPCC
acts as an interrupt controller for receiving interrupts from clients.
Say Y here if you want to build this driver.
+config THEAD_TH1520_MBOX
+ tristate "T-head TH1520 Mailbox"
+ depends on ARCH_THEAD || COMPILE_TEST
+ help
+ Mailbox driver implementation for the T-head TH1520 platform. It
+ enables two cores within the SoC to communicate and coordinate by
+ passing messages, for example between the E910 core, on which the
+ kernel is running, and the E902 core used for power management,
+ among other things.
+
+config CIX_MBOX
+ tristate "CIX Mailbox"
+ depends on ARCH_CIX || COMPILE_TEST
+ depends on OF
+ help
+ Mailbox implementation for the CIX IPC system. The controller
+ supports 11 mailbox channels with different operating modes, and
+ every channel is unidirectional. Say Y here if you want to use the
+ CIX Mailbox support.
+
+config BCM74110_MAILBOX
+ tristate "Brcmstb BCM74110 Mailbox"
+ depends on ARCH_BRCMSTB || COMPILE_TEST
+ default ARCH_BRCMSTB
+ help
+ Broadcom STB mailbox driver, present starting with the brcmstb
+ BCM74110 SoCs. The mailbox is a communication channel between the
+ host processor and a coprocessor that handles various power
+ management tasks, among other things.
+
+config RISCV_SBI_MPXY_MBOX
+ tristate "RISC-V SBI Message Proxy (MPXY) Mailbox"
+ depends on RISCV_SBI
+ default RISCV
+ help
+ Mailbox driver implementation for RISC-V SBI Message Proxy (MPXY)
+ extension. This mailbox driver is used to send messages to the
+ remote processor through the SBI implementation (M-mode firmware
+ or HS-mode hypervisor). Say Y here if you want to have this support.
+ If unsure, say N.
+
endif
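All of the controllers added in this Kconfig hunk are consumed through the generic mailbox framework client API (linux/mailbox_client.h) rather than through driver-specific entry points. As orientation, here is a minimal, hypothetical client sketch of the usual request/send/free flow; the device, channel index and payload are illustrative placeholders, not taken from any driver in this series.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/types.h>

/* Hypothetical client: send one word on channel 0 of the 'mboxes' property. */
static int demo_mbox_send(struct device *dev)
{
	struct mbox_client cl = {
		.dev = dev,
		.tx_block = true,	/* block until the controller reports tx-done */
		.tx_tout = 500,		/* timeout in ms */
	};
	struct mbox_chan *chan;
	u32 msg = 0x1;			/* placeholder payload */
	int ret;

	chan = mbox_request_channel(&cl, 0);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = mbox_send_message(chan, &msg);	/* negative errno on failure */
	mbox_free_channel(chan);

	return ret < 0 ? ret : 0;
}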
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index fc9376117111..81820a4f5528 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -9,6 +9,14 @@ obj-$(CONFIG_ARM_MHU) += arm_mhu.o arm_mhu_db.o
obj-$(CONFIG_ARM_MHU_V2) += arm_mhuv2.o
+obj-$(CONFIG_ARM_MHU_V3) += arm_mhuv3.o
+
+obj-$(CONFIG_AST2700_MBOX) += ast2700-mailbox.o
+
+obj-$(CONFIG_CV1800_MBOX) += cv1800-mailbox.o
+
+obj-$(CONFIG_EXYNOS_MBOX) += exynos-mailbox.o
+
obj-$(CONFIG_IMX_MBOX) += imx-mailbox.o
obj-$(CONFIG_ARMADA_37XX_RWTM_MBOX) += armada-37xx-rwtm-mailbox.o
@@ -43,6 +51,8 @@ obj-$(CONFIG_BCM_FLEXRM_MBOX) += bcm-flexrm-mailbox.o
obj-$(CONFIG_POLARFIRE_SOC_MAILBOX) += mailbox-mpfs.o
+obj-$(CONFIG_MCHP_SBI_IPC_MBOX) += mailbox-mchp-ipc-sbi.o
+
obj-$(CONFIG_QCOM_APCS_IPC) += qcom-apcs-ipc-mailbox.o
obj-$(CONFIG_TEGRA_HSP_MBOX) += tegra-hsp.o
@@ -53,12 +63,22 @@ obj-$(CONFIG_MTK_ADSP_MBOX) += mtk-adsp-mailbox.o
obj-$(CONFIG_MTK_CMDQ_MBOX) += mtk-cmdq-mailbox.o
+obj-$(CONFIG_MTK_GPUEB_MBOX) += mtk-gpueb-mailbox.o
+
obj-$(CONFIG_ZYNQMP_IPI_MBOX) += zynqmp-ipi-mailbox.o
obj-$(CONFIG_SUN6I_MSGBOX) += sun6i-msgbox.o
obj-$(CONFIG_SPRD_MBOX) += sprd-mailbox.o
+obj-$(CONFIG_QCOM_CPUCP_MBOX) += qcom-cpucp-mbox.o
+
obj-$(CONFIG_QCOM_IPCC) += qcom-ipcc.o
-obj-$(CONFIG_APPLE_MAILBOX) += apple-mailbox.o
+obj-$(CONFIG_THEAD_TH1520_MBOX) += mailbox-th1520.o
+
+obj-$(CONFIG_CIX_MBOX) += cix-mailbox.o
+
+obj-$(CONFIG_BCM74110_MAILBOX) += bcm74110-mailbox.o
+
+obj-$(CONFIG_RISCV_SBI_MPXY_MBOX) += riscv-sbi-mpxy-mbox.o
diff --git a/drivers/mailbox/apple-mailbox.c b/drivers/mailbox/apple-mailbox.c
deleted file mode 100644
index 2a3e8d8ff8b5..000000000000
--- a/drivers/mailbox/apple-mailbox.c
+++ /dev/null
@@ -1,441 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only OR MIT
-/*
- * Apple mailbox driver
- *
- * Copyright (C) 2021 The Asahi Linux Contributors
- *
- * This driver adds support for two mailbox variants (called ASC and M3 by
- * Apple) found in Apple SoCs such as the M1. It consists of two FIFOs used to
- * exchange 64+32 bit messages between the main CPU and a co-processor.
- * Various coprocessors implement different IPC protocols based on these simple
- * messages and shared memory buffers.
- *
- * Both the main CPU and the co-processor see the same set of registers but
- * the first FIFO (A2I) is always used to transfer messages from the application
- * processor (us) to the I/O processor and the second one (I2A) for the
- * other direction.
- */
-
-#include <linux/apple-mailbox.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/gfp.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/mailbox_controller.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-
-#define APPLE_ASC_MBOX_CONTROL_FULL BIT(16)
-#define APPLE_ASC_MBOX_CONTROL_EMPTY BIT(17)
-
-#define APPLE_ASC_MBOX_A2I_CONTROL 0x110
-#define APPLE_ASC_MBOX_A2I_SEND0 0x800
-#define APPLE_ASC_MBOX_A2I_SEND1 0x808
-#define APPLE_ASC_MBOX_A2I_RECV0 0x810
-#define APPLE_ASC_MBOX_A2I_RECV1 0x818
-
-#define APPLE_ASC_MBOX_I2A_CONTROL 0x114
-#define APPLE_ASC_MBOX_I2A_SEND0 0x820
-#define APPLE_ASC_MBOX_I2A_SEND1 0x828
-#define APPLE_ASC_MBOX_I2A_RECV0 0x830
-#define APPLE_ASC_MBOX_I2A_RECV1 0x838
-
-#define APPLE_M3_MBOX_CONTROL_FULL BIT(16)
-#define APPLE_M3_MBOX_CONTROL_EMPTY BIT(17)
-
-#define APPLE_M3_MBOX_A2I_CONTROL 0x50
-#define APPLE_M3_MBOX_A2I_SEND0 0x60
-#define APPLE_M3_MBOX_A2I_SEND1 0x68
-#define APPLE_M3_MBOX_A2I_RECV0 0x70
-#define APPLE_M3_MBOX_A2I_RECV1 0x78
-
-#define APPLE_M3_MBOX_I2A_CONTROL 0x80
-#define APPLE_M3_MBOX_I2A_SEND0 0x90
-#define APPLE_M3_MBOX_I2A_SEND1 0x98
-#define APPLE_M3_MBOX_I2A_RECV0 0xa0
-#define APPLE_M3_MBOX_I2A_RECV1 0xa8
-
-#define APPLE_M3_MBOX_IRQ_ENABLE 0x48
-#define APPLE_M3_MBOX_IRQ_ACK 0x4c
-#define APPLE_M3_MBOX_IRQ_A2I_EMPTY BIT(0)
-#define APPLE_M3_MBOX_IRQ_A2I_NOT_EMPTY BIT(1)
-#define APPLE_M3_MBOX_IRQ_I2A_EMPTY BIT(2)
-#define APPLE_M3_MBOX_IRQ_I2A_NOT_EMPTY BIT(3)
-
-#define APPLE_MBOX_MSG1_OUTCNT GENMASK(56, 52)
-#define APPLE_MBOX_MSG1_INCNT GENMASK(51, 48)
-#define APPLE_MBOX_MSG1_OUTPTR GENMASK(47, 44)
-#define APPLE_MBOX_MSG1_INPTR GENMASK(43, 40)
-#define APPLE_MBOX_MSG1_MSG GENMASK(31, 0)
-
-struct apple_mbox_hw {
- unsigned int control_full;
- unsigned int control_empty;
-
- unsigned int a2i_control;
- unsigned int a2i_send0;
- unsigned int a2i_send1;
-
- unsigned int i2a_control;
- unsigned int i2a_recv0;
- unsigned int i2a_recv1;
-
- bool has_irq_controls;
- unsigned int irq_enable;
- unsigned int irq_ack;
- unsigned int irq_bit_recv_not_empty;
- unsigned int irq_bit_send_empty;
-};
-
-struct apple_mbox {
- void __iomem *regs;
- const struct apple_mbox_hw *hw;
-
- int irq_recv_not_empty;
- int irq_send_empty;
-
- struct mbox_chan chan;
-
- struct device *dev;
- struct mbox_controller controller;
- spinlock_t rx_lock;
-};
-
-static const struct of_device_id apple_mbox_of_match[];
-
-static bool apple_mbox_hw_can_send(struct apple_mbox *apple_mbox)
-{
- u32 mbox_ctrl =
- readl_relaxed(apple_mbox->regs + apple_mbox->hw->a2i_control);
-
- return !(mbox_ctrl & apple_mbox->hw->control_full);
-}
-
-static bool apple_mbox_hw_send_empty(struct apple_mbox *apple_mbox)
-{
- u32 mbox_ctrl =
- readl_relaxed(apple_mbox->regs + apple_mbox->hw->a2i_control);
-
- return mbox_ctrl & apple_mbox->hw->control_empty;
-}
-
-static int apple_mbox_hw_send(struct apple_mbox *apple_mbox,
- struct apple_mbox_msg *msg)
-{
- if (!apple_mbox_hw_can_send(apple_mbox))
- return -EBUSY;
-
- dev_dbg(apple_mbox->dev, "> TX %016llx %08x\n", msg->msg0, msg->msg1);
-
- writeq_relaxed(msg->msg0, apple_mbox->regs + apple_mbox->hw->a2i_send0);
- writeq_relaxed(FIELD_PREP(APPLE_MBOX_MSG1_MSG, msg->msg1),
- apple_mbox->regs + apple_mbox->hw->a2i_send1);
-
- return 0;
-}
-
-static bool apple_mbox_hw_can_recv(struct apple_mbox *apple_mbox)
-{
- u32 mbox_ctrl =
- readl_relaxed(apple_mbox->regs + apple_mbox->hw->i2a_control);
-
- return !(mbox_ctrl & apple_mbox->hw->control_empty);
-}
-
-static int apple_mbox_hw_recv(struct apple_mbox *apple_mbox,
- struct apple_mbox_msg *msg)
-{
- if (!apple_mbox_hw_can_recv(apple_mbox))
- return -ENOMSG;
-
- msg->msg0 = readq_relaxed(apple_mbox->regs + apple_mbox->hw->i2a_recv0);
- msg->msg1 = FIELD_GET(
- APPLE_MBOX_MSG1_MSG,
- readq_relaxed(apple_mbox->regs + apple_mbox->hw->i2a_recv1));
-
- dev_dbg(apple_mbox->dev, "< RX %016llx %08x\n", msg->msg0, msg->msg1);
-
- return 0;
-}
-
-static int apple_mbox_chan_send_data(struct mbox_chan *chan, void *data)
-{
- struct apple_mbox *apple_mbox = chan->con_priv;
- struct apple_mbox_msg *msg = data;
- int ret;
-
- ret = apple_mbox_hw_send(apple_mbox, msg);
- if (ret)
- return ret;
-
- /*
- * The interrupt is level triggered and will keep firing as long as the
- * FIFO is empty. It will also keep firing if the FIFO was empty
- * at any point in the past until it has been acknowledged at the
- * mailbox level. By acknowledging it here we can ensure that we will
- * only get the interrupt once the FIFO has been cleared again.
- * If the FIFO is already empty before the ack it will fire again
- * immediately after the ack.
- */
- if (apple_mbox->hw->has_irq_controls) {
- writel_relaxed(apple_mbox->hw->irq_bit_send_empty,
- apple_mbox->regs + apple_mbox->hw->irq_ack);
- }
- enable_irq(apple_mbox->irq_send_empty);
-
- return 0;
-}
-
-static irqreturn_t apple_mbox_send_empty_irq(int irq, void *data)
-{
- struct apple_mbox *apple_mbox = data;
-
- /*
- * We don't need to acknowledge the interrupt at the mailbox level
- * here even if supported by the hardware. It will keep firing but that
- * doesn't matter since it's disabled at the main interrupt controller.
- * apple_mbox_chan_send_data will acknowledge it before enabling
- * it at the main controller again.
- */
- disable_irq_nosync(apple_mbox->irq_send_empty);
- mbox_chan_txdone(&apple_mbox->chan, 0);
- return IRQ_HANDLED;
-}
-
-static int apple_mbox_poll(struct apple_mbox *apple_mbox)
-{
- struct apple_mbox_msg msg;
- int ret = 0;
-
- while (apple_mbox_hw_recv(apple_mbox, &msg) == 0) {
- mbox_chan_received_data(&apple_mbox->chan, (void *)&msg);
- ret++;
- }
-
- /*
- * The interrupt will keep firing even if there are no more messages
- * unless we also acknowledge it at the mailbox level here.
- * There's no race if a message comes in between the check in the while
- * loop above and the ack below: If a new messages arrives inbetween
- * those two the interrupt will just fire again immediately after the
- * ack since it's level triggered.
- */
- if (apple_mbox->hw->has_irq_controls) {
- writel_relaxed(apple_mbox->hw->irq_bit_recv_not_empty,
- apple_mbox->regs + apple_mbox->hw->irq_ack);
- }
-
- return ret;
-}
-
-static irqreturn_t apple_mbox_recv_irq(int irq, void *data)
-{
- struct apple_mbox *apple_mbox = data;
-
- spin_lock(&apple_mbox->rx_lock);
- apple_mbox_poll(apple_mbox);
- spin_unlock(&apple_mbox->rx_lock);
-
- return IRQ_HANDLED;
-}
-
-static bool apple_mbox_chan_peek_data(struct mbox_chan *chan)
-{
- struct apple_mbox *apple_mbox = chan->con_priv;
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&apple_mbox->rx_lock, flags);
- ret = apple_mbox_poll(apple_mbox);
- spin_unlock_irqrestore(&apple_mbox->rx_lock, flags);
-
- return ret > 0;
-}
-
-static int apple_mbox_chan_flush(struct mbox_chan *chan, unsigned long timeout)
-{
- struct apple_mbox *apple_mbox = chan->con_priv;
- unsigned long deadline = jiffies + msecs_to_jiffies(timeout);
-
- while (time_before(jiffies, deadline)) {
- if (apple_mbox_hw_send_empty(apple_mbox)) {
- mbox_chan_txdone(&apple_mbox->chan, 0);
- return 0;
- }
-
- udelay(1);
- }
-
- return -ETIME;
-}
-
-static int apple_mbox_chan_startup(struct mbox_chan *chan)
-{
- struct apple_mbox *apple_mbox = chan->con_priv;
-
- /*
- * Only some variants of this mailbox HW provide interrupt control
- * at the mailbox level. We therefore need to handle enabling/disabling
- * interrupts at the main interrupt controller anyway for hardware that
- * doesn't. Just always keep the interrupts we care about enabled at
- * the mailbox level so that both hardware revisions behave almost
- * the same.
- */
- if (apple_mbox->hw->has_irq_controls) {
- writel_relaxed(apple_mbox->hw->irq_bit_recv_not_empty |
- apple_mbox->hw->irq_bit_send_empty,
- apple_mbox->regs + apple_mbox->hw->irq_enable);
- }
-
- enable_irq(apple_mbox->irq_recv_not_empty);
- return 0;
-}
-
-static void apple_mbox_chan_shutdown(struct mbox_chan *chan)
-{
- struct apple_mbox *apple_mbox = chan->con_priv;
-
- disable_irq(apple_mbox->irq_recv_not_empty);
-}
-
-static const struct mbox_chan_ops apple_mbox_ops = {
- .send_data = apple_mbox_chan_send_data,
- .peek_data = apple_mbox_chan_peek_data,
- .flush = apple_mbox_chan_flush,
- .startup = apple_mbox_chan_startup,
- .shutdown = apple_mbox_chan_shutdown,
-};
-
-static struct mbox_chan *apple_mbox_of_xlate(struct mbox_controller *mbox,
- const struct of_phandle_args *args)
-{
- if (args->args_count != 0)
- return ERR_PTR(-EINVAL);
-
- return &mbox->chans[0];
-}
-
-static int apple_mbox_probe(struct platform_device *pdev)
-{
- int ret;
- const struct of_device_id *match;
- char *irqname;
- struct apple_mbox *mbox;
- struct device *dev = &pdev->dev;
-
- match = of_match_node(apple_mbox_of_match, pdev->dev.of_node);
- if (!match)
- return -EINVAL;
- if (!match->data)
- return -EINVAL;
-
- mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
- if (!mbox)
- return -ENOMEM;
- platform_set_drvdata(pdev, mbox);
-
- mbox->dev = dev;
- mbox->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(mbox->regs))
- return PTR_ERR(mbox->regs);
-
- mbox->hw = match->data;
- mbox->irq_recv_not_empty =
- platform_get_irq_byname(pdev, "recv-not-empty");
- if (mbox->irq_recv_not_empty < 0)
- return -ENODEV;
-
- mbox->irq_send_empty = platform_get_irq_byname(pdev, "send-empty");
- if (mbox->irq_send_empty < 0)
- return -ENODEV;
-
- mbox->controller.dev = mbox->dev;
- mbox->controller.num_chans = 1;
- mbox->controller.chans = &mbox->chan;
- mbox->controller.ops = &apple_mbox_ops;
- mbox->controller.txdone_irq = true;
- mbox->controller.of_xlate = apple_mbox_of_xlate;
- mbox->chan.con_priv = mbox;
- spin_lock_init(&mbox->rx_lock);
-
- irqname = devm_kasprintf(dev, GFP_KERNEL, "%s-recv", dev_name(dev));
- if (!irqname)
- return -ENOMEM;
-
- ret = devm_request_threaded_irq(dev, mbox->irq_recv_not_empty, NULL,
- apple_mbox_recv_irq,
- IRQF_NO_AUTOEN | IRQF_ONESHOT, irqname,
- mbox);
- if (ret)
- return ret;
-
- irqname = devm_kasprintf(dev, GFP_KERNEL, "%s-send", dev_name(dev));
- if (!irqname)
- return -ENOMEM;
-
- ret = devm_request_irq(dev, mbox->irq_send_empty,
- apple_mbox_send_empty_irq, IRQF_NO_AUTOEN,
- irqname, mbox);
- if (ret)
- return ret;
-
- return devm_mbox_controller_register(dev, &mbox->controller);
-}
-
-static const struct apple_mbox_hw apple_mbox_asc_hw = {
- .control_full = APPLE_ASC_MBOX_CONTROL_FULL,
- .control_empty = APPLE_ASC_MBOX_CONTROL_EMPTY,
-
- .a2i_control = APPLE_ASC_MBOX_A2I_CONTROL,
- .a2i_send0 = APPLE_ASC_MBOX_A2I_SEND0,
- .a2i_send1 = APPLE_ASC_MBOX_A2I_SEND1,
-
- .i2a_control = APPLE_ASC_MBOX_I2A_CONTROL,
- .i2a_recv0 = APPLE_ASC_MBOX_I2A_RECV0,
- .i2a_recv1 = APPLE_ASC_MBOX_I2A_RECV1,
-
- .has_irq_controls = false,
-};
-
-static const struct apple_mbox_hw apple_mbox_m3_hw = {
- .control_full = APPLE_M3_MBOX_CONTROL_FULL,
- .control_empty = APPLE_M3_MBOX_CONTROL_EMPTY,
-
- .a2i_control = APPLE_M3_MBOX_A2I_CONTROL,
- .a2i_send0 = APPLE_M3_MBOX_A2I_SEND0,
- .a2i_send1 = APPLE_M3_MBOX_A2I_SEND1,
-
- .i2a_control = APPLE_M3_MBOX_I2A_CONTROL,
- .i2a_recv0 = APPLE_M3_MBOX_I2A_RECV0,
- .i2a_recv1 = APPLE_M3_MBOX_I2A_RECV1,
-
- .has_irq_controls = true,
- .irq_enable = APPLE_M3_MBOX_IRQ_ENABLE,
- .irq_ack = APPLE_M3_MBOX_IRQ_ACK,
- .irq_bit_recv_not_empty = APPLE_M3_MBOX_IRQ_I2A_NOT_EMPTY,
- .irq_bit_send_empty = APPLE_M3_MBOX_IRQ_A2I_EMPTY,
-};
-
-static const struct of_device_id apple_mbox_of_match[] = {
- { .compatible = "apple,asc-mailbox-v4", .data = &apple_mbox_asc_hw },
- { .compatible = "apple,m3-mailbox-v2", .data = &apple_mbox_m3_hw },
- {}
-};
-MODULE_DEVICE_TABLE(of, apple_mbox_of_match);
-
-static struct platform_driver apple_mbox_driver = {
- .driver = {
- .name = "apple-mailbox",
- .of_match_table = apple_mbox_of_match,
- },
- .probe = apple_mbox_probe,
-};
-module_platform_driver(apple_mbox_driver);
-
-MODULE_LICENSE("Dual MIT/GPL");
-MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
-MODULE_DESCRIPTION("Apple Mailbox driver");
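The removed driver packed its 32-bit secondary message word with FIELD_PREP() and unpacked it with FIELD_GET(); the new arm_mhuv3.c below wraps the same bitfield idiom in its readl/writel_relaxed_bitmask() macros. A self-contained sketch of that pattern, reusing two of the APPLE_MBOX_MSG1_* masks defined in the deleted file (like the original arm64-only driver, it assumes a 64-bit build where GENMASK can span bits above 31):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define APPLE_MBOX_MSG1_OUTCNT	GENMASK(56, 52)
#define APPLE_MBOX_MSG1_MSG	GENMASK(31, 0)

/* Pack a 32-bit payload into the 64-bit SEND1 register image. */
static inline u64 msg1_pack(u32 payload)
{
	return FIELD_PREP(APPLE_MBOX_MSG1_MSG, payload);
}

/* Recover the payload and the OUTCNT bookkeeping field from RECV1. */
static inline void msg1_unpack(u64 reg, u32 *payload, u8 *outcnt)
{
	*payload = FIELD_GET(APPLE_MBOX_MSG1_MSG, reg);
	*outcnt = FIELD_GET(APPLE_MBOX_MSG1_OUTCNT, reg);
}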
diff --git a/drivers/mailbox/arm_mhu.c b/drivers/mailbox/arm_mhu.c
index 22243cabe056..0950b7bce184 100644
--- a/drivers/mailbox/arm_mhu.c
+++ b/drivers/mailbox/arm_mhu.c
@@ -12,6 +12,7 @@
#include <linux/io.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
+#include <linux/of.h>
#define INTR_STAT_OFS 0x0
#define INTR_SET_OFS 0x8
@@ -152,7 +153,7 @@ static int mhu_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
}
-static struct amba_id mhu_ids[] = {
+static const struct amba_id mhu_ids[] = {
{
.id = 0x1bb098,
.mask = 0xffffff,
diff --git a/drivers/mailbox/arm_mhu_db.c b/drivers/mailbox/arm_mhu_db.c
index aa0a4d83880f..9e937b09c5fb 100644
--- a/drivers/mailbox/arm_mhu_db.c
+++ b/drivers/mailbox/arm_mhu_db.c
@@ -15,7 +15,6 @@
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#define INTR_STAT_OFS 0x0
#define INTR_SET_OFS 0x8
@@ -329,7 +328,7 @@ static int mhu_db_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
}
-static struct amba_id mhu_ids[] = {
+static const struct amba_id mhu_ids[] = {
{
.id = 0x1bb098,
.mask = 0xffffff,
diff --git a/drivers/mailbox/arm_mhuv2.c b/drivers/mailbox/arm_mhuv2.c
index c6d4957c4da8..f035284944c0 100644
--- a/drivers/mailbox/arm_mhuv2.c
+++ b/drivers/mailbox/arm_mhuv2.c
@@ -500,7 +500,7 @@ static const struct mhuv2_protocol_ops mhuv2_data_transfer_ops = {
static struct mbox_chan *get_irq_chan_comb(struct mhuv2 *mhu, u32 __iomem *reg)
{
struct mbox_chan *chans = mhu->mbox.chans;
- int channel = 0, i, offset = 0, windows, protocol, ch_wn;
+ int channel = 0, i, j, offset = 0, windows, protocol, ch_wn;
u32 stat;
for (i = 0; i < MHUV2_CMB_INT_ST_REG_CNT; i++) {
@@ -510,9 +510,9 @@ static struct mbox_chan *get_irq_chan_comb(struct mhuv2 *mhu, u32 __iomem *reg)
ch_wn = i * MHUV2_STAT_BITS + __builtin_ctz(stat);
- for (i = 0; i < mhu->length; i += 2) {
- protocol = mhu->protocols[i];
- windows = mhu->protocols[i + 1];
+ for (j = 0; j < mhu->length; j += 2) {
+ protocol = mhu->protocols[j];
+ windows = mhu->protocols[j + 1];
if (ch_wn >= offset + windows) {
if (protocol == DOORBELL)
@@ -553,7 +553,8 @@ static irqreturn_t mhuv2_sender_interrupt(int irq, void *data)
priv = chan->con_priv;
if (!IS_PROTOCOL_DOORBELL(priv)) {
- writel_relaxed(1, &mhu->send->ch_wn[priv->ch_wn_idx + priv->windows - 1].int_clr);
+ for (i = 0; i < priv->windows; i++)
+ writel_relaxed(1, &mhu->send->ch_wn[priv->ch_wn_idx + i].int_clr);
if (chan->cl) {
mbox_chan_txdone(chan, 0);
@@ -1106,7 +1107,7 @@ static void mhuv2_remove(struct amba_device *adev)
writel_relaxed(0x0, &mhu->send->access_request);
}
-static struct amba_id mhuv2_ids[] = {
+static const struct amba_id mhuv2_ids[] = {
{
/* 2.0 */
.id = 0xbb0d1,
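Two separate fixes ride in the arm_mhuv2.c hunks above: get_irq_chan_comb() reused the outer loop counter i for its inner protocol-table walk, and the sender interrupt cleared int_clr only for the last window of a multi-window channel. A minimal, self-contained illustration of the counter-shadowing bug that the first hunk fixes (toy loop bounds, not the driver's):

#include <stdio.h>

int main(void)
{
	int i, j, outer = 0;

	/* Fixed shape: distinct counters, the outer loop runs 4 times. */
	for (i = 0; i < 4; i++) {
		for (j = 0; j < 6; j += 2)
			;	/* walk <protocol, windows> pairs */
		outer++;
	}
	printf("fixed: outer ran %d times\n", outer);	/* 4 */

	/* Buggy shape: the inner loop clobbers the outer counter. */
	outer = 0;
	for (i = 0; i < 4; i++) {
		for (i = 0; i < 6; i += 2)
			;	/* leaves i == 6, terminating the outer loop */
		outer++;
	}
	printf("buggy: outer ran %d times\n", outer);	/* 1 */
	return 0;
}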
diff --git a/drivers/mailbox/arm_mhuv3.c b/drivers/mailbox/arm_mhuv3.c
new file mode 100644
index 000000000000..0910da67f8a1
--- /dev/null
+++ b/drivers/mailbox/arm_mhuv3.c
@@ -0,0 +1,1103 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM Message Handling Unit Version 3 (MHUv3) driver.
+ *
+ * Copyright (C) 2024 ARM Ltd.
+ *
+ * Based on ARM MHUv2 driver.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+/* ====== MHUv3 Registers ====== */
+
+/* Maximum number of Doorbell channel windows */
+#define MHUV3_DBCW_MAX 128
+/* Number of DBCH combined interrupt status registers */
+#define MHUV3_DBCH_CMB_INT_ST_REG_CNT 4
+
+/* Number of FFCH combined interrupt status registers */
+#define MHUV3_FFCH_CMB_INT_ST_REG_CNT 2
+
+#define MHUV3_FLAG_BITS 32
+
+/* Not a typo ... */
+#define MHUV3_MAJOR_VERSION 2
+
+enum {
+ MHUV3_MBOX_CELL_TYPE,
+ MHUV3_MBOX_CELL_CHWN,
+ MHUV3_MBOX_CELL_PARAM,
+ MHUV3_MBOX_CELLS
+};
+
+/* Padding bitfields/fields represent holes in the register MMIO layout */
+
+/* CTRL_Page */
+struct blk_id {
+#define id GENMASK(3, 0)
+ u32 val;
+} __packed;
+
+struct feat_spt0 {
+#define dbe_spt GENMASK(3, 0)
+#define fe_spt GENMASK(7, 4)
+#define fce_spt GENMASK(11, 8)
+ u32 val;
+} __packed;
+
+struct feat_spt1 {
+#define auto_op_spt GENMASK(3, 0)
+ u32 val;
+} __packed;
+
+struct dbch_cfg0 {
+#define num_dbch GENMASK(7, 0)
+ u32 val;
+} __packed;
+
+struct ffch_cfg0 {
+#define num_ffch GENMASK(7, 0)
+#define x8ba_spt BIT(8)
+#define x16ba_spt BIT(9)
+#define x32ba_spt BIT(10)
+#define x64ba_spt BIT(11)
+#define ffch_depth GENMASK(25, 16)
+ u32 val;
+} __packed;
+
+struct fch_cfg0 {
+#define num_fch GENMASK(9, 0)
+#define fcgi_spt BIT(10) // MBX-only
+#define num_fcg GENMASK(15, 11)
+#define num_fch_per_grp GENMASK(20, 16)
+#define fch_ws GENMASK(28, 21)
+ u32 val;
+} __packed;
+
+struct ctrl {
+#define op_req BIT(0)
+#define ch_op_mask BIT(1)
+ u32 val;
+} __packed;
+
+struct fch_ctrl {
+#define _int_en BIT(2)
+ u32 val;
+} __packed;
+
+struct iidr {
+#define implementer GENMASK(11, 0)
+#define revision GENMASK(15, 12)
+#define variant GENMASK(19, 16)
+#define product_id GENMASK(31, 20)
+ u32 val;
+} __packed;
+
+struct aidr {
+#define arch_minor_rev GENMASK(3, 0)
+#define arch_major_rev GENMASK(7, 4)
+ u32 val;
+} __packed;
+
+struct ctrl_page {
+ struct blk_id blk_id;
+ u8 pad[12];
+ struct feat_spt0 feat_spt0;
+ struct feat_spt1 feat_spt1;
+ u8 pad1[8];
+ struct dbch_cfg0 dbch_cfg0;
+ u8 pad2[12];
+ struct ffch_cfg0 ffch_cfg0;
+ u8 pad3[12];
+ struct fch_cfg0 fch_cfg0;
+ u8 pad4[188];
+ struct ctrl x_ctrl;
+ /*-- MBX-only registers --*/
+ u8 pad5[60];
+ struct fch_ctrl fch_ctrl;
+ u32 fcg_int_en;
+ u8 pad6[696];
+ /*-- End of MBX-only ---- */
+ u32 dbch_int_st[MHUV3_DBCH_CMB_INT_ST_REG_CNT];
+ u32 ffch_int_st[MHUV3_FFCH_CMB_INT_ST_REG_CNT];
+ /*-- MBX-only registers --*/
+ u8 pad7[88];
+ u32 fcg_int_st;
+ u8 pad8[12];
+ u32 fcg_grp_int_st[32];
+ u8 pad9[2760];
+ /*-- End of MBX-only ---- */
+ struct iidr iidr;
+ struct aidr aidr;
+ u32 imp_def_id[12];
+} __packed;
+
+/* DBCW_Page */
+
+struct xbcw_ctrl {
+#define comb_en BIT(0)
+ u32 val;
+} __packed;
+
+struct pdbcw_int {
+#define tfr_ack BIT(0)
+ u32 val;
+} __packed;
+
+struct pdbcw_page {
+ u32 st;
+ u8 pad[8];
+ u32 set;
+ struct pdbcw_int int_st;
+ struct pdbcw_int int_clr;
+ struct pdbcw_int int_en;
+ struct xbcw_ctrl ctrl;
+} __packed;
+
+struct mdbcw_page {
+ u32 st;
+ u32 st_msk;
+ u32 clr;
+ u8 pad[4];
+ u32 msk_st;
+ u32 msk_set;
+ u32 msk_clr;
+ struct xbcw_ctrl ctrl;
+} __packed;
+
+struct dummy_page {
+ u8 pad[SZ_4K];
+} __packed;
+
+struct mhu3_pbx_frame_reg {
+ struct ctrl_page ctrl;
+ struct pdbcw_page dbcw[MHUV3_DBCW_MAX];
+ struct dummy_page ffcw;
+ struct dummy_page fcw;
+ u8 pad[SZ_4K * 11];
+ struct dummy_page impdef;
+} __packed;
+
+struct mhu3_mbx_frame_reg {
+ struct ctrl_page ctrl;
+ struct mdbcw_page dbcw[MHUV3_DBCW_MAX];
+ struct dummy_page ffcw;
+ struct dummy_page fcw;
+ u8 pad[SZ_4K * 11];
+ struct dummy_page impdef;
+} __packed;
+
+/* Macro for reading a bitmask within a physically mapped packed struct */
+#define readl_relaxed_bitmask(_regptr, _bitmask) \
+ ({ \
+ unsigned long _rval; \
+ _rval = readl_relaxed(_regptr); \
+ FIELD_GET(_bitmask, _rval); \
+ })
+
+/* Macro for writing a bitmask within a physically mapped packed struct */
+#define writel_relaxed_bitmask(_value, _regptr, _bitmask) \
+ ({ \
+ unsigned long _rval; \
+ typeof(_regptr) _rptr = _regptr; \
+ typeof(_bitmask) _bmask = _bitmask; \
+ _rval = readl_relaxed(_rptr); \
+ _rval &= ~(_bmask); \
+ _rval |= FIELD_PREP((unsigned long long)_bmask, _value);\
+ writel_relaxed(_rval, _rptr); \
+ })
+
+/* ====== MHUv3 data structures ====== */
+
+enum mhuv3_frame {
+ PBX_FRAME,
+ MBX_FRAME,
+};
+
+static char *mhuv3_str[] = {
+ "PBX",
+ "MBX"
+};
+
+enum mhuv3_extension_type {
+ DBE_EXT,
+ FCE_EXT,
+ FE_EXT,
+ NUM_EXT
+};
+
+static char *mhuv3_ext_str[] = {
+ "DBE",
+ "FCE",
+ "FE"
+};
+
+struct mhuv3;
+
+/**
+ * struct mhuv3_protocol_ops - MHUv3 operations
+ *
+ * @rx_startup: Receiver startup callback.
+ * @rx_shutdown: Receiver shutdown callback.
+ * @read_data: Read available Sender in-band LE data (if any).
+ * @rx_complete: Acknowledge data reception to the Sender. Any out-of-band data
+ * has to have been already retrieved before calling this.
+ * @tx_startup: Sender startup callback.
+ * @tx_shutdown: Sender shutdown callback.
+ * @last_tx_done: Report back to the Sender if the last transfer has completed.
+ * @send_data: Send data to the receiver.
+ *
+ * Each supported transport protocol provides its own implementation of
+ * these operations.
+ */
+struct mhuv3_protocol_ops {
+ int (*rx_startup)(struct mhuv3 *mhu, struct mbox_chan *chan);
+ void (*rx_shutdown)(struct mhuv3 *mhu, struct mbox_chan *chan);
+ void *(*read_data)(struct mhuv3 *mhu, struct mbox_chan *chan);
+ void (*rx_complete)(struct mhuv3 *mhu, struct mbox_chan *chan);
+ void (*tx_startup)(struct mhuv3 *mhu, struct mbox_chan *chan);
+ void (*tx_shutdown)(struct mhuv3 *mhu, struct mbox_chan *chan);
+ int (*last_tx_done)(struct mhuv3 *mhu, struct mbox_chan *chan);
+ int (*send_data)(struct mhuv3 *mhu, struct mbox_chan *chan, void *arg);
+};
+
+/**
+ * struct mhuv3_mbox_chan_priv - MHUv3 channel private information
+ *
+ * @ch_idx: Channel window index associated to this mailbox channel.
+ * @doorbell: Doorbell bit number within the @ch_idx window.
+ * Only relevant to Doorbell transport.
+ * @ops: Transport protocol specific operations for this channel.
+ *
+ * Transport specific data attached to mailbox channel priv data.
+ */
+struct mhuv3_mbox_chan_priv {
+ u32 ch_idx;
+ u32 doorbell;
+ const struct mhuv3_protocol_ops *ops;
+};
+
+/**
+ * struct mhuv3_extension - MHUv3 extension descriptor
+ *
+ * @type: Type of extension
+ * @num_chans: Max number of channels found for this extension.
+ * @base_ch_idx: First channel number assigned to this extension, picked from
+ * the set of all mailbox channel descriptors created.
+ * @mbox_of_xlate: Extension specific helper to parse DT and lookup associated
+ * channel from the related 'mboxes' property.
+ * @combined_irq_setup: Extension specific helper to setup the combined irq.
+ * @channels_init: Extension specific helper to initialize channels.
+ * @chan_from_comb_irq_get: Extension specific helper to lookup which channel
+ * triggered the combined irq.
+ * @pending_db: Array of per-channel pending doorbells.
+ * @pending_lock: Protect access to pending_db.
+ */
+struct mhuv3_extension {
+ enum mhuv3_extension_type type;
+ unsigned int num_chans;
+ unsigned int base_ch_idx;
+ struct mbox_chan *(*mbox_of_xlate)(struct mhuv3 *mhu,
+ unsigned int channel,
+ unsigned int param);
+ void (*combined_irq_setup)(struct mhuv3 *mhu);
+ int (*channels_init)(struct mhuv3 *mhu);
+ struct mbox_chan *(*chan_from_comb_irq_get)(struct mhuv3 *mhu);
+ u32 pending_db[MHUV3_DBCW_MAX];
+ /* Protect access to pending_db */
+ spinlock_t pending_lock;
+};
+
+/**
+ * struct mhuv3 - MHUv3 mailbox controller data
+ *
+ * @frame: Frame type: MBX_FRAME or PBX_FRAME.
+ * @auto_op_full: Flag to indicate if the MHU supports AutoOp full mode.
+ * @major: MHUv3 controller architectural major version.
+ * @minor: MHUv3 controller architectural minor version.
+ * @implem: MHUv3 controller IIDR implementer.
+ * @rev: MHUv3 controller IIDR revision.
+ * @var: MHUv3 controller IIDR variant.
+ * @prod_id: MHUv3 controller IIDR product_id.
+ * @num_chans: The total number of channels discovered across all extensions.
+ * @cmb_irq: Combined IRQ number, if one is defined.
+ * @ctrl: A reference to the MHUv3 control page for this block.
+ * @pbx: Base address of the PBX register mapping region.
+ * @mbx: Base address of the MBX register mapping region.
+ * @ext: Array holding descriptors for any found implemented extension.
+ * @mbox: Mailbox controller belonging to the MHU frame.
+ */
+struct mhuv3 {
+ enum mhuv3_frame frame;
+ bool auto_op_full;
+ unsigned int major;
+ unsigned int minor;
+ unsigned int implem;
+ unsigned int rev;
+ unsigned int var;
+ unsigned int prod_id;
+ unsigned int num_chans;
+ int cmb_irq;
+ struct ctrl_page __iomem *ctrl;
+ union {
+ struct mhu3_pbx_frame_reg __iomem *pbx;
+ struct mhu3_mbx_frame_reg __iomem *mbx;
+ };
+ struct mhuv3_extension *ext[NUM_EXT];
+ struct mbox_controller mbox;
+};
+
+#define mhu_from_mbox(_mbox) container_of(_mbox, struct mhuv3, mbox)
+
+typedef int (*mhuv3_extension_initializer)(struct mhuv3 *mhu);
+
+/* =================== Doorbell transport protocol operations =============== */
+
+static void mhuv3_doorbell_tx_startup(struct mhuv3 *mhu, struct mbox_chan *chan)
+{
+ struct mhuv3_mbox_chan_priv *priv = chan->con_priv;
+
+ /* Enable Transfer Acknowledgment events */
+ writel_relaxed_bitmask(0x1, &mhu->pbx->dbcw[priv->ch_idx].int_en, tfr_ack);
+}
+
+static void mhuv3_doorbell_tx_shutdown(struct mhuv3 *mhu, struct mbox_chan *chan)
+{
+ struct mhuv3_mbox_chan_priv *priv = chan->con_priv;
+ struct mhuv3_extension *e = mhu->ext[DBE_EXT];
+ unsigned long flags;
+
+ /* Disable Channel Transfer Ack events */
+ writel_relaxed_bitmask(0x0, &mhu->pbx->dbcw[priv->ch_idx].int_en, tfr_ack);
+
+ /* Clear Channel Transfer Ack and pending doorbells */
+ writel_relaxed_bitmask(0x1, &mhu->pbx->dbcw[priv->ch_idx].int_clr, tfr_ack);
+ spin_lock_irqsave(&e->pending_lock, flags);
+ e->pending_db[priv->ch_idx] = 0;
+ spin_unlock_irqrestore(&e->pending_lock, flags);
+}
+
+static int mhuv3_doorbell_rx_startup(struct mhuv3 *mhu, struct mbox_chan *chan)
+{
+ struct mhuv3_mbox_chan_priv *priv = chan->con_priv;
+
+ /* Unmask Channel Transfer events */
+ writel_relaxed(BIT(priv->doorbell), &mhu->mbx->dbcw[priv->ch_idx].msk_clr);
+
+ return 0;
+}
+
+static void mhuv3_doorbell_rx_shutdown(struct mhuv3 *mhu,
+ struct mbox_chan *chan)
+{
+ struct mhuv3_mbox_chan_priv *priv = chan->con_priv;
+
+ /* Mask Channel Transfer events */
+ writel_relaxed(BIT(priv->doorbell), &mhu->mbx->dbcw[priv->ch_idx].msk_set);
+}
+
+static void mhuv3_doorbell_rx_complete(struct mhuv3 *mhu, struct mbox_chan *chan)
+{
+ struct mhuv3_mbox_chan_priv *priv = chan->con_priv;
+
+ /* Clearing the pending transfer generates the Channel Transfer Ack */
+ writel_relaxed(BIT(priv->doorbell), &mhu->mbx->dbcw[priv->ch_idx].clr);
+}
+
+static int mhuv3_doorbell_last_tx_done(struct mhuv3 *mhu,
+ struct mbox_chan *chan)
+{
+ struct mhuv3_mbox_chan_priv *priv = chan->con_priv;
+ int done;
+
+ done = !(readl_relaxed(&mhu->pbx->dbcw[priv->ch_idx].st) &
+ BIT(priv->doorbell));
+ if (done) {
+ struct mhuv3_extension *e = mhu->ext[DBE_EXT];
+ unsigned long flags;
+
+ /* Take care to clear the pending doorbell also when polling */
+ spin_lock_irqsave(&e->pending_lock, flags);
+ e->pending_db[priv->ch_idx] &= ~BIT(priv->doorbell);
+ spin_unlock_irqrestore(&e->pending_lock, flags);
+ }
+
+ return done;
+}
+
+static int mhuv3_doorbell_send_data(struct mhuv3 *mhu, struct mbox_chan *chan,
+ void *arg)
+{
+ struct mhuv3_mbox_chan_priv *priv = chan->con_priv;
+ struct mhuv3_extension *e = mhu->ext[DBE_EXT];
+
+ scoped_guard(spinlock_irqsave, &e->pending_lock) {
+ /* Only one in-flight Transfer is allowed per-doorbell */
+ if (e->pending_db[priv->ch_idx] & BIT(priv->doorbell))
+ return -EBUSY;
+
+ e->pending_db[priv->ch_idx] |= BIT(priv->doorbell);
+ }
+
+ writel_relaxed(BIT(priv->doorbell), &mhu->pbx->dbcw[priv->ch_idx].set);
+
+ return 0;
+}
+
+static const struct mhuv3_protocol_ops mhuv3_doorbell_ops = {
+ .tx_startup = mhuv3_doorbell_tx_startup,
+ .tx_shutdown = mhuv3_doorbell_tx_shutdown,
+ .rx_startup = mhuv3_doorbell_rx_startup,
+ .rx_shutdown = mhuv3_doorbell_rx_shutdown,
+ .rx_complete = mhuv3_doorbell_rx_complete,
+ .last_tx_done = mhuv3_doorbell_last_tx_done,
+ .send_data = mhuv3_doorbell_send_data,
+};
+
+/* Sender and receiver mailbox ops */
+static bool mhuv3_sender_last_tx_done(struct mbox_chan *chan)
+{
+ struct mhuv3_mbox_chan_priv *priv = chan->con_priv;
+ struct mhuv3 *mhu = mhu_from_mbox(chan->mbox);
+
+ return priv->ops->last_tx_done(mhu, chan);
+}
+
+static int mhuv3_sender_send_data(struct mbox_chan *chan, void *data)
+{
+ struct mhuv3_mbox_chan_priv *priv = chan->con_priv;
+ struct mhuv3 *mhu = mhu_from_mbox(chan->mbox);
+
+ if (!priv->ops->last_tx_done(mhu, chan))
+ return -EBUSY;
+
+ return priv->ops->send_data(mhu, chan, data);
+}
+
+static int mhuv3_sender_startup(struct mbox_chan *chan)
+{
+ struct mhuv3_mbox_chan_priv *priv = chan->con_priv;
+ struct mhuv3 *mhu = mhu_from_mbox(chan->mbox);
+
+ if (priv->ops->tx_startup)
+ priv->ops->tx_startup(mhu, chan);
+
+ return 0;
+}
+
+static void mhuv3_sender_shutdown(struct mbox_chan *chan)
+{
+ struct mhuv3_mbox_chan_priv *priv = chan->con_priv;
+ struct mhuv3 *mhu = mhu_from_mbox(chan->mbox);
+
+ if (priv->ops->tx_shutdown)
+ priv->ops->tx_shutdown(mhu, chan);
+}
+
+static const struct mbox_chan_ops mhuv3_sender_ops = {
+ .send_data = mhuv3_sender_send_data,
+ .startup = mhuv3_sender_startup,
+ .shutdown = mhuv3_sender_shutdown,
+ .last_tx_done = mhuv3_sender_last_tx_done,
+};
+
+static int mhuv3_receiver_startup(struct mbox_chan *chan)
+{
+ struct mhuv3_mbox_chan_priv *priv = chan->con_priv;
+ struct mhuv3 *mhu = mhu_from_mbox(chan->mbox);
+
+ return priv->ops->rx_startup(mhu, chan);
+}
+
+static void mhuv3_receiver_shutdown(struct mbox_chan *chan)
+{
+ struct mhuv3_mbox_chan_priv *priv = chan->con_priv;
+ struct mhuv3 *mhu = mhu_from_mbox(chan->mbox);
+
+ priv->ops->rx_shutdown(mhu, chan);
+}
+
+static int mhuv3_receiver_send_data(struct mbox_chan *chan, void *data)
+{
+ dev_err(chan->mbox->dev,
+ "Trying to transmit on a MBX MHUv3 frame\n");
+ return -EIO;
+}
+
+static bool mhuv3_receiver_last_tx_done(struct mbox_chan *chan)
+{
+ dev_err(chan->mbox->dev, "Trying to Tx poll on a MBX MHUv3 frame\n");
+ return true;
+}
+
+static const struct mbox_chan_ops mhuv3_receiver_ops = {
+ .send_data = mhuv3_receiver_send_data,
+ .startup = mhuv3_receiver_startup,
+ .shutdown = mhuv3_receiver_shutdown,
+ .last_tx_done = mhuv3_receiver_last_tx_done,
+};
+
+static struct mbox_chan *mhuv3_dbe_mbox_of_xlate(struct mhuv3 *mhu,
+ unsigned int channel,
+ unsigned int doorbell)
+{
+ struct mhuv3_extension *e = mhu->ext[DBE_EXT];
+ struct mbox_controller *mbox = &mhu->mbox;
+ struct mbox_chan *chans = mbox->chans;
+
+ if (channel >= e->num_chans || doorbell >= MHUV3_FLAG_BITS) {
+ dev_err(mbox->dev, "Couldn't xlate to a valid channel (%d: %d)\n",
+ channel, doorbell);
+ return ERR_PTR(-ENODEV);
+ }
+
+ return &chans[e->base_ch_idx + channel * MHUV3_FLAG_BITS + doorbell];
+}
+
+static void mhuv3_dbe_combined_irq_setup(struct mhuv3 *mhu)
+{
+ struct mhuv3_extension *e = mhu->ext[DBE_EXT];
+ int i;
+
+ if (mhu->frame == PBX_FRAME) {
+ struct pdbcw_page __iomem *dbcw = mhu->pbx->dbcw;
+
+ for (i = 0; i < e->num_chans; i++) {
+ writel_relaxed_bitmask(0x1, &dbcw[i].int_clr, tfr_ack);
+ writel_relaxed_bitmask(0x0, &dbcw[i].int_en, tfr_ack);
+ writel_relaxed_bitmask(0x1, &dbcw[i].ctrl, comb_en);
+ }
+ } else {
+ struct mdbcw_page __iomem *dbcw = mhu->mbx->dbcw;
+
+ for (i = 0; i < e->num_chans; i++) {
+ writel_relaxed(0xFFFFFFFF, &dbcw[i].clr);
+ writel_relaxed(0xFFFFFFFF, &dbcw[i].msk_set);
+ writel_relaxed_bitmask(0x1, &dbcw[i].ctrl, comb_en);
+ }
+ }
+}
+
+static int mhuv3_dbe_channels_init(struct mhuv3 *mhu)
+{
+ struct mhuv3_extension *e = mhu->ext[DBE_EXT];
+ struct mbox_controller *mbox = &mhu->mbox;
+ struct mbox_chan *chans;
+ int i;
+
+ chans = mbox->chans + mbox->num_chans;
+ e->base_ch_idx = mbox->num_chans;
+ for (i = 0; i < e->num_chans; i++) {
+ struct mhuv3_mbox_chan_priv *priv;
+ int k;
+
+ for (k = 0; k < MHUV3_FLAG_BITS; k++) {
+ priv = devm_kmalloc(mbox->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->ch_idx = i;
+ priv->ops = &mhuv3_doorbell_ops;
+ priv->doorbell = k;
+ chans++->con_priv = priv;
+ mbox->num_chans++;
+ }
+ }
+
+ spin_lock_init(&e->pending_lock);
+
+ return 0;
+}
+
+static bool mhuv3_dbe_doorbell_lookup(struct mhuv3 *mhu, unsigned int channel,
+ unsigned int *db)
+{
+ struct mhuv3_extension *e = mhu->ext[DBE_EXT];
+ struct device *dev = mhu->mbox.dev;
+ u32 st;
+
+ if (mhu->frame == PBX_FRAME) {
+ u32 active_dbs, fired_dbs;
+
+ st = readl_relaxed_bitmask(&mhu->pbx->dbcw[channel].int_st,
+ tfr_ack);
+ if (!st)
+ goto err_spurious;
+
+ active_dbs = readl_relaxed(&mhu->pbx->dbcw[channel].st);
+ scoped_guard(spinlock_irqsave, &e->pending_lock) {
+ fired_dbs = e->pending_db[channel] & ~active_dbs;
+ if (!fired_dbs)
+ goto err_spurious;
+
+ *db = __ffs(fired_dbs);
+ e->pending_db[channel] &= ~BIT(*db);
+ }
+ fired_dbs &= ~BIT(*db);
+ /* Clear TFR Ack if no more doorbells pending */
+ if (!fired_dbs)
+ writel_relaxed_bitmask(0x1,
+ &mhu->pbx->dbcw[channel].int_clr,
+ tfr_ack);
+ } else {
+ st = readl_relaxed(&mhu->mbx->dbcw[channel].st_msk);
+ if (!st)
+ goto err_spurious;
+
+ *db = __ffs(st);
+ }
+
+ return true;
+
+err_spurious:
+ dev_warn(dev, "Spurious IRQ on %s channel:%d\n",
+ mhuv3_str[mhu->frame], channel);
+
+ return false;
+}
+
+static struct mbox_chan *mhuv3_dbe_chan_from_comb_irq_get(struct mhuv3 *mhu)
+{
+ struct mhuv3_extension *e = mhu->ext[DBE_EXT];
+ struct device *dev = mhu->mbox.dev;
+ int i;
+
+ for (i = 0; i < MHUV3_DBCH_CMB_INT_ST_REG_CNT; i++) {
+ unsigned int channel, db;
+ u32 cmb_st;
+
+ cmb_st = readl_relaxed(&mhu->ctrl->dbch_int_st[i]);
+ if (!cmb_st)
+ continue;
+
+ channel = i * MHUV3_FLAG_BITS + __ffs(cmb_st);
+ if (channel >= e->num_chans) {
+ dev_err(dev, "Invalid %s channel:%d\n",
+ mhuv3_str[mhu->frame], channel);
+ return ERR_PTR(-EIO);
+ }
+
+ if (!mhuv3_dbe_doorbell_lookup(mhu, channel, &db))
+ continue;
+
+ dev_dbg(dev, "Found %s ch[%d]/db[%d]\n",
+ mhuv3_str[mhu->frame], channel, db);
+
+ return &mhu->mbox.chans[channel * MHUV3_FLAG_BITS + db];
+ }
+
+ return ERR_PTR(-EIO);
+}
+
+static int mhuv3_dbe_init(struct mhuv3 *mhu)
+{
+ struct device *dev = mhu->mbox.dev;
+ struct mhuv3_extension *e;
+
+ if (!readl_relaxed_bitmask(&mhu->ctrl->feat_spt0, dbe_spt))
+ return 0;
+
+ dev_dbg(dev, "%s: Initializing DBE Extension.\n", mhuv3_str[mhu->frame]);
+
+ e = devm_kzalloc(dev, sizeof(*e), GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ e->type = DBE_EXT;
+ /* Note that, by the spec, the number of channels is (num_dbch + 1) */
+ e->num_chans =
+ readl_relaxed_bitmask(&mhu->ctrl->dbch_cfg0, num_dbch) + 1;
+ e->mbox_of_xlate = mhuv3_dbe_mbox_of_xlate;
+ e->combined_irq_setup = mhuv3_dbe_combined_irq_setup;
+ e->channels_init = mhuv3_dbe_channels_init;
+ e->chan_from_comb_irq_get = mhuv3_dbe_chan_from_comb_irq_get;
+
+ mhu->num_chans += e->num_chans * MHUV3_FLAG_BITS;
+ mhu->ext[DBE_EXT] = e;
+
+ dev_dbg(dev, "%s: found %d DBE channels.\n",
+ mhuv3_str[mhu->frame], e->num_chans);
+
+ return 0;
+}
+
+static int mhuv3_fce_init(struct mhuv3 *mhu)
+{
+ struct device *dev = mhu->mbox.dev;
+
+ if (!readl_relaxed_bitmask(&mhu->ctrl->feat_spt0, fce_spt))
+ return 0;
+
+ dev_dbg(dev, "%s: FCE Extension not supported by driver.\n",
+ mhuv3_str[mhu->frame]);
+
+ return 0;
+}
+
+static int mhuv3_fe_init(struct mhuv3 *mhu)
+{
+ struct device *dev = mhu->mbox.dev;
+
+ if (!readl_relaxed_bitmask(&mhu->ctrl->feat_spt0, fe_spt))
+ return 0;
+
+ dev_dbg(dev, "%s: FE Extension not supported by driver.\n",
+ mhuv3_str[mhu->frame]);
+
+ return 0;
+}
+
+static mhuv3_extension_initializer mhuv3_extension_init[NUM_EXT] = {
+ mhuv3_dbe_init,
+ mhuv3_fce_init,
+ mhuv3_fe_init,
+};
+
+static int mhuv3_initialize_channels(struct device *dev, struct mhuv3 *mhu)
+{
+ struct mbox_controller *mbox = &mhu->mbox;
+ int i, ret = 0;
+
+ mbox->chans = devm_kcalloc(dev, mhu->num_chans,
+ sizeof(*mbox->chans), GFP_KERNEL);
+ if (!mbox->chans)
+ return dev_err_probe(dev, -ENOMEM,
+ "Failed to initialize channels\n");
+
+ for (i = 0; i < NUM_EXT && !ret; i++)
+ if (mhu->ext[i])
+ ret = mhu->ext[i]->channels_init(mhu);
+
+ return ret;
+}
+
+static struct mbox_chan *mhuv3_mbox_of_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *pa)
+{
+ struct mhuv3 *mhu = mhu_from_mbox(mbox);
+ unsigned int type, channel, param;
+
+ if (pa->args_count != MHUV3_MBOX_CELLS)
+ return ERR_PTR(-EINVAL);
+
+ type = pa->args[MHUV3_MBOX_CELL_TYPE];
+ if (type >= NUM_EXT)
+ return ERR_PTR(-EINVAL);
+
+ channel = pa->args[MHUV3_MBOX_CELL_CHWN];
+ param = pa->args[MHUV3_MBOX_CELL_PARAM];
+
+ return mhu->ext[type]->mbox_of_xlate(mhu, channel, param);
+}
+
+static void mhu_frame_cleanup_actions(void *data)
+{
+ struct mhuv3 *mhu = data;
+
+ writel_relaxed_bitmask(0x0, &mhu->ctrl->x_ctrl, op_req);
+}
+
+static int mhuv3_frame_init(struct mhuv3 *mhu, void __iomem *regs)
+{
+ struct device *dev = mhu->mbox.dev;
+ int i;
+
+ mhu->ctrl = regs;
+ mhu->frame = readl_relaxed_bitmask(&mhu->ctrl->blk_id, id);
+ if (mhu->frame > MBX_FRAME)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid Frame type- %d\n", mhu->frame);
+
+ mhu->major = readl_relaxed_bitmask(&mhu->ctrl->aidr, arch_major_rev);
+ mhu->minor = readl_relaxed_bitmask(&mhu->ctrl->aidr, arch_minor_rev);
+ mhu->implem = readl_relaxed_bitmask(&mhu->ctrl->iidr, implementer);
+ mhu->rev = readl_relaxed_bitmask(&mhu->ctrl->iidr, revision);
+ mhu->var = readl_relaxed_bitmask(&mhu->ctrl->iidr, variant);
+ mhu->prod_id = readl_relaxed_bitmask(&mhu->ctrl->iidr, product_id);
+ if (mhu->major != MHUV3_MAJOR_VERSION)
+ return dev_err_probe(dev, -EINVAL,
+ "Unsupported MHU %s block - major:%d minor:%d\n",
+ mhuv3_str[mhu->frame], mhu->major,
+ mhu->minor);
+
+ mhu->auto_op_full =
+ !!readl_relaxed_bitmask(&mhu->ctrl->feat_spt1, auto_op_spt);
+ /* Request the PBX/MBX to remain operational */
+ if (mhu->auto_op_full) {
+ writel_relaxed_bitmask(0x1, &mhu->ctrl->x_ctrl, op_req);
+ devm_add_action_or_reset(dev, mhu_frame_cleanup_actions, mhu);
+ }
+
+ dev_dbg(dev,
+ "Found MHU %s block - major:%d minor:%d\n implem:0x%X rev:0x%X var:0x%X prod_id:0x%X",
+ mhuv3_str[mhu->frame], mhu->major, mhu->minor,
+ mhu->implem, mhu->rev, mhu->var, mhu->prod_id);
+
+ if (mhu->frame == PBX_FRAME)
+ mhu->pbx = regs;
+ else
+ mhu->mbx = regs;
+
+ for (i = 0; i < NUM_EXT; i++) {
+ int ret;
+
+ /*
+ * Note that extension initialization fails only when the
+ * extension's own init routine fails for an extension that was
+ * found to be supported in both hardware and software.
+ */
+ ret = mhuv3_extension_init[i](mhu);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to initialize %s %s\n",
+ mhuv3_str[mhu->frame],
+ mhuv3_ext_str[i]);
+ }
+
+ return 0;
+}
+
+static irqreturn_t mhuv3_pbx_comb_interrupt(int irq, void *arg)
+{
+ unsigned int i, found = 0;
+ struct mhuv3 *mhu = arg;
+ struct mbox_chan *chan;
+ struct device *dev;
+ int ret = IRQ_NONE;
+
+ dev = mhu->mbox.dev;
+ for (i = 0; i < NUM_EXT; i++) {
+ struct mhuv3_mbox_chan_priv *priv;
+
+ /* FCE does not participate in the PBX combined interrupt */
+ if (i == FCE_EXT || !mhu->ext[i])
+ continue;
+
+ chan = mhu->ext[i]->chan_from_comb_irq_get(mhu);
+ if (IS_ERR(chan))
+ continue;
+
+ found++;
+ priv = chan->con_priv;
+ if (!chan->cl) {
+ dev_warn(dev, "TX Ack on UNBOUND channel (%u)\n",
+ priv->ch_idx);
+ continue;
+ }
+
+ mbox_chan_txdone(chan, 0);
+ ret = IRQ_HANDLED;
+ }
+
+ if (found == 0)
+ dev_warn_once(dev, "Failed to find channel for the TX interrupt\n");
+
+ return ret;
+}
+
+static irqreturn_t mhuv3_mbx_comb_interrupt(int irq, void *arg)
+{
+ unsigned int i, found = 0;
+ struct mhuv3 *mhu = arg;
+ struct mbox_chan *chan;
+ struct device *dev;
+ int ret = IRQ_NONE;
+
+ dev = mhu->mbox.dev;
+ for (i = 0; i < NUM_EXT; i++) {
+ struct mhuv3_mbox_chan_priv *priv;
+ void *data __free(kfree) = NULL;
+
+ if (!mhu->ext[i])
+ continue;
+
+ /* Process any extension which could be source of the IRQ */
+ chan = mhu->ext[i]->chan_from_comb_irq_get(mhu);
+ if (IS_ERR(chan))
+ continue;
+
+ found++;
+ /* From here on we need to call rx_complete even on error */
+ priv = chan->con_priv;
+ if (!chan->cl) {
+ dev_warn(dev, "RX Data on UNBOUND channel (%u)\n",
+ priv->ch_idx);
+ goto rx_ack;
+ }
+
+ /* Read optional in-band LE data first. */
+ if (priv->ops->read_data) {
+ data = priv->ops->read_data(mhu, chan);
+ if (IS_ERR(data)) {
+ dev_err(dev,
+ "Failed to read in-band data. err:%ld\n",
+ PTR_ERR(data));
+ goto rx_ack;
+ }
+ }
+
+ mbox_chan_received_data(chan, data);
+ ret = IRQ_HANDLED;
+
+ /*
+ * Acknowledge transfer after any possible optional
+ * out-of-band data has also been retrieved via
+ * mbox_chan_received_data().
+ */
+rx_ack:
+ if (priv->ops->rx_complete)
+ priv->ops->rx_complete(mhu, chan);
+ }
+
+ if (found == 0)
+ dev_warn_once(dev, "Failed to find channel for the RX interrupt\n");
+
+ return ret;
+}
+
+static int mhuv3_setup_pbx(struct mhuv3 *mhu)
+{
+ struct device *dev = mhu->mbox.dev;
+
+ mhu->mbox.ops = &mhuv3_sender_ops;
+
+ if (mhu->cmb_irq > 0) {
+ int ret, i;
+
+ ret = devm_request_threaded_irq(dev, mhu->cmb_irq, NULL,
+ mhuv3_pbx_comb_interrupt,
+ IRQF_ONESHOT, "mhuv3-pbx", mhu);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to request PBX IRQ\n");
+
+ mhu->mbox.txdone_irq = true;
+ mhu->mbox.txdone_poll = false;
+
+ for (i = 0; i < NUM_EXT; i++)
+ if (mhu->ext[i])
+ mhu->ext[i]->combined_irq_setup(mhu);
+
+ dev_dbg(dev, "MHUv3 PBX IRQs initialized.\n");
+
+ return 0;
+ }
+
+ dev_info(dev, "Using PBX in Tx polling mode.\n");
+ mhu->mbox.txdone_irq = false;
+ mhu->mbox.txdone_poll = true;
+ mhu->mbox.txpoll_period = 1;
+
+ return 0;
+}
+
+static int mhuv3_setup_mbx(struct mhuv3 *mhu)
+{
+ struct device *dev = mhu->mbox.dev;
+ int ret, i;
+
+ mhu->mbox.ops = &mhuv3_receiver_ops;
+
+ if (mhu->cmb_irq <= 0)
+ return dev_err_probe(dev, -EINVAL,
+ "MBX combined IRQ is missing !\n");
+
+ ret = devm_request_threaded_irq(dev, mhu->cmb_irq, NULL,
+ mhuv3_mbx_comb_interrupt, IRQF_ONESHOT,
+ "mhuv3-mbx", mhu);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request MBX IRQ\n");
+
+ for (i = 0; i < NUM_EXT; i++)
+ if (mhu->ext[i])
+ mhu->ext[i]->combined_irq_setup(mhu);
+
+ dev_dbg(dev, "MHUv3 MBX IRQs initialized.\n");
+
+ return ret;
+}
+
+static int mhuv3_irqs_init(struct mhuv3 *mhu, struct platform_device *pdev)
+{
+ dev_dbg(mhu->mbox.dev, "Initializing %s block.\n",
+ mhuv3_str[mhu->frame]);
+
+ if (mhu->frame == PBX_FRAME) {
+ mhu->cmb_irq =
+ platform_get_irq_byname_optional(pdev, "combined");
+ return mhuv3_setup_pbx(mhu);
+ }
+
+ mhu->cmb_irq = platform_get_irq_byname(pdev, "combined");
+ return mhuv3_setup_mbx(mhu);
+}
+
+static int mhuv3_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ void __iomem *regs;
+ struct mhuv3 *mhu;
+ int ret;
+
+ mhu = devm_kzalloc(dev, sizeof(*mhu), GFP_KERNEL);
+ if (!mhu)
+ return -ENOMEM;
+
+ regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ mhu->mbox.dev = dev;
+ ret = mhuv3_frame_init(mhu, regs);
+ if (ret)
+ return ret;
+
+ ret = mhuv3_irqs_init(mhu, pdev);
+ if (ret)
+ return ret;
+
+ mhu->mbox.of_xlate = mhuv3_mbox_of_xlate;
+ ret = mhuv3_initialize_channels(dev, mhu);
+ if (ret)
+ return ret;
+
+ ret = devm_mbox_controller_register(dev, &mhu->mbox);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to register ARM MHUv3 driver\n");
+
+ return ret;
+}
+
+static const struct of_device_id mhuv3_of_match[] = {
+ { .compatible = "arm,mhuv3", .data = NULL },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mhuv3_of_match);
+
+static struct platform_driver mhuv3_driver = {
+ .driver = {
+ .name = "arm-mhuv3-mailbox",
+ .of_match_table = mhuv3_of_match,
+ },
+ .probe = mhuv3_probe,
+};
+module_platform_driver(mhuv3_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ARM MHUv3 Driver");
+MODULE_AUTHOR("Cristian Marussi <cristian.marussi@arm.com>");
diff --git a/drivers/mailbox/ast2700-mailbox.c b/drivers/mailbox/ast2700-mailbox.c
new file mode 100644
index 000000000000..83c6afe5411f
--- /dev/null
+++ b/drivers/mailbox/ast2700-mailbox.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright Aspeed Technology Inc. (C) 2025. All rights reserved
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* Each bit in the register represents an IPC ID */
+#define IPCR_TX_TRIG 0x00
+#define IPCR_ENABLE 0x04
+#define IPCR_STATUS 0x08
+#define RX_IRQ(n) BIT(n)
+#define RX_IRQ_MASK 0xf
+#define IPCR_DATA 0x10
+
+struct ast2700_mbox_data {
+ u8 num_chans;
+ u8 msg_size;
+};
+
+struct ast2700_mbox {
+ struct mbox_controller mbox;
+ u8 msg_size;
+ void __iomem *tx_regs;
+ void __iomem *rx_regs;
+ spinlock_t lock;
+};
+
+static inline int ch_num(struct mbox_chan *chan)
+{
+ return chan - chan->mbox->chans;
+}
+
+static inline bool ast2700_mbox_tx_done(struct ast2700_mbox *mb, int idx)
+{
+ return !(readl(mb->tx_regs + IPCR_STATUS) & BIT(idx));
+}
+
+static irqreturn_t ast2700_mbox_irq(int irq, void *p)
+{
+ struct ast2700_mbox *mb = p;
+ void __iomem *data_reg;
+ int num_words = mb->msg_size / sizeof(u32);
+ u32 *word_data;
+ u32 status;
+ int n, i;
+
+ /* Only examine channels that are currently enabled. */
+ status = readl(mb->rx_regs + IPCR_ENABLE) &
+ readl(mb->rx_regs + IPCR_STATUS);
+
+ if (!(status & RX_IRQ_MASK))
+ return IRQ_NONE;
+
+ for (n = 0; n < mb->mbox.num_chans; ++n) {
+ struct mbox_chan *chan = &mb->mbox.chans[n];
+
+ if (!(status & RX_IRQ(n)))
+ continue;
+
+ data_reg = mb->rx_regs + IPCR_DATA + mb->msg_size * n;
+ word_data = chan->con_priv;
+ /* Read the message data */
+ for (i = 0; i < num_words; i++)
+ word_data[i] = readl(data_reg + i * sizeof(u32));
+
+ mbox_chan_received_data(chan, chan->con_priv);
+
+ /* The IRQ can be cleared only once the FIFO is empty. */
+ writel(RX_IRQ(n), mb->rx_regs + IPCR_STATUS);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int ast2700_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev);
+ int idx = ch_num(chan);
+ void __iomem *data_reg = mb->tx_regs + IPCR_DATA + mb->msg_size * idx;
+ u32 *word_data = data;
+ int num_words = mb->msg_size / sizeof(u32);
+ int i;
+
+ if (!(readl(mb->tx_regs + IPCR_ENABLE) & BIT(idx))) {
+ dev_warn(mb->mbox.dev, "%s: Ch-%d not enabled yet\n", __func__, idx);
+ return -ENODEV;
+ }
+
+ if (!(ast2700_mbox_tx_done(mb, idx))) {
+		dev_warn(mb->mbox.dev, "%s: Ch-%d previous transfer not finished\n", __func__, idx);
+ return -EBUSY;
+ }
+
+ /* Write the message data */
+	for (i = 0; i < num_words; i++)
+ writel(word_data[i], data_reg + i * sizeof(u32));
+
+ writel(BIT(idx), mb->tx_regs + IPCR_TX_TRIG);
+ dev_dbg(mb->mbox.dev, "%s: Ch-%d sent\n", __func__, idx);
+
+ return 0;
+}
+
+static int ast2700_mbox_startup(struct mbox_chan *chan)
+{
+ struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev);
+ int idx = ch_num(chan);
+ void __iomem *reg = mb->rx_regs + IPCR_ENABLE;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mb->lock, flags);
+ writel(readl(reg) | BIT(idx), reg);
+ spin_unlock_irqrestore(&mb->lock, flags);
+
+ return 0;
+}
+
+static void ast2700_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev);
+ int idx = ch_num(chan);
+ void __iomem *reg = mb->rx_regs + IPCR_ENABLE;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mb->lock, flags);
+ writel(readl(reg) & ~BIT(idx), reg);
+ spin_unlock_irqrestore(&mb->lock, flags);
+}
+
+static bool ast2700_mbox_last_tx_done(struct mbox_chan *chan)
+{
+ struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev);
+ int idx = ch_num(chan);
+
+ return ast2700_mbox_tx_done(mb, idx);
+}
+
+static const struct mbox_chan_ops ast2700_mbox_chan_ops = {
+ .send_data = ast2700_mbox_send_data,
+ .startup = ast2700_mbox_startup,
+ .shutdown = ast2700_mbox_shutdown,
+ .last_tx_done = ast2700_mbox_last_tx_done,
+};
+
+static int ast2700_mbox_probe(struct platform_device *pdev)
+{
+ struct ast2700_mbox *mb;
+ const struct ast2700_mbox_data *dev_data;
+ struct device *dev = &pdev->dev;
+ int irq, ret;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ dev_data = device_get_match_data(&pdev->dev);
+
+ mb = devm_kzalloc(dev, sizeof(*mb), GFP_KERNEL);
+ if (!mb)
+ return -ENOMEM;
+
+ mb->mbox.chans = devm_kcalloc(&pdev->dev, dev_data->num_chans,
+ sizeof(*mb->mbox.chans), GFP_KERNEL);
+ if (!mb->mbox.chans)
+ return -ENOMEM;
+
+ /* con_priv of each channel is used to store the message received */
+ for (int i = 0; i < dev_data->num_chans; i++) {
+ mb->mbox.chans[i].con_priv = devm_kcalloc(dev, dev_data->msg_size,
+ sizeof(u8), GFP_KERNEL);
+ if (!mb->mbox.chans[i].con_priv)
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, mb);
+
+ mb->tx_regs = devm_platform_ioremap_resource_byname(pdev, "tx");
+ if (IS_ERR(mb->tx_regs))
+ return PTR_ERR(mb->tx_regs);
+
+ mb->rx_regs = devm_platform_ioremap_resource_byname(pdev, "rx");
+ if (IS_ERR(mb->rx_regs))
+ return PTR_ERR(mb->rx_regs);
+
+ mb->msg_size = dev_data->msg_size;
+ mb->mbox.dev = dev;
+ mb->mbox.num_chans = dev_data->num_chans;
+ mb->mbox.ops = &ast2700_mbox_chan_ops;
+ mb->mbox.txdone_irq = false;
+ mb->mbox.txdone_poll = true;
+ mb->mbox.txpoll_period = 5;
+ spin_lock_init(&mb->lock);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, ast2700_mbox_irq, 0, dev_name(dev), mb);
+ if (ret)
+ return ret;
+
+ return devm_mbox_controller_register(dev, &mb->mbox);
+}
+
+static const struct ast2700_mbox_data ast2700_dev_data = {
+ .num_chans = 4,
+ .msg_size = 0x20,
+};
+
+static const struct of_device_id ast2700_mbox_of_match[] = {
+ { .compatible = "aspeed,ast2700-mailbox", .data = &ast2700_dev_data },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ast2700_mbox_of_match);
+
+static struct platform_driver ast2700_mbox_driver = {
+ .driver = {
+ .name = "ast2700-mailbox",
+ .of_match_table = ast2700_mbox_of_match,
+ },
+ .probe = ast2700_mbox_probe,
+};
+module_platform_driver(ast2700_mbox_driver);
+
+MODULE_AUTHOR("Jammy Huang <jammy_huang@aspeedtech.com>");
+MODULE_DESCRIPTION("ASPEED AST2700 IPC driver");
+MODULE_LICENSE("GPL");
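Each AST2700 channel above transfers a fixed msg_size of 0x20 bytes (eight 32-bit words) through its IPCR_DATA window. A hedged usage sketch follows; the function and payload are illustrative, only the word-by-word copy semantics come from the driver:

#include <linux/mailbox_client.h>

static int ast2700_demo_send(struct mbox_client *cl)
{
	u32 msg[8] = { 0xdeadbeef, 1, 2, 3, 4, 5, 6, 7 };	/* 0x20 bytes */
	struct mbox_chan *chan;
	int ret;

	chan = mbox_request_channel(cl, 0);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = mbox_send_message(chan, msg);	/* copied into IPCR_DATA, then TX_TRIG */
	mbox_free_channel(chan);

	return ret < 0 ? ret : 0;
}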
diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
index bf6e86b0ed09..41f79e51d9e5 100644
--- a/drivers/mailbox/bcm-flexrm-mailbox.c
+++ b/drivers/mailbox/bcm-flexrm-mailbox.c
@@ -1501,16 +1501,12 @@ static int flexrm_mbox_probe(struct platform_device *pdev)
mbox->dev = dev;
platform_set_drvdata(pdev, mbox);
- /* Get resource for registers */
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ /* Get resource for registers and map registers of all rings */
+ mbox->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &iomem);
if (!iomem || (resource_size(iomem) < RING_REGS_SIZE)) {
ret = -ENODEV;
goto fail;
- }
-
- /* Map registers of all rings */
- mbox->regs = devm_ioremap_resource(&pdev->dev, iomem);
- if (IS_ERR(mbox->regs)) {
+ } else if (IS_ERR(mbox->regs)) {
ret = PTR_ERR(mbox->regs);
goto fail;
}
@@ -1591,8 +1587,8 @@ static int flexrm_mbox_probe(struct platform_device *pdev)
}
/* Allocate platform MSIs for each ring */
- ret = platform_msi_domain_alloc_irqs(dev, mbox->num_rings,
- flexrm_mbox_msi_write);
+ ret = platform_device_msi_init_and_alloc_irqs(dev, mbox->num_rings,
+ flexrm_mbox_msi_write);
if (ret)
goto fail_destroy_cmpl_pool;
@@ -1645,7 +1641,7 @@ skip_debugfs:
fail_free_debugfs_root:
debugfs_remove_recursive(mbox->root);
- platform_msi_domain_free_irqs(dev);
+ platform_device_msi_free_irqs_all(dev);
fail_destroy_cmpl_pool:
dma_pool_destroy(mbox->cmpl_pool);
fail_destroy_bd_pool:
@@ -1654,19 +1650,17 @@ fail:
return ret;
}
-static int flexrm_mbox_remove(struct platform_device *pdev)
+static void flexrm_mbox_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct flexrm_mbox *mbox = platform_get_drvdata(pdev);
debugfs_remove_recursive(mbox->root);
- platform_msi_domain_free_irqs(dev);
+ platform_device_msi_free_irqs_all(dev);
dma_pool_destroy(mbox->cmpl_pool);
dma_pool_destroy(mbox->bd_pool);
-
- return 0;
}
static const struct of_device_id flexrm_mbox_of_match[] = {
diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c
index 8d3a4c1fe761..406bc41cba60 100644
--- a/drivers/mailbox/bcm-pdc-mailbox.c
+++ b/drivers/mailbox/bcm-pdc-mailbox.c
@@ -33,10 +33,9 @@
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/io.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox/brcm-message.h>
@@ -44,6 +43,7 @@
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
+#include <linux/workqueue.h>
#define PDC_SUCCESS 0
@@ -158,10 +158,6 @@ enum pdc_hw {
PDC_HW /* PDC/MDE hardware (i.e. Northstar 2, Pegasus) */
};
-struct pdc_dma_map {
- void *ctx; /* opaque context associated with frame */
-};
-
/* dma descriptor */
struct dma64dd {
u32 ctrl1; /* misc control bits */
@@ -294,8 +290,8 @@ struct pdc_state {
unsigned int pdc_irq;
- /* tasklet for deferred processing after DMA rx interrupt */
- struct tasklet_struct rx_tasklet;
+ /* work for deferred processing after DMA rx interrupt */
+ struct work_struct rx_work;
/* Number of bytes of receive status prior to each rx frame */
u32 rx_status_len;
@@ -694,7 +690,7 @@ pdc_receive(struct pdc_state *pdcs)
* pdc_tx_list_sg_add() - Add the buffers in a scatterlist to the transmit
* descriptors for a given SPU. The scatterlist buffers contain the data for a
* SPU request message.
- * @spu_idx: The index of the SPU to submit the request to, [0, max_spu)
+ * @pdcs: PDC state for the SPU that will process this request
* @sg: Scatterlist whose buffers contain part of the SPU request
*
* If a scatterlist buffer is larger than PDC_DMA_BUF_MAX, multiple descriptors
@@ -861,7 +857,7 @@ static int pdc_rx_list_init(struct pdc_state *pdcs, struct scatterlist *dst_sg,
* pdc_rx_list_sg_add() - Add the buffers in a scatterlist to the receive
* descriptors for a given SPU. The caller must have already DMA mapped the
* scatterlist.
- * @spu_idx: Indicates which SPU the buffers are for
+ * @pdcs: PDC state for the SPU that will process this request
* @sg: Scatterlist whose buffers are added to the receive ring
*
* If a receive buffer in the scatterlist is larger than PDC_DMA_BUF_MAX,
@@ -953,18 +949,18 @@ static irqreturn_t pdc_irq_handler(int irq, void *data)
iowrite32(intstatus, pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);
/* Wakeup IRQ thread */
- tasklet_schedule(&pdcs->rx_tasklet);
+ queue_work(system_bh_wq, &pdcs->rx_work);
return IRQ_HANDLED;
}
/**
- * pdc_tasklet_cb() - Tasklet callback that runs the deferred processing after
+ * pdc_work_cb() - Work callback that runs the deferred processing after
* a DMA receive interrupt. Reenables the receive interrupt.
- * @data: PDC state structure
+ * @t: Pointer to the rx_work member of struct pdc_state
*/
-static void pdc_tasklet_cb(struct tasklet_struct *t)
+static void pdc_work_cb(struct work_struct *t)
{
- struct pdc_state *pdcs = from_tasklet(pdcs, t, rx_tasklet);
+ struct pdc_state *pdcs = from_work(pdcs, t, rx_work);
pdc_receive(pdcs);
@@ -1494,7 +1490,6 @@ static int pdc_dt_read(struct platform_device *pdev, struct pdc_state *pdcs)
{
struct device *dev = &pdev->dev;
struct device_node *dn = pdev->dev.of_node;
- const struct of_device_id *match;
const int *hw_type;
int err;
@@ -1509,11 +1504,9 @@ static int pdc_dt_read(struct platform_device *pdev, struct pdc_state *pdcs)
pdcs->hw_type = PDC_HW;
- match = of_match_device(of_match_ptr(pdc_mbox_of_match), dev);
- if (match != NULL) {
- hw_type = match->data;
+ hw_type = device_get_match_data(dev);
+ if (hw_type)
pdcs->hw_type = *hw_type;
- }
return 0;
}
@@ -1566,19 +1559,13 @@ static int pdc_probe(struct platform_device *pdev)
if (err)
goto cleanup_ring_pool;
- pdc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!pdc_regs) {
- err = -ENODEV;
- goto cleanup_ring_pool;
- }
- dev_dbg(dev, "PDC register region res.start = %pa, res.end = %pa",
- &pdc_regs->start, &pdc_regs->end);
-
- pdcs->pdc_reg_vbase = devm_ioremap_resource(&pdev->dev, pdc_regs);
+ pdcs->pdc_reg_vbase = devm_platform_get_and_ioremap_resource(pdev, 0, &pdc_regs);
if (IS_ERR(pdcs->pdc_reg_vbase)) {
err = PTR_ERR(pdcs->pdc_reg_vbase);
goto cleanup_ring_pool;
}
+ dev_dbg(dev, "PDC register region res.start = %pa, res.end = %pa",
+ &pdc_regs->start, &pdc_regs->end);
/* create rx buffer pool after dt read to know how big buffers are */
err = pdc_rx_buf_pool_create(pdcs);
@@ -1587,8 +1574,8 @@ static int pdc_probe(struct platform_device *pdev)
pdc_hw_init(pdcs);
- /* Init tasklet for deferred DMA rx processing */
- tasklet_setup(&pdcs->rx_tasklet, pdc_tasklet_cb);
+ /* Init work for deferred DMA rx processing */
+ INIT_WORK(&pdcs->rx_work, pdc_work_cb);
err = pdc_interrupts_init(pdcs);
if (err)
@@ -1605,7 +1592,7 @@ static int pdc_probe(struct platform_device *pdev)
return PDC_SUCCESS;
cleanup_buf_pool:
- tasklet_kill(&pdcs->rx_tasklet);
+ cancel_work_sync(&pdcs->rx_work);
dma_pool_destroy(pdcs->rx_buf_pool);
cleanup_ring_pool:
@@ -1615,19 +1602,18 @@ cleanup:
return err;
}
-static int pdc_remove(struct platform_device *pdev)
+static void pdc_remove(struct platform_device *pdev)
{
struct pdc_state *pdcs = platform_get_drvdata(pdev);
pdc_free_debugfs();
- tasklet_kill(&pdcs->rx_tasklet);
+ cancel_work_sync(&pdcs->rx_work);
pdc_hw_disable(pdcs);
dma_pool_destroy(pdcs->rx_buf_pool);
dma_pool_destroy(pdcs->ring_pool);
- return 0;
}
static struct platform_driver pdc_mbox_driver = {
@@ -1635,7 +1621,7 @@ static struct platform_driver pdc_mbox_driver = {
.remove = pdc_remove,
.driver = {
.name = "brcm-iproc-pdc-mbox",
- .of_match_table = of_match_ptr(pdc_mbox_of_match),
+ .of_match_table = pdc_mbox_of_match,
},
};
module_platform_driver(pdc_mbox_driver);
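The hunks above convert the PDC driver from a tasklet to a work item queued on the BH workqueue, keeping softirq execution context. Reduced to its essentials, the pattern looks like this; struct foo and the function names are illustrative:

#include <linux/workqueue.h>

struct foo {
	struct work_struct rx_work;
};

static void foo_work_cb(struct work_struct *t)
{
	struct foo *f = from_work(f, t, rx_work);	/* container_of() helper */

	(void)f;	/* deferred RX processing runs here, in BH context */
}

static void foo_setup(struct foo *f)
{
	INIT_WORK(&f->rx_work, foo_work_cb);		/* at probe time */
}

static void foo_hardirq(struct foo *f)
{
	queue_work(system_bh_wq, &f->rx_work);		/* from the IRQ handler */
}

static void foo_teardown(struct foo *f)
{
	cancel_work_sync(&f->rx_work);			/* at remove time */
}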
diff --git a/drivers/mailbox/bcm2835-mailbox.c b/drivers/mailbox/bcm2835-mailbox.c
index fbfd0202047c..ea12fb8d2401 100644
--- a/drivers/mailbox/bcm2835-mailbox.c
+++ b/drivers/mailbox/bcm2835-mailbox.c
@@ -145,7 +145,8 @@ static int bcm2835_mbox_probe(struct platform_device *pdev)
spin_lock_init(&mbox->lock);
ret = devm_request_irq(dev, irq_of_parse_and_map(dev->of_node, 0),
- bcm2835_mbox_irq, 0, dev_name(dev), mbox);
+ bcm2835_mbox_irq, IRQF_NO_SUSPEND, dev_name(dev),
+ mbox);
if (ret) {
dev_err(dev, "Failed to register a mailbox IRQ handler: %d\n",
ret);
diff --git a/drivers/mailbox/bcm74110-mailbox.c b/drivers/mailbox/bcm74110-mailbox.c
new file mode 100644
index 000000000000..2e7e86f3e6a4
--- /dev/null
+++ b/drivers/mailbox/bcm74110-mailbox.c
@@ -0,0 +1,656 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Broadcom BCM74110 Mailbox Driver
+ *
+ * Copyright (c) 2025 Broadcom
+ */
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/delay.h>
+#include <linux/mailbox_controller.h>
+#include <linux/bitfield.h>
+#include <linux/slab.h>
+
+#define BCM_MBOX_BASE(sel) ((sel) * 0x40)
+#define BCM_MBOX_IRQ_BASE(sel) (((sel) * 0x20) + 0x800)
+
+#define BCM_MBOX_CFGA 0x0
+#define BCM_MBOX_CFGB 0x4
+#define BCM_MBOX_CFGC 0x8
+#define BCM_MBOX_CFGD 0xc
+#define BCM_MBOX_CTRL 0x10
+#define BCM_MBOX_CTRL_EN BIT(0)
+#define BCM_MBOX_CTRL_CLR BIT(1)
+#define BCM_MBOX_STATUS0 0x14
+#define BCM_MBOX_STATUS0_NOT_EMPTY BIT(28)
+#define BCM_MBOX_STATUS0_FULL BIT(29)
+#define BCM_MBOX_STATUS1 0x18
+#define BCM_MBOX_STATUS2 0x1c
+#define BCM_MBOX_WDATA 0x20
+#define BCM_MBOX_RDATA 0x28
+
+#define BCM_MBOX_IRQ_STATUS 0x0
+#define BCM_MBOX_IRQ_SET 0x4
+#define BCM_MBOX_IRQ_CLEAR 0x8
+#define BCM_MBOX_IRQ_MASK_STATUS 0xc
+#define BCM_MBOX_IRQ_MASK_SET 0x10
+#define BCM_MBOX_IRQ_MASK_CLEAR 0x14
+#define BCM_MBOX_IRQ_TIMEOUT BIT(0)
+#define BCM_MBOX_IRQ_NOT_EMPTY BIT(1)
+#define BCM_MBOX_IRQ_FULL BIT(2)
+#define BCM_MBOX_IRQ_LOW_WM BIT(3)
+#define BCM_MBOX_IRQ_HIGH_WM BIT(4)
+
+#define BCM_LINK_CODE0 0xbe0
+#define BCM_LINK_CODE1 0xbe1
+#define BCM_LINK_CODE2 0xbe2
+
+enum {
+ BCM_MSG_FUNC_LINK_START = 0,
+ BCM_MSG_FUNC_LINK_STOP,
+ BCM_MSG_FUNC_SHMEM_TX,
+ BCM_MSG_FUNC_SHMEM_RX,
+ BCM_MSG_FUNC_SHMEM_STOP,
+ BCM_MSG_FUNC_MAX,
+};
+
+enum {
+ BCM_MSG_SVC_INIT = 0,
+ BCM_MSG_SVC_PMC,
+ BCM_MSG_SVC_SCMI,
+ BCM_MSG_SVC_DPFE,
+ BCM_MSG_SVC_MAX,
+};
+
+struct bcm74110_mbox_msg {
+ struct list_head list_entry;
+#define BCM_MSG_VERSION_MASK GENMASK(31, 29)
+#define BCM_MSG_VERSION 0x1
+#define BCM_MSG_REQ_MASK BIT(28)
+#define BCM_MSG_RPLY_MASK BIT(27)
+#define BCM_MSG_SVC_MASK GENMASK(26, 24)
+#define BCM_MSG_FUNC_MASK GENMASK(23, 16)
+#define BCM_MSG_LENGTH_MASK GENMASK(15, 4)
+#define BCM_MSG_SLOT_MASK GENMASK(3, 0)
+
+#define BCM_MSG_SET_FIELD(hdr, field, val) \
+ do { \
+ hdr &= ~BCM_MSG_##field##_MASK; \
+ hdr |= FIELD_PREP(BCM_MSG_##field##_MASK, val); \
+ } while (0)
+
+#define BCM_MSG_GET_FIELD(hdr, field) \
+ FIELD_GET(BCM_MSG_##field##_MASK, hdr)
+ u32 msg;
+};
+
+struct bcm74110_mbox_chan {
+ struct bcm74110_mbox *mbox;
+ bool en;
+ int slot;
+ int type;
+};
+
+struct bcm74110_mbox {
+ struct platform_device *pdev;
+ void __iomem *base;
+
+ int tx_chan;
+ int rx_chan;
+ int rx_irq;
+ struct list_head rx_svc_init_list;
+ spinlock_t rx_svc_list_lock;
+
+ struct mbox_controller controller;
+ struct bcm74110_mbox_chan *mbox_chan;
+};
+
+#define BCM74110_OFFSET_IO_WRITEL_MACRO(name, offset_base) \
+static void bcm74110_##name##_writel(struct bcm74110_mbox *mbox,\
+ u32 val, u32 off) \
+{ \
+ writel_relaxed(val, mbox->base + offset_base + off); \
+}
+BCM74110_OFFSET_IO_WRITEL_MACRO(tx, BCM_MBOX_BASE(mbox->tx_chan));
+BCM74110_OFFSET_IO_WRITEL_MACRO(irq, BCM_MBOX_IRQ_BASE(mbox->rx_chan));
+
+#define BCM74110_OFFSET_IO_READL_MACRO(name, offset_base) \
+static u32 bcm74110_##name##_readl(struct bcm74110_mbox *mbox, \
+ u32 off) \
+{ \
+ return readl_relaxed(mbox->base + offset_base + off); \
+}
+BCM74110_OFFSET_IO_READL_MACRO(tx, BCM_MBOX_BASE(mbox->tx_chan));
+BCM74110_OFFSET_IO_READL_MACRO(rx, BCM_MBOX_BASE(mbox->rx_chan));
+BCM74110_OFFSET_IO_READL_MACRO(irq, BCM_MBOX_IRQ_BASE(mbox->rx_chan));
+
+static inline struct bcm74110_mbox *bcm74110_mbox_from_cntrl(
+ struct mbox_controller *cntrl)
+{
+ return container_of(cntrl, struct bcm74110_mbox, controller);
+}
+
+static void bcm74110_rx_push_init_msg(struct bcm74110_mbox *mbox, u32 val)
+{
+ struct bcm74110_mbox_msg *msg;
+
+ msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
+ if (!msg)
+ return;
+
+ INIT_LIST_HEAD(&msg->list_entry);
+ msg->msg = val;
+
+ spin_lock(&mbox->rx_svc_list_lock);
+ list_add_tail(&msg->list_entry, &mbox->rx_svc_init_list);
+ spin_unlock(&mbox->rx_svc_list_lock);
+}
+
+static void bcm74110_rx_process_msg(struct bcm74110_mbox *mbox)
+{
+ struct device *dev = &mbox->pdev->dev;
+ struct bcm74110_mbox_chan *chan_priv;
+ struct mbox_chan *chan;
+ u32 msg, status;
+ int type;
+
+ do {
+ msg = bcm74110_rx_readl(mbox, BCM_MBOX_RDATA);
+ status = bcm74110_rx_readl(mbox, BCM_MBOX_STATUS0);
+
+		dev_dbg(dev, "rx: [req=%lu|rply=%lu|srv=%lu|fn=%lu|length=%lu|slot=%lu]\n",
+ BCM_MSG_GET_FIELD(msg, REQ), BCM_MSG_GET_FIELD(msg, RPLY),
+ BCM_MSG_GET_FIELD(msg, SVC), BCM_MSG_GET_FIELD(msg, FUNC),
+ BCM_MSG_GET_FIELD(msg, LENGTH), BCM_MSG_GET_FIELD(msg, SLOT));
+
+ type = BCM_MSG_GET_FIELD(msg, SVC);
+ switch (type) {
+ case BCM_MSG_SVC_INIT:
+ bcm74110_rx_push_init_msg(mbox, msg);
+ break;
+ case BCM_MSG_SVC_PMC:
+ case BCM_MSG_SVC_SCMI:
+ case BCM_MSG_SVC_DPFE:
+ chan = &mbox->controller.chans[type];
+ chan_priv = chan->con_priv;
+ if (chan_priv->en)
+ mbox_chan_received_data(chan, NULL);
+ else
+ dev_warn(dev, "Channel not enabled\n");
+ break;
+ default:
+ dev_warn(dev, "Unsupported msg received\n");
+ }
+ } while (status & BCM_MBOX_STATUS0_NOT_EMPTY);
+}
+
+static irqreturn_t bcm74110_mbox_isr(int irq, void *data)
+{
+ struct bcm74110_mbox *mbox = data;
+ u32 status;
+
+ status = bcm74110_irq_readl(mbox, BCM_MBOX_IRQ_STATUS);
+
+ bcm74110_irq_writel(mbox, 0xffffffff, BCM_MBOX_IRQ_CLEAR);
+
+ if (status & BCM_MBOX_IRQ_NOT_EMPTY)
+ bcm74110_rx_process_msg(mbox);
+ else
+ dev_warn(&mbox->pdev->dev, "Spurious interrupt\n");
+
+ return IRQ_HANDLED;
+}
+
+static void bcm74110_mbox_mask_and_clear(struct bcm74110_mbox *mbox)
+{
+ bcm74110_irq_writel(mbox, 0xffffffff, BCM_MBOX_IRQ_MASK_SET);
+ bcm74110_irq_writel(mbox, 0xffffffff, BCM_MBOX_IRQ_CLEAR);
+}
+
+static int bcm74110_rx_pop_init_msg(struct bcm74110_mbox *mbox, u32 func_type,
+ u32 *val)
+{
+ struct bcm74110_mbox_msg *msg, *msg_tmp;
+ unsigned long flags;
+ bool found = false;
+
+ spin_lock_irqsave(&mbox->rx_svc_list_lock, flags);
+ list_for_each_entry_safe(msg, msg_tmp, &mbox->rx_svc_init_list,
+ list_entry) {
+ if (BCM_MSG_GET_FIELD(msg->msg, FUNC) == func_type) {
+ list_del(&msg->list_entry);
+ found = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&mbox->rx_svc_list_lock, flags);
+
+ if (!found)
+ return -EINVAL;
+
+ *val = msg->msg;
+ kfree(msg);
+
+ return 0;
+}
+
+static void bcm74110_rx_flush_msg(struct bcm74110_mbox *mbox)
+{
+ struct bcm74110_mbox_msg *msg, *msg_tmp;
+ LIST_HEAD(list_temp);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mbox->rx_svc_list_lock, flags);
+ list_splice_init(&mbox->rx_svc_init_list, &list_temp);
+ spin_unlock_irqrestore(&mbox->rx_svc_list_lock, flags);
+
+ list_for_each_entry_safe(msg, msg_tmp, &list_temp, list_entry) {
+ list_del(&msg->list_entry);
+ kfree(msg);
+ }
+}
+
+#define BCM_DEQUEUE_TIMEOUT_MS 30
+static int bcm74110_rx_pop_init_msg_block(struct bcm74110_mbox *mbox, u32 func_type,
+ u32 *val)
+{
+ int ret, timeout = 0;
+
+ do {
+ ret = bcm74110_rx_pop_init_msg(mbox, func_type, val);
+
+ if (!ret)
+ return 0;
+
+		/* TODO: Figure out what a good sleep interval is here. */
+ usleep_range(1000, 2000);
+ timeout++;
+ } while (timeout < BCM_DEQUEUE_TIMEOUT_MS);
+
+ dev_warn(&mbox->pdev->dev, "Timeout waiting for service init response\n");
+ return -ETIMEDOUT;
+}
+
+static int bcm74110_mbox_create_msg(int req, int rply, int svc, int func,
+ int length, int slot)
+{
+ u32 msg = 0;
+
+ BCM_MSG_SET_FIELD(msg, REQ, req);
+ BCM_MSG_SET_FIELD(msg, RPLY, rply);
+ BCM_MSG_SET_FIELD(msg, SVC, svc);
+ BCM_MSG_SET_FIELD(msg, FUNC, func);
+ BCM_MSG_SET_FIELD(msg, LENGTH, length);
+ BCM_MSG_SET_FIELD(msg, SLOT, slot);
+
+ return msg;
+}
+
+static int bcm74110_mbox_tx_msg(struct bcm74110_mbox *mbox, u32 msg)
+{
+ int val;
+
+ /* We can potentially poll with timeout here instead */
+ val = bcm74110_tx_readl(mbox, BCM_MBOX_STATUS0);
+ if (val & BCM_MBOX_STATUS0_FULL) {
+ dev_err(&mbox->pdev->dev, "Mailbox full\n");
+ return -EINVAL;
+ }
+
+	dev_dbg(&mbox->pdev->dev, "tx: [req=%lu|rply=%lu|srv=%lu|fn=%lu|length=%lu|slot=%lu]\n",
+ BCM_MSG_GET_FIELD(msg, REQ), BCM_MSG_GET_FIELD(msg, RPLY),
+ BCM_MSG_GET_FIELD(msg, SVC), BCM_MSG_GET_FIELD(msg, FUNC),
+ BCM_MSG_GET_FIELD(msg, LENGTH), BCM_MSG_GET_FIELD(msg, SLOT));
+
+ bcm74110_tx_writel(mbox, msg, BCM_MBOX_WDATA);
+
+ return 0;
+}
+
+#define BCM_MBOX_LINK_TRAINING_RETRIES 5
+static int bcm74110_mbox_link_training(struct bcm74110_mbox *mbox)
+{
+ int ret, retries = 0;
+ u32 msg = 0, orig_len = 0, len = BCM_LINK_CODE0;
+
+ do {
+ switch (len) {
+ case 0:
+ retries++;
+ dev_warn(&mbox->pdev->dev,
+ "Link train failed, trying again... %d\n",
+ retries);
+ if (retries > BCM_MBOX_LINK_TRAINING_RETRIES)
+ return -EINVAL;
+ len = BCM_LINK_CODE0;
+ fallthrough;
+ case BCM_LINK_CODE0:
+ case BCM_LINK_CODE1:
+ case BCM_LINK_CODE2:
+ msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT,
+ BCM_MSG_FUNC_LINK_START,
+ len, BCM_MSG_SVC_INIT);
+ break;
+ default:
+ break;
+ }
+
+ bcm74110_mbox_tx_msg(mbox, msg);
+
+ /* No response expected for LINK_CODE2 */
+ if (len == BCM_LINK_CODE2)
+ return 0;
+
+ orig_len = len;
+
+ ret = bcm74110_rx_pop_init_msg_block(mbox,
+ BCM_MSG_GET_FIELD(msg, FUNC),
+ &msg);
+ if (ret) {
+ len = 0;
+ continue;
+ }
+
+ if ((BCM_MSG_GET_FIELD(msg, SVC) != BCM_MSG_SVC_INIT) ||
+ (BCM_MSG_GET_FIELD(msg, FUNC) != BCM_MSG_FUNC_LINK_START) ||
+ (BCM_MSG_GET_FIELD(msg, SLOT) != 0) ||
+ (BCM_MSG_GET_FIELD(msg, RPLY) != 1) ||
+ (BCM_MSG_GET_FIELD(msg, REQ) != 0)) {
+ len = 0;
+ continue;
+ }
+
+ len = BCM_MSG_GET_FIELD(msg, LENGTH);
+
+ /* Make sure sequence is good */
+ if (len != (orig_len + 1)) {
+ len = 0;
+ continue;
+ }
+ } while (1);
+
+ return -EINVAL;
+}
+
+static int bcm74110_mbox_tx_msg_and_wait_ack(struct bcm74110_mbox *mbox, u32 msg)
+{
+ int ret;
+ u32 recv_msg;
+
+ ret = bcm74110_mbox_tx_msg(mbox, msg);
+ if (ret)
+ return ret;
+
+ ret = bcm74110_rx_pop_init_msg_block(mbox, BCM_MSG_GET_FIELD(msg, FUNC),
+ &recv_msg);
+ if (ret)
+ return ret;
+
+ /*
+ * Modify tx message to verify rx ack.
+ * Flip RPLY/REQ for synchronous messages
+ */
+ if (BCM_MSG_GET_FIELD(msg, REQ) == 1) {
+ BCM_MSG_SET_FIELD(msg, RPLY, 1);
+ BCM_MSG_SET_FIELD(msg, REQ, 0);
+ }
+
+ if (msg != recv_msg) {
+ dev_err(&mbox->pdev->dev, "Found ack, but ack is invalid\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Each index addresses 0x100 bytes of HAB memory; the index size counts from 0 */
+#define BCM_MBOX_HAB_MEM_IDX_START 0x30
+#define BCM_MBOX_HAB_MEM_IDX_SIZE 0x0
+static int bcm74110_mbox_shmem_init(struct bcm74110_mbox *mbox)
+{
+ u32 msg = 0;
+ int ret;
+
+ msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT,
+ BCM_MSG_FUNC_SHMEM_STOP,
+ 0, BCM_MSG_SVC_INIT);
+ ret = bcm74110_mbox_tx_msg_and_wait_ack(mbox, msg);
+ if (ret)
+ return -EINVAL;
+
+ msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT,
+ BCM_MSG_FUNC_SHMEM_TX,
+ BCM_MBOX_HAB_MEM_IDX_START,
+ BCM_MBOX_HAB_MEM_IDX_SIZE);
+ ret = bcm74110_mbox_tx_msg_and_wait_ack(mbox, msg);
+ if (ret)
+ return -EINVAL;
+
+ msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT,
+ BCM_MSG_FUNC_SHMEM_RX,
+ BCM_MBOX_HAB_MEM_IDX_START,
+ BCM_MBOX_HAB_MEM_IDX_SIZE);
+ ret = bcm74110_mbox_tx_msg_and_wait_ack(mbox, msg);
+ if (ret)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int bcm74110_mbox_init(struct bcm74110_mbox *mbox)
+{
+ int ret = 0;
+
+ /* Disable queues tx/rx */
+ bcm74110_tx_writel(mbox, 0x0, BCM_MBOX_CTRL);
+
+	/* Clear status and restart tx/rx */
+ bcm74110_tx_writel(mbox, BCM_MBOX_CTRL_EN | BCM_MBOX_CTRL_CLR,
+ BCM_MBOX_CTRL);
+
+ /* Unmask irq */
+ bcm74110_irq_writel(mbox, BCM_MBOX_IRQ_NOT_EMPTY, BCM_MBOX_IRQ_MASK_CLEAR);
+
+ ret = bcm74110_mbox_link_training(mbox);
+ if (ret) {
+ dev_err(&mbox->pdev->dev, "Training failed\n");
+ return ret;
+ }
+
+ return bcm74110_mbox_shmem_init(mbox);
+}
+
+static int bcm74110_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct bcm74110_mbox_chan *chan_priv = chan->con_priv;
+ u32 msg;
+
+ switch (chan_priv->type) {
+ case BCM_MSG_SVC_PMC:
+ case BCM_MSG_SVC_SCMI:
+ case BCM_MSG_SVC_DPFE:
+ msg = bcm74110_mbox_create_msg(1, 0, chan_priv->type, 0,
+ 128 + 28, chan_priv->slot);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return bcm74110_mbox_tx_msg(chan_priv->mbox, msg);
+}
+
+static int bcm74110_mbox_chan_startup(struct mbox_chan *chan)
+{
+ struct bcm74110_mbox_chan *chan_priv = chan->con_priv;
+
+ chan_priv->en = true;
+
+ return 0;
+}
+
+static void bcm74110_mbox_chan_shutdown(struct mbox_chan *chan)
+{
+ struct bcm74110_mbox_chan *chan_priv = chan->con_priv;
+
+ chan_priv->en = false;
+}
+
+static const struct mbox_chan_ops bcm74110_mbox_chan_ops = {
+ .send_data = bcm74110_mbox_send_data,
+ .startup = bcm74110_mbox_chan_startup,
+ .shutdown = bcm74110_mbox_chan_shutdown,
+};
+
+static void bcm74110_mbox_shutdown(struct platform_device *pdev)
+{
+ struct bcm74110_mbox *mbox = dev_get_drvdata(&pdev->dev);
+ u32 msg;
+
+ msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT,
+ BCM_MSG_FUNC_LINK_STOP,
+ 0, 0);
+
+ bcm74110_mbox_tx_msg_and_wait_ack(mbox, msg);
+
+	/* Even if no ACK is received, shut it down anyway */
+
+ bcm74110_mbox_mask_and_clear(mbox);
+
+ /* Disable queues tx/rx */
+ bcm74110_tx_writel(mbox, 0x0, BCM_MBOX_CTRL);
+
+ /* Flush queues */
+ bcm74110_rx_flush_msg(mbox);
+}
+
+static struct mbox_chan *bcm74110_mbox_of_xlate(struct mbox_controller *cntrl,
+ const struct of_phandle_args *p)
+{
+ struct bcm74110_mbox *mbox = bcm74110_mbox_from_cntrl(cntrl);
+ struct device *dev = &mbox->pdev->dev;
+ struct bcm74110_mbox_chan *chan_priv;
+ int slot, type;
+
+ if (p->args_count != 2) {
+ dev_err(dev, "Invalid arguments\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ type = p->args[0];
+ slot = p->args[1];
+
+ switch (type) {
+ case BCM_MSG_SVC_PMC:
+ case BCM_MSG_SVC_SCMI:
+ case BCM_MSG_SVC_DPFE:
+ if (slot > BCM_MBOX_HAB_MEM_IDX_SIZE) {
+ dev_err(dev, "Not enough shared memory\n");
+ return ERR_PTR(-EINVAL);
+ }
+ chan_priv = cntrl->chans[type].con_priv;
+ chan_priv->slot = slot;
+ chan_priv->type = type;
+ break;
+ default:
+ dev_err(dev, "Invalid channel type: %d\n", type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &cntrl->chans[type];
+}
+
+static int bcm74110_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bcm74110_mbox *mbox;
+ int i, ret;
+
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ mbox->pdev = pdev;
+ platform_set_drvdata(pdev, mbox);
+
+ mbox->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mbox->base))
+ return dev_err_probe(dev, PTR_ERR(mbox->base), "Failed to iomap\n");
+
+ ret = of_property_read_u32(dev->of_node, "brcm,tx", &mbox->tx_chan);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to find tx channel\n");
+
+ ret = of_property_read_u32(dev->of_node, "brcm,rx", &mbox->rx_chan);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to find rx channel\n");
+
+ mbox->rx_irq = platform_get_irq(pdev, 0);
+ if (mbox->rx_irq < 0)
+ return mbox->rx_irq;
+
+ INIT_LIST_HEAD(&mbox->rx_svc_init_list);
+ spin_lock_init(&mbox->rx_svc_list_lock);
+ bcm74110_mbox_mask_and_clear(mbox);
+
+ ret = devm_request_irq(dev, mbox->rx_irq, bcm74110_mbox_isr,
+ IRQF_NO_SUSPEND, pdev->name, mbox);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request irq\n");
+
+ mbox->controller.ops = &bcm74110_mbox_chan_ops;
+ mbox->controller.dev = dev;
+ mbox->controller.num_chans = BCM_MSG_SVC_MAX;
+ mbox->controller.of_xlate = &bcm74110_mbox_of_xlate;
+ mbox->controller.chans = devm_kcalloc(dev, BCM_MSG_SVC_MAX,
+ sizeof(*mbox->controller.chans),
+ GFP_KERNEL);
+ if (!mbox->controller.chans)
+ return -ENOMEM;
+
+ mbox->mbox_chan = devm_kcalloc(dev, BCM_MSG_SVC_MAX,
+ sizeof(*mbox->mbox_chan),
+ GFP_KERNEL);
+ if (!mbox->mbox_chan)
+ return -ENOMEM;
+
+ for (i = 0; i < BCM_MSG_SVC_MAX; i++) {
+ mbox->mbox_chan[i].mbox = mbox;
+ mbox->controller.chans[i].con_priv = &mbox->mbox_chan[i];
+ }
+
+ ret = devm_mbox_controller_register(dev, &mbox->controller);
+ if (ret)
+ return ret;
+
+ ret = bcm74110_mbox_init(mbox);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id bcm74110_mbox_of_match[] = {
+ { .compatible = "brcm,bcm74110-mbox", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, bcm74110_mbox_of_match);
+
+static struct platform_driver bcm74110_mbox_driver = {
+ .driver = {
+ .name = "bcm74110-mbox",
+ .of_match_table = bcm74110_mbox_of_match,
+ },
+ .probe = bcm74110_mbox_probe,
+ .shutdown = bcm74110_mbox_shutdown,
+};
+module_platform_driver(bcm74110_mbox_driver);
+
+MODULE_AUTHOR("Justin Chen <justin.chen@broadcom.com>");
+MODULE_DESCRIPTION("BCM74110 mailbox driver");
+MODULE_LICENSE("GPL");
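The 32-bit message header above is built with FIELD_PREP via BCM_MSG_SET_FIELD. A worked example of a LINK_START request in slot 0, using only the masks defined in this file (the helper name is hypothetical):

static u32 demo_link_start_hdr(void)
{
	u32 hdr = 0;

	BCM_MSG_SET_FIELD(hdr, VERSION, BCM_MSG_VERSION);	/* bits 31:29 */
	BCM_MSG_SET_FIELD(hdr, REQ, 1);				/* bit 28: request */
	BCM_MSG_SET_FIELD(hdr, SVC, BCM_MSG_SVC_INIT);		/* bits 26:24 */
	BCM_MSG_SET_FIELD(hdr, FUNC, BCM_MSG_FUNC_LINK_START);	/* bits 23:16 */
	BCM_MSG_SET_FIELD(hdr, LENGTH, BCM_LINK_CODE0);		/* bits 15:4 */
	BCM_MSG_SET_FIELD(hdr, SLOT, 0);			/* bits 3:0 */

	return hdr;
}

Note that bcm74110_mbox_create_msg() above leaves VERSION at zero; setting it here just illustrates the full layout.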
diff --git a/drivers/mailbox/cix-mailbox.c b/drivers/mailbox/cix-mailbox.c
new file mode 100644
index 000000000000..5bb1416c26a5
--- /dev/null
+++ b/drivers/mailbox/cix-mailbox.c
@@ -0,0 +1,645 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025 Cix Technology Group Co., Ltd.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "mailbox.h"
+
+/*
+ * The maximum transmission size is 32 words or 128 bytes.
+ */
+#define CIX_MBOX_MSG_WORDS 32 /* Max length = 32 words */
+#define CIX_MBOX_MSG_LEN_MASK 0x7fL /* Max length = 128 bytes */
+
+/*
+ * [0~7] fast channels
+ * [8] doorbell base channel
+ * [9] FIFO base channel
+ * [10] register base channel
+ */
+#define CIX_MBOX_FAST_IDX 7
+#define CIX_MBOX_DB_IDX 8
+#define CIX_MBOX_FIFO_IDX 9
+#define CIX_MBOX_REG_IDX 10
+#define CIX_MBOX_CHANS 11
+
+/* Register define */
+#define CIX_REG_MSG(n) (0x0 + 0x4*(n)) /* 0x0~0x7c */
+#define CIX_REG_DB_ACK CIX_REG_MSG(CIX_MBOX_MSG_WORDS) /* 0x80 */
+#define CIX_ERR_COMP (CIX_REG_DB_ACK + 0x4) /* 0x84 */
+#define CIX_ERR_COMP_CLR (CIX_REG_DB_ACK + 0x8) /* 0x88 */
+#define CIX_REG_F_INT(IDX) (CIX_ERR_COMP_CLR + 0x4 * ((IDX) + 1)) /* 0x8c~0xa8 */
+#define CIX_FIFO_WR (CIX_REG_F_INT(CIX_MBOX_FAST_IDX + 1)) /* 0xac */
+#define CIX_FIFO_RD (CIX_FIFO_WR + 0x4) /* 0xb0 */
+#define CIX_FIFO_STAS (CIX_FIFO_WR + 0x8) /* 0xb4 */
+#define CIX_FIFO_WM (CIX_FIFO_WR + 0xc) /* 0xb8 */
+#define CIX_INT_ENABLE (CIX_FIFO_WR + 0x10) /* 0xbc */
+#define CIX_INT_ENABLE_SIDE_B (CIX_FIFO_WR + 0x14) /* 0xc0 */
+#define CIX_INT_CLEAR (CIX_FIFO_WR + 0x18) /* 0xc4 */
+#define CIX_INT_STATUS (CIX_FIFO_WR + 0x1c) /* 0xc8 */
+#define CIX_FIFO_RST (CIX_FIFO_WR + 0x20) /* 0xcc */
+
+#define CIX_MBOX_TX 0
+#define CIX_MBOX_RX 1
+
+#define CIX_DB_INT_BIT BIT(0)
+#define CIX_DB_ACK_INT_BIT BIT(1)
+
+#define CIX_FIFO_WM_DEFAULT CIX_MBOX_MSG_WORDS
+#define CIX_FIFO_STAS_WMK BIT(0)
+#define CIX_FIFO_STAS_FULL BIT(1)
+#define CIX_FIFO_STAS_EMPTY BIT(2)
+#define CIX_FIFO_STAS_UFLOW BIT(3)
+#define CIX_FIFO_STAS_OFLOW BIT(4)
+
+#define CIX_FIFO_RST_BIT BIT(0)
+
+#define CIX_DB_INT BIT(0)
+#define CIX_ACK_INT BIT(1)
+#define CIX_FIFO_FULL_INT BIT(2)
+#define CIX_FIFO_EMPTY_INT BIT(3)
+#define CIX_FIFO_WM01_INT BIT(4)
+#define CIX_FIFO_WM10_INT BIT(5)
+#define CIX_FIFO_OFLOW_INT BIT(6)
+#define CIX_FIFO_UFLOW_INT BIT(7)
+#define CIX_FIFO_N_EMPTY_INT BIT(8)
+#define CIX_FAST_CH_INT(IDX) BIT((IDX)+9)
+
+#define CIX_SHMEM_OFFSET 0x80
+
+enum cix_mbox_chan_type {
+ CIX_MBOX_TYPE_DB,
+ CIX_MBOX_TYPE_REG,
+ CIX_MBOX_TYPE_FIFO,
+ CIX_MBOX_TYPE_FAST,
+};
+
+struct cix_mbox_con_priv {
+ enum cix_mbox_chan_type type;
+ struct mbox_chan *chan;
+ int index;
+};
+
+struct cix_mbox_priv {
+ struct device *dev;
+ int irq;
+ int dir;
+ void __iomem *base; /* region for mailbox */
+ struct cix_mbox_con_priv con_priv[CIX_MBOX_CHANS];
+ struct mbox_chan mbox_chans[CIX_MBOX_CHANS];
+ struct mbox_controller mbox;
+ bool use_shmem;
+};
+
+/*
+ * The CIX mailbox supports four types of transfers:
+ * CIX_MBOX_TYPE_DB, CIX_MBOX_TYPE_FAST, CIX_MBOX_TYPE_REG, and CIX_MBOX_TYPE_FIFO.
+ * For the REG and FIFO types of transfers, the message format is as follows:
+ */
+union cix_mbox_msg_reg_fifo {
+	u32 length; /* length in bytes */
+ u32 buf[CIX_MBOX_MSG_WORDS]; /* buf[0] must be the byte length of this array */
+};
+
+static struct cix_mbox_priv *to_cix_mbox_priv(struct mbox_controller *mbox)
+{
+ return container_of(mbox, struct cix_mbox_priv, mbox);
+}
+
+static void cix_mbox_write(struct cix_mbox_priv *priv, u32 val, u32 offset)
+{
+ if (priv->use_shmem)
+ iowrite32(val, priv->base + offset - CIX_SHMEM_OFFSET);
+ else
+ iowrite32(val, priv->base + offset);
+}
+
+static u32 cix_mbox_read(struct cix_mbox_priv *priv, u32 offset)
+{
+ if (priv->use_shmem)
+ return ioread32(priv->base + offset - CIX_SHMEM_OFFSET);
+ else
+ return ioread32(priv->base + offset);
+}
+
+static bool mbox_fifo_empty(struct mbox_chan *chan)
+{
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+
+	return cix_mbox_read(priv, CIX_FIFO_STAS) & CIX_FIFO_STAS_EMPTY;
+}
+
+/*
+ * The CIX mailbox transfers data in word units, so the byte length
+ * must be converted to a word count.
+ */
+static inline u32 mbox_get_msg_size(void *msg)
+{
+ u32 len;
+
+ len = ((u32 *)msg)[0] & CIX_MBOX_MSG_LEN_MASK;
+ return DIV_ROUND_UP(len, 4);
+}
+
+static int cix_mbox_send_data_db(struct mbox_chan *chan, void *data)
+{
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+
+ /* trigger doorbell irq */
+ cix_mbox_write(priv, CIX_DB_INT_BIT, CIX_REG_DB_ACK);
+
+ return 0;
+}
+
+static int cix_mbox_send_data_reg(struct mbox_chan *chan, void *data)
+{
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+ union cix_mbox_msg_reg_fifo *msg = data;
+ u32 len, i;
+
+ if (!data)
+ return -EINVAL;
+
+ len = mbox_get_msg_size(data);
+ for (i = 0; i < len; i++)
+ cix_mbox_write(priv, msg->buf[i], CIX_REG_MSG(i));
+
+ /* trigger doorbell irq */
+ cix_mbox_write(priv, CIX_DB_INT_BIT, CIX_REG_DB_ACK);
+
+ return 0;
+}
+
+static int cix_mbox_send_data_fifo(struct mbox_chan *chan, void *data)
+{
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+ union cix_mbox_msg_reg_fifo *msg = data;
+ u32 len, val, i;
+
+ if (!data)
+ return -EINVAL;
+
+ len = mbox_get_msg_size(data);
+ cix_mbox_write(priv, len, CIX_FIFO_WM);
+ for (i = 0; i < len; i++)
+ cix_mbox_write(priv, msg->buf[i], CIX_FIFO_WR);
+
+ /* Enable fifo empty interrupt */
+ val = cix_mbox_read(priv, CIX_INT_ENABLE);
+ val |= CIX_FIFO_EMPTY_INT;
+ cix_mbox_write(priv, val, CIX_INT_ENABLE);
+
+ return 0;
+}
+
+static int cix_mbox_send_data_fast(struct mbox_chan *chan, void *data)
+{
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+ struct cix_mbox_con_priv *cp = chan->con_priv;
+ u32 *arg = (u32 *)data;
+ int index = cp->index;
+
+ if (!data)
+ return -EINVAL;
+
+ if (index < 0 || index > CIX_MBOX_FAST_IDX) {
+ dev_err(priv->dev, "Invalid Mbox index %d\n", index);
+ return -EINVAL;
+ }
+
+ cix_mbox_write(priv, arg[0], CIX_REG_F_INT(index));
+
+ return 0;
+}
+
+static int cix_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+ struct cix_mbox_con_priv *cp = chan->con_priv;
+
+ if (priv->dir != CIX_MBOX_TX) {
+ dev_err(priv->dev, "Invalid Mbox dir %d\n", priv->dir);
+ return -EINVAL;
+ }
+
+ switch (cp->type) {
+ case CIX_MBOX_TYPE_DB:
+ cix_mbox_send_data_db(chan, data);
+ break;
+ case CIX_MBOX_TYPE_REG:
+ cix_mbox_send_data_reg(chan, data);
+ break;
+ case CIX_MBOX_TYPE_FIFO:
+ cix_mbox_send_data_fifo(chan, data);
+ break;
+ case CIX_MBOX_TYPE_FAST:
+ cix_mbox_send_data_fast(chan, data);
+ break;
+ default:
+ dev_err(priv->dev, "Invalid channel type: %d\n", cp->type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void cix_mbox_isr_db(struct mbox_chan *chan)
+{
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+ u32 int_status;
+
+ int_status = cix_mbox_read(priv, CIX_INT_STATUS);
+
+ if (priv->dir == CIX_MBOX_RX) {
+ /* rx interrupt is triggered */
+ if (int_status & CIX_DB_INT) {
+ cix_mbox_write(priv, CIX_DB_INT, CIX_INT_CLEAR);
+ mbox_chan_received_data(chan, NULL);
+ /* trigger ack interrupt */
+ cix_mbox_write(priv, CIX_DB_ACK_INT_BIT, CIX_REG_DB_ACK);
+ }
+ } else {
+ /* tx ack interrupt is triggered */
+ if (int_status & CIX_ACK_INT) {
+ cix_mbox_write(priv, CIX_ACK_INT, CIX_INT_CLEAR);
+ mbox_chan_received_data(chan, NULL);
+ }
+ }
+}
+
+static void cix_mbox_isr_reg(struct mbox_chan *chan)
+{
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+ u32 int_status;
+
+ int_status = cix_mbox_read(priv, CIX_INT_STATUS);
+
+ if (priv->dir == CIX_MBOX_RX) {
+ /* rx interrupt is triggered */
+ if (int_status & CIX_DB_INT) {
+ u32 data[CIX_MBOX_MSG_WORDS], len, i;
+
+ cix_mbox_write(priv, CIX_DB_INT, CIX_INT_CLEAR);
+ data[0] = cix_mbox_read(priv, CIX_REG_MSG(0));
+ len = mbox_get_msg_size(data);
+ for (i = 1; i < len; i++)
+ data[i] = cix_mbox_read(priv, CIX_REG_MSG(i));
+
+ /* trigger ack interrupt */
+ cix_mbox_write(priv, CIX_DB_ACK_INT_BIT, CIX_REG_DB_ACK);
+ mbox_chan_received_data(chan, data);
+ }
+ } else {
+ /* tx ack interrupt is triggered */
+ if (int_status & CIX_ACK_INT) {
+ cix_mbox_write(priv, CIX_ACK_INT, CIX_INT_CLEAR);
+ mbox_chan_txdone(chan, 0);
+ }
+ }
+}
+
+static void cix_mbox_isr_fifo(struct mbox_chan *chan)
+{
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+ u32 int_status, status;
+
+ int_status = cix_mbox_read(priv, CIX_INT_STATUS);
+
+ if (priv->dir == CIX_MBOX_RX) {
+		/* FIFO full or watermark 0->1 interrupt is generated */
+ if (int_status & (CIX_FIFO_FULL_INT | CIX_FIFO_WM01_INT)) {
+ u32 data[CIX_MBOX_MSG_WORDS] = { 0 }, i = 0;
+
+ cix_mbox_write(priv, (CIX_FIFO_FULL_INT | CIX_FIFO_WM01_INT),
+ CIX_INT_CLEAR);
+ do {
+ data[i++] = cix_mbox_read(priv, CIX_FIFO_RD);
+ } while (!mbox_fifo_empty(chan) && i < CIX_MBOX_MSG_WORDS);
+
+ mbox_chan_received_data(chan, data);
+ }
+ /* FIFO underflow is generated */
+ if (int_status & CIX_FIFO_UFLOW_INT) {
+ status = cix_mbox_read(priv, CIX_FIFO_STAS);
+			dev_err(priv->dev, "fifo underflow: fifo status 0x%x\n", status);
+ cix_mbox_write(priv, CIX_FIFO_UFLOW_INT, CIX_INT_CLEAR);
+ }
+ } else {
+ /* FIFO empty interrupt is generated */
+ if (int_status & CIX_FIFO_EMPTY_INT) {
+ u32 val;
+
+ cix_mbox_write(priv, CIX_FIFO_EMPTY_INT, CIX_INT_CLEAR);
+			/* Disable the empty interrupt */
+ val = cix_mbox_read(priv, CIX_INT_ENABLE);
+ val &= ~CIX_FIFO_EMPTY_INT;
+ cix_mbox_write(priv, val, CIX_INT_ENABLE);
+ mbox_chan_txdone(chan, 0);
+ }
+ /* FIFO overflow is generated */
+ if (int_status & CIX_FIFO_OFLOW_INT) {
+ status = cix_mbox_read(priv, CIX_FIFO_STAS);
+			dev_err(priv->dev, "fifo overflow: fifo status 0x%x\n", status);
+ cix_mbox_write(priv, CIX_FIFO_OFLOW_INT, CIX_INT_CLEAR);
+ }
+ }
+}
+
+static void cix_mbox_isr_fast(struct mbox_chan *chan)
+{
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+ struct cix_mbox_con_priv *cp = chan->con_priv;
+ u32 int_status, data;
+
+	/* No IRQ is triggered for a TX-direction mailbox */
+ if (priv->dir != CIX_MBOX_RX)
+ return;
+
+ int_status = cix_mbox_read(priv, CIX_INT_STATUS);
+
+ if (int_status & CIX_FAST_CH_INT(cp->index)) {
+ cix_mbox_write(priv, CIX_FAST_CH_INT(cp->index), CIX_INT_CLEAR);
+ data = cix_mbox_read(priv, CIX_REG_F_INT(cp->index));
+ mbox_chan_received_data(chan, &data);
+ }
+}
+
+static irqreturn_t cix_mbox_isr(int irq, void *arg)
+{
+ struct mbox_chan *chan = arg;
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+ struct cix_mbox_con_priv *cp = chan->con_priv;
+
+ switch (cp->type) {
+ case CIX_MBOX_TYPE_DB:
+ cix_mbox_isr_db(chan);
+ break;
+ case CIX_MBOX_TYPE_REG:
+ cix_mbox_isr_reg(chan);
+ break;
+ case CIX_MBOX_TYPE_FIFO:
+ cix_mbox_isr_fifo(chan);
+ break;
+ case CIX_MBOX_TYPE_FAST:
+ cix_mbox_isr_fast(chan);
+ break;
+ default:
+ dev_err(priv->dev, "Invalid channel type: %d\n", cp->type);
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int cix_mbox_startup(struct mbox_chan *chan)
+{
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+ struct cix_mbox_con_priv *cp = chan->con_priv;
+ int index = cp->index, ret;
+ u32 val;
+
+ ret = request_irq(priv->irq, cix_mbox_isr, 0,
+ dev_name(priv->dev), chan);
+ if (ret) {
+ dev_err(priv->dev, "Unable to acquire IRQ %d\n", priv->irq);
+ return ret;
+ }
+
+ switch (cp->type) {
+ case CIX_MBOX_TYPE_DB:
+ /* Overwrite txdone_method for DB channel */
+ chan->txdone_method = TXDONE_BY_ACK;
+ fallthrough;
+ case CIX_MBOX_TYPE_REG:
+ if (priv->dir == CIX_MBOX_TX) {
+ /* Enable ACK interrupt */
+ val = cix_mbox_read(priv, CIX_INT_ENABLE);
+ val |= CIX_ACK_INT;
+ cix_mbox_write(priv, val, CIX_INT_ENABLE);
+ } else {
+ /* Enable Doorbell interrupt */
+ val = cix_mbox_read(priv, CIX_INT_ENABLE_SIDE_B);
+ val |= CIX_DB_INT;
+ cix_mbox_write(priv, val, CIX_INT_ENABLE_SIDE_B);
+ }
+ break;
+ case CIX_MBOX_TYPE_FIFO:
+ /* reset fifo */
+ cix_mbox_write(priv, CIX_FIFO_RST_BIT, CIX_FIFO_RST);
+ /* set default watermark */
+ cix_mbox_write(priv, CIX_FIFO_WM_DEFAULT, CIX_FIFO_WM);
+ if (priv->dir == CIX_MBOX_TX) {
+ /* Enable fifo overflow interrupt */
+ val = cix_mbox_read(priv, CIX_INT_ENABLE);
+ val |= CIX_FIFO_OFLOW_INT;
+ cix_mbox_write(priv, val, CIX_INT_ENABLE);
+ } else {
+			/* Enable fifo watermark/underflow interrupts */
+			val = cix_mbox_read(priv, CIX_INT_ENABLE_SIDE_B);
+			val |= CIX_FIFO_UFLOW_INT | CIX_FIFO_WM01_INT;
+ cix_mbox_write(priv, val, CIX_INT_ENABLE_SIDE_B);
+ }
+ break;
+ case CIX_MBOX_TYPE_FAST:
+		/* Only the RX channel has an interrupt */
+ if (priv->dir == CIX_MBOX_RX) {
+ if (index < 0 || index > CIX_MBOX_FAST_IDX) {
+ dev_err(priv->dev, "Invalid index %d\n", index);
+ ret = -EINVAL;
+ goto failed;
+ }
+ /* enable fast channel interrupt */
+ val = cix_mbox_read(priv, CIX_INT_ENABLE_SIDE_B);
+ val |= CIX_FAST_CH_INT(index);
+ cix_mbox_write(priv, val, CIX_INT_ENABLE_SIDE_B);
+ }
+ break;
+ default:
+ dev_err(priv->dev, "Invalid channel type: %d\n", cp->type);
+ ret = -EINVAL;
+ goto failed;
+ }
+ return 0;
+
+failed:
+ free_irq(priv->irq, chan);
+ return ret;
+}
+
+static void cix_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+ struct cix_mbox_con_priv *cp = chan->con_priv;
+ int index = cp->index;
+ u32 val;
+
+ switch (cp->type) {
+ case CIX_MBOX_TYPE_DB:
+ case CIX_MBOX_TYPE_REG:
+ if (priv->dir == CIX_MBOX_TX) {
+ /* Disable ACK interrupt */
+ val = cix_mbox_read(priv, CIX_INT_ENABLE);
+ val &= ~CIX_ACK_INT;
+ cix_mbox_write(priv, val, CIX_INT_ENABLE);
+ } else if (priv->dir == CIX_MBOX_RX) {
+ /* Disable Doorbell interrupt */
+ val = cix_mbox_read(priv, CIX_INT_ENABLE_SIDE_B);
+ val &= ~CIX_DB_INT;
+ cix_mbox_write(priv, val, CIX_INT_ENABLE_SIDE_B);
+ }
+ break;
+ case CIX_MBOX_TYPE_FIFO:
+ if (priv->dir == CIX_MBOX_TX) {
+			/* Disable fifo empty/overflow interrupts */
+ val = cix_mbox_read(priv, CIX_INT_ENABLE);
+ val &= ~(CIX_FIFO_EMPTY_INT | CIX_FIFO_OFLOW_INT);
+ cix_mbox_write(priv, val, CIX_INT_ENABLE);
+ } else if (priv->dir == CIX_MBOX_RX) {
+ /* Disable fifo WM01/underflow interrupt */
+ val = cix_mbox_read(priv, CIX_INT_ENABLE_SIDE_B);
+ val &= ~(CIX_FIFO_UFLOW_INT | CIX_FIFO_WM01_INT);
+ cix_mbox_write(priv, val, CIX_INT_ENABLE_SIDE_B);
+ }
+ break;
+ case CIX_MBOX_TYPE_FAST:
+ if (priv->dir == CIX_MBOX_RX) {
+ if (index < 0 || index > CIX_MBOX_FAST_IDX) {
+ dev_err(priv->dev, "Invalid index %d\n", index);
+ break;
+ }
+ /* Disable fast channel interrupt */
+ val = cix_mbox_read(priv, CIX_INT_ENABLE_SIDE_B);
+ val &= ~CIX_FAST_CH_INT(index);
+ cix_mbox_write(priv, val, CIX_INT_ENABLE_SIDE_B);
+ }
+ break;
+
+ default:
+ dev_err(priv->dev, "Invalid channel type: %d\n", cp->type);
+ break;
+ }
+
+ free_irq(priv->irq, chan);
+}
+
+static const struct mbox_chan_ops cix_mbox_chan_ops = {
+ .send_data = cix_mbox_send_data,
+ .startup = cix_mbox_startup,
+ .shutdown = cix_mbox_shutdown,
+};
+
+static void cix_mbox_init(struct cix_mbox_priv *priv)
+{
+ struct cix_mbox_con_priv *cp;
+ int i;
+
+ for (i = 0; i < CIX_MBOX_CHANS; i++) {
+ cp = &priv->con_priv[i];
+ cp->index = i;
+ cp->chan = &priv->mbox_chans[i];
+ priv->mbox_chans[i].con_priv = cp;
+ if (cp->index <= CIX_MBOX_FAST_IDX)
+ cp->type = CIX_MBOX_TYPE_FAST;
+ if (cp->index == CIX_MBOX_DB_IDX)
+ cp->type = CIX_MBOX_TYPE_DB;
+ if (cp->index == CIX_MBOX_FIFO_IDX)
+ cp->type = CIX_MBOX_TYPE_FIFO;
+ if (cp->index == CIX_MBOX_REG_IDX)
+ cp->type = CIX_MBOX_TYPE_REG;
+ }
+}
+
+static int cix_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cix_mbox_priv *priv;
+ struct resource *res;
+ const char *dir_str;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+	/*
+	 * The first 0x80 bytes of the cix mailbox controller's register
+	 * space can be used as shared memory by clients. When that shared
+	 * memory is in use, the base address in the resource is offset by
+	 * 0x80, so subsequent register reads/writes must subtract
+	 * CIX_SHMEM_OFFSET again. A base address offset by 0x80 therefore
+	 * indicates that shmem is in use.
+	 */
+ priv->use_shmem = !!(res->start & CIX_SHMEM_OFFSET);
+
+ priv->irq = platform_get_irq(pdev, 0);
+ if (priv->irq < 0)
+ return priv->irq;
+
+ if (device_property_read_string(dev, "cix,mbox-dir", &dir_str)) {
+		dev_err(priv->dev, "cix,mbox-dir property not found\n");
+ return -EINVAL;
+ }
+
+	if (!strcmp(dir_str, "tx"))
+		priv->dir = CIX_MBOX_TX;
+	else if (!strcmp(dir_str, "rx"))
+		priv->dir = CIX_MBOX_RX;
+	else {
+		dev_err(priv->dev, "cix,mbox-dir=%s is not a valid value\n", dir_str);
+		return -EINVAL;
+	}
+
+ cix_mbox_init(priv);
+
+ priv->mbox.dev = dev;
+ priv->mbox.ops = &cix_mbox_chan_ops;
+ priv->mbox.chans = priv->mbox_chans;
+ priv->mbox.txdone_irq = true;
+ priv->mbox.num_chans = CIX_MBOX_CHANS;
+ priv->mbox.of_xlate = NULL;
+
+ platform_set_drvdata(pdev, priv);
+ ret = devm_mbox_controller_register(dev, &priv->mbox);
+ if (ret)
+ dev_err(dev, "Failed to register mailbox %d\n", ret);
+
+ return ret;
+}
+
+static const struct of_device_id cix_mbox_dt_ids[] = {
+ { .compatible = "cix,sky1-mbox" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, cix_mbox_dt_ids);
+
+static struct platform_driver cix_mbox_driver = {
+ .probe = cix_mbox_probe,
+ .driver = {
+ .name = "cix_mbox",
+ .of_match_table = cix_mbox_dt_ids,
+ },
+};
+
+static int __init cix_mailbox_init(void)
+{
+ return platform_driver_register(&cix_mbox_driver);
+}
+arch_initcall(cix_mailbox_init);
+
+MODULE_AUTHOR("Cix Technology Group Co., Ltd.");
+MODULE_DESCRIPTION("CIX mailbox driver");
+MODULE_LICENSE("GPL");
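For the REG and FIFO transfer types above, buf[0] of union cix_mbox_msg_reg_fifo must carry the total byte length, which mbox_get_msg_size() rounds up to words. A minimal sketch, assuming a channel already obtained through the client API:

static int cix_demo_send(struct mbox_chan *chan)
{
	union cix_mbox_msg_reg_fifo msg = {
		.buf = { 12, 0xaabbccdd, 0x11223344 },	/* buf[0] = 12 bytes -> 3 words */
	};

	return mbox_send_message(chan, &msg);
}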
diff --git a/drivers/mailbox/cv1800-mailbox.c b/drivers/mailbox/cv1800-mailbox.c
new file mode 100644
index 000000000000..4761191acf78
--- /dev/null
+++ b/drivers/mailbox/cv1800-mailbox.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2024 Sophgo Technology Inc.
+ * Copyright (C) 2024 Yuntao Dai <d1581209858@live.com>
+ * Copyright (C) 2025 Junhui Liu <junhui.liu@pigmoral.tech>
+ */
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kfifo.h>
+#include <linux/mailbox_client.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define RECV_CPU 1
+
+#define MAILBOX_MAX_CHAN 8
+#define MAILBOX_MSG_LEN 8
+
+#define MBOX_EN_REG(cpu) ((cpu) << 2)
+#define MBOX_DONE_REG(cpu) (((cpu) << 2) + 2)
+#define MBOX_SET_CLR_REG(cpu) (0x10 + ((cpu) << 4))
+#define MBOX_SET_INT_REG(cpu) (0x18 + ((cpu) << 4))
+#define MBOX_SET_REG 0x60
+
+#define MAILBOX_CONTEXT_OFFSET 0x0400
+#define MAILBOX_CONTEXT_SIZE 0x0040
+
+#define MBOX_CONTEXT_BASE_INDEX(base, index) \
+	((u64 __iomem *)((base) + MAILBOX_CONTEXT_OFFSET) + (index))
+
+/**
+ * struct cv1800_mbox_chan_priv - cv1800 mailbox channel private data
+ * @idx: index of channel
+ * @cpu: processor to send messages to
+ */
+struct cv1800_mbox_chan_priv {
+ int idx;
+ int cpu;
+};
+
+struct cv1800_mbox {
+ struct mbox_controller mbox;
+ struct cv1800_mbox_chan_priv priv[MAILBOX_MAX_CHAN];
+ struct mbox_chan chans[MAILBOX_MAX_CHAN];
+ u64 __iomem *content[MAILBOX_MAX_CHAN];
+ void __iomem *mbox_base;
+ int recvid;
+};
+
+static irqreturn_t cv1800_mbox_isr(int irq, void *dev_id)
+{
+ struct cv1800_mbox *mbox = (struct cv1800_mbox *)dev_id;
+ size_t i;
+ u64 msg;
+ int ret = IRQ_NONE;
+
+ for (i = 0; i < MAILBOX_MAX_CHAN; i++) {
+ if (mbox->content[i] && mbox->chans[i].cl) {
+ memcpy_fromio(&msg, mbox->content[i], MAILBOX_MSG_LEN);
+ mbox->content[i] = NULL;
+ mbox_chan_received_data(&mbox->chans[i], (void *)&msg);
+ ret = IRQ_HANDLED;
+ }
+ }
+
+ return ret;
+}
+
+static irqreturn_t cv1800_mbox_irq(int irq, void *dev_id)
+{
+ struct cv1800_mbox *mbox = (struct cv1800_mbox *)dev_id;
+ u8 set, valid;
+ size_t i;
+ int ret = IRQ_NONE;
+
+ set = readb(mbox->mbox_base + MBOX_SET_INT_REG(RECV_CPU));
+
+ if (!set)
+ return ret;
+
+ for (i = 0; i < MAILBOX_MAX_CHAN; i++) {
+ valid = set & BIT(i);
+ if (valid) {
+ mbox->content[i] =
+ MBOX_CONTEXT_BASE_INDEX(mbox->mbox_base, i);
+ writeb(valid, mbox->mbox_base +
+ MBOX_SET_CLR_REG(RECV_CPU));
+ writeb(~valid, mbox->mbox_base + MBOX_EN_REG(RECV_CPU));
+ ret = IRQ_WAKE_THREAD;
+ }
+ }
+
+ return ret;
+}
+
+static int cv1800_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct cv1800_mbox_chan_priv *priv =
+ (struct cv1800_mbox_chan_priv *)chan->con_priv;
+ struct cv1800_mbox *mbox = dev_get_drvdata(chan->mbox->dev);
+ int idx = priv->idx;
+ int cpu = priv->cpu;
+ u8 en, valid;
+
+ memcpy_toio(MBOX_CONTEXT_BASE_INDEX(mbox->mbox_base, idx),
+ data, MAILBOX_MSG_LEN);
+
+ valid = BIT(idx);
+ writeb(valid, mbox->mbox_base + MBOX_SET_CLR_REG(cpu));
+ en = readb(mbox->mbox_base + MBOX_EN_REG(cpu));
+ writeb(en | valid, mbox->mbox_base + MBOX_EN_REG(cpu));
+ writeb(valid, mbox->mbox_base + MBOX_SET_REG);
+
+ return 0;
+}
+
+static bool cv1800_last_tx_done(struct mbox_chan *chan)
+{
+ struct cv1800_mbox_chan_priv *priv =
+ (struct cv1800_mbox_chan_priv *)chan->con_priv;
+ struct cv1800_mbox *mbox = dev_get_drvdata(chan->mbox->dev);
+ u8 en;
+
+ en = readb(mbox->mbox_base + MBOX_EN_REG(priv->cpu));
+
+ return !(en & BIT(priv->idx));
+}
+
+static const struct mbox_chan_ops cv1800_mbox_chan_ops = {
+ .send_data = cv1800_mbox_send_data,
+ .last_tx_done = cv1800_last_tx_done,
+};
+
+static struct mbox_chan *cv1800_mbox_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *spec)
+{
+ struct cv1800_mbox_chan_priv *priv;
+
+ int idx = spec->args[0];
+ int cpu = spec->args[1];
+
+ if (idx >= mbox->num_chans)
+ return ERR_PTR(-EINVAL);
+
+ priv = mbox->chans[idx].con_priv;
+ priv->cpu = cpu;
+
+ return &mbox->chans[idx];
+}
+
+static const struct of_device_id cv1800_mbox_of_match[] = {
+ { .compatible = "sophgo,cv1800b-mailbox", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cv1800_mbox_of_match);
+
+static int cv1800_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cv1800_mbox *mb;
+ int irq, idx, err;
+
+ mb = devm_kzalloc(dev, sizeof(*mb), GFP_KERNEL);
+ if (!mb)
+ return -ENOMEM;
+
+ mb->mbox_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mb->mbox_base))
+ return dev_err_probe(dev, PTR_ERR(mb->mbox_base),
+ "Failed to map resource\n");
+
+ mb->mbox.dev = dev;
+ mb->mbox.chans = mb->chans;
+ mb->mbox.txdone_poll = true;
+ mb->mbox.ops = &cv1800_mbox_chan_ops;
+ mb->mbox.num_chans = MAILBOX_MAX_CHAN;
+ mb->mbox.of_xlate = cv1800_mbox_xlate;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_threaded_irq(dev, irq, cv1800_mbox_irq,
+ cv1800_mbox_isr, IRQF_ONESHOT,
+ dev_name(&pdev->dev), mb);
+ if (err < 0)
+ return dev_err_probe(dev, err, "Failed to register irq\n");
+
+ for (idx = 0; idx < MAILBOX_MAX_CHAN; idx++) {
+ mb->priv[idx].idx = idx;
+ mb->mbox.chans[idx].con_priv = &mb->priv[idx];
+ }
+
+ platform_set_drvdata(pdev, mb);
+
+ err = devm_mbox_controller_register(dev, &mb->mbox);
+ if (err)
+ return dev_err_probe(dev, err, "Failed to register mailbox\n");
+
+ return 0;
+}
+
+static struct platform_driver cv1800_mbox_driver = {
+ .driver = {
+ .name = "cv1800-mbox",
+ .of_match_table = cv1800_mbox_of_match,
+ },
+ .probe = cv1800_mbox_probe,
+};
+
+module_platform_driver(cv1800_mbox_driver);
+
+MODULE_DESCRIPTION("cv1800 mailbox driver");
+MODULE_LICENSE("GPL");
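Messages on the cv1800 controller above are a fixed MAILBOX_MSG_LEN of 8 bytes, copied through the shared context area; the of_xlate arguments select the channel index and destination CPU. A hedged client sketch:

static int cv1800_demo_send(struct mbox_chan *chan)
{
	u64 msg = 0x1122334455667788ULL;	/* exactly MAILBOX_MSG_LEN bytes */

	/* chan from e.g. mboxes = <&mailbox 0 1>: channel 0, target cpu 1 */
	return mbox_send_message(chan, &msg);
}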
diff --git a/drivers/mailbox/exynos-mailbox.c b/drivers/mailbox/exynos-mailbox.c
new file mode 100644
index 000000000000..2320649bf60c
--- /dev/null
+++ b/drivers/mailbox/exynos-mailbox.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2020 Samsung Electronics Co., Ltd.
+ * Copyright 2020 Google LLC.
+ * Copyright 2024 Linaro Ltd.
+ */
+
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox/exynos-message.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define EXYNOS_MBOX_MCUCTRL 0x0 /* Mailbox Control Register */
+#define EXYNOS_MBOX_INTCR0 0x24 /* Interrupt Clear Register 0 */
+#define EXYNOS_MBOX_INTMR0 0x28 /* Interrupt Mask Register 0 */
+#define EXYNOS_MBOX_INTSR0 0x2c /* Interrupt Status Register 0 */
+#define EXYNOS_MBOX_INTMSR0 0x30 /* Interrupt Mask Status Register 0 */
+#define EXYNOS_MBOX_INTGR1 0x40 /* Interrupt Generation Register 1 */
+#define EXYNOS_MBOX_INTMR1 0x48 /* Interrupt Mask Register 1 */
+#define EXYNOS_MBOX_INTSR1 0x4c /* Interrupt Status Register 1 */
+#define EXYNOS_MBOX_INTMSR1 0x50 /* Interrupt Mask Status Register 1 */
+
+#define EXYNOS_MBOX_INTMR0_MASK GENMASK(15, 0)
+#define EXYNOS_MBOX_INTGR1_MASK GENMASK(15, 0)
+
+#define EXYNOS_MBOX_CHAN_COUNT HWEIGHT32(EXYNOS_MBOX_INTGR1_MASK)
+
+/**
+ * struct exynos_mbox - driver's private data.
+ * @regs: mailbox registers base address.
+ * @mbox: pointer to the mailbox controller.
+ * @pclk: pointer to the mailbox peripheral clock.
+ */
+struct exynos_mbox {
+ void __iomem *regs;
+ struct mbox_controller *mbox;
+ struct clk *pclk;
+};
+
+static int exynos_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct device *dev = chan->mbox->dev;
+ struct exynos_mbox *exynos_mbox = dev_get_drvdata(dev);
+ struct exynos_mbox_msg *msg = data;
+
+ if (msg->chan_id >= exynos_mbox->mbox->num_chans) {
+ dev_err(dev, "Invalid channel ID %d\n", msg->chan_id);
+ return -EINVAL;
+ }
+
+ if (msg->chan_type != EXYNOS_MBOX_CHAN_TYPE_DOORBELL) {
+ dev_err(dev, "Unsupported channel type [%d]\n", msg->chan_type);
+ return -EINVAL;
+ }
+
+ writel(BIT(msg->chan_id), exynos_mbox->regs + EXYNOS_MBOX_INTGR1);
+
+ return 0;
+}
+
+static const struct mbox_chan_ops exynos_mbox_chan_ops = {
+ .send_data = exynos_mbox_send_data,
+};
+
+static struct mbox_chan *exynos_mbox_of_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *sp)
+{
+ int i;
+
+ if (sp->args_count != 0)
+ return ERR_PTR(-EINVAL);
+
+ /*
+	 * Return the first available channel. Since no channel ID is passed
+	 * via the device tree, every channel populated by the driver is just
+	 * a software construct, i.e. a virtual channel. The real channel
+	 * identifier is carried in the 'void *data' passed to send_data().
+ */
+ for (i = 0; i < mbox->num_chans; i++)
+ if (mbox->chans[i].cl == NULL)
+ return &mbox->chans[i];
+ return ERR_PTR(-EINVAL);
+}
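For illustration, a minimal client-side sketch of the scheme the comment above
describes: the channel handed out by of_xlate is purely virtual, and the real
doorbell number travels inside the message. Hypothetical client code, not part
of this patch; completion handling is elided:

	#include <linux/mailbox_client.h>
	#include <linux/mailbox/exynos-message.h>

	static int ring_doorbell(struct device *dev, unsigned int id)
	{
		struct exynos_mbox_msg msg = {
			.chan_id = id,	/* real doorbell bit, validated in send_data() */
			.chan_type = EXYNOS_MBOX_CHAN_TYPE_DOORBELL,
		};
		struct mbox_client cl = { .dev = dev };
		struct mbox_chan *chan;
		int ret;

		chan = mbox_request_channel(&cl, 0);	/* any free virtual channel */
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		ret = mbox_send_message(chan, &msg);
		mbox_free_channel(chan);
		return ret < 0 ? ret : 0;
	}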
+
+static const struct of_device_id exynos_mbox_match[] = {
+ { .compatible = "google,gs101-mbox" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, exynos_mbox_match);
+
+static int exynos_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct exynos_mbox *exynos_mbox;
+ struct mbox_controller *mbox;
+ struct mbox_chan *chans;
+ int i;
+
+ exynos_mbox = devm_kzalloc(dev, sizeof(*exynos_mbox), GFP_KERNEL);
+ if (!exynos_mbox)
+ return -ENOMEM;
+
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ chans = devm_kcalloc(dev, EXYNOS_MBOX_CHAN_COUNT, sizeof(*chans),
+ GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+
+ exynos_mbox->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(exynos_mbox->regs))
+ return PTR_ERR(exynos_mbox->regs);
+
+ exynos_mbox->pclk = devm_clk_get_enabled(dev, "pclk");
+ if (IS_ERR(exynos_mbox->pclk))
+ return dev_err_probe(dev, PTR_ERR(exynos_mbox->pclk),
+ "Failed to enable clock.\n");
+
+ mbox->num_chans = EXYNOS_MBOX_CHAN_COUNT;
+ mbox->chans = chans;
+ mbox->dev = dev;
+ mbox->ops = &exynos_mbox_chan_ops;
+ mbox->of_xlate = exynos_mbox_of_xlate;
+
+ for (i = 0; i < EXYNOS_MBOX_CHAN_COUNT; i++)
+ chans[i].mbox = mbox;
+
+ exynos_mbox->mbox = mbox;
+
+ platform_set_drvdata(pdev, exynos_mbox);
+
+ /* Mask out all interrupts. We support just polling channels for now. */
+ writel(EXYNOS_MBOX_INTMR0_MASK, exynos_mbox->regs + EXYNOS_MBOX_INTMR0);
+
+ return devm_mbox_controller_register(dev, mbox);
+}
+
+static struct platform_driver exynos_mbox_driver = {
+ .probe = exynos_mbox_probe,
+ .driver = {
+ .name = "exynos-acpm-mbox",
+ .of_match_table = exynos_mbox_match,
+ },
+};
+module_platform_driver(exynos_mbox_driver);
+
+MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@linaro.org>");
+MODULE_DESCRIPTION("Samsung Exynos mailbox driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mailbox/hi3660-mailbox.c b/drivers/mailbox/hi3660-mailbox.c
index ab24e731a782..17c29e960fbf 100644
--- a/drivers/mailbox/hi3660-mailbox.c
+++ b/drivers/mailbox/hi3660-mailbox.c
@@ -11,6 +11,7 @@
#include <linux/iopoll.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/mailbox/hi6220-mailbox.c b/drivers/mailbox/hi6220-mailbox.c
index fca61f5312d9..f77741ce42e7 100644
--- a/drivers/mailbox/hi6220-mailbox.c
+++ b/drivers/mailbox/hi6220-mailbox.c
@@ -15,6 +15,7 @@
#include <linux/kfifo.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -325,10 +326,7 @@ static int hi6220_mbox_probe(struct platform_device *pdev)
writel(~0x0, ACK_INT_CLR_REG(mbox->ipc));
/* use interrupt for tx's ack */
- if (of_find_property(node, "hi6220,mbox-tx-noirq", NULL))
- mbox->tx_irq_mode = false;
- else
- mbox->tx_irq_mode = true;
+ mbox->tx_irq_mode = !of_property_read_bool(node, "hi6220,mbox-tx-noirq");
if (mbox->tx_irq_mode)
mbox->controller.txdone_irq = true;
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index 20f2ec880ad6..6778afc64a04 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -4,6 +4,7 @@
* Copyright 2022 NXP, Peng Fan <peng.fan@nxp.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/firmware/imx/ipc.h>
#include <linux/firmware/imx/s4.h>
@@ -14,19 +15,26 @@
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/slab.h>
+#include <linux/workqueue.h>
-#define IMX_MU_CHANS 17
+#include "mailbox.h"
+
+#define IMX_MU_CHANS 24
/* TX0/RX0/RXDB[0-3] */
#define IMX_MU_SCU_CHANS 6
/* TX0/RX0 */
#define IMX_MU_S4_CHANS 2
-#define IMX_MU_CHAN_NAME_SIZE 20
+#define IMX_MU_CHAN_NAME_SIZE 32
-#define IMX_MU_NUM_RR 4
+#define IMX_MU_V2_PAR_OFF 0x4
+#define IMX_MU_V2_TR_MASK GENMASK(7, 0)
+#define IMX_MU_V2_RR_MASK GENMASK(15, 8)
#define IMX_MU_SECO_TX_TOUT (msecs_to_jiffies(3000))
#define IMX_MU_SECO_RX_TOUT (msecs_to_jiffies(3000))
@@ -38,6 +46,7 @@ enum imx_mu_chan_type {
IMX_MU_TYPE_TXDB = 2, /* Tx doorbell */
IMX_MU_TYPE_RXDB = 3, /* Rx doorbell */
IMX_MU_TYPE_RST = 4, /* Reset */
+ IMX_MU_TYPE_TXDB_V2 = 5, /* Tx doorbell with S/W ACK */
};
enum imx_mu_xcr {
@@ -72,7 +81,7 @@ struct imx_mu_con_priv {
char irq_desc[IMX_MU_CHAN_NAME_SIZE];
enum imx_mu_chan_type type;
struct mbox_chan *chan;
- struct tasklet_struct txdb_tasklet;
+ struct work_struct txdb_work;
};
struct imx_mu_priv {
@@ -89,10 +98,11 @@ struct imx_mu_priv {
struct clk *clk;
int irq[IMX_MU_CHANS];
bool suspend;
-
- u32 xcr[IMX_MU_xCR_MAX];
-
bool side_b;
+
+ u32 xcr[IMX_MU_xCR_MAX];
+ u32 num_tr;
+ u32 num_rr;
};
enum imx_mu_type {
@@ -106,7 +116,7 @@ struct imx_mu_dcfg {
int (*tx)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp, void *data);
int (*rx)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp);
int (*rxdb)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp);
- void (*init)(struct imx_mu_priv *priv);
+ int (*init)(struct imx_mu_priv *priv);
enum imx_mu_type type;
u32 xTR; /* Transmit Register0 */
u32 xRR; /* Receive Register0 */
@@ -215,6 +225,8 @@ static int imx_mu_generic_tx(struct imx_mu_priv *priv,
void *data)
{
u32 *arg = data;
+ u32 val;
+ int ret, count;
switch (cp->type) {
case IMX_MU_TYPE_TX:
@@ -223,7 +235,25 @@ static int imx_mu_generic_tx(struct imx_mu_priv *priv,
break;
case IMX_MU_TYPE_TXDB:
imx_mu_xcr_rmw(priv, IMX_MU_GCR, IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx), 0);
- tasklet_schedule(&cp->txdb_tasklet);
+ queue_work(system_bh_wq, &cp->txdb_work);
+ break;
+ case IMX_MU_TYPE_TXDB_V2:
+ imx_mu_write(priv, IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx),
+ priv->dcfg->xCR[IMX_MU_GCR]);
+ ret = -ETIMEDOUT;
+ count = 0;
+ while (ret && (count < 10)) {
+			ret = readl_poll_timeout(priv->base + priv->dcfg->xCR[IMX_MU_GCR],
+						 val,
+						 !(val & IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx)),
+						 0, 10000);
+
+ if (ret) {
+ dev_warn_ratelimited(priv->dev,
+ "channel type: %d timeout, %d times, retry\n",
+ cp->type, ++count);
+ }
+ }
break;
default:
dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
@@ -257,18 +287,17 @@ static int imx_mu_generic_rxdb(struct imx_mu_priv *priv,
static int imx_mu_specific_tx(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp, void *data)
{
u32 *arg = data;
+ u32 num_tr = priv->num_tr;
int i, ret;
u32 xsr;
- u32 size, max_size, num_tr;
+ u32 size, max_size;
if (priv->dcfg->type & IMX_MU_V2_S4) {
size = ((struct imx_s4_rpc_msg_max *)data)->hdr.size;
max_size = sizeof(struct imx_s4_rpc_msg_max);
- num_tr = 8;
} else {
size = ((struct imx_sc_rpc_msg_max *)data)->hdr.size;
max_size = sizeof(struct imx_sc_rpc_msg_max);
- num_tr = 4;
}
switch (cp->type) {
@@ -317,6 +346,7 @@ static int imx_mu_specific_rx(struct imx_mu_priv *priv, struct imx_mu_con_priv *
int i, ret;
u32 xsr;
u32 size, max_size;
+ u32 num_rr = priv->num_rr;
data = (u32 *)priv->msg;
@@ -338,13 +368,13 @@ static int imx_mu_specific_rx(struct imx_mu_priv *priv, struct imx_mu_con_priv *
for (i = 1; i < size; i++) {
ret = readl_poll_timeout(priv->base + priv->dcfg->xSR[IMX_MU_RSR], xsr,
- xsr & IMX_MU_xSR_RFn(priv->dcfg->type, i % 4), 0,
+ xsr & IMX_MU_xSR_RFn(priv->dcfg->type, i % num_rr), 0,
5 * USEC_PER_SEC);
if (ret) {
dev_err(priv->dev, "timeout read idx %d\n", i);
return ret;
}
- *data++ = imx_mu_read(priv, priv->dcfg->xRR + (i % 4) * 4);
+ *data++ = imx_mu_read(priv, priv->dcfg->xRR + (i % num_rr) * 4);
}
imx_mu_xcr_rmw(priv, IMX_MU_RCR, IMX_MU_xCR_RIEn(priv->dcfg->type, 0), 0);
@@ -408,7 +438,7 @@ static int imx_mu_seco_tx(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp,
}
/* Simulate hack for mbox framework */
- tasklet_schedule(&cp->txdb_tasklet);
+ queue_work(system_bh_wq, &cp->txdb_work);
break;
default:
@@ -472,9 +502,9 @@ exit:
return err;
}
-static void imx_mu_txdb_tasklet(unsigned long data)
+static void imx_mu_txdb_work(struct work_struct *t)
{
- struct imx_mu_con_priv *cp = (struct imx_mu_con_priv *)data;
+ struct imx_mu_con_priv *cp = from_work(cp, t, txdb_work);
mbox_chan_txdone(cp->chan, 0);
}
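The hunk above is the stock tasklet-to-BH-workqueue conversion; the same
pattern in isolation (generic sketch with hypothetical "foo" names):

	struct foo {
		struct work_struct work;
	};

	static void foo_work_fn(struct work_struct *w)
	{
		struct foo *f = from_work(f, w, work);	/* container_of() helper */
		/* runs in softirq-like context via the system BH workqueue */
	}

	INIT_WORK(&f->work, foo_work_fn);	/* replaces tasklet_init()     */
	queue_work(system_bh_wq, &f->work);	/* replaces tasklet_schedule() */
	cancel_work_sync(&f->work);		/* replaces tasklet_kill()     */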
@@ -553,10 +583,12 @@ static int imx_mu_startup(struct mbox_chan *chan)
int ret;
pm_runtime_get_sync(priv->dev);
+ if (cp->type == IMX_MU_TYPE_TXDB_V2)
+ return 0;
+
if (cp->type == IMX_MU_TYPE_TXDB) {
/* Tx doorbell don't have ACK support */
- tasklet_init(&cp->txdb_tasklet, imx_mu_txdb_tasklet,
- (unsigned long)cp);
+ INIT_WORK(&cp->txdb_work, imx_mu_txdb_work);
return 0;
}
@@ -594,8 +626,13 @@ static void imx_mu_shutdown(struct mbox_chan *chan)
int ret;
u32 sr;
+ if (cp->type == IMX_MU_TYPE_TXDB_V2) {
+ pm_runtime_put_sync(priv->dev);
+ return;
+ }
+
if (cp->type == IMX_MU_TYPE_TXDB) {
- tasklet_kill(&cp->txdb_tasklet);
+ cancel_work_sync(&cp->txdb_work);
pm_runtime_put_sync(priv->dev);
return;
}
@@ -670,6 +707,7 @@ static struct mbox_chan *imx_mu_specific_xlate(struct mbox_controller *mbox,
static struct mbox_chan * imx_mu_xlate(struct mbox_controller *mbox,
const struct of_phandle_args *sp)
{
+ struct mbox_chan *p_chan;
u32 type, idx, chan;
if (sp->args_count != 2) {
@@ -679,14 +717,25 @@ static struct mbox_chan * imx_mu_xlate(struct mbox_controller *mbox,
type = sp->args[0]; /* channel type */
idx = sp->args[1]; /* index */
- chan = type * 4 + idx;
+ /* RST only supports 1 channel */
+ if ((type == IMX_MU_TYPE_RST) && idx) {
+ dev_err(mbox->dev, "Invalid RST channel %d\n", idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ chan = type * 4 + idx;
if (chan >= mbox->num_chans) {
dev_err(mbox->dev, "Not supported channel number: %d. (type: %d, idx: %d)\n", chan, type, idx);
return ERR_PTR(-EINVAL);
}
- return &mbox->chans[chan];
+ p_chan = &mbox->chans[chan];
+
+ if (type == IMX_MU_TYPE_TXDB_V2)
+ p_chan->txdone_method = TXDONE_BY_ACK;
+
+ return p_chan;
}
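A note on the TXDONE_BY_ACK override above: with this txdone method the
framework is told that neither an IRQ nor polling will signal completion, so
the channel's user reports it explicitly. A client of a TXDB_V2 channel would
typically look like this (sketch, not taken from this patch):

	cl.knows_txdone = true;			/* client drives completion */
	ret = mbox_send_message(chan, &msg);
	/* ... once the doorbell is known to have been handled ... */
	mbox_client_txdone(chan, 0);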
static struct mbox_chan *imx_mu_seco_xlate(struct mbox_controller *mbox,
@@ -710,11 +759,30 @@ static struct mbox_chan *imx_mu_seco_xlate(struct mbox_controller *mbox,
return imx_mu_xlate(mbox, sp);
}
-static void imx_mu_init_generic(struct imx_mu_priv *priv)
+static void imx_mu_get_tr_rr(struct imx_mu_priv *priv)
+{
+ u32 val;
+
+ if (priv->dcfg->type & IMX_MU_V2) {
+ val = imx_mu_read(priv, IMX_MU_V2_PAR_OFF);
+ priv->num_tr = FIELD_GET(IMX_MU_V2_TR_MASK, val);
+ priv->num_rr = FIELD_GET(IMX_MU_V2_RR_MASK, val);
+ } else {
+ priv->num_tr = 4;
+ priv->num_rr = 4;
+ }
+}
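A worked example of the PAR decode above, with an illustrative register
value: if the MU parameter register reads 0x0804, then

	FIELD_GET(IMX_MU_V2_TR_MASK, 0x0804) == 0x04	/* bits 7:0  -> 4 TRs */
	FIELD_GET(IMX_MU_V2_RR_MASK, 0x0804) == 0x08	/* bits 15:8 -> 8 RRs */

so num_tr = 4 and num_rr = 8, which from here on bound the TR/RR loops that
previously hard-coded 4.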
+
+static int imx_mu_init_generic(struct imx_mu_priv *priv)
{
unsigned int i;
unsigned int val;
+ if (priv->num_rr > 4 || priv->num_tr > 4) {
+ WARN_ONCE(true, "%s not support TR/RR larger than 4\n", __func__);
+ return -EOPNOTSUPP;
+ }
+
for (i = 0; i < IMX_MU_CHANS; i++) {
struct imx_mu_con_priv *cp = &priv->con_priv[i];
@@ -723,14 +791,14 @@ static void imx_mu_init_generic(struct imx_mu_priv *priv)
cp->chan = &priv->mbox_chans[i];
priv->mbox_chans[i].con_priv = cp;
snprintf(cp->irq_desc, sizeof(cp->irq_desc),
- "imx_mu_chan[%i-%i]", cp->type, cp->idx);
+ "%s[%i-%u]", dev_name(priv->dev), cp->type, cp->idx);
}
priv->mbox.num_chans = IMX_MU_CHANS;
priv->mbox.of_xlate = imx_mu_xlate;
if (priv->side_b)
- return;
+ return 0;
/* Set default MU configuration */
for (i = 0; i < IMX_MU_xCR_MAX; i++)
@@ -741,11 +809,13 @@ static void imx_mu_init_generic(struct imx_mu_priv *priv)
imx_mu_write(priv, val, priv->dcfg->xSR[IMX_MU_GSR]);
/* Clear any pending RSR */
- for (i = 0; i < IMX_MU_NUM_RR; i++)
- imx_mu_read(priv, priv->dcfg->xRR + (i % 4) * 4);
+ for (i = 0; i < priv->num_rr; i++)
+ imx_mu_read(priv, priv->dcfg->xRR + i * 4);
+
+ return 0;
}
-static void imx_mu_init_specific(struct imx_mu_priv *priv)
+static int imx_mu_init_specific(struct imx_mu_priv *priv)
{
unsigned int i;
int num_chans = priv->dcfg->type & IMX_MU_V2_S4 ? IMX_MU_S4_CHANS : IMX_MU_SCU_CHANS;
@@ -758,7 +828,7 @@ static void imx_mu_init_specific(struct imx_mu_priv *priv)
cp->chan = &priv->mbox_chans[i];
priv->mbox_chans[i].con_priv = cp;
snprintf(cp->irq_desc, sizeof(cp->irq_desc),
- "imx_mu_chan[%i-%i]", cp->type, cp->idx);
+ "%s[%i-%u]", dev_name(priv->dev), cp->type, cp->idx);
}
priv->mbox.num_chans = num_chans;
@@ -767,12 +837,20 @@ static void imx_mu_init_specific(struct imx_mu_priv *priv)
/* Set default MU configuration */
for (i = 0; i < IMX_MU_xCR_MAX; i++)
imx_mu_write(priv, 0, priv->dcfg->xCR[i]);
+
+ return 0;
}
-static void imx_mu_init_seco(struct imx_mu_priv *priv)
+static int imx_mu_init_seco(struct imx_mu_priv *priv)
{
- imx_mu_init_generic(priv);
+ int ret;
+
+ ret = imx_mu_init_generic(priv);
+ if (ret)
+ return ret;
priv->mbox.of_xlate = imx_mu_seco_xlate;
+
+ return 0;
}
static int imx_mu_probe(struct platform_device *pdev)
@@ -837,9 +915,15 @@ static int imx_mu_probe(struct platform_device *pdev)
return ret;
}
+ imx_mu_get_tr_rr(priv);
+
priv->side_b = of_property_read_bool(np, "fsl,mu-side-b");
- priv->dcfg->init(priv);
+ ret = priv->dcfg->init(priv);
+ if (ret) {
+ dev_err(dev, "Failed to init MU\n");
+ goto disable_clk;
+ }
spin_lock_init(&priv->xcr_lock);
@@ -851,10 +935,10 @@ static int imx_mu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
ret = devm_mbox_controller_register(dev, &priv->mbox);
- if (ret) {
- clk_disable_unprepare(priv->clk);
- return ret;
- }
+ if (ret)
+ goto disable_clk;
+
+ of_platform_populate(dev->of_node, NULL, NULL, dev);
pm_runtime_enable(dev);
@@ -872,17 +956,16 @@ static int imx_mu_probe(struct platform_device *pdev)
disable_runtime_pm:
pm_runtime_disable(dev);
+disable_clk:
clk_disable_unprepare(priv->clk);
return ret;
}
-static int imx_mu_remove(struct platform_device *pdev)
+static void imx_mu_remove(struct platform_device *pdev)
{
struct imx_mu_priv *priv = platform_get_drvdata(pdev);
pm_runtime_disable(priv->dev);
-
- return 0;
}
static const struct imx_mu_dcfg imx_mu_cfg_imx6sx = {
@@ -969,6 +1052,9 @@ static const struct of_device_id imx_mu_dt_ids[] = {
{ .compatible = "fsl,imx8ulp-mu", .data = &imx_mu_cfg_imx8ulp },
{ .compatible = "fsl,imx8ulp-mu-s4", .data = &imx_mu_cfg_imx8ulp_s4 },
{ .compatible = "fsl,imx93-mu-s4", .data = &imx_mu_cfg_imx93_s4 },
+ { .compatible = "fsl,imx95-mu", .data = &imx_mu_cfg_imx8ulp },
+ { .compatible = "fsl,imx95-mu-ele", .data = &imx_mu_cfg_imx8ulp_s4 },
+ { .compatible = "fsl,imx95-mu-v2x", .data = &imx_mu_cfg_imx8ulp_s4 },
{ .compatible = "fsl,imx8-mu-scu", .data = &imx_mu_cfg_imx8_scu },
{ .compatible = "fsl,imx8-mu-seco", .data = &imx_mu_cfg_imx8_seco },
{ },
diff --git a/drivers/mailbox/mailbox-altera.c b/drivers/mailbox/mailbox-altera.c
index afb320e9d69c..17278c2571d3 100644
--- a/drivers/mailbox/mailbox-altera.c
+++ b/drivers/mailbox/mailbox-altera.c
@@ -130,7 +130,7 @@ static void altera_mbox_rx_data(struct mbox_chan *chan)
static void altera_mbox_poll_rx(struct timer_list *t)
{
- struct altera_mbox *mbox = from_timer(mbox, t, rxpoll_timer);
+ struct altera_mbox *mbox = timer_container_of(mbox, t, rxpoll_timer);
altera_mbox_rx_data(mbox->chan);
@@ -270,7 +270,7 @@ static void altera_mbox_shutdown(struct mbox_chan *chan)
writel_relaxed(~0, mbox->mbox_base + MAILBOX_INTMASK_REG);
free_irq(mbox->irq, chan);
} else if (!mbox->is_sender) {
- del_timer_sync(&mbox->rxpoll_timer);
+ timer_delete_sync(&mbox->rxpoll_timer);
}
}
diff --git a/drivers/mailbox/mailbox-mchp-ipc-sbi.c b/drivers/mailbox/mailbox-mchp-ipc-sbi.c
new file mode 100644
index 000000000000..a6e52009a424
--- /dev/null
+++ b/drivers/mailbox/mailbox-mchp-ipc-sbi.c
@@ -0,0 +1,504 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip Inter-Processor communication (IPC) driver
+ *
+ * Copyright (c) 2021 - 2024 Microchip Technology Inc. All rights reserved.
+ *
+ * Author: Valentina Fernandez <valentina.fernandezalanis@microchip.com>
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/of_device.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/mailbox/mchp-ipc.h>
+#include <asm/sbi.h>
+#include <asm/vendorid_list.h>
+
+#define IRQ_STATUS_BITS 12
+#define NUM_CHANS_PER_CLUSTER 5
+#define IPC_DMA_BIT_MASK 32
+#define SBI_EXT_MICROCHIP_TECHNOLOGY (SBI_EXT_VENDOR_START | \
+ MICROCHIP_VENDOR_ID)
+
+enum {
+ SBI_EXT_IPC_PROBE = 0x100,
+ SBI_EXT_IPC_CH_INIT,
+ SBI_EXT_IPC_SEND,
+ SBI_EXT_IPC_RECEIVE,
+ SBI_EXT_IPC_STATUS,
+};
+
+enum ipc_hw {
+ MIV_IHC,
+};
+
+/**
+ * struct mchp_ipc_mbox_info - IPC probe message format
+ *
+ * @hw_type: IPC implementation available in the hardware
+ * @num_channels: number of IPC channels available in the hardware
+ *
+ * Used to retrieve information on the IPC implementation
+ * using the SBI_EXT_IPC_PROBE SBI function id.
+ */
+struct mchp_ipc_mbox_info {
+ enum ipc_hw hw_type;
+ u8 num_channels;
+};
+
+/**
+ * struct mchp_ipc_init - IPC channel init message format
+ *
+ * @max_msg_size: maximum message size in bytes of a given channel
+ *
+ * struct used by the SBI_EXT_IPC_CH_INIT SBI function id to get
+ * the max message size in bytes of the initialized channel.
+ */
+struct mchp_ipc_init {
+ u16 max_msg_size;
+};
+
+/**
+ * struct mchp_ipc_status - IPC status message format
+ *
+ * @status: interrupt status for all channels associated to a cluster
+ * @status: interrupt status for all channels associated with a cluster
+ *
+ * struct used by the SBI_EXT_IPC_STATUS SBI function id to get
+ * the message present and message clear interrupt status for all the
+ * channels associated to a cluster.
+ * channels associated with a cluster.
+struct mchp_ipc_status {
+ u32 status;
+ u8 cluster;
+};
+
+/**
+ * struct mchp_ipc_sbi_msg - IPC SBI payload message
+ *
+ * @buf_addr: physical address where the received data should be copied to
+ * @size: maximum size (in bytes) that can be stored in the buffer pointed to by @buf_addr
+ * @irq_type: mask representing the irq types that triggered an irq
+ *
+ * struct used by the SBI_EXT_IPC_SEND/SBI_EXT_IPC_RECEIVE SBI function
+ * ids to send/receive a message from an associated processor using
+ * the IPC.
+ */
+struct mchp_ipc_sbi_msg {
+ u64 buf_addr;
+ u16 size;
+ u8 irq_type;
+};
+
+struct mchp_ipc_cluster_cfg {
+ void *buf_base;
+ phys_addr_t buf_base_addr;
+ int irq;
+};
+
+struct mchp_ipc_sbi_mbox {
+ struct device *dev;
+ struct mbox_chan *chans;
+ struct mchp_ipc_cluster_cfg *cluster_cfg;
+ void *buf_base;
+ unsigned long buf_base_addr;
+ struct mbox_controller controller;
+ enum ipc_hw hw_type;
+};
+
+static int mchp_ipc_sbi_chan_send(u32 command, u32 channel, unsigned long address)
+{
+ struct sbiret ret;
+
+ ret = sbi_ecall(SBI_EXT_MICROCHIP_TECHNOLOGY, command, channel,
+ address, 0, 0, 0, 0);
+
+ if (ret.error)
+ return sbi_err_map_linux_errno(ret.error);
+ else
+ return ret.value;
+}
+
+static int mchp_ipc_sbi_send(u32 command, unsigned long address)
+{
+ struct sbiret ret;
+
+ ret = sbi_ecall(SBI_EXT_MICROCHIP_TECHNOLOGY, command, address,
+ 0, 0, 0, 0, 0);
+
+ if (ret.error)
+ return sbi_err_map_linux_errno(ret.error);
+ else
+ return ret.value;
+}
+
+static struct mchp_ipc_sbi_mbox *to_mchp_ipc_mbox(struct mbox_controller *mbox)
+{
+ return container_of(mbox, struct mchp_ipc_sbi_mbox, controller);
+}
+
+static inline void mchp_ipc_prepare_receive_req(struct mbox_chan *chan)
+{
+ struct mchp_ipc_sbi_chan *chan_info = (struct mchp_ipc_sbi_chan *)chan->con_priv;
+ struct mchp_ipc_sbi_msg request;
+
+ request.buf_addr = chan_info->msg_buf_rx_addr;
+ request.size = chan_info->max_msg_size;
+ memcpy(chan_info->buf_base_rx, &request, sizeof(struct mchp_ipc_sbi_msg));
+}
+
+static inline void mchp_ipc_process_received_data(struct mbox_chan *chan,
+ struct mchp_ipc_msg *ipc_msg)
+{
+ struct mchp_ipc_sbi_chan *chan_info = (struct mchp_ipc_sbi_chan *)chan->con_priv;
+ struct mchp_ipc_sbi_msg sbi_msg;
+
+ memcpy(&sbi_msg, chan_info->buf_base_rx, sizeof(struct mchp_ipc_sbi_msg));
+ ipc_msg->buf = (u32 *)chan_info->msg_buf_rx;
+ ipc_msg->size = sbi_msg.size;
+}
+
+static irqreturn_t mchp_ipc_cluster_aggr_isr(int irq, void *data)
+{
+ struct mbox_chan *chan;
+ struct mchp_ipc_sbi_chan *chan_info;
+ struct mchp_ipc_sbi_mbox *ipc = (struct mchp_ipc_sbi_mbox *)data;
+ struct mchp_ipc_msg ipc_msg;
+ struct mchp_ipc_status status_msg;
+ int ret;
+ unsigned long hartid;
+ u32 i, chan_index, chan_id;
+
+ /* Find out the hart that originated the irq */
+ for_each_online_cpu(i) {
+ hartid = cpuid_to_hartid_map(i);
+ if (irq == ipc->cluster_cfg[hartid].irq)
+ break;
+ }
+
+ status_msg.cluster = hartid;
+ memcpy(ipc->cluster_cfg[hartid].buf_base, &status_msg, sizeof(struct mchp_ipc_status));
+
+ ret = mchp_ipc_sbi_send(SBI_EXT_IPC_STATUS, ipc->cluster_cfg[hartid].buf_base_addr);
+ if (ret < 0) {
+ dev_err_ratelimited(ipc->dev, "could not get IHC irq status ret=%d\n", ret);
+ return IRQ_HANDLED;
+ }
+
+ memcpy(&status_msg, ipc->cluster_cfg[hartid].buf_base, sizeof(struct mchp_ipc_status));
+
+ /*
+ * Iterate over each bit set in the IHC interrupt status register (IRQ_STATUS) to identify
+ * the channel(s) that have a message to be processed/acknowledged.
+ * The bits are organized in alternating format, where each pair of bits represents
+ * the status of the message present and message clear interrupts for each cluster/hart
+ * (from hart 0 to hart 5). Each cluster can have up to 5 fixed channels associated with it.
+ */
+
+ for_each_set_bit(i, (unsigned long *)&status_msg.status, IRQ_STATUS_BITS) {
+ /* Find out the destination hart that triggered the interrupt */
+ chan_index = i / 2;
+
+ /*
+ * The IP has no loopback channels, so we need to decrement the index when
+ * the target hart has a greater index than our own
+ */
+ if (chan_index >= status_msg.cluster)
+ chan_index--;
+
+ /*
+ * Calculate the channel id given the hart and channel index. Channel IDs
+ * are unique across all clusters of an IPC, and iterate contiguously
+ * across all clusters.
+ */
+		chan_id = status_msg.cluster * NUM_CHANS_PER_CLUSTER + chan_index;
+
+ chan = &ipc->chans[chan_id];
+ chan_info = (struct mchp_ipc_sbi_chan *)chan->con_priv;
+
+ if (i % 2 == 0) {
+ mchp_ipc_prepare_receive_req(chan);
+ ret = mchp_ipc_sbi_chan_send(SBI_EXT_IPC_RECEIVE, chan_id,
+ chan_info->buf_base_rx_addr);
+ if (ret < 0)
+ continue;
+
+ mchp_ipc_process_received_data(chan, &ipc_msg);
+ mbox_chan_received_data(&ipc->chans[chan_id], (void *)&ipc_msg);
+
+ } else {
+ ret = mchp_ipc_sbi_chan_send(SBI_EXT_IPC_RECEIVE, chan_id,
+ chan_info->buf_base_rx_addr);
+ mbox_chan_txdone(&ipc->chans[chan_id], ret);
+ }
+ }
+ return IRQ_HANDLED;
+}
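To make the bit-pair decode above concrete, here is the same arithmetic as a
standalone sketch (hypothetical values; assumes the contiguous channel-id
layout the comment describes):

	#include <stdio.h>

	#define NUM_CHANS_PER_CLUSTER 5

	static unsigned int decode_chan_id(unsigned int bit, unsigned int cluster)
	{
		unsigned int chan_index = bit / 2;	/* each hart owns a bit pair */

		if (chan_index >= cluster)		/* no loopback channel */
			chan_index--;

		return cluster * NUM_CHANS_PER_CLUSTER + chan_index;
	}

	int main(void)
	{
		/* cluster/hart 1, "message present" irq from hart 2 => bit 4 */
		printf("%u\n", decode_chan_id(4, 1));	/* prints 6 */
		return 0;
	}

Even status bits take the receive path (mbox_chan_received_data()); odd bits
are message-clear acks and complete the transmission (mbox_chan_txdone()).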
+
+static int mchp_ipc_send_data(struct mbox_chan *chan, void *data)
+{
+ struct mchp_ipc_sbi_chan *chan_info = (struct mchp_ipc_sbi_chan *)chan->con_priv;
+ const struct mchp_ipc_msg *msg = data;
+ struct mchp_ipc_sbi_msg sbi_payload;
+
+ memcpy(chan_info->msg_buf_tx, msg->buf, msg->size);
+ sbi_payload.buf_addr = chan_info->msg_buf_tx_addr;
+ sbi_payload.size = msg->size;
+ memcpy(chan_info->buf_base_tx, &sbi_payload, sizeof(sbi_payload));
+
+ return mchp_ipc_sbi_chan_send(SBI_EXT_IPC_SEND, chan_info->id, chan_info->buf_base_tx_addr);
+}
+
+static int mchp_ipc_startup(struct mbox_chan *chan)
+{
+ struct mchp_ipc_sbi_chan *chan_info = (struct mchp_ipc_sbi_chan *)chan->con_priv;
+ struct mchp_ipc_sbi_mbox *ipc = to_mchp_ipc_mbox(chan->mbox);
+ struct mchp_ipc_init ch_init_msg;
+ int ret;
+
+ /*
+ * The TX base buffer is used to transmit two types of messages:
+ * - struct mchp_ipc_init to initialize the channel
+ * - struct mchp_ipc_sbi_msg to transmit user data/payload
+ * Ensure the TX buffer size is large enough to accommodate either message type.
+ */
+ size_t max_size = max(sizeof(struct mchp_ipc_init), sizeof(struct mchp_ipc_sbi_msg));
+
+ chan_info->buf_base_tx = kmalloc(max_size, GFP_KERNEL);
+ if (!chan_info->buf_base_tx) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ chan_info->buf_base_tx_addr = __pa(chan_info->buf_base_tx);
+
+ chan_info->buf_base_rx = kmalloc(max_size, GFP_KERNEL);
+ if (!chan_info->buf_base_rx) {
+ ret = -ENOMEM;
+ goto fail_free_buf_base_tx;
+ }
+
+ chan_info->buf_base_rx_addr = __pa(chan_info->buf_base_rx);
+
+ ret = mchp_ipc_sbi_chan_send(SBI_EXT_IPC_CH_INIT, chan_info->id,
+ chan_info->buf_base_tx_addr);
+ if (ret < 0) {
+ dev_err(ipc->dev, "channel %u init failed\n", chan_info->id);
+ goto fail_free_buf_base_rx;
+ }
+
+ memcpy(&ch_init_msg, chan_info->buf_base_tx, sizeof(struct mchp_ipc_init));
+ chan_info->max_msg_size = ch_init_msg.max_msg_size;
+
+ chan_info->msg_buf_tx = kmalloc(chan_info->max_msg_size, GFP_KERNEL);
+ if (!chan_info->msg_buf_tx) {
+ ret = -ENOMEM;
+ goto fail_free_buf_base_rx;
+ }
+
+ chan_info->msg_buf_tx_addr = __pa(chan_info->msg_buf_tx);
+
+ chan_info->msg_buf_rx = kmalloc(chan_info->max_msg_size, GFP_KERNEL);
+ if (!chan_info->msg_buf_rx) {
+ ret = -ENOMEM;
+ goto fail_free_buf_msg_tx;
+ }
+
+ chan_info->msg_buf_rx_addr = __pa(chan_info->msg_buf_rx);
+
+ switch (ipc->hw_type) {
+ case MIV_IHC:
+ return 0;
+	default:
+		/* unknown hardware type: unwind and return an error */
+		ret = -EOPNOTSUPP;
+		goto fail_free_buf_msg_rx;
+	}
+
+fail_free_buf_msg_rx:
+ kfree(chan_info->msg_buf_rx);
+fail_free_buf_msg_tx:
+ kfree(chan_info->msg_buf_tx);
+fail_free_buf_base_rx:
+ kfree(chan_info->buf_base_rx);
+fail_free_buf_base_tx:
+ kfree(chan_info->buf_base_tx);
+fail:
+ return ret;
+}
+
+static void mchp_ipc_shutdown(struct mbox_chan *chan)
+{
+ struct mchp_ipc_sbi_chan *chan_info = (struct mchp_ipc_sbi_chan *)chan->con_priv;
+
+ kfree(chan_info->buf_base_tx);
+ kfree(chan_info->buf_base_rx);
+ kfree(chan_info->msg_buf_tx);
+ kfree(chan_info->msg_buf_rx);
+}
+
+static const struct mbox_chan_ops mchp_ipc_ops = {
+ .startup = mchp_ipc_startup,
+ .send_data = mchp_ipc_send_data,
+ .shutdown = mchp_ipc_shutdown,
+};
+
+static struct mbox_chan *mchp_ipc_mbox_xlate(struct mbox_controller *controller,
+ const struct of_phandle_args *spec)
+{
+ struct mchp_ipc_sbi_mbox *ipc = to_mchp_ipc_mbox(controller);
+ unsigned int chan_id = spec->args[0];
+
+ if (chan_id >= ipc->controller.num_chans) {
+ dev_err(ipc->dev, "invalid channel id %d\n", chan_id);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &ipc->chans[chan_id];
+}
+
+static int mchp_ipc_get_cluster_aggr_irq(struct mchp_ipc_sbi_mbox *ipc)
+{
+ struct platform_device *pdev = to_platform_device(ipc->dev);
+ char *irq_name;
+ int cpuid, ret;
+ unsigned long hartid;
+ bool irq_found = false;
+
+ for_each_online_cpu(cpuid) {
+ hartid = cpuid_to_hartid_map(cpuid);
+ irq_name = devm_kasprintf(ipc->dev, GFP_KERNEL, "hart-%lu", hartid);
+ ret = platform_get_irq_byname_optional(pdev, irq_name);
+ if (ret <= 0)
+ continue;
+
+ ipc->cluster_cfg[hartid].irq = ret;
+ ret = devm_request_irq(ipc->dev, ipc->cluster_cfg[hartid].irq,
+ mchp_ipc_cluster_aggr_isr, IRQF_SHARED,
+ "miv-ihc-irq", ipc);
+ if (ret)
+ return ret;
+
+ ipc->cluster_cfg[hartid].buf_base = devm_kmalloc(ipc->dev,
+ sizeof(struct mchp_ipc_status),
+ GFP_KERNEL);
+
+ if (!ipc->cluster_cfg[hartid].buf_base)
+ return -ENOMEM;
+
+ ipc->cluster_cfg[hartid].buf_base_addr = __pa(ipc->cluster_cfg[hartid].buf_base);
+
+ irq_found = true;
+ }
+
+ return irq_found;
+}
+
+static int mchp_ipc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mchp_ipc_mbox_info ipc_info;
+ struct mchp_ipc_sbi_mbox *ipc;
+ struct mchp_ipc_sbi_chan *priv;
+ bool irq_avail = false;
+ int ret;
+ u32 chan_id;
+
+ ret = sbi_probe_extension(SBI_EXT_MICROCHIP_TECHNOLOGY);
+ if (ret <= 0)
+ return dev_err_probe(dev, ret, "Microchip SBI extension not detected\n");
+
+ ipc = devm_kzalloc(dev, sizeof(*ipc), GFP_KERNEL);
+ if (!ipc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ipc);
+
+ ipc->buf_base = devm_kmalloc(dev, sizeof(struct mchp_ipc_mbox_info), GFP_KERNEL);
+ if (!ipc->buf_base)
+ return -ENOMEM;
+
+ ipc->buf_base_addr = __pa(ipc->buf_base);
+
+ ret = mchp_ipc_sbi_send(SBI_EXT_IPC_PROBE, ipc->buf_base_addr);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "could not probe IPC SBI service\n");
+
+ memcpy(&ipc_info, ipc->buf_base, sizeof(struct mchp_ipc_mbox_info));
+ ipc->controller.num_chans = ipc_info.num_channels;
+ ipc->hw_type = ipc_info.hw_type;
+
+ ipc->chans = devm_kcalloc(dev, ipc->controller.num_chans, sizeof(*ipc->chans), GFP_KERNEL);
+ if (!ipc->chans)
+ return -ENOMEM;
+
+ ipc->dev = dev;
+ ipc->controller.txdone_irq = true;
+ ipc->controller.dev = ipc->dev;
+ ipc->controller.ops = &mchp_ipc_ops;
+ ipc->controller.chans = ipc->chans;
+ ipc->controller.of_xlate = mchp_ipc_mbox_xlate;
+
+ for (chan_id = 0; chan_id < ipc->controller.num_chans; chan_id++) {
+ priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ipc->chans[chan_id].con_priv = priv;
+ priv->id = chan_id;
+ }
+
+ if (ipc->hw_type == MIV_IHC) {
+ ipc->cluster_cfg = devm_kcalloc(dev, num_online_cpus(),
+ sizeof(struct mchp_ipc_cluster_cfg),
+ GFP_KERNEL);
+ if (!ipc->cluster_cfg)
+ return -ENOMEM;
+
+ if (mchp_ipc_get_cluster_aggr_irq(ipc))
+ irq_avail = true;
+ }
+
+ if (!irq_avail)
+ return dev_err_probe(dev, -ENODEV, "missing interrupt property\n");
+
+ ret = devm_mbox_controller_register(dev, &ipc->controller);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Inter-Processor communication (IPC) registration failed\n");
+
+ return 0;
+}
+
+static const struct of_device_id mchp_ipc_of_match[] = {
+ {.compatible = "microchip,sbi-ipc", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mchp_ipc_of_match);
+
+static struct platform_driver mchp_ipc_driver = {
+ .driver = {
+ .name = "microchip_ipc",
+ .of_match_table = mchp_ipc_of_match,
+ },
+ .probe = mchp_ipc_probe,
+};
+
+module_platform_driver(mchp_ipc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Valentina Fernandez <valentina.fernandezalanis@microchip.com>");
+MODULE_DESCRIPTION("Microchip Inter-Processor Communication (IPC) driver");
diff --git a/drivers/mailbox/mailbox-mpfs.c b/drivers/mailbox/mailbox-mpfs.c
index 853901acaeec..d5d9effece97 100644
--- a/drivers/mailbox/mailbox-mpfs.c
+++ b/drivers/mailbox/mailbox-mpfs.c
@@ -13,11 +13,15 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/regmap.h>
#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/mailbox_controller.h>
#include <soc/microchip/mpfs.h>
+#define MESSAGE_INT_OFFSET 0x18cu
#define SERVICES_CR_OFFSET 0x50u
#define SERVICES_SR_OFFSET 0x54u
#define MAILBOX_REG_OFFSET 0x800u
@@ -39,7 +43,7 @@
#define SCB_CTRL_NOTIFY_MASK BIT(SCB_CTRL_NOTIFY)
#define SCB_CTRL_POS (16)
-#define SCB_CTRL_MASK GENMASK_ULL(SCB_CTRL_POS + SCB_MASK_WIDTH, SCB_CTRL_POS)
+#define SCB_CTRL_MASK GENMASK(SCB_CTRL_POS + SCB_MASK_WIDTH - 1, SCB_CTRL_POS)
/* SCBCTRL service status register */
@@ -67,6 +71,7 @@ struct mpfs_mbox {
void __iomem *int_reg;
struct mbox_chan chans[1];
struct mpfs_mss_response *response;
+ struct regmap *sysreg_scb, *control_scb;
u16 resp_offset;
};
@@ -74,11 +79,39 @@ static bool mpfs_mbox_busy(struct mpfs_mbox *mbox)
{
u32 status;
- status = readl_relaxed(mbox->ctrl_base + SERVICES_SR_OFFSET);
+ if (mbox->control_scb)
+ regmap_read(mbox->control_scb, SERVICES_SR_OFFSET, &status);
+ else
+ status = readl_relaxed(mbox->ctrl_base + SERVICES_SR_OFFSET);
return status & SCB_STATUS_BUSY_MASK;
}
+static bool mpfs_mbox_last_tx_done(struct mbox_chan *chan)
+{
+ struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv;
+ struct mpfs_mss_response *response = mbox->response;
+ u32 val;
+
+ if (mpfs_mbox_busy(mbox))
+ return false;
+
+ /*
+ * The service status is stored in bits 31:16 of the SERVICES_SR
+ * register & is only valid when the system controller is not busy.
+	 * Failed services are intended to generate interrupts, but in reality
+ * this does not happen, so the status must be checked here.
+ */
+ if (mbox->control_scb)
+ regmap_read(mbox->control_scb, SERVICES_SR_OFFSET, &val);
+ else
+ val = readl_relaxed(mbox->ctrl_base + SERVICES_SR_OFFSET);
+
+ response->resp_status = (val & SCB_STATUS_MASK) >> SCB_STATUS_POS;
+
+ return true;
+}
+
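With ->last_tx_done() in place the controller switches from IRQ-signalled to
polled TX completion: the mailbox core calls the hook every txpoll_period
milliseconds (10 ms, set further down in probe) until it returns true. The
contract in generic form (sketch with hypothetical register names):

	static bool my_last_tx_done(struct mbox_chan *chan)
	{
		struct my_mbox *mb = chan->con_priv;

		/* true once the remote side has consumed the request */
		return !(readl(mb->base + MY_STATUS_REG) & MY_BUSY_BIT);
	}

	/* in probe: */
	controller->txdone_poll = true;
	controller->txpoll_period = 10;		/* ms */
	controller->ops = &my_ops;		/* with .last_tx_done hooked up */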
static int mpfs_mbox_send_data(struct mbox_chan *chan, void *data)
{
struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv;
@@ -118,9 +151,15 @@ static int mpfs_mbox_send_data(struct mbox_chan *chan, void *data)
}
opt_sel = ((msg->mbox_offset << 7u) | (msg->cmd_opcode & 0x7fu));
+
tx_trigger = (opt_sel << SCB_CTRL_POS) & SCB_CTRL_MASK;
tx_trigger |= SCB_CTRL_REQ_MASK | SCB_STATUS_NOTIFY_MASK;
- writel_relaxed(tx_trigger, mbox->ctrl_base + SERVICES_CR_OFFSET);
+
+ if (mbox->control_scb)
+ regmap_write(mbox->control_scb, SERVICES_CR_OFFSET, tx_trigger);
+ else
+ writel_relaxed(tx_trigger, mbox->ctrl_base + SERVICES_CR_OFFSET);
+
return 0;
}
@@ -130,7 +169,7 @@ static void mpfs_mbox_rx_data(struct mbox_chan *chan)
struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv;
struct mpfs_mss_response *response = mbox->response;
u16 num_words = ALIGN((response->resp_size), (4)) / 4U;
- u32 i, status;
+ u32 i;
if (!response->resp_msg) {
dev_err(mbox->dev, "failed to assign memory for response %d\n", -ENOMEM);
@@ -138,8 +177,6 @@ static void mpfs_mbox_rx_data(struct mbox_chan *chan)
}
/*
- * The status is stored in bits 31:16 of the SERVICES_SR register.
- * It is only valid when BUSY == 0.
* We should *never* get an interrupt while the controller is
* still in the busy state. If we do, something has gone badly
* wrong & the content of the mailbox would not be valid.
@@ -150,24 +187,10 @@ static void mpfs_mbox_rx_data(struct mbox_chan *chan)
return;
}
- status = readl_relaxed(mbox->ctrl_base + SERVICES_SR_OFFSET);
-
- /*
- * If the status of the individual servers is non-zero, the service has
- * failed. The contents of the mailbox at this point are not be valid,
- * so don't bother reading them. Set the status so that the driver
- * implementing the service can handle the result.
- */
- response->resp_status = (status & SCB_STATUS_MASK) >> SCB_STATUS_POS;
- if (response->resp_status)
- return;
-
- if (!mpfs_mbox_busy(mbox)) {
- for (i = 0; i < num_words; i++) {
- response->resp_msg[i] =
- readl_relaxed(mbox->mbox_base
- + mbox->resp_offset + i * 0x4);
- }
+ for (i = 0; i < num_words; i++) {
+ response->resp_msg[i] =
+ readl_relaxed(mbox->mbox_base
+ + mbox->resp_offset + i * 0x4);
}
mbox_chan_received_data(chan, response);
@@ -178,11 +201,13 @@ static irqreturn_t mpfs_mbox_inbox_isr(int irq, void *data)
struct mbox_chan *chan = data;
struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv;
- writel_relaxed(0, mbox->int_reg);
+ if (mbox->control_scb)
+ regmap_write(mbox->sysreg_scb, MESSAGE_INT_OFFSET, 0);
+ else
+ writel_relaxed(0, mbox->int_reg);
mpfs_mbox_rx_data(chan);
- mbox_chan_txdone(chan, 0);
return IRQ_HANDLED;
}
@@ -212,30 +237,65 @@ static const struct mbox_chan_ops mpfs_mbox_ops = {
.send_data = mpfs_mbox_send_data,
.startup = mpfs_mbox_startup,
.shutdown = mpfs_mbox_shutdown,
+ .last_tx_done = mpfs_mbox_last_tx_done,
};
-static int mpfs_mbox_probe(struct platform_device *pdev)
+static inline int mpfs_mbox_syscon_probe(struct mpfs_mbox *mbox, struct platform_device *pdev)
{
- struct mpfs_mbox *mbox;
- struct resource *regs;
- int ret;
+ mbox->control_scb = syscon_regmap_lookup_by_compatible("microchip,mpfs-control-scb");
+ if (IS_ERR(mbox->control_scb))
+ return PTR_ERR(mbox->control_scb);
- mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL);
- if (!mbox)
- return -ENOMEM;
+ mbox->sysreg_scb = syscon_regmap_lookup_by_compatible("microchip,mpfs-sysreg-scb");
+ if (IS_ERR(mbox->sysreg_scb))
+ return PTR_ERR(mbox->sysreg_scb);
+
+ mbox->mbox_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mbox->mbox_base))
+ return PTR_ERR(mbox->mbox_base);
+
+ return 0;
+}
+
+static inline int mpfs_mbox_old_format_probe(struct mpfs_mbox *mbox, struct platform_device *pdev)
+{
+	dev_warn(&pdev->dev, "falling back to old devicetree format\n");
- mbox->ctrl_base = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
+ mbox->ctrl_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mbox->ctrl_base))
return PTR_ERR(mbox->ctrl_base);
- mbox->int_reg = devm_platform_get_and_ioremap_resource(pdev, 1, &regs);
+ mbox->int_reg = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(mbox->int_reg))
return PTR_ERR(mbox->int_reg);
- mbox->mbox_base = devm_platform_get_and_ioremap_resource(pdev, 2, &regs);
+ mbox->mbox_base = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(mbox->mbox_base)) // account for the old dt-binding w/ 2 regs
mbox->mbox_base = mbox->ctrl_base + MAILBOX_REG_OFFSET;
+ return 0;
+}
+
+static int mpfs_mbox_probe(struct platform_device *pdev)
+{
+ struct mpfs_mbox *mbox;
+ int ret;
+
+ mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ ret = mpfs_mbox_syscon_probe(mbox, pdev);
+ if (ret) {
+ /*
+		 * Set this to NULL so it can later be used to decide whether
+		 * to go through the regmap or the plain MMIO path.
+ */
+ mbox->control_scb = NULL;
+ ret = mpfs_mbox_old_format_probe(mbox, pdev);
+ if (ret)
+ return ret;
+ }
mbox->irq = platform_get_irq(pdev, 0);
if (mbox->irq < 0)
return mbox->irq;
@@ -247,7 +307,8 @@ static int mpfs_mbox_probe(struct platform_device *pdev)
mbox->controller.num_chans = 1;
mbox->controller.chans = mbox->chans;
mbox->controller.ops = &mpfs_mbox_ops;
- mbox->controller.txdone_irq = true;
+ mbox->controller.txdone_poll = true;
+ mbox->controller.txpoll_period = 10u;
ret = devm_mbox_controller_register(&pdev->dev, &mbox->controller);
if (ret) {
diff --git a/drivers/mailbox/mailbox-sti.c b/drivers/mailbox/mailbox-sti.c
index 823061dd8c8e..b4b5bdd503cf 100644
--- a/drivers/mailbox/mailbox-sti.c
+++ b/drivers/mailbox/mailbox-sti.c
@@ -17,8 +17,8 @@
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include "mailbox.h"
@@ -403,7 +403,6 @@ MODULE_DEVICE_TABLE(of, sti_mailbox_match);
static int sti_mbox_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
struct mbox_controller *mbox;
struct sti_mbox_device *mdev;
struct device_node *np = pdev->dev.of_node;
@@ -411,12 +410,11 @@ static int sti_mbox_probe(struct platform_device *pdev)
int irq;
int ret;
- match = of_match_device(sti_mailbox_match, &pdev->dev);
- if (!match) {
+ pdev->dev.platform_data = (struct sti_mbox_pdata *)device_get_match_data(&pdev->dev);
+ if (!pdev->dev.platform_data) {
dev_err(&pdev->dev, "No configuration found\n");
return -ENODEV;
}
- pdev->dev.platform_data = (struct sti_mbox_pdata *) match->data;
mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
if (!mdev)
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
index 4555d678fadd..3a28ab5c42e5 100644
--- a/drivers/mailbox/mailbox-test.c
+++ b/drivers/mailbox/mailbox-test.c
@@ -12,10 +12,12 @@
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/sched/signal.h>
@@ -38,6 +40,7 @@ struct mbox_test_device {
char *signal;
char *message;
spinlock_t lock;
+ struct mutex mutex;
wait_queue_head_t waitq;
struct fasync_struct *async_queue;
struct dentry *root_debugfs_dir;
@@ -95,6 +98,7 @@ static ssize_t mbox_test_message_write(struct file *filp,
size_t count, loff_t *ppos)
{
struct mbox_test_device *tdev = filp->private_data;
+ char *message;
void *data;
int ret;
@@ -110,10 +114,13 @@ static ssize_t mbox_test_message_write(struct file *filp,
return -EINVAL;
}
- tdev->message = kzalloc(MBOX_MAX_MSG_LEN, GFP_KERNEL);
- if (!tdev->message)
+ message = kzalloc(MBOX_MAX_MSG_LEN, GFP_KERNEL);
+ if (!message)
return -ENOMEM;
+ mutex_lock(&tdev->mutex);
+
+ tdev->message = message;
ret = copy_from_user(tdev->message, userbuf, count);
if (ret) {
ret = -EFAULT;
@@ -144,6 +151,8 @@ out:
kfree(tdev->message);
tdev->signal = NULL;
+ mutex_unlock(&tdev->mutex);
+
return ret < 0 ? ret : count;
}
@@ -259,7 +268,7 @@ static int mbox_test_add_debugfs(struct platform_device *pdev,
return 0;
tdev->root_debugfs_dir = debugfs_create_dir(dev_name(&pdev->dev), NULL);
- if (!tdev->root_debugfs_dir) {
+ if (IS_ERR(tdev->root_debugfs_dir)) {
dev_err(&pdev->dev, "Failed to create Mailbox debugfs\n");
return -EINVAL;
}
@@ -358,8 +367,7 @@ static int mbox_test_probe(struct platform_device *pdev)
return -ENOMEM;
/* It's okay for MMIO to be NULL */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tdev->tx_mmio = devm_ioremap_resource(&pdev->dev, res);
+ tdev->tx_mmio = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (PTR_ERR(tdev->tx_mmio) == -EBUSY) {
/* if reserved area in SRAM, try just ioremap */
size = resource_size(res);
@@ -369,8 +377,7 @@ static int mbox_test_probe(struct platform_device *pdev)
}
/* If specified, second reg entry is Rx MMIO */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- tdev->rx_mmio = devm_ioremap_resource(&pdev->dev, res);
+ tdev->rx_mmio = devm_platform_get_and_ioremap_resource(pdev, 1, &res);
if (PTR_ERR(tdev->rx_mmio) == -EBUSY) {
size = resource_size(res);
tdev->rx_mmio = devm_ioremap(&pdev->dev, res->start, size);
@@ -381,7 +388,7 @@ static int mbox_test_probe(struct platform_device *pdev)
tdev->tx_channel = mbox_test_request_channel(pdev, "tx");
tdev->rx_channel = mbox_test_request_channel(pdev, "rx");
- if (!tdev->tx_channel && !tdev->rx_channel)
+ if (IS_ERR_OR_NULL(tdev->tx_channel) && IS_ERR_OR_NULL(tdev->rx_channel))
return -EPROBE_DEFER;
/* If Rx is not specified but has Rx MMIO, then Rx = Tx */
@@ -392,6 +399,7 @@ static int mbox_test_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tdev);
spin_lock_init(&tdev->lock);
+ mutex_init(&tdev->mutex);
if (tdev->rx_channel) {
tdev->rx_buffer = devm_kzalloc(&pdev->dev,
@@ -410,7 +418,7 @@ static int mbox_test_probe(struct platform_device *pdev)
return 0;
}
-static int mbox_test_remove(struct platform_device *pdev)
+static void mbox_test_remove(struct platform_device *pdev)
{
struct mbox_test_device *tdev = platform_get_drvdata(pdev);
@@ -420,8 +428,6 @@ static int mbox_test_remove(struct platform_device *pdev)
mbox_free_channel(tdev->tx_channel);
if (tdev->rx_channel)
mbox_free_channel(tdev->rx_channel);
-
- return 0;
}
static const struct of_device_id mbox_test_match[] = {
@@ -435,7 +441,7 @@ static struct platform_driver mbox_test_driver = {
.name = "mailbox_test",
.of_match_table = mbox_test_match,
},
- .probe = mbox_test_probe,
+ .probe = mbox_test_probe,
.remove = mbox_test_remove,
};
module_platform_driver(mbox_test_driver);
diff --git a/drivers/mailbox/mailbox-th1520.c b/drivers/mailbox/mailbox-th1520.c
new file mode 100644
index 000000000000..626957c2e435
--- /dev/null
+++ b/drivers/mailbox/mailbox-th1520.c
@@ -0,0 +1,597 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Alibaba Group Holding Limited.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* Status/clear/mask registers */
+#define TH_1520_MBOX_STA 0x0
+#define TH_1520_MBOX_CLR 0x4
+#define TH_1520_MBOX_MASK 0xc
+
+/* Transmit/receive data registers: INFO0 ~ INFO6 */
+#define TH_1520_MBOX_INFO_NUM 8
+#define TH_1520_MBOX_DATA_INFO_NUM 7
+#define TH_1520_MBOX_INFO0 0x14
+/* Transmit ack register: INFO7 */
+#define TH_1520_MBOX_INFO7 0x30
+
+/* Generate remote icu IRQ Register */
+#define TH_1520_MBOX_GEN 0x10
+#define TH_1520_MBOX_GEN_RX_DATA BIT(6)
+#define TH_1520_MBOX_GEN_TX_ACK BIT(7)
+
+#define TH_1520_MBOX_CHAN_RES_SIZE 0x1000
+#define TH_1520_MBOX_CHANS 4
+#define TH_1520_MBOX_CHAN_NAME_SIZE 20
+
+#define TH_1520_MBOX_ACK_MAGIC 0xdeadbeaf
+
+#ifdef CONFIG_PM_SLEEP
+/* store MBOX context across system-wide suspend/resume transitions */
+struct th1520_mbox_context {
+ u32 intr_mask[TH_1520_MBOX_CHANS];
+};
+#endif
+
+enum th1520_mbox_icu_cpu_id {
+ TH_1520_MBOX_ICU_KERNEL_CPU0, /* 910T */
+ TH_1520_MBOX_ICU_CPU1, /* 902 */
+ TH_1520_MBOX_ICU_CPU2, /* 906 */
+ TH_1520_MBOX_ICU_CPU3, /* 910R */
+};
+
+struct th1520_mbox_con_priv {
+ enum th1520_mbox_icu_cpu_id idx;
+ void __iomem *comm_local_base;
+ void __iomem *comm_remote_base;
+ char irq_desc[TH_1520_MBOX_CHAN_NAME_SIZE];
+ struct mbox_chan *chan;
+};
+
+struct th1520_mbox_priv {
+ struct device *dev;
+ void __iomem *local_icu[TH_1520_MBOX_CHANS];
+ void __iomem *remote_icu[TH_1520_MBOX_CHANS - 1];
+ void __iomem *cur_cpu_ch_base;
+ spinlock_t mbox_lock; /* control register lock */
+
+ struct mbox_controller mbox;
+ struct mbox_chan mbox_chans[TH_1520_MBOX_CHANS];
+ struct clk_bulk_data clocks[TH_1520_MBOX_CHANS];
+ struct th1520_mbox_con_priv con_priv[TH_1520_MBOX_CHANS];
+ int irq;
+#ifdef CONFIG_PM_SLEEP
+ struct th1520_mbox_context *ctx;
+#endif
+};
+
+static struct th1520_mbox_priv *
+to_th1520_mbox_priv(struct mbox_controller *mbox)
+{
+ return container_of(mbox, struct th1520_mbox_priv, mbox);
+}
+
+static void th1520_mbox_write(struct th1520_mbox_priv *priv, u32 val, u32 offs)
+{
+ iowrite32(val, priv->cur_cpu_ch_base + offs);
+}
+
+static u32 th1520_mbox_read(struct th1520_mbox_priv *priv, u32 offs)
+{
+ return ioread32(priv->cur_cpu_ch_base + offs);
+}
+
+static u32 th1520_mbox_rmw(struct th1520_mbox_priv *priv, u32 off, u32 set,
+ u32 clr)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&priv->mbox_lock, flags);
+ val = th1520_mbox_read(priv, off);
+ val &= ~clr;
+ val |= set;
+ th1520_mbox_write(priv, val, off);
+ spin_unlock_irqrestore(&priv->mbox_lock, flags);
+
+ return val;
+}
+
+static void th1520_mbox_chan_write(struct th1520_mbox_con_priv *cp, u32 val,
+ u32 offs, bool is_remote)
+{
+ if (is_remote)
+ iowrite32(val, cp->comm_remote_base + offs);
+ else
+ iowrite32(val, cp->comm_local_base + offs);
+}
+
+static u32 th1520_mbox_chan_read(struct th1520_mbox_con_priv *cp, u32 offs,
+ bool is_remote)
+{
+ if (is_remote)
+ return ioread32(cp->comm_remote_base + offs);
+ else
+ return ioread32(cp->comm_local_base + offs);
+}
+
+static void th1520_mbox_chan_rmw(struct th1520_mbox_con_priv *cp, u32 off,
+ u32 set, u32 clr, bool is_remote)
+{
+ struct th1520_mbox_priv *priv = to_th1520_mbox_priv(cp->chan->mbox);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&priv->mbox_lock, flags);
+ val = th1520_mbox_chan_read(cp, off, is_remote);
+ val &= ~clr;
+ val |= set;
+ th1520_mbox_chan_write(cp, val, off, is_remote);
+ spin_unlock_irqrestore(&priv->mbox_lock, flags);
+}
+
+static void th1520_mbox_chan_rd_data(struct th1520_mbox_con_priv *cp,
+ void *data, bool is_remote)
+{
+ u32 off = TH_1520_MBOX_INFO0;
+ u32 *arg = data;
+ u32 i;
+
+	/* Read info0 ~ info6 (28 bytes in total); the destination buffer
+	 * must be at least 28 bytes.
+	 */
+ for (i = 0; i < TH_1520_MBOX_DATA_INFO_NUM; i++) {
+ *arg = th1520_mbox_chan_read(cp, off, is_remote);
+ off += 4;
+ arg++;
+ }
+}
+
+static void th1520_mbox_chan_wr_data(struct th1520_mbox_con_priv *cp,
+ void *data, bool is_remote)
+{
+ u32 off = TH_1520_MBOX_INFO0;
+ u32 *arg = data;
+ u32 i;
+
+	/* Write info0 ~ info6 (28 bytes in total); the source buffer must
+	 * hold 28 bytes of valid data.
+	 */
+ for (i = 0; i < TH_1520_MBOX_DATA_INFO_NUM; i++) {
+ th1520_mbox_chan_write(cp, *arg, off, is_remote);
+ off += 4;
+ arg++;
+ }
+}
+
+static void th1520_mbox_chan_wr_ack(struct th1520_mbox_con_priv *cp, void *data,
+ bool is_remote)
+{
+ u32 off = TH_1520_MBOX_INFO7;
+ u32 *arg = data;
+
+ th1520_mbox_chan_write(cp, *arg, off, is_remote);
+}
+
+static int th1520_mbox_chan_id_to_mapbit(struct th1520_mbox_con_priv *cp)
+{
+ int mapbit = 0;
+ int i;
+
+ for (i = 0; i < TH_1520_MBOX_CHANS; i++) {
+ if (i == cp->idx)
+ return mapbit;
+
+ if (i != TH_1520_MBOX_ICU_KERNEL_CPU0)
+ mapbit++;
+ }
+
+ if (i == TH_1520_MBOX_CHANS)
+ dev_err(cp->chan->mbox->dev, "convert to mapbit failed\n");
+
+ return 0;
+}
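For clarity, the net effect of the loop above under the ICU enum earlier in
this file:

	/*
	 * channel idx -> STA/CLR/MASK bit, skipping the local CPU0 slot:
	 *   TH_1520_MBOX_ICU_CPU1 (idx 1) -> bit 0
	 *   TH_1520_MBOX_ICU_CPU2 (idx 2) -> bit 1
	 *   TH_1520_MBOX_ICU_CPU3 (idx 3) -> bit 2
	 * idx 0 (TH_1520_MBOX_ICU_KERNEL_CPU0) owns no bit of its own.
	 */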
+
+static irqreturn_t th1520_mbox_isr(int irq, void *p)
+{
+ struct mbox_chan *chan = p;
+ struct th1520_mbox_priv *priv = to_th1520_mbox_priv(chan->mbox);
+ struct th1520_mbox_con_priv *cp = chan->con_priv;
+ int mapbit = th1520_mbox_chan_id_to_mapbit(cp);
+ u32 sta, dat[TH_1520_MBOX_DATA_INFO_NUM];
+ u32 ack_magic = TH_1520_MBOX_ACK_MAGIC;
+ u32 info0_data, info7_data;
+
+ sta = th1520_mbox_read(priv, TH_1520_MBOX_STA);
+ if (!(sta & BIT(mapbit)))
+ return IRQ_NONE;
+
+ /* clear chan irq bit in STA register */
+ th1520_mbox_rmw(priv, TH_1520_MBOX_CLR, BIT(mapbit), 0);
+
+ /* info0 is the protocol word, should not be zero! */
+ info0_data = th1520_mbox_chan_read(cp, TH_1520_MBOX_INFO0, false);
+ if (info0_data) {
+ /* read info0~info6 data */
+ th1520_mbox_chan_rd_data(cp, dat, false);
+
+ /* clear local info0 */
+ th1520_mbox_chan_write(cp, 0x0, TH_1520_MBOX_INFO0, false);
+
+ /* notify remote cpu */
+ th1520_mbox_chan_wr_ack(cp, &ack_magic, true);
+ /* CPU1 902/906 use polling mode to monitor info7 */
+ if (cp->idx != TH_1520_MBOX_ICU_CPU1 &&
+ cp->idx != TH_1520_MBOX_ICU_CPU2)
+ th1520_mbox_chan_rmw(cp, TH_1520_MBOX_GEN,
+ TH_1520_MBOX_GEN_TX_ACK, 0, true);
+
+ /* transfer the data to client */
+ mbox_chan_received_data(chan, (void *)dat);
+ }
+
+	/* the info7 magic value marks a real ack signal, not the GEN bit7 pulse */
+ info7_data = th1520_mbox_chan_read(cp, TH_1520_MBOX_INFO7, false);
+ if (info7_data == TH_1520_MBOX_ACK_MAGIC) {
+ /* clear local info7 */
+ th1520_mbox_chan_write(cp, 0x0, TH_1520_MBOX_INFO7, false);
+
+ /* notify framework the last TX has completed */
+ mbox_chan_txdone(chan, 0);
+ }
+
+ if (!info0_data && !info7_data)
+ return IRQ_NONE;
+
+ return IRQ_HANDLED;
+}
+
+static int th1520_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct th1520_mbox_con_priv *cp = chan->con_priv;
+
+ th1520_mbox_chan_wr_data(cp, data, true);
+ th1520_mbox_chan_rmw(cp, TH_1520_MBOX_GEN, TH_1520_MBOX_GEN_RX_DATA, 0,
+ true);
+ return 0;
+}
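Taken together, the ISR and the send path implement a small two-register
handshake; summarized from the code above:

	/*
	 * TX:  write the payload into the remote ICU's INFO0..INFO6, then
	 *      pulse GEN bit6 (RX_DATA) to interrupt the remote side.
	 * RX:  the ISR sees its STA bit, reads local INFO0..INFO6, clears
	 *      INFO0, writes ACK_MAGIC into the sender's INFO7 and pulses
	 *      GEN bit7 (TX_ACK) so the sender's ISR calls mbox_chan_txdone().
	 * INFO0 doubles as the "message present" flag, which is why a valid
	 *      payload must never start with zero.
	 */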
+
+static int th1520_mbox_startup(struct mbox_chan *chan)
+{
+ struct th1520_mbox_priv *priv = to_th1520_mbox_priv(chan->mbox);
+ struct th1520_mbox_con_priv *cp = chan->con_priv;
+ u32 data[8] = {};
+ int mask_bit;
+ int ret;
+
+ /* clear local and remote generate and info0~info7 */
+ th1520_mbox_chan_rmw(cp, TH_1520_MBOX_GEN, 0x0, 0xff, true);
+ th1520_mbox_chan_rmw(cp, TH_1520_MBOX_GEN, 0x0, 0xff, false);
+ th1520_mbox_chan_wr_ack(cp, &data[7], true);
+ th1520_mbox_chan_wr_ack(cp, &data[7], false);
+ th1520_mbox_chan_wr_data(cp, &data[0], true);
+ th1520_mbox_chan_wr_data(cp, &data[0], false);
+
+ /* enable the chan mask */
+ mask_bit = th1520_mbox_chan_id_to_mapbit(cp);
+ th1520_mbox_rmw(priv, TH_1520_MBOX_MASK, BIT(mask_bit), 0);
+
+ /*
+ * Mixing devm_ managed resources with manual IRQ handling is generally
+ * discouraged due to potential complexities with resource management,
+ * especially when dealing with shared interrupts. However, in this case,
+ * the approach is safe and effective because:
+ *
+ * 1. Each mailbox channel requests its IRQ within the .startup() callback
+ * and frees it within the .shutdown() callback.
+ * 2. During device unbinding, the devm_ managed mailbox controller first
+ * iterates through all channels, ensuring that their IRQs are freed before
+ * any other devm_ resources are released.
+ *
+ * This ordering guarantees that no interrupts can be triggered from the device
+ * while it is being unbound, preventing race conditions and ensuring system
+ * stability.
+ */
+ ret = request_irq(priv->irq, th1520_mbox_isr,
+ IRQF_SHARED | IRQF_NO_SUSPEND, cp->irq_desc, chan);
+ if (ret) {
+ dev_err(priv->dev, "Unable to acquire IRQ %d\n", priv->irq);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void th1520_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct th1520_mbox_priv *priv = to_th1520_mbox_priv(chan->mbox);
+ struct th1520_mbox_con_priv *cp = chan->con_priv;
+ int mask_bit;
+
+ free_irq(priv->irq, chan);
+
+ /* clear the chan mask */
+ mask_bit = th1520_mbox_chan_id_to_mapbit(cp);
+ th1520_mbox_rmw(priv, TH_1520_MBOX_MASK, 0, BIT(mask_bit));
+}
+
+static const struct mbox_chan_ops th1520_mbox_ops = {
+ .send_data = th1520_mbox_send_data,
+ .startup = th1520_mbox_startup,
+ .shutdown = th1520_mbox_shutdown,
+};
+
+static int th1520_mbox_init_generic(struct th1520_mbox_priv *priv)
+{
+#ifdef CONFIG_PM_SLEEP
+ priv->ctx = devm_kzalloc(priv->dev, sizeof(*priv->ctx), GFP_KERNEL);
+ if (!priv->ctx)
+ return -ENOMEM;
+#endif
+ /* Set default configuration */
+ th1520_mbox_write(priv, 0xff, TH_1520_MBOX_CLR);
+ th1520_mbox_write(priv, 0x0, TH_1520_MBOX_MASK);
+ return 0;
+}
+
+static struct mbox_chan *th1520_mbox_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *sp)
+{
+ u32 chan;
+
+ if (sp->args_count != 1) {
+ dev_err(mbox->dev, "Invalid argument count %d\n",
+ sp->args_count);
+ return ERR_PTR(-EINVAL);
+ }
+
+ chan = sp->args[0]; /* comm remote channel */
+
+ if (chan >= mbox->num_chans) {
+		dev_err(mbox->dev, "Unsupported channel number: %d\n", chan);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (chan == TH_1520_MBOX_ICU_KERNEL_CPU0) {
+ dev_err(mbox->dev, "Cannot communicate with yourself\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &mbox->chans[chan];
+}
+
+static void __iomem *th1520_map_mmio(struct platform_device *pdev,
+ char *res_name, size_t offset)
+{
+ void __iomem *mapped;
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
+
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get resource: %s\n", res_name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ mapped = devm_ioremap(&pdev->dev, res->start + offset,
+ resource_size(res) - offset);
+ if (!mapped) {
+ dev_err(&pdev->dev, "Failed to map resource: %s\n", res_name);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return mapped;
+}
+
+static void th1520_disable_clk(void *data)
+{
+ struct th1520_mbox_priv *priv = data;
+
+ clk_bulk_disable_unprepare(ARRAY_SIZE(priv->clocks), priv->clocks);
+}
+
+static int th1520_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct th1520_mbox_priv *priv;
+ unsigned int remote_idx = 0;
+ unsigned int i;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+
+ priv->clocks[0].id = "clk-local";
+ priv->clocks[1].id = "clk-remote-icu0";
+ priv->clocks[2].id = "clk-remote-icu1";
+ priv->clocks[3].id = "clk-remote-icu2";
+
+ ret = devm_clk_bulk_get(dev, ARRAY_SIZE(priv->clocks),
+ priv->clocks);
+ if (ret) {
+ dev_err(dev, "Failed to get clocks\n");
+ return ret;
+ }
+
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(priv->clocks), priv->clocks);
+ if (ret) {
+ dev_err(dev, "Failed to enable clocks\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev, th1520_disable_clk, priv);
+ if (ret)
+ return ret;
+
+ /*
+ * The address mappings in the device tree align precisely with those
+ * outlined in the manual. However, register offsets within these
+ * mapped regions are irregular, particularly for remote-icu0.
+ * Consequently, th1520_map_mmio() requires an additional parameter to
+ * handle this quirk.
+ */
+ priv->local_icu[TH_1520_MBOX_ICU_KERNEL_CPU0] =
+ th1520_map_mmio(pdev, "local", 0x0);
+ if (IS_ERR(priv->local_icu[TH_1520_MBOX_ICU_KERNEL_CPU0]))
+ return PTR_ERR(priv->local_icu[TH_1520_MBOX_ICU_KERNEL_CPU0]);
+
+ priv->remote_icu[0] = th1520_map_mmio(pdev, "remote-icu0", 0x4000);
+ if (IS_ERR(priv->remote_icu[0]))
+ return PTR_ERR(priv->remote_icu[0]);
+
+ priv->remote_icu[1] = th1520_map_mmio(pdev, "remote-icu1", 0x0);
+ if (IS_ERR(priv->remote_icu[1]))
+ return PTR_ERR(priv->remote_icu[1]);
+
+ priv->remote_icu[2] = th1520_map_mmio(pdev, "remote-icu2", 0x0);
+ if (IS_ERR(priv->remote_icu[2]))
+ return PTR_ERR(priv->remote_icu[2]);
+
+ priv->local_icu[TH_1520_MBOX_ICU_CPU1] =
+ priv->local_icu[TH_1520_MBOX_ICU_KERNEL_CPU0] +
+ TH_1520_MBOX_CHAN_RES_SIZE;
+ priv->local_icu[TH_1520_MBOX_ICU_CPU2] =
+ priv->local_icu[TH_1520_MBOX_ICU_CPU1] +
+ TH_1520_MBOX_CHAN_RES_SIZE;
+ priv->local_icu[TH_1520_MBOX_ICU_CPU3] =
+ priv->local_icu[TH_1520_MBOX_ICU_CPU2] +
+ TH_1520_MBOX_CHAN_RES_SIZE;
+
+ priv->cur_cpu_ch_base = priv->local_icu[TH_1520_MBOX_ICU_KERNEL_CPU0];
+
+ priv->irq = platform_get_irq(pdev, 0);
+ if (priv->irq < 0)
+ return priv->irq;
+
+ /* init the chans */
+ for (i = 0; i < TH_1520_MBOX_CHANS; i++) {
+ struct th1520_mbox_con_priv *cp = &priv->con_priv[i];
+
+ cp->idx = i;
+ cp->chan = &priv->mbox_chans[i];
+ priv->mbox_chans[i].con_priv = cp;
+ snprintf(cp->irq_desc, sizeof(cp->irq_desc),
+ "th1520_mbox_chan[%i]", cp->idx);
+
+ cp->comm_local_base = priv->local_icu[i];
+ if (i != TH_1520_MBOX_ICU_KERNEL_CPU0) {
+ cp->comm_remote_base = priv->remote_icu[remote_idx];
+ remote_idx++;
+ }
+ }
+
+ spin_lock_init(&priv->mbox_lock);
+
+ priv->mbox.dev = dev;
+ priv->mbox.ops = &th1520_mbox_ops;
+ priv->mbox.chans = priv->mbox_chans;
+ priv->mbox.num_chans = TH_1520_MBOX_CHANS;
+ priv->mbox.of_xlate = th1520_mbox_xlate;
+ priv->mbox.txdone_irq = true;
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = th1520_mbox_init_generic(priv);
+ if (ret) {
+ dev_err(dev, "Failed to init mailbox context\n");
+ return ret;
+ }
+
+ return devm_mbox_controller_register(dev, &priv->mbox);
+}
+
+static const struct of_device_id th1520_mbox_dt_ids[] = {
+ { .compatible = "thead,th1520-mbox" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, th1520_mbox_dt_ids);
+
+#ifdef CONFIG_PM_SLEEP
+static int __maybe_unused th1520_mbox_suspend_noirq(struct device *dev)
+{
+ struct th1520_mbox_priv *priv = dev_get_drvdata(dev);
+ struct th1520_mbox_context *ctx = priv->ctx;
+ u32 i;
+ /*
+ * Only the interrupt mask bits are saved and restored.
+ * All INFO data is assumed to be lost across suspend.
+ */
+ for (i = 0; i < TH_1520_MBOX_CHANS; i++) {
+ ctx->intr_mask[i] =
+ ioread32(priv->local_icu[i] + TH_1520_MBOX_MASK);
+ }
+ return 0;
+}
+
+static int __maybe_unused th1520_mbox_resume_noirq(struct device *dev)
+{
+ struct th1520_mbox_priv *priv = dev_get_drvdata(dev);
+ struct th1520_mbox_context *ctx = priv->ctx;
+ u32 i;
+
+ for (i = 0; i < TH_1520_MBOX_CHANS; i++) {
+ iowrite32(ctx->intr_mask[i],
+ priv->local_icu[i] + TH_1520_MBOX_MASK);
+ }
+
+ return 0;
+}
+#endif
+
+static int __maybe_unused th1520_mbox_runtime_suspend(struct device *dev)
+{
+ struct th1520_mbox_priv *priv = dev_get_drvdata(dev);
+
+ clk_bulk_disable_unprepare(ARRAY_SIZE(priv->clocks), priv->clocks);
+
+ return 0;
+}
+
+static int __maybe_unused th1520_mbox_runtime_resume(struct device *dev)
+{
+ struct th1520_mbox_priv *priv = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(priv->clocks), priv->clocks);
+ if (ret)
+ dev_err(dev, "Failed to enable clocks in runtime resume\n");
+
+ return ret;
+}
+
+static const struct dev_pm_ops th1520_mbox_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(th1520_mbox_suspend_noirq,
+ th1520_mbox_resume_noirq)
+#endif
+ SET_RUNTIME_PM_OPS(th1520_mbox_runtime_suspend,
+ th1520_mbox_runtime_resume, NULL)
+};
+
+static struct platform_driver th1520_mbox_driver = {
+ .probe = th1520_mbox_probe,
+ .driver = {
+ .name = "th1520-mbox",
+ .of_match_table = th1520_mbox_dt_ids,
+ .pm = &th1520_mbox_pm_ops,
+ },
+};
+module_platform_driver(th1520_mbox_driver);
+
+MODULE_DESCRIPTION("Thead TH-1520 mailbox IPC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 4229b9b5da98..2acc6ec229a4 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -6,17 +6,17 @@
* Author: Jassi Brar <jassisinghbrar@gmail.com>
*/
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
+#include <linux/cleanup.h>
#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/module.h>
#include <linux/device.h>
-#include <linux/bitops.h>
+#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/property.h>
+#include <linux/spinlock.h>
#include "mailbox.h"
@@ -26,15 +26,12 @@ static DEFINE_MUTEX(con_mutex);
static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
{
int idx;
- unsigned long flags;
- spin_lock_irqsave(&chan->lock, flags);
+ guard(spinlock_irqsave)(&chan->lock);
/* See if there is any space left */
- if (chan->msg_count == MBOX_TX_QUEUE_LEN) {
- spin_unlock_irqrestore(&chan->lock, flags);
+ if (chan->msg_count == MBOX_TX_QUEUE_LEN)
return -ENOBUFS;
- }
idx = chan->msg_free;
chan->msg_data[idx] = mssg;
@@ -45,60 +42,53 @@ static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
else
chan->msg_free++;
- spin_unlock_irqrestore(&chan->lock, flags);
-
return idx;
}
static void msg_submit(struct mbox_chan *chan)
{
unsigned count, idx;
- unsigned long flags;
void *data;
int err = -EBUSY;
- spin_lock_irqsave(&chan->lock, flags);
-
- if (!chan->msg_count || chan->active_req)
- goto exit;
+ scoped_guard(spinlock_irqsave, &chan->lock) {
+ if (!chan->msg_count || chan->active_req)
+ break;
- count = chan->msg_count;
- idx = chan->msg_free;
- if (idx >= count)
- idx -= count;
- else
- idx += MBOX_TX_QUEUE_LEN - count;
+ count = chan->msg_count;
+ idx = chan->msg_free;
+ if (idx >= count)
+ idx -= count;
+ else
+ idx += MBOX_TX_QUEUE_LEN - count;
- data = chan->msg_data[idx];
+ data = chan->msg_data[idx];
- if (chan->cl->tx_prepare)
- chan->cl->tx_prepare(chan->cl, data);
- /* Try to submit a message to the MBOX controller */
- err = chan->mbox->ops->send_data(chan, data);
- if (!err) {
- chan->active_req = data;
- chan->msg_count--;
+ if (chan->cl->tx_prepare)
+ chan->cl->tx_prepare(chan->cl, data);
+ /* Try to submit a message to the MBOX controller */
+ err = chan->mbox->ops->send_data(chan, data);
+ if (!err) {
+ chan->active_req = data;
+ chan->msg_count--;
+ }
}
-exit:
- spin_unlock_irqrestore(&chan->lock, flags);
if (!err && (chan->txdone_method & TXDONE_BY_POLL)) {
/* kick start the timer immediately to avoid delays */
- spin_lock_irqsave(&chan->mbox->poll_hrt_lock, flags);
- hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
- spin_unlock_irqrestore(&chan->mbox->poll_hrt_lock, flags);
+ scoped_guard(spinlock_irqsave, &chan->mbox->poll_hrt_lock)
+ hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
}
}
static void tx_tick(struct mbox_chan *chan, int r)
{
- unsigned long flags;
void *mssg;
- spin_lock_irqsave(&chan->lock, flags);
- mssg = chan->active_req;
- chan->active_req = NULL;
- spin_unlock_irqrestore(&chan->lock, flags);
+ scoped_guard(spinlock_irqsave, &chan->lock) {
+ mssg = chan->active_req;
+ chan->active_req = NULL;
+ }
/* Submit next message */
msg_submit(chan);
@@ -120,7 +110,6 @@ static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
container_of(hrtimer, struct mbox_controller, poll_hrt);
bool txdone, resched = false;
int i;
- unsigned long flags;
for (i = 0; i < mbox->num_chans; i++) {
struct mbox_chan *chan = &mbox->chans[i];
@@ -135,10 +124,10 @@ static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
}
if (resched) {
- spin_lock_irqsave(&mbox->poll_hrt_lock, flags);
- if (!hrtimer_is_queued(hrtimer))
- hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period));
- spin_unlock_irqrestore(&mbox->poll_hrt_lock, flags);
+ scoped_guard(spinlock_irqsave, &mbox->poll_hrt_lock) {
+ if (!hrtimer_is_queued(hrtimer))
+ hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period));
+ }
return HRTIMER_RESTART;
}
@@ -317,6 +306,65 @@ int mbox_flush(struct mbox_chan *chan, unsigned long timeout)
}
EXPORT_SYMBOL_GPL(mbox_flush);
+static int __mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
+{
+ struct device *dev = cl->dev;
+ int ret;
+
+ if (chan->cl || !try_module_get(chan->mbox->dev->driver->owner)) {
+ dev_err(dev, "%s: mailbox not free\n", __func__);
+ return -EBUSY;
+ }
+
+ scoped_guard(spinlock_irqsave, &chan->lock) {
+ chan->msg_free = 0;
+ chan->msg_count = 0;
+ chan->active_req = NULL;
+ chan->cl = cl;
+ init_completion(&chan->tx_complete);
+
+ if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
+ chan->txdone_method = TXDONE_BY_ACK;
+ }
+
+ if (chan->mbox->ops->startup) {
+ ret = chan->mbox->ops->startup(chan);
+
+ if (ret) {
+ dev_err(dev, "Unable to startup the chan (%d)\n", ret);
+ mbox_free_channel(chan);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * mbox_bind_client - Request a mailbox channel.
+ * @chan: The mailbox channel to bind the client to.
+ * @cl: Identity of the client requesting the channel.
+ *
+ * The client specifies its requirements and capabilities when requesting
+ * a mailbox channel. This function must not be called from atomic context.
+ * The channel is exclusively allocated and can't be used by another
+ * client before the owner calls mbox_free_channel().
+ * After assignment, any packet received on this channel will be
+ * handed over to the client via the 'rx_callback'.
+ * The framework holds a reference to the client, so the mbox_client
+ * structure shouldn't be modified until mbox_free_channel() returns.
+ *
+ * Return: 0 if the channel was assigned to the client successfully.
+ * <0 for request failure.
+ */
+int mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
+{
+ guard(mutex)(&con_mutex);
+
+ return __mbox_bind_client(chan, cl);
+}
+EXPORT_SYMBOL_GPL(mbox_bind_client);
+
/**
* mbox_request_channel - Request a mailbox channel.
* @cl: Identity of the client requesting the channel.
@@ -336,71 +384,65 @@ EXPORT_SYMBOL_GPL(mbox_flush);
*/
struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
{
- struct device *dev = cl->dev;
+ struct fwnode_reference_args fwspec;
+ struct fwnode_handle *fwnode;
struct mbox_controller *mbox;
struct of_phandle_args spec;
struct mbox_chan *chan;
- unsigned long flags;
+ struct device *dev;
+ unsigned int i;
int ret;
- if (!dev || !dev->of_node) {
- pr_debug("%s: No owner device node\n", __func__);
+ dev = cl->dev;
+ if (!dev) {
+ pr_debug("No owner device\n");
return ERR_PTR(-ENODEV);
}
- mutex_lock(&con_mutex);
-
- if (of_parse_phandle_with_args(dev->of_node, "mboxes",
- "#mbox-cells", index, &spec)) {
- dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__);
- mutex_unlock(&con_mutex);
+ fwnode = dev_fwnode(dev);
+ if (!fwnode) {
+ dev_dbg(dev, "No owner fwnode\n");
return ERR_PTR(-ENODEV);
}
- chan = ERR_PTR(-EPROBE_DEFER);
- list_for_each_entry(mbox, &mbox_cons, node)
- if (mbox->dev->of_node == spec.np) {
- chan = mbox->of_xlate(mbox, &spec);
- if (!IS_ERR(chan))
- break;
- }
-
- of_node_put(spec.np);
-
- if (IS_ERR(chan)) {
- mutex_unlock(&con_mutex);
- return chan;
- }
-
- if (chan->cl || !try_module_get(mbox->dev->driver->owner)) {
- dev_dbg(dev, "%s: mailbox not free\n", __func__);
- mutex_unlock(&con_mutex);
- return ERR_PTR(-EBUSY);
+ ret = fwnode_property_get_reference_args(fwnode, "mboxes", "#mbox-cells",
+ 0, index, &fwspec);
+ if (ret) {
+ dev_err(dev, "%s: can't parse \"%s\" property\n", __func__, "mboxes");
+ return ERR_PTR(ret);
}
- spin_lock_irqsave(&chan->lock, flags);
- chan->msg_free = 0;
- chan->msg_count = 0;
- chan->active_req = NULL;
- chan->cl = cl;
- init_completion(&chan->tx_complete);
-
- if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
- chan->txdone_method = TXDONE_BY_ACK;
+ spec.np = to_of_node(fwspec.fwnode);
+ spec.args_count = fwspec.nargs;
+ for (i = 0; i < spec.args_count; i++)
+ spec.args[i] = fwspec.args[i];
+
+ scoped_guard(mutex, &con_mutex) {
+ chan = ERR_PTR(-EPROBE_DEFER);
+ list_for_each_entry(mbox, &mbox_cons, node) {
+ if (device_match_fwnode(mbox->dev, fwspec.fwnode)) {
+ if (mbox->fw_xlate) {
+ chan = mbox->fw_xlate(mbox, &fwspec);
+ if (!IS_ERR(chan))
+ break;
+ } else if (mbox->of_xlate) {
+ chan = mbox->of_xlate(mbox, &spec);
+ if (!IS_ERR(chan))
+ break;
+ }
+ }
+ }
- spin_unlock_irqrestore(&chan->lock, flags);
+ fwnode_handle_put(fwspec.fwnode);
- if (chan->mbox->ops->startup) {
- ret = chan->mbox->ops->startup(chan);
+ if (IS_ERR(chan))
+ return chan;
- if (ret) {
- dev_err(dev, "Unable to startup the chan (%d)\n", ret);
- mbox_free_channel(chan);
+ ret = __mbox_bind_client(chan, cl);
+ if (ret)
chan = ERR_PTR(ret);
- }
}
- mutex_unlock(&con_mutex);
return chan;
}
EXPORT_SYMBOL_GPL(mbox_request_channel);
@@ -408,31 +450,14 @@ EXPORT_SYMBOL_GPL(mbox_request_channel);
struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
const char *name)
{
- struct device_node *np = cl->dev->of_node;
- struct property *prop;
- const char *mbox_name;
- int index = 0;
-
- if (!np) {
- dev_err(cl->dev, "%s() currently only supports DT\n", __func__);
- return ERR_PTR(-EINVAL);
- }
-
- if (!of_get_property(np, "mbox-names", NULL)) {
- dev_err(cl->dev,
- "%s() requires an \"mbox-names\" property\n", __func__);
- return ERR_PTR(-EINVAL);
- }
+ int index = device_property_match_string(cl->dev, "mbox-names", name);
- of_property_for_each_string(np, "mbox-names", prop, mbox_name) {
- if (!strncmp(name, mbox_name, strlen(name)))
- return mbox_request_channel(cl, index);
- index++;
+ if (index < 0) {
+ dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
+ __func__, name);
+ return ERR_PTR(index);
}
-
- dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
- __func__, name);
- return ERR_PTR(-EINVAL);
+ return mbox_request_channel(cl, index);
}
EXPORT_SYMBOL_GPL(mbox_request_channel_byname);
@@ -443,8 +468,6 @@ EXPORT_SYMBOL_GPL(mbox_request_channel_byname);
*/
void mbox_free_channel(struct mbox_chan *chan)
{
- unsigned long flags;
-
if (!chan || !chan->cl)
return;
@@ -452,20 +475,19 @@ void mbox_free_channel(struct mbox_chan *chan)
chan->mbox->ops->shutdown(chan);
/* The queued TX requests are simply aborted, no callbacks are made */
- spin_lock_irqsave(&chan->lock, flags);
- chan->cl = NULL;
- chan->active_req = NULL;
- if (chan->txdone_method == TXDONE_BY_ACK)
- chan->txdone_method = TXDONE_BY_POLL;
+ scoped_guard(spinlock_irqsave, &chan->lock) {
+ chan->cl = NULL;
+ chan->active_req = NULL;
+ if (chan->txdone_method == TXDONE_BY_ACK)
+ chan->txdone_method = TXDONE_BY_POLL;
+ }
module_put(chan->mbox->dev->driver->owner);
- spin_unlock_irqrestore(&chan->lock, flags);
}
EXPORT_SYMBOL_GPL(mbox_free_channel);
-static struct mbox_chan *
-of_mbox_index_xlate(struct mbox_controller *mbox,
- const struct of_phandle_args *sp)
+static struct mbox_chan *fw_mbox_index_xlate(struct mbox_controller *mbox,
+ const struct fwnode_reference_args *sp)
{
int ind = sp->args[0];
@@ -503,9 +525,7 @@ int mbox_controller_register(struct mbox_controller *mbox)
return -EINVAL;
}
- hrtimer_init(&mbox->poll_hrt, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- mbox->poll_hrt.function = txdone_hrtimer;
+ hrtimer_setup(&mbox->poll_hrt, txdone_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
spin_lock_init(&mbox->poll_hrt_lock);
}
@@ -518,12 +538,11 @@ int mbox_controller_register(struct mbox_controller *mbox)
spin_lock_init(&chan->lock);
}
- if (!mbox->of_xlate)
- mbox->of_xlate = of_mbox_index_xlate;
+ if (!mbox->fw_xlate && !mbox->of_xlate)
+ mbox->fw_xlate = fw_mbox_index_xlate;
- mutex_lock(&con_mutex);
- list_add_tail(&mbox->node, &mbox_cons);
- mutex_unlock(&con_mutex);
+ scoped_guard(mutex, &con_mutex)
+ list_add_tail(&mbox->node, &mbox_cons);
return 0;
}
@@ -540,17 +559,15 @@ void mbox_controller_unregister(struct mbox_controller *mbox)
if (!mbox)
return;
- mutex_lock(&con_mutex);
+ scoped_guard(mutex, &con_mutex) {
+ list_del(&mbox->node);
- list_del(&mbox->node);
+ for (i = 0; i < mbox->num_chans; i++)
+ mbox_free_channel(&mbox->chans[i]);
- for (i = 0; i < mbox->num_chans; i++)
- mbox_free_channel(&mbox->chans[i]);
-
- if (mbox->txdone_poll)
- hrtimer_cancel(&mbox->poll_hrt);
-
- mutex_unlock(&con_mutex);
+ if (mbox->txdone_poll)
+ hrtimer_cancel(&mbox->poll_hrt);
+ }
}
EXPORT_SYMBOL_GPL(mbox_controller_unregister);
@@ -561,16 +578,6 @@ static void __devm_mbox_controller_unregister(struct device *dev, void *res)
mbox_controller_unregister(*mbox);
}
-static int devm_mbox_controller_match(struct device *dev, void *res, void *data)
-{
- struct mbox_controller **mbox = res;
-
- if (WARN_ON(!mbox || !*mbox))
- return 0;
-
- return *mbox == data;
-}
-
/**
* devm_mbox_controller_register() - managed mbox_controller_register()
* @dev: device owning the mailbox controller being registered
@@ -606,20 +613,3 @@ int devm_mbox_controller_register(struct device *dev,
return 0;
}
EXPORT_SYMBOL_GPL(devm_mbox_controller_register);
-
-/**
- * devm_mbox_controller_unregister() - managed mbox_controller_unregister()
- * @dev: device owning the mailbox controller being unregistered
- * @mbox: mailbox controller being unregistered
- *
- * This function unregisters the mailbox controller and removes the device-
- * managed resource that was set up to automatically unregister the mailbox
- * controller on driver probe failure or driver removal. It's typically not
- * necessary to call this function.
- */
-void devm_mbox_controller_unregister(struct device *dev, struct mbox_controller *mbox)
-{
- WARN_ON(devres_release(dev, __devm_mbox_controller_unregister,
- devm_mbox_controller_match, mbox));
-}
-EXPORT_SYMBOL_GPL(devm_mbox_controller_unregister);
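
The rework above converts mailbox.c's open-coded spin_lock_irqsave()/spin_unlock_irqrestore() and mutex pairs to the scope-based guards from <linux/cleanup.h>, which unlock automatically when the enclosing scope ends, including on early return. A minimal sketch of the two forms the patch uses (demo_* names are illustrative):

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static int demo_count;

static int demo_take(void)
{
	/* Lock held until the function returns, even on the error path. */
	guard(spinlock_irqsave)(&demo_lock);

	if (!demo_count)
		return -ENODATA;	/* unlock happens automatically */

	return demo_count--;
}

static void demo_give(void)
{
	/* Lock held only for the statement (or block) that follows. */
	scoped_guard(spinlock_irqsave, &demo_lock)
		demo_count++;
}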
diff --git a/drivers/mailbox/mailbox.h b/drivers/mailbox/mailbox.h
index 046d6d258b32..e1ec4efab693 100644
--- a/drivers/mailbox/mailbox.h
+++ b/drivers/mailbox/mailbox.h
@@ -3,6 +3,8 @@
#ifndef __MAILBOX_H
#define __MAILBOX_H
+#include <linux/bits.h>
+
#define TXDONE_BY_IRQ BIT(0) /* controller has remote RTR irq */
#define TXDONE_BY_POLL BIT(1) /* controller can read status of last TX */
#define TXDONE_BY_ACK BIT(2) /* S/W ACK received by Client ticks the TX */
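
For context, the three TXDONE_* methods above follow from how the controller registers and what the client declares. A rough sketch using only fields that exist in struct mbox_controller; the helper itself is illustrative, not kernel API:

#include <linux/mailbox_controller.h>

/* Illustrative: which mbox_controller fields select each TXDONE_* method. */
static void demo_pick_txdone(struct mbox_controller *ctrl, bool has_tx_irq)
{
	if (has_tx_irq) {
		/* TXDONE_BY_IRQ: driver calls mbox_chan_txdone() from its ISR */
		ctrl->txdone_irq = true;
	} else {
		/* TXDONE_BY_POLL: core polls ops->last_tx_done() via hrtimer */
		ctrl->txdone_poll = true;
		ctrl->txpoll_period = 1;	/* ms, illustrative */
	}
	/*
	 * With neither flag set, a client declaring cl->knows_txdone gets
	 * TXDONE_BY_ACK and signals completion via mbox_client_txdone().
	 */
}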
diff --git a/drivers/mailbox/mtk-adsp-mailbox.c b/drivers/mailbox/mtk-adsp-mailbox.c
index 14bc0057de81..91487aa4d7da 100644
--- a/drivers/mailbox/mtk-adsp-mailbox.c
+++ b/drivers/mailbox/mtk-adsp-mailbox.c
@@ -10,7 +10,8 @@
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
struct mtk_adsp_mbox_priv {
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index b18d47ea13a0..5791f80f995a 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -13,13 +13,15 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox/mtk-cmdq-mailbox.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+
+#define CMDQ_MBOX_AUTOSUSPEND_DELAY_MS 100
#define CMDQ_OP_CODE_MASK (0xff << CMDQ_OP_CODE_SHIFT)
#define CMDQ_NUM_CMD(t) (t->cmd_buf_size / CMDQ_INST_SIZE)
-#define CMDQ_GCE_NUM_MAX (2)
#define CMDQ_CURR_IRQ_STATUS 0x10
#define CMDQ_SYNC_TOKEN_UPDATE 0x68
@@ -78,7 +80,7 @@ struct cmdq {
u32 irq_mask;
const struct gce_plat *pdata;
struct cmdq_thread *thread;
- struct clk_bulk_data clocks[CMDQ_GCE_NUM_MAX];
+ struct clk_bulk_data *clocks;
bool suspended;
};
@@ -90,16 +92,16 @@ struct gce_plat {
u32 gce_num;
};
-static void cmdq_sw_ddr_enable(struct cmdq *cmdq, bool enable)
+static inline u32 cmdq_convert_gce_addr(dma_addr_t addr, const struct gce_plat *pdata)
{
- WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
-
- if (enable)
- writel(GCE_DDR_EN | GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);
- else
- writel(GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);
+ /* Convert DMA addr (PA or IOVA) to GCE readable addr */
+ return addr >> pdata->shift;
+}
- clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
+static inline dma_addr_t cmdq_revert_gce_addr(u32 addr, const struct gce_plat *pdata)
+{
+ /* Revert GCE readable addr to DMA addr (PA or IOVA) */
+ return (dma_addr_t)addr << pdata->shift;
}
u8 cmdq_get_shift_pa(struct mbox_chan *chan)
@@ -110,6 +112,19 @@ u8 cmdq_get_shift_pa(struct mbox_chan *chan)
}
EXPORT_SYMBOL(cmdq_get_shift_pa);
+static void cmdq_gctl_value_toggle(struct cmdq *cmdq, bool ddr_enable)
+{
+ u32 val = cmdq->pdata->control_by_sw ? GCE_CTRL_BY_SW : 0;
+
+ if (!cmdq->pdata->control_by_sw && !cmdq->pdata->sw_ddr_en)
+ return;
+
+ if (cmdq->pdata->sw_ddr_en && ddr_enable)
+ val |= GCE_DDR_EN;
+
+ writel(val, cmdq->base + GCE_GCTL_VALUE);
+}
+
static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread)
{
u32 status;
@@ -138,16 +153,10 @@ static void cmdq_thread_resume(struct cmdq_thread *thread)
static void cmdq_init(struct cmdq *cmdq)
{
int i;
- u32 gctl_regval = 0;
WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
- if (cmdq->pdata->control_by_sw)
- gctl_regval = GCE_CTRL_BY_SW;
- if (cmdq->pdata->sw_ddr_en)
- gctl_regval |= GCE_DDR_EN;
- if (gctl_regval)
- writel(gctl_regval, cmdq->base + GCE_GCTL_VALUE);
+ cmdq_gctl_value_toggle(cmdq, true);
writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
for (i = 0; i <= CMDQ_MAX_EVENT; i++)
@@ -191,13 +200,12 @@ static void cmdq_task_insert_into_thread(struct cmdq_task *task)
struct cmdq_task *prev_task = list_last_entry(
&thread->task_busy_list, typeof(*task), list_entry);
u64 *prev_task_base = prev_task->pkt->va_base;
+ u32 gce_addr = cmdq_convert_gce_addr(task->pa_base, task->cmdq->pdata);
/* let previous task jump to this task */
dma_sync_single_for_cpu(dev, prev_task->pa_base,
prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
- prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] =
- (u64)CMDQ_JUMP_BY_PA << 32 |
- (task->pa_base >> task->cmdq->pdata->shift);
+ prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] = (u64)CMDQ_JUMP_BY_PA << 32 | gce_addr;
dma_sync_single_for_device(dev, prev_task->pa_base,
prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
@@ -240,7 +248,8 @@ static void cmdq_thread_irq_handler(struct cmdq *cmdq,
struct cmdq_thread *thread)
{
struct cmdq_task *task, *tmp, *curr_task = NULL;
- u32 curr_pa, irq_flag, task_end_pa;
+ u32 irq_flag, gce_addr;
+ dma_addr_t curr_pa, task_end_pa;
bool err;
irq_flag = readl(thread->base + CMDQ_THR_IRQ_STATUS);
@@ -262,7 +271,8 @@ static void cmdq_thread_irq_handler(struct cmdq *cmdq,
else
return;
- curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) << cmdq->pdata->shift;
+ gce_addr = readl(thread->base + CMDQ_THR_CURR_ADDR);
+ curr_pa = cmdq_revert_gce_addr(gce_addr, cmdq->pdata);
list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
list_entry) {
@@ -283,10 +293,8 @@ static void cmdq_thread_irq_handler(struct cmdq *cmdq,
break;
}
- if (list_empty(&thread->task_busy_list)) {
+ if (list_empty(&thread->task_busy_list))
cmdq_thread_disable(cmdq, thread);
- clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
- }
}
static irqreturn_t cmdq_irq_handler(int irq, void *dev)
@@ -307,9 +315,33 @@ static irqreturn_t cmdq_irq_handler(int irq, void *dev)
spin_unlock_irqrestore(&thread->chan->lock, flags);
}
+ pm_runtime_mark_last_busy(cmdq->mbox.dev);
+
return IRQ_HANDLED;
}
+static int cmdq_runtime_resume(struct device *dev)
+{
+ struct cmdq *cmdq = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks);
+ if (ret)
+ return ret;
+
+ cmdq_gctl_value_toggle(cmdq, true);
+ return 0;
+}
+
+static int cmdq_runtime_suspend(struct device *dev)
+{
+ struct cmdq *cmdq = dev_get_drvdata(dev);
+
+ cmdq_gctl_value_toggle(cmdq, false);
+ clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
+ return 0;
+}
+
static int cmdq_suspend(struct device *dev)
{
struct cmdq *cmdq = dev_get_drvdata(dev);
@@ -330,36 +362,27 @@ static int cmdq_suspend(struct device *dev)
if (task_running)
dev_warn(dev, "exist running task(s) in suspend\n");
- if (cmdq->pdata->sw_ddr_en)
- cmdq_sw_ddr_enable(cmdq, false);
-
- clk_bulk_unprepare(cmdq->pdata->gce_num, cmdq->clocks);
-
- return 0;
+ return pm_runtime_force_suspend(dev);
}
static int cmdq_resume(struct device *dev)
{
struct cmdq *cmdq = dev_get_drvdata(dev);
- WARN_ON(clk_bulk_prepare(cmdq->pdata->gce_num, cmdq->clocks));
+ WARN_ON(pm_runtime_force_resume(dev));
cmdq->suspended = false;
- if (cmdq->pdata->sw_ddr_en)
- cmdq_sw_ddr_enable(cmdq, true);
-
return 0;
}
-static int cmdq_remove(struct platform_device *pdev)
+static void cmdq_remove(struct platform_device *pdev)
{
struct cmdq *cmdq = platform_get_drvdata(pdev);
- if (cmdq->pdata->sw_ddr_en)
- cmdq_sw_ddr_enable(cmdq, false);
+ if (!IS_ENABLED(CONFIG_PM))
+ cmdq_runtime_suspend(&pdev->dev);
clk_bulk_unprepare(cmdq->pdata->gce_num, cmdq->clocks);
- return 0;
}
static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
@@ -368,7 +391,8 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
struct cmdq_task *task;
- unsigned long curr_pa, end_pa;
+ u32 gce_addr;
+ dma_addr_t curr_pa, end_pa;
/* Client should not flush new tasks if suspended. */
WARN_ON(cmdq->suspended);
@@ -384,8 +408,6 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
task->pkt = pkt;
if (list_empty(&thread->task_busy_list)) {
- WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
-
/*
* The thread reset will clear thread related register to 0,
* including pc, end, priority, irq, suspend and enable. Thus
@@ -394,20 +416,20 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
*/
WARN_ON(cmdq_thread_reset(cmdq, thread) < 0);
- writel(task->pa_base >> cmdq->pdata->shift,
- thread->base + CMDQ_THR_CURR_ADDR);
- writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->pdata->shift,
- thread->base + CMDQ_THR_END_ADDR);
+ gce_addr = cmdq_convert_gce_addr(task->pa_base, cmdq->pdata);
+ writel(gce_addr, thread->base + CMDQ_THR_CURR_ADDR);
+ gce_addr = cmdq_convert_gce_addr(task->pa_base + pkt->cmd_buf_size, cmdq->pdata);
+ writel(gce_addr, thread->base + CMDQ_THR_END_ADDR);
writel(thread->priority, thread->base + CMDQ_THR_PRIORITY);
writel(CMDQ_THR_IRQ_EN, thread->base + CMDQ_THR_IRQ_ENABLE);
writel(CMDQ_THR_ENABLED, thread->base + CMDQ_THR_ENABLE_TASK);
} else {
WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
- curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) <<
- cmdq->pdata->shift;
- end_pa = readl(thread->base + CMDQ_THR_END_ADDR) <<
- cmdq->pdata->shift;
+ gce_addr = readl(thread->base + CMDQ_THR_CURR_ADDR);
+ curr_pa = cmdq_revert_gce_addr(gce_addr, cmdq->pdata);
+ gce_addr = readl(thread->base + CMDQ_THR_END_ADDR);
+ end_pa = cmdq_revert_gce_addr(gce_addr, cmdq->pdata);
/* check boundary */
if (curr_pa == end_pa - CMDQ_INST_SIZE ||
curr_pa == end_pa) {
@@ -439,6 +461,8 @@ static void cmdq_mbox_shutdown(struct mbox_chan *chan)
struct cmdq_task *task, *tmp;
unsigned long flags;
+ WARN_ON(pm_runtime_get_sync(cmdq->mbox.dev) < 0);
+
spin_lock_irqsave(&thread->chan->lock, flags);
if (list_empty(&thread->task_busy_list))
goto done;
@@ -457,7 +481,6 @@ static void cmdq_mbox_shutdown(struct mbox_chan *chan)
}
cmdq_thread_disable(cmdq, thread);
- clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
done:
/*
@@ -467,6 +490,9 @@ done:
* to do any operation here, only unlock and leave.
*/
spin_unlock_irqrestore(&thread->chan->lock, flags);
+
+ pm_runtime_mark_last_busy(cmdq->mbox.dev);
+ pm_runtime_put_autosuspend(cmdq->mbox.dev);
}
static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
@@ -477,6 +503,11 @@ static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
struct cmdq_task *task, *tmp;
unsigned long flags;
u32 enable;
+ int ret;
+
+ ret = pm_runtime_get_sync(cmdq->mbox.dev);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(&thread->chan->lock, flags);
if (list_empty(&thread->task_busy_list))
@@ -497,10 +528,12 @@ static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
cmdq_thread_resume(thread);
cmdq_thread_disable(cmdq, thread);
- clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
out:
spin_unlock_irqrestore(&thread->chan->lock, flags);
+ pm_runtime_mark_last_busy(cmdq->mbox.dev);
+ pm_runtime_put_autosuspend(cmdq->mbox.dev);
+
return 0;
wait:
@@ -513,6 +546,8 @@ wait:
return -EFAULT;
}
+ pm_runtime_mark_last_busy(cmdq->mbox.dev);
+ pm_runtime_put_autosuspend(cmdq->mbox.dev);
return 0;
}
@@ -539,16 +574,64 @@ static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox,
return &mbox->chans[ind];
}
+static int cmdq_get_clocks(struct device *dev, struct cmdq *cmdq)
+{
+ static const char * const gce_name = "gce";
+ struct device_node *node, *parent = dev->of_node->parent;
+ struct clk_bulk_data *clks;
+
+ cmdq->clocks = devm_kcalloc(dev, cmdq->pdata->gce_num,
+ sizeof(*cmdq->clocks), GFP_KERNEL);
+ if (!cmdq->clocks)
+ return -ENOMEM;
+
+ if (cmdq->pdata->gce_num == 1) {
+ clks = &cmdq->clocks[0];
+
+ clks->id = gce_name;
+ clks->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clks->clk))
+ return dev_err_probe(dev, PTR_ERR(clks->clk),
+ "failed to get gce clock\n");
+
+ return 0;
+ }
+
+ /*
+ * If there is more than one GCE, get the clocks for the others too,
+ * as the clock of the main GCE must be enabled for additional IPs
+ * to be reachable.
+ */
+ for_each_child_of_node(parent, node) {
+ int alias_id = of_alias_get_id(node, gce_name);
+
+ if (alias_id < 0 || alias_id >= cmdq->pdata->gce_num)
+ continue;
+
+ clks = &cmdq->clocks[alias_id];
+
+ clks->id = devm_kasprintf(dev, GFP_KERNEL, "gce%d", alias_id);
+ if (!clks->id) {
+ of_node_put(node);
+ return -ENOMEM;
+ }
+
+ clks->clk = of_clk_get(node, 0);
+ if (IS_ERR(clks->clk)) {
+ of_node_put(node);
+ return dev_err_probe(dev, PTR_ERR(clks->clk),
+ "failed to get gce%d clock\n", alias_id);
+ }
+ }
+
+ return 0;
+}
+
static int cmdq_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct cmdq *cmdq;
int err, i;
- struct device_node *phandle = dev->of_node;
- struct device_node *node;
- int alias_id = 0;
- static const char * const clk_name = "gce";
- static const char * const clk_names[] = { "gce0", "gce1" };
cmdq = devm_kzalloc(dev, sizeof(*cmdq), GFP_KERNEL);
if (!cmdq)
@@ -573,29 +656,12 @@ static int cmdq_probe(struct platform_device *pdev)
dev_dbg(dev, "cmdq device: addr:0x%p, va:0x%p, irq:%d\n",
dev, cmdq->base, cmdq->irq);
- if (cmdq->pdata->gce_num > 1) {
- for_each_child_of_node(phandle->parent, node) {
- alias_id = of_alias_get_id(node, clk_name);
- if (alias_id >= 0 && alias_id < cmdq->pdata->gce_num) {
- cmdq->clocks[alias_id].id = clk_names[alias_id];
- cmdq->clocks[alias_id].clk = of_clk_get(node, 0);
- if (IS_ERR(cmdq->clocks[alias_id].clk)) {
- of_node_put(node);
- return dev_err_probe(dev,
- PTR_ERR(cmdq->clocks[alias_id].clk),
- "failed to get gce clk: %d\n",
- alias_id);
- }
- }
- }
- } else {
- cmdq->clocks[alias_id].id = clk_name;
- cmdq->clocks[alias_id].clk = devm_clk_get(&pdev->dev, clk_name);
- if (IS_ERR(cmdq->clocks[alias_id].clk)) {
- return dev_err_probe(dev, PTR_ERR(cmdq->clocks[alias_id].clk),
- "failed to get gce clk\n");
- }
- }
+ err = cmdq_get_clocks(dev, cmdq);
+ if (err)
+ return err;
+
+ dma_set_coherent_mask(dev,
+ DMA_BIT_MASK(sizeof(u32) * BITS_PER_BYTE + cmdq->pdata->shift));
cmdq->mbox.dev = dev;
cmdq->mbox.chans = devm_kcalloc(dev, cmdq->pdata->thread_nr,
@@ -623,12 +689,6 @@ static int cmdq_probe(struct platform_device *pdev)
cmdq->mbox.chans[i].con_priv = (void *)&cmdq->thread[i];
}
- err = devm_mbox_controller_register(dev, &cmdq->mbox);
- if (err < 0) {
- dev_err(dev, "failed to register mailbox: %d\n", err);
- return err;
- }
-
platform_set_drvdata(pdev, cmdq);
WARN_ON(clk_bulk_prepare(cmdq->pdata->gce_num, cmdq->clocks));
@@ -642,66 +702,97 @@ static int cmdq_probe(struct platform_device *pdev)
return err;
}
+ /* If Runtime PM is not available enable the clocks now. */
+ if (!IS_ENABLED(CONFIG_PM)) {
+ err = cmdq_runtime_resume(dev);
+ if (err)
+ return err;
+ }
+
+ err = devm_pm_runtime_enable(dev);
+ if (err)
+ return err;
+
+ pm_runtime_set_autosuspend_delay(dev, CMDQ_MBOX_AUTOSUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+
+ err = devm_mbox_controller_register(dev, &cmdq->mbox);
+ if (err < 0) {
+ dev_err(dev, "failed to register mailbox: %d\n", err);
+ return err;
+ }
+
return 0;
}
static const struct dev_pm_ops cmdq_pm_ops = {
.suspend = cmdq_suspend,
.resume = cmdq_resume,
+ SET_RUNTIME_PM_OPS(cmdq_runtime_suspend,
+ cmdq_runtime_resume, NULL)
};
-static const struct gce_plat gce_plat_v2 = {
- .thread_nr = 16,
- .shift = 0,
+static const struct gce_plat gce_plat_mt6779 = {
+ .thread_nr = 24,
+ .shift = 3,
.control_by_sw = false,
.gce_num = 1
};
-static const struct gce_plat gce_plat_v3 = {
- .thread_nr = 24,
+static const struct gce_plat gce_plat_mt8173 = {
+ .thread_nr = 16,
.shift = 0,
.control_by_sw = false,
.gce_num = 1
};
-static const struct gce_plat gce_plat_v4 = {
+static const struct gce_plat gce_plat_mt8183 = {
.thread_nr = 24,
- .shift = 3,
+ .shift = 0,
.control_by_sw = false,
.gce_num = 1
};
-static const struct gce_plat gce_plat_v5 = {
+static const struct gce_plat gce_plat_mt8186 = {
.thread_nr = 24,
.shift = 3,
.control_by_sw = true,
+ .sw_ddr_en = true,
.gce_num = 1
};
-static const struct gce_plat gce_plat_v6 = {
- .thread_nr = 24,
+static const struct gce_plat gce_plat_mt8188 = {
+ .thread_nr = 32,
.shift = 3,
.control_by_sw = true,
.gce_num = 2
};
-static const struct gce_plat gce_plat_v7 = {
+static const struct gce_plat gce_plat_mt8192 = {
.thread_nr = 24,
.shift = 3,
.control_by_sw = true,
- .sw_ddr_en = true,
.gce_num = 1
};
+static const struct gce_plat gce_plat_mt8195 = {
+ .thread_nr = 24,
+ .shift = 3,
+ .control_by_sw = true,
+ .gce_num = 2
+};
+
static const struct of_device_id cmdq_of_ids[] = {
- {.compatible = "mediatek,mt8173-gce", .data = (void *)&gce_plat_v2},
- {.compatible = "mediatek,mt8183-gce", .data = (void *)&gce_plat_v3},
- {.compatible = "mediatek,mt8186-gce", .data = (void *)&gce_plat_v7},
- {.compatible = "mediatek,mt6779-gce", .data = (void *)&gce_plat_v4},
- {.compatible = "mediatek,mt8192-gce", .data = (void *)&gce_plat_v5},
- {.compatible = "mediatek,mt8195-gce", .data = (void *)&gce_plat_v6},
+ {.compatible = "mediatek,mt6779-gce", .data = (void *)&gce_plat_mt6779},
+ {.compatible = "mediatek,mt8173-gce", .data = (void *)&gce_plat_mt8173},
+ {.compatible = "mediatek,mt8183-gce", .data = (void *)&gce_plat_mt8183},
+ {.compatible = "mediatek,mt8186-gce", .data = (void *)&gce_plat_mt8186},
+ {.compatible = "mediatek,mt8188-gce", .data = (void *)&gce_plat_mt8188},
+ {.compatible = "mediatek,mt8192-gce", .data = (void *)&gce_plat_mt8192},
+ {.compatible = "mediatek,mt8195-gce", .data = (void *)&gce_plat_mt8195},
{}
};
+MODULE_DEVICE_TABLE(of, cmdq_of_ids);
static struct platform_driver cmdq_drv = {
.probe = cmdq_probe,
@@ -726,4 +817,5 @@ static void __exit cmdq_drv_exit(void)
subsys_initcall(cmdq_drv_init);
module_exit(cmdq_drv_exit);
+MODULE_DESCRIPTION("Mediatek Command Queue(CMDQ) Mailbox driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/mtk-gpueb-mailbox.c b/drivers/mailbox/mtk-gpueb-mailbox.c
new file mode 100644
index 000000000000..f6d2beccd91b
--- /dev/null
+++ b/drivers/mailbox/mtk-gpueb-mailbox.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * MediaTek GPUEB mailbox driver for SoCs such as the MT8196
+ *
+ * Copyright (C) 2025, Collabora Ltd.
+ *
+ * Developers harmed in the making of this driver:
+ * - Nicolas Frattaroli <nicolas.frattaroli@collabora.com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#define GPUEB_MBOX_CTL_TX_STS 0x00
+#define GPUEB_MBOX_CTL_IRQ_SET 0x04
+#define GPUEB_MBOX_CTL_IRQ_CLR 0x74
+#define GPUEB_MBOX_CTL_RX_STS 0x78
+
+#define GPUEB_MBOX_FULL BIT(0) /* i.e. we've received data */
+#define GPUEB_MBOX_BLOCKED BIT(1) /* i.e. the channel is shutdown */
+
+#define GPUEB_MBOX_MAX_RX_SIZE 32 /* in bytes */
+
+struct mtk_gpueb_mbox {
+ struct device *dev;
+ struct clk *clk;
+ void __iomem *mbox_mmio;
+ void __iomem *mbox_ctl;
+ struct mbox_controller mbox;
+ struct mtk_gpueb_mbox_chan *ch;
+ int irq;
+ const struct mtk_gpueb_mbox_variant *v;
+};
+
+/**
+ * struct mtk_gpueb_mbox_chan - per-channel runtime data
+ * @ebm: pointer to the parent &struct mtk_gpueb_mbox mailbox
+ * @full_name: descriptive name of channel for IRQ subsystem
+ * @num: channel number, starting at 0
+ * @rx_status: signifies whether channel reception is turned off or full
+ * @c: pointer to the constant &struct mtk_gpueb_mbox_chan_desc channel data
+ */
+struct mtk_gpueb_mbox_chan {
+ struct mtk_gpueb_mbox *ebm;
+ char *full_name;
+ u8 num;
+ atomic_t rx_status;
+ const struct mtk_gpueb_mbox_chan_desc *c;
+};
+
+/**
+ * struct mtk_gpueb_mbox_chan_desc - per-channel constant data
+ * @name: name of this channel
+ * @num: index of this channel, starting at 0
+ * @tx_offset: byte offset measured from mmio base for outgoing data
+ * @tx_len: size, in bytes, of the outgoing data on this channel
+ * @rx_offset: byte offset measured from mmio base for incoming data
+ * @rx_len: size, in bytes, of the incoming data on this channel
+ */
+struct mtk_gpueb_mbox_chan_desc {
+ const char *name;
+ const u8 num;
+ const u16 tx_offset;
+ const u8 tx_len;
+ const u16 rx_offset;
+ const u8 rx_len;
+};
+
+struct mtk_gpueb_mbox_variant {
+ const u8 num_channels;
+ const struct mtk_gpueb_mbox_chan_desc channels[] __counted_by(num_channels);
+};
+
+/**
+ * mtk_gpueb_mbox_read_rx - read RX buffer from MMIO into channel's RX buffer
+ * @buf: buffer to read into
+ * @chan: pointer to the channel to read
+ */
+static void mtk_gpueb_mbox_read_rx(void *buf, struct mtk_gpueb_mbox_chan *chan)
+{
+ memcpy_fromio(buf, chan->ebm->mbox_mmio + chan->c->rx_offset, chan->c->rx_len);
+}
+
+static irqreturn_t mtk_gpueb_mbox_isr(int irq, void *data)
+{
+ struct mtk_gpueb_mbox_chan *ch = data;
+ u32 rx_sts;
+
+ rx_sts = readl(ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_RX_STS);
+
+ if (rx_sts & BIT(ch->num)) {
+ if (!atomic_cmpxchg(&ch->rx_status, 0, GPUEB_MBOX_FULL | GPUEB_MBOX_BLOCKED))
+ return IRQ_WAKE_THREAD;
+ }
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t mtk_gpueb_mbox_thread(int irq, void *data)
+{
+ struct mtk_gpueb_mbox_chan *ch = data;
+ int status;
+
+ status = atomic_cmpxchg(&ch->rx_status, GPUEB_MBOX_FULL | GPUEB_MBOX_BLOCKED,
+ GPUEB_MBOX_FULL);
+ if (status == (GPUEB_MBOX_FULL | GPUEB_MBOX_BLOCKED)) {
+ u8 buf[GPUEB_MBOX_MAX_RX_SIZE] = {};
+
+ mtk_gpueb_mbox_read_rx(buf, ch);
+ writel(BIT(ch->num), ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_IRQ_CLR);
+ mbox_chan_received_data(&ch->ebm->mbox.chans[ch->num], buf);
+ atomic_set(&ch->rx_status, 0);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int mtk_gpueb_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct mtk_gpueb_mbox_chan *ch = chan->con_priv;
+ u32 *values = data;
+ int i;
+
+ if (atomic_read(&ch->rx_status))
+ return -EBUSY;
+
+ /*
+ * We don't want any fancy nonsense, just write the 32-bit values in
+ * order. memcpy_toio/__iowrite32_copy don't work here, as they may use
+ * writes of different sizes or memory ordering characteristics depending
+ * on the architecture, alignment and the current phase of the moon.
+ */
+ for (i = 0; i < ch->c->tx_len; i += 4)
+ writel(values[i / 4], ch->ebm->mbox_mmio + ch->c->tx_offset + i);
+
+ writel(BIT(ch->num), ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_IRQ_SET);
+
+ return 0;
+}
+
+static int mtk_gpueb_mbox_startup(struct mbox_chan *chan)
+{
+ struct mtk_gpueb_mbox_chan *ch = chan->con_priv;
+ int ret;
+
+ atomic_set(&ch->rx_status, 0);
+
+ ret = clk_enable(ch->ebm->clk);
+ if (ret) {
+ dev_err(ch->ebm->dev, "Failed to enable EB clock: %pe\n",
+ ERR_PTR(ret));
+ goto err_block;
+ }
+
+ writel(BIT(ch->num), ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_IRQ_CLR);
+
+ ret = devm_request_threaded_irq(ch->ebm->dev, ch->ebm->irq, mtk_gpueb_mbox_isr,
+ mtk_gpueb_mbox_thread, IRQF_SHARED | IRQF_ONESHOT,
+ ch->full_name, ch);
+ if (ret) {
+ dev_err(ch->ebm->dev, "Failed to request IRQ: %pe\n",
+ ERR_PTR(ret));
+ goto err_unclk;
+ }
+
+ return 0;
+
+err_unclk:
+ clk_disable(ch->ebm->clk);
+err_block:
+ atomic_set(&ch->rx_status, GPUEB_MBOX_BLOCKED);
+
+ return ret;
+}
+
+static void mtk_gpueb_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct mtk_gpueb_mbox_chan *ch = chan->con_priv;
+
+ atomic_set(&ch->rx_status, GPUEB_MBOX_BLOCKED);
+
+ devm_free_irq(ch->ebm->dev, ch->ebm->irq, ch);
+
+ clk_disable(ch->ebm->clk);
+}
+
+static bool mtk_gpueb_mbox_last_tx_done(struct mbox_chan *chan)
+{
+ struct mtk_gpueb_mbox_chan *ch = chan->con_priv;
+
+ return !(readl(ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_TX_STS) & BIT(ch->num));
+}
+
+static const struct mbox_chan_ops mtk_gpueb_mbox_ops = {
+ .send_data = mtk_gpueb_mbox_send_data,
+ .startup = mtk_gpueb_mbox_startup,
+ .shutdown = mtk_gpueb_mbox_shutdown,
+ .last_tx_done = mtk_gpueb_mbox_last_tx_done,
+};
+
+static int mtk_gpueb_mbox_probe(struct platform_device *pdev)
+{
+ struct mtk_gpueb_mbox_chan *ch;
+ struct mtk_gpueb_mbox *ebm;
+ unsigned int i;
+
+ ebm = devm_kzalloc(&pdev->dev, sizeof(*ebm), GFP_KERNEL);
+ if (!ebm)
+ return -ENOMEM;
+
+ ebm->dev = &pdev->dev;
+ ebm->v = of_device_get_match_data(ebm->dev);
+
+ ebm->irq = platform_get_irq(pdev, 0);
+ if (ebm->irq < 0)
+ return ebm->irq;
+
+ ebm->clk = devm_clk_get_prepared(ebm->dev, NULL);
+ if (IS_ERR(ebm->clk))
+ return dev_err_probe(ebm->dev, PTR_ERR(ebm->clk),
+ "Failed to get 'eb' clock\n");
+
+ ebm->mbox_mmio = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ebm->mbox_mmio))
+ return dev_err_probe(ebm->dev, PTR_ERR(ebm->mbox_mmio),
+ "Couldn't map mailbox data registers\n");
+
+ ebm->mbox_ctl = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(ebm->mbox_ctl))
+ return dev_err_probe(
+ ebm->dev, PTR_ERR(ebm->mbox_ctl),
+ "Couldn't map mailbox control registers\n");
+
+ ebm->ch = devm_kmalloc_array(ebm->dev, ebm->v->num_channels,
+ sizeof(*ebm->ch), GFP_KERNEL);
+ if (!ebm->ch)
+ return -ENOMEM;
+
+ ebm->mbox.chans = devm_kcalloc(ebm->dev, ebm->v->num_channels,
+ sizeof(struct mbox_chan), GFP_KERNEL);
+ if (!ebm->mbox.chans)
+ return -ENOMEM;
+
+ for (i = 0; i < ebm->v->num_channels; i++) {
+ ch = &ebm->ch[i];
+ ch->c = &ebm->v->channels[i];
+ if (ch->c->rx_len > GPUEB_MBOX_MAX_RX_SIZE) {
+ dev_err(ebm->dev, "Channel %s RX size (%d) too large\n",
+ ch->c->name, ch->c->rx_len);
+ return -EINVAL;
+ }
+ ch->full_name = devm_kasprintf(ebm->dev, GFP_KERNEL, "%s:%s",
+ dev_name(ebm->dev), ch->c->name);
+ if (!ch->full_name)
+ return -ENOMEM;
+
+ ch->ebm = ebm;
+ ch->num = i;
+ spin_lock_init(&ebm->mbox.chans[i].lock);
+ ebm->mbox.chans[i].con_priv = ch;
+ atomic_set(&ch->rx_status, GPUEB_MBOX_BLOCKED);
+ }
+
+ ebm->mbox.dev = ebm->dev;
+ ebm->mbox.num_chans = ebm->v->num_channels;
+ ebm->mbox.txdone_poll = true;
+ ebm->mbox.txpoll_period = 0; /* minimum hrtimer interval */
+ ebm->mbox.ops = &mtk_gpueb_mbox_ops;
+
+ dev_set_drvdata(ebm->dev, ebm);
+
+ return devm_mbox_controller_register(ebm->dev, &ebm->mbox);
+}
+
+static const struct mtk_gpueb_mbox_variant mtk_gpueb_mbox_mt8196 = {
+ .num_channels = 12,
+ .channels = {
+ { "fast-dvfs-event", 0, 0x0000, 16, 0x00e0, 16 },
+ { "gpufreq", 1, 0x0010, 32, 0x00f0, 32 },
+ { "sleep", 2, 0x0030, 12, 0x0110, 4 },
+ { "timer", 3, 0x003c, 24, 0x0114, 4 },
+ { "fhctl", 4, 0x0054, 36, 0x0118, 4 },
+ { "ccf", 5, 0x0078, 16, 0x011c, 16 },
+ { "gpumpu", 6, 0x0088, 24, 0x012c, 4 },
+ { "fast-dvfs", 7, 0x00a0, 24, 0x0130, 24 },
+ { "ipir-c-met", 8, 0x00b8, 4, 0x0148, 16 },
+ { "ipis-c-met", 9, 0x00bc, 16, 0x0158, 4 },
+ { "brisket", 10, 0x00cc, 16, 0x015c, 16 },
+ { "ppb", 11, 0x00dc, 4, 0x016c, 4 },
+ },
+};
+
+static const struct of_device_id mtk_gpueb_mbox_of_ids[] = {
+ { .compatible = "mediatek,mt8196-gpueb-mbox", .data = &mtk_gpueb_mbox_mt8196 },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mtk_gpueb_mbox_of_ids);
+
+static struct platform_driver mtk_gpueb_mbox_drv = {
+ .probe = mtk_gpueb_mbox_probe,
+ .driver = {
+ .name = "mtk-gpueb-mbox",
+ .of_match_table = mtk_gpueb_mbox_of_ids,
+ }
+};
+module_platform_driver(mtk_gpueb_mbox_drv);
+
+MODULE_AUTHOR("Nicolas Frattaroli <nicolas.frattaroli@collabora.com>");
+MODULE_DESCRIPTION("MediaTek GPUEB mailbox driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
index 098c82d87137..17fe6545875d 100644
--- a/drivers/mailbox/omap-mailbox.c
+++ b/drivers/mailbox/omap-mailbox.c
@@ -15,11 +15,11 @@
#include <linux/slab.h>
#include <linux/kfifo.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <linux/omap-mailbox.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox_client.h>
@@ -51,6 +51,11 @@
#define MBOX_INTR_CFG_TYPE1 0
#define MBOX_INTR_CFG_TYPE2 1
+typedef enum {
+ IRQ_TX = 1,
+ IRQ_RX = 2,
+} omap_mbox_irq_t;
+
struct omap_mbox_fifo {
unsigned long msg;
unsigned long fifo_stat;
@@ -61,16 +66,9 @@ struct omap_mbox_fifo {
u32 intr_bit;
};
-struct omap_mbox_queue {
- spinlock_t lock;
- struct kfifo fifo;
- struct work_struct work;
- struct omap_mbox *mbox;
- bool full;
-};
-
struct omap_mbox_match_data {
u32 intr_type;
+ bool is_exclusive;
};
struct omap_mbox_device {
@@ -81,29 +79,12 @@ struct omap_mbox_device {
u32 num_users;
u32 num_fifos;
u32 intr_type;
- struct omap_mbox **mboxes;
- struct mbox_controller controller;
- struct list_head elem;
-};
-
-struct omap_mbox_fifo_info {
- int tx_id;
- int tx_usr;
- int tx_irq;
-
- int rx_id;
- int rx_usr;
- int rx_irq;
-
- const char *name;
- bool send_no_irq;
+ const struct omap_mbox_match_data *mbox_data;
};
struct omap_mbox {
const char *name;
int irq;
- struct omap_mbox_queue *rxq;
- struct device *dev;
struct omap_mbox_device *parent;
struct omap_mbox_fifo tx_fifo;
struct omap_mbox_fifo rx_fifo;
@@ -112,22 +93,6 @@ struct omap_mbox {
bool send_no_irq;
};
-/* global variables for the mailbox devices */
-static DEFINE_MUTEX(omap_mbox_devices_lock);
-static LIST_HEAD(omap_mbox_devices);
-
-static unsigned int mbox_kfifo_size = CONFIG_OMAP_MBOX_KFIFO_SIZE;
-module_param(mbox_kfifo_size, uint, S_IRUGO);
-MODULE_PARM_DESC(mbox_kfifo_size, "Size of omap's mailbox kfifo (bytes)");
-
-static struct omap_mbox *mbox_chan_to_omap_mbox(struct mbox_chan *chan)
-{
- if (!chan || !chan->con_priv)
- return NULL;
-
- return (struct omap_mbox *)chan->con_priv;
-}
-
static inline
unsigned int mbox_read_reg(struct omap_mbox_device *mdev, size_t ofs)
{
@@ -197,7 +162,7 @@ static int is_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
return (int)(enable & status & bit);
}
-static void _omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
+static void omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
u32 l;
struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
@@ -210,7 +175,7 @@ static void _omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
mbox_write_reg(mbox->parent, l, irqenable);
}
-static void _omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
+static void omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
&mbox->tx_fifo : &mbox->rx_fifo;
@@ -227,87 +192,27 @@ static void _omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
mbox_write_reg(mbox->parent, bit, irqdisable);
}
-void omap_mbox_enable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq)
-{
- struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
-
- if (WARN_ON(!mbox))
- return;
-
- _omap_mbox_enable_irq(mbox, irq);
-}
-EXPORT_SYMBOL(omap_mbox_enable_irq);
-
-void omap_mbox_disable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq)
-{
- struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
-
- if (WARN_ON(!mbox))
- return;
-
- _omap_mbox_disable_irq(mbox, irq);
-}
-EXPORT_SYMBOL(omap_mbox_disable_irq);
-
-/*
- * Message receiver(workqueue)
- */
-static void mbox_rx_work(struct work_struct *work)
-{
- struct omap_mbox_queue *mq =
- container_of(work, struct omap_mbox_queue, work);
- mbox_msg_t data;
- u32 msg;
- int len;
-
- while (kfifo_len(&mq->fifo) >= sizeof(msg)) {
- len = kfifo_out(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
- WARN_ON(len != sizeof(msg));
- data = msg;
-
- mbox_chan_received_data(mq->mbox->chan, (void *)data);
- spin_lock_irq(&mq->lock);
- if (mq->full) {
- mq->full = false;
- _omap_mbox_enable_irq(mq->mbox, IRQ_RX);
- }
- spin_unlock_irq(&mq->lock);
- }
-}
-
/*
* Mailbox interrupt handler
*/
static void __mbox_tx_interrupt(struct omap_mbox *mbox)
{
- _omap_mbox_disable_irq(mbox, IRQ_TX);
+ omap_mbox_disable_irq(mbox, IRQ_TX);
ack_mbox_irq(mbox, IRQ_TX);
mbox_chan_txdone(mbox->chan, 0);
}
static void __mbox_rx_interrupt(struct omap_mbox *mbox)
{
- struct omap_mbox_queue *mq = mbox->rxq;
u32 msg;
- int len;
while (!mbox_fifo_empty(mbox)) {
- if (unlikely(kfifo_avail(&mq->fifo) < sizeof(msg))) {
- _omap_mbox_disable_irq(mbox, IRQ_RX);
- mq->full = true;
- goto nomem;
- }
-
msg = mbox_fifo_read(mbox);
-
- len = kfifo_in(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
- WARN_ON(len != sizeof(msg));
+ mbox_chan_received_data(mbox->chan, (void *)(uintptr_t)msg);
}
- /* no more messages in the fifo. clear IRQ source. */
+ /* clear IRQ source. */
ack_mbox_irq(mbox, IRQ_RX);
-nomem:
- schedule_work(&mbox->rxq->work);
}
static irqreturn_t mbox_interrupt(int irq, void *p)
@@ -323,202 +228,35 @@ static irqreturn_t mbox_interrupt(int irq, void *p)
return IRQ_HANDLED;
}
-static struct omap_mbox_queue *mbox_queue_alloc(struct omap_mbox *mbox,
- void (*work)(struct work_struct *))
-{
- struct omap_mbox_queue *mq;
-
- if (!work)
- return NULL;
-
- mq = kzalloc(sizeof(*mq), GFP_KERNEL);
- if (!mq)
- return NULL;
-
- spin_lock_init(&mq->lock);
-
- if (kfifo_alloc(&mq->fifo, mbox_kfifo_size, GFP_KERNEL))
- goto error;
-
- INIT_WORK(&mq->work, work);
- return mq;
-
-error:
- kfree(mq);
- return NULL;
-}
-
-static void mbox_queue_free(struct omap_mbox_queue *q)
-{
- kfifo_free(&q->fifo);
- kfree(q);
-}
-
static int omap_mbox_startup(struct omap_mbox *mbox)
{
int ret = 0;
- struct omap_mbox_queue *mq;
-
- mq = mbox_queue_alloc(mbox, mbox_rx_work);
- if (!mq)
- return -ENOMEM;
- mbox->rxq = mq;
- mq->mbox = mbox;
- ret = request_irq(mbox->irq, mbox_interrupt, IRQF_SHARED,
- mbox->name, mbox);
+ ret = request_threaded_irq(mbox->irq, NULL, mbox_interrupt,
+ IRQF_SHARED | IRQF_ONESHOT, mbox->name,
+ mbox);
if (unlikely(ret)) {
pr_err("failed to register mailbox interrupt:%d\n", ret);
- goto fail_request_irq;
+ return ret;
}
if (mbox->send_no_irq)
mbox->chan->txdone_method = TXDONE_BY_ACK;
- _omap_mbox_enable_irq(mbox, IRQ_RX);
+ omap_mbox_enable_irq(mbox, IRQ_RX);
return 0;
-
-fail_request_irq:
- mbox_queue_free(mbox->rxq);
- return ret;
}
static void omap_mbox_fini(struct omap_mbox *mbox)
{
- _omap_mbox_disable_irq(mbox, IRQ_RX);
+ omap_mbox_disable_irq(mbox, IRQ_RX);
free_irq(mbox->irq, mbox);
- flush_work(&mbox->rxq->work);
- mbox_queue_free(mbox->rxq);
-}
-
-static struct omap_mbox *omap_mbox_device_find(struct omap_mbox_device *mdev,
- const char *mbox_name)
-{
- struct omap_mbox *_mbox, *mbox = NULL;
- struct omap_mbox **mboxes = mdev->mboxes;
- int i;
-
- if (!mboxes)
- return NULL;
-
- for (i = 0; (_mbox = mboxes[i]); i++) {
- if (!strcmp(_mbox->name, mbox_name)) {
- mbox = _mbox;
- break;
- }
- }
- return mbox;
-}
-
-struct mbox_chan *omap_mbox_request_channel(struct mbox_client *cl,
- const char *chan_name)
-{
- struct device *dev = cl->dev;
- struct omap_mbox *mbox = NULL;
- struct omap_mbox_device *mdev;
- struct mbox_chan *chan;
- unsigned long flags;
- int ret;
-
- if (!dev)
- return ERR_PTR(-ENODEV);
-
- if (dev->of_node) {
- pr_err("%s: please use mbox_request_channel(), this API is supported only for OMAP non-DT usage\n",
- __func__);
- return ERR_PTR(-ENODEV);
- }
-
- mutex_lock(&omap_mbox_devices_lock);
- list_for_each_entry(mdev, &omap_mbox_devices, elem) {
- mbox = omap_mbox_device_find(mdev, chan_name);
- if (mbox)
- break;
- }
- mutex_unlock(&omap_mbox_devices_lock);
-
- if (!mbox || !mbox->chan)
- return ERR_PTR(-ENOENT);
-
- chan = mbox->chan;
- spin_lock_irqsave(&chan->lock, flags);
- chan->msg_free = 0;
- chan->msg_count = 0;
- chan->active_req = NULL;
- chan->cl = cl;
- init_completion(&chan->tx_complete);
- spin_unlock_irqrestore(&chan->lock, flags);
-
- ret = chan->mbox->ops->startup(chan);
- if (ret) {
- pr_err("Unable to startup the chan (%d)\n", ret);
- mbox_free_channel(chan);
- chan = ERR_PTR(ret);
- }
-
- return chan;
-}
-EXPORT_SYMBOL(omap_mbox_request_channel);
-
-static struct class omap_mbox_class = { .name = "mbox", };
-
-static int omap_mbox_register(struct omap_mbox_device *mdev)
-{
- int ret;
- int i;
- struct omap_mbox **mboxes;
-
- if (!mdev || !mdev->mboxes)
- return -EINVAL;
-
- mboxes = mdev->mboxes;
- for (i = 0; mboxes[i]; i++) {
- struct omap_mbox *mbox = mboxes[i];
-
- mbox->dev = device_create(&omap_mbox_class, mdev->dev,
- 0, mbox, "%s", mbox->name);
- if (IS_ERR(mbox->dev)) {
- ret = PTR_ERR(mbox->dev);
- goto err_out;
- }
- }
-
- mutex_lock(&omap_mbox_devices_lock);
- list_add(&mdev->elem, &omap_mbox_devices);
- mutex_unlock(&omap_mbox_devices_lock);
-
- ret = devm_mbox_controller_register(mdev->dev, &mdev->controller);
-
-err_out:
- if (ret) {
- while (i--)
- device_unregister(mboxes[i]->dev);
- }
- return ret;
-}
-
-static int omap_mbox_unregister(struct omap_mbox_device *mdev)
-{
- int i;
- struct omap_mbox **mboxes;
-
- if (!mdev || !mdev->mboxes)
- return -EINVAL;
-
- mutex_lock(&omap_mbox_devices_lock);
- list_del(&mdev->elem);
- mutex_unlock(&omap_mbox_devices_lock);
-
- mboxes = mdev->mboxes;
- for (i = 0; mboxes[i]; i++)
- device_unregister(mboxes[i]->dev);
- return 0;
}
static int omap_mbox_chan_startup(struct mbox_chan *chan)
{
- struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
+ struct omap_mbox *mbox = chan->con_priv;
struct omap_mbox_device *mdev = mbox->parent;
int ret = 0;
@@ -533,7 +271,7 @@ static int omap_mbox_chan_startup(struct mbox_chan *chan)
static void omap_mbox_chan_shutdown(struct mbox_chan *chan)
{
- struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
+ struct omap_mbox *mbox = chan->con_priv;
struct omap_mbox_device *mdev = mbox->parent;
mutex_lock(&mdev->cfg_lock);
@@ -544,41 +282,40 @@ static void omap_mbox_chan_shutdown(struct mbox_chan *chan)
static int omap_mbox_chan_send_noirq(struct omap_mbox *mbox, u32 msg)
{
- int ret = -EBUSY;
+ if (mbox_fifo_full(mbox))
+ return -EBUSY;
- if (!mbox_fifo_full(mbox)) {
- _omap_mbox_enable_irq(mbox, IRQ_RX);
- mbox_fifo_write(mbox, msg);
- ret = 0;
- _omap_mbox_disable_irq(mbox, IRQ_RX);
+ omap_mbox_enable_irq(mbox, IRQ_RX);
+ mbox_fifo_write(mbox, msg);
+ omap_mbox_disable_irq(mbox, IRQ_RX);
- /* we must read and ack the interrupt directly from here */
- mbox_fifo_read(mbox);
- ack_mbox_irq(mbox, IRQ_RX);
- }
+ /* we must read and ack the interrupt directly from here */
+ mbox_fifo_read(mbox);
+ ack_mbox_irq(mbox, IRQ_RX);
- return ret;
+ return 0;
}
static int omap_mbox_chan_send(struct omap_mbox *mbox, u32 msg)
{
- int ret = -EBUSY;
-
- if (!mbox_fifo_full(mbox)) {
- mbox_fifo_write(mbox, msg);
- ret = 0;
+ if (mbox_fifo_full(mbox)) {
+ /* always enable the interrupt */
+ omap_mbox_enable_irq(mbox, IRQ_TX);
+ return -EBUSY;
}
+ mbox_fifo_write(mbox, msg);
+
/* always enable the interrupt */
- _omap_mbox_enable_irq(mbox, IRQ_TX);
- return ret;
+ omap_mbox_enable_irq(mbox, IRQ_TX);
+ return 0;
}
static int omap_mbox_chan_send_data(struct mbox_chan *chan, void *data)
{
- struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
+ struct omap_mbox *mbox = chan->con_priv;
int ret;
- u32 msg = omap_mbox_message(data);
+ u32 msg = (u32)(uintptr_t)(data);
if (!mbox)
return -EINVAL;
@@ -606,11 +343,13 @@ static int omap_mbox_suspend(struct device *dev)
if (pm_runtime_status_suspended(dev))
return 0;
- for (fifo = 0; fifo < mdev->num_fifos; fifo++) {
- if (mbox_read_reg(mdev, MAILBOX_MSGSTATUS(fifo))) {
- dev_err(mdev->dev, "fifo %d has unexpected unread messages\n",
- fifo);
- return -EBUSY;
+ if (mdev->mbox_data->is_exclusive) {
+ for (fifo = 0; fifo < mdev->num_fifos; fifo++) {
+ if (mbox_read_reg(mdev, MAILBOX_MSGSTATUS(fifo))) {
+ dev_err(mdev->dev, "fifo %d has unexpected unread messages\n",
+ fifo);
+ return -EBUSY;
+ }
}
}
@@ -643,8 +382,9 @@ static const struct dev_pm_ops omap_mbox_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(omap_mbox_suspend, omap_mbox_resume)
};
-static const struct omap_mbox_match_data omap2_data = { MBOX_INTR_CFG_TYPE1 };
-static const struct omap_mbox_match_data omap4_data = { MBOX_INTR_CFG_TYPE2 };
+static const struct omap_mbox_match_data omap2_data = { MBOX_INTR_CFG_TYPE1, true };
+static const struct omap_mbox_match_data omap4_data = { MBOX_INTR_CFG_TYPE2, true };
+static const struct omap_mbox_match_data am654_data = { MBOX_INTR_CFG_TYPE2, false };
static const struct of_device_id omap_mailbox_of_match[] = {
{
@@ -661,11 +401,11 @@ static const struct of_device_id omap_mailbox_of_match[] = {
},
{
.compatible = "ti,am654-mailbox",
- .data = &omap4_data,
+ .data = &am654_data,
},
{
.compatible = "ti,am64-mailbox",
- .data = &omap4_data,
+ .data = &am654_data,
},
{
/* end */
@@ -680,8 +420,9 @@ static struct mbox_chan *omap_mbox_of_xlate(struct mbox_controller *controller,
struct device_node *node;
struct omap_mbox_device *mdev;
struct omap_mbox *mbox;
+ int i;
- mdev = container_of(controller, struct omap_mbox_device, controller);
+ mdev = dev_get_drvdata(controller->dev);
if (WARN_ON(!mdev))
return ERR_PTR(-EINVAL);
@@ -692,22 +433,28 @@ static struct mbox_chan *omap_mbox_of_xlate(struct mbox_controller *controller,
return ERR_PTR(-ENODEV);
}
- mbox = omap_mbox_device_find(mdev, node->name);
+ for (i = 0; i < controller->num_chans; i++) {
+ mbox = controller->chans[i].con_priv;
+ if (!strcmp(mbox->name, node->name)) {
+ of_node_put(node);
+ return &controller->chans[i];
+ }
+ }
+
of_node_put(node);
- return mbox ? mbox->chan : ERR_PTR(-ENOENT);
+ return ERR_PTR(-ENOENT);
}
static int omap_mbox_probe(struct platform_device *pdev)
{
int ret;
struct mbox_chan *chnls;
- struct omap_mbox **list, *mbox, *mboxblk;
- struct omap_mbox_fifo_info *finfo, *finfoblk;
+ struct omap_mbox *mbox;
struct omap_mbox_device *mdev;
struct omap_mbox_fifo *fifo;
struct device_node *node = pdev->dev.of_node;
struct device_node *child;
- const struct omap_mbox_match_data *match_data;
+ struct mbox_controller *controller;
u32 intr_type, info_count;
u32 num_users, num_fifos;
u32 tmp[3];
@@ -719,11 +466,6 @@ static int omap_mbox_probe(struct platform_device *pdev)
return -ENODEV;
}
- match_data = of_device_get_match_data(&pdev->dev);
- if (!match_data)
- return -ENODEV;
- intr_type = match_data->intr_type;
-
if (of_property_read_u32(node, "ti,mbox-num-users", &num_users))
return -ENODEV;
@@ -736,45 +478,16 @@ static int omap_mbox_probe(struct platform_device *pdev)
return -ENODEV;
}
- finfoblk = devm_kcalloc(&pdev->dev, info_count, sizeof(*finfoblk),
- GFP_KERNEL);
- if (!finfoblk)
- return -ENOMEM;
-
- finfo = finfoblk;
- child = NULL;
- for (i = 0; i < info_count; i++, finfo++) {
- child = of_get_next_available_child(node, child);
- ret = of_property_read_u32_array(child, "ti,mbox-tx", tmp,
- ARRAY_SIZE(tmp));
- if (ret)
- return ret;
- finfo->tx_id = tmp[0];
- finfo->tx_irq = tmp[1];
- finfo->tx_usr = tmp[2];
-
- ret = of_property_read_u32_array(child, "ti,mbox-rx", tmp,
- ARRAY_SIZE(tmp));
- if (ret)
- return ret;
- finfo->rx_id = tmp[0];
- finfo->rx_irq = tmp[1];
- finfo->rx_usr = tmp[2];
-
- finfo->name = child->name;
-
- if (of_find_property(child, "ti,mbox-send-noirq", NULL))
- finfo->send_no_irq = true;
-
- if (finfo->tx_id >= num_fifos || finfo->rx_id >= num_fifos ||
- finfo->tx_usr >= num_users || finfo->rx_usr >= num_users)
- return -EINVAL;
- }
-
mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
if (!mdev)
return -ENOMEM;
+ mdev->mbox_data = device_get_match_data(&pdev->dev);
+ if (!mdev->mbox_data)
+ return -ENODEV;
+
+ intr_type = mdev->mbox_data->intr_type;
+
mdev->mbox_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mdev->mbox_base))
return PTR_ERR(mdev->mbox_base);
@@ -784,52 +497,67 @@ static int omap_mbox_probe(struct platform_device *pdev)
if (!mdev->irq_ctx)
return -ENOMEM;
- /* allocate one extra for marking end of list */
- list = devm_kcalloc(&pdev->dev, info_count + 1, sizeof(*list),
- GFP_KERNEL);
- if (!list)
- return -ENOMEM;
-
chnls = devm_kcalloc(&pdev->dev, info_count + 1, sizeof(*chnls),
GFP_KERNEL);
if (!chnls)
return -ENOMEM;
- mboxblk = devm_kcalloc(&pdev->dev, info_count, sizeof(*mbox),
- GFP_KERNEL);
- if (!mboxblk)
- return -ENOMEM;
+ child = NULL;
+ for (i = 0; i < info_count; i++) {
+ int tx_id, tx_irq, tx_usr;
+ int rx_id, rx_usr;
+
+ mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ child = of_get_next_available_child(node, child);
+ ret = of_property_read_u32_array(child, "ti,mbox-tx", tmp,
+ ARRAY_SIZE(tmp));
+ if (ret)
+ return ret;
+ tx_id = tmp[0];
+ tx_irq = tmp[1];
+ tx_usr = tmp[2];
+
+ ret = of_property_read_u32_array(child, "ti,mbox-rx", tmp,
+ ARRAY_SIZE(tmp));
+ if (ret)
+ return ret;
+ rx_id = tmp[0];
+		/* tmp[1] is the Rx IRQ index, which this driver does not use */
+ rx_usr = tmp[2];
+
+ if (tx_id >= num_fifos || rx_id >= num_fifos ||
+ tx_usr >= num_users || rx_usr >= num_users)
+ return -EINVAL;
- mbox = mboxblk;
- finfo = finfoblk;
- for (i = 0; i < info_count; i++, finfo++) {
fifo = &mbox->tx_fifo;
- fifo->msg = MAILBOX_MESSAGE(finfo->tx_id);
- fifo->fifo_stat = MAILBOX_FIFOSTATUS(finfo->tx_id);
- fifo->intr_bit = MAILBOX_IRQ_NOTFULL(finfo->tx_id);
- fifo->irqenable = MAILBOX_IRQENABLE(intr_type, finfo->tx_usr);
- fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, finfo->tx_usr);
- fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, finfo->tx_usr);
+ fifo->msg = MAILBOX_MESSAGE(tx_id);
+ fifo->fifo_stat = MAILBOX_FIFOSTATUS(tx_id);
+ fifo->intr_bit = MAILBOX_IRQ_NOTFULL(tx_id);
+ fifo->irqenable = MAILBOX_IRQENABLE(intr_type, tx_usr);
+ fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, tx_usr);
+ fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, tx_usr);
fifo = &mbox->rx_fifo;
- fifo->msg = MAILBOX_MESSAGE(finfo->rx_id);
- fifo->msg_stat = MAILBOX_MSGSTATUS(finfo->rx_id);
- fifo->intr_bit = MAILBOX_IRQ_NEWMSG(finfo->rx_id);
- fifo->irqenable = MAILBOX_IRQENABLE(intr_type, finfo->rx_usr);
- fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, finfo->rx_usr);
- fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, finfo->rx_usr);
-
- mbox->send_no_irq = finfo->send_no_irq;
+ fifo->msg = MAILBOX_MESSAGE(rx_id);
+ fifo->msg_stat = MAILBOX_MSGSTATUS(rx_id);
+ fifo->intr_bit = MAILBOX_IRQ_NEWMSG(rx_id);
+ fifo->irqenable = MAILBOX_IRQENABLE(intr_type, rx_usr);
+ fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, rx_usr);
+ fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, rx_usr);
+
+ mbox->send_no_irq = of_property_read_bool(child, "ti,mbox-send-noirq");
mbox->intr_type = intr_type;
mbox->parent = mdev;
- mbox->name = finfo->name;
- mbox->irq = platform_get_irq(pdev, finfo->tx_irq);
+ mbox->name = child->name;
+ mbox->irq = platform_get_irq(pdev, tx_irq);
if (mbox->irq < 0)
return mbox->irq;
mbox->chan = &chnls[i];
chnls[i].con_priv = mbox;
- list[i] = mbox++;
}
mutex_init(&mdev->cfg_lock);
@@ -837,28 +565,30 @@ static int omap_mbox_probe(struct platform_device *pdev)
mdev->num_users = num_users;
mdev->num_fifos = num_fifos;
mdev->intr_type = intr_type;
- mdev->mboxes = list;
+ controller = devm_kzalloc(&pdev->dev, sizeof(*controller), GFP_KERNEL);
+ if (!controller)
+ return -ENOMEM;
/*
	 * OMAP/K3 Mailbox IP does not have a Tx-Done IRQ, but rather a Tx-Ready
	 * IRQ, which is needed to run the Tx state machine
*/
- mdev->controller.txdone_irq = true;
- mdev->controller.dev = mdev->dev;
- mdev->controller.ops = &omap_mbox_chan_ops;
- mdev->controller.chans = chnls;
- mdev->controller.num_chans = info_count;
- mdev->controller.of_xlate = omap_mbox_of_xlate;
- ret = omap_mbox_register(mdev);
+ controller->txdone_irq = true;
+ controller->dev = mdev->dev;
+ controller->ops = &omap_mbox_chan_ops;
+ controller->chans = chnls;
+ controller->num_chans = info_count;
+ controller->of_xlate = omap_mbox_of_xlate;
+ ret = devm_mbox_controller_register(mdev->dev, controller);
if (ret)
return ret;
platform_set_drvdata(pdev, mdev);
- pm_runtime_enable(mdev->dev);
+ devm_pm_runtime_enable(mdev->dev);
ret = pm_runtime_resume_and_get(mdev->dev);
if (ret < 0)
- goto unregister;
+ return ret;
/*
* just print the raw revision register, the format is not
@@ -869,63 +599,20 @@ static int omap_mbox_probe(struct platform_device *pdev)
ret = pm_runtime_put_sync(mdev->dev);
if (ret < 0 && ret != -ENOSYS)
- goto unregister;
-
- devm_kfree(&pdev->dev, finfoblk);
- return 0;
-
-unregister:
- pm_runtime_disable(mdev->dev);
- omap_mbox_unregister(mdev);
- return ret;
-}
-
-static int omap_mbox_remove(struct platform_device *pdev)
-{
- struct omap_mbox_device *mdev = platform_get_drvdata(pdev);
-
- pm_runtime_disable(mdev->dev);
- omap_mbox_unregister(mdev);
+ return ret;
return 0;
}
static struct platform_driver omap_mbox_driver = {
.probe = omap_mbox_probe,
- .remove = omap_mbox_remove,
.driver = {
.name = "omap-mailbox",
.pm = &omap_mbox_pm_ops,
- .of_match_table = of_match_ptr(omap_mailbox_of_match),
+ .of_match_table = omap_mailbox_of_match,
},
};
-
-static int __init omap_mbox_init(void)
-{
- int err;
-
- err = class_register(&omap_mbox_class);
- if (err)
- return err;
-
- /* kfifo size sanity check: alignment and minimal size */
- mbox_kfifo_size = ALIGN(mbox_kfifo_size, sizeof(u32));
- mbox_kfifo_size = max_t(unsigned int, mbox_kfifo_size, sizeof(u32));
-
- err = platform_driver_register(&omap_mbox_driver);
- if (err)
- class_unregister(&omap_mbox_class);
-
- return err;
-}
-subsys_initcall(omap_mbox_init);
-
-static void __exit omap_mbox_exit(void)
-{
- platform_driver_unregister(&omap_mbox_driver);
- class_unregister(&omap_mbox_class);
-}
-module_exit(omap_mbox_exit);
+module_platform_driver(omap_mbox_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("omap mailbox: interrupt driven messaging");
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 105d46c9801b..ff292b9e0be9 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -91,6 +91,14 @@ struct pcc_chan_reg {
* @cmd_update: PCC register bundle for the command complete update register
* @error: PCC register bundle for the error status register
* @plat_irq: platform interrupt
+ * @type: PCC subspace type
+ * @plat_irq_flags: platform interrupt flags
+ * @chan_in_use: this flag is used just to check if the interrupt needs
+ *		handling when it is shared. Since only one transfer can occur
+ *		at a time and the mailbox takes care of locking, this flag can
+ *		be accessed without a lock. Note: only subspace types that
+ *		support communication from OSPM to Platform (such as type 3)
+ *		use it; other types ignore it completely.
*/
struct pcc_chan_info {
struct pcc_mbox_chan chan;
@@ -100,6 +108,9 @@ struct pcc_chan_info {
struct pcc_chan_reg cmd_update;
struct pcc_chan_reg error;
int plat_irq;
+ u8 type;
+ unsigned int plat_irq_flags;
+ bool chan_in_use;
};
#define to_pcc_chan_info(c) container_of(c, struct pcc_chan_info, chan)
@@ -221,6 +232,95 @@ static int pcc_map_interrupt(u32 interrupt, u32 flags)
return acpi_register_gsi(NULL, interrupt, trigger, polarity);
}
+static bool pcc_chan_plat_irq_can_be_shared(struct pcc_chan_info *pchan)
+{
+ return (pchan->plat_irq_flags & ACPI_PCCT_INTERRUPT_MODE) ==
+ ACPI_LEVEL_SENSITIVE;
+}
+
+static bool pcc_mbox_cmd_complete_check(struct pcc_chan_info *pchan)
+{
+ u64 val;
+ int ret;
+
+ if (!pchan->cmd_complete.gas)
+ return true;
+
+ ret = pcc_chan_reg_read(&pchan->cmd_complete, &val);
+ if (ret)
+ return false;
+
+	/*
+	 * Determine whether the channel should respond to this interrupt
+	 * based on the command complete register value.
+	 */
+ val &= pchan->cmd_complete.status_mask;
+
+	/*
+	 * For a PCC slave subspace channel, a cleared command complete bit
+	 * indicates that the Platform is sending a notification and OSPM
+	 * must handle this interrupt to process the command.
+	 */
+ if (pchan->type == ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE)
+ return !val;
+
+ return !!val;
+}
+
+static int pcc_mbox_error_check_and_clear(struct pcc_chan_info *pchan)
+{
+ u64 val;
+ int ret;
+
+ ret = pcc_chan_reg_read(&pchan->error, &val);
+ if (ret)
+ return ret;
+
+ if (val & pchan->error.status_mask) {
+ val &= pchan->error.preserve_mask;
+ pcc_chan_reg_write(&pchan->error, val);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void pcc_chan_acknowledge(struct pcc_chan_info *pchan)
+{
+ struct acpi_pcct_ext_pcc_shared_memory __iomem *pcc_hdr;
+
+ if (pchan->type != ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE)
+ return;
+
+ pcc_chan_reg_read_modify_write(&pchan->cmd_update);
+
+ pcc_hdr = pchan->chan.shmem;
+
+ /*
+ * The PCC slave subspace channel needs to set the command
+ * complete bit after processing message. If the PCC_ACK_FLAG
+ * is set, it should also ring the doorbell.
+ */
+ if (ioread32(&pcc_hdr->flags) & PCC_CMD_COMPLETION_NOTIFY)
+ pcc_chan_reg_read_modify_write(&pchan->db);
+}
+
+static void *write_response(struct pcc_chan_info *pchan)
+{
+ struct pcc_header pcc_header;
+ void *buffer;
+ int data_len;
+
+ memcpy_fromio(&pcc_header, pchan->chan.shmem,
+ sizeof(pcc_header));
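+	/* pcc_header.length counts the command field plus payload; convert to the total byte count in shared memory */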
+ data_len = pcc_header.length - sizeof(u32) + sizeof(struct pcc_header);
+
+ buffer = pchan->chan.rx_alloc(pchan->chan.mchan->cl, data_len);
+ if (buffer != NULL)
+ memcpy_fromio(buffer, pchan->chan.shmem, data_len);
+ return buffer;
+}
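
For reference, a client-side rx_alloc callback compatible with the invocation in write_response() above might look like the sketch below; the function name and the GFP_ATOMIC choice are illustrative assumptions, not part of this patch.

/* Illustrative sketch of a client-supplied rx_alloc callback */
static void *my_pcc_rx_alloc(struct mbox_client *cl, int data_len)
{
	/* Called from the PCC interrupt handler, so a non-sleeping allocation is used */
	return kmalloc(data_len, GFP_ATOMIC);
}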
+
/**
* pcc_mbox_irq - PCC mailbox interrupt handler
* @irq: interrupt number
@@ -232,35 +332,44 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
{
struct pcc_chan_info *pchan;
struct mbox_chan *chan = p;
- u64 val;
- int ret;
+ struct pcc_header *pcc_header = chan->active_req;
+ void *handle = NULL;
pchan = chan->con_priv;
- ret = pcc_chan_reg_read(&pchan->cmd_complete, &val);
- if (ret)
+ if (pcc_chan_reg_read_modify_write(&pchan->plat_irq_ack))
return IRQ_NONE;
- if (val) { /* Ensure GAS exists and value is non-zero */
- val &= pchan->cmd_complete.status_mask;
- if (!val)
- return IRQ_NONE;
- }
+ if (pchan->type == ACPI_PCCT_TYPE_EXT_PCC_MASTER_SUBSPACE &&
+ !pchan->chan_in_use)
+ return IRQ_NONE;
- ret = pcc_chan_reg_read(&pchan->error, &val);
- if (ret)
+ if (!pcc_mbox_cmd_complete_check(pchan))
return IRQ_NONE;
- val &= pchan->error.status_mask;
- if (val) {
- val &= ~pchan->error.status_mask;
- pcc_chan_reg_write(&pchan->error, val);
+
+ if (pcc_mbox_error_check_and_clear(pchan))
return IRQ_NONE;
+
+ /*
+ * Clear this flag after updating interrupt ack register and just
+ * before mbox_chan_received_data() which might call pcc_send_data()
+ * where the flag is set again to start new transfer. This is
+	 * required to avoid any possible race in the update of this flag.
+ */
+ pchan->chan_in_use = false;
+
+ if (pchan->chan.rx_alloc)
+ handle = write_response(pchan);
+
+ if (chan->active_req) {
+ pcc_header = chan->active_req;
+ if (pcc_header->flags & PCC_CMD_COMPLETION_NOTIFY)
+ mbox_chan_txdone(chan, 0);
}
- if (pcc_chan_reg_read_modify_write(&pchan->plat_irq_ack))
- return IRQ_NONE;
+ mbox_chan_received_data(chan, handle);
- mbox_chan_received_data(chan, NULL);
+ pcc_chan_acknowledge(pchan);
return IRQ_HANDLED;
}
@@ -280,10 +389,10 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
struct pcc_mbox_chan *
pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id)
{
+ struct pcc_mbox_chan *pcc_mchan;
struct pcc_chan_info *pchan;
struct mbox_chan *chan;
- struct device *dev;
- unsigned long flags;
+ int rc;
if (subspace_id < 0 || subspace_id >= pcc_chan_count)
return ERR_PTR(-ENOENT);
@@ -294,34 +403,34 @@ pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id)
pr_err("Channel not found for idx: %d\n", subspace_id);
return ERR_PTR(-EBUSY);
}
- dev = chan->mbox->dev;
- spin_lock_irqsave(&chan->lock, flags);
- chan->msg_free = 0;
- chan->msg_count = 0;
- chan->active_req = NULL;
- chan->cl = cl;
- init_completion(&chan->tx_complete);
+ rc = mbox_bind_client(chan, cl);
+ if (rc)
+ return ERR_PTR(rc);
- if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
- chan->txdone_method = TXDONE_BY_ACK;
+ pcc_mchan = &pchan->chan;
+ pcc_mchan->shmem = acpi_os_ioremap(pcc_mchan->shmem_base_addr,
+ pcc_mchan->shmem_size);
+ if (!pcc_mchan->shmem)
+ goto err;
- spin_unlock_irqrestore(&chan->lock, flags);
+ pcc_mchan->manage_writes = false;
- if (pchan->plat_irq > 0) {
- int rc;
+	/*
+	 * This indicates that the channel is ready to accept messages. It
+	 * must happen after the channel has registered its callback, but
+	 * the mailbox API provides no hook for that. The mailbox client
+	 * must therefore set the rx_alloc callback before sending any
+	 * messages.
+	 */
+ if (pchan->type == ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE)
+ pcc_chan_reg_read_modify_write(&pchan->cmd_update);
- rc = devm_request_irq(dev, pchan->plat_irq, pcc_mbox_irq, 0,
- MBOX_IRQ_NAME, chan);
- if (unlikely(rc)) {
- dev_err(dev, "failed to register PCC interrupt %d\n",
- pchan->plat_irq);
- pcc_mbox_free_channel(&pchan->chan);
- return ERR_PTR(rc);
- }
- }
+ return pcc_mchan;
- return &pchan->chan;
+err:
+ mbox_free_channel(chan);
+ return ERR_PTR(-ENXIO);
}
EXPORT_SYMBOL_GPL(pcc_mbox_request_channel);
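
The reworked request path now maps the channel shared memory and binds the client via mbox_bind_client(). A hedged usage sketch follows; my_client, subspace_id, and the rx_alloc/manage_writes opt-ins are assumptions drawn from the surrounding code, not a documented API contract.

struct pcc_mbox_chan *pchan;

pchan = pcc_mbox_request_channel(&my_client, subspace_id);
if (IS_ERR(pchan))
	return PTR_ERR(pchan);

/* Optional opt-in to driver-managed transfers (see pcc_write_to_buffer()) */
pchan->rx_alloc = my_pcc_rx_alloc;
pchan->manage_writes = true;

/* ... exchange messages via mbox_send_message(pchan->mchan, ...) ... */

pcc_mbox_free_channel(pchan);	/* unmaps shmem and frees the channel */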
@@ -333,28 +442,55 @@ EXPORT_SYMBOL_GPL(pcc_mbox_request_channel);
*/
void pcc_mbox_free_channel(struct pcc_mbox_chan *pchan)
{
- struct pcc_chan_info *pchan_info = to_pcc_chan_info(pchan);
struct mbox_chan *chan = pchan->mchan;
- unsigned long flags;
+ struct pcc_chan_info *pchan_info;
+ struct pcc_mbox_chan *pcc_mbox_chan;
if (!chan || !chan->cl)
return;
+ pchan_info = chan->con_priv;
+ pcc_mbox_chan = &pchan_info->chan;
+ if (pcc_mbox_chan->shmem) {
+ iounmap(pcc_mbox_chan->shmem);
+ pcc_mbox_chan->shmem = NULL;
+ }
+
+ mbox_free_channel(chan);
+}
+EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
- if (pchan_info->plat_irq > 0)
- devm_free_irq(chan->mbox->dev, pchan_info->plat_irq, chan);
+static int pcc_write_to_buffer(struct mbox_chan *chan, void *data)
+{
+ struct pcc_chan_info *pchan = chan->con_priv;
+ struct pcc_mbox_chan *pcc_mbox_chan = &pchan->chan;
+ struct pcc_header *pcc_header = data;
- spin_lock_irqsave(&chan->lock, flags);
- chan->cl = NULL;
- chan->active_req = NULL;
- if (chan->txdone_method == TXDONE_BY_ACK)
- chan->txdone_method = TXDONE_BY_POLL;
+ if (!pchan->chan.manage_writes)
+ return 0;
- spin_unlock_irqrestore(&chan->lock, flags);
+	/*
+	 * The PCC header length covers the command field but not the
+	 * other fields of the header itself.
+	 */
+ int len = pcc_header->length - sizeof(u32) + sizeof(struct pcc_header);
+ u64 val;
+
+	pcc_chan_reg_read(&pchan->cmd_complete, &val);
+	if (!val) {
+		pr_info("%s: pchan->cmd_complete not set\n", __func__);
+		return -EIO;
+	}
+ memcpy_toio(pcc_mbox_chan->shmem, data, len);
+ return 0;
}
-EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
+
/**
- * pcc_send_data - Called from Mailbox Controller code. Used
+ * pcc_send_data - Called from Mailbox Controller code. If
+ * the channel manages writes (pchan->chan.manage_writes),
+ * the command complete flag is checked and the data is
+ * written to the shared buffer I/O memory.
+ *
+ * If pchan->chan.manage_writes is not set, then this is used
+ * here only to ring the channel doorbell. The PCC client
* specific read/write is done in the client driver in
* order to maintain atomicity over PCC channel once
@@ -370,15 +506,83 @@ static int pcc_send_data(struct mbox_chan *chan, void *data)
int ret;
struct pcc_chan_info *pchan = chan->con_priv;
+ ret = pcc_write_to_buffer(chan, data);
+ if (ret)
+ return ret;
+
ret = pcc_chan_reg_read_modify_write(&pchan->cmd_update);
if (ret)
return ret;
- return pcc_chan_reg_read_modify_write(&pchan->db);
+ ret = pcc_chan_reg_read_modify_write(&pchan->db);
+
+ if (!ret && pchan->plat_irq > 0)
+ pchan->chan_in_use = true;
+
+ return ret;
+}
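
A minimal sketch of a driver-managed transmit, assuming a struct pcc_header with the length and flags fields referenced in this patch; struct my_cmd, my_send(), and the payload value are illustrative.

struct my_cmd {
	struct pcc_header hdr;
	u32 payload;
} __packed;

static int my_send(struct pcc_mbox_chan *pchan)
{
	struct my_cmd cmd = {};

	/* length counts the command field and payload, not the rest of the header */
	cmd.hdr.length = sizeof(u32) + sizeof(cmd.payload);
	cmd.hdr.flags = PCC_CMD_COMPLETION_NOTIFY;	/* request a txdone notification */
	cmd.payload = 0x1;

	return mbox_send_message(pchan->mchan, &cmd);
}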
+
+static bool pcc_last_tx_done(struct mbox_chan *chan)
+{
+ struct pcc_chan_info *pchan = chan->con_priv;
+ u64 val;
+
+ pcc_chan_reg_read(&pchan->cmd_complete, &val);
+	return !!val;
+}
+
+
+/**
+ * pcc_startup - Called from Mailbox Controller code. Used here
+ * to request the interrupt.
+ * @chan: Pointer to Mailbox channel to startup.
+ *
+ * Return: Err if something failed else 0 for success.
+ */
+static int pcc_startup(struct mbox_chan *chan)
+{
+ struct pcc_chan_info *pchan = chan->con_priv;
+ unsigned long irqflags;
+ int rc;
+
+ if (pchan->plat_irq > 0) {
+ irqflags = pcc_chan_plat_irq_can_be_shared(pchan) ?
+ IRQF_SHARED | IRQF_ONESHOT : 0;
+ rc = devm_request_irq(chan->mbox->dev, pchan->plat_irq, pcc_mbox_irq,
+ irqflags, MBOX_IRQ_NAME, chan);
+ if (unlikely(rc)) {
+ dev_err(chan->mbox->dev, "failed to register PCC interrupt %d\n",
+ pchan->plat_irq);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * pcc_shutdown - Called from Mailbox Controller code. Used here
+ * to free the interrupt.
+ * @chan: Pointer to Mailbox channel to shutdown.
+ */
+static void pcc_shutdown(struct mbox_chan *chan)
+{
+ struct pcc_chan_info *pchan = chan->con_priv;
+
+ if (pchan->plat_irq > 0)
+ devm_free_irq(chan->mbox->dev, pchan->plat_irq, chan);
}
static const struct mbox_chan_ops pcc_chan_ops = {
.send_data = pcc_send_data,
+ .startup = pcc_startup,
+ .shutdown = pcc_shutdown,
+ .last_tx_done = pcc_last_tx_done,
};
/**
@@ -457,6 +661,7 @@ static int pcc_parse_subspace_irq(struct pcc_chan_info *pchan,
pcct_ss->platform_interrupt);
return -EINVAL;
}
+ pchan->plat_irq_flags = pcct_ss->flags;
if (pcct_ss->header.type == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) {
struct acpi_pcct_hw_reduced_type2 *pcct2_ss = (void *)pcct_ss;
@@ -478,6 +683,12 @@ static int pcc_parse_subspace_irq(struct pcc_chan_info *pchan,
"PLAT IRQ ACK");
}
+ if (pcc_chan_plat_irq_can_be_shared(pchan) &&
+ !pchan->plat_irq_ack.gas) {
+ pr_err("PCC subspace has level IRQ with no ACK register\n");
+ return -EINVAL;
+ }
+
return ret;
}
@@ -533,7 +744,8 @@ static int pcc_parse_subspace_db_reg(struct pcc_chan_info *pchan,
ret = pcc_chan_reg_init(&pchan->error,
&pcct_ext->error_status_register,
- 0, 0, pcct_ext->error_status_mask,
+ ~pcct_ext->error_status_mask, 0,
+ pcct_ext->error_status_mask,
"Error Status");
}
return ret;
@@ -692,6 +904,7 @@ static int pcc_mbox_probe(struct platform_device *pdev)
pcc_parse_subspace_shmem(pchan, pcct_entry);
+ pchan->type = pcct_entry->type;
pcct_entry = (struct acpi_subtable_header *)
((unsigned long) pcct_entry + pcct_entry->length);
}
diff --git a/drivers/mailbox/pl320-ipc.c b/drivers/mailbox/pl320-ipc.c
index fbcf07930390..606f26a2a6fd 100644
--- a/drivers/mailbox/pl320-ipc.c
+++ b/drivers/mailbox/pl320-ipc.c
@@ -45,18 +45,6 @@ static DEFINE_MUTEX(ipc_m1_lock);
static DECLARE_COMPLETION(ipc_completion);
static ATOMIC_NOTIFIER_HEAD(ipc_notifier);
-static inline void set_destination(int source, int mbox)
-{
- writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxDSET(mbox));
- writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxMSET(mbox));
-}
-
-static inline void clear_destination(int source, int mbox)
-{
- writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxDCLEAR(mbox));
- writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxMCLEAR(mbox));
-}
-
static void __ipc_send(int mbox, u32 *data)
{
int i;
@@ -164,7 +152,7 @@ err:
return ret;
}
-static struct amba_id pl320_ids[] = {
+static const struct amba_id pl320_ids[] = {
{
.id = 0x00041320,
.mask = 0x000fffff,
diff --git a/drivers/mailbox/platform_mhu.c b/drivers/mailbox/platform_mhu.c
index a5922ac0b0bf..834aecd720ac 100644
--- a/drivers/mailbox/platform_mhu.c
+++ b/drivers/mailbox/platform_mhu.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mailbox_controller.h>
@@ -135,10 +136,8 @@ static int platform_mhu_probe(struct platform_device *pdev)
for (i = 0; i < MHU_CHANS; i++) {
mhu->chan[i].con_priv = &mhu->mlink[i];
mhu->mlink[i].irq = platform_get_irq(pdev, i);
- if (mhu->mlink[i].irq < 0) {
- dev_err(dev, "failed to get irq%d\n", i);
+ if (mhu->mlink[i].irq < 0)
return mhu->mlink[i].irq;
- }
mhu->mlink[i].rx_reg = mhu->base + platform_mhu_reg[i];
mhu->mlink[i].tx_reg = mhu->mlink[i].rx_reg + TX_REG_OFFSET;
}
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
index 0e9f9cba8668..d3a8f6b4a03b 100644
--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -42,7 +42,7 @@ static const struct qcom_apcs_ipc_data msm8994_apcs_data = {
};
static const struct qcom_apcs_ipc_data msm8996_apcs_data = {
- .offset = 16, .clk_name = NULL
+ .offset = 16, .clk_name = "qcom-apcs-msm8996-clk"
};
static const struct qcom_apcs_ipc_data apps_shared_apcs_data = {
@@ -58,7 +58,6 @@ static const struct regmap_config apcs_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x1008,
- .fast_io = true,
};
static int qcom_apcs_ipc_send_data(struct mbox_chan *chan, void *data)
@@ -116,10 +115,18 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
}
if (apcs_data->clk_name) {
- apcs->clk = platform_device_register_data(&pdev->dev,
- apcs_data->clk_name,
- PLATFORM_DEVID_AUTO,
- NULL, 0);
+ struct device_node *np = of_get_child_by_name(pdev->dev.of_node,
+ "clock-controller");
+ struct platform_device_info pdevinfo = {
+ .parent = &pdev->dev,
+ .name = apcs_data->clk_name,
+ .id = PLATFORM_DEVID_AUTO,
+ .fwnode = of_fwnode_handle(np) ?: pdev->dev.fwnode,
+ .of_node_reused = !np,
+ };
+
+ apcs->clk = platform_device_register_full(&pdevinfo);
+ of_node_put(np);
if (IS_ERR(apcs->clk))
dev_err(&pdev->dev, "failed to register APCS clk\n");
}
@@ -129,38 +136,39 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
return 0;
}
-static int qcom_apcs_ipc_remove(struct platform_device *pdev)
+static void qcom_apcs_ipc_remove(struct platform_device *pdev)
{
struct qcom_apcs_ipc *apcs = platform_get_drvdata(pdev);
struct platform_device *clk = apcs->clk;
platform_device_unregister(clk);
-
- return 0;
}
/* .data is the offset of the ipc register within the global block */
static const struct of_device_id qcom_apcs_ipc_of_match[] = {
{ .compatible = "qcom,ipq6018-apcs-apps-global", .data = &ipq6018_apcs_data },
- { .compatible = "qcom,ipq8074-apcs-apps-global", .data = &ipq6018_apcs_data },
{ .compatible = "qcom,msm8916-apcs-kpss-global", .data = &msm8916_apcs_data },
{ .compatible = "qcom,msm8939-apcs-kpss-global", .data = &msm8916_apcs_data },
{ .compatible = "qcom,msm8953-apcs-kpss-global", .data = &msm8994_apcs_data },
- { .compatible = "qcom,msm8976-apcs-kpss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,msm8994-apcs-kpss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,msm8996-apcs-hmss-global", .data = &msm8996_apcs_data },
- { .compatible = "qcom,msm8998-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,qcm2290-apcs-hmss-global", .data = &msm8994_apcs_data },
+ { .compatible = "qcom,sdm845-apss-shared", .data = &apps_shared_apcs_data },
+ { .compatible = "qcom,sdx55-apcs-gcc", .data = &sdx55_apcs_data },
+ /* Do not add any more entries using existing driver data */
+ { .compatible = "qcom,msm8976-apcs-kpss-global", .data = &msm8994_apcs_data },
+ { .compatible = "qcom,msm8998-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,qcs404-apcs-apps-global", .data = &msm8916_apcs_data },
- { .compatible = "qcom,sc7180-apss-shared", .data = &apps_shared_apcs_data },
- { .compatible = "qcom,sc8180x-apss-shared", .data = &apps_shared_apcs_data },
{ .compatible = "qcom,sdm660-apcs-hmss-global", .data = &msm8994_apcs_data },
- { .compatible = "qcom,sdm845-apss-shared", .data = &apps_shared_apcs_data },
{ .compatible = "qcom,sm4250-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,sm6125-apcs-hmss-global", .data = &msm8994_apcs_data },
- { .compatible = "qcom,sm8150-apss-shared", .data = &apps_shared_apcs_data },
{ .compatible = "qcom,sm6115-apcs-hmss-global", .data = &msm8994_apcs_data },
- { .compatible = "qcom,sdx55-apcs-gcc", .data = &sdx55_apcs_data },
+ { .compatible = "qcom,ipq5332-apcs-apps-global", .data = &ipq6018_apcs_data },
+ { .compatible = "qcom,ipq5424-apcs-apps-global", .data = &msm8994_apcs_data },
+ { .compatible = "qcom,ipq8074-apcs-apps-global", .data = &ipq6018_apcs_data },
+ { .compatible = "qcom,sc7180-apss-shared", .data = &apps_shared_apcs_data },
+ { .compatible = "qcom,sc8180x-apss-shared", .data = &apps_shared_apcs_data },
+ { .compatible = "qcom,sm8150-apss-shared", .data = &apps_shared_apcs_data },
{}
};
MODULE_DEVICE_TABLE(of, qcom_apcs_ipc_of_match);
diff --git a/drivers/mailbox/qcom-cpucp-mbox.c b/drivers/mailbox/qcom-cpucp-mbox.c
new file mode 100644
index 000000000000..44f4ed15f818
--- /dev/null
+++ b/drivers/mailbox/qcom-cpucp-mbox.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define APSS_CPUCP_IPC_CHAN_SUPPORTED 3
+#define APSS_CPUCP_MBOX_CMD_OFF 0x4
+
+/* Tx Registers */
+#define APSS_CPUCP_TX_MBOX_CMD(i) (0x100 + ((i) * 8))
+
+/* Rx Registers */
+#define APSS_CPUCP_RX_MBOX_CMD(i) (0x100 + ((i) * 8))
+#define APSS_CPUCP_RX_MBOX_MAP 0x4000
+#define APSS_CPUCP_RX_MBOX_STAT 0x4400
+#define APSS_CPUCP_RX_MBOX_CLEAR 0x4800
+#define APSS_CPUCP_RX_MBOX_EN 0x4c00
+#define APSS_CPUCP_RX_MBOX_CMD_MASK GENMASK_ULL(63, 0)
+
+/**
+ * struct qcom_cpucp_mbox - Holder for the mailbox driver
+ * @chans: The mailbox channels
+ * @mbox: The mailbox controller
+ * @tx_base: Base address of the CPUCP tx registers
+ * @rx_base: Base address of the CPUCP rx registers
+ */
+struct qcom_cpucp_mbox {
+ struct mbox_chan chans[APSS_CPUCP_IPC_CHAN_SUPPORTED];
+ struct mbox_controller mbox;
+ void __iomem *tx_base;
+ void __iomem *rx_base;
+};
+
+static inline int channel_number(struct mbox_chan *chan)
+{
+ return chan - chan->mbox->chans;
+}
+
+static irqreturn_t qcom_cpucp_mbox_irq_fn(int irq, void *data)
+{
+ struct qcom_cpucp_mbox *cpucp = data;
+ u64 status;
+ int i;
+
+ status = readq(cpucp->rx_base + APSS_CPUCP_RX_MBOX_STAT);
+
+ for_each_set_bit(i, (unsigned long *)&status, APSS_CPUCP_IPC_CHAN_SUPPORTED) {
+ u32 val = readl(cpucp->rx_base + APSS_CPUCP_RX_MBOX_CMD(i) + APSS_CPUCP_MBOX_CMD_OFF);
+ struct mbox_chan *chan = &cpucp->chans[i];
+ unsigned long flags;
+
+ /* Provide mutual exclusion with changes to chan->cl */
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->cl)
+ mbox_chan_received_data(chan, &val);
+ writeq(BIT(i), cpucp->rx_base + APSS_CPUCP_RX_MBOX_CLEAR);
+ spin_unlock_irqrestore(&chan->lock, flags);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int qcom_cpucp_mbox_startup(struct mbox_chan *chan)
+{
+ struct qcom_cpucp_mbox *cpucp = container_of(chan->mbox, struct qcom_cpucp_mbox, mbox);
+ unsigned long chan_id = channel_number(chan);
+ u64 val;
+
+ val = readq(cpucp->rx_base + APSS_CPUCP_RX_MBOX_EN);
+ val |= BIT(chan_id);
+ writeq(val, cpucp->rx_base + APSS_CPUCP_RX_MBOX_EN);
+
+ return 0;
+}
+
+static void qcom_cpucp_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct qcom_cpucp_mbox *cpucp = container_of(chan->mbox, struct qcom_cpucp_mbox, mbox);
+ unsigned long chan_id = channel_number(chan);
+ u64 val;
+
+ val = readq(cpucp->rx_base + APSS_CPUCP_RX_MBOX_EN);
+ val &= ~BIT(chan_id);
+ writeq(val, cpucp->rx_base + APSS_CPUCP_RX_MBOX_EN);
+}
+
+static int qcom_cpucp_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct qcom_cpucp_mbox *cpucp = container_of(chan->mbox, struct qcom_cpucp_mbox, mbox);
+ unsigned long chan_id = channel_number(chan);
+ u32 *val = data;
+
+ writel(*val, cpucp->tx_base + APSS_CPUCP_TX_MBOX_CMD(chan_id) + APSS_CPUCP_MBOX_CMD_OFF);
+
+ return 0;
+}
+
+static const struct mbox_chan_ops qcom_cpucp_mbox_chan_ops = {
+ .startup = qcom_cpucp_mbox_startup,
+ .send_data = qcom_cpucp_mbox_send_data,
+ .shutdown = qcom_cpucp_mbox_shutdown
+};
+
+static int qcom_cpucp_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct qcom_cpucp_mbox *cpucp;
+ struct mbox_controller *mbox;
+ int irq, ret;
+
+ cpucp = devm_kzalloc(dev, sizeof(*cpucp), GFP_KERNEL);
+ if (!cpucp)
+ return -ENOMEM;
+
+ cpucp->rx_base = devm_of_iomap(dev, dev->of_node, 0, NULL);
+ if (IS_ERR(cpucp->rx_base))
+ return PTR_ERR(cpucp->rx_base);
+
+ cpucp->tx_base = devm_of_iomap(dev, dev->of_node, 1, NULL);
+ if (IS_ERR(cpucp->tx_base))
+ return PTR_ERR(cpucp->tx_base);
+
+ writeq(0, cpucp->rx_base + APSS_CPUCP_RX_MBOX_EN);
+ writeq(0, cpucp->rx_base + APSS_CPUCP_RX_MBOX_CLEAR);
+ writeq(0, cpucp->rx_base + APSS_CPUCP_RX_MBOX_MAP);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, qcom_cpucp_mbox_irq_fn,
+ IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND, "apss_cpucp_mbox", cpucp);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to register irq: %d\n", irq);
+
+ writeq(APSS_CPUCP_RX_MBOX_CMD_MASK, cpucp->rx_base + APSS_CPUCP_RX_MBOX_MAP);
+
+ mbox = &cpucp->mbox;
+ mbox->dev = dev;
+ mbox->num_chans = APSS_CPUCP_IPC_CHAN_SUPPORTED;
+ mbox->chans = cpucp->chans;
+ mbox->ops = &qcom_cpucp_mbox_chan_ops;
+
+ ret = devm_mbox_controller_register(dev, mbox);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to create mailbox\n");
+
+ return 0;
+}
+
+static const struct of_device_id qcom_cpucp_mbox_of_match[] = {
+ { .compatible = "qcom,x1e80100-cpucp-mbox" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_cpucp_mbox_of_match);
+
+static struct platform_driver qcom_cpucp_mbox_driver = {
+ .probe = qcom_cpucp_mbox_probe,
+ .driver = {
+ .name = "qcom_cpucp_mbox",
+ .of_match_table = qcom_cpucp_mbox_of_match,
+ },
+};
+
+static int __init qcom_cpucp_mbox_init(void)
+{
+ return platform_driver_register(&qcom_cpucp_mbox_driver);
+}
+core_initcall(qcom_cpucp_mbox_init);
+
+static void __exit qcom_cpucp_mbox_exit(void)
+{
+ platform_driver_unregister(&qcom_cpucp_mbox_driver);
+}
+module_exit(qcom_cpucp_mbox_exit);
+
+MODULE_DESCRIPTION("QTI CPUCP MBOX Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c
index 7e27acf6c0cc..d957d989c0ce 100644
--- a/drivers/mailbox/qcom-ipcc.c
+++ b/drivers/mailbox/qcom-ipcc.c
@@ -14,6 +14,7 @@
#include <dt-bindings/mailbox/qcom-ipcc.h>
/* IPCC Register offsets */
+#define IPCC_REG_CONFIG 0x08
#define IPCC_REG_SEND_ID 0x0c
#define IPCC_REG_RECV_ID 0x10
#define IPCC_REG_RECV_SIGNAL_ENABLE 0x14
@@ -21,6 +22,7 @@
#define IPCC_REG_RECV_SIGNAL_CLEAR 0x1c
#define IPCC_REG_CLIENT_CLEAR 0x38
+#define IPCC_CLEAR_ON_RECV_RD BIT(0)
#define IPCC_SIGNAL_ID_MASK GENMASK(15, 0)
#define IPCC_CLIENT_ID_MASK GENMASK(31, 16)
@@ -227,10 +229,8 @@ static int qcom_ipcc_setup_mbox(struct qcom_ipcc *ipcc,
ret = of_parse_phandle_with_args(client_dn, "mboxes",
"#mbox-cells", j, &curr_ph);
of_node_put(curr_ph.np);
- if (!ret && curr_ph.np == controller_dn) {
+ if (!ret && curr_ph.np == controller_dn)
ipcc->num_chans++;
- break;
- }
}
}
@@ -276,6 +276,7 @@ static int qcom_ipcc_pm_resume(struct device *dev)
static int qcom_ipcc_probe(struct platform_device *pdev)
{
struct qcom_ipcc *ipcc;
+ u32 config_value;
static int id;
char *name;
int ret;
@@ -290,6 +291,19 @@ static int qcom_ipcc_probe(struct platform_device *pdev)
if (IS_ERR(ipcc->base))
return PTR_ERR(ipcc->base);
+	/*
+	 * It is possible that the boot firmware is using the same IPCC
+	 * instance as the HLOS and has left CLEAR_ON_RECV_RD set, which
+	 * means the interrupt pending registers are cleared when RECV_ID
+	 * is read. The register then automatically updates to the next
+	 * pending interrupt/client status based on priority.
+	 */
+ config_value = readl(ipcc->base + IPCC_REG_CONFIG);
+ if (config_value & IPCC_CLEAR_ON_RECV_RD) {
+ config_value &= ~(IPCC_CLEAR_ON_RECV_RD);
+ writel(config_value, ipcc->base + IPCC_REG_CONFIG);
+ }
+
ipcc->irq = platform_get_irq(pdev, 0);
if (ipcc->irq < 0)
return ipcc->irq;
@@ -298,8 +312,7 @@ static int qcom_ipcc_probe(struct platform_device *pdev)
if (!name)
return -ENOMEM;
- ipcc->irq_domain = irq_domain_add_tree(pdev->dev.of_node,
- &qcom_ipcc_irq_ops, ipcc);
+ ipcc->irq_domain = irq_domain_create_tree(dev_fwnode(&pdev->dev), &qcom_ipcc_irq_ops, ipcc);
if (!ipcc->irq_domain)
return -ENOMEM;
@@ -328,14 +341,12 @@ err_mbox:
return ret;
}
-static int qcom_ipcc_remove(struct platform_device *pdev)
+static void qcom_ipcc_remove(struct platform_device *pdev)
{
struct qcom_ipcc *ipcc = platform_get_drvdata(pdev);
disable_irq_wake(ipcc->irq);
irq_domain_remove(ipcc->irq_domain);
-
- return 0;
}
static const struct of_device_id qcom_ipcc_of_match[] = {
diff --git a/drivers/mailbox/riscv-sbi-mpxy-mbox.c b/drivers/mailbox/riscv-sbi-mpxy-mbox.c
new file mode 100644
index 000000000000..7c9c006b7244
--- /dev/null
+++ b/drivers/mailbox/riscv-sbi-mpxy-mbox.c
@@ -0,0 +1,1019 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RISC-V SBI Message Proxy (MPXY) mailbox controller driver
+ *
+ * Copyright (C) 2025 Ventana Micro Systems Inc.
+ */
+
+#include <linux/acpi.h>
+#include <linux/cpu.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/irqchip/riscv-imsic.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox/riscv-rpmi-message.h>
+#include <linux/minmax.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_irq.h>
+#include <linux/percpu.h>
+#include <linux/platform_device.h>
+#include <linux/smp.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/sbi.h>
+
+/* ====== SBI MPXY extension data structures ====== */
+
+/* SBI MPXY MSI related channel attributes */
+struct sbi_mpxy_msi_info {
+ /* Lower 32-bits of the MSI target address */
+ u32 msi_addr_lo;
+ /* Upper 32-bits of the MSI target address */
+ u32 msi_addr_hi;
+ /* MSI data value */
+ u32 msi_data;
+};
+
+/*
+ * SBI MPXY standard channel attributes.
+ *
+ * NOTE: The sequence of attribute fields is as per the
+ * attribute table defined in the spec (see
+ * enum sbi_mpxy_attribute_id).
+ */
+struct sbi_mpxy_channel_attrs {
+ /* Message protocol ID */
+ u32 msg_proto_id;
+ /* Message protocol version */
+ u32 msg_proto_version;
+ /* Message protocol maximum message length */
+ u32 msg_max_len;
+ /* Message protocol message send timeout in microseconds */
+ u32 msg_send_timeout;
+ /* Message protocol message completion timeout in microseconds */
+ u32 msg_completion_timeout;
+ /* Bit array for channel capabilities */
+ u32 capability;
+ /* SSE event ID */
+ u32 sse_event_id;
+ /* MSI enable/disable control knob */
+ u32 msi_control;
+ /* Channel MSI info */
+ struct sbi_mpxy_msi_info msi_info;
+ /* Events state control */
+ u32 events_state_ctrl;
+};
+
+/*
+ * RPMI specific SBI MPXY channel attributes.
+ *
+ * NOTE: The sequence of attribute fields is as per the
+ * attribute table defined in the spec (see
+ * enum sbi_mpxy_rpmi_attribute_id).
+ */
+struct sbi_mpxy_rpmi_channel_attrs {
+ /* RPMI service group ID */
+ u32 servicegroup_id;
+ /* RPMI service group version */
+ u32 servicegroup_version;
+ /* RPMI implementation ID */
+ u32 impl_id;
+ /* RPMI implementation version */
+ u32 impl_version;
+};
+
+/* SBI MPXY channel IDs data in shared memory */
+struct sbi_mpxy_channel_ids_data {
+ /* Remaining number of channel ids */
+ __le32 remaining;
+ /* Returned channel ids in current function call */
+ __le32 returned;
+ /* Returned channel id array */
+ __le32 channel_array[];
+};
+
+/* SBI MPXY notification data in shared memory */
+struct sbi_mpxy_notification_data {
+ /* Remaining number of notification events */
+ __le32 remaining;
+ /* Number of notification events returned */
+ __le32 returned;
+ /* Number of notification events lost */
+ __le32 lost;
+ /* Reserved for future use */
+ __le32 reserved;
+	/* Returned notification events data */
+ u8 events_data[];
+};
+
+/* ====== MPXY data structures & helper routines ====== */
+
+/* MPXY Per-CPU or local context */
+struct mpxy_local {
+ /* Shared memory base address */
+ void *shmem;
+ /* Shared memory physical address */
+ phys_addr_t shmem_phys_addr;
+ /* Flag representing whether shared memory is active or not */
+ bool shmem_active;
+};
+
+static DEFINE_PER_CPU(struct mpxy_local, mpxy_local);
+static unsigned long mpxy_shmem_size;
+static bool mpxy_shmem_init_done;
+
+static int mpxy_get_channel_count(u32 *channel_count)
+{
+ struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
+ struct sbi_mpxy_channel_ids_data *sdata = mpxy->shmem;
+ u32 remaining, returned;
+ struct sbiret sret;
+
+ if (!mpxy->shmem_active)
+ return -ENODEV;
+ if (!channel_count)
+ return -EINVAL;
+
+ get_cpu();
+
+ /* Get the remaining and returned fields to calculate total */
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_GET_CHANNEL_IDS,
+ 0, 0, 0, 0, 0, 0);
+ if (sret.error)
+ goto err_put_cpu;
+
+ remaining = le32_to_cpu(sdata->remaining);
+ returned = le32_to_cpu(sdata->returned);
+ *channel_count = remaining + returned;
+
+err_put_cpu:
+ put_cpu();
+ return sbi_err_map_linux_errno(sret.error);
+}
+
+static int mpxy_get_channel_ids(u32 channel_count, u32 *channel_ids)
+{
+ struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
+ struct sbi_mpxy_channel_ids_data *sdata = mpxy->shmem;
+ u32 remaining, returned, count, start_index = 0;
+ struct sbiret sret;
+
+ if (!mpxy->shmem_active)
+ return -ENODEV;
+ if (!channel_count || !channel_ids)
+ return -EINVAL;
+
+ get_cpu();
+
+ do {
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_GET_CHANNEL_IDS,
+ start_index, 0, 0, 0, 0, 0);
+ if (sret.error)
+ goto err_put_cpu;
+
+ remaining = le32_to_cpu(sdata->remaining);
+ returned = le32_to_cpu(sdata->returned);
+
+		count = min(returned, channel_count - start_index);
+ memcpy_from_le32(&channel_ids[start_index], sdata->channel_array, count);
+ start_index += count;
+ } while (remaining && start_index < channel_count);
+
+err_put_cpu:
+ put_cpu();
+ return sbi_err_map_linux_errno(sret.error);
+}
+
+static int mpxy_read_attrs(u32 channel_id, u32 base_attrid, u32 attr_count,
+ u32 *attrs_buf)
+{
+ struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
+ struct sbiret sret;
+
+ if (!mpxy->shmem_active)
+ return -ENODEV;
+ if (!attr_count || !attrs_buf)
+ return -EINVAL;
+
+ get_cpu();
+
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_READ_ATTRS,
+ channel_id, base_attrid, attr_count, 0, 0, 0);
+ if (sret.error)
+ goto err_put_cpu;
+
+ memcpy_from_le32(attrs_buf, (__le32 *)mpxy->shmem, attr_count);
+
+err_put_cpu:
+ put_cpu();
+ return sbi_err_map_linux_errno(sret.error);
+}
+
+static int mpxy_write_attrs(u32 channel_id, u32 base_attrid, u32 attr_count,
+ u32 *attrs_buf)
+{
+ struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
+ struct sbiret sret;
+
+ if (!mpxy->shmem_active)
+ return -ENODEV;
+ if (!attr_count || !attrs_buf)
+ return -EINVAL;
+
+ get_cpu();
+
+ memcpy_to_le32((__le32 *)mpxy->shmem, attrs_buf, attr_count);
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_WRITE_ATTRS,
+ channel_id, base_attrid, attr_count, 0, 0, 0);
+
+ put_cpu();
+ return sbi_err_map_linux_errno(sret.error);
+}
+
+static int mpxy_send_message_with_resp(u32 channel_id, u32 msg_id,
+ void *tx, unsigned long tx_len,
+ void *rx, unsigned long max_rx_len,
+ unsigned long *rx_len)
+{
+ struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
+ unsigned long rx_bytes;
+ struct sbiret sret;
+
+ if (!mpxy->shmem_active)
+ return -ENODEV;
+ if (!tx && tx_len)
+ return -EINVAL;
+
+ get_cpu();
+
+	/* Message protocols are allowed to have no data in messages */
+ if (tx_len)
+ memcpy(mpxy->shmem, tx, tx_len);
+
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_SEND_MSG_WITH_RESP,
+ channel_id, msg_id, tx_len, 0, 0, 0);
+ if (rx && !sret.error) {
+ rx_bytes = sret.value;
+ if (rx_bytes > max_rx_len) {
+ put_cpu();
+ return -ENOSPC;
+ }
+
+ memcpy(rx, mpxy->shmem, rx_bytes);
+ if (rx_len)
+ *rx_len = rx_bytes;
+ }
+
+ put_cpu();
+ return sbi_err_map_linux_errno(sret.error);
+}
+
+static int mpxy_send_message_without_resp(u32 channel_id, u32 msg_id,
+ void *tx, unsigned long tx_len)
+{
+ struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
+ struct sbiret sret;
+
+ if (!mpxy->shmem_active)
+ return -ENODEV;
+ if (!tx && tx_len)
+ return -EINVAL;
+
+ get_cpu();
+
+	/* Message protocols are allowed to have no data in messages */
+ if (tx_len)
+ memcpy(mpxy->shmem, tx, tx_len);
+
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_SEND_MSG_WITHOUT_RESP,
+ channel_id, msg_id, tx_len, 0, 0, 0);
+
+ put_cpu();
+ return sbi_err_map_linux_errno(sret.error);
+}
+
+static int mpxy_get_notifications(u32 channel_id,
+ struct sbi_mpxy_notification_data *notif_data,
+ unsigned long *events_data_len)
+{
+ struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
+ struct sbiret sret;
+
+ if (!mpxy->shmem_active)
+ return -ENODEV;
+ if (!notif_data || !events_data_len)
+ return -EINVAL;
+
+ get_cpu();
+
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_GET_NOTIFICATION_EVENTS,
+ channel_id, 0, 0, 0, 0, 0);
+ if (sret.error)
+ goto err_put_cpu;
+
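+	/* Copy the 16-byte notification header (remaining/returned/lost/reserved) plus the event data */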
+ memcpy(notif_data, mpxy->shmem, sret.value + 16);
+ *events_data_len = sret.value;
+
+err_put_cpu:
+ put_cpu();
+ return sbi_err_map_linux_errno(sret.error);
+}
+
+static int mpxy_get_shmem_size(unsigned long *shmem_size)
+{
+ struct sbiret sret;
+
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_GET_SHMEM_SIZE,
+ 0, 0, 0, 0, 0, 0);
+ if (sret.error)
+ return sbi_err_map_linux_errno(sret.error);
+ if (shmem_size)
+ *shmem_size = sret.value;
+ return 0;
+}
+
+static int mpxy_setup_shmem(unsigned int cpu)
+{
+ struct page *shmem_page;
+ struct mpxy_local *mpxy;
+ struct sbiret sret;
+
+ mpxy = per_cpu_ptr(&mpxy_local, cpu);
+ if (mpxy->shmem_active)
+ return 0;
+
+ shmem_page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(mpxy_shmem_size));
+ if (!shmem_page)
+ return -ENOMEM;
+
+ /*
+ * Linux setup of shmem is done in mpxy OVERWRITE mode.
+ * flags[1:0] = 00b
+ */
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_SET_SHMEM,
+ page_to_phys(shmem_page), 0, 0, 0, 0, 0);
+ if (sret.error) {
+ free_pages((unsigned long)page_to_virt(shmem_page),
+ get_order(mpxy_shmem_size));
+ return sbi_err_map_linux_errno(sret.error);
+ }
+
+ mpxy->shmem = page_to_virt(shmem_page);
+ mpxy->shmem_phys_addr = page_to_phys(shmem_page);
+ mpxy->shmem_active = true;
+
+ return 0;
+}
+
+/* ====== MPXY mailbox data structures ====== */
+
+/* MPXY mailbox channel */
+struct mpxy_mbox_channel {
+ struct mpxy_mbox *mbox;
+ u32 channel_id;
+ struct sbi_mpxy_channel_attrs attrs;
+ struct sbi_mpxy_rpmi_channel_attrs rpmi_attrs;
+ struct sbi_mpxy_notification_data *notif;
+ u32 max_xfer_len;
+ bool have_events_state;
+ u32 msi_index;
+ u32 msi_irq;
+ bool started;
+};
+
+/* MPXY mailbox */
+struct mpxy_mbox {
+ struct device *dev;
+ u32 channel_count;
+ struct mpxy_mbox_channel *channels;
+ u32 msi_count;
+ struct mpxy_mbox_channel **msi_index_to_channel;
+ struct mbox_controller controller;
+};
+
+/* ====== MPXY RPMI processing ====== */
+
+static void mpxy_mbox_send_rpmi_data(struct mpxy_mbox_channel *mchan,
+ struct rpmi_mbox_message *msg)
+{
+ msg->error = 0;
+ switch (msg->type) {
+ case RPMI_MBOX_MSG_TYPE_GET_ATTRIBUTE:
+ switch (msg->attr.id) {
+ case RPMI_MBOX_ATTR_SPEC_VERSION:
+ msg->attr.value = mchan->attrs.msg_proto_version;
+ break;
+ case RPMI_MBOX_ATTR_MAX_MSG_DATA_SIZE:
+ msg->attr.value = mchan->max_xfer_len;
+ break;
+ case RPMI_MBOX_ATTR_SERVICEGROUP_ID:
+ msg->attr.value = mchan->rpmi_attrs.servicegroup_id;
+ break;
+ case RPMI_MBOX_ATTR_SERVICEGROUP_VERSION:
+ msg->attr.value = mchan->rpmi_attrs.servicegroup_version;
+ break;
+ case RPMI_MBOX_ATTR_IMPL_ID:
+ msg->attr.value = mchan->rpmi_attrs.impl_id;
+ break;
+ case RPMI_MBOX_ATTR_IMPL_VERSION:
+ msg->attr.value = mchan->rpmi_attrs.impl_version;
+ break;
+ default:
+ msg->error = -EOPNOTSUPP;
+ break;
+ }
+ break;
+ case RPMI_MBOX_MSG_TYPE_SET_ATTRIBUTE:
+		/* None of the RPMI Linux mailbox attributes are writable */
+ msg->error = -EOPNOTSUPP;
+ break;
+ case RPMI_MBOX_MSG_TYPE_SEND_WITH_RESPONSE:
+ if ((!msg->data.request && msg->data.request_len) ||
+ (msg->data.request && msg->data.request_len > mchan->max_xfer_len) ||
+ (!msg->data.response && msg->data.max_response_len)) {
+ msg->error = -EINVAL;
+ break;
+ }
+ if (!(mchan->attrs.capability & SBI_MPXY_CHAN_CAP_SEND_WITH_RESP)) {
+ msg->error = -EIO;
+ break;
+ }
+ msg->error = mpxy_send_message_with_resp(mchan->channel_id,
+ msg->data.service_id,
+ msg->data.request,
+ msg->data.request_len,
+ msg->data.response,
+ msg->data.max_response_len,
+ &msg->data.out_response_len);
+ break;
+ case RPMI_MBOX_MSG_TYPE_SEND_WITHOUT_RESPONSE:
+ if ((!msg->data.request && msg->data.request_len) ||
+ (msg->data.request && msg->data.request_len > mchan->max_xfer_len)) {
+ msg->error = -EINVAL;
+ break;
+ }
+ if (!(mchan->attrs.capability & SBI_MPXY_CHAN_CAP_SEND_WITHOUT_RESP)) {
+ msg->error = -EIO;
+ break;
+ }
+ msg->error = mpxy_send_message_without_resp(mchan->channel_id,
+ msg->data.service_id,
+ msg->data.request,
+ msg->data.request_len);
+ break;
+ default:
+ msg->error = -EOPNOTSUPP;
+ break;
+ }
+}
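
For the consumer side, a hedged sketch of issuing an RPMI request through this channel; MY_RPMI_SERVICE_ID and the req_buf/resp_buf buffers are assumptions, while the message fields match those consumed by mpxy_mbox_send_rpmi_data() above.

struct rpmi_mbox_message msg = {
	.type = RPMI_MBOX_MSG_TYPE_SEND_WITH_RESPONSE,
	.data = {
		.service_id = MY_RPMI_SERVICE_ID,	/* hypothetical service */
		.request = req_buf,
		.request_len = sizeof(req_buf),
		.response = resp_buf,
		.max_response_len = sizeof(resp_buf),
	},
};
int ret;

ret = mbox_send_message(chan, &msg);
if (ret < 0)
	return ret;
if (msg.error)		/* per-message status filled in by mpxy_mbox_send_data() */
	return msg.error;
/* msg.data.out_response_len now holds the actual response length */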
+
+static void mpxy_mbox_peek_rpmi_data(struct mbox_chan *chan,
+ struct mpxy_mbox_channel *mchan,
+ struct sbi_mpxy_notification_data *notif,
+ unsigned long events_data_len)
+{
+ struct rpmi_notification_event *event;
+ struct rpmi_mbox_message msg;
+ unsigned long pos = 0;
+
+	while (pos < events_data_len && (events_data_len - pos) >= sizeof(*event)) {
+ event = (struct rpmi_notification_event *)(notif->events_data + pos);
+
+ msg.type = RPMI_MBOX_MSG_TYPE_NOTIFICATION_EVENT;
+ msg.notif.event_datalen = le16_to_cpu(event->event_datalen);
+ msg.notif.event_id = event->event_id;
+ msg.notif.event_data = event->event_data;
+ msg.error = 0;
+
+ mbox_chan_received_data(chan, &msg);
+ pos += sizeof(*event) + msg.notif.event_datalen;
+ }
+}
+
+static int mpxy_mbox_read_rpmi_attrs(struct mpxy_mbox_channel *mchan)
+{
+ return mpxy_read_attrs(mchan->channel_id,
+ SBI_MPXY_ATTR_MSGPROTO_ATTR_START,
+ sizeof(mchan->rpmi_attrs) / sizeof(u32),
+ (u32 *)&mchan->rpmi_attrs);
+}
+
+/* ====== MPXY mailbox callbacks ====== */
+
+static int mpxy_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct mpxy_mbox_channel *mchan = chan->con_priv;
+
+ if (mchan->attrs.msg_proto_id == SBI_MPXY_MSGPROTO_RPMI_ID) {
+ mpxy_mbox_send_rpmi_data(mchan, data);
+ return 0;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static bool mpxy_mbox_peek_data(struct mbox_chan *chan)
+{
+ struct mpxy_mbox_channel *mchan = chan->con_priv;
+ struct sbi_mpxy_notification_data *notif = mchan->notif;
+ bool have_notifications = false;
+ unsigned long data_len;
+ int rc;
+
+ if (!(mchan->attrs.capability & SBI_MPXY_CHAN_CAP_GET_NOTIFICATIONS))
+ return false;
+
+ do {
+ rc = mpxy_get_notifications(mchan->channel_id, notif, &data_len);
+ if (rc || !data_len)
+ break;
+
+ if (mchan->attrs.msg_proto_id == SBI_MPXY_MSGPROTO_RPMI_ID)
+ mpxy_mbox_peek_rpmi_data(chan, mchan, notif, data_len);
+
+ have_notifications = true;
+ } while (1);
+
+ return have_notifications;
+}
+
+static irqreturn_t mpxy_mbox_irq_thread(int irq, void *dev_id)
+{
+ mpxy_mbox_peek_data(dev_id);
+ return IRQ_HANDLED;
+}
+
+static int mpxy_mbox_setup_msi(struct mbox_chan *chan,
+ struct mpxy_mbox_channel *mchan)
+{
+ struct device *dev = mchan->mbox->dev;
+ int rc;
+
+ /* Do nothing if MSI not supported */
+ if (mchan->msi_irq == U32_MAX)
+ return 0;
+
+ /* Fail if MSI already enabled */
+ if (mchan->attrs.msi_control)
+ return -EALREADY;
+
+ /* Request channel MSI handler */
+ rc = request_threaded_irq(mchan->msi_irq, NULL, mpxy_mbox_irq_thread,
+ 0, dev_name(dev), chan);
+ if (rc) {
+ dev_err(dev, "failed to request MPXY channel 0x%x IRQ\n",
+ mchan->channel_id);
+ return rc;
+ }
+
+ /* Enable channel MSI control */
+ mchan->attrs.msi_control = 1;
+ rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_MSI_CONTROL,
+ 1, &mchan->attrs.msi_control);
+ if (rc) {
+ dev_err(dev, "enable MSI control failed for MPXY channel 0x%x\n",
+ mchan->channel_id);
+ mchan->attrs.msi_control = 0;
+ free_irq(mchan->msi_irq, chan);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void mpxy_mbox_cleanup_msi(struct mbox_chan *chan,
+ struct mpxy_mbox_channel *mchan)
+{
+ struct device *dev = mchan->mbox->dev;
+ int rc;
+
+ /* Do nothing if MSI not supported */
+ if (mchan->msi_irq == U32_MAX)
+ return;
+
+ /* Do nothing if MSI already disabled */
+ if (!mchan->attrs.msi_control)
+ return;
+
+ /* Disable channel MSI control */
+ mchan->attrs.msi_control = 0;
+ rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_MSI_CONTROL,
+ 1, &mchan->attrs.msi_control);
+ if (rc) {
+ dev_err(dev, "disable MSI control failed for MPXY channel 0x%x\n",
+ mchan->channel_id);
+ }
+
+ /* Free channel MSI handler */
+ free_irq(mchan->msi_irq, chan);
+}
+
+static int mpxy_mbox_setup_events(struct mpxy_mbox_channel *mchan)
+{
+ struct device *dev = mchan->mbox->dev;
+ int rc;
+
+ /* Do nothing if events state not supported */
+ if (!mchan->have_events_state)
+ return 0;
+
+ /* Fail if events state already enabled */
+ if (mchan->attrs.events_state_ctrl)
+ return -EALREADY;
+
+ /* Enable channel events state */
+ mchan->attrs.events_state_ctrl = 1;
+ rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_EVENTS_STATE_CONTROL,
+ 1, &mchan->attrs.events_state_ctrl);
+ if (rc) {
+ dev_err(dev, "enable events state failed for MPXY channel 0x%x\n",
+ mchan->channel_id);
+ mchan->attrs.events_state_ctrl = 0;
+ return rc;
+ }
+
+ return 0;
+}
+
+static void mpxy_mbox_cleanup_events(struct mpxy_mbox_channel *mchan)
+{
+ struct device *dev = mchan->mbox->dev;
+ int rc;
+
+ /* Do nothing if events state not supported */
+ if (!mchan->have_events_state)
+ return;
+
+ /* Do nothing if events state already disabled */
+ if (!mchan->attrs.events_state_ctrl)
+ return;
+
+ /* Disable channel events state */
+ mchan->attrs.events_state_ctrl = 0;
+ rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_EVENTS_STATE_CONTROL,
+ 1, &mchan->attrs.events_state_ctrl);
+ if (rc)
+ dev_err(dev, "disable events state failed for MPXY channel 0x%x\n",
+ mchan->channel_id);
+}
+
+static int mpxy_mbox_startup(struct mbox_chan *chan)
+{
+ struct mpxy_mbox_channel *mchan = chan->con_priv;
+ int rc;
+
+ if (mchan->started)
+ return -EALREADY;
+
+ /* Setup channel MSI */
+ rc = mpxy_mbox_setup_msi(chan, mchan);
+ if (rc)
+ return rc;
+
+ /* Setup channel notification events */
+ rc = mpxy_mbox_setup_events(mchan);
+ if (rc) {
+ mpxy_mbox_cleanup_msi(chan, mchan);
+ return rc;
+ }
+
+ /* Mark the channel as started */
+ mchan->started = true;
+
+ return 0;
+}
+
+static void mpxy_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct mpxy_mbox_channel *mchan = chan->con_priv;
+
+ if (!mchan->started)
+ return;
+
+ /* Mark the channel as stopped */
+ mchan->started = false;
+
+ /* Cleanup channel notification events */
+ mpxy_mbox_cleanup_events(mchan);
+
+ /* Cleanup channel MSI */
+ mpxy_mbox_cleanup_msi(chan, mchan);
+}
+
+static const struct mbox_chan_ops mpxy_mbox_ops = {
+ .send_data = mpxy_mbox_send_data,
+ .peek_data = mpxy_mbox_peek_data,
+ .startup = mpxy_mbox_startup,
+ .shutdown = mpxy_mbox_shutdown,
+};
+
+/* ====== MPXY platform driver ===== */
+
+static void mpxy_mbox_msi_write(struct msi_desc *desc, struct msi_msg *msg)
+{
+ struct device *dev = msi_desc_to_dev(desc);
+ struct mpxy_mbox *mbox = dev_get_drvdata(dev);
+ struct mpxy_mbox_channel *mchan;
+ struct sbi_mpxy_msi_info *minfo;
+ int rc;
+
+ mchan = mbox->msi_index_to_channel[desc->msi_index];
+ if (!mchan) {
+ dev_warn(dev, "MPXY channel not available for MSI index %d\n",
+ desc->msi_index);
+ return;
+ }
+
+ minfo = &mchan->attrs.msi_info;
+ minfo->msi_addr_lo = msg->address_lo;
+ minfo->msi_addr_hi = msg->address_hi;
+ minfo->msi_data = msg->data;
+
+ rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_MSI_ADDR_LO,
+ sizeof(*minfo) / sizeof(u32), (u32 *)minfo);
+ if (rc) {
+ dev_warn(dev, "failed to write MSI info for MPXY channel 0x%x\n",
+ mchan->channel_id);
+ }
+}
+
+static struct mbox_chan *mpxy_mbox_fw_xlate(struct mbox_controller *ctlr,
+ const struct fwnode_reference_args *pa)
+{
+ struct mpxy_mbox *mbox = container_of(ctlr, struct mpxy_mbox, controller);
+ struct mpxy_mbox_channel *mchan;
+ u32 i;
+
+ if (pa->nargs != 2)
+ return ERR_PTR(-EINVAL);
+
+ for (i = 0; i < mbox->channel_count; i++) {
+ mchan = &mbox->channels[i];
+ if (mchan->channel_id == pa->args[0] &&
+ mchan->attrs.msg_proto_id == pa->args[1])
+ return &mbox->controller.chans[i];
+ }
+
+ return ERR_PTR(-ENOENT);
+}
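
Given the two-cell decode above (channel ID, message protocol ID), a consumer is expected to obtain its channel through the regular mailbox client API; the callback and the index are illustrative assumptions.

struct mbox_client cl = {
	.dev = dev,
	.rx_callback = my_rpmi_rx,	/* hypothetical notification handler */
};
struct mbox_chan *chan;

chan = mbox_request_channel(&cl, 0);	/* index into the consumer's mbox list */
if (IS_ERR(chan))
	return PTR_ERR(chan);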
+
+static int mpxy_mbox_populate_channels(struct mpxy_mbox *mbox)
+{
+ u32 i, *channel_ids __free(kfree) = NULL;
+ struct mpxy_mbox_channel *mchan;
+ int rc;
+
+	/* Find out the number of channels */
+ rc = mpxy_get_channel_count(&mbox->channel_count);
+ if (rc)
+ return dev_err_probe(mbox->dev, rc, "failed to get number of MPXY channels\n");
+ if (!mbox->channel_count)
+ return dev_err_probe(mbox->dev, -ENODEV, "no MPXY channels available\n");
+
+ /* Allocate and fetch all channel IDs */
+ channel_ids = kcalloc(mbox->channel_count, sizeof(*channel_ids), GFP_KERNEL);
+ if (!channel_ids)
+ return -ENOMEM;
+ rc = mpxy_get_channel_ids(mbox->channel_count, channel_ids);
+ if (rc)
+ return dev_err_probe(mbox->dev, rc, "failed to get MPXY channel IDs\n");
+
+ /* Populate all channels */
+ mbox->channels = devm_kcalloc(mbox->dev, mbox->channel_count,
+ sizeof(*mbox->channels), GFP_KERNEL);
+ if (!mbox->channels)
+ return -ENOMEM;
+ for (i = 0; i < mbox->channel_count; i++) {
+ mchan = &mbox->channels[i];
+ mchan->mbox = mbox;
+ mchan->channel_id = channel_ids[i];
+
+ rc = mpxy_read_attrs(mchan->channel_id, SBI_MPXY_ATTR_MSG_PROT_ID,
+ sizeof(mchan->attrs) / sizeof(u32),
+ (u32 *)&mchan->attrs);
+ if (rc) {
+ return dev_err_probe(mbox->dev, rc,
+ "MPXY channel 0x%x read attrs failed\n",
+ mchan->channel_id);
+ }
+
+ if (mchan->attrs.msg_proto_id == SBI_MPXY_MSGPROTO_RPMI_ID) {
+ rc = mpxy_mbox_read_rpmi_attrs(mchan);
+ if (rc) {
+ return dev_err_probe(mbox->dev, rc,
+ "MPXY channel 0x%x read RPMI attrs failed\n",
+ mchan->channel_id);
+ }
+ }
+
+ mchan->notif = devm_kzalloc(mbox->dev, mpxy_shmem_size, GFP_KERNEL);
+ if (!mchan->notif)
+ return -ENOMEM;
+
+ mchan->max_xfer_len = min(mpxy_shmem_size, mchan->attrs.msg_max_len);
+
+ if ((mchan->attrs.capability & SBI_MPXY_CHAN_CAP_GET_NOTIFICATIONS) &&
+ (mchan->attrs.capability & SBI_MPXY_CHAN_CAP_EVENTS_STATE))
+ mchan->have_events_state = true;
+
+ if ((mchan->attrs.capability & SBI_MPXY_CHAN_CAP_GET_NOTIFICATIONS) &&
+ (mchan->attrs.capability & SBI_MPXY_CHAN_CAP_MSI))
+ mchan->msi_index = mbox->msi_count++;
+ else
+ mchan->msi_index = U32_MAX;
+ mchan->msi_irq = U32_MAX;
+ }
+
+ return 0;
+}
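
Both capability tests above require SBI_MPXY_CHAN_CAP_GET_NOTIFICATIONS in combination with a second bit; a hedged helper expressing that all-bits-set test (illustrative, not in the driver):

    /* Illustrative helper: true only if every bit in @caps is set. */
    static inline bool mpxy_chan_has_caps(u32 capability, u32 caps)
    {
            return (capability & caps) == caps;
    }

    /* The events-state check would then read:
     *  mchan->have_events_state =
     *          mpxy_chan_has_caps(mchan->attrs.capability,
     *                             SBI_MPXY_CHAN_CAP_GET_NOTIFICATIONS |
     *                             SBI_MPXY_CHAN_CAP_EVENTS_STATE);
     */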
+
+static int mpxy_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mpxy_mbox_channel *mchan;
+ struct mpxy_mbox *mbox;
+ int msi_idx, rc;
+ u32 i;
+
+ /*
+ * Initialize MPXY shared memory only once. This also ensures
+ * that the SBI MPXY mailbox is probed only once.
+ */
+ if (mpxy_shmem_init_done) {
+ dev_err(dev, "SBI MPXY mailbox already initialized\n");
+ return -EALREADY;
+ }
+
+ /* Probe for SBI MPXY extension */
+ if (sbi_spec_version < sbi_mk_version(1, 0) ||
+ sbi_probe_extension(SBI_EXT_MPXY) <= 0) {
+ dev_info(dev, "SBI MPXY extension not available\n");
+ return -ENODEV;
+ }
+
+ /* Find out the shared memory size */
+ rc = mpxy_get_shmem_size(&mpxy_shmem_size);
+ if (rc)
+ return dev_err_probe(dev, rc, "failed to get MPXY shared memory size\n");
+
+ /*
+ * Setup MPXY shared memory on each CPU
+ *
+ * Note: Don't cleanup MPXY shared memory upon CPU power-down
+ * because the RPMI System MSI irqchip driver needs it to be
+ * available when migrating IRQs in CPU power-down path.
+ */
+ cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "riscv/sbi-mpxy-shmem",
+ mpxy_setup_shmem, NULL);
+
+ /* Mark as MPXY shared memory initialization done */
+ mpxy_shmem_init_done = true;
+
+ /* Allocate mailbox instance */
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ mbox->dev = dev;
+ platform_set_drvdata(pdev, mbox);
+
+ /* Populate mailbox channels */
+ rc = mpxy_mbox_populate_channels(mbox);
+ if (rc)
+ return rc;
+
+ /* Initialize mailbox controller */
+ mbox->controller.txdone_irq = false;
+ mbox->controller.txdone_poll = false;
+ mbox->controller.ops = &mpxy_mbox_ops;
+ mbox->controller.dev = dev;
+ mbox->controller.num_chans = mbox->channel_count;
+ mbox->controller.fw_xlate = mpxy_mbox_fw_xlate;
+ mbox->controller.chans = devm_kcalloc(dev, mbox->channel_count,
+ sizeof(*mbox->controller.chans),
+ GFP_KERNEL);
+ if (!mbox->controller.chans)
+ return -ENOMEM;
+ for (i = 0; i < mbox->channel_count; i++)
+ mbox->controller.chans[i].con_priv = &mbox->channels[i];
+
+ /* Setup MSIs for mailbox (if required) */
+ if (mbox->msi_count) {
+ /*
+ * The device MSI domain for platform devices on the RISC-V
+ * architecture is only available after the MSI controller driver
+ * is probed, so configure it explicitly here.
+ */
+ if (!dev_get_msi_domain(dev)) {
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+
+ /*
+ * The device MSI domain for OF devices is only set when the OF
+ * device is populated/created. If the device MSI domain is
+ * discovered only after the OF device has been created, we need
+ * to set it explicitly before using any platform MSI functions.
+ */
+ if (is_of_node(fwnode)) {
+ of_msi_configure(dev, dev_of_node(dev));
+ } else if (is_acpi_device_node(fwnode)) {
+ struct irq_domain *msi_domain;
+
+ msi_domain = irq_find_matching_fwnode(imsic_acpi_get_fwnode(dev),
+ DOMAIN_BUS_PLATFORM_MSI);
+ dev_set_msi_domain(dev, msi_domain);
+ }
+
+ if (!dev_get_msi_domain(dev))
+ return -EPROBE_DEFER;
+ }
+
+ mbox->msi_index_to_channel = devm_kcalloc(dev, mbox->msi_count,
+ sizeof(*mbox->msi_index_to_channel),
+ GFP_KERNEL);
+ if (!mbox->msi_index_to_channel)
+ return -ENOMEM;
+
+ for (msi_idx = 0; msi_idx < mbox->msi_count; msi_idx++) {
+ for (i = 0; i < mbox->channel_count; i++) {
+ mchan = &mbox->channels[i];
+ if (mchan->msi_index == msi_idx) {
+ mbox->msi_index_to_channel[msi_idx] = mchan;
+ break;
+ }
+ }
+ }
+
+ rc = platform_device_msi_init_and_alloc_irqs(dev, mbox->msi_count,
+ mpxy_mbox_msi_write);
+ if (rc) {
+ return dev_err_probe(dev, rc, "Failed to allocate %d MSIs\n",
+ mbox->msi_count);
+ }
+
+ for (i = 0; i < mbox->channel_count; i++) {
+ mchan = &mbox->channels[i];
+ if (mchan->msi_index == U32_MAX)
+ continue;
+ mchan->msi_irq = msi_get_virq(dev, mchan->msi_index);
+ }
+ }
+
+ /* Register mailbox controller */
+ rc = devm_mbox_controller_register(dev, &mbox->controller);
+ if (rc) {
+ dev_err_probe(dev, rc, "Registering SBI MPXY mailbox failed\n");
+ if (mbox->msi_count)
+ platform_device_msi_free_irqs_all(dev);
+ return rc;
+ }
+
+#ifdef CONFIG_ACPI
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+
+ if (adev)
+ acpi_dev_clear_dependencies(adev);
+#endif
+
+ dev_info(dev, "mailbox registered with %d channels\n",
+ mbox->channel_count);
+ return 0;
+}
+
+static void mpxy_mbox_remove(struct platform_device *pdev)
+{
+ struct mpxy_mbox *mbox = platform_get_drvdata(pdev);
+
+ if (mbox->msi_count)
+ platform_device_msi_free_irqs_all(mbox->dev);
+}
+
+static const struct of_device_id mpxy_mbox_of_match[] = {
+ { .compatible = "riscv,sbi-mpxy-mbox" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mpxy_mbox_of_match);
+
+static const struct acpi_device_id mpxy_mbox_acpi_match[] = {
+ { "RSCV0005" },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, mpxy_mbox_acpi_match);
+
+static struct platform_driver mpxy_mbox_driver = {
+ .driver = {
+ .name = "riscv-sbi-mpxy-mbox",
+ .of_match_table = mpxy_mbox_of_match,
+ .acpi_match_table = mpxy_mbox_acpi_match,
+ },
+ .probe = mpxy_mbox_probe,
+ .remove = mpxy_mbox_remove,
+};
+module_platform_driver(mpxy_mbox_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Anup Patel <apatel@ventanamicro.com>");
+MODULE_DESCRIPTION("RISC-V SBI MPXY mailbox controller driver");
diff --git a/drivers/mailbox/rockchip-mailbox.c b/drivers/mailbox/rockchip-mailbox.c
index e02d3c9e3693..4d966cb2ed03 100644
--- a/drivers/mailbox/rockchip-mailbox.c
+++ b/drivers/mailbox/rockchip-mailbox.c
@@ -8,8 +8,8 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
+#include <linux/of.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#define MAILBOX_A2B_INTEN 0x00
@@ -159,7 +159,7 @@ static const struct of_device_id rockchip_mbox_of_match[] = {
{ .compatible = "rockchip,rk3368-mailbox", .data = &rk3368_drv_data},
{ },
};
-MODULE_DEVICE_TABLE(of, rockchp_mbox_of_match);
+MODULE_DEVICE_TABLE(of, rockchip_mbox_of_match);
static int rockchip_mbox_probe(struct platform_device *pdev)
{
@@ -194,11 +194,7 @@ static int rockchip_mbox_probe(struct platform_device *pdev)
mb->mbox.ops = &rockchip_mbox_chan_ops;
mb->mbox.txdone_irq = true;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
- mb->mbox_base = devm_ioremap_resource(&pdev->dev, res);
+ mb->mbox_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(mb->mbox_base))
return PTR_ERR(mb->mbox_base);
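
For reference, a minimal probe sketch (hypothetical demo_probe) of the combined helper adopted here: it looks up MEM resource 0, maps it, and hands back the struct resource in a single call.

    static int demo_probe(struct platform_device *pdev)
    {
            struct resource *res;
            void __iomem *base;

            /* Replaces platform_get_resource() + devm_ioremap_resource(). */
            base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
            if (IS_ERR(base))
                    return PTR_ERR(base);

            return 0;
    }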
@@ -248,13 +244,12 @@ static struct platform_driver rockchip_mbox_driver = {
.probe = rockchip_mbox_probe,
.driver = {
.name = "rockchip-mailbox",
- .of_match_table = of_match_ptr(rockchip_mbox_of_match),
+ .of_match_table = rockchip_mbox_of_match,
},
};
module_platform_driver(rockchip_mbox_driver);
-MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Rockchip mailbox: communicate between CPU cores and MCU");
MODULE_AUTHOR("Addy Ke <addy.ke@rock-chips.com>");
MODULE_AUTHOR("Caesar Wang <wxt@rock-chips.com>");
diff --git a/drivers/mailbox/sprd-mailbox.c b/drivers/mailbox/sprd-mailbox.c
index e3c899abeed8..ee8539dfcef5 100644
--- a/drivers/mailbox/sprd-mailbox.c
+++ b/drivers/mailbox/sprd-mailbox.c
@@ -11,7 +11,7 @@
#include <linux/io.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
@@ -62,7 +62,6 @@ struct sprd_mbox_priv {
void __iomem *outbox_base;
/* Base register address for supplementary outbox */
void __iomem *supp_base;
- struct clk *clk;
u32 outbox_fifo_depth;
struct mutex lock;
@@ -291,19 +290,13 @@ static const struct mbox_chan_ops sprd_mbox_ops = {
.shutdown = sprd_mbox_shutdown,
};
-static void sprd_mbox_disable(void *data)
-{
- struct sprd_mbox_priv *priv = data;
-
- clk_disable_unprepare(priv->clk);
-}
-
static int sprd_mbox_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sprd_mbox_priv *priv;
int ret, inbox_irq, outbox_irq, supp_irq;
unsigned long id, supp;
+ struct clk *clk;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -331,20 +324,10 @@ static int sprd_mbox_probe(struct platform_device *pdev)
if (IS_ERR(priv->outbox_base))
return PTR_ERR(priv->outbox_base);
- priv->clk = devm_clk_get(dev, "enable");
- if (IS_ERR(priv->clk)) {
+ clk = devm_clk_get_enabled(dev, "enable");
+ if (IS_ERR(clk)) {
dev_err(dev, "failed to get mailbox clock\n");
- return PTR_ERR(priv->clk);
- }
-
- ret = clk_prepare_enable(priv->clk);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(dev, sprd_mbox_disable, priv);
- if (ret) {
- dev_err(dev, "failed to add mailbox disable action\n");
- return ret;
+ return PTR_ERR(clk);
}
inbox_irq = platform_get_irq_byname(pdev, "inbox");
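
A brief sketch of the devm_clk_get_enabled() pattern adopted above: one managed call folds clk_get(), clk_prepare_enable() and the matching cleanup action together, which is why priv->clk and sprd_mbox_disable() could be dropped.

    /* The clock is enabled now and disabled automatically on detach. */
    struct clk *clk = devm_clk_get_enabled(dev, "enable");

    if (IS_ERR(clk))
            return dev_err_probe(dev, PTR_ERR(clk),
                                 "failed to get mailbox clock\n");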
diff --git a/drivers/mailbox/stm32-ipcc.c b/drivers/mailbox/stm32-ipcc.c
index 15d538fe2113..4f63f1a14ca6 100644
--- a/drivers/mailbox/stm32-ipcc.c
+++ b/drivers/mailbox/stm32-ipcc.c
@@ -11,6 +11,7 @@
#include <linux/io.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
@@ -330,7 +331,7 @@ err_clk:
return ret;
}
-static int stm32_ipcc_remove(struct platform_device *pdev)
+static void stm32_ipcc_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -338,8 +339,6 @@ static int stm32_ipcc_remove(struct platform_device *pdev)
dev_pm_clear_wake_irq(&pdev->dev);
device_set_wakeup_capable(dev, false);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/mailbox/sun6i-msgbox.c b/drivers/mailbox/sun6i-msgbox.c
index 7f8d931042d3..6ba6920f4645 100644
--- a/drivers/mailbox/sun6i-msgbox.c
+++ b/drivers/mailbox/sun6i-msgbox.c
@@ -287,15 +287,13 @@ err_disable_unprepare:
return ret;
}
-static int sun6i_msgbox_remove(struct platform_device *pdev)
+static void sun6i_msgbox_remove(struct platform_device *pdev)
{
struct sun6i_msgbox *mbox = platform_get_drvdata(pdev);
mbox_controller_unregister(&mbox->controller);
/* See the comment in sun6i_msgbox_probe about the reset line. */
clk_disable_unprepare(mbox->clk);
-
- return 0;
}
static const struct of_device_id sun6i_msgbox_of_match[] = {
@@ -309,7 +307,7 @@ static struct platform_driver sun6i_msgbox_driver = {
.name = "sun6i-msgbox",
.of_match_table = sun6i_msgbox_of_match,
},
- .probe = sun6i_msgbox_probe,
+ .probe = sun6i_msgbox_probe,
.remove = sun6i_msgbox_remove,
};
module_platform_driver(sun6i_msgbox_driver);
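
Both conversions above (stm32-ipcc and sun6i-msgbox) follow the platform core change that made .remove() return void: the core always ignored the returned value, so drivers now simply clean up. A minimal sketch of the new signature with a hypothetical driver:

    static void demo_remove(struct platform_device *pdev)
    {
            /* Tear down resources; there is no status to return. */
    }

    static struct platform_driver demo_driver = {
            .driver = {
                    .name = "demo",
            },
            .remove = demo_remove,  /* void, formerly int */
    };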
diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c
index 573481e436f5..ed9a0bb2bcd8 100644
--- a/drivers/mailbox/tegra-hsp.c
+++ b/drivers/mailbox/tegra-hsp.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2025, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/delay.h>
@@ -8,7 +8,6 @@
#include <linux/io.h>
#include <linux/mailbox_controller.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/slab.h>
@@ -29,12 +28,6 @@
#define HSP_INT_FULL_MASK 0xff
#define HSP_INT_DIMENSIONING 0x380
-#define HSP_nSM_SHIFT 0
-#define HSP_nSS_SHIFT 4
-#define HSP_nAS_SHIFT 8
-#define HSP_nDB_SHIFT 12
-#define HSP_nSI_SHIFT 16
-#define HSP_nINT_MASK 0xf
#define HSP_DB_TRIGGER 0x0
#define HSP_DB_ENABLE 0x4
@@ -97,6 +90,21 @@ struct tegra_hsp_soc {
const struct tegra_hsp_db_map *map;
bool has_per_mb_ie;
bool has_128_bit_mb;
+ unsigned int reg_stride;
+
+ /* Shifts for dimensioning register. */
+ unsigned int si_shift;
+ unsigned int db_shift;
+ unsigned int as_shift;
+ unsigned int ss_shift;
+ unsigned int sm_shift;
+
+ /* Masks for dimensioning register. */
+ unsigned int si_mask;
+ unsigned int db_mask;
+ unsigned int as_mask;
+ unsigned int ss_mask;
+ unsigned int sm_mask;
};
struct tegra_hsp {
@@ -279,7 +287,7 @@ tegra_hsp_doorbell_create(struct tegra_hsp *hsp, const char *name,
return ERR_PTR(-ENOMEM);
offset = (1 + (hsp->num_sm / 2) + hsp->num_ss + hsp->num_as) * SZ_64K;
- offset += index * 0x100;
+ offset += index * hsp->soc->reg_stride;
db->channel.regs = hsp->regs + offset;
db->channel.hsp = hsp;
@@ -388,7 +396,6 @@ static void tegra_hsp_sm_recv32(struct tegra_hsp_channel *channel)
value = tegra_hsp_channel_readl(channel, HSP_SM_SHRD_MBOX);
value &= ~HSP_SM_SHRD_MBOX_FULL;
msg = (void *)(unsigned long)value;
- mbox_chan_received_data(channel->chan, msg);
/*
* Need to clear all bits here since some producers, such as TCU, depend
@@ -398,6 +405,8 @@ static void tegra_hsp_sm_recv32(struct tegra_hsp_channel *channel)
* explicitly, so we have to make sure we cover all possible cases.
*/
tegra_hsp_channel_writel(channel, 0x0, HSP_SM_SHRD_MBOX);
+
+ mbox_chan_received_data(channel->chan, msg);
}
static const struct tegra_hsp_sm_ops tegra_hsp_sm_32bit_ops = {
@@ -433,7 +442,6 @@ static void tegra_hsp_sm_recv128(struct tegra_hsp_channel *channel)
value[3] = tegra_hsp_channel_readl(channel, HSP_SHRD_MBOX_TYPE1_DATA3);
msg = (void *)(unsigned long)value;
- mbox_chan_received_data(channel->chan, msg);
/*
* Clear data registers and tag.
@@ -443,6 +451,8 @@ static void tegra_hsp_sm_recv128(struct tegra_hsp_channel *channel)
tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_DATA2);
tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_DATA3);
tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_TAG);
+
+ mbox_chan_received_data(channel->chan, msg);
}
static const struct tegra_hsp_sm_ops tegra_hsp_sm_128bit_ops = {
@@ -727,7 +737,6 @@ static int tegra_hsp_request_shared_irq(struct tegra_hsp *hsp)
static int tegra_hsp_probe(struct platform_device *pdev)
{
struct tegra_hsp *hsp;
- struct resource *res;
unsigned int i;
u32 value;
int err;
@@ -741,17 +750,16 @@ static int tegra_hsp_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&hsp->doorbells);
spin_lock_init(&hsp->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hsp->regs = devm_ioremap_resource(&pdev->dev, res);
+ hsp->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hsp->regs))
return PTR_ERR(hsp->regs);
value = tegra_hsp_readl(hsp, HSP_INT_DIMENSIONING);
- hsp->num_sm = (value >> HSP_nSM_SHIFT) & HSP_nINT_MASK;
- hsp->num_ss = (value >> HSP_nSS_SHIFT) & HSP_nINT_MASK;
- hsp->num_as = (value >> HSP_nAS_SHIFT) & HSP_nINT_MASK;
- hsp->num_db = (value >> HSP_nDB_SHIFT) & HSP_nINT_MASK;
- hsp->num_si = (value >> HSP_nSI_SHIFT) & HSP_nINT_MASK;
+ hsp->num_sm = (value >> hsp->soc->sm_shift) & hsp->soc->sm_mask;
+ hsp->num_ss = (value >> hsp->soc->ss_shift) & hsp->soc->ss_mask;
+ hsp->num_as = (value >> hsp->soc->as_shift) & hsp->soc->as_mask;
+ hsp->num_db = (value >> hsp->soc->db_shift) & hsp->soc->db_mask;
+ hsp->num_si = (value >> hsp->soc->si_shift) & hsp->soc->si_mask;
err = platform_get_irq_byname_optional(pdev, "doorbell");
if (err >= 0)
@@ -870,13 +878,11 @@ static int tegra_hsp_probe(struct platform_device *pdev)
return 0;
}
-static int tegra_hsp_remove(struct platform_device *pdev)
+static void tegra_hsp_remove(struct platform_device *pdev)
{
struct tegra_hsp *hsp = platform_get_drvdata(pdev);
lockdep_unregister_key(&hsp->lock_key);
-
- return 0;
}
static int __maybe_unused tegra_hsp_resume(struct device *dev)
@@ -916,24 +922,75 @@ static const struct tegra_hsp_soc tegra186_hsp_soc = {
.map = tegra186_hsp_db_map,
.has_per_mb_ie = false,
.has_128_bit_mb = false,
+ .reg_stride = 0x100,
+ .si_shift = 16,
+ .db_shift = 12,
+ .as_shift = 8,
+ .ss_shift = 4,
+ .sm_shift = 0,
+ .si_mask = 0xf,
+ .db_mask = 0xf,
+ .as_mask = 0xf,
+ .ss_mask = 0xf,
+ .sm_mask = 0xf,
};
static const struct tegra_hsp_soc tegra194_hsp_soc = {
.map = tegra186_hsp_db_map,
.has_per_mb_ie = true,
.has_128_bit_mb = false,
+ .reg_stride = 0x100,
+ .si_shift = 16,
+ .db_shift = 12,
+ .as_shift = 8,
+ .ss_shift = 4,
+ .sm_shift = 0,
+ .si_mask = 0xf,
+ .db_mask = 0xf,
+ .as_mask = 0xf,
+ .ss_mask = 0xf,
+ .sm_mask = 0xf,
};
static const struct tegra_hsp_soc tegra234_hsp_soc = {
.map = tegra186_hsp_db_map,
.has_per_mb_ie = false,
.has_128_bit_mb = true,
+ .reg_stride = 0x100,
+ .si_shift = 16,
+ .db_shift = 12,
+ .as_shift = 8,
+ .ss_shift = 4,
+ .sm_shift = 0,
+ .si_mask = 0xf,
+ .db_mask = 0xf,
+ .as_mask = 0xf,
+ .ss_mask = 0xf,
+ .sm_mask = 0xf,
+};
+
+static const struct tegra_hsp_soc tegra264_hsp_soc = {
+ .map = tegra186_hsp_db_map,
+ .has_per_mb_ie = false,
+ .has_128_bit_mb = true,
+ .reg_stride = 0x1000,
+ .si_shift = 17,
+ .db_shift = 12,
+ .as_shift = 8,
+ .ss_shift = 4,
+ .sm_shift = 0,
+ .si_mask = 0x1f,
+ .db_mask = 0x1f,
+ .as_mask = 0xf,
+ .ss_mask = 0xf,
+ .sm_mask = 0xf,
};
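
Decoding the dimensioning register with the per-SoC shifts and masks above is a plain shift-and-mask; a worked example using the Tegra264 values (si_shift = 17, si_mask = 0x1f) with a hypothetical register value:

    /* Hypothetical value encoding 8 shared interrupts in bits [21:17]. */
    u32 value  = 8u << 17;
    u32 num_si = (value >> 17) & 0x1f;  /* == 8 on Tegra264 */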
static const struct of_device_id tegra_hsp_match[] = {
{ .compatible = "nvidia,tegra186-hsp", .data = &tegra186_hsp_soc },
{ .compatible = "nvidia,tegra194-hsp", .data = &tegra194_hsp_soc },
{ .compatible = "nvidia,tegra234-hsp", .data = &tegra234_hsp_soc },
+ { .compatible = "nvidia,tegra264-hsp", .data = &tegra264_hsp_soc },
{ }
};
diff --git a/drivers/mailbox/ti-msgmgr.c b/drivers/mailbox/ti-msgmgr.c
index ddac423ac1a9..8eb8df8d95a4 100644
--- a/drivers/mailbox/ti-msgmgr.c
+++ b/drivers/mailbox/ti-msgmgr.c
@@ -15,10 +15,10 @@
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/soc/ti/ti-msgmgr.h>
#define Q_DATA_OFFSET(proxy, queue, reg) \
@@ -430,14 +430,20 @@ static int ti_msgmgr_send_data(struct mbox_chan *chan, void *data)
/* Ensure all unused data is 0 */
data_trail &= 0xFFFFFFFF >> (8 * (sizeof(u32) - trail_bytes));
writel(data_trail, data_reg);
- data_reg++;
+ data_reg += sizeof(u32);
}
+
/*
* 'data_reg' indicates next register to write. If we did not already
* write on the tx complete reg (last reg), we must do so for transmit.
+ * In addition, we must also make sure any intermediate data
+ * registers (if required) are reset to 0 to maintain TISCI
+ * backward compatibility.
*/
- if (data_reg <= qinst->queue_buff_end)
- writel(0, qinst->queue_buff_end);
+ while (data_reg <= qinst->queue_buff_end) {
+ writel(0, data_reg);
+ data_reg += sizeof(u32);
+ }
/* If we are in polled mode, wait for a response before proceeding */
if (ti_msgmgr_chan_has_polled_queue_rx(message->chan_rx))
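
The loop above treats data_reg as a byte-granular cursor (hence += sizeof(u32) rather than ++), writing a 32-bit zero to every remaining register up to and including queue_buff_end. A condensed sketch of that tail fill under the same __iomem assumption:

    /* Zero every remaining 32-bit word, inclusive of the last register. */
    static void demo_zero_tail(void __iomem *reg, void __iomem *end)
    {
            while (reg <= end) {
                    writel(0, reg);
                    reg += sizeof(u32);     /* byte-granular pointer math */
            }
    }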
@@ -804,9 +810,7 @@ MODULE_DEVICE_TABLE(of, ti_msgmgr_of_match);
static int ti_msgmgr_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- const struct of_device_id *of_id;
struct device_node *np;
- struct resource *res;
const struct ti_msgmgr_desc *desc;
struct ti_msgmgr_inst *inst;
struct ti_queue_inst *qinst;
@@ -823,36 +827,26 @@ static int ti_msgmgr_probe(struct platform_device *pdev)
}
np = dev->of_node;
- of_id = of_match_device(ti_msgmgr_of_match, dev);
- if (!of_id) {
- dev_err(dev, "OF data missing\n");
- return -EINVAL;
- }
- desc = of_id->data;
-
inst = devm_kzalloc(dev, sizeof(*inst), GFP_KERNEL);
if (!inst)
return -ENOMEM;
inst->dev = dev;
- inst->desc = desc;
+ inst->desc = desc = device_get_match_data(dev);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- desc->data_region_name);
- inst->queue_proxy_region = devm_ioremap_resource(dev, res);
+ inst->queue_proxy_region =
+ devm_platform_ioremap_resource_byname(pdev, desc->data_region_name);
if (IS_ERR(inst->queue_proxy_region))
return PTR_ERR(inst->queue_proxy_region);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- desc->status_region_name);
- inst->queue_state_debug_region = devm_ioremap_resource(dev, res);
+ inst->queue_state_debug_region =
+ devm_platform_ioremap_resource_byname(pdev, desc->status_region_name);
if (IS_ERR(inst->queue_state_debug_region))
return PTR_ERR(inst->queue_state_debug_region);
if (desc->is_sproxy) {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- desc->ctrl_region_name);
- inst->queue_ctrl_region = devm_ioremap_resource(dev, res);
+ inst->queue_ctrl_region =
+ devm_platform_ioremap_resource_byname(pdev, desc->ctrl_region_name);
if (IS_ERR(inst->queue_ctrl_region))
return PTR_ERR(inst->queue_ctrl_region);
}
@@ -926,7 +920,7 @@ static struct platform_driver ti_msgmgr_driver = {
.probe = ti_msgmgr_probe,
.driver = {
.name = "ti-msgmgr",
- .of_match_table = of_match_ptr(ti_msgmgr_of_match),
+ .of_match_table = ti_msgmgr_of_match,
.pm = &ti_msgmgr_pm_ops,
},
};
diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
index 12e004ff1a14..967967b2b8a9 100644
--- a/drivers/mailbox/zynqmp-ipi-mailbox.c
+++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
@@ -6,9 +6,11 @@
*/
#include <linux/arm-smccc.h>
+#include <linux/cpuhotplug.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
@@ -16,7 +18,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
@@ -54,6 +55,23 @@
#define IPI_MB_CHNL_TX 0 /* IPI mailbox TX channel */
#define IPI_MB_CHNL_RX 1 /* IPI mailbox RX channel */
+/* IPI Message Buffer Information */
+#define RESP_OFFSET 0x20U
+#define DEST_OFFSET 0x40U
+#define IPI_BUF_SIZE 0x20U
+#define DST_BIT_POS 9U
+#define SRC_BITMASK GENMASK(11, 8)
+
+/* Macro to represent SGI type for IPI IRQs */
+#define IPI_IRQ_TYPE_SGI 2
+
+/*
+ * Module parameters
+ */
+static int tx_poll_period = 5;
+module_param_named(tx_poll_period, tx_poll_period, int, 0644);
+MODULE_PARM_DESC(tx_poll_period, "Poll period waiting for ack after send.");
+
/**
* struct zynqmp_ipi_mchan - Description of a Xilinx ZynqMP IPI mailbox channel
* @is_opened: indicate if the IPI channel is opened
@@ -74,6 +92,10 @@ struct zynqmp_ipi_mchan {
unsigned int chan_type;
};
+struct zynqmp_ipi_mbox;
+
+typedef int (*setup_ipi_fn)(struct zynqmp_ipi_mbox *ipi_mbox, struct device_node *node);
+
/**
* struct zynqmp_ipi_mbox - Description of a ZynqMP IPI mailbox
* platform data.
@@ -83,7 +105,7 @@ struct zynqmp_ipi_mchan {
* @remote_id: remote IPI agent ID
* @mbox: mailbox Controller
* @mchans: array for channels, tx channel and rx channel.
- * @irq: IPI agent interrupt ID
+ * @setup_ipi_fn: function pointer to set up IPI channels
*/
struct zynqmp_ipi_mbox {
struct zynqmp_ipi_pdata *pdata;
@@ -91,6 +113,7 @@ struct zynqmp_ipi_mbox {
u32 remote_id;
struct mbox_controller mbox;
struct zynqmp_ipi_mchan mchans[2];
+ setup_ipi_fn setup_ipi_fn;
};
/**
@@ -99,20 +122,26 @@ struct zynqmp_ipi_mbox {
* @dev: device pointer corresponding to the Xilinx ZynqMP
* IPI agent
* @irq: IPI agent interrupt ID
+ * @irq_type: IPI SGI or SPI IRQ type
* @method: IPI SMC or HVC is going to be used
* @local_id: local IPI agent ID
+ * @virq_sgi: IRQ number mapped to SGI
* @num_mboxes: number of mailboxes of this IPI agent
* @ipi_mboxes: IPI mailboxes of this IPI agent
*/
struct zynqmp_ipi_pdata {
struct device *dev;
int irq;
+ unsigned int irq_type;
unsigned int method;
u32 local_id;
+ int virq_sgi;
int num_mboxes;
- struct zynqmp_ipi_mbox *ipi_mboxes;
+ struct zynqmp_ipi_mbox ipi_mboxes[] __counted_by(num_mboxes);
};
+static DEFINE_PER_CPU(struct zynqmp_ipi_pdata *, per_cpu_pdata);
+
static struct device_driver zynqmp_ipi_mbox_driver = {
.owner = THIS_MODULE,
.name = "zynqmp-ipi-mbox",
@@ -152,7 +181,7 @@ static irqreturn_t zynqmp_ipi_interrupt(int irq, void *data)
struct zynqmp_ipi_message *msg;
u64 arg0, arg3;
struct arm_smccc_res res;
- int ret, i;
+ int ret, i, status = IRQ_NONE;
(void)irq;
arg0 = SMC_IPI_MAILBOX_STATUS_ENQUIRY;
@@ -166,15 +195,25 @@ static irqreturn_t zynqmp_ipi_interrupt(int irq, void *data)
if (ret > 0 && ret & IPI_MB_STATUS_RECV_PENDING) {
if (mchan->is_opened) {
msg = mchan->rx_buf;
- msg->len = mchan->req_buf_size;
- memcpy_fromio(msg->data, mchan->req_buf,
- msg->len);
+ if (msg) {
+ msg->len = mchan->req_buf_size;
+ memcpy_fromio(msg->data, mchan->req_buf,
+ msg->len);
+ }
mbox_chan_received_data(chan, (void *)msg);
- return IRQ_HANDLED;
+ status = IRQ_HANDLED;
}
}
}
- return IRQ_NONE;
+ return status;
+}
+
+static irqreturn_t zynqmp_sgi_interrupt(int irq, void *data)
+{
+ struct zynqmp_ipi_pdata **pdata_ptr = data;
+ struct zynqmp_ipi_pdata *pdata = *pdata_ptr;
+
+ return zynqmp_ipi_interrupt(irq, pdata);
}
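
request_percpu_irq() passes the handler this CPU's slot of the registered per-CPU cookie, which is why the SGI wrapper above receives a struct zynqmp_ipi_pdata ** and dereferences it once before delegating. The registration side (see xlnx_mbox_init_sgi() below) therefore hands over the address of the per-CPU variable itself:

    /* Cookie is the per-CPU variable, not a plain pointer. */
    ret = request_percpu_irq(pdata->virq_sgi, zynqmp_sgi_interrupt,
                             pdev->name, &per_cpu_pdata);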
/**
@@ -278,26 +317,26 @@ static int zynqmp_ipi_send_data(struct mbox_chan *chan, void *data)
if (mchan->chan_type == IPI_MB_CHNL_TX) {
/* Send request message */
- if (msg && msg->len > mchan->req_buf_size) {
+ if (msg && msg->len > mchan->req_buf_size && mchan->req_buf) {
dev_err(dev, "channel %d message length %u > max %lu\n",
mchan->chan_type, (unsigned int)msg->len,
mchan->req_buf_size);
return -EINVAL;
}
- if (msg && msg->len)
+ if (msg && msg->len && mchan->req_buf)
memcpy_toio(mchan->req_buf, msg->data, msg->len);
/* Kick IPI mailbox to send message */
arg0 = SMC_IPI_MAILBOX_NOTIFY;
zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
} else {
/* Send response message */
- if (msg && msg->len > mchan->resp_buf_size) {
+ if (msg && msg->len > mchan->resp_buf_size && mchan->resp_buf) {
dev_err(dev, "channel %d message length %u > max %lu\n",
mchan->chan_type, (unsigned int)msg->len,
mchan->resp_buf_size);
return -EINVAL;
}
- if (msg && msg->len)
+ if (msg && msg->len && mchan->resp_buf)
memcpy_toio(mchan->resp_buf, msg->data, msg->len);
arg0 = SMC_IPI_MAILBOX_ACK;
zynqmp_ipi_fw_call(ipi_mbox, arg0, IPI_SMC_ACK_EIRQ_MASK,
@@ -418,12 +457,6 @@ static struct mbox_chan *zynqmp_ipi_of_xlate(struct mbox_controller *mbox,
return chan;
}
-static const struct of_device_id zynqmp_ipi_of_match[] = {
- { .compatible = "xlnx,zynqmp-ipi-mailbox" },
- {},
-};
-MODULE_DEVICE_TABLE(of, zynqmp_ipi_of_match);
-
/**
* zynqmp_ipi_mbox_get_buf_res - Get buffer resource from the IPI dev node
*
@@ -473,12 +506,9 @@ static void zynqmp_ipi_mbox_dev_release(struct device *dev)
static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox,
struct device_node *node)
{
- struct zynqmp_ipi_mchan *mchan;
struct mbox_chan *chans;
struct mbox_controller *mbox;
- struct resource res;
struct device *dev, *mdev;
- const char *name;
int ret;
dev = ipi_mbox->pdata->dev;
@@ -498,6 +528,73 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox,
}
mdev = &ipi_mbox->dev;
+ /* Get the IPI remote agent ID */
+ ret = of_property_read_u32(node, "xlnx,ipi-id", &ipi_mbox->remote_id);
+ if (ret < 0) {
+ dev_err(dev, "No IPI remote ID is specified.\n");
+ return ret;
+ }
+
+ ret = ipi_mbox->setup_ipi_fn(ipi_mbox, node);
+ if (ret) {
+ dev_err(dev, "Failed to set up IPI Buffers.\n");
+ return ret;
+ }
+
+ mbox = &ipi_mbox->mbox;
+ mbox->dev = mdev;
+ mbox->ops = &zynqmp_ipi_chan_ops;
+ mbox->num_chans = 2;
+ mbox->txdone_irq = false;
+ mbox->txdone_poll = true;
+ mbox->txpoll_period = tx_poll_period;
+ mbox->of_xlate = zynqmp_ipi_of_xlate;
+ chans = devm_kzalloc(mdev, 2 * sizeof(*chans), GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+ mbox->chans = chans;
+ chans[IPI_MB_CHNL_TX].con_priv = &ipi_mbox->mchans[IPI_MB_CHNL_TX];
+ chans[IPI_MB_CHNL_RX].con_priv = &ipi_mbox->mchans[IPI_MB_CHNL_RX];
+ ipi_mbox->mchans[IPI_MB_CHNL_TX].chan_type = IPI_MB_CHNL_TX;
+ ipi_mbox->mchans[IPI_MB_CHNL_RX].chan_type = IPI_MB_CHNL_RX;
+ ret = devm_mbox_controller_register(mdev, mbox);
+ if (ret)
+ dev_err(mdev,
+ "Failed to register mbox_controller(%d)\n", ret);
+ else
+ dev_info(mdev,
+ "Registered ZynqMP IPI mbox with TX/RX channels.\n");
+ return ret;
+}
+
+/**
+ * zynqmp_ipi_setup - set up IPI Buffers for classic flow
+ *
+ * @ipi_mbox: pointer to IPI mailbox private data structure
+ * @node: IPI mailbox device node
+ *
+ * This is used to set up IPI buffers for the ZynqMP SoC, or when the
+ * user wishes to keep the classic driver usage model on newer SoCs
+ * that have only buffered IPIs.
+ *
+ * Note that bufferless IPIs and mixed usage of buffered and bufferless
+ * IPIs are not supported with this flow.
+ *
+ * This will be invoked with compatible string "xlnx,zynqmp-ipi-mailbox".
+ *
+ * Return: 0 for success, negative value for failure
+ */
+static int zynqmp_ipi_setup(struct zynqmp_ipi_mbox *ipi_mbox,
+ struct device_node *node)
+{
+ struct zynqmp_ipi_mchan *mchan;
+ struct device *mdev;
+ struct resource res;
+ const char *name;
+ int ret;
+
+ mdev = &ipi_mbox->dev;
+
mchan = &ipi_mbox->mchans[IPI_MB_CHNL_TX];
name = "local_request_region";
ret = zynqmp_ipi_mbox_get_buf_res(node, name, &res);
@@ -572,39 +669,217 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox,
if (!mchan->rx_buf)
return -ENOMEM;
- /* Get the IPI remote agent ID */
- ret = of_property_read_u32(node, "xlnx,ipi-id", &ipi_mbox->remote_id);
- if (ret < 0) {
- dev_err(dev, "No IPI remote ID is specified.\n");
+ return 0;
+}
+
+/**
+ * versal_ipi_setup - Set up IPIs to support mixed usage of
+ * Buffered and Bufferless IPIs.
+ *
+ * @ipi_mbox: pointer to IPI mailbox private data structure
+ * @node: IPI mailbox device node
+ *
+ * Return: 0 for success, negative value for failure
+ */
+static int versal_ipi_setup(struct zynqmp_ipi_mbox *ipi_mbox,
+ struct device_node *node)
+{
+ struct zynqmp_ipi_mchan *tx_mchan, *rx_mchan;
+ struct resource host_res, remote_res;
+ struct device_node *parent_node;
+ int host_idx, remote_idx;
+ struct device *mdev;
+
+ tx_mchan = &ipi_mbox->mchans[IPI_MB_CHNL_TX];
+ rx_mchan = &ipi_mbox->mchans[IPI_MB_CHNL_RX];
+ parent_node = of_get_parent(node);
+ mdev = &ipi_mbox->dev;
+
+ host_idx = zynqmp_ipi_mbox_get_buf_res(parent_node, "msg", &host_res);
+ remote_idx = zynqmp_ipi_mbox_get_buf_res(node, "msg", &remote_res);
+
+ /*
+ * Only set up buffers if both sides claim to have msg buffers.
+ * This is because each buffered IPI's corresponding msg buffers
+ * are reserved for use by other buffered IPI's.
+ */
+ if (!host_idx && !remote_idx) {
+ u32 host_src, host_dst, remote_src, remote_dst;
+ u32 buff_sz;
+
+ buff_sz = resource_size(&host_res);
+
+ host_src = host_res.start & SRC_BITMASK;
+ remote_src = remote_res.start & SRC_BITMASK;
+
+ host_dst = (host_src >> DST_BIT_POS) * DEST_OFFSET;
+ remote_dst = (remote_src >> DST_BIT_POS) * DEST_OFFSET;
+
+ /* Validate that the IPI IDs are within the IPI message buffer space. */
+ if (host_dst >= buff_sz || remote_dst >= buff_sz) {
+ dev_err(mdev,
+ "Invalid IPI Message buffer values: %x %x\n",
+ host_dst, remote_dst);
+ return -EINVAL;
+ }
+
+ tx_mchan->req_buf = devm_ioremap(mdev,
+ host_res.start | remote_dst,
+ IPI_BUF_SIZE);
+ if (!tx_mchan->req_buf) {
+ dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
+ return -ENOMEM;
+ }
+
+ tx_mchan->resp_buf = devm_ioremap(mdev,
+ (remote_res.start | host_dst) +
+ RESP_OFFSET, IPI_BUF_SIZE);
+ if (!tx_mchan->resp_buf) {
+ dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
+ return -ENOMEM;
+ }
+
+ rx_mchan->req_buf = devm_ioremap(mdev,
+ remote_res.start | host_dst,
+ IPI_BUF_SIZE);
+ if (!rx_mchan->req_buf) {
+ dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
+ return -ENOMEM;
+ }
+
+ rx_mchan->resp_buf = devm_ioremap(mdev,
+ (host_res.start | remote_dst) +
+ RESP_OFFSET, IPI_BUF_SIZE);
+ if (!rx_mchan->resp_buf) {
+ dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
+ return -ENOMEM;
+ }
+
+ tx_mchan->resp_buf_size = IPI_BUF_SIZE;
+ tx_mchan->req_buf_size = IPI_BUF_SIZE;
+ tx_mchan->rx_buf = devm_kzalloc(mdev, IPI_BUF_SIZE +
+ sizeof(struct zynqmp_ipi_message),
+ GFP_KERNEL);
+ if (!tx_mchan->rx_buf)
+ return -ENOMEM;
+
+ rx_mchan->resp_buf_size = IPI_BUF_SIZE;
+ rx_mchan->req_buf_size = IPI_BUF_SIZE;
+ rx_mchan->rx_buf = devm_kzalloc(mdev, IPI_BUF_SIZE +
+ sizeof(struct zynqmp_ipi_message),
+ GFP_KERNEL);
+ if (!rx_mchan->rx_buf)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
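
The buffer math above slices a shared message RAM: the source-agent bits (SRC_BITMASK) select a block, and (src >> DST_BIT_POS) * DEST_OFFSET locates the destination slot inside it. A worked example with hypothetical resource addresses and the constants from this driver (SRC_BITMASK = GENMASK(11, 8) = 0xf00, DST_BIT_POS = 9, DEST_OFFSET = 0x40):

    u32 host_start   = 0xff3f0600;      /* hypothetical "msg" resource  */
    u32 remote_start = 0xff3f0a00;      /* hypothetical remote resource */

    u32 host_src   = host_start & 0xf00;        /* 0x600 */
    u32 remote_src = remote_start & 0xf00;      /* 0xa00 */

    u32 host_dst   = (host_src >> 9) * 0x40;    /* 3 * 0x40 = 0x0c0 */
    u32 remote_dst = (remote_src >> 9) * 0x40;  /* 5 * 0x40 = 0x140 */

    /* TX request buffer: host_start | remote_dst == 0xff3f0740 */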
+
+static int xlnx_mbox_cpuhp_start(unsigned int cpu)
+{
+ struct zynqmp_ipi_pdata *pdata;
+
+ pdata = get_cpu_var(per_cpu_pdata);
+ put_cpu_var(per_cpu_pdata);
+ enable_percpu_irq(pdata->virq_sgi, IRQ_TYPE_NONE);
+
+ return 0;
+}
+
+static int xlnx_mbox_cpuhp_down(unsigned int cpu)
+{
+ struct zynqmp_ipi_pdata *pdata;
+
+ pdata = get_cpu_var(per_cpu_pdata);
+ put_cpu_var(per_cpu_pdata);
+ disable_percpu_irq(pdata->virq_sgi);
+
+ return 0;
+}
+
+static void xlnx_disable_percpu_irq(void *data)
+{
+ struct zynqmp_ipi_pdata *pdata;
+
+ pdata = *this_cpu_ptr(&per_cpu_pdata);
+
+ disable_percpu_irq(pdata->virq_sgi);
+}
+
+static int xlnx_mbox_init_sgi(struct platform_device *pdev,
+ int sgi_num,
+ struct zynqmp_ipi_pdata *pdata)
+{
+ int ret = 0;
+ int cpu;
+
+ /*
+ * IRQ related structures are used for the following:
+ * for each SGI interrupt ensure its mapped by GIC IRQ domain
+ * and that each corresponding linux IRQ for the HW IRQ has
+ * a handler for when receiving an interrupt from the remote
+ * processor.
+ */
+ struct irq_domain *domain;
+ struct irq_fwspec sgi_fwspec;
+ struct device_node *interrupt_parent = NULL;
+ struct device *dev = &pdev->dev;
+
+ /* Find GIC controller to map SGIs. */
+ interrupt_parent = of_irq_find_parent(dev->of_node);
+ if (!interrupt_parent) {
+ dev_err(&pdev->dev, "Failed to find property for Interrupt parent\n");
+ return -EINVAL;
+ }
+
+ /* Each SGI needs to be associated with GIC's IRQ domain. */
+ domain = irq_find_host(interrupt_parent);
+ of_node_put(interrupt_parent);
+
+ /* Each mapping needs GIC domain when finding IRQ mapping. */
+ sgi_fwspec.fwnode = domain->fwnode;
+
+ /*
+ * When the IRQ domain resolves this mapping, the fwspec carries a
+ * single cell: the SGI interrupt number, set below.
+ */
+ sgi_fwspec.param_count = 1;
+
+ /* Set SGI's hwirq */
+ sgi_fwspec.param[0] = sgi_num;
+ pdata->virq_sgi = irq_create_fwspec_mapping(&sgi_fwspec);
+
+ for_each_possible_cpu(cpu)
+ per_cpu(per_cpu_pdata, cpu) = pdata;
+
+ ret = request_percpu_irq(pdata->virq_sgi, zynqmp_sgi_interrupt, pdev->name,
+ &per_cpu_pdata);
+ WARN_ON(ret);
+ if (ret) {
+ irq_dispose_mapping(pdata->virq_sgi);
return ret;
}
- mbox = &ipi_mbox->mbox;
- mbox->dev = mdev;
- mbox->ops = &zynqmp_ipi_chan_ops;
- mbox->num_chans = 2;
- mbox->txdone_irq = false;
- mbox->txdone_poll = true;
- mbox->txpoll_period = 5;
- mbox->of_xlate = zynqmp_ipi_of_xlate;
- chans = devm_kzalloc(mdev, 2 * sizeof(*chans), GFP_KERNEL);
- if (!chans)
- return -ENOMEM;
- mbox->chans = chans;
- chans[IPI_MB_CHNL_TX].con_priv = &ipi_mbox->mchans[IPI_MB_CHNL_TX];
- chans[IPI_MB_CHNL_RX].con_priv = &ipi_mbox->mchans[IPI_MB_CHNL_RX];
- ipi_mbox->mchans[IPI_MB_CHNL_TX].chan_type = IPI_MB_CHNL_TX;
- ipi_mbox->mchans[IPI_MB_CHNL_RX].chan_type = IPI_MB_CHNL_RX;
- ret = devm_mbox_controller_register(mdev, mbox);
- if (ret)
- dev_err(mdev,
- "Failed to register mbox_controller(%d)\n", ret);
- else
- dev_info(mdev,
- "Registered ZynqMP IPI mbox with TX/RX channels.\n");
+ irq_set_status_flags(pdata->virq_sgi, IRQ_PER_CPU);
+
+ /* Setup function for the CPU hot-plug cases */
+ cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mailbox/sgi:starting",
+ xlnx_mbox_cpuhp_start, xlnx_mbox_cpuhp_down);
+
return ret;
}
+static void xlnx_mbox_cleanup_sgi(struct zynqmp_ipi_pdata *pdata)
+{
+ cpuhp_remove_state(CPUHP_AP_ONLINE_DYN);
+
+ on_each_cpu(xlnx_disable_percpu_irq, NULL, 1);
+
+ irq_clear_status_flags(pdata->virq_sgi, IRQ_PER_CPU);
+ free_percpu_irq(pdata->virq_sgi, &per_cpu_pdata);
+ irq_dispose_mapping(pdata->virq_sgi);
+}
+
/**
* zynqmp_ipi_free_mboxes - Free IPI mailboxes devices
*
@@ -615,14 +890,14 @@ static void zynqmp_ipi_free_mboxes(struct zynqmp_ipi_pdata *pdata)
struct zynqmp_ipi_mbox *ipi_mbox;
int i;
- i = pdata->num_mboxes;
+ if (pdata->irq_type == IPI_IRQ_TYPE_SGI)
+ xlnx_mbox_cleanup_sgi(pdata);
+
+ i = pdata->num_mboxes - 1;
for (; i >= 0; i--) {
ipi_mbox = &pdata->ipi_mboxes[i];
- if (ipi_mbox->dev.parent) {
- mbox_controller_unregister(&ipi_mbox->mbox);
- if (device_is_registered(&ipi_mbox->dev))
- device_unregister(&ipi_mbox->dev);
- }
+ if (device_is_registered(&ipi_mbox->dev))
+ device_unregister(&ipi_mbox->dev);
}
}
@@ -631,11 +906,18 @@ static int zynqmp_ipi_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *nc, *np = pdev->dev.of_node;
struct zynqmp_ipi_pdata *pdata;
+ struct of_phandle_args out_irq;
struct zynqmp_ipi_mbox *mbox;
int num_mboxes, ret = -EINVAL;
+ setup_ipi_fn ipi_fn;
- num_mboxes = of_get_child_count(np);
- pdata = devm_kzalloc(dev, sizeof(*pdata) + (num_mboxes * sizeof(*mbox)),
+ num_mboxes = of_get_available_child_count(np);
+ if (num_mboxes == 0) {
+ dev_err(dev, "mailbox nodes not available\n");
+ return -EINVAL;
+ }
+
+ pdata = devm_kzalloc(dev, struct_size(pdata, ipi_mboxes, num_mboxes),
GFP_KERNEL);
if (!pdata)
return -ENOMEM;
@@ -648,13 +930,20 @@ static int zynqmp_ipi_probe(struct platform_device *pdev)
return ret;
}
+ ipi_fn = (setup_ipi_fn)device_get_match_data(&pdev->dev);
+ if (!ipi_fn) {
+ dev_err(dev,
+ "Mbox Compatible String is missing IPI Setup fn.\n");
+ return -ENODEV;
+ }
+
pdata->num_mboxes = num_mboxes;
- pdata->ipi_mboxes = (struct zynqmp_ipi_mbox *)
- ((char *)pdata + sizeof(*pdata));
mbox = pdata->ipi_mboxes;
for_each_available_child_of_node(np, nc) {
mbox->pdata = pdata;
+ mbox->setup_ipi_fn = ipi_fn;
+
ret = zynqmp_ipi_mbox_probe(mbox, nc);
if (ret) {
of_node_put(nc);
@@ -665,14 +954,34 @@ static int zynqmp_ipi_probe(struct platform_device *pdev)
mbox++;
}
- /* IPI IRQ */
- ret = platform_get_irq(pdev, 0);
- if (ret < 0)
+ ret = of_irq_parse_one(dev_of_node(dev), 0, &out_irq);
+ if (ret < 0) {
+ dev_err(dev, "failed to parse interrupts\n");
goto free_mbox_dev;
+ }
+
+ /* Use interrupt type to distinguish SGI and SPI interrupts */
+ pdata->irq_type = out_irq.args[0];
+
+ /*
+ * If the interrupt number is in the SGI range, request an SGI;
+ * otherwise request the IPI system IRQ.
+ */
+ if (pdata->irq_type == IPI_IRQ_TYPE_SGI) {
+ pdata->irq = out_irq.args[1];
+ ret = xlnx_mbox_init_sgi(pdev, pdata->irq, pdata);
+ if (ret)
+ goto free_mbox_dev;
+ } else {
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto free_mbox_dev;
+
+ pdata->irq = ret;
+ ret = devm_request_irq(dev, pdata->irq, zynqmp_ipi_interrupt,
+ IRQF_SHARED, dev_name(dev), pdata);
+ }
- pdata->irq = ret;
- ret = devm_request_irq(dev, pdata->irq, zynqmp_ipi_interrupt,
- IRQF_SHARED, dev_name(dev), pdata);
if (ret) {
dev_err(dev, "IRQ %d is not requested successfully.\n",
pdata->irq);
@@ -687,16 +996,25 @@ free_mbox_dev:
return ret;
}
-static int zynqmp_ipi_remove(struct platform_device *pdev)
+static void zynqmp_ipi_remove(struct platform_device *pdev)
{
struct zynqmp_ipi_pdata *pdata;
pdata = platform_get_drvdata(pdev);
zynqmp_ipi_free_mboxes(pdata);
-
- return 0;
}
+static const struct of_device_id zynqmp_ipi_of_match[] = {
+ { .compatible = "xlnx,zynqmp-ipi-mailbox",
+ .data = &zynqmp_ipi_setup,
+ },
+ { .compatible = "xlnx,versal-ipi-mailbox",
+ .data = &versal_ipi_setup,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, zynqmp_ipi_of_match);
+
static struct platform_driver zynqmp_ipi_driver = {
.probe = zynqmp_ipi_probe,
.remove = zynqmp_ipi_remove,