Diffstat (limited to 'drivers/mailbox')
-rw-r--r-- drivers/mailbox/Kconfig | 84
-rw-r--r-- drivers/mailbox/Makefile | 16
-rw-r--r-- drivers/mailbox/arm_mhu.c | 2
-rw-r--r-- drivers/mailbox/arm_mhu_db.c | 2
-rw-r--r-- drivers/mailbox/arm_mhuv2.c | 2
-rw-r--r-- drivers/mailbox/arm_mhuv3.c | 2
-rw-r--r-- drivers/mailbox/ast2700-mailbox.c | 235
-rw-r--r-- drivers/mailbox/bcm74110-mailbox.c | 656
-rw-r--r-- drivers/mailbox/cix-mailbox.c | 645
-rw-r--r-- drivers/mailbox/cv1800-mailbox.c | 220
-rw-r--r-- drivers/mailbox/exynos-mailbox.c | 157
-rw-r--r-- drivers/mailbox/imx-mailbox.c | 21
-rw-r--r-- drivers/mailbox/mailbox-altera.c | 4
-rw-r--r-- drivers/mailbox/mailbox-mchp-ipc-sbi.c | 504
-rw-r--r-- drivers/mailbox/mailbox-mpfs.c | 2
-rw-r--r-- drivers/mailbox/mailbox-test.c | 2
-rw-r--r-- drivers/mailbox/mailbox-th1520.c | 10
-rw-r--r-- drivers/mailbox/mailbox.c | 267
-rw-r--r-- drivers/mailbox/mailbox.h | 2
-rw-r--r-- drivers/mailbox/mtk-cmdq-mailbox.c | 106
-rw-r--r-- drivers/mailbox/mtk-gpueb-mailbox.c | 319
-rw-r--r-- drivers/mailbox/omap-mailbox.c | 36
-rw-r--r-- drivers/mailbox/pcc.c | 199
-rw-r--r-- drivers/mailbox/pl320-ipc.c | 14
-rw-r--r-- drivers/mailbox/qcom-apcs-ipc-mailbox.c | 18
-rw-r--r-- drivers/mailbox/qcom-ipcc.c | 19
-rw-r--r-- drivers/mailbox/riscv-sbi-mpxy-mbox.c | 1019
-rw-r--r-- drivers/mailbox/tegra-hsp.c | 78
-rw-r--r-- drivers/mailbox/zynqmp-ipi-mailbox.c | 26
29 files changed, 4327 insertions(+), 340 deletions(-)
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 8ecba7fb999e..29f16f220384 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -36,6 +36,36 @@ config ARM_MHU_V3
that provides different means of transports: supported extensions
will be discovered and possibly managed at probe-time.
+config AST2700_MBOX
+ tristate "ASPEED AST2700 IPC driver"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ help
+ Mailbox driver implementation for ASPEED AST27XX SoCs. This driver
+ can be used to send messages between the different processors in the
+ SoC. The driver provides mailbox support for sending interrupts to
+ the clients. Say Y here if you want to build this driver.
+
+config CV1800_MBOX
+ tristate "cv1800 mailbox"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ help
+ Mailbox driver implementation for Sophgo CV18XX SoCs. This driver
+ can be used to send messages between the different processors in the
+ SoC. Any processor can write data to a channel and set the
+ corresponding register to raise an interrupt to notify another
+ processor, and a processor is also allowed to send data to itself.
+
+config EXYNOS_MBOX
+ tristate "Exynos Mailbox"
+ depends on ARCH_EXYNOS || COMPILE_TEST
+ help
+ Say Y here if you want to build the Samsung Exynos Mailbox controller
+ driver. The controller has 16 flag bits for hardware interrupt
+ generation and a shared register for passing mailbox messages.
+ When the controller is used by the ACPM interface, the shared
+ register is ignored and the mailbox controller acts as a doorbell
+ that raises the interrupt to the ACPM firmware.
+
config IMX_MBOX
tristate "i.MX Mailbox"
depends on ARCH_MXC || COMPILE_TEST
@@ -178,6 +208,19 @@ config POLARFIRE_SOC_MAILBOX
If unsure, say N.
+config MCHP_SBI_IPC_MBOX
+ tristate "Microchip Inter-processor Communication (IPC) SBI driver"
+ depends on RISCV_SBI
+ depends on ARCH_MICROCHIP || COMPILE_TEST
+ help
+ Mailbox implementation for Microchip devices with an
+ Inter-processor communication (IPC) controller.
+
+ To compile this driver as a module, choose M here. The
+ module will be called mailbox-mchp-ipc-sbi.
+
+ If unsure, say N.
+
config QCOM_APCS_IPC
tristate "Qualcomm APCS IPC driver"
depends on ARCH_QCOM || COMPILE_TEST
@@ -251,6 +294,16 @@ config MTK_CMDQ_MBOX
critical time limitation, such as updating display configuration
during the vblank.
+config MTK_GPUEB_MBOX
+ tristate "MediaTek GPUEB Mailbox Support"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ help
+ The MediaTek GPUEB mailbox is used to communicate with the embedded
+ controller in charge of GPU frequency and power management on some
+ MediaTek SoCs, such as the MT8196.
+ Say Y or M here if you want to support the MT8196 SoC in your kernel
+ build.
+
config ZYNQMP_IPI_MBOX
tristate "Xilinx ZynqMP IPI Mailbox"
depends on ARCH_ZYNQMP && OF
@@ -306,4 +359,35 @@ config THEAD_TH1520_MBOX
kernel is running, and E902 core used for power management among other
things.
+config CIX_MBOX
+ tristate "CIX Mailbox"
+ depends on ARCH_CIX || COMPILE_TEST
+ depends on OF
+ help
+ Mailbox implementation for the CIX IPC system. The controller
+ supports 11 mailbox channels with different operating modes, and
+ every channel is unidirectional. Say Y here if you want to use the
+ CIX Mailbox support.
+
+config BCM74110_MAILBOX
+ tristate "Brcmstb BCM74110 Mailbox"
+ depends on ARCH_BRCMSTB || COMPILE_TEST
+ default ARCH_BRCMSTB
+ help
+ Broadcom STB mailbox driver, present starting with the brcmstb
+ BCM74110 SoCs. The mailbox is a communication channel between the
+ host processor and a coprocessor that handles various power
+ management tasks and more.
+
+config RISCV_SBI_MPXY_MBOX
+ tristate "RISC-V SBI Message Proxy (MPXY) Mailbox"
+ depends on RISCV_SBI
+ default RISCV
+ help
+ Mailbox driver implementation for RISC-V SBI Message Proxy (MPXY)
+ extension. This mailbox driver is used to send messages to the
+ remote processor through the SBI implementation (M-mode firmware
+ or HS-mode hypervisor). Say Y here if you want to have this support.
+ If unsure, say N.
+
endif
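
All of the controllers added above are consumed through the common mailbox framework rather than driver-specific entry points. As a rough orientation for readers of these Kconfig entries, a minimal consumer might look like the sketch below; it is a hedged illustration against the generic client API in <linux/mailbox_client.h>, with the demo_* names, channel index, and payload contents being assumptions rather than anything defined by these drivers.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/slab.h>

/* Hypothetical payload; each controller defines its own message format. */
static u32 demo_msg[8];

static void demo_rx_callback(struct mbox_client *cl, void *mssg)
{
	/* mssg points at controller-owned data; copy it out before returning. */
	dev_info(cl->dev, "mailbox message received\n");
}

static int demo_attach(struct device *dev)
{
	struct mbox_client *cl;
	struct mbox_chan *chan;
	int ret;

	cl = devm_kzalloc(dev, sizeof(*cl), GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	cl->dev = dev;
	cl->rx_callback = demo_rx_callback;
	cl->tx_block = true;	/* sleep until the controller reports tx-done */
	cl->tx_tout = 500;	/* ms */

	chan = mbox_request_channel(cl, 0);	/* index into the "mboxes" property */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = mbox_send_message(chan, demo_msg);
	mbox_free_channel(chan);

	return ret < 0 ? ret : 0;
}
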
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 5f4f5b0ce2cc..81820a4f5528 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -11,6 +11,12 @@ obj-$(CONFIG_ARM_MHU_V2) += arm_mhuv2.o
obj-$(CONFIG_ARM_MHU_V3) += arm_mhuv3.o
+obj-$(CONFIG_AST2700_MBOX) += ast2700-mailbox.o
+
+obj-$(CONFIG_CV1800_MBOX) += cv1800-mailbox.o
+
+obj-$(CONFIG_EXYNOS_MBOX) += exynos-mailbox.o
+
obj-$(CONFIG_IMX_MBOX) += imx-mailbox.o
obj-$(CONFIG_ARMADA_37XX_RWTM_MBOX) += armada-37xx-rwtm-mailbox.o
@@ -45,6 +51,8 @@ obj-$(CONFIG_BCM_FLEXRM_MBOX) += bcm-flexrm-mailbox.o
obj-$(CONFIG_POLARFIRE_SOC_MAILBOX) += mailbox-mpfs.o
+obj-$(CONFIG_MCHP_SBI_IPC_MBOX) += mailbox-mchp-ipc-sbi.o
+
obj-$(CONFIG_QCOM_APCS_IPC) += qcom-apcs-ipc-mailbox.o
obj-$(CONFIG_TEGRA_HSP_MBOX) += tegra-hsp.o
@@ -55,6 +63,8 @@ obj-$(CONFIG_MTK_ADSP_MBOX) += mtk-adsp-mailbox.o
obj-$(CONFIG_MTK_CMDQ_MBOX) += mtk-cmdq-mailbox.o
+obj-$(CONFIG_MTK_GPUEB_MBOX) += mtk-gpueb-mailbox.o
+
obj-$(CONFIG_ZYNQMP_IPI_MBOX) += zynqmp-ipi-mailbox.o
obj-$(CONFIG_SUN6I_MSGBOX) += sun6i-msgbox.o
@@ -66,3 +76,9 @@ obj-$(CONFIG_QCOM_CPUCP_MBOX) += qcom-cpucp-mbox.o
obj-$(CONFIG_QCOM_IPCC) += qcom-ipcc.o
obj-$(CONFIG_THEAD_TH1520_MBOX) += mailbox-th1520.o
+
+obj-$(CONFIG_CIX_MBOX) += cix-mailbox.o
+
+obj-$(CONFIG_BCM74110_MAILBOX) += bcm74110-mailbox.o
+
+obj-$(CONFIG_RISCV_SBI_MPXY_MBOX) += riscv-sbi-mpxy-mbox.o
diff --git a/drivers/mailbox/arm_mhu.c b/drivers/mailbox/arm_mhu.c
index 537f7bfb7b06..0950b7bce184 100644
--- a/drivers/mailbox/arm_mhu.c
+++ b/drivers/mailbox/arm_mhu.c
@@ -153,7 +153,7 @@ static int mhu_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
}
-static struct amba_id mhu_ids[] = {
+static const struct amba_id mhu_ids[] = {
{
.id = 0x1bb098,
.mask = 0xffffff,
diff --git a/drivers/mailbox/arm_mhu_db.c b/drivers/mailbox/arm_mhu_db.c
index 27a510d46908..9e937b09c5fb 100644
--- a/drivers/mailbox/arm_mhu_db.c
+++ b/drivers/mailbox/arm_mhu_db.c
@@ -328,7 +328,7 @@ static int mhu_db_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
}
-static struct amba_id mhu_ids[] = {
+static const struct amba_id mhu_ids[] = {
{
.id = 0x1bb098,
.mask = 0xffffff,
diff --git a/drivers/mailbox/arm_mhuv2.c b/drivers/mailbox/arm_mhuv2.c
index cff7c343ee08..f035284944c0 100644
--- a/drivers/mailbox/arm_mhuv2.c
+++ b/drivers/mailbox/arm_mhuv2.c
@@ -1107,7 +1107,7 @@ static void mhuv2_remove(struct amba_device *adev)
writel_relaxed(0x0, &mhu->send->access_request);
}
-static struct amba_id mhuv2_ids[] = {
+static const struct amba_id mhuv2_ids[] = {
{
/* 2.0 */
.id = 0xbb0d1,
diff --git a/drivers/mailbox/arm_mhuv3.c b/drivers/mailbox/arm_mhuv3.c
index b97e79a5870f..0910da67f8a1 100644
--- a/drivers/mailbox/arm_mhuv3.c
+++ b/drivers/mailbox/arm_mhuv3.c
@@ -945,7 +945,7 @@ static irqreturn_t mhuv3_mbx_comb_interrupt(int irq, void *arg)
if (IS_ERR(data)) {
dev_err(dev,
"Failed to read in-band data. err:%ld\n",
- PTR_ERR(no_free_ptr(data)));
+ PTR_ERR(data));
goto rx_ack;
}
}
diff --git a/drivers/mailbox/ast2700-mailbox.c b/drivers/mailbox/ast2700-mailbox.c
new file mode 100644
index 000000000000..83c6afe5411f
--- /dev/null
+++ b/drivers/mailbox/ast2700-mailbox.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright Aspeed Technology Inc. (C) 2025. All rights reserved
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* Each bit in the register represents an IPC ID */
+#define IPCR_TX_TRIG 0x00
+#define IPCR_ENABLE 0x04
+#define IPCR_STATUS 0x08
+#define RX_IRQ(n) BIT(n)
+#define RX_IRQ_MASK 0xf
+#define IPCR_DATA 0x10
+
+struct ast2700_mbox_data {
+ u8 num_chans;
+ u8 msg_size;
+};
+
+struct ast2700_mbox {
+ struct mbox_controller mbox;
+ u8 msg_size;
+ void __iomem *tx_regs;
+ void __iomem *rx_regs;
+ spinlock_t lock;
+};
+
+static inline int ch_num(struct mbox_chan *chan)
+{
+ return chan - chan->mbox->chans;
+}
+
+static inline bool ast2700_mbox_tx_done(struct ast2700_mbox *mb, int idx)
+{
+ return !(readl(mb->tx_regs + IPCR_STATUS) & BIT(idx));
+}
+
+static irqreturn_t ast2700_mbox_irq(int irq, void *p)
+{
+ struct ast2700_mbox *mb = p;
+ void __iomem *data_reg;
+ int num_words = mb->msg_size / sizeof(u32);
+ u32 *word_data;
+ u32 status;
+ int n, i;
+
+ /* Only examine channels that are currently enabled. */
+ status = readl(mb->rx_regs + IPCR_ENABLE) &
+ readl(mb->rx_regs + IPCR_STATUS);
+
+ if (!(status & RX_IRQ_MASK))
+ return IRQ_NONE;
+
+ for (n = 0; n < mb->mbox.num_chans; ++n) {
+ struct mbox_chan *chan = &mb->mbox.chans[n];
+
+ if (!(status & RX_IRQ(n)))
+ continue;
+
+ data_reg = mb->rx_regs + IPCR_DATA + mb->msg_size * n;
+ word_data = chan->con_priv;
+ /* Read the message data */
+ for (i = 0; i < num_words; i++)
+ word_data[i] = readl(data_reg + i * sizeof(u32));
+
+ mbox_chan_received_data(chan, chan->con_priv);
+
+ /* The IRQ can be cleared only once the FIFO is empty. */
+ writel(RX_IRQ(n), mb->rx_regs + IPCR_STATUS);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int ast2700_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev);
+ int idx = ch_num(chan);
+ void __iomem *data_reg = mb->tx_regs + IPCR_DATA + mb->msg_size * idx;
+ u32 *word_data = data;
+ int num_words = mb->msg_size / sizeof(u32);
+ int i;
+
+ if (!(readl(mb->tx_regs + IPCR_ENABLE) & BIT(idx))) {
+ dev_warn(mb->mbox.dev, "%s: Ch-%d not enabled yet\n", __func__, idx);
+ return -ENODEV;
+ }
+
+ if (!ast2700_mbox_tx_done(mb, idx)) {
+ dev_warn(mb->mbox.dev, "%s: Ch-%d previous data transfer has not finished\n", __func__, idx);
+ return -EBUSY;
+ }
+
+ /* Write the message data */
+ for (i = 0; i < num_words; i++)
+ writel(word_data[i], data_reg + i * sizeof(u32));
+
+ writel(BIT(idx), mb->tx_regs + IPCR_TX_TRIG);
+ dev_dbg(mb->mbox.dev, "%s: Ch-%d sent\n", __func__, idx);
+
+ return 0;
+}
+
+static int ast2700_mbox_startup(struct mbox_chan *chan)
+{
+ struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev);
+ int idx = ch_num(chan);
+ void __iomem *reg = mb->rx_regs + IPCR_ENABLE;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mb->lock, flags);
+ writel(readl(reg) | BIT(idx), reg);
+ spin_unlock_irqrestore(&mb->lock, flags);
+
+ return 0;
+}
+
+static void ast2700_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev);
+ int idx = ch_num(chan);
+ void __iomem *reg = mb->rx_regs + IPCR_ENABLE;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mb->lock, flags);
+ writel(readl(reg) & ~BIT(idx), reg);
+ spin_unlock_irqrestore(&mb->lock, flags);
+}
+
+static bool ast2700_mbox_last_tx_done(struct mbox_chan *chan)
+{
+ struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev);
+ int idx = ch_num(chan);
+
+ return ast2700_mbox_tx_done(mb, idx);
+}
+
+static const struct mbox_chan_ops ast2700_mbox_chan_ops = {
+ .send_data = ast2700_mbox_send_data,
+ .startup = ast2700_mbox_startup,
+ .shutdown = ast2700_mbox_shutdown,
+ .last_tx_done = ast2700_mbox_last_tx_done,
+};
+
+static int ast2700_mbox_probe(struct platform_device *pdev)
+{
+ struct ast2700_mbox *mb;
+ const struct ast2700_mbox_data *dev_data;
+ struct device *dev = &pdev->dev;
+ int irq, ret;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ dev_data = device_get_match_data(&pdev->dev);
+
+ mb = devm_kzalloc(dev, sizeof(*mb), GFP_KERNEL);
+ if (!mb)
+ return -ENOMEM;
+
+ mb->mbox.chans = devm_kcalloc(&pdev->dev, dev_data->num_chans,
+ sizeof(*mb->mbox.chans), GFP_KERNEL);
+ if (!mb->mbox.chans)
+ return -ENOMEM;
+
+ /* con_priv of each channel is used to store the message received */
+ for (int i = 0; i < dev_data->num_chans; i++) {
+ mb->mbox.chans[i].con_priv = devm_kcalloc(dev, dev_data->msg_size,
+ sizeof(u8), GFP_KERNEL);
+ if (!mb->mbox.chans[i].con_priv)
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, mb);
+
+ mb->tx_regs = devm_platform_ioremap_resource_byname(pdev, "tx");
+ if (IS_ERR(mb->tx_regs))
+ return PTR_ERR(mb->tx_regs);
+
+ mb->rx_regs = devm_platform_ioremap_resource_byname(pdev, "rx");
+ if (IS_ERR(mb->rx_regs))
+ return PTR_ERR(mb->rx_regs);
+
+ mb->msg_size = dev_data->msg_size;
+ mb->mbox.dev = dev;
+ mb->mbox.num_chans = dev_data->num_chans;
+ mb->mbox.ops = &ast2700_mbox_chan_ops;
+ mb->mbox.txdone_irq = false;
+ mb->mbox.txdone_poll = true;
+ mb->mbox.txpoll_period = 5;
+ spin_lock_init(&mb->lock);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, ast2700_mbox_irq, 0, dev_name(dev), mb);
+ if (ret)
+ return ret;
+
+ return devm_mbox_controller_register(dev, &mb->mbox);
+}
+
+static const struct ast2700_mbox_data ast2700_dev_data = {
+ .num_chans = 4,
+ .msg_size = 0x20,
+};
+
+static const struct of_device_id ast2700_mbox_of_match[] = {
+ { .compatible = "aspeed,ast2700-mailbox", .data = &ast2700_dev_data },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ast2700_mbox_of_match);
+
+static struct platform_driver ast2700_mbox_driver = {
+ .driver = {
+ .name = "ast2700-mailbox",
+ .of_match_table = ast2700_mbox_of_match,
+ },
+ .probe = ast2700_mbox_probe,
+};
+module_platform_driver(ast2700_mbox_driver);
+
+MODULE_AUTHOR("Jammy Huang <jammy_huang@aspeedtech.com>");
+MODULE_DESCRIPTION("ASPEED AST2700 IPC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mailbox/bcm74110-mailbox.c b/drivers/mailbox/bcm74110-mailbox.c
new file mode 100644
index 000000000000..2e7e86f3e6a4
--- /dev/null
+++ b/drivers/mailbox/bcm74110-mailbox.c
@@ -0,0 +1,656 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Broadcom BCM74110 Mailbox Driver
+ *
+ * Copyright (c) 2025 Broadcom
+ */
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/delay.h>
+#include <linux/mailbox_controller.h>
+#include <linux/bitfield.h>
+#include <linux/slab.h>
+
+#define BCM_MBOX_BASE(sel) ((sel) * 0x40)
+#define BCM_MBOX_IRQ_BASE(sel) (((sel) * 0x20) + 0x800)
+
+#define BCM_MBOX_CFGA 0x0
+#define BCM_MBOX_CFGB 0x4
+#define BCM_MBOX_CFGC 0x8
+#define BCM_MBOX_CFGD 0xc
+#define BCM_MBOX_CTRL 0x10
+#define BCM_MBOX_CTRL_EN BIT(0)
+#define BCM_MBOX_CTRL_CLR BIT(1)
+#define BCM_MBOX_STATUS0 0x14
+#define BCM_MBOX_STATUS0_NOT_EMPTY BIT(28)
+#define BCM_MBOX_STATUS0_FULL BIT(29)
+#define BCM_MBOX_STATUS1 0x18
+#define BCM_MBOX_STATUS2 0x1c
+#define BCM_MBOX_WDATA 0x20
+#define BCM_MBOX_RDATA 0x28
+
+#define BCM_MBOX_IRQ_STATUS 0x0
+#define BCM_MBOX_IRQ_SET 0x4
+#define BCM_MBOX_IRQ_CLEAR 0x8
+#define BCM_MBOX_IRQ_MASK_STATUS 0xc
+#define BCM_MBOX_IRQ_MASK_SET 0x10
+#define BCM_MBOX_IRQ_MASK_CLEAR 0x14
+#define BCM_MBOX_IRQ_TIMEOUT BIT(0)
+#define BCM_MBOX_IRQ_NOT_EMPTY BIT(1)
+#define BCM_MBOX_IRQ_FULL BIT(2)
+#define BCM_MBOX_IRQ_LOW_WM BIT(3)
+#define BCM_MBOX_IRQ_HIGH_WM BIT(4)
+
+#define BCM_LINK_CODE0 0xbe0
+#define BCM_LINK_CODE1 0xbe1
+#define BCM_LINK_CODE2 0xbe2
+
+enum {
+ BCM_MSG_FUNC_LINK_START = 0,
+ BCM_MSG_FUNC_LINK_STOP,
+ BCM_MSG_FUNC_SHMEM_TX,
+ BCM_MSG_FUNC_SHMEM_RX,
+ BCM_MSG_FUNC_SHMEM_STOP,
+ BCM_MSG_FUNC_MAX,
+};
+
+enum {
+ BCM_MSG_SVC_INIT = 0,
+ BCM_MSG_SVC_PMC,
+ BCM_MSG_SVC_SCMI,
+ BCM_MSG_SVC_DPFE,
+ BCM_MSG_SVC_MAX,
+};
+
+struct bcm74110_mbox_msg {
+ struct list_head list_entry;
+#define BCM_MSG_VERSION_MASK GENMASK(31, 29)
+#define BCM_MSG_VERSION 0x1
+#define BCM_MSG_REQ_MASK BIT(28)
+#define BCM_MSG_RPLY_MASK BIT(27)
+#define BCM_MSG_SVC_MASK GENMASK(26, 24)
+#define BCM_MSG_FUNC_MASK GENMASK(23, 16)
+#define BCM_MSG_LENGTH_MASK GENMASK(15, 4)
+#define BCM_MSG_SLOT_MASK GENMASK(3, 0)
+
+#define BCM_MSG_SET_FIELD(hdr, field, val) \
+ do { \
+ hdr &= ~BCM_MSG_##field##_MASK; \
+ hdr |= FIELD_PREP(BCM_MSG_##field##_MASK, val); \
+ } while (0)
+
+#define BCM_MSG_GET_FIELD(hdr, field) \
+ FIELD_GET(BCM_MSG_##field##_MASK, hdr)
+ u32 msg;
+};
+
+struct bcm74110_mbox_chan {
+ struct bcm74110_mbox *mbox;
+ bool en;
+ int slot;
+ int type;
+};
+
+struct bcm74110_mbox {
+ struct platform_device *pdev;
+ void __iomem *base;
+
+ int tx_chan;
+ int rx_chan;
+ int rx_irq;
+ struct list_head rx_svc_init_list;
+ spinlock_t rx_svc_list_lock;
+
+ struct mbox_controller controller;
+ struct bcm74110_mbox_chan *mbox_chan;
+};
+
+#define BCM74110_OFFSET_IO_WRITEL_MACRO(name, offset_base) \
+static void bcm74110_##name##_writel(struct bcm74110_mbox *mbox,\
+ u32 val, u32 off) \
+{ \
+ writel_relaxed(val, mbox->base + offset_base + off); \
+}
+BCM74110_OFFSET_IO_WRITEL_MACRO(tx, BCM_MBOX_BASE(mbox->tx_chan));
+BCM74110_OFFSET_IO_WRITEL_MACRO(irq, BCM_MBOX_IRQ_BASE(mbox->rx_chan));
+
+#define BCM74110_OFFSET_IO_READL_MACRO(name, offset_base) \
+static u32 bcm74110_##name##_readl(struct bcm74110_mbox *mbox, \
+ u32 off) \
+{ \
+ return readl_relaxed(mbox->base + offset_base + off); \
+}
+BCM74110_OFFSET_IO_READL_MACRO(tx, BCM_MBOX_BASE(mbox->tx_chan));
+BCM74110_OFFSET_IO_READL_MACRO(rx, BCM_MBOX_BASE(mbox->rx_chan));
+BCM74110_OFFSET_IO_READL_MACRO(irq, BCM_MBOX_IRQ_BASE(mbox->rx_chan));
+
+static inline struct bcm74110_mbox *bcm74110_mbox_from_cntrl(
+ struct mbox_controller *cntrl)
+{
+ return container_of(cntrl, struct bcm74110_mbox, controller);
+}
+
+static void bcm74110_rx_push_init_msg(struct bcm74110_mbox *mbox, u32 val)
+{
+ struct bcm74110_mbox_msg *msg;
+
+ msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
+ if (!msg)
+ return;
+
+ INIT_LIST_HEAD(&msg->list_entry);
+ msg->msg = val;
+
+ spin_lock(&mbox->rx_svc_list_lock);
+ list_add_tail(&msg->list_entry, &mbox->rx_svc_init_list);
+ spin_unlock(&mbox->rx_svc_list_lock);
+}
+
+static void bcm74110_rx_process_msg(struct bcm74110_mbox *mbox)
+{
+ struct device *dev = &mbox->pdev->dev;
+ struct bcm74110_mbox_chan *chan_priv;
+ struct mbox_chan *chan;
+ u32 msg, status;
+ int type;
+
+ do {
+ msg = bcm74110_rx_readl(mbox, BCM_MBOX_RDATA);
+ status = bcm74110_rx_readl(mbox, BCM_MBOX_STATUS0);
+
+ dev_dbg(dev, "rx: [{req=%lu|rply=%lu|srv=%lu|fn=%lu|length=%lu|slot=%lu]\n",
+ BCM_MSG_GET_FIELD(msg, REQ), BCM_MSG_GET_FIELD(msg, RPLY),
+ BCM_MSG_GET_FIELD(msg, SVC), BCM_MSG_GET_FIELD(msg, FUNC),
+ BCM_MSG_GET_FIELD(msg, LENGTH), BCM_MSG_GET_FIELD(msg, SLOT));
+
+ type = BCM_MSG_GET_FIELD(msg, SVC);
+ switch (type) {
+ case BCM_MSG_SVC_INIT:
+ bcm74110_rx_push_init_msg(mbox, msg);
+ break;
+ case BCM_MSG_SVC_PMC:
+ case BCM_MSG_SVC_SCMI:
+ case BCM_MSG_SVC_DPFE:
+ chan = &mbox->controller.chans[type];
+ chan_priv = chan->con_priv;
+ if (chan_priv->en)
+ mbox_chan_received_data(chan, NULL);
+ else
+ dev_warn(dev, "Channel not enabled\n");
+ break;
+ default:
+ dev_warn(dev, "Unsupported msg received\n");
+ }
+ } while (status & BCM_MBOX_STATUS0_NOT_EMPTY);
+}
+
+static irqreturn_t bcm74110_mbox_isr(int irq, void *data)
+{
+ struct bcm74110_mbox *mbox = data;
+ u32 status;
+
+ status = bcm74110_irq_readl(mbox, BCM_MBOX_IRQ_STATUS);
+
+ bcm74110_irq_writel(mbox, 0xffffffff, BCM_MBOX_IRQ_CLEAR);
+
+ if (status & BCM_MBOX_IRQ_NOT_EMPTY)
+ bcm74110_rx_process_msg(mbox);
+ else
+ dev_warn(&mbox->pdev->dev, "Spurious interrupt\n");
+
+ return IRQ_HANDLED;
+}
+
+static void bcm74110_mbox_mask_and_clear(struct bcm74110_mbox *mbox)
+{
+ bcm74110_irq_writel(mbox, 0xffffffff, BCM_MBOX_IRQ_MASK_SET);
+ bcm74110_irq_writel(mbox, 0xffffffff, BCM_MBOX_IRQ_CLEAR);
+}
+
+static int bcm74110_rx_pop_init_msg(struct bcm74110_mbox *mbox, u32 func_type,
+ u32 *val)
+{
+ struct bcm74110_mbox_msg *msg, *msg_tmp;
+ unsigned long flags;
+ bool found = false;
+
+ spin_lock_irqsave(&mbox->rx_svc_list_lock, flags);
+ list_for_each_entry_safe(msg, msg_tmp, &mbox->rx_svc_init_list,
+ list_entry) {
+ if (BCM_MSG_GET_FIELD(msg->msg, FUNC) == func_type) {
+ list_del(&msg->list_entry);
+ found = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&mbox->rx_svc_list_lock, flags);
+
+ if (!found)
+ return -EINVAL;
+
+ *val = msg->msg;
+ kfree(msg);
+
+ return 0;
+}
+
+static void bcm74110_rx_flush_msg(struct bcm74110_mbox *mbox)
+{
+ struct bcm74110_mbox_msg *msg, *msg_tmp;
+ LIST_HEAD(list_temp);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mbox->rx_svc_list_lock, flags);
+ list_splice_init(&mbox->rx_svc_init_list, &list_temp);
+ spin_unlock_irqrestore(&mbox->rx_svc_list_lock, flags);
+
+ list_for_each_entry_safe(msg, msg_tmp, &list_temp, list_entry) {
+ list_del(&msg->list_entry);
+ kfree(msg);
+ }
+}
+
+#define BCM_DEQUEUE_TIMEOUT_MS 30
+static int bcm74110_rx_pop_init_msg_block(struct bcm74110_mbox *mbox, u32 func_type,
+ u32 *val)
+{
+ int ret, timeout = 0;
+
+ do {
+ ret = bcm74110_rx_pop_init_msg(mbox, func_type, val);
+
+ if (!ret)
+ return 0;
+
+ /* TODO: Figure out what is a good sleep here. */
+ usleep_range(1000, 2000);
+ timeout++;
+ } while (timeout < BCM_DEQUEUE_TIMEOUT_MS);
+
+ dev_warn(&mbox->pdev->dev, "Timeout waiting for service init response\n");
+ return -ETIMEDOUT;
+}
+
+static int bcm74110_mbox_create_msg(int req, int rply, int svc, int func,
+ int length, int slot)
+{
+ u32 msg = 0;
+
+ BCM_MSG_SET_FIELD(msg, REQ, req);
+ BCM_MSG_SET_FIELD(msg, RPLY, rply);
+ BCM_MSG_SET_FIELD(msg, SVC, svc);
+ BCM_MSG_SET_FIELD(msg, FUNC, func);
+ BCM_MSG_SET_FIELD(msg, LENGTH, length);
+ BCM_MSG_SET_FIELD(msg, SLOT, slot);
+
+ return msg;
+}
+
+static int bcm74110_mbox_tx_msg(struct bcm74110_mbox *mbox, u32 msg)
+{
+ int val;
+
+ /* We can potentially poll with timeout here instead */
+ val = bcm74110_tx_readl(mbox, BCM_MBOX_STATUS0);
+ if (val & BCM_MBOX_STATUS0_FULL) {
+ dev_err(&mbox->pdev->dev, "Mailbox full\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(&mbox->pdev->dev, "tx: [{req=%lu|rply=%lu|srv=%lu|fn=%lu|length=%lu|slot=%lu]\n",
+ BCM_MSG_GET_FIELD(msg, REQ), BCM_MSG_GET_FIELD(msg, RPLY),
+ BCM_MSG_GET_FIELD(msg, SVC), BCM_MSG_GET_FIELD(msg, FUNC),
+ BCM_MSG_GET_FIELD(msg, LENGTH), BCM_MSG_GET_FIELD(msg, SLOT));
+
+ bcm74110_tx_writel(mbox, msg, BCM_MBOX_WDATA);
+
+ return 0;
+}
+
+#define BCM_MBOX_LINK_TRAINING_RETRIES 5
+static int bcm74110_mbox_link_training(struct bcm74110_mbox *mbox)
+{
+ int ret, retries = 0;
+ u32 msg = 0, orig_len = 0, len = BCM_LINK_CODE0;
+
+ do {
+ switch (len) {
+ case 0:
+ retries++;
+ dev_warn(&mbox->pdev->dev,
+ "Link train failed, trying again... %d\n",
+ retries);
+ if (retries > BCM_MBOX_LINK_TRAINING_RETRIES)
+ return -EINVAL;
+ len = BCM_LINK_CODE0;
+ fallthrough;
+ case BCM_LINK_CODE0:
+ case BCM_LINK_CODE1:
+ case BCM_LINK_CODE2:
+ msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT,
+ BCM_MSG_FUNC_LINK_START,
+ len, BCM_MSG_SVC_INIT);
+ break;
+ default:
+ break;
+ }
+
+ bcm74110_mbox_tx_msg(mbox, msg);
+
+ /* No response expected for LINK_CODE2 */
+ if (len == BCM_LINK_CODE2)
+ return 0;
+
+ orig_len = len;
+
+ ret = bcm74110_rx_pop_init_msg_block(mbox,
+ BCM_MSG_GET_FIELD(msg, FUNC),
+ &msg);
+ if (ret) {
+ len = 0;
+ continue;
+ }
+
+ if ((BCM_MSG_GET_FIELD(msg, SVC) != BCM_MSG_SVC_INIT) ||
+ (BCM_MSG_GET_FIELD(msg, FUNC) != BCM_MSG_FUNC_LINK_START) ||
+ (BCM_MSG_GET_FIELD(msg, SLOT) != 0) ||
+ (BCM_MSG_GET_FIELD(msg, RPLY) != 1) ||
+ (BCM_MSG_GET_FIELD(msg, REQ) != 0)) {
+ len = 0;
+ continue;
+ }
+
+ len = BCM_MSG_GET_FIELD(msg, LENGTH);
+
+ /* Make sure sequence is good */
+ if (len != (orig_len + 1)) {
+ len = 0;
+ continue;
+ }
+ } while (1);
+
+ return -EINVAL;
+}
+
+static int bcm74110_mbox_tx_msg_and_wait_ack(struct bcm74110_mbox *mbox, u32 msg)
+{
+ int ret;
+ u32 recv_msg;
+
+ ret = bcm74110_mbox_tx_msg(mbox, msg);
+ if (ret)
+ return ret;
+
+ ret = bcm74110_rx_pop_init_msg_block(mbox, BCM_MSG_GET_FIELD(msg, FUNC),
+ &recv_msg);
+ if (ret)
+ return ret;
+
+ /*
+ * Modify tx message to verify rx ack.
+ * Flip RPLY/REQ for synchronous messages
+ */
+ if (BCM_MSG_GET_FIELD(msg, REQ) == 1) {
+ BCM_MSG_SET_FIELD(msg, RPLY, 1);
+ BCM_MSG_SET_FIELD(msg, REQ, 0);
+ }
+
+ if (msg != recv_msg) {
+ dev_err(&mbox->pdev->dev, "Found ack, but ack is invalid\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Each index points to 0x100 bytes of HAB MEM. The IDX size counts from 0 */
+#define BCM_MBOX_HAB_MEM_IDX_START 0x30
+#define BCM_MBOX_HAB_MEM_IDX_SIZE 0x0
+static int bcm74110_mbox_shmem_init(struct bcm74110_mbox *mbox)
+{
+ u32 msg = 0;
+ int ret;
+
+ msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT,
+ BCM_MSG_FUNC_SHMEM_STOP,
+ 0, BCM_MSG_SVC_INIT);
+ ret = bcm74110_mbox_tx_msg_and_wait_ack(mbox, msg);
+ if (ret)
+ return -EINVAL;
+
+ msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT,
+ BCM_MSG_FUNC_SHMEM_TX,
+ BCM_MBOX_HAB_MEM_IDX_START,
+ BCM_MBOX_HAB_MEM_IDX_SIZE);
+ ret = bcm74110_mbox_tx_msg_and_wait_ack(mbox, msg);
+ if (ret)
+ return -EINVAL;
+
+ msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT,
+ BCM_MSG_FUNC_SHMEM_RX,
+ BCM_MBOX_HAB_MEM_IDX_START,
+ BCM_MBOX_HAB_MEM_IDX_SIZE);
+ ret = bcm74110_mbox_tx_msg_and_wait_ack(mbox, msg);
+ if (ret)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int bcm74110_mbox_init(struct bcm74110_mbox *mbox)
+{
+ int ret = 0;
+
+ /* Disable queues tx/rx */
+ bcm74110_tx_writel(mbox, 0x0, BCM_MBOX_CTRL);
+
+ /* Clear status & restart tx/rx */
+ bcm74110_tx_writel(mbox, BCM_MBOX_CTRL_EN | BCM_MBOX_CTRL_CLR,
+ BCM_MBOX_CTRL);
+
+ /* Unmask irq */
+ bcm74110_irq_writel(mbox, BCM_MBOX_IRQ_NOT_EMPTY, BCM_MBOX_IRQ_MASK_CLEAR);
+
+ ret = bcm74110_mbox_link_training(mbox);
+ if (ret) {
+ dev_err(&mbox->pdev->dev, "Training failed\n");
+ return ret;
+ }
+
+ return bcm74110_mbox_shmem_init(mbox);
+}
+
+static int bcm74110_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct bcm74110_mbox_chan *chan_priv = chan->con_priv;
+ u32 msg;
+
+ switch (chan_priv->type) {
+ case BCM_MSG_SVC_PMC:
+ case BCM_MSG_SVC_SCMI:
+ case BCM_MSG_SVC_DPFE:
+ msg = bcm74110_mbox_create_msg(1, 0, chan_priv->type, 0,
+ 128 + 28, chan_priv->slot);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return bcm74110_mbox_tx_msg(chan_priv->mbox, msg);
+}
+
+static int bcm74110_mbox_chan_startup(struct mbox_chan *chan)
+{
+ struct bcm74110_mbox_chan *chan_priv = chan->con_priv;
+
+ chan_priv->en = true;
+
+ return 0;
+}
+
+static void bcm74110_mbox_chan_shutdown(struct mbox_chan *chan)
+{
+ struct bcm74110_mbox_chan *chan_priv = chan->con_priv;
+
+ chan_priv->en = false;
+}
+
+static const struct mbox_chan_ops bcm74110_mbox_chan_ops = {
+ .send_data = bcm74110_mbox_send_data,
+ .startup = bcm74110_mbox_chan_startup,
+ .shutdown = bcm74110_mbox_chan_shutdown,
+};
+
+static void bcm74110_mbox_shutdown(struct platform_device *pdev)
+{
+ struct bcm74110_mbox *mbox = dev_get_drvdata(&pdev->dev);
+ u32 msg;
+
+ msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT,
+ BCM_MSG_FUNC_LINK_STOP,
+ 0, 0);
+
+ bcm74110_mbox_tx_msg_and_wait_ack(mbox, msg);
+
+ /* Even if we don't receive an ACK, let's shut it down */
+
+ bcm74110_mbox_mask_and_clear(mbox);
+
+ /* Disable queues tx/rx */
+ bcm74110_tx_writel(mbox, 0x0, BCM_MBOX_CTRL);
+
+ /* Flush queues */
+ bcm74110_rx_flush_msg(mbox);
+}
+
+static struct mbox_chan *bcm74110_mbox_of_xlate(struct mbox_controller *cntrl,
+ const struct of_phandle_args *p)
+{
+ struct bcm74110_mbox *mbox = bcm74110_mbox_from_cntrl(cntrl);
+ struct device *dev = &mbox->pdev->dev;
+ struct bcm74110_mbox_chan *chan_priv;
+ int slot, type;
+
+ if (p->args_count != 2) {
+ dev_err(dev, "Invalid arguments\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ type = p->args[0];
+ slot = p->args[1];
+
+ switch (type) {
+ case BCM_MSG_SVC_PMC:
+ case BCM_MSG_SVC_SCMI:
+ case BCM_MSG_SVC_DPFE:
+ if (slot > BCM_MBOX_HAB_MEM_IDX_SIZE) {
+ dev_err(dev, "Not enough shared memory\n");
+ return ERR_PTR(-EINVAL);
+ }
+ chan_priv = cntrl->chans[type].con_priv;
+ chan_priv->slot = slot;
+ chan_priv->type = type;
+ break;
+ default:
+ dev_err(dev, "Invalid channel type: %d\n", type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &cntrl->chans[type];
+}
+
+static int bcm74110_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bcm74110_mbox *mbox;
+ int i, ret;
+
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ mbox->pdev = pdev;
+ platform_set_drvdata(pdev, mbox);
+
+ mbox->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mbox->base))
+ return dev_err_probe(dev, PTR_ERR(mbox->base), "Failed to iomap\n");
+
+ ret = of_property_read_u32(dev->of_node, "brcm,tx", &mbox->tx_chan);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to find tx channel\n");
+
+ ret = of_property_read_u32(dev->of_node, "brcm,rx", &mbox->rx_chan);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to find rx channel\n");
+
+ mbox->rx_irq = platform_get_irq(pdev, 0);
+ if (mbox->rx_irq < 0)
+ return mbox->rx_irq;
+
+ INIT_LIST_HEAD(&mbox->rx_svc_init_list);
+ spin_lock_init(&mbox->rx_svc_list_lock);
+ bcm74110_mbox_mask_and_clear(mbox);
+
+ ret = devm_request_irq(dev, mbox->rx_irq, bcm74110_mbox_isr,
+ IRQF_NO_SUSPEND, pdev->name, mbox);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request irq\n");
+
+ mbox->controller.ops = &bcm74110_mbox_chan_ops;
+ mbox->controller.dev = dev;
+ mbox->controller.num_chans = BCM_MSG_SVC_MAX;
+ mbox->controller.of_xlate = &bcm74110_mbox_of_xlate;
+ mbox->controller.chans = devm_kcalloc(dev, BCM_MSG_SVC_MAX,
+ sizeof(*mbox->controller.chans),
+ GFP_KERNEL);
+ if (!mbox->controller.chans)
+ return -ENOMEM;
+
+ mbox->mbox_chan = devm_kcalloc(dev, BCM_MSG_SVC_MAX,
+ sizeof(*mbox->mbox_chan),
+ GFP_KERNEL);
+ if (!mbox->mbox_chan)
+ return -ENOMEM;
+
+ for (i = 0; i < BCM_MSG_SVC_MAX; i++) {
+ mbox->mbox_chan[i].mbox = mbox;
+ mbox->controller.chans[i].con_priv = &mbox->mbox_chan[i];
+ }
+
+ ret = devm_mbox_controller_register(dev, &mbox->controller);
+ if (ret)
+ return ret;
+
+ ret = bcm74110_mbox_init(mbox);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id bcm74110_mbox_of_match[] = {
+ { .compatible = "brcm,bcm74110-mbox", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, bcm74110_mbox_of_match);
+
+static struct platform_driver bcm74110_mbox_driver = {
+ .driver = {
+ .name = "bcm74110-mbox",
+ .of_match_table = bcm74110_mbox_of_match,
+ },
+ .probe = bcm74110_mbox_probe,
+ .shutdown = bcm74110_mbox_shutdown,
+};
+module_platform_driver(bcm74110_mbox_driver);
+
+MODULE_AUTHOR("Justin Chen <justin.chen@broadcom.com>");
+MODULE_DESCRIPTION("BCM74110 mailbox driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mailbox/cix-mailbox.c b/drivers/mailbox/cix-mailbox.c
new file mode 100644
index 000000000000..5bb1416c26a5
--- /dev/null
+++ b/drivers/mailbox/cix-mailbox.c
@@ -0,0 +1,645 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025 Cix Technology Group Co., Ltd.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "mailbox.h"
+
+/*
+ * The maximum transmission size is 32 words or 128 bytes.
+ */
+#define CIX_MBOX_MSG_WORDS 32 /* Max length = 32 words */
+#define CIX_MBOX_MSG_LEN_MASK 0x7fL /* Max length = 128 bytes */
+
+/*
+ * [0~7] fast channels
+ * [8] doorbell base channel
+ * [9] fifo base channel
+ * [10] register base channel
+ */
+#define CIX_MBOX_FAST_IDX 7
+#define CIX_MBOX_DB_IDX 8
+#define CIX_MBOX_FIFO_IDX 9
+#define CIX_MBOX_REG_IDX 10
+#define CIX_MBOX_CHANS 11
+
+/* Register define */
+#define CIX_REG_MSG(n) (0x0 + 0x4*(n)) /* 0x0~0x7c */
+#define CIX_REG_DB_ACK CIX_REG_MSG(CIX_MBOX_MSG_WORDS) /* 0x80 */
+#define CIX_ERR_COMP (CIX_REG_DB_ACK + 0x4) /* 0x84 */
+#define CIX_ERR_COMP_CLR (CIX_REG_DB_ACK + 0x8) /* 0x88 */
+#define CIX_REG_F_INT(IDX) (CIX_ERR_COMP_CLR + 0x4*((IDX) + 1)) /* 0x8c~0xa8 */
+#define CIX_FIFO_WR (CIX_REG_F_INT(CIX_MBOX_FAST_IDX + 1)) /* 0xac */
+#define CIX_FIFO_RD (CIX_FIFO_WR + 0x4) /* 0xb0 */
+#define CIX_FIFO_STAS (CIX_FIFO_WR + 0x8) /* 0xb4 */
+#define CIX_FIFO_WM (CIX_FIFO_WR + 0xc) /* 0xb8 */
+#define CIX_INT_ENABLE (CIX_FIFO_WR + 0x10) /* 0xbc */
+#define CIX_INT_ENABLE_SIDE_B (CIX_FIFO_WR + 0x14) /* 0xc0 */
+#define CIX_INT_CLEAR (CIX_FIFO_WR + 0x18) /* 0xc4 */
+#define CIX_INT_STATUS (CIX_FIFO_WR + 0x1c) /* 0xc8 */
+#define CIX_FIFO_RST (CIX_FIFO_WR + 0x20) /* 0xcc */
+
+#define CIX_MBOX_TX 0
+#define CIX_MBOX_RX 1
+
+#define CIX_DB_INT_BIT BIT(0)
+#define CIX_DB_ACK_INT_BIT BIT(1)
+
+#define CIX_FIFO_WM_DEFAULT CIX_MBOX_MSG_WORDS
+#define CIX_FIFO_STAS_WMK BIT(0)
+#define CIX_FIFO_STAS_FULL BIT(1)
+#define CIX_FIFO_STAS_EMPTY BIT(2)
+#define CIX_FIFO_STAS_UFLOW BIT(3)
+#define CIX_FIFO_STAS_OFLOW BIT(4)
+
+#define CIX_FIFO_RST_BIT BIT(0)
+
+#define CIX_DB_INT BIT(0)
+#define CIX_ACK_INT BIT(1)
+#define CIX_FIFO_FULL_INT BIT(2)
+#define CIX_FIFO_EMPTY_INT BIT(3)
+#define CIX_FIFO_WM01_INT BIT(4)
+#define CIX_FIFO_WM10_INT BIT(5)
+#define CIX_FIFO_OFLOW_INT BIT(6)
+#define CIX_FIFO_UFLOW_INT BIT(7)
+#define CIX_FIFO_N_EMPTY_INT BIT(8)
+#define CIX_FAST_CH_INT(IDX) BIT((IDX)+9)
+
+#define CIX_SHMEM_OFFSET 0x80
+
+enum cix_mbox_chan_type {
+ CIX_MBOX_TYPE_DB,
+ CIX_MBOX_TYPE_REG,
+ CIX_MBOX_TYPE_FIFO,
+ CIX_MBOX_TYPE_FAST,
+};
+
+struct cix_mbox_con_priv {
+ enum cix_mbox_chan_type type;
+ struct mbox_chan *chan;
+ int index;
+};
+
+struct cix_mbox_priv {
+ struct device *dev;
+ int irq;
+ int dir;
+ void __iomem *base; /* region for mailbox */
+ struct cix_mbox_con_priv con_priv[CIX_MBOX_CHANS];
+ struct mbox_chan mbox_chans[CIX_MBOX_CHANS];
+ struct mbox_controller mbox;
+ bool use_shmem;
+};
+
+/*
+ * The CIX mailbox supports four types of transfers:
+ * CIX_MBOX_TYPE_DB, CIX_MBOX_TYPE_FAST, CIX_MBOX_TYPE_REG, and CIX_MBOX_TYPE_FIFO.
+ * For the REG and FIFO types of transfers, the message format is as follows:
+ */
+union cix_mbox_msg_reg_fifo {
+ u32 length; /* unit is byte */
+ u32 buf[CIX_MBOX_MSG_WORDS]; /* buf[0] must be the byte length of this array */
+};
+
+static struct cix_mbox_priv *to_cix_mbox_priv(struct mbox_controller *mbox)
+{
+ return container_of(mbox, struct cix_mbox_priv, mbox);
+}
+
+static void cix_mbox_write(struct cix_mbox_priv *priv, u32 val, u32 offset)
+{
+ if (priv->use_shmem)
+ iowrite32(val, priv->base + offset - CIX_SHMEM_OFFSET);
+ else
+ iowrite32(val, priv->base + offset);
+}
+
+static u32 cix_mbox_read(struct cix_mbox_priv *priv, u32 offset)
+{
+ if (priv->use_shmem)
+ return ioread32(priv->base + offset - CIX_SHMEM_OFFSET);
+ else
+ return ioread32(priv->base + offset);
+}
+
+static bool mbox_fifo_empty(struct mbox_chan *chan)
+{
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+
+ return cix_mbox_read(priv, CIX_FIFO_STAS) & CIX_FIFO_STAS_EMPTY;
+}
+
+/*
+ * The transmission unit of the CIX mailbox is a word, so the byte
+ * length must be converted into a word length.
+ */
+static inline u32 mbox_get_msg_size(void *msg)
+{
+ u32 len;
+
+ len = ((u32 *)msg)[0] & CIX_MBOX_MSG_LEN_MASK;
+ return DIV_ROUND_UP(len, 4);
+}
+
+static int cix_mbox_send_data_db(struct mbox_chan *chan, void *data)
+{
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+
+ /* trigger doorbell irq */
+ cix_mbox_write(priv, CIX_DB_INT_BIT, CIX_REG_DB_ACK);
+
+ return 0;
+}
+
+static int cix_mbox_send_data_reg(struct mbox_chan *chan, void *data)
+{
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+ union cix_mbox_msg_reg_fifo *msg = data;
+ u32 len, i;
+
+ if (!data)
+ return -EINVAL;
+
+ len = mbox_get_msg_size(data);
+ for (i = 0; i < len; i++)
+ cix_mbox_write(priv, msg->buf[i], CIX_REG_MSG(i));
+
+ /* trigger doorbell irq */
+ cix_mbox_write(priv, CIX_DB_INT_BIT, CIX_REG_DB_ACK);
+
+ return 0;
+}
+
+static int cix_mbox_send_data_fifo(struct mbox_chan *chan, void *data)
+{
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+ union cix_mbox_msg_reg_fifo *msg = data;
+ u32 len, val, i;
+
+ if (!data)
+ return -EINVAL;
+
+ len = mbox_get_msg_size(data);
+ cix_mbox_write(priv, len, CIX_FIFO_WM);
+ for (i = 0; i < len; i++)
+ cix_mbox_write(priv, msg->buf[i], CIX_FIFO_WR);
+
+ /* Enable fifo empty interrupt */
+ val = cix_mbox_read(priv, CIX_INT_ENABLE);
+ val |= CIX_FIFO_EMPTY_INT;
+ cix_mbox_write(priv, val, CIX_INT_ENABLE);
+
+ return 0;
+}
+
+static int cix_mbox_send_data_fast(struct mbox_chan *chan, void *data)
+{
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+ struct cix_mbox_con_priv *cp = chan->con_priv;
+ u32 *arg = (u32 *)data;
+ int index = cp->index;
+
+ if (!data)
+ return -EINVAL;
+
+ if (index < 0 || index > CIX_MBOX_FAST_IDX) {
+ dev_err(priv->dev, "Invalid Mbox index %d\n", index);
+ return -EINVAL;
+ }
+
+ cix_mbox_write(priv, arg[0], CIX_REG_F_INT(index));
+
+ return 0;
+}
+
+static int cix_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+ struct cix_mbox_con_priv *cp = chan->con_priv;
+
+ if (priv->dir != CIX_MBOX_TX) {
+ dev_err(priv->dev, "Invalid Mbox dir %d\n", priv->dir);
+ return -EINVAL;
+ }
+
+ switch (cp->type) {
+ case CIX_MBOX_TYPE_DB:
+ cix_mbox_send_data_db(chan, data);
+ break;
+ case CIX_MBOX_TYPE_REG:
+ cix_mbox_send_data_reg(chan, data);
+ break;
+ case CIX_MBOX_TYPE_FIFO:
+ cix_mbox_send_data_fifo(chan, data);
+ break;
+ case CIX_MBOX_TYPE_FAST:
+ cix_mbox_send_data_fast(chan, data);
+ break;
+ default:
+ dev_err(priv->dev, "Invalid channel type: %d\n", cp->type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void cix_mbox_isr_db(struct mbox_chan *chan)
+{
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+ u32 int_status;
+
+ int_status = cix_mbox_read(priv, CIX_INT_STATUS);
+
+ if (priv->dir == CIX_MBOX_RX) {
+ /* rx interrupt is triggered */
+ if (int_status & CIX_DB_INT) {
+ cix_mbox_write(priv, CIX_DB_INT, CIX_INT_CLEAR);
+ mbox_chan_received_data(chan, NULL);
+ /* trigger ack interrupt */
+ cix_mbox_write(priv, CIX_DB_ACK_INT_BIT, CIX_REG_DB_ACK);
+ }
+ } else {
+ /* tx ack interrupt is triggered */
+ if (int_status & CIX_ACK_INT) {
+ cix_mbox_write(priv, CIX_ACK_INT, CIX_INT_CLEAR);
+ mbox_chan_received_data(chan, NULL);
+ }
+ }
+}
+
+static void cix_mbox_isr_reg(struct mbox_chan *chan)
+{
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+ u32 int_status;
+
+ int_status = cix_mbox_read(priv, CIX_INT_STATUS);
+
+ if (priv->dir == CIX_MBOX_RX) {
+ /* rx interrupt is triggered */
+ if (int_status & CIX_DB_INT) {
+ u32 data[CIX_MBOX_MSG_WORDS], len, i;
+
+ cix_mbox_write(priv, CIX_DB_INT, CIX_INT_CLEAR);
+ data[0] = cix_mbox_read(priv, CIX_REG_MSG(0));
+ len = mbox_get_msg_size(data);
+ for (i = 1; i < len; i++)
+ data[i] = cix_mbox_read(priv, CIX_REG_MSG(i));
+
+ /* trigger ack interrupt */
+ cix_mbox_write(priv, CIX_DB_ACK_INT_BIT, CIX_REG_DB_ACK);
+ mbox_chan_received_data(chan, data);
+ }
+ } else {
+ /* tx ack interrupt is triggered */
+ if (int_status & CIX_ACK_INT) {
+ cix_mbox_write(priv, CIX_ACK_INT, CIX_INT_CLEAR);
+ mbox_chan_txdone(chan, 0);
+ }
+ }
+}
+
+static void cix_mbox_isr_fifo(struct mbox_chan *chan)
+{
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+ u32 int_status, status;
+
+ int_status = cix_mbox_read(priv, CIX_INT_STATUS);
+
+ if (priv->dir == CIX_MBOX_RX) {
+ /* FIFO full or watermark interrupt is generated */
+ if (int_status & (CIX_FIFO_FULL_INT | CIX_FIFO_WM01_INT)) {
+ u32 data[CIX_MBOX_MSG_WORDS] = { 0 }, i = 0;
+
+ cix_mbox_write(priv, (CIX_FIFO_FULL_INT | CIX_FIFO_WM01_INT),
+ CIX_INT_CLEAR);
+ do {
+ data[i++] = cix_mbox_read(priv, CIX_FIFO_RD);
+ } while (!mbox_fifo_empty(chan) && i < CIX_MBOX_MSG_WORDS);
+
+ mbox_chan_received_data(chan, data);
+ }
+ /* FIFO underflow is generated */
+ if (int_status & CIX_FIFO_UFLOW_INT) {
+ status = cix_mbox_read(priv, CIX_FIFO_STAS);
+ dev_err(priv->dev, "fifo underflow: int_stats %d\n", status);
+ cix_mbox_write(priv, CIX_FIFO_UFLOW_INT, CIX_INT_CLEAR);
+ }
+ } else {
+ /* FIFO empty interrupt is generated */
+ if (int_status & CIX_FIFO_EMPTY_INT) {
+ u32 val;
+
+ cix_mbox_write(priv, CIX_FIFO_EMPTY_INT, CIX_INT_CLEAR);
+ /* Disable empty irq */
+ val = cix_mbox_read(priv, CIX_INT_ENABLE);
+ val &= ~CIX_FIFO_EMPTY_INT;
+ cix_mbox_write(priv, val, CIX_INT_ENABLE);
+ mbox_chan_txdone(chan, 0);
+ }
+ /* FIFO overflow is generated */
+ if (int_status & CIX_FIFO_OFLOW_INT) {
+ status = cix_mbox_read(priv, CIX_FIFO_STAS);
+ dev_err(priv->dev, "fifo overlow: int_stats %d\n", status);
+ cix_mbox_write(priv, CIX_FIFO_OFLOW_INT, CIX_INT_CLEAR);
+ }
+ }
+}
+
+static void cix_mbox_isr_fast(struct mbox_chan *chan)
+{
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+ struct cix_mbox_con_priv *cp = chan->con_priv;
+ u32 int_status, data;
+
+ /* no irq will be triggered for a TX dir mbox */
+ if (priv->dir != CIX_MBOX_RX)
+ return;
+
+ int_status = cix_mbox_read(priv, CIX_INT_STATUS);
+
+ if (int_status & CIX_FAST_CH_INT(cp->index)) {
+ cix_mbox_write(priv, CIX_FAST_CH_INT(cp->index), CIX_INT_CLEAR);
+ data = cix_mbox_read(priv, CIX_REG_F_INT(cp->index));
+ mbox_chan_received_data(chan, &data);
+ }
+}
+
+static irqreturn_t cix_mbox_isr(int irq, void *arg)
+{
+ struct mbox_chan *chan = arg;
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+ struct cix_mbox_con_priv *cp = chan->con_priv;
+
+ switch (cp->type) {
+ case CIX_MBOX_TYPE_DB:
+ cix_mbox_isr_db(chan);
+ break;
+ case CIX_MBOX_TYPE_REG:
+ cix_mbox_isr_reg(chan);
+ break;
+ case CIX_MBOX_TYPE_FIFO:
+ cix_mbox_isr_fifo(chan);
+ break;
+ case CIX_MBOX_TYPE_FAST:
+ cix_mbox_isr_fast(chan);
+ break;
+ default:
+ dev_err(priv->dev, "Invalid channel type: %d\n", cp->type);
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int cix_mbox_startup(struct mbox_chan *chan)
+{
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+ struct cix_mbox_con_priv *cp = chan->con_priv;
+ int index = cp->index, ret;
+ u32 val;
+
+ ret = request_irq(priv->irq, cix_mbox_isr, 0,
+ dev_name(priv->dev), chan);
+ if (ret) {
+ dev_err(priv->dev, "Unable to acquire IRQ %d\n", priv->irq);
+ return ret;
+ }
+
+ switch (cp->type) {
+ case CIX_MBOX_TYPE_DB:
+ /* Overwrite txdone_method for DB channel */
+ chan->txdone_method = TXDONE_BY_ACK;
+ fallthrough;
+ case CIX_MBOX_TYPE_REG:
+ if (priv->dir == CIX_MBOX_TX) {
+ /* Enable ACK interrupt */
+ val = cix_mbox_read(priv, CIX_INT_ENABLE);
+ val |= CIX_ACK_INT;
+ cix_mbox_write(priv, val, CIX_INT_ENABLE);
+ } else {
+ /* Enable Doorbell interrupt */
+ val = cix_mbox_read(priv, CIX_INT_ENABLE_SIDE_B);
+ val |= CIX_DB_INT;
+ cix_mbox_write(priv, val, CIX_INT_ENABLE_SIDE_B);
+ }
+ break;
+ case CIX_MBOX_TYPE_FIFO:
+ /* reset fifo */
+ cix_mbox_write(priv, CIX_FIFO_RST_BIT, CIX_FIFO_RST);
+ /* set default watermark */
+ cix_mbox_write(priv, CIX_FIFO_WM_DEFAULT, CIX_FIFO_WM);
+ if (priv->dir == CIX_MBOX_TX) {
+ /* Enable fifo overflow interrupt */
+ val = cix_mbox_read(priv, CIX_INT_ENABLE);
+ val |= CIX_FIFO_OFLOW_INT;
+ cix_mbox_write(priv, val, CIX_INT_ENABLE);
+ } else {
+ /* Enable fifo watermark/underflow interrupt */
+ val = cix_mbox_read(priv, CIX_INT_ENABLE_SIDE_B);
+ val |= CIX_FIFO_UFLOW_INT | CIX_FIFO_WM01_INT;
+ cix_mbox_write(priv, val, CIX_INT_ENABLE_SIDE_B);
+ }
+ break;
+ case CIX_MBOX_TYPE_FAST:
+ /* Only the RX channel has an interrupt */
+ if (priv->dir == CIX_MBOX_RX) {
+ if (index < 0 || index > CIX_MBOX_FAST_IDX) {
+ dev_err(priv->dev, "Invalid index %d\n", index);
+ ret = -EINVAL;
+ goto failed;
+ }
+ /* enable fast channel interrupt */
+ val = cix_mbox_read(priv, CIX_INT_ENABLE_SIDE_B);
+ val |= CIX_FAST_CH_INT(index);
+ cix_mbox_write(priv, val, CIX_INT_ENABLE_SIDE_B);
+ }
+ break;
+ default:
+ dev_err(priv->dev, "Invalid channel type: %d\n", cp->type);
+ ret = -EINVAL;
+ goto failed;
+ }
+ return 0;
+
+failed:
+ free_irq(priv->irq, chan);
+ return ret;
+}
+
+static void cix_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+ struct cix_mbox_con_priv *cp = chan->con_priv;
+ int index = cp->index;
+ u32 val;
+
+ switch (cp->type) {
+ case CIX_MBOX_TYPE_DB:
+ case CIX_MBOX_TYPE_REG:
+ if (priv->dir == CIX_MBOX_TX) {
+ /* Disable ACK interrupt */
+ val = cix_mbox_read(priv, CIX_INT_ENABLE);
+ val &= ~CIX_ACK_INT;
+ cix_mbox_write(priv, val, CIX_INT_ENABLE);
+ } else if (priv->dir == CIX_MBOX_RX) {
+ /* Disable Doorbell interrupt */
+ val = cix_mbox_read(priv, CIX_INT_ENABLE_SIDE_B);
+ val &= ~CIX_DB_INT;
+ cix_mbox_write(priv, val, CIX_INT_ENABLE_SIDE_B);
+ }
+ break;
+ case CIX_MBOX_TYPE_FIFO:
+ if (priv->dir == CIX_MBOX_TX) {
+ /* Disable fifo empty/overflow irq */
+ val = cix_mbox_read(priv, CIX_INT_ENABLE);
+ val &= ~(CIX_FIFO_EMPTY_INT | CIX_FIFO_OFLOW_INT);
+ cix_mbox_write(priv, val, CIX_INT_ENABLE);
+ } else if (priv->dir == CIX_MBOX_RX) {
+ /* Disable fifo WM01/underflow interrupt */
+ val = cix_mbox_read(priv, CIX_INT_ENABLE_SIDE_B);
+ val &= ~(CIX_FIFO_UFLOW_INT | CIX_FIFO_WM01_INT);
+ cix_mbox_write(priv, val, CIX_INT_ENABLE_SIDE_B);
+ }
+ break;
+ case CIX_MBOX_TYPE_FAST:
+ if (priv->dir == CIX_MBOX_RX) {
+ if (index < 0 || index > CIX_MBOX_FAST_IDX) {
+ dev_err(priv->dev, "Invalid index %d\n", index);
+ break;
+ }
+ /* Disable fast channel interrupt */
+ val = cix_mbox_read(priv, CIX_INT_ENABLE_SIDE_B);
+ val &= ~CIX_FAST_CH_INT(index);
+ cix_mbox_write(priv, val, CIX_INT_ENABLE_SIDE_B);
+ }
+ break;
+
+ default:
+ dev_err(priv->dev, "Invalid channel type: %d\n", cp->type);
+ break;
+ }
+
+ free_irq(priv->irq, chan);
+}
+
+static const struct mbox_chan_ops cix_mbox_chan_ops = {
+ .send_data = cix_mbox_send_data,
+ .startup = cix_mbox_startup,
+ .shutdown = cix_mbox_shutdown,
+};
+
+static void cix_mbox_init(struct cix_mbox_priv *priv)
+{
+ struct cix_mbox_con_priv *cp;
+ int i;
+
+ for (i = 0; i < CIX_MBOX_CHANS; i++) {
+ cp = &priv->con_priv[i];
+ cp->index = i;
+ cp->chan = &priv->mbox_chans[i];
+ priv->mbox_chans[i].con_priv = cp;
+ if (cp->index <= CIX_MBOX_FAST_IDX)
+ cp->type = CIX_MBOX_TYPE_FAST;
+ if (cp->index == CIX_MBOX_DB_IDX)
+ cp->type = CIX_MBOX_TYPE_DB;
+ if (cp->index == CIX_MBOX_FIFO_IDX)
+ cp->type = CIX_MBOX_TYPE_FIFO;
+ if (cp->index == CIX_MBOX_REG_IDX)
+ cp->type = CIX_MBOX_TYPE_REG;
+ }
+}
+
+static int cix_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cix_mbox_priv *priv;
+ struct resource *res;
+ const char *dir_str;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ /*
+ * The first 0x80 bytes of the register space of the cix mailbox controller
+ * can be used as shared memory for clients. When this shared memory is in
+ * use, the base address of the mailbox is offset by 0x80. Therefore, when
+ * performing subsequent read/write operations, it is necessary to subtract
+ * the offset CIX_SHMEM_OFFSET.
+ *
+ * When the base address of the mailbox is offset by 0x80, it indicates
+ * that shmem is in use.
+ */
+ priv->use_shmem = !!(res->start & CIX_SHMEM_OFFSET);
+
+ priv->irq = platform_get_irq(pdev, 0);
+ if (priv->irq < 0)
+ return priv->irq;
+
+ if (device_property_read_string(dev, "cix,mbox-dir", &dir_str)) {
+ dev_err(priv->dev, "cix,mbox_dir property not found\n");
+ return -EINVAL;
+ }
+
+ if (!strcmp(dir_str, "tx")) {
+ priv->dir = CIX_MBOX_TX;
+ } else if (!strcmp(dir_str, "rx")) {
+ priv->dir = CIX_MBOX_RX;
+ } else {
+ dev_err(priv->dev, "cix,mbox-dir=%s is not expected\n", dir_str);
+ return -EINVAL;
+ }
+
+ cix_mbox_init(priv);
+
+ priv->mbox.dev = dev;
+ priv->mbox.ops = &cix_mbox_chan_ops;
+ priv->mbox.chans = priv->mbox_chans;
+ priv->mbox.txdone_irq = true;
+ priv->mbox.num_chans = CIX_MBOX_CHANS;
+ priv->mbox.of_xlate = NULL;
+
+ platform_set_drvdata(pdev, priv);
+ ret = devm_mbox_controller_register(dev, &priv->mbox);
+ if (ret)
+ dev_err(dev, "Failed to register mailbox %d\n", ret);
+
+ return ret;
+}
+
+static const struct of_device_id cix_mbox_dt_ids[] = {
+ { .compatible = "cix,sky1-mbox" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, cix_mbox_dt_ids);
+
+static struct platform_driver cix_mbox_driver = {
+ .probe = cix_mbox_probe,
+ .driver = {
+ .name = "cix_mbox",
+ .of_match_table = cix_mbox_dt_ids,
+ },
+};
+
+static int __init cix_mailbox_init(void)
+{
+ return platform_driver_register(&cix_mbox_driver);
+}
+arch_initcall(cix_mailbox_init);
+
+MODULE_AUTHOR("Cix Technology Group Co., Ltd.");
+MODULE_DESCRIPTION("CIX mailbox driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mailbox/cv1800-mailbox.c b/drivers/mailbox/cv1800-mailbox.c
new file mode 100644
index 000000000000..4761191acf78
--- /dev/null
+++ b/drivers/mailbox/cv1800-mailbox.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2024 Sophgo Technology Inc.
+ * Copyright (C) 2024 Yuntao Dai <d1581209858@live.com>
+ * Copyright (C) 2025 Junhui Liu <junhui.liu@pigmoral.tech>
+ */
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kfifo.h>
+#include <linux/mailbox_client.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define RECV_CPU 1
+
+#define MAILBOX_MAX_CHAN 8
+#define MAILBOX_MSG_LEN 8
+
+#define MBOX_EN_REG(cpu) (cpu << 2)
+#define MBOX_DONE_REG(cpu) ((cpu << 2) + 2)
+#define MBOX_SET_CLR_REG(cpu) (0x10 + (cpu << 4))
+#define MBOX_SET_INT_REG(cpu) (0x18 + (cpu << 4))
+#define MBOX_SET_REG 0x60
+
+#define MAILBOX_CONTEXT_OFFSET 0x0400
+#define MAILBOX_CONTEXT_SIZE 0x0040
+
+#define MBOX_CONTEXT_BASE_INDEX(base, index) \
+ ((u64 __iomem *)(base + MAILBOX_CONTEXT_OFFSET) + index)
+
+/**
+ * struct cv1800_mbox_chan_priv - cv1800 mailbox channel private data
+ * @idx: index of channel
+ * @cpu: index of the processor the message is sent to
+ */
+struct cv1800_mbox_chan_priv {
+ int idx;
+ int cpu;
+};
+
+struct cv1800_mbox {
+ struct mbox_controller mbox;
+ struct cv1800_mbox_chan_priv priv[MAILBOX_MAX_CHAN];
+ struct mbox_chan chans[MAILBOX_MAX_CHAN];
+ u64 __iomem *content[MAILBOX_MAX_CHAN];
+ void __iomem *mbox_base;
+ int recvid;
+};
+
+static irqreturn_t cv1800_mbox_isr(int irq, void *dev_id)
+{
+ struct cv1800_mbox *mbox = (struct cv1800_mbox *)dev_id;
+ size_t i;
+ u64 msg;
+ int ret = IRQ_NONE;
+
+ for (i = 0; i < MAILBOX_MAX_CHAN; i++) {
+ if (mbox->content[i] && mbox->chans[i].cl) {
+ memcpy_fromio(&msg, mbox->content[i], MAILBOX_MSG_LEN);
+ mbox->content[i] = NULL;
+ mbox_chan_received_data(&mbox->chans[i], (void *)&msg);
+ ret = IRQ_HANDLED;
+ }
+ }
+
+ return ret;
+}
+
+static irqreturn_t cv1800_mbox_irq(int irq, void *dev_id)
+{
+ struct cv1800_mbox *mbox = (struct cv1800_mbox *)dev_id;
+ u8 set, valid;
+ size_t i;
+ int ret = IRQ_NONE;
+
+ set = readb(mbox->mbox_base + MBOX_SET_INT_REG(RECV_CPU));
+
+ if (!set)
+ return ret;
+
+ for (i = 0; i < MAILBOX_MAX_CHAN; i++) {
+ valid = set & BIT(i);
+ if (valid) {
+ mbox->content[i] =
+ MBOX_CONTEXT_BASE_INDEX(mbox->mbox_base, i);
+ writeb(valid, mbox->mbox_base +
+ MBOX_SET_CLR_REG(RECV_CPU));
+ writeb(~valid, mbox->mbox_base + MBOX_EN_REG(RECV_CPU));
+ ret = IRQ_WAKE_THREAD;
+ }
+ }
+
+ return ret;
+}
+
+static int cv1800_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct cv1800_mbox_chan_priv *priv =
+ (struct cv1800_mbox_chan_priv *)chan->con_priv;
+ struct cv1800_mbox *mbox = dev_get_drvdata(chan->mbox->dev);
+ int idx = priv->idx;
+ int cpu = priv->cpu;
+ u8 en, valid;
+
+ memcpy_toio(MBOX_CONTEXT_BASE_INDEX(mbox->mbox_base, idx),
+ data, MAILBOX_MSG_LEN);
+
+ valid = BIT(idx);
+ writeb(valid, mbox->mbox_base + MBOX_SET_CLR_REG(cpu));
+ en = readb(mbox->mbox_base + MBOX_EN_REG(cpu));
+ writeb(en | valid, mbox->mbox_base + MBOX_EN_REG(cpu));
+ writeb(valid, mbox->mbox_base + MBOX_SET_REG);
+
+ return 0;
+}
+
+static bool cv1800_last_tx_done(struct mbox_chan *chan)
+{
+ struct cv1800_mbox_chan_priv *priv =
+ (struct cv1800_mbox_chan_priv *)chan->con_priv;
+ struct cv1800_mbox *mbox = dev_get_drvdata(chan->mbox->dev);
+ u8 en;
+
+ en = readb(mbox->mbox_base + MBOX_EN_REG(priv->cpu));
+
+ return !(en & BIT(priv->idx));
+}
+
+static const struct mbox_chan_ops cv1800_mbox_chan_ops = {
+ .send_data = cv1800_mbox_send_data,
+ .last_tx_done = cv1800_last_tx_done,
+};
+
+static struct mbox_chan *cv1800_mbox_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *spec)
+{
+ struct cv1800_mbox_chan_priv *priv;
+
+ int idx = spec->args[0];
+ int cpu = spec->args[1];
+
+ if (idx >= mbox->num_chans)
+ return ERR_PTR(-EINVAL);
+
+ priv = mbox->chans[idx].con_priv;
+ priv->cpu = cpu;
+
+ return &mbox->chans[idx];
+}
+
+static const struct of_device_id cv1800_mbox_of_match[] = {
+ { .compatible = "sophgo,cv1800b-mailbox", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cv1800_mbox_of_match);
+
+static int cv1800_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cv1800_mbox *mb;
+ int irq, idx, err;
+
+ mb = devm_kzalloc(dev, sizeof(*mb), GFP_KERNEL);
+ if (!mb)
+ return -ENOMEM;
+
+ mb->mbox_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mb->mbox_base))
+ return dev_err_probe(dev, PTR_ERR(mb->mbox_base),
+ "Failed to map resource\n");
+
+ mb->mbox.dev = dev;
+ mb->mbox.chans = mb->chans;
+ mb->mbox.txdone_poll = true;
+ mb->mbox.ops = &cv1800_mbox_chan_ops;
+ mb->mbox.num_chans = MAILBOX_MAX_CHAN;
+ mb->mbox.of_xlate = cv1800_mbox_xlate;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_threaded_irq(dev, irq, cv1800_mbox_irq,
+ cv1800_mbox_isr, IRQF_ONESHOT,
+ dev_name(&pdev->dev), mb);
+ if (err < 0)
+ return dev_err_probe(dev, err, "Failed to register irq\n");
+
+ for (idx = 0; idx < MAILBOX_MAX_CHAN; idx++) {
+ mb->priv[idx].idx = idx;
+ mb->mbox.chans[idx].con_priv = &mb->priv[idx];
+ }
+
+ platform_set_drvdata(pdev, mb);
+
+ err = devm_mbox_controller_register(dev, &mb->mbox);
+ if (err)
+ return dev_err_probe(dev, err, "Failed to register mailbox\n");
+
+ return 0;
+}
+
+static struct platform_driver cv1800_mbox_driver = {
+ .driver = {
+ .name = "cv1800-mbox",
+ .of_match_table = cv1800_mbox_of_match,
+ },
+ .probe = cv1800_mbox_probe,
+};
+
+module_platform_driver(cv1800_mbox_driver);
+
+MODULE_DESCRIPTION("cv1800 mailbox driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mailbox/exynos-mailbox.c b/drivers/mailbox/exynos-mailbox.c
new file mode 100644
index 000000000000..2320649bf60c
--- /dev/null
+++ b/drivers/mailbox/exynos-mailbox.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2020 Samsung Electronics Co., Ltd.
+ * Copyright 2020 Google LLC.
+ * Copyright 2024 Linaro Ltd.
+ */
+
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox/exynos-message.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define EXYNOS_MBOX_MCUCTRL 0x0 /* Mailbox Control Register */
+#define EXYNOS_MBOX_INTCR0 0x24 /* Interrupt Clear Register 0 */
+#define EXYNOS_MBOX_INTMR0 0x28 /* Interrupt Mask Register 0 */
+#define EXYNOS_MBOX_INTSR0 0x2c /* Interrupt Status Register 0 */
+#define EXYNOS_MBOX_INTMSR0 0x30 /* Interrupt Mask Status Register 0 */
+#define EXYNOS_MBOX_INTGR1 0x40 /* Interrupt Generation Register 1 */
+#define EXYNOS_MBOX_INTMR1 0x48 /* Interrupt Mask Register 1 */
+#define EXYNOS_MBOX_INTSR1 0x4c /* Interrupt Status Register 1 */
+#define EXYNOS_MBOX_INTMSR1 0x50 /* Interrupt Mask Status Register 1 */
+
+#define EXYNOS_MBOX_INTMR0_MASK GENMASK(15, 0)
+#define EXYNOS_MBOX_INTGR1_MASK GENMASK(15, 0)
+
+#define EXYNOS_MBOX_CHAN_COUNT HWEIGHT32(EXYNOS_MBOX_INTGR1_MASK)
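+/*
+ * INTGR1 exposes 16 interrupt generation bits, so HWEIGHT32() of the mask
+ * above yields 16 doorbell channels.
+ */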
+
+/**
+ * struct exynos_mbox - driver's private data.
+ * @regs: mailbox registers base address.
+ * @mbox: pointer to the mailbox controller.
+ * @pclk: pointer to the mailbox peripheral clock.
+ */
+struct exynos_mbox {
+ void __iomem *regs;
+ struct mbox_controller *mbox;
+ struct clk *pclk;
+};
+
+static int exynos_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct device *dev = chan->mbox->dev;
+ struct exynos_mbox *exynos_mbox = dev_get_drvdata(dev);
+ struct exynos_mbox_msg *msg = data;
+
+ if (msg->chan_id >= exynos_mbox->mbox->num_chans) {
+ dev_err(dev, "Invalid channel ID %d\n", msg->chan_id);
+ return -EINVAL;
+ }
+
+ if (msg->chan_type != EXYNOS_MBOX_CHAN_TYPE_DOORBELL) {
+ dev_err(dev, "Unsupported channel type [%d]\n", msg->chan_type);
+ return -EINVAL;
+ }
+
+ writel(BIT(msg->chan_id), exynos_mbox->regs + EXYNOS_MBOX_INTGR1);
+
+ return 0;
+}
+
+static const struct mbox_chan_ops exynos_mbox_chan_ops = {
+ .send_data = exynos_mbox_send_data,
+};
+
+static struct mbox_chan *exynos_mbox_of_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *sp)
+{
+ int i;
+
+ if (sp->args_count != 0)
+ return ERR_PTR(-EINVAL);
+
+ /*
+	 * Return the first available channel. Since no channel ID is passed
+	 * via the device tree, each channel populated by the driver is just a
+	 * software construct, i.e. a virtual channel. The channel identifier
+	 * is passed instead through the 'void *data' argument of send_data().
+ */
+ for (i = 0; i < mbox->num_chans; i++)
+ if (mbox->chans[i].cl == NULL)
+ return &mbox->chans[i];
+ return ERR_PTR(-EINVAL);
+}
+
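+/*
+ * Illustrative client-side sketch (not part of this driver): the doorbell
+ * target travels in the message payload, not in the phandle args:
+ *
+ *     struct exynos_mbox_msg msg = {
+ *             .chan_id   = 4,
+ *             .chan_type = EXYNOS_MBOX_CHAN_TYPE_DOORBELL,
+ *     };
+ *     mbox_send_message(chan, &msg);
+ */
+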
+static const struct of_device_id exynos_mbox_match[] = {
+ { .compatible = "google,gs101-mbox" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, exynos_mbox_match);
+
+static int exynos_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct exynos_mbox *exynos_mbox;
+ struct mbox_controller *mbox;
+ struct mbox_chan *chans;
+ int i;
+
+ exynos_mbox = devm_kzalloc(dev, sizeof(*exynos_mbox), GFP_KERNEL);
+ if (!exynos_mbox)
+ return -ENOMEM;
+
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ chans = devm_kcalloc(dev, EXYNOS_MBOX_CHAN_COUNT, sizeof(*chans),
+ GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+
+ exynos_mbox->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(exynos_mbox->regs))
+ return PTR_ERR(exynos_mbox->regs);
+
+ exynos_mbox->pclk = devm_clk_get_enabled(dev, "pclk");
+ if (IS_ERR(exynos_mbox->pclk))
+ return dev_err_probe(dev, PTR_ERR(exynos_mbox->pclk),
+ "Failed to enable clock.\n");
+
+ mbox->num_chans = EXYNOS_MBOX_CHAN_COUNT;
+ mbox->chans = chans;
+ mbox->dev = dev;
+ mbox->ops = &exynos_mbox_chan_ops;
+ mbox->of_xlate = exynos_mbox_of_xlate;
+
+ for (i = 0; i < EXYNOS_MBOX_CHAN_COUNT; i++)
+ chans[i].mbox = mbox;
+
+ exynos_mbox->mbox = mbox;
+
+ platform_set_drvdata(pdev, exynos_mbox);
+
+ /* Mask out all interrupts. We support just polling channels for now. */
+ writel(EXYNOS_MBOX_INTMR0_MASK, exynos_mbox->regs + EXYNOS_MBOX_INTMR0);
+
+ return devm_mbox_controller_register(dev, mbox);
+}
+
+static struct platform_driver exynos_mbox_driver = {
+ .probe = exynos_mbox_probe,
+ .driver = {
+ .name = "exynos-acpm-mbox",
+ .of_match_table = exynos_mbox_match,
+ },
+};
+module_platform_driver(exynos_mbox_driver);
+
+MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@linaro.org>");
+MODULE_DESCRIPTION("Samsung Exynos mailbox driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index 6ef8338add0d..6778afc64a04 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -226,7 +226,7 @@ static int imx_mu_generic_tx(struct imx_mu_priv *priv,
{
u32 *arg = data;
u32 val;
- int ret;
+ int ret, count;
switch (cp->type) {
case IMX_MU_TYPE_TX:
@@ -240,11 +240,20 @@ static int imx_mu_generic_tx(struct imx_mu_priv *priv,
case IMX_MU_TYPE_TXDB_V2:
imx_mu_write(priv, IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx),
priv->dcfg->xCR[IMX_MU_GCR]);
- ret = readl_poll_timeout(priv->base + priv->dcfg->xCR[IMX_MU_GCR], val,
- !(val & IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx)),
- 0, 1000);
- if (ret)
- dev_warn_ratelimited(priv->dev, "channel type: %d failure\n", cp->type);
+ ret = -ETIMEDOUT;
+ count = 0;
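+			/* Each poll attempt waits up to 10 ms; retry up to 10 times. */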
+ while (ret && (count < 10)) {
+			ret = readl_poll_timeout(priv->base + priv->dcfg->xCR[IMX_MU_GCR],
+						 val,
+						 !(val & IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx)),
+						 0, 10000);
+
+ if (ret) {
+ dev_warn_ratelimited(priv->dev,
+						     "channel type %d: timed out %d times, retrying\n",
+ cp->type, ++count);
+ }
+ }
break;
default:
dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
diff --git a/drivers/mailbox/mailbox-altera.c b/drivers/mailbox/mailbox-altera.c
index afb320e9d69c..17278c2571d3 100644
--- a/drivers/mailbox/mailbox-altera.c
+++ b/drivers/mailbox/mailbox-altera.c
@@ -130,7 +130,7 @@ static void altera_mbox_rx_data(struct mbox_chan *chan)
static void altera_mbox_poll_rx(struct timer_list *t)
{
- struct altera_mbox *mbox = from_timer(mbox, t, rxpoll_timer);
+ struct altera_mbox *mbox = timer_container_of(mbox, t, rxpoll_timer);
altera_mbox_rx_data(mbox->chan);
@@ -270,7 +270,7 @@ static void altera_mbox_shutdown(struct mbox_chan *chan)
writel_relaxed(~0, mbox->mbox_base + MAILBOX_INTMASK_REG);
free_irq(mbox->irq, chan);
} else if (!mbox->is_sender) {
- del_timer_sync(&mbox->rxpoll_timer);
+ timer_delete_sync(&mbox->rxpoll_timer);
}
}
diff --git a/drivers/mailbox/mailbox-mchp-ipc-sbi.c b/drivers/mailbox/mailbox-mchp-ipc-sbi.c
new file mode 100644
index 000000000000..a6e52009a424
--- /dev/null
+++ b/drivers/mailbox/mailbox-mchp-ipc-sbi.c
@@ -0,0 +1,504 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip Inter-Processor communication (IPC) driver
+ *
+ * Copyright (c) 2021 - 2024 Microchip Technology Inc. All rights reserved.
+ *
+ * Author: Valentina Fernandez <valentina.fernandezalanis@microchip.com>
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/of_device.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/mailbox/mchp-ipc.h>
+#include <asm/sbi.h>
+#include <asm/vendorid_list.h>
+
+#define IRQ_STATUS_BITS 12
+#define NUM_CHANS_PER_CLUSTER 5
+#define IPC_DMA_BIT_MASK 32
+#define SBI_EXT_MICROCHIP_TECHNOLOGY (SBI_EXT_VENDOR_START | \
+ MICROCHIP_VENDOR_ID)
+
+enum {
+ SBI_EXT_IPC_PROBE = 0x100,
+ SBI_EXT_IPC_CH_INIT,
+ SBI_EXT_IPC_SEND,
+ SBI_EXT_IPC_RECEIVE,
+ SBI_EXT_IPC_STATUS,
+};
+
+enum ipc_hw {
+ MIV_IHC,
+};
+
+/**
+ * struct mchp_ipc_mbox_info - IPC probe message format
+ *
+ * @hw_type: IPC implementation available in the hardware
+ * @num_channels: number of IPC channels available in the hardware
+ *
+ * Used to retrieve information on the IPC implementation
+ * using the SBI_EXT_IPC_PROBE SBI function id.
+ */
+struct mchp_ipc_mbox_info {
+ enum ipc_hw hw_type;
+ u8 num_channels;
+};
+
+/**
+ * struct mchp_ipc_init - IPC channel init message format
+ *
+ * @max_msg_size: maximum message size in bytes of a given channel
+ *
+ * struct used by the SBI_EXT_IPC_CH_INIT SBI function id to get
+ * the max message size in bytes of the initialized channel.
+ */
+struct mchp_ipc_init {
+ u16 max_msg_size;
+};
+
+/**
+ * struct mchp_ipc_status - IPC status message format
+ *
+ * @status: interrupt status for all channels associated with a cluster
+ * @cluster: specifies the cluster instance that originated an irq
+ *
+ * struct used by the SBI_EXT_IPC_STATUS SBI function id to get
+ * the message present and message clear interrupt status for all the
+ * channels associated with a cluster.
+ */
+struct mchp_ipc_status {
+ u32 status;
+ u8 cluster;
+};
+
+/**
+ * struct mchp_ipc_sbi_msg - IPC SBI payload message
+ *
+ * @buf_addr: physical address where the received data should be copied to
+ * @size: maximum size (in bytes) that can be stored in the buffer pointed to by @buf_addr
+ * @irq_type: mask representing the irq types that triggered an irq
+ *
+ * struct used by the SBI_EXT_IPC_SEND/SBI_EXT_IPC_RECEIVE SBI function
+ * ids to send/receive a message from an associated processor using
+ * the IPC.
+ */
+struct mchp_ipc_sbi_msg {
+ u64 buf_addr;
+ u16 size;
+ u8 irq_type;
+};
+
+struct mchp_ipc_cluster_cfg {
+ void *buf_base;
+ phys_addr_t buf_base_addr;
+ int irq;
+};
+
+struct mchp_ipc_sbi_mbox {
+ struct device *dev;
+ struct mbox_chan *chans;
+ struct mchp_ipc_cluster_cfg *cluster_cfg;
+ void *buf_base;
+ unsigned long buf_base_addr;
+ struct mbox_controller controller;
+ enum ipc_hw hw_type;
+};
+
+static int mchp_ipc_sbi_chan_send(u32 command, u32 channel, unsigned long address)
+{
+ struct sbiret ret;
+
+ ret = sbi_ecall(SBI_EXT_MICROCHIP_TECHNOLOGY, command, channel,
+ address, 0, 0, 0, 0);
+
+ if (ret.error)
+ return sbi_err_map_linux_errno(ret.error);
+ else
+ return ret.value;
+}
+
+static int mchp_ipc_sbi_send(u32 command, unsigned long address)
+{
+ struct sbiret ret;
+
+ ret = sbi_ecall(SBI_EXT_MICROCHIP_TECHNOLOGY, command, address,
+ 0, 0, 0, 0, 0);
+
+ if (ret.error)
+ return sbi_err_map_linux_errno(ret.error);
+ else
+ return ret.value;
+}
+
+static struct mchp_ipc_sbi_mbox *to_mchp_ipc_mbox(struct mbox_controller *mbox)
+{
+ return container_of(mbox, struct mchp_ipc_sbi_mbox, controller);
+}
+
+static inline void mchp_ipc_prepare_receive_req(struct mbox_chan *chan)
+{
+ struct mchp_ipc_sbi_chan *chan_info = (struct mchp_ipc_sbi_chan *)chan->con_priv;
+ struct mchp_ipc_sbi_msg request;
+
+ request.buf_addr = chan_info->msg_buf_rx_addr;
+ request.size = chan_info->max_msg_size;
+ memcpy(chan_info->buf_base_rx, &request, sizeof(struct mchp_ipc_sbi_msg));
+}
+
+static inline void mchp_ipc_process_received_data(struct mbox_chan *chan,
+ struct mchp_ipc_msg *ipc_msg)
+{
+ struct mchp_ipc_sbi_chan *chan_info = (struct mchp_ipc_sbi_chan *)chan->con_priv;
+ struct mchp_ipc_sbi_msg sbi_msg;
+
+ memcpy(&sbi_msg, chan_info->buf_base_rx, sizeof(struct mchp_ipc_sbi_msg));
+ ipc_msg->buf = (u32 *)chan_info->msg_buf_rx;
+ ipc_msg->size = sbi_msg.size;
+}
+
+static irqreturn_t mchp_ipc_cluster_aggr_isr(int irq, void *data)
+{
+ struct mbox_chan *chan;
+ struct mchp_ipc_sbi_chan *chan_info;
+ struct mchp_ipc_sbi_mbox *ipc = (struct mchp_ipc_sbi_mbox *)data;
+ struct mchp_ipc_msg ipc_msg;
+ struct mchp_ipc_status status_msg;
+ int ret;
+ unsigned long hartid;
+ u32 i, chan_index, chan_id;
+
+ /* Find out the hart that originated the irq */
+ for_each_online_cpu(i) {
+ hartid = cpuid_to_hartid_map(i);
+ if (irq == ipc->cluster_cfg[hartid].irq)
+ break;
+ }
+
+ status_msg.cluster = hartid;
+ memcpy(ipc->cluster_cfg[hartid].buf_base, &status_msg, sizeof(struct mchp_ipc_status));
+
+ ret = mchp_ipc_sbi_send(SBI_EXT_IPC_STATUS, ipc->cluster_cfg[hartid].buf_base_addr);
+ if (ret < 0) {
+ dev_err_ratelimited(ipc->dev, "could not get IHC irq status ret=%d\n", ret);
+ return IRQ_HANDLED;
+ }
+
+ memcpy(&status_msg, ipc->cluster_cfg[hartid].buf_base, sizeof(struct mchp_ipc_status));
+
+ /*
+ * Iterate over each bit set in the IHC interrupt status register (IRQ_STATUS) to identify
+ * the channel(s) that have a message to be processed/acknowledged.
+ * The bits are organized in alternating format, where each pair of bits represents
+ * the status of the message present and message clear interrupts for each cluster/hart
+	 * (from hart 0 to hart 5). Each cluster can have up to 5 fixed
+	 * channels associated with it.
+ */
+
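+	/*
+	 * Worked example (illustrative): bits 0/1 are the "message present"
+	 * and "message clear" interrupts paired with hart 0, bits 2/3 with
+	 * hart 1, and so on; even bits take the RX path below, odd bits
+	 * complete a pending TX via mbox_chan_txdone().
+	 */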
+ for_each_set_bit(i, (unsigned long *)&status_msg.status, IRQ_STATUS_BITS) {
+ /* Find out the destination hart that triggered the interrupt */
+ chan_index = i / 2;
+
+ /*
+ * The IP has no loopback channels, so we need to decrement the index when
+ * the target hart has a greater index than our own
+ */
+ if (chan_index >= status_msg.cluster)
+ chan_index--;
+
+ /*
+ * Calculate the channel id given the hart and channel index. Channel IDs
+ * are unique across all clusters of an IPC, and iterate contiguously
+ * across all clusters.
+ */
+ chan_id = status_msg.cluster * (NUM_CHANS_PER_CLUSTER + chan_index);
+
+ chan = &ipc->chans[chan_id];
+ chan_info = (struct mchp_ipc_sbi_chan *)chan->con_priv;
+
+ if (i % 2 == 0) {
+ mchp_ipc_prepare_receive_req(chan);
+ ret = mchp_ipc_sbi_chan_send(SBI_EXT_IPC_RECEIVE, chan_id,
+ chan_info->buf_base_rx_addr);
+ if (ret < 0)
+ continue;
+
+ mchp_ipc_process_received_data(chan, &ipc_msg);
+ mbox_chan_received_data(&ipc->chans[chan_id], (void *)&ipc_msg);
+
+ } else {
+ ret = mchp_ipc_sbi_chan_send(SBI_EXT_IPC_RECEIVE, chan_id,
+ chan_info->buf_base_rx_addr);
+ mbox_chan_txdone(&ipc->chans[chan_id], ret);
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static int mchp_ipc_send_data(struct mbox_chan *chan, void *data)
+{
+ struct mchp_ipc_sbi_chan *chan_info = (struct mchp_ipc_sbi_chan *)chan->con_priv;
+ const struct mchp_ipc_msg *msg = data;
+ struct mchp_ipc_sbi_msg sbi_payload;
+
+ memcpy(chan_info->msg_buf_tx, msg->buf, msg->size);
+ sbi_payload.buf_addr = chan_info->msg_buf_tx_addr;
+ sbi_payload.size = msg->size;
+ memcpy(chan_info->buf_base_tx, &sbi_payload, sizeof(sbi_payload));
+
+ return mchp_ipc_sbi_chan_send(SBI_EXT_IPC_SEND, chan_info->id, chan_info->buf_base_tx_addr);
+}
+
+static int mchp_ipc_startup(struct mbox_chan *chan)
+{
+ struct mchp_ipc_sbi_chan *chan_info = (struct mchp_ipc_sbi_chan *)chan->con_priv;
+ struct mchp_ipc_sbi_mbox *ipc = to_mchp_ipc_mbox(chan->mbox);
+ struct mchp_ipc_init ch_init_msg;
+ int ret;
+
+ /*
+ * The TX base buffer is used to transmit two types of messages:
+ * - struct mchp_ipc_init to initialize the channel
+ * - struct mchp_ipc_sbi_msg to transmit user data/payload
+ * Ensure the TX buffer size is large enough to accommodate either message type.
+ */
+ size_t max_size = max(sizeof(struct mchp_ipc_init), sizeof(struct mchp_ipc_sbi_msg));
+
+ chan_info->buf_base_tx = kmalloc(max_size, GFP_KERNEL);
+ if (!chan_info->buf_base_tx) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ chan_info->buf_base_tx_addr = __pa(chan_info->buf_base_tx);
+
+ chan_info->buf_base_rx = kmalloc(max_size, GFP_KERNEL);
+ if (!chan_info->buf_base_rx) {
+ ret = -ENOMEM;
+ goto fail_free_buf_base_tx;
+ }
+
+ chan_info->buf_base_rx_addr = __pa(chan_info->buf_base_rx);
+
+ ret = mchp_ipc_sbi_chan_send(SBI_EXT_IPC_CH_INIT, chan_info->id,
+ chan_info->buf_base_tx_addr);
+ if (ret < 0) {
+ dev_err(ipc->dev, "channel %u init failed\n", chan_info->id);
+ goto fail_free_buf_base_rx;
+ }
+
+ memcpy(&ch_init_msg, chan_info->buf_base_tx, sizeof(struct mchp_ipc_init));
+ chan_info->max_msg_size = ch_init_msg.max_msg_size;
+
+ chan_info->msg_buf_tx = kmalloc(chan_info->max_msg_size, GFP_KERNEL);
+ if (!chan_info->msg_buf_tx) {
+ ret = -ENOMEM;
+ goto fail_free_buf_base_rx;
+ }
+
+ chan_info->msg_buf_tx_addr = __pa(chan_info->msg_buf_tx);
+
+ chan_info->msg_buf_rx = kmalloc(chan_info->max_msg_size, GFP_KERNEL);
+ if (!chan_info->msg_buf_rx) {
+ ret = -ENOMEM;
+ goto fail_free_buf_msg_tx;
+ }
+
+ chan_info->msg_buf_rx_addr = __pa(chan_info->msg_buf_rx);
+
+	switch (ipc->hw_type) {
+	case MIV_IHC:
+		return 0;
+	default:
+		/* Unsupported IPC hardware: fail startup with a real errno. */
+		ret = -EOPNOTSUPP;
+		goto fail_free_buf_msg_rx;
+	}
+
+fail_free_buf_msg_rx:
+ kfree(chan_info->msg_buf_rx);
+fail_free_buf_msg_tx:
+ kfree(chan_info->msg_buf_tx);
+fail_free_buf_base_rx:
+ kfree(chan_info->buf_base_rx);
+fail_free_buf_base_tx:
+ kfree(chan_info->buf_base_tx);
+fail:
+ return ret;
+}
+
+static void mchp_ipc_shutdown(struct mbox_chan *chan)
+{
+ struct mchp_ipc_sbi_chan *chan_info = (struct mchp_ipc_sbi_chan *)chan->con_priv;
+
+ kfree(chan_info->buf_base_tx);
+ kfree(chan_info->buf_base_rx);
+ kfree(chan_info->msg_buf_tx);
+ kfree(chan_info->msg_buf_rx);
+}
+
+static const struct mbox_chan_ops mchp_ipc_ops = {
+ .startup = mchp_ipc_startup,
+ .send_data = mchp_ipc_send_data,
+ .shutdown = mchp_ipc_shutdown,
+};
+
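+/*
+ * TX completion is interrupt driven here: the "message clear" half of the
+ * status word handled in mchp_ipc_cluster_aggr_isr() ends each transfer via
+ * mbox_chan_txdone(), which is why probe() sets controller.txdone_irq.
+ */
+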
+static struct mbox_chan *mchp_ipc_mbox_xlate(struct mbox_controller *controller,
+ const struct of_phandle_args *spec)
+{
+ struct mchp_ipc_sbi_mbox *ipc = to_mchp_ipc_mbox(controller);
+ unsigned int chan_id = spec->args[0];
+
+ if (chan_id >= ipc->controller.num_chans) {
+ dev_err(ipc->dev, "invalid channel id %d\n", chan_id);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &ipc->chans[chan_id];
+}
+
+static int mchp_ipc_get_cluster_aggr_irq(struct mchp_ipc_sbi_mbox *ipc)
+{
+ struct platform_device *pdev = to_platform_device(ipc->dev);
+ char *irq_name;
+ int cpuid, ret;
+ unsigned long hartid;
+ bool irq_found = false;
+
+ for_each_online_cpu(cpuid) {
+ hartid = cpuid_to_hartid_map(cpuid);
+ irq_name = devm_kasprintf(ipc->dev, GFP_KERNEL, "hart-%lu", hartid);
+ ret = platform_get_irq_byname_optional(pdev, irq_name);
+ if (ret <= 0)
+ continue;
+
+ ipc->cluster_cfg[hartid].irq = ret;
+ ret = devm_request_irq(ipc->dev, ipc->cluster_cfg[hartid].irq,
+ mchp_ipc_cluster_aggr_isr, IRQF_SHARED,
+ "miv-ihc-irq", ipc);
+ if (ret)
+ return ret;
+
+ ipc->cluster_cfg[hartid].buf_base = devm_kmalloc(ipc->dev,
+ sizeof(struct mchp_ipc_status),
+ GFP_KERNEL);
+
+ if (!ipc->cluster_cfg[hartid].buf_base)
+ return -ENOMEM;
+
+ ipc->cluster_cfg[hartid].buf_base_addr = __pa(ipc->cluster_cfg[hartid].buf_base);
+
+ irq_found = true;
+ }
+
+ return irq_found;
+}
+
+static int mchp_ipc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mchp_ipc_mbox_info ipc_info;
+ struct mchp_ipc_sbi_mbox *ipc;
+ struct mchp_ipc_sbi_chan *priv;
+ bool irq_avail = false;
+ int ret;
+ u32 chan_id;
+
+ ret = sbi_probe_extension(SBI_EXT_MICROCHIP_TECHNOLOGY);
+ if (ret <= 0)
+		return dev_err_probe(dev, -ENODEV, "Microchip SBI extension not detected\n");
+
+ ipc = devm_kzalloc(dev, sizeof(*ipc), GFP_KERNEL);
+ if (!ipc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ipc);
+
+ ipc->buf_base = devm_kmalloc(dev, sizeof(struct mchp_ipc_mbox_info), GFP_KERNEL);
+ if (!ipc->buf_base)
+ return -ENOMEM;
+
+ ipc->buf_base_addr = __pa(ipc->buf_base);
+
+ ret = mchp_ipc_sbi_send(SBI_EXT_IPC_PROBE, ipc->buf_base_addr);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "could not probe IPC SBI service\n");
+
+ memcpy(&ipc_info, ipc->buf_base, sizeof(struct mchp_ipc_mbox_info));
+ ipc->controller.num_chans = ipc_info.num_channels;
+ ipc->hw_type = ipc_info.hw_type;
+
+ ipc->chans = devm_kcalloc(dev, ipc->controller.num_chans, sizeof(*ipc->chans), GFP_KERNEL);
+ if (!ipc->chans)
+ return -ENOMEM;
+
+ ipc->dev = dev;
+ ipc->controller.txdone_irq = true;
+ ipc->controller.dev = ipc->dev;
+ ipc->controller.ops = &mchp_ipc_ops;
+ ipc->controller.chans = ipc->chans;
+ ipc->controller.of_xlate = mchp_ipc_mbox_xlate;
+
+ for (chan_id = 0; chan_id < ipc->controller.num_chans; chan_id++) {
+ priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ipc->chans[chan_id].con_priv = priv;
+ priv->id = chan_id;
+ }
+
+ if (ipc->hw_type == MIV_IHC) {
+ ipc->cluster_cfg = devm_kcalloc(dev, num_online_cpus(),
+ sizeof(struct mchp_ipc_cluster_cfg),
+ GFP_KERNEL);
+ if (!ipc->cluster_cfg)
+ return -ENOMEM;
+
+ if (mchp_ipc_get_cluster_aggr_irq(ipc))
+ irq_avail = true;
+ }
+
+ if (!irq_avail)
+ return dev_err_probe(dev, -ENODEV, "missing interrupt property\n");
+
+ ret = devm_mbox_controller_register(dev, &ipc->controller);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Inter-Processor communication (IPC) registration failed\n");
+
+ return 0;
+}
+
+static const struct of_device_id mchp_ipc_of_match[] = {
+ {.compatible = "microchip,sbi-ipc", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mchp_ipc_of_match);
+
+static struct platform_driver mchp_ipc_driver = {
+ .driver = {
+ .name = "microchip_ipc",
+ .of_match_table = mchp_ipc_of_match,
+ },
+ .probe = mchp_ipc_probe,
+};
+
+module_platform_driver(mchp_ipc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Valentina Fernandez <valentina.fernandezalanis@microchip.com>");
+MODULE_DESCRIPTION("Microchip Inter-Processor Communication (IPC) driver");
diff --git a/drivers/mailbox/mailbox-mpfs.c b/drivers/mailbox/mailbox-mpfs.c
index 4df546e3b7ea..d5d9effece97 100644
--- a/drivers/mailbox/mailbox-mpfs.c
+++ b/drivers/mailbox/mailbox-mpfs.c
@@ -251,7 +251,7 @@ static inline int mpfs_mbox_syscon_probe(struct mpfs_mbox *mbox, struct platform
return PTR_ERR(mbox->sysreg_scb);
mbox->mbox_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(mbox->ctrl_base))
+ if (IS_ERR(mbox->mbox_base))
return PTR_ERR(mbox->mbox_base);
return 0;
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
index c9dd8c42c0cd..3a28ab5c42e5 100644
--- a/drivers/mailbox/mailbox-test.c
+++ b/drivers/mailbox/mailbox-test.c
@@ -268,7 +268,7 @@ static int mbox_test_add_debugfs(struct platform_device *pdev,
return 0;
tdev->root_debugfs_dir = debugfs_create_dir(dev_name(&pdev->dev), NULL);
- if (!tdev->root_debugfs_dir) {
+ if (IS_ERR(tdev->root_debugfs_dir)) {
dev_err(&pdev->dev, "Failed to create Mailbox debugfs\n");
return -EINVAL;
}
diff --git a/drivers/mailbox/mailbox-th1520.c b/drivers/mailbox/mailbox-th1520.c
index 4e84640ac3b8..626957c2e435 100644
--- a/drivers/mailbox/mailbox-th1520.c
+++ b/drivers/mailbox/mailbox-th1520.c
@@ -41,7 +41,7 @@
#ifdef CONFIG_PM_SLEEP
/* store MBOX context across system-wide suspend/resume transitions */
struct th1520_mbox_context {
- u32 intr_mask[TH_1520_MBOX_CHANS - 1];
+ u32 intr_mask[TH_1520_MBOX_CHANS];
};
#endif
@@ -387,8 +387,10 @@ static void __iomem *th1520_map_mmio(struct platform_device *pdev,
mapped = devm_ioremap(&pdev->dev, res->start + offset,
resource_size(res) - offset);
- if (IS_ERR(mapped))
+ if (!mapped) {
dev_err(&pdev->dev, "Failed to map resource: %s\n", res_name);
+ return ERR_PTR(-ENOMEM);
+ }
return mapped;
}
@@ -433,10 +435,8 @@ static int th1520_mbox_probe(struct platform_device *pdev)
}
ret = devm_add_action_or_reset(dev, th1520_disable_clk, priv);
- if (ret) {
- clk_bulk_disable_unprepare(ARRAY_SIZE(priv->clocks), priv->clocks);
+ if (ret)
return ret;
- }
/*
* The address mappings in the device tree align precisely with those
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index d3d26a2c9895..2acc6ec229a4 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -6,18 +6,17 @@
* Author: Jassi Brar <jassisinghbrar@gmail.com>
*/
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
+#include <linux/cleanup.h>
#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/module.h>
#include <linux/device.h>
-#include <linux/bitops.h>
+#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of.h>
+#include <linux/property.h>
+#include <linux/spinlock.h>
#include "mailbox.h"
@@ -27,15 +26,12 @@ static DEFINE_MUTEX(con_mutex);
static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
{
int idx;
- unsigned long flags;
- spin_lock_irqsave(&chan->lock, flags);
+ guard(spinlock_irqsave)(&chan->lock);
/* See if there is any space left */
- if (chan->msg_count == MBOX_TX_QUEUE_LEN) {
- spin_unlock_irqrestore(&chan->lock, flags);
+ if (chan->msg_count == MBOX_TX_QUEUE_LEN)
return -ENOBUFS;
- }
idx = chan->msg_free;
chan->msg_data[idx] = mssg;
@@ -46,60 +42,53 @@ static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
else
chan->msg_free++;
- spin_unlock_irqrestore(&chan->lock, flags);
-
return idx;
}
static void msg_submit(struct mbox_chan *chan)
{
unsigned count, idx;
- unsigned long flags;
void *data;
int err = -EBUSY;
- spin_lock_irqsave(&chan->lock, flags);
+ scoped_guard(spinlock_irqsave, &chan->lock) {
+ if (!chan->msg_count || chan->active_req)
+ break;
- if (!chan->msg_count || chan->active_req)
- goto exit;
-
- count = chan->msg_count;
- idx = chan->msg_free;
- if (idx >= count)
- idx -= count;
- else
- idx += MBOX_TX_QUEUE_LEN - count;
+ count = chan->msg_count;
+ idx = chan->msg_free;
+ if (idx >= count)
+ idx -= count;
+ else
+ idx += MBOX_TX_QUEUE_LEN - count;
- data = chan->msg_data[idx];
+ data = chan->msg_data[idx];
- if (chan->cl->tx_prepare)
- chan->cl->tx_prepare(chan->cl, data);
- /* Try to submit a message to the MBOX controller */
- err = chan->mbox->ops->send_data(chan, data);
- if (!err) {
- chan->active_req = data;
- chan->msg_count--;
+ if (chan->cl->tx_prepare)
+ chan->cl->tx_prepare(chan->cl, data);
+ /* Try to submit a message to the MBOX controller */
+ err = chan->mbox->ops->send_data(chan, data);
+ if (!err) {
+ chan->active_req = data;
+ chan->msg_count--;
+ }
}
-exit:
- spin_unlock_irqrestore(&chan->lock, flags);
if (!err && (chan->txdone_method & TXDONE_BY_POLL)) {
/* kick start the timer immediately to avoid delays */
- spin_lock_irqsave(&chan->mbox->poll_hrt_lock, flags);
- hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
- spin_unlock_irqrestore(&chan->mbox->poll_hrt_lock, flags);
+ scoped_guard(spinlock_irqsave, &chan->mbox->poll_hrt_lock)
+ hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
}
}
static void tx_tick(struct mbox_chan *chan, int r)
{
- unsigned long flags;
void *mssg;
- spin_lock_irqsave(&chan->lock, flags);
- mssg = chan->active_req;
- chan->active_req = NULL;
- spin_unlock_irqrestore(&chan->lock, flags);
+ scoped_guard(spinlock_irqsave, &chan->lock) {
+ mssg = chan->active_req;
+ chan->active_req = NULL;
+ }
/* Submit next message */
msg_submit(chan);
@@ -121,7 +110,6 @@ static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
container_of(hrtimer, struct mbox_controller, poll_hrt);
bool txdone, resched = false;
int i;
- unsigned long flags;
for (i = 0; i < mbox->num_chans; i++) {
struct mbox_chan *chan = &mbox->chans[i];
@@ -136,10 +124,10 @@ static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
}
if (resched) {
- spin_lock_irqsave(&mbox->poll_hrt_lock, flags);
- if (!hrtimer_is_queued(hrtimer))
- hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period));
- spin_unlock_irqrestore(&mbox->poll_hrt_lock, flags);
+ scoped_guard(spinlock_irqsave, &mbox->poll_hrt_lock) {
+ if (!hrtimer_is_queued(hrtimer))
+ hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period));
+ }
return HRTIMER_RESTART;
}
@@ -321,25 +309,23 @@ EXPORT_SYMBOL_GPL(mbox_flush);
static int __mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
{
struct device *dev = cl->dev;
- unsigned long flags;
int ret;
if (chan->cl || !try_module_get(chan->mbox->dev->driver->owner)) {
- dev_dbg(dev, "%s: mailbox not free\n", __func__);
+ dev_err(dev, "%s: mailbox not free\n", __func__);
return -EBUSY;
}
- spin_lock_irqsave(&chan->lock, flags);
- chan->msg_free = 0;
- chan->msg_count = 0;
- chan->active_req = NULL;
- chan->cl = cl;
- init_completion(&chan->tx_complete);
-
- if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
- chan->txdone_method = TXDONE_BY_ACK;
+ scoped_guard(spinlock_irqsave, &chan->lock) {
+ chan->msg_free = 0;
+ chan->msg_count = 0;
+ chan->active_req = NULL;
+ chan->cl = cl;
+ init_completion(&chan->tx_complete);
- spin_unlock_irqrestore(&chan->lock, flags);
+ if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
+ chan->txdone_method = TXDONE_BY_ACK;
+ }
if (chan->mbox->ops->startup) {
ret = chan->mbox->ops->startup(chan);
@@ -373,13 +359,9 @@ static int __mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
*/
int mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
{
- int ret;
-
- mutex_lock(&con_mutex);
- ret = __mbox_bind_client(chan, cl);
- mutex_unlock(&con_mutex);
+ guard(mutex)(&con_mutex);
- return ret;
+ return __mbox_bind_client(chan, cl);
}
EXPORT_SYMBOL_GPL(mbox_bind_client);
@@ -402,46 +384,65 @@ EXPORT_SYMBOL_GPL(mbox_bind_client);
*/
struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
{
- struct device *dev = cl->dev;
+ struct fwnode_reference_args fwspec;
+ struct fwnode_handle *fwnode;
struct mbox_controller *mbox;
struct of_phandle_args spec;
struct mbox_chan *chan;
+ struct device *dev;
+ unsigned int i;
int ret;
- if (!dev || !dev->of_node) {
- pr_debug("%s: No owner device node\n", __func__);
+ dev = cl->dev;
+ if (!dev) {
+ pr_debug("No owner device\n");
return ERR_PTR(-ENODEV);
}
- mutex_lock(&con_mutex);
-
- if (of_parse_phandle_with_args(dev->of_node, "mboxes",
- "#mbox-cells", index, &spec)) {
- dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__);
- mutex_unlock(&con_mutex);
+ fwnode = dev_fwnode(dev);
+ if (!fwnode) {
+ dev_dbg(dev, "No owner fwnode\n");
return ERR_PTR(-ENODEV);
}
- chan = ERR_PTR(-EPROBE_DEFER);
- list_for_each_entry(mbox, &mbox_cons, node)
- if (mbox->dev->of_node == spec.np) {
- chan = mbox->of_xlate(mbox, &spec);
- if (!IS_ERR(chan))
- break;
+ ret = fwnode_property_get_reference_args(fwnode, "mboxes", "#mbox-cells",
+ 0, index, &fwspec);
+ if (ret) {
+ dev_err(dev, "%s: can't parse \"%s\" property\n", __func__, "mboxes");
+ return ERR_PTR(ret);
+ }
+
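+	/*
+	 * Also keep a struct of_phandle_args copy so controllers that still
+	 * implement only the legacy of_xlate() callback keep working while
+	 * fw_xlate() users are matched on the fwnode directly.
+	 */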
+ spec.np = to_of_node(fwspec.fwnode);
+ spec.args_count = fwspec.nargs;
+ for (i = 0; i < spec.args_count; i++)
+ spec.args[i] = fwspec.args[i];
+
+ scoped_guard(mutex, &con_mutex) {
+ chan = ERR_PTR(-EPROBE_DEFER);
+ list_for_each_entry(mbox, &mbox_cons, node) {
+ if (device_match_fwnode(mbox->dev, fwspec.fwnode)) {
+ if (mbox->fw_xlate) {
+ chan = mbox->fw_xlate(mbox, &fwspec);
+ if (!IS_ERR(chan))
+ break;
+ } else if (mbox->of_xlate) {
+ chan = mbox->of_xlate(mbox, &spec);
+ if (!IS_ERR(chan))
+ break;
+ }
+ }
}
- of_node_put(spec.np);
+ fwnode_handle_put(fwspec.fwnode);
- if (IS_ERR(chan)) {
- mutex_unlock(&con_mutex);
- return chan;
- }
+ if (IS_ERR(chan))
+ return chan;
- ret = __mbox_bind_client(chan, cl);
- if (ret)
- chan = ERR_PTR(ret);
+ ret = __mbox_bind_client(chan, cl);
+ if (ret)
+ chan = ERR_PTR(ret);
+ }
- mutex_unlock(&con_mutex);
return chan;
}
EXPORT_SYMBOL_GPL(mbox_request_channel);
@@ -449,19 +450,12 @@ EXPORT_SYMBOL_GPL(mbox_request_channel);
struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
const char *name)
{
- struct device_node *np = cl->dev->of_node;
- int index;
+ int index = device_property_match_string(cl->dev, "mbox-names", name);
- if (!np) {
- dev_err(cl->dev, "%s() currently only supports DT\n", __func__);
- return ERR_PTR(-EINVAL);
- }
-
- index = of_property_match_string(np, "mbox-names", name);
if (index < 0) {
dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
__func__, name);
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(index);
}
return mbox_request_channel(cl, index);
}
@@ -474,8 +468,6 @@ EXPORT_SYMBOL_GPL(mbox_request_channel_byname);
*/
void mbox_free_channel(struct mbox_chan *chan)
{
- unsigned long flags;
-
if (!chan || !chan->cl)
return;
@@ -483,20 +475,19 @@ void mbox_free_channel(struct mbox_chan *chan)
chan->mbox->ops->shutdown(chan);
/* The queued TX requests are simply aborted, no callbacks are made */
- spin_lock_irqsave(&chan->lock, flags);
- chan->cl = NULL;
- chan->active_req = NULL;
- if (chan->txdone_method == TXDONE_BY_ACK)
- chan->txdone_method = TXDONE_BY_POLL;
+ scoped_guard(spinlock_irqsave, &chan->lock) {
+ chan->cl = NULL;
+ chan->active_req = NULL;
+ if (chan->txdone_method == TXDONE_BY_ACK)
+ chan->txdone_method = TXDONE_BY_POLL;
+ }
module_put(chan->mbox->dev->driver->owner);
- spin_unlock_irqrestore(&chan->lock, flags);
}
EXPORT_SYMBOL_GPL(mbox_free_channel);
-static struct mbox_chan *
-of_mbox_index_xlate(struct mbox_controller *mbox,
- const struct of_phandle_args *sp)
+static struct mbox_chan *fw_mbox_index_xlate(struct mbox_controller *mbox,
+ const struct fwnode_reference_args *sp)
{
int ind = sp->args[0];
@@ -534,9 +525,7 @@ int mbox_controller_register(struct mbox_controller *mbox)
return -EINVAL;
}
- hrtimer_init(&mbox->poll_hrt, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- mbox->poll_hrt.function = txdone_hrtimer;
+ hrtimer_setup(&mbox->poll_hrt, txdone_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
spin_lock_init(&mbox->poll_hrt_lock);
}
@@ -549,12 +538,11 @@ int mbox_controller_register(struct mbox_controller *mbox)
spin_lock_init(&chan->lock);
}
- if (!mbox->of_xlate)
- mbox->of_xlate = of_mbox_index_xlate;
+ if (!mbox->fw_xlate && !mbox->of_xlate)
+ mbox->fw_xlate = fw_mbox_index_xlate;
- mutex_lock(&con_mutex);
- list_add_tail(&mbox->node, &mbox_cons);
- mutex_unlock(&con_mutex);
+ scoped_guard(mutex, &con_mutex)
+ list_add_tail(&mbox->node, &mbox_cons);
return 0;
}
@@ -571,17 +559,15 @@ void mbox_controller_unregister(struct mbox_controller *mbox)
if (!mbox)
return;
- mutex_lock(&con_mutex);
+ scoped_guard(mutex, &con_mutex) {
+ list_del(&mbox->node);
- list_del(&mbox->node);
+ for (i = 0; i < mbox->num_chans; i++)
+ mbox_free_channel(&mbox->chans[i]);
- for (i = 0; i < mbox->num_chans; i++)
- mbox_free_channel(&mbox->chans[i]);
-
- if (mbox->txdone_poll)
- hrtimer_cancel(&mbox->poll_hrt);
-
- mutex_unlock(&con_mutex);
+ if (mbox->txdone_poll)
+ hrtimer_cancel(&mbox->poll_hrt);
+ }
}
EXPORT_SYMBOL_GPL(mbox_controller_unregister);
@@ -592,16 +578,6 @@ static void __devm_mbox_controller_unregister(struct device *dev, void *res)
mbox_controller_unregister(*mbox);
}
-static int devm_mbox_controller_match(struct device *dev, void *res, void *data)
-{
- struct mbox_controller **mbox = res;
-
- if (WARN_ON(!mbox || !*mbox))
- return 0;
-
- return *mbox == data;
-}
-
/**
* devm_mbox_controller_register() - managed mbox_controller_register()
* @dev: device owning the mailbox controller being registered
@@ -637,20 +613,3 @@ int devm_mbox_controller_register(struct device *dev,
return 0;
}
EXPORT_SYMBOL_GPL(devm_mbox_controller_register);
-
-/**
- * devm_mbox_controller_unregister() - managed mbox_controller_unregister()
- * @dev: device owning the mailbox controller being unregistered
- * @mbox: mailbox controller being unregistered
- *
- * This function unregisters the mailbox controller and removes the device-
- * managed resource that was set up to automatically unregister the mailbox
- * controller on driver probe failure or driver removal. It's typically not
- * necessary to call this function.
- */
-void devm_mbox_controller_unregister(struct device *dev, struct mbox_controller *mbox)
-{
- WARN_ON(devres_release(dev, __devm_mbox_controller_unregister,
- devm_mbox_controller_match, mbox));
-}
-EXPORT_SYMBOL_GPL(devm_mbox_controller_unregister);
diff --git a/drivers/mailbox/mailbox.h b/drivers/mailbox/mailbox.h
index 046d6d258b32..e1ec4efab693 100644
--- a/drivers/mailbox/mailbox.h
+++ b/drivers/mailbox/mailbox.h
@@ -3,6 +3,8 @@
#ifndef __MAILBOX_H
#define __MAILBOX_H
+#include <linux/bits.h>
+
#define TXDONE_BY_IRQ BIT(0) /* controller has remote RTR irq */
#define TXDONE_BY_POLL BIT(1) /* controller can read status of last TX */
#define TXDONE_BY_ACK BIT(2) /* S/W ACK received by Client ticks the TX */
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index d186865b8dce..5791f80f995a 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -92,16 +92,16 @@ struct gce_plat {
u32 gce_num;
};
-static void cmdq_sw_ddr_enable(struct cmdq *cmdq, bool enable)
+static inline u32 cmdq_convert_gce_addr(dma_addr_t addr, const struct gce_plat *pdata)
{
- WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
-
- if (enable)
- writel(GCE_DDR_EN | GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);
- else
- writel(GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);
+ /* Convert DMA addr (PA or IOVA) to GCE readable addr */
+ return addr >> pdata->shift;
+}
- clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
+static inline dma_addr_t cmdq_revert_gce_addr(u32 addr, const struct gce_plat *pdata)
+{
+ /* Revert GCE readable addr to DMA addr (PA or IOVA) */
+ return (dma_addr_t)addr << pdata->shift;
}
u8 cmdq_get_shift_pa(struct mbox_chan *chan)
@@ -112,6 +112,19 @@ u8 cmdq_get_shift_pa(struct mbox_chan *chan)
}
EXPORT_SYMBOL(cmdq_get_shift_pa);
+static void cmdq_gctl_value_toggle(struct cmdq *cmdq, bool ddr_enable)
+{
+ u32 val = cmdq->pdata->control_by_sw ? GCE_CTRL_BY_SW : 0;
+
+ if (!cmdq->pdata->control_by_sw && !cmdq->pdata->sw_ddr_en)
+ return;
+
+ if (cmdq->pdata->sw_ddr_en && ddr_enable)
+ val |= GCE_DDR_EN;
+
+ writel(val, cmdq->base + GCE_GCTL_VALUE);
+}
+
static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread)
{
u32 status;
@@ -140,16 +153,10 @@ static void cmdq_thread_resume(struct cmdq_thread *thread)
static void cmdq_init(struct cmdq *cmdq)
{
int i;
- u32 gctl_regval = 0;
WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
- if (cmdq->pdata->control_by_sw)
- gctl_regval = GCE_CTRL_BY_SW;
- if (cmdq->pdata->sw_ddr_en)
- gctl_regval |= GCE_DDR_EN;
- if (gctl_regval)
- writel(gctl_regval, cmdq->base + GCE_GCTL_VALUE);
+ cmdq_gctl_value_toggle(cmdq, true);
writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
for (i = 0; i <= CMDQ_MAX_EVENT; i++)
@@ -193,13 +200,12 @@ static void cmdq_task_insert_into_thread(struct cmdq_task *task)
struct cmdq_task *prev_task = list_last_entry(
&thread->task_busy_list, typeof(*task), list_entry);
u64 *prev_task_base = prev_task->pkt->va_base;
+ u32 gce_addr = cmdq_convert_gce_addr(task->pa_base, task->cmdq->pdata);
/* let previous task jump to this task */
dma_sync_single_for_cpu(dev, prev_task->pa_base,
prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
- prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] =
- (u64)CMDQ_JUMP_BY_PA << 32 |
- (task->pa_base >> task->cmdq->pdata->shift);
+ prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] = (u64)CMDQ_JUMP_BY_PA << 32 | gce_addr;
dma_sync_single_for_device(dev, prev_task->pa_base,
prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
@@ -242,7 +248,8 @@ static void cmdq_thread_irq_handler(struct cmdq *cmdq,
struct cmdq_thread *thread)
{
struct cmdq_task *task, *tmp, *curr_task = NULL;
- u32 curr_pa, irq_flag, task_end_pa;
+ u32 irq_flag, gce_addr;
+ dma_addr_t curr_pa, task_end_pa;
bool err;
irq_flag = readl(thread->base + CMDQ_THR_IRQ_STATUS);
@@ -264,7 +271,8 @@ static void cmdq_thread_irq_handler(struct cmdq *cmdq,
else
return;
- curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) << cmdq->pdata->shift;
+ gce_addr = readl(thread->base + CMDQ_THR_CURR_ADDR);
+ curr_pa = cmdq_revert_gce_addr(gce_addr, cmdq->pdata);
list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
list_entry) {
@@ -315,14 +323,21 @@ static irqreturn_t cmdq_irq_handler(int irq, void *dev)
static int cmdq_runtime_resume(struct device *dev)
{
struct cmdq *cmdq = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks);
+ if (ret)
+ return ret;
- return clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks);
+ cmdq_gctl_value_toggle(cmdq, true);
+ return 0;
}
static int cmdq_runtime_suspend(struct device *dev)
{
struct cmdq *cmdq = dev_get_drvdata(dev);
+ cmdq_gctl_value_toggle(cmdq, false);
clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
return 0;
}
@@ -347,9 +362,6 @@ static int cmdq_suspend(struct device *dev)
if (task_running)
dev_warn(dev, "exist running task(s) in suspend\n");
- if (cmdq->pdata->sw_ddr_en)
- cmdq_sw_ddr_enable(cmdq, false);
-
return pm_runtime_force_suspend(dev);
}
@@ -360,9 +372,6 @@ static int cmdq_resume(struct device *dev)
WARN_ON(pm_runtime_force_resume(dev));
cmdq->suspended = false;
- if (cmdq->pdata->sw_ddr_en)
- cmdq_sw_ddr_enable(cmdq, true);
-
return 0;
}
@@ -370,9 +379,6 @@ static void cmdq_remove(struct platform_device *pdev)
{
struct cmdq *cmdq = platform_get_drvdata(pdev);
- if (cmdq->pdata->sw_ddr_en)
- cmdq_sw_ddr_enable(cmdq, false);
-
if (!IS_ENABLED(CONFIG_PM))
cmdq_runtime_suspend(&pdev->dev);
@@ -385,21 +391,15 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
struct cmdq_task *task;
- unsigned long curr_pa, end_pa;
- int ret;
+ u32 gce_addr;
+ dma_addr_t curr_pa, end_pa;
/* Client should not flush new tasks if suspended. */
WARN_ON(cmdq->suspended);
- ret = pm_runtime_get_sync(cmdq->mbox.dev);
- if (ret < 0)
- return ret;
-
task = kzalloc(sizeof(*task), GFP_ATOMIC);
- if (!task) {
- __pm_runtime_put_autosuspend(cmdq->mbox.dev);
+ if (!task)
return -ENOMEM;
- }
task->cmdq = cmdq;
INIT_LIST_HEAD(&task->list_entry);
@@ -416,20 +416,20 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
*/
WARN_ON(cmdq_thread_reset(cmdq, thread) < 0);
- writel(task->pa_base >> cmdq->pdata->shift,
- thread->base + CMDQ_THR_CURR_ADDR);
- writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->pdata->shift,
- thread->base + CMDQ_THR_END_ADDR);
+ gce_addr = cmdq_convert_gce_addr(task->pa_base, cmdq->pdata);
+ writel(gce_addr, thread->base + CMDQ_THR_CURR_ADDR);
+ gce_addr = cmdq_convert_gce_addr(task->pa_base + pkt->cmd_buf_size, cmdq->pdata);
+ writel(gce_addr, thread->base + CMDQ_THR_END_ADDR);
writel(thread->priority, thread->base + CMDQ_THR_PRIORITY);
writel(CMDQ_THR_IRQ_EN, thread->base + CMDQ_THR_IRQ_ENABLE);
writel(CMDQ_THR_ENABLED, thread->base + CMDQ_THR_ENABLE_TASK);
} else {
WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
- curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) <<
- cmdq->pdata->shift;
- end_pa = readl(thread->base + CMDQ_THR_END_ADDR) <<
- cmdq->pdata->shift;
+ gce_addr = readl(thread->base + CMDQ_THR_CURR_ADDR);
+ curr_pa = cmdq_revert_gce_addr(gce_addr, cmdq->pdata);
+ gce_addr = readl(thread->base + CMDQ_THR_END_ADDR);
+ end_pa = cmdq_revert_gce_addr(gce_addr, cmdq->pdata);
/* check boundary */
if (curr_pa == end_pa - CMDQ_INST_SIZE ||
curr_pa == end_pa) {
@@ -446,9 +446,6 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
}
list_move_tail(&task->list_entry, &thread->task_busy_list);
- pm_runtime_mark_last_busy(cmdq->mbox.dev);
- __pm_runtime_put_autosuspend(cmdq->mbox.dev);
-
return 0;
}
@@ -495,7 +492,7 @@ done:
spin_unlock_irqrestore(&thread->chan->lock, flags);
pm_runtime_mark_last_busy(cmdq->mbox.dev);
- __pm_runtime_put_autosuspend(cmdq->mbox.dev);
+ pm_runtime_put_autosuspend(cmdq->mbox.dev);
}
static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
@@ -535,7 +532,7 @@ static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
out:
spin_unlock_irqrestore(&thread->chan->lock, flags);
pm_runtime_mark_last_busy(cmdq->mbox.dev);
- __pm_runtime_put_autosuspend(cmdq->mbox.dev);
+ pm_runtime_put_autosuspend(cmdq->mbox.dev);
return 0;
@@ -550,7 +547,7 @@ wait:
return -EFAULT;
}
pm_runtime_mark_last_busy(cmdq->mbox.dev);
- __pm_runtime_put_autosuspend(cmdq->mbox.dev);
+ pm_runtime_put_autosuspend(cmdq->mbox.dev);
return 0;
}
@@ -663,6 +660,9 @@ static int cmdq_probe(struct platform_device *pdev)
if (err)
return err;
+ dma_set_coherent_mask(dev,
+ DMA_BIT_MASK(sizeof(u32) * BITS_PER_BYTE + cmdq->pdata->shift));
+
cmdq->mbox.dev = dev;
cmdq->mbox.chans = devm_kcalloc(dev, cmdq->pdata->thread_nr,
sizeof(*cmdq->mbox.chans), GFP_KERNEL);
diff --git a/drivers/mailbox/mtk-gpueb-mailbox.c b/drivers/mailbox/mtk-gpueb-mailbox.c
new file mode 100644
index 000000000000..f6d2beccd91b
--- /dev/null
+++ b/drivers/mailbox/mtk-gpueb-mailbox.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * MediaTek GPUEB mailbox driver for SoCs such as the MT8196
+ *
+ * Copyright (C) 2025, Collabora Ltd.
+ *
+ * Developers harmed in the making of this driver:
+ * - Nicolas Frattaroli <nicolas.frattaroli@collabora.com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#define GPUEB_MBOX_CTL_TX_STS 0x00
+#define GPUEB_MBOX_CTL_IRQ_SET 0x04
+#define GPUEB_MBOX_CTL_IRQ_CLR 0x74
+#define GPUEB_MBOX_CTL_RX_STS 0x78
+
+#define GPUEB_MBOX_FULL BIT(0) /* i.e. we've received data */
+#define GPUEB_MBOX_BLOCKED BIT(1) /* i.e. the channel is shutdown */
+
+#define GPUEB_MBOX_MAX_RX_SIZE 32 /* in bytes */
+
+struct mtk_gpueb_mbox {
+ struct device *dev;
+ struct clk *clk;
+ void __iomem *mbox_mmio;
+ void __iomem *mbox_ctl;
+ struct mbox_controller mbox;
+ struct mtk_gpueb_mbox_chan *ch;
+ int irq;
+ const struct mtk_gpueb_mbox_variant *v;
+};
+
+/**
+ * struct mtk_gpueb_mbox_chan - per-channel runtime data
+ * @ebm: pointer to the parent &struct mtk_gpueb_mbox mailbox
+ * @full_name: descriptive name of channel for IRQ subsystem
+ * @num: channel number, starting at 0
+ * @rx_status: signifies whether channel reception is turned off, or full
+ * @c: pointer to the constant &struct mtk_gpueb_mbox_chan_desc channel data
+ */
+struct mtk_gpueb_mbox_chan {
+ struct mtk_gpueb_mbox *ebm;
+ char *full_name;
+ u8 num;
+ atomic_t rx_status;
+ const struct mtk_gpueb_mbox_chan_desc *c;
+};
+
+/**
+ * struct mtk_gpueb_mbox_chan_desc - per-channel constant data
+ * @name: name of this channel
+ * @num: index of this channel, starting at 0
+ * @tx_offset: byte offset measured from mmio base for outgoing data
+ * @tx_len: size, in bytes, of the outgoing data on this channel
+ * @rx_offset: bytes offset measured from mmio base for incoming data
+ * @rx_len: size, in bytes, of the incoming data on this channel
+ */
+struct mtk_gpueb_mbox_chan_desc {
+ const char *name;
+ const u8 num;
+ const u16 tx_offset;
+ const u8 tx_len;
+ const u16 rx_offset;
+ const u8 rx_len;
+};
+
+struct mtk_gpueb_mbox_variant {
+ const u8 num_channels;
+ const struct mtk_gpueb_mbox_chan_desc channels[] __counted_by(num_channels);
+};
+
+/**
+ * mtk_gpueb_mbox_read_rx - read RX buffer from MMIO into channel's RX buffer
+ * @buf: buffer to read into
+ * @chan: pointer to the channel to read
+ */
+static void mtk_gpueb_mbox_read_rx(void *buf, struct mtk_gpueb_mbox_chan *chan)
+{
+ memcpy_fromio(buf, chan->ebm->mbox_mmio + chan->c->rx_offset, chan->c->rx_len);
+}
+
+static irqreturn_t mtk_gpueb_mbox_isr(int irq, void *data)
+{
+ struct mtk_gpueb_mbox_chan *ch = data;
+ u32 rx_sts;
+
+ rx_sts = readl(ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_RX_STS);
+
+ if (rx_sts & BIT(ch->num)) {
+ if (!atomic_cmpxchg(&ch->rx_status, 0, GPUEB_MBOX_FULL | GPUEB_MBOX_BLOCKED))
+ return IRQ_WAKE_THREAD;
+ }
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t mtk_gpueb_mbox_thread(int irq, void *data)
+{
+ struct mtk_gpueb_mbox_chan *ch = data;
+ int status;
+
+ status = atomic_cmpxchg(&ch->rx_status, GPUEB_MBOX_FULL | GPUEB_MBOX_BLOCKED,
+ GPUEB_MBOX_FULL);
+ if (status == (GPUEB_MBOX_FULL | GPUEB_MBOX_BLOCKED)) {
+ u8 buf[GPUEB_MBOX_MAX_RX_SIZE] = {};
+
+ mtk_gpueb_mbox_read_rx(buf, ch);
+ writel(BIT(ch->num), ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_IRQ_CLR);
+ mbox_chan_received_data(&ch->ebm->mbox.chans[ch->num], buf);
+ atomic_set(&ch->rx_status, 0);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
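+
+/*
+ * RX handshake sketch: the hard IRQ handler claims a full mailbox by moving
+ * rx_status from 0 to FULL | BLOCKED; the threaded handler then drains the
+ * data, clears the hardware interrupt and re-opens the channel by resetting
+ * rx_status to 0. send_data() below refuses to transmit while rx_status is
+ * non-zero.
+ */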
+
+static int mtk_gpueb_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct mtk_gpueb_mbox_chan *ch = chan->con_priv;
+ u32 *values = data;
+ int i;
+
+ if (atomic_read(&ch->rx_status))
+ return -EBUSY;
+
+ /*
+ * We don't want any fancy nonsense, just write the 32-bit values in
+ * order. memcpy_toio/__iowrite32_copy don't work here, as they may use
+ * writes of different sizes or memory ordering characteristics depending
+ * on the architecture, alignment and the current phase of the moon.
+ */
+ for (i = 0; i < ch->c->tx_len; i += 4)
+ writel(values[i / 4], ch->ebm->mbox_mmio + ch->c->tx_offset + i);
+
+ writel(BIT(ch->num), ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_IRQ_SET);
+
+ return 0;
+}
+
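+/*
+ * Every tx_len in the per-SoC channel tables below is a multiple of 4, so
+ * the word-wise copy in send_data() covers each message exactly; the mt8196
+ * "gpufreq" channel (tx_len = 32), for instance, goes out as eight 32-bit
+ * writes.
+ */
+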
+static int mtk_gpueb_mbox_startup(struct mbox_chan *chan)
+{
+ struct mtk_gpueb_mbox_chan *ch = chan->con_priv;
+ int ret;
+
+ atomic_set(&ch->rx_status, 0);
+
+ ret = clk_enable(ch->ebm->clk);
+ if (ret) {
+ dev_err(ch->ebm->dev, "Failed to enable EB clock: %pe\n",
+ ERR_PTR(ret));
+ goto err_block;
+ }
+
+ writel(BIT(ch->num), ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_IRQ_CLR);
+
+ ret = devm_request_threaded_irq(ch->ebm->dev, ch->ebm->irq, mtk_gpueb_mbox_isr,
+ mtk_gpueb_mbox_thread, IRQF_SHARED | IRQF_ONESHOT,
+ ch->full_name, ch);
+ if (ret) {
+ dev_err(ch->ebm->dev, "Failed to request IRQ: %pe\n",
+ ERR_PTR(ret));
+ goto err_unclk;
+ }
+
+ return 0;
+
+err_unclk:
+ clk_disable(ch->ebm->clk);
+err_block:
+ atomic_set(&ch->rx_status, GPUEB_MBOX_BLOCKED);
+
+ return ret;
+}
+
+static void mtk_gpueb_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct mtk_gpueb_mbox_chan *ch = chan->con_priv;
+
+ atomic_set(&ch->rx_status, GPUEB_MBOX_BLOCKED);
+
+ devm_free_irq(ch->ebm->dev, ch->ebm->irq, ch);
+
+ clk_disable(ch->ebm->clk);
+}
+
+static bool mtk_gpueb_mbox_last_tx_done(struct mbox_chan *chan)
+{
+ struct mtk_gpueb_mbox_chan *ch = chan->con_priv;
+
+ return !(readl(ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_TX_STS) & BIT(ch->num));
+}
+
+static const struct mbox_chan_ops mtk_gpueb_mbox_ops = {
+ .send_data = mtk_gpueb_mbox_send_data,
+ .startup = mtk_gpueb_mbox_startup,
+ .shutdown = mtk_gpueb_mbox_shutdown,
+ .last_tx_done = mtk_gpueb_mbox_last_tx_done,
+};
+
+static int mtk_gpueb_mbox_probe(struct platform_device *pdev)
+{
+ struct mtk_gpueb_mbox_chan *ch;
+ struct mtk_gpueb_mbox *ebm;
+ unsigned int i;
+
+ ebm = devm_kzalloc(&pdev->dev, sizeof(*ebm), GFP_KERNEL);
+ if (!ebm)
+ return -ENOMEM;
+
+ ebm->dev = &pdev->dev;
+ ebm->v = of_device_get_match_data(ebm->dev);
+
+ ebm->irq = platform_get_irq(pdev, 0);
+ if (ebm->irq < 0)
+ return ebm->irq;
+
+ ebm->clk = devm_clk_get_prepared(ebm->dev, NULL);
+ if (IS_ERR(ebm->clk))
+ return dev_err_probe(ebm->dev, PTR_ERR(ebm->clk),
+ "Failed to get 'eb' clock\n");
+
+ ebm->mbox_mmio = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ebm->mbox_mmio))
+ return dev_err_probe(ebm->dev, PTR_ERR(ebm->mbox_mmio),
+ "Couldn't map mailbox data registers\n");
+
+ ebm->mbox_ctl = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(ebm->mbox_ctl))
+ return dev_err_probe(
+ ebm->dev, PTR_ERR(ebm->mbox_ctl),
+ "Couldn't map mailbox control registers\n");
+
+ ebm->ch = devm_kmalloc_array(ebm->dev, ebm->v->num_channels,
+ sizeof(*ebm->ch), GFP_KERNEL);
+ if (!ebm->ch)
+ return -ENOMEM;
+
+ ebm->mbox.chans = devm_kcalloc(ebm->dev, ebm->v->num_channels,
+ sizeof(struct mbox_chan), GFP_KERNEL);
+ if (!ebm->mbox.chans)
+ return -ENOMEM;
+
+ for (i = 0; i < ebm->v->num_channels; i++) {
+ ch = &ebm->ch[i];
+ ch->c = &ebm->v->channels[i];
+ if (ch->c->rx_len > GPUEB_MBOX_MAX_RX_SIZE) {
+ dev_err(ebm->dev, "Channel %s RX size (%d) too large\n",
+ ch->c->name, ch->c->rx_len);
+ return -EINVAL;
+ }
+ ch->full_name = devm_kasprintf(ebm->dev, GFP_KERNEL, "%s:%s",
+ dev_name(ebm->dev), ch->c->name);
+ if (!ch->full_name)
+ return -ENOMEM;
+
+ ch->ebm = ebm;
+ ch->num = i;
+ spin_lock_init(&ebm->mbox.chans[i].lock);
+ ebm->mbox.chans[i].con_priv = ch;
+ atomic_set(&ch->rx_status, GPUEB_MBOX_BLOCKED);
+ }
+
+ ebm->mbox.dev = ebm->dev;
+ ebm->mbox.num_chans = ebm->v->num_channels;
+ ebm->mbox.txdone_poll = true;
+ ebm->mbox.txpoll_period = 0; /* minimum hrtimer interval */
+ ebm->mbox.ops = &mtk_gpueb_mbox_ops;
+
+ dev_set_drvdata(ebm->dev, ebm);
+
+ return devm_mbox_controller_register(ebm->dev, &ebm->mbox);
+}
+
+static const struct mtk_gpueb_mbox_variant mtk_gpueb_mbox_mt8196 = {
+ .num_channels = 12,
+ .channels = {
+ { "fast-dvfs-event", 0, 0x0000, 16, 0x00e0, 16 },
+ { "gpufreq", 1, 0x0010, 32, 0x00f0, 32 },
+ { "sleep", 2, 0x0030, 12, 0x0110, 4 },
+ { "timer", 3, 0x003c, 24, 0x0114, 4 },
+ { "fhctl", 4, 0x0054, 36, 0x0118, 4 },
+ { "ccf", 5, 0x0078, 16, 0x011c, 16 },
+ { "gpumpu", 6, 0x0088, 24, 0x012c, 4 },
+ { "fast-dvfs", 7, 0x00a0, 24, 0x0130, 24 },
+ { "ipir-c-met", 8, 0x00b8, 4, 0x0148, 16 },
+ { "ipis-c-met", 9, 0x00bc, 16, 0x0158, 4 },
+ { "brisket", 10, 0x00cc, 16, 0x015c, 16 },
+ { "ppb", 11, 0x00dc, 4, 0x016c, 4 },
+ },
+};
+
+static const struct of_device_id mtk_gpueb_mbox_of_ids[] = {
+ { .compatible = "mediatek,mt8196-gpueb-mbox", .data = &mtk_gpueb_mbox_mt8196 },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mtk_gpueb_mbox_of_ids);
+
+static struct platform_driver mtk_gpueb_mbox_drv = {
+ .probe = mtk_gpueb_mbox_probe,
+ .driver = {
+ .name = "mtk-gpueb-mbox",
+ .of_match_table = mtk_gpueb_mbox_of_ids,
+ }
+};
+module_platform_driver(mtk_gpueb_mbox_drv);
+
+MODULE_AUTHOR("Nicolas Frattaroli <nicolas.frattaroli@collabora.com>");
+MODULE_DESCRIPTION("MediaTek GPUEB mailbox driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
index 6797770474a5..17fe6545875d 100644
--- a/drivers/mailbox/omap-mailbox.c
+++ b/drivers/mailbox/omap-mailbox.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/kfifo.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -67,6 +68,7 @@ struct omap_mbox_fifo {
struct omap_mbox_match_data {
u32 intr_type;
+ bool is_exclusive;
};
struct omap_mbox_device {
@@ -77,6 +79,7 @@ struct omap_mbox_device {
u32 num_users;
u32 num_fifos;
u32 intr_type;
+ const struct omap_mbox_match_data *mbox_data;
};
struct omap_mbox {
@@ -340,11 +343,13 @@ static int omap_mbox_suspend(struct device *dev)
if (pm_runtime_status_suspended(dev))
return 0;
- for (fifo = 0; fifo < mdev->num_fifos; fifo++) {
- if (mbox_read_reg(mdev, MAILBOX_MSGSTATUS(fifo))) {
- dev_err(mdev->dev, "fifo %d has unexpected unread messages\n",
- fifo);
- return -EBUSY;
+ if (mdev->mbox_data->is_exclusive) {
+ for (fifo = 0; fifo < mdev->num_fifos; fifo++) {
+ if (mbox_read_reg(mdev, MAILBOX_MSGSTATUS(fifo))) {
+ dev_err(mdev->dev, "fifo %d has unexpected unread messages\n",
+ fifo);
+ return -EBUSY;
+ }
}
}
@@ -377,8 +382,9 @@ static const struct dev_pm_ops omap_mbox_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(omap_mbox_suspend, omap_mbox_resume)
};
-static const struct omap_mbox_match_data omap2_data = { MBOX_INTR_CFG_TYPE1 };
-static const struct omap_mbox_match_data omap4_data = { MBOX_INTR_CFG_TYPE2 };
+static const struct omap_mbox_match_data omap2_data = { MBOX_INTR_CFG_TYPE1, true };
+static const struct omap_mbox_match_data omap4_data = { MBOX_INTR_CFG_TYPE2, true };
+static const struct omap_mbox_match_data am654_data = { MBOX_INTR_CFG_TYPE2, false };
static const struct of_device_id omap_mailbox_of_match[] = {
{
@@ -395,11 +401,11 @@ static const struct of_device_id omap_mailbox_of_match[] = {
},
{
.compatible = "ti,am654-mailbox",
- .data = &omap4_data,
+ .data = &am654_data,
},
{
.compatible = "ti,am64-mailbox",
- .data = &omap4_data,
+ .data = &am654_data,
},
{
/* end */
@@ -448,7 +454,6 @@ static int omap_mbox_probe(struct platform_device *pdev)
struct omap_mbox_fifo *fifo;
struct device_node *node = pdev->dev.of_node;
struct device_node *child;
- const struct omap_mbox_match_data *match_data;
struct mbox_controller *controller;
u32 intr_type, info_count;
u32 num_users, num_fifos;
@@ -461,11 +466,6 @@ static int omap_mbox_probe(struct platform_device *pdev)
return -ENODEV;
}
- match_data = of_device_get_match_data(&pdev->dev);
- if (!match_data)
- return -ENODEV;
- intr_type = match_data->intr_type;
-
if (of_property_read_u32(node, "ti,mbox-num-users", &num_users))
return -ENODEV;
@@ -482,6 +482,12 @@ static int omap_mbox_probe(struct platform_device *pdev)
if (!mdev)
return -ENOMEM;
+ mdev->mbox_data = device_get_match_data(&pdev->dev);
+ if (!mdev->mbox_data)
+ return -ENODEV;
+
+ intr_type = mdev->mbox_data->intr_type;
+
mdev->mbox_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mdev->mbox_base))
return PTR_ERR(mdev->mbox_base);
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 82102a4c5d68..ff292b9e0be9 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -117,8 +117,6 @@ struct pcc_chan_info {
static struct pcc_chan_info *chan_info;
static int pcc_chan_count;
-static int pcc_send_data(struct mbox_chan *chan, void *data);
-
/*
* PCC can be used with perf critical drivers such as CPPC
* So it makes sense to locally cache the virtual address and
@@ -245,13 +243,13 @@ static bool pcc_mbox_cmd_complete_check(struct pcc_chan_info *pchan)
u64 val;
int ret;
+ if (!pchan->cmd_complete.gas)
+ return true;
+
ret = pcc_chan_reg_read(&pchan->cmd_complete, &val);
if (ret)
return false;
- if (!pchan->cmd_complete.gas)
- return true;
-
/*
* Judge if the channel respond the interrupt based on the value of
* command complete.
@@ -269,33 +267,58 @@ static bool pcc_mbox_cmd_complete_check(struct pcc_chan_info *pchan)
return !!val;
}
-static void check_and_ack(struct pcc_chan_info *pchan, struct mbox_chan *chan)
+static int pcc_mbox_error_check_and_clear(struct pcc_chan_info *pchan)
+{
+ u64 val;
+ int ret;
+
+ ret = pcc_chan_reg_read(&pchan->error, &val);
+ if (ret)
+ return ret;
+
+ if (val & pchan->error.status_mask) {
+ val &= pchan->error.preserve_mask;
+ pcc_chan_reg_write(&pchan->error, val);
+ return -EIO;
+ }
+
+ return 0;
+}
+
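A worked example of the masking above (mask values are illustrative, not taken from a real PCCT): with status_mask = 0x1 and preserve_mask = 0xfffffffe, reading val = 0x5 reports an error because bit 0 is set; the value written back is 0x5 & 0xfffffffe = 0x4, which clears the status bit while preserving the remaining bits.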
+static void pcc_chan_acknowledge(struct pcc_chan_info *pchan)
{
- struct acpi_pcct_ext_pcc_shared_memory pcc_hdr;
+ struct acpi_pcct_ext_pcc_shared_memory __iomem *pcc_hdr;
if (pchan->type != ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE)
return;
- /* If the memory region has not been mapped, we cannot
- * determine if we need to send the message, but we still
- * need to set the cmd_update flag before returning.
- */
- if (pchan->chan.shmem == NULL) {
- pcc_chan_reg_read_modify_write(&pchan->cmd_update);
- return;
- }
- memcpy_fromio(&pcc_hdr, pchan->chan.shmem,
- sizeof(struct acpi_pcct_ext_pcc_shared_memory));
+
+ pcc_chan_reg_read_modify_write(&pchan->cmd_update);
+
+ pcc_hdr = pchan->chan.shmem;
+
/*
- * The PCC slave subspace channel needs to set the command complete bit
- * after processing message. If the PCC_ACK_FLAG is set, it should also
- * ring the doorbell.
- *
- * The PCC master subspace channel clears chan_in_use to free channel.
+ * The PCC slave subspace channel needs to set the command
+ * complete bit after processing message. If the PCC_ACK_FLAG
+ * is set, it should also ring the doorbell.
*/
- if (le32_to_cpup(&pcc_hdr.flags) & PCC_ACK_FLAG_MASK)
- pcc_send_data(chan, NULL);
- else
- pcc_chan_reg_read_modify_write(&pchan->cmd_update);
+ if (ioread32(&pcc_hdr->flags) & PCC_CMD_COMPLETION_NOTIFY)
+ pcc_chan_reg_read_modify_write(&pchan->db);
+}
+
+static void *write_response(struct pcc_chan_info *pchan)
+{
+ struct pcc_header pcc_header;
+ void *buffer;
+ int data_len;
+
+ memcpy_fromio(&pcc_header, pchan->chan.shmem,
+ sizeof(pcc_header));
+ data_len = pcc_header.length - sizeof(u32) + sizeof(struct pcc_header);
+
+ buffer = pchan->chan.rx_alloc(pchan->chan.mchan->cl, data_len);
+ if (buffer != NULL)
+ memcpy_fromio(buffer, pchan->chan.shmem, data_len);
+ return buffer;
}
/**
@@ -309,10 +332,14 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
{
struct pcc_chan_info *pchan;
struct mbox_chan *chan = p;
- u64 val;
- int ret;
+ struct pcc_header *pcc_header = chan->active_req;
+ void *handle = NULL;
pchan = chan->con_priv;
+
+ if (pcc_chan_reg_read_modify_write(&pchan->plat_irq_ack))
+ return IRQ_NONE;
+
if (pchan->type == ACPI_PCCT_TYPE_EXT_PCC_MASTER_SUBSPACE &&
!pchan->chan_in_use)
return IRQ_NONE;
@@ -320,23 +347,29 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
if (!pcc_mbox_cmd_complete_check(pchan))
return IRQ_NONE;
- ret = pcc_chan_reg_read(&pchan->error, &val);
- if (ret)
+ if (pcc_mbox_error_check_and_clear(pchan))
return IRQ_NONE;
- val &= pchan->error.status_mask;
- if (val) {
- val &= ~pchan->error.status_mask;
- pcc_chan_reg_write(&pchan->error, val);
- return IRQ_NONE;
- }
- if (pcc_chan_reg_read_modify_write(&pchan->plat_irq_ack))
- return IRQ_NONE;
+ /*
+ * Clear this flag after updating the interrupt ack register and just
+ * before mbox_chan_received_data(), which might call pcc_send_data()
+ * where the flag is set again to start a new transfer. This is
+ * required to avoid any possible race in the update of this flag.
+ */
+ pchan->chan_in_use = false;
- mbox_chan_received_data(chan, NULL);
+ if (pchan->chan.rx_alloc)
+ handle = write_response(pchan);
- check_and_ack(pchan, chan);
- pchan->chan_in_use = false;
+ if (chan->active_req) {
+ pcc_header = chan->active_req;
+ if (pcc_header->flags & PCC_CMD_COMPLETION_NOTIFY)
+ mbox_chan_txdone(chan, 0);
+ }
+
+ mbox_chan_received_data(chan, handle);
+
+ pcc_chan_acknowledge(pchan);
return IRQ_HANDLED;
}
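The ordering above (clear chan_in_use, deliver data, then acknowledge) matters because a client's rx callback may itself transmit. A hypothetical client callback illustrating why (cl_to_chan() and next_cmd are placeholders, not real API):

	static void client_rx_callback(struct mbox_client *cl, void *data)
	{
		/*
		 * This may end up in pcc_send_data(), which sets
		 * chan_in_use again, so the flag must already be
		 * cleared before this callback runs.
		 */
		mbox_send_message(cl_to_chan(cl), next_cmd);
	}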
@@ -356,6 +389,7 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
struct pcc_mbox_chan *
pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id)
{
+ struct pcc_mbox_chan *pcc_mchan;
struct pcc_chan_info *pchan;
struct mbox_chan *chan;
int rc;
@@ -374,7 +408,29 @@ pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id)
if (rc)
return ERR_PTR(rc);
- return &pchan->chan;
+ pcc_mchan = &pchan->chan;
+ pcc_mchan->shmem = acpi_os_ioremap(pcc_mchan->shmem_base_addr,
+ pcc_mchan->shmem_size);
+ if (!pcc_mchan->shmem)
+ goto err;
+
+ pcc_mchan->manage_writes = false;
+
+ /*
+ * This indicates that the channel is ready to accept messages. It
+ * must happen after the channel has registered its callback, but
+ * the mailbox API provides no hook for that. The mailbox client
+ * must therefore set the allocate callback function prior to
+ * sending any messages.
+ */
+ if (pchan->type == ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE)
+ pcc_chan_reg_read_modify_write(&pchan->cmd_update);
+
+ return pcc_mchan;
+
+err:
+ mbox_free_channel(chan);
+ return ERR_PTR(-ENXIO);
}
EXPORT_SYMBOL_GPL(pcc_mbox_request_channel);
@@ -403,23 +459,38 @@ void pcc_mbox_free_channel(struct pcc_mbox_chan *pchan)
}
EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
-int pcc_mbox_ioremap(struct mbox_chan *chan)
+static int pcc_write_to_buffer(struct mbox_chan *chan, void *data)
{
- struct pcc_chan_info *pchan_info;
- struct pcc_mbox_chan *pcc_mbox_chan;
+ struct pcc_chan_info *pchan = chan->con_priv;
+ struct pcc_mbox_chan *pcc_mbox_chan = &pchan->chan;
+ struct pcc_header *pcc_header = data;
- if (!chan || !chan->cl)
+ if (!pchan->chan.manage_writes)
+ return 0;
+
+ /*
+ * The PCC header length includes the command field but not the
+ * other fields of the header.
+ */
+ int len = pcc_header->length - sizeof(u32) + sizeof(struct pcc_header);
+ u64 val;
+
+ pcc_chan_reg_read(&pchan->cmd_complete, &val);
+ if (!val) {
+ pr_info("%s: pchan->cmd_complete not set\n", __func__);
return -1;
- pchan_info = chan->con_priv;
- pcc_mbox_chan = &pchan_info->chan;
- pcc_mbox_chan->shmem = ioremap(pcc_mbox_chan->shmem_base_addr,
- pcc_mbox_chan->shmem_size);
+ }
+ memcpy_toio(pcc_mbox_chan->shmem, data, len);
return 0;
}
-EXPORT_SYMBOL_GPL(pcc_mbox_ioremap);
+
/**
- * pcc_send_data - Called from Mailbox Controller code. Used
+ * pcc_send_data - Called from Mailbox Controller code. If
+ * pchan->chan.manage_writes is set, the command complete
+ * flag is checked and the data is written to the shared
+ * buffer I/O memory.
+ *
+ * If pchan->chan.manage_writes is not set, this is used
+ * here only to ring the channel doorbell. The PCC client
+ * specific read/write is done in the client driver in
+ * order to maintain atomicity over PCC channel once
@@ -435,17 +506,37 @@ static int pcc_send_data(struct mbox_chan *chan, void *data)
int ret;
struct pcc_chan_info *pchan = chan->con_priv;
+ ret = pcc_write_to_buffer(chan, data);
+ if (ret)
+ return ret;
+
ret = pcc_chan_reg_read_modify_write(&pchan->cmd_update);
if (ret)
return ret;
ret = pcc_chan_reg_read_modify_write(&pchan->db);
+
if (!ret && pchan->plat_irq > 0)
pchan->chan_in_use = true;
return ret;
}
+
+static bool pcc_last_tx_done(struct mbox_chan *chan)
+{
+ struct pcc_chan_info *pchan = chan->con_priv;
+ u64 val;
+
+ pcc_chan_reg_read(&pchan->cmd_complete, &val);
+
+ return !!val;
+}
+
/**
* pcc_startup - Called from Mailbox Controller code. Used here
* to request the interrupt.
@@ -491,6 +582,7 @@ static const struct mbox_chan_ops pcc_chan_ops = {
.send_data = pcc_send_data,
.startup = pcc_startup,
.shutdown = pcc_shutdown,
+ .last_tx_done = pcc_last_tx_done,
};
/**
@@ -652,7 +744,8 @@ static int pcc_parse_subspace_db_reg(struct pcc_chan_info *pchan,
ret = pcc_chan_reg_init(&pchan->error,
&pcct_ext->error_status_register,
- 0, 0, pcct_ext->error_status_mask,
+ ~pcct_ext->error_status_mask, 0,
+ pcct_ext->error_status_mask,
"Error Status");
}
return ret;
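With last_tx_done() wired into pcc_chan_ops, a controller running in poll mode can let clients block until the platform clears command-complete. A hypothetical client-side sketch (the device, timeout, message, and poll-mode assumption are illustrative, not part of this patch):

	struct mbox_client cl = {
		.dev = dev,
		.tx_block = true,
		.tx_tout = 500,	/* ms; serviced by last_tx_done() polling */
	};
	struct pcc_mbox_chan *pchan;
	int ret;

	pchan = pcc_mbox_request_channel(&cl, subspace_id);
	if (IS_ERR(pchan))
		return PTR_ERR(pchan);

	ret = mbox_send_message(pchan->mchan, &msg);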
diff --git a/drivers/mailbox/pl320-ipc.c b/drivers/mailbox/pl320-ipc.c
index fbcf07930390..606f26a2a6fd 100644
--- a/drivers/mailbox/pl320-ipc.c
+++ b/drivers/mailbox/pl320-ipc.c
@@ -45,18 +45,6 @@ static DEFINE_MUTEX(ipc_m1_lock);
static DECLARE_COMPLETION(ipc_completion);
static ATOMIC_NOTIFIER_HEAD(ipc_notifier);
-static inline void set_destination(int source, int mbox)
-{
- writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxDSET(mbox));
- writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxMSET(mbox));
-}
-
-static inline void clear_destination(int source, int mbox)
-{
- writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxDCLEAR(mbox));
- writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxMCLEAR(mbox));
-}
-
static void __ipc_send(int mbox, u32 *data)
{
int i;
@@ -164,7 +152,7 @@ err:
return ret;
}
-static struct amba_id pl320_ids[] = {
+static const struct amba_id pl320_ids[] = {
{
.id = 0x00041320,
.mask = 0x000fffff,
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
index f0d1fc0fb9ff..d3a8f6b4a03b 100644
--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -58,7 +58,6 @@ static const struct regmap_config apcs_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x1008,
- .fast_io = true,
};
static int qcom_apcs_ipc_send_data(struct mbox_chan *chan, void *data)
@@ -116,10 +115,18 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
}
if (apcs_data->clk_name) {
- apcs->clk = platform_device_register_data(&pdev->dev,
- apcs_data->clk_name,
- PLATFORM_DEVID_AUTO,
- NULL, 0);
+ struct device_node *np = of_get_child_by_name(pdev->dev.of_node,
+ "clock-controller");
+ struct platform_device_info pdevinfo = {
+ .parent = &pdev->dev,
+ .name = apcs_data->clk_name,
+ .id = PLATFORM_DEVID_AUTO,
+ .fwnode = of_fwnode_handle(np) ?: pdev->dev.fwnode,
+ .of_node_reused = !np,
+ };
+
+ apcs->clk = platform_device_register_full(&pdevinfo);
+ of_node_put(np);
if (IS_ERR(apcs->clk))
dev_err(&pdev->dev, "failed to register APCS clk\n");
}
@@ -157,6 +164,7 @@ static const struct of_device_id qcom_apcs_ipc_of_match[] = {
{ .compatible = "qcom,sm6125-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,sm6115-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,ipq5332-apcs-apps-global", .data = &ipq6018_apcs_data },
+ { .compatible = "qcom,ipq5424-apcs-apps-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,ipq8074-apcs-apps-global", .data = &ipq6018_apcs_data },
{ .compatible = "qcom,sc7180-apss-shared", .data = &apps_shared_apcs_data },
{ .compatible = "qcom,sc8180x-apss-shared", .data = &apps_shared_apcs_data },
diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c
index 14c7907c6632..d957d989c0ce 100644
--- a/drivers/mailbox/qcom-ipcc.c
+++ b/drivers/mailbox/qcom-ipcc.c
@@ -14,6 +14,7 @@
#include <dt-bindings/mailbox/qcom-ipcc.h>
/* IPCC Register offsets */
+#define IPCC_REG_CONFIG 0x08
#define IPCC_REG_SEND_ID 0x0c
#define IPCC_REG_RECV_ID 0x10
#define IPCC_REG_RECV_SIGNAL_ENABLE 0x14
@@ -21,6 +22,7 @@
#define IPCC_REG_RECV_SIGNAL_CLEAR 0x1c
#define IPCC_REG_CLIENT_CLEAR 0x38
+#define IPCC_CLEAR_ON_RECV_RD BIT(0)
#define IPCC_SIGNAL_ID_MASK GENMASK(15, 0)
#define IPCC_CLIENT_ID_MASK GENMASK(31, 16)
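As an aside, the two masks above pack the client and signal IDs into a single register word; a sketch of decoding a RECV_ID value with the standard bitfield helpers (the helper functions themselves are illustrative, not part of the driver):

	#include <linux/bitfield.h>

	static u16 ipcc_recv_client(u32 recv_id)
	{
		return FIELD_GET(IPCC_CLIENT_ID_MASK, recv_id);
	}

	static u16 ipcc_recv_signal(u32 recv_id)
	{
		return FIELD_GET(IPCC_SIGNAL_ID_MASK, recv_id);
	}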
@@ -274,6 +276,7 @@ static int qcom_ipcc_pm_resume(struct device *dev)
static int qcom_ipcc_probe(struct platform_device *pdev)
{
struct qcom_ipcc *ipcc;
+ u32 config_value;
static int id;
char *name;
int ret;
@@ -288,6 +291,19 @@ static int qcom_ipcc_probe(struct platform_device *pdev)
if (IS_ERR(ipcc->base))
return PTR_ERR(ipcc->base);
+ /*
+ * It is possible that the boot firmware used the same IPCC instance
+ * as the HLOS and left CLEAR_ON_RECV_RD set, which means the
+ * interrupt pending registers are cleared when RECV_ID is read. The
+ * register automatically updates to the next pending interrupt/client
+ * status based on priority.
+ */
+ config_value = readl(ipcc->base + IPCC_REG_CONFIG);
+ if (config_value & IPCC_CLEAR_ON_RECV_RD) {
+ config_value &= ~(IPCC_CLEAR_ON_RECV_RD);
+ writel(config_value, ipcc->base + IPCC_REG_CONFIG);
+ }
+
ipcc->irq = platform_get_irq(pdev, 0);
if (ipcc->irq < 0)
return ipcc->irq;
@@ -296,8 +312,7 @@ static int qcom_ipcc_probe(struct platform_device *pdev)
if (!name)
return -ENOMEM;
- ipcc->irq_domain = irq_domain_add_tree(pdev->dev.of_node,
- &qcom_ipcc_irq_ops, ipcc);
+ ipcc->irq_domain = irq_domain_create_tree(dev_fwnode(&pdev->dev), &qcom_ipcc_irq_ops, ipcc);
if (!ipcc->irq_domain)
return -ENOMEM;
diff --git a/drivers/mailbox/riscv-sbi-mpxy-mbox.c b/drivers/mailbox/riscv-sbi-mpxy-mbox.c
new file mode 100644
index 000000000000..7c9c006b7244
--- /dev/null
+++ b/drivers/mailbox/riscv-sbi-mpxy-mbox.c
@@ -0,0 +1,1019 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RISC-V SBI Message Proxy (MPXY) mailbox controller driver
+ *
+ * Copyright (C) 2025 Ventana Micro Systems Inc.
+ */
+
+#include <linux/acpi.h>
+#include <linux/cpu.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/irqchip/riscv-imsic.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox/riscv-rpmi-message.h>
+#include <linux/minmax.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_irq.h>
+#include <linux/percpu.h>
+#include <linux/platform_device.h>
+#include <linux/smp.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/sbi.h>
+
+/* ====== SBI MPXY extension data structures ====== */
+
+/* SBI MPXY MSI related channel attributes */
+struct sbi_mpxy_msi_info {
+ /* Lower 32-bits of the MSI target address */
+ u32 msi_addr_lo;
+ /* Upper 32-bits of the MSI target address */
+ u32 msi_addr_hi;
+ /* MSI data value */
+ u32 msi_data;
+};
+
+/*
+ * SBI MPXY standard channel attributes.
+ *
+ * NOTE: The attribute fields follow the sequence defined in the
+ * attribute table of the spec (i.e. the order of
+ * enum sbi_mpxy_attribute_id).
+ */
+struct sbi_mpxy_channel_attrs {
+ /* Message protocol ID */
+ u32 msg_proto_id;
+ /* Message protocol version */
+ u32 msg_proto_version;
+ /* Message protocol maximum message length */
+ u32 msg_max_len;
+ /* Message protocol message send timeout in microseconds */
+ u32 msg_send_timeout;
+ /* Message protocol message completion timeout in microseconds */
+ u32 msg_completion_timeout;
+ /* Bit array for channel capabilities */
+ u32 capability;
+ /* SSE event ID */
+ u32 sse_event_id;
+ /* MSI enable/disable control knob */
+ u32 msi_control;
+ /* Channel MSI info */
+ struct sbi_mpxy_msi_info msi_info;
+ /* Events state control */
+ u32 events_state_ctrl;
+};
+
+/*
+ * RPMI-specific SBI MPXY channel attributes.
+ *
+ * NOTE: The attribute fields follow the sequence defined in the
+ * attribute table of the spec (i.e. the order of
+ * enum sbi_mpxy_rpmi_attribute_id).
+ */
+struct sbi_mpxy_rpmi_channel_attrs {
+ /* RPMI service group ID */
+ u32 servicegroup_id;
+ /* RPMI service group version */
+ u32 servicegroup_version;
+ /* RPMI implementation ID */
+ u32 impl_id;
+ /* RPMI implementation version */
+ u32 impl_version;
+};
+
+/* SBI MPXY channel IDs data in shared memory */
+struct sbi_mpxy_channel_ids_data {
+ /* Remaining number of channel ids */
+ __le32 remaining;
+ /* Returned channel ids in current function call */
+ __le32 returned;
+ /* Returned channel id array */
+ __le32 channel_array[];
+};
+
+/* SBI MPXY notification data in shared memory */
+struct sbi_mpxy_notification_data {
+ /* Remaining number of notification events */
+ __le32 remaining;
+ /* Number of notification events returned */
+ __le32 returned;
+ /* Number of notification events lost */
+ __le32 lost;
+ /* Reserved for future use */
+ __le32 reserved;
+ /* Returned notification events data */
+ u8 events_data[];
+};
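The four __le32 header fields above occupy 16 bytes ahead of events_data, which is the constant mpxy_get_notifications() below adds to the returned event-data length. A compile-time check one could add (a sketch, not part of the driver):

	BUILD_BUG_ON(offsetof(struct sbi_mpxy_notification_data, events_data) != 16);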
+
+/* ====== MPXY data structures & helper routines ====== */
+
+/* MPXY Per-CPU or local context */
+struct mpxy_local {
+ /* Shared memory base address */
+ void *shmem;
+ /* Shared memory physical address */
+ phys_addr_t shmem_phys_addr;
+ /* Flag representing whether shared memory is active or not */
+ bool shmem_active;
+};
+
+static DEFINE_PER_CPU(struct mpxy_local, mpxy_local);
+static unsigned long mpxy_shmem_size;
+static bool mpxy_shmem_init_done;
+
+static int mpxy_get_channel_count(u32 *channel_count)
+{
+ struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
+ struct sbi_mpxy_channel_ids_data *sdata = mpxy->shmem;
+ u32 remaining, returned;
+ struct sbiret sret;
+
+ if (!mpxy->shmem_active)
+ return -ENODEV;
+ if (!channel_count)
+ return -EINVAL;
+
+ get_cpu();
+
+ /* Get the remaining and returned fields to calculate total */
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_GET_CHANNEL_IDS,
+ 0, 0, 0, 0, 0, 0);
+ if (sret.error)
+ goto err_put_cpu;
+
+ remaining = le32_to_cpu(sdata->remaining);
+ returned = le32_to_cpu(sdata->returned);
+ *channel_count = remaining + returned;
+
+err_put_cpu:
+ put_cpu();
+ return sbi_err_map_linux_errno(sret.error);
+}
+
+static int mpxy_get_channel_ids(u32 channel_count, u32 *channel_ids)
+{
+ struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
+ struct sbi_mpxy_channel_ids_data *sdata = mpxy->shmem;
+ u32 remaining, returned, count, start_index = 0;
+ struct sbiret sret;
+
+ if (!mpxy->shmem_active)
+ return -ENODEV;
+ if (!channel_count || !channel_ids)
+ return -EINVAL;
+
+ get_cpu();
+
+ do {
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_GET_CHANNEL_IDS,
+ start_index, 0, 0, 0, 0, 0);
+ if (sret.error)
+ goto err_put_cpu;
+
+ remaining = le32_to_cpu(sdata->remaining);
+ returned = le32_to_cpu(sdata->returned);
+
+ count = min(returned, channel_count - start_index);
+ memcpy_from_le32(&channel_ids[start_index], sdata->channel_array, count);
+ start_index += count;
+ } while (remaining && start_index < channel_count);
+
+err_put_cpu:
+ put_cpu();
+ return sbi_err_map_linux_errno(sret.error);
+}
+
+static int mpxy_read_attrs(u32 channel_id, u32 base_attrid, u32 attr_count,
+ u32 *attrs_buf)
+{
+ struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
+ struct sbiret sret;
+
+ if (!mpxy->shmem_active)
+ return -ENODEV;
+ if (!attr_count || !attrs_buf)
+ return -EINVAL;
+
+ get_cpu();
+
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_READ_ATTRS,
+ channel_id, base_attrid, attr_count, 0, 0, 0);
+ if (sret.error)
+ goto err_put_cpu;
+
+ memcpy_from_le32(attrs_buf, (__le32 *)mpxy->shmem, attr_count);
+
+err_put_cpu:
+ put_cpu();
+ return sbi_err_map_linux_errno(sret.error);
+}
+
+static int mpxy_write_attrs(u32 channel_id, u32 base_attrid, u32 attr_count,
+ u32 *attrs_buf)
+{
+ struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
+ struct sbiret sret;
+
+ if (!mpxy->shmem_active)
+ return -ENODEV;
+ if (!attr_count || !attrs_buf)
+ return -EINVAL;
+
+ get_cpu();
+
+ memcpy_to_le32((__le32 *)mpxy->shmem, attrs_buf, attr_count);
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_WRITE_ATTRS,
+ channel_id, base_attrid, attr_count, 0, 0, 0);
+
+ put_cpu();
+ return sbi_err_map_linux_errno(sret.error);
+}
+
+static int mpxy_send_message_with_resp(u32 channel_id, u32 msg_id,
+ void *tx, unsigned long tx_len,
+ void *rx, unsigned long max_rx_len,
+ unsigned long *rx_len)
+{
+ struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
+ unsigned long rx_bytes;
+ struct sbiret sret;
+
+ if (!mpxy->shmem_active)
+ return -ENODEV;
+ if (!tx && tx_len)
+ return -EINVAL;
+
+ get_cpu();
+
+ /* Message protocols are allowed to have messages with no data */
+ if (tx_len)
+ memcpy(mpxy->shmem, tx, tx_len);
+
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_SEND_MSG_WITH_RESP,
+ channel_id, msg_id, tx_len, 0, 0, 0);
+ if (rx && !sret.error) {
+ rx_bytes = sret.value;
+ if (rx_bytes > max_rx_len) {
+ put_cpu();
+ return -ENOSPC;
+ }
+
+ memcpy(rx, mpxy->shmem, rx_bytes);
+ if (rx_len)
+ *rx_len = rx_bytes;
+ }
+
+ put_cpu();
+ return sbi_err_map_linux_errno(sret.error);
+}
+
+static int mpxy_send_message_without_resp(u32 channel_id, u32 msg_id,
+ void *tx, unsigned long tx_len)
+{
+ struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
+ struct sbiret sret;
+
+ if (!mpxy->shmem_active)
+ return -ENODEV;
+ if (!tx && tx_len)
+ return -EINVAL;
+
+ get_cpu();
+
+ /* Message protocols are allowed to have messages with no data */
+ if (tx_len)
+ memcpy(mpxy->shmem, tx, tx_len);
+
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_SEND_MSG_WITHOUT_RESP,
+ channel_id, msg_id, tx_len, 0, 0, 0);
+
+ put_cpu();
+ return sbi_err_map_linux_errno(sret.error);
+}
+
+static int mpxy_get_notifications(u32 channel_id,
+ struct sbi_mpxy_notification_data *notif_data,
+ unsigned long *events_data_len)
+{
+ struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
+ struct sbiret sret;
+
+ if (!mpxy->shmem_active)
+ return -ENODEV;
+ if (!notif_data || !events_data_len)
+ return -EINVAL;
+
+ get_cpu();
+
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_GET_NOTIFICATION_EVENTS,
+ channel_id, 0, 0, 0, 0, 0);
+ if (sret.error)
+ goto err_put_cpu;
+
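+ /*
+ * The SBI call returns the events data length in sret.value; the
+ * 16-byte notification header precedes it in shared memory.
+ */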
+ memcpy(notif_data, mpxy->shmem, sret.value + 16);
+ *events_data_len = sret.value;
+
+err_put_cpu:
+ put_cpu();
+ return sbi_err_map_linux_errno(sret.error);
+}
+
+static int mpxy_get_shmem_size(unsigned long *shmem_size)
+{
+ struct sbiret sret;
+
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_GET_SHMEM_SIZE,
+ 0, 0, 0, 0, 0, 0);
+ if (sret.error)
+ return sbi_err_map_linux_errno(sret.error);
+ if (shmem_size)
+ *shmem_size = sret.value;
+ return 0;
+}
+
+static int mpxy_setup_shmem(unsigned int cpu)
+{
+ struct page *shmem_page;
+ struct mpxy_local *mpxy;
+ struct sbiret sret;
+
+ mpxy = per_cpu_ptr(&mpxy_local, cpu);
+ if (mpxy->shmem_active)
+ return 0;
+
+ shmem_page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(mpxy_shmem_size));
+ if (!shmem_page)
+ return -ENOMEM;
+
+ /*
+ * Linux sets up the shmem in MPXY OVERWRITE mode,
+ * i.e. flags[1:0] = 00b.
+ */
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_SET_SHMEM,
+ page_to_phys(shmem_page), 0, 0, 0, 0, 0);
+ if (sret.error) {
+ free_pages((unsigned long)page_to_virt(shmem_page),
+ get_order(mpxy_shmem_size));
+ return sbi_err_map_linux_errno(sret.error);
+ }
+
+ mpxy->shmem = page_to_virt(shmem_page);
+ mpxy->shmem_phys_addr = page_to_phys(shmem_page);
+ mpxy->shmem_active = true;
+
+ return 0;
+}
+
+/* ====== MPXY mailbox data structures ====== */
+
+/* MPXY mailbox channel */
+struct mpxy_mbox_channel {
+ struct mpxy_mbox *mbox;
+ u32 channel_id;
+ struct sbi_mpxy_channel_attrs attrs;
+ struct sbi_mpxy_rpmi_channel_attrs rpmi_attrs;
+ struct sbi_mpxy_notification_data *notif;
+ u32 max_xfer_len;
+ bool have_events_state;
+ u32 msi_index;
+ u32 msi_irq;
+ bool started;
+};
+
+/* MPXY mailbox */
+struct mpxy_mbox {
+ struct device *dev;
+ u32 channel_count;
+ struct mpxy_mbox_channel *channels;
+ u32 msi_count;
+ struct mpxy_mbox_channel **msi_index_to_channel;
+ struct mbox_controller controller;
+};
+
+/* ====== MPXY RPMI processing ====== */
+
+static void mpxy_mbox_send_rpmi_data(struct mpxy_mbox_channel *mchan,
+ struct rpmi_mbox_message *msg)
+{
+ msg->error = 0;
+ switch (msg->type) {
+ case RPMI_MBOX_MSG_TYPE_GET_ATTRIBUTE:
+ switch (msg->attr.id) {
+ case RPMI_MBOX_ATTR_SPEC_VERSION:
+ msg->attr.value = mchan->attrs.msg_proto_version;
+ break;
+ case RPMI_MBOX_ATTR_MAX_MSG_DATA_SIZE:
+ msg->attr.value = mchan->max_xfer_len;
+ break;
+ case RPMI_MBOX_ATTR_SERVICEGROUP_ID:
+ msg->attr.value = mchan->rpmi_attrs.servicegroup_id;
+ break;
+ case RPMI_MBOX_ATTR_SERVICEGROUP_VERSION:
+ msg->attr.value = mchan->rpmi_attrs.servicegroup_version;
+ break;
+ case RPMI_MBOX_ATTR_IMPL_ID:
+ msg->attr.value = mchan->rpmi_attrs.impl_id;
+ break;
+ case RPMI_MBOX_ATTR_IMPL_VERSION:
+ msg->attr.value = mchan->rpmi_attrs.impl_version;
+ break;
+ default:
+ msg->error = -EOPNOTSUPP;
+ break;
+ }
+ break;
+ case RPMI_MBOX_MSG_TYPE_SET_ATTRIBUTE:
+ /* None of the RPMI linux mailbox attributes are writeable */
+ msg->error = -EOPNOTSUPP;
+ break;
+ case RPMI_MBOX_MSG_TYPE_SEND_WITH_RESPONSE:
+ if ((!msg->data.request && msg->data.request_len) ||
+ (msg->data.request && msg->data.request_len > mchan->max_xfer_len) ||
+ (!msg->data.response && msg->data.max_response_len)) {
+ msg->error = -EINVAL;
+ break;
+ }
+ if (!(mchan->attrs.capability & SBI_MPXY_CHAN_CAP_SEND_WITH_RESP)) {
+ msg->error = -EIO;
+ break;
+ }
+ msg->error = mpxy_send_message_with_resp(mchan->channel_id,
+ msg->data.service_id,
+ msg->data.request,
+ msg->data.request_len,
+ msg->data.response,
+ msg->data.max_response_len,
+ &msg->data.out_response_len);
+ break;
+ case RPMI_MBOX_MSG_TYPE_SEND_WITHOUT_RESPONSE:
+ if ((!msg->data.request && msg->data.request_len) ||
+ (msg->data.request && msg->data.request_len > mchan->max_xfer_len)) {
+ msg->error = -EINVAL;
+ break;
+ }
+ if (!(mchan->attrs.capability & SBI_MPXY_CHAN_CAP_SEND_WITHOUT_RESP)) {
+ msg->error = -EIO;
+ break;
+ }
+ msg->error = mpxy_send_message_without_resp(mchan->channel_id,
+ msg->data.service_id,
+ msg->data.request,
+ msg->data.request_len);
+ break;
+ default:
+ msg->error = -EOPNOTSUPP;
+ break;
+ }
+}
+
+static void mpxy_mbox_peek_rpmi_data(struct mbox_chan *chan,
+ struct mpxy_mbox_channel *mchan,
+ struct sbi_mpxy_notification_data *notif,
+ unsigned long events_data_len)
+{
+ struct rpmi_notification_event *event;
+ struct rpmi_mbox_message msg;
+ unsigned long pos = 0;
+
+ while (pos < events_data_len && (events_data_len - pos) >= sizeof(*event)) {
+ event = (struct rpmi_notification_event *)(notif->events_data + pos);
+
+ msg.type = RPMI_MBOX_MSG_TYPE_NOTIFICATION_EVENT;
+ msg.notif.event_datalen = le16_to_cpu(event->event_datalen);
+ msg.notif.event_id = event->event_id;
+ msg.notif.event_data = event->event_data;
+ msg.error = 0;
+
+ mbox_chan_received_data(chan, &msg);
+ pos += sizeof(*event) + msg.notif.event_datalen;
+ }
+}
+
+static int mpxy_mbox_read_rpmi_attrs(struct mpxy_mbox_channel *mchan)
+{
+ return mpxy_read_attrs(mchan->channel_id,
+ SBI_MPXY_ATTR_MSGPROTO_ATTR_START,
+ sizeof(mchan->rpmi_attrs) / sizeof(u32),
+ (u32 *)&mchan->rpmi_attrs);
+}
+
+/* ====== MPXY mailbox callbacks ====== */
+
+static int mpxy_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct mpxy_mbox_channel *mchan = chan->con_priv;
+
+ if (mchan->attrs.msg_proto_id == SBI_MPXY_MSGPROTO_RPMI_ID) {
+ mpxy_mbox_send_rpmi_data(mchan, data);
+ return 0;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static bool mpxy_mbox_peek_data(struct mbox_chan *chan)
+{
+ struct mpxy_mbox_channel *mchan = chan->con_priv;
+ struct sbi_mpxy_notification_data *notif = mchan->notif;
+ bool have_notifications = false;
+ unsigned long data_len;
+ int rc;
+
+ if (!(mchan->attrs.capability & SBI_MPXY_CHAN_CAP_GET_NOTIFICATIONS))
+ return false;
+
+ do {
+ rc = mpxy_get_notifications(mchan->channel_id, notif, &data_len);
+ if (rc || !data_len)
+ break;
+
+ if (mchan->attrs.msg_proto_id == SBI_MPXY_MSGPROTO_RPMI_ID)
+ mpxy_mbox_peek_rpmi_data(chan, mchan, notif, data_len);
+
+ have_notifications = true;
+ } while (1);
+
+ return have_notifications;
+}
+
+static irqreturn_t mpxy_mbox_irq_thread(int irq, void *dev_id)
+{
+ mpxy_mbox_peek_data(dev_id);
+ return IRQ_HANDLED;
+}
+
+static int mpxy_mbox_setup_msi(struct mbox_chan *chan,
+ struct mpxy_mbox_channel *mchan)
+{
+ struct device *dev = mchan->mbox->dev;
+ int rc;
+
+ /* Do nothing if MSI not supported */
+ if (mchan->msi_irq == U32_MAX)
+ return 0;
+
+ /* Fail if MSI already enabled */
+ if (mchan->attrs.msi_control)
+ return -EALREADY;
+
+ /* Request channel MSI handler */
+ rc = request_threaded_irq(mchan->msi_irq, NULL, mpxy_mbox_irq_thread,
+ 0, dev_name(dev), chan);
+ if (rc) {
+ dev_err(dev, "failed to request MPXY channel 0x%x IRQ\n",
+ mchan->channel_id);
+ return rc;
+ }
+
+ /* Enable channel MSI control */
+ mchan->attrs.msi_control = 1;
+ rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_MSI_CONTROL,
+ 1, &mchan->attrs.msi_control);
+ if (rc) {
+ dev_err(dev, "enable MSI control failed for MPXY channel 0x%x\n",
+ mchan->channel_id);
+ mchan->attrs.msi_control = 0;
+ free_irq(mchan->msi_irq, chan);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void mpxy_mbox_cleanup_msi(struct mbox_chan *chan,
+ struct mpxy_mbox_channel *mchan)
+{
+ struct device *dev = mchan->mbox->dev;
+ int rc;
+
+ /* Do nothing if MSI not supported */
+ if (mchan->msi_irq == U32_MAX)
+ return;
+
+ /* Do nothing if MSI already disabled */
+ if (!mchan->attrs.msi_control)
+ return;
+
+ /* Disable channel MSI control */
+ mchan->attrs.msi_control = 0;
+ rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_MSI_CONTROL,
+ 1, &mchan->attrs.msi_control);
+ if (rc) {
+ dev_err(dev, "disable MSI control failed for MPXY channel 0x%x\n",
+ mchan->channel_id);
+ }
+
+ /* Free channel MSI handler */
+ free_irq(mchan->msi_irq, chan);
+}
+
+static int mpxy_mbox_setup_events(struct mpxy_mbox_channel *mchan)
+{
+ struct device *dev = mchan->mbox->dev;
+ int rc;
+
+ /* Do nothing if events state not supported */
+ if (!mchan->have_events_state)
+ return 0;
+
+ /* Fail if events state already enabled */
+ if (mchan->attrs.events_state_ctrl)
+ return -EALREADY;
+
+ /* Enable channel events state */
+ mchan->attrs.events_state_ctrl = 1;
+ rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_EVENTS_STATE_CONTROL,
+ 1, &mchan->attrs.events_state_ctrl);
+ if (rc) {
+ dev_err(dev, "enable events state failed for MPXY channel 0x%x\n",
+ mchan->channel_id);
+ mchan->attrs.events_state_ctrl = 0;
+ return rc;
+ }
+
+ return 0;
+}
+
+static void mpxy_mbox_cleanup_events(struct mpxy_mbox_channel *mchan)
+{
+ struct device *dev = mchan->mbox->dev;
+ int rc;
+
+ /* Do nothing if events state not supported */
+ if (!mchan->have_events_state)
+ return;
+
+ /* Do nothing if events state already disabled */
+ if (!mchan->attrs.events_state_ctrl)
+ return;
+
+ /* Disable channel events state */
+ mchan->attrs.events_state_ctrl = 0;
+ rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_EVENTS_STATE_CONTROL,
+ 1, &mchan->attrs.events_state_ctrl);
+ if (rc)
+ dev_err(dev, "disable events state failed for MPXY channel 0x%x\n",
+ mchan->channel_id);
+}
+
+static int mpxy_mbox_startup(struct mbox_chan *chan)
+{
+ struct mpxy_mbox_channel *mchan = chan->con_priv;
+ int rc;
+
+ if (mchan->started)
+ return -EALREADY;
+
+ /* Setup channel MSI */
+ rc = mpxy_mbox_setup_msi(chan, mchan);
+ if (rc)
+ return rc;
+
+ /* Setup channel notification events */
+ rc = mpxy_mbox_setup_events(mchan);
+ if (rc) {
+ mpxy_mbox_cleanup_msi(chan, mchan);
+ return rc;
+ }
+
+ /* Mark the channel as started */
+ mchan->started = true;
+
+ return 0;
+}
+
+static void mpxy_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct mpxy_mbox_channel *mchan = chan->con_priv;
+
+ if (!mchan->started)
+ return;
+
+ /* Mark the channel as stopped */
+ mchan->started = false;
+
+ /* Cleanup channel notification events */
+ mpxy_mbox_cleanup_events(mchan);
+
+ /* Cleanup channel MSI */
+ mpxy_mbox_cleanup_msi(chan, mchan);
+}
+
+static const struct mbox_chan_ops mpxy_mbox_ops = {
+ .send_data = mpxy_mbox_send_data,
+ .peek_data = mpxy_mbox_peek_data,
+ .startup = mpxy_mbox_startup,
+ .shutdown = mpxy_mbox_shutdown,
+};
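A hypothetical RPMI client sketch showing how a consumer drives send_data() above (the channel handle and error handling are assumptions; the message fields come from the driver code):

	struct rpmi_mbox_message msg = {
		.type = RPMI_MBOX_MSG_TYPE_GET_ATTRIBUTE,
		.attr.id = RPMI_MBOX_ATTR_SPEC_VERSION,
	};
	int ret;

	ret = mbox_send_message(chan, &msg);
	if (ret < 0 || msg.error)
		return msg.error ?: ret;
	/* msg.attr.value now holds the protocol version */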
+
+/* ====== MPXY platform driver ===== */
+
+static void mpxy_mbox_msi_write(struct msi_desc *desc, struct msi_msg *msg)
+{
+ struct device *dev = msi_desc_to_dev(desc);
+ struct mpxy_mbox *mbox = dev_get_drvdata(dev);
+ struct mpxy_mbox_channel *mchan;
+ struct sbi_mpxy_msi_info *minfo;
+ int rc;
+
+ mchan = mbox->msi_index_to_channel[desc->msi_index];
+ if (!mchan) {
+ dev_warn(dev, "MPXY channel not available for MSI index %d\n",
+ desc->msi_index);
+ return;
+ }
+
+ minfo = &mchan->attrs.msi_info;
+ minfo->msi_addr_lo = msg->address_lo;
+ minfo->msi_addr_hi = msg->address_hi;
+ minfo->msi_data = msg->data;
+
+ rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_MSI_ADDR_LO,
+ sizeof(*minfo) / sizeof(u32), (u32 *)minfo);
+ if (rc) {
+ dev_warn(dev, "failed to write MSI info for MPXY channel 0x%x\n",
+ mchan->channel_id);
+ }
+}
+
+static struct mbox_chan *mpxy_mbox_fw_xlate(struct mbox_controller *ctlr,
+ const struct fwnode_reference_args *pa)
+{
+ struct mpxy_mbox *mbox = container_of(ctlr, struct mpxy_mbox, controller);
+ struct mpxy_mbox_channel *mchan;
+ u32 i;
+
+ if (pa->nargs != 2)
+ return ERR_PTR(-EINVAL);
+
+ for (i = 0; i < mbox->channel_count; i++) {
+ mchan = &mbox->channels[i];
+ if (mchan->channel_id == pa->args[0] &&
+ mchan->attrs.msg_proto_id == pa->args[1])
+ return &mbox->controller.chans[i];
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+
+static int mpxy_mbox_populate_channels(struct mpxy_mbox *mbox)
+{
+ u32 i, *channel_ids __free(kfree) = NULL;
+ struct mpxy_mbox_channel *mchan;
+ int rc;
+
+ /* Find out the number of channels */
+ rc = mpxy_get_channel_count(&mbox->channel_count);
+ if (rc)
+ return dev_err_probe(mbox->dev, rc, "failed to get number of MPXY channels\n");
+ if (!mbox->channel_count)
+ return dev_err_probe(mbox->dev, -ENODEV, "no MPXY channels available\n");
+
+ /* Allocate and fetch all channel IDs */
+ channel_ids = kcalloc(mbox->channel_count, sizeof(*channel_ids), GFP_KERNEL);
+ if (!channel_ids)
+ return -ENOMEM;
+ rc = mpxy_get_channel_ids(mbox->channel_count, channel_ids);
+ if (rc)
+ return dev_err_probe(mbox->dev, rc, "failed to get MPXY channel IDs\n");
+
+ /* Populate all channels */
+ mbox->channels = devm_kcalloc(mbox->dev, mbox->channel_count,
+ sizeof(*mbox->channels), GFP_KERNEL);
+ if (!mbox->channels)
+ return -ENOMEM;
+ for (i = 0; i < mbox->channel_count; i++) {
+ mchan = &mbox->channels[i];
+ mchan->mbox = mbox;
+ mchan->channel_id = channel_ids[i];
+
+ rc = mpxy_read_attrs(mchan->channel_id, SBI_MPXY_ATTR_MSG_PROT_ID,
+ sizeof(mchan->attrs) / sizeof(u32),
+ (u32 *)&mchan->attrs);
+ if (rc) {
+ return dev_err_probe(mbox->dev, rc,
+ "MPXY channel 0x%x read attrs failed\n",
+ mchan->channel_id);
+ }
+
+ if (mchan->attrs.msg_proto_id == SBI_MPXY_MSGPROTO_RPMI_ID) {
+ rc = mpxy_mbox_read_rpmi_attrs(mchan);
+ if (rc) {
+ return dev_err_probe(mbox->dev, rc,
+ "MPXY channel 0x%x read RPMI attrs failed\n",
+ mchan->channel_id);
+ }
+ }
+
+ mchan->notif = devm_kzalloc(mbox->dev, mpxy_shmem_size, GFP_KERNEL);
+ if (!mchan->notif)
+ return -ENOMEM;
+
+ mchan->max_xfer_len = min(mpxy_shmem_size, mchan->attrs.msg_max_len);
+
+ if ((mchan->attrs.capability & SBI_MPXY_CHAN_CAP_GET_NOTIFICATIONS) &&
+ (mchan->attrs.capability & SBI_MPXY_CHAN_CAP_EVENTS_STATE))
+ mchan->have_events_state = true;
+
+ if ((mchan->attrs.capability & SBI_MPXY_CHAN_CAP_GET_NOTIFICATIONS) &&
+ (mchan->attrs.capability & SBI_MPXY_CHAN_CAP_MSI))
+ mchan->msi_index = mbox->msi_count++;
+ else
+ mchan->msi_index = U32_MAX;
+ mchan->msi_irq = U32_MAX;
+ }
+
+ return 0;
+}
+
+static int mpxy_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mpxy_mbox_channel *mchan;
+ struct mpxy_mbox *mbox;
+ int msi_idx, rc;
+ u32 i;
+
+ /*
+ * Initialize MPXY shared memory only once. This also ensures
+ * that SBI MPXY mailbox is probed only once.
+ */
+ if (mpxy_shmem_init_done) {
+ dev_err(dev, "SBI MPXY mailbox already initialized\n");
+ return -EALREADY;
+ }
+
+ /* Probe for SBI MPXY extension */
+ if (sbi_spec_version < sbi_mk_version(1, 0) ||
+ sbi_probe_extension(SBI_EXT_MPXY) <= 0) {
+ dev_info(dev, "SBI MPXY extension not available\n");
+ return -ENODEV;
+ }
+
+ /* Find out the shared memory size */
+ rc = mpxy_get_shmem_size(&mpxy_shmem_size);
+ if (rc)
+ return dev_err_probe(dev, rc, "failed to get MPXY shared memory size\n");
+
+ /*
+ * Setup MPXY shared memory on each CPU
+ *
+ * Note: Don't cleanup MPXY shared memory upon CPU power-down
+ * because the RPMI System MSI irqchip driver needs it to be
+ * available when migrating IRQs in CPU power-down path.
+ */
+ cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "riscv/sbi-mpxy-shmem",
+ mpxy_setup_shmem, NULL);
+
+ /* Mark as MPXY shared memory initialization done */
+ mpxy_shmem_init_done = true;
+
+ /* Allocate mailbox instance */
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ mbox->dev = dev;
+ platform_set_drvdata(pdev, mbox);
+
+ /* Populate mailbox channels */
+ rc = mpxy_mbox_populate_channels(mbox);
+ if (rc)
+ return rc;
+
+ /* Initialize mailbox controller */
+ mbox->controller.txdone_irq = false;
+ mbox->controller.txdone_poll = false;
+ mbox->controller.ops = &mpxy_mbox_ops;
+ mbox->controller.dev = dev;
+ mbox->controller.num_chans = mbox->channel_count;
+ mbox->controller.fw_xlate = mpxy_mbox_fw_xlate;
+ mbox->controller.chans = devm_kcalloc(dev, mbox->channel_count,
+ sizeof(*mbox->controller.chans),
+ GFP_KERNEL);
+ if (!mbox->controller.chans)
+ return -ENOMEM;
+ for (i = 0; i < mbox->channel_count; i++)
+ mbox->controller.chans[i].con_priv = &mbox->channels[i];
+
+ /* Setup MSIs for mailbox (if required) */
+ if (mbox->msi_count) {
+ /*
+ * The device MSI domain for platform devices on RISC-V architecture
+ * is only available after the MSI controller driver is probed so,
+ * explicitly configure here.
+ */
+ if (!dev_get_msi_domain(dev)) {
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+
+ /*
+ * The device MSI domain for OF devices is only set at the
+ * time of populating/creating OF device. If the device MSI
+ * domain is discovered later after the OF device is created
+ * then we need to set it explicitly before using any platform
+ * MSI functions.
+ */
+ if (is_of_node(fwnode)) {
+ of_msi_configure(dev, dev_of_node(dev));
+ } else if (is_acpi_device_node(fwnode)) {
+ struct irq_domain *msi_domain;
+
+ msi_domain = irq_find_matching_fwnode(imsic_acpi_get_fwnode(dev),
+ DOMAIN_BUS_PLATFORM_MSI);
+ dev_set_msi_domain(dev, msi_domain);
+ }
+
+ if (!dev_get_msi_domain(dev))
+ return -EPROBE_DEFER;
+ }
+
+ mbox->msi_index_to_channel = devm_kcalloc(dev, mbox->msi_count,
+ sizeof(*mbox->msi_index_to_channel),
+ GFP_KERNEL);
+ if (!mbox->msi_index_to_channel)
+ return -ENOMEM;
+
+ for (msi_idx = 0; msi_idx < mbox->msi_count; msi_idx++) {
+ for (i = 0; i < mbox->channel_count; i++) {
+ mchan = &mbox->channels[i];
+ if (mchan->msi_index == msi_idx) {
+ mbox->msi_index_to_channel[msi_idx] = mchan;
+ break;
+ }
+ }
+ }
+
+ rc = platform_device_msi_init_and_alloc_irqs(dev, mbox->msi_count,
+ mpxy_mbox_msi_write);
+ if (rc) {
+ return dev_err_probe(dev, rc, "Failed to allocate %d MSIs\n",
+ mbox->msi_count);
+ }
+
+ for (i = 0; i < mbox->channel_count; i++) {
+ mchan = &mbox->channels[i];
+ if (mchan->msi_index == U32_MAX)
+ continue;
+ mchan->msi_irq = msi_get_virq(dev, mchan->msi_index);
+ }
+ }
+
+ /* Register mailbox controller */
+ rc = devm_mbox_controller_register(dev, &mbox->controller);
+ if (rc) {
+ dev_err_probe(dev, rc, "Registering SBI MPXY mailbox failed\n");
+ if (mbox->msi_count)
+ platform_device_msi_free_irqs_all(dev);
+ return rc;
+ }
+
+#ifdef CONFIG_ACPI
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+
+ if (adev)
+ acpi_dev_clear_dependencies(adev);
+#endif
+
+ dev_info(dev, "mailbox registered with %d channels\n",
+ mbox->channel_count);
+ return 0;
+}
+
+static void mpxy_mbox_remove(struct platform_device *pdev)
+{
+ struct mpxy_mbox *mbox = platform_get_drvdata(pdev);
+
+ if (mbox->msi_count)
+ platform_device_msi_free_irqs_all(mbox->dev);
+}
+
+static const struct of_device_id mpxy_mbox_of_match[] = {
+ { .compatible = "riscv,sbi-mpxy-mbox" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mpxy_mbox_of_match);
+
+static const struct acpi_device_id mpxy_mbox_acpi_match[] = {
+ { "RSCV0005" },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, mpxy_mbox_acpi_match);
+
+static struct platform_driver mpxy_mbox_driver = {
+ .driver = {
+ .name = "riscv-sbi-mpxy-mbox",
+ .of_match_table = mpxy_mbox_of_match,
+ .acpi_match_table = mpxy_mbox_acpi_match,
+ },
+ .probe = mpxy_mbox_probe,
+ .remove = mpxy_mbox_remove,
+};
+module_platform_driver(mpxy_mbox_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Anup Patel <apatel@ventanamicro.com>");
+MODULE_DESCRIPTION("RISC-V SBI MPXY mailbox controller driver");
diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c
index 8d5e2d7dc03b..ed9a0bb2bcd8 100644
--- a/drivers/mailbox/tegra-hsp.c
+++ b/drivers/mailbox/tegra-hsp.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2016-2023, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2025, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/delay.h>
@@ -28,12 +28,6 @@
#define HSP_INT_FULL_MASK 0xff
#define HSP_INT_DIMENSIONING 0x380
-#define HSP_nSM_SHIFT 0
-#define HSP_nSS_SHIFT 4
-#define HSP_nAS_SHIFT 8
-#define HSP_nDB_SHIFT 12
-#define HSP_nSI_SHIFT 16
-#define HSP_nINT_MASK 0xf
#define HSP_DB_TRIGGER 0x0
#define HSP_DB_ENABLE 0x4
@@ -97,6 +91,20 @@ struct tegra_hsp_soc {
bool has_per_mb_ie;
bool has_128_bit_mb;
unsigned int reg_stride;
+
+ /* Shifts for dimensioning register. */
+ unsigned int si_shift;
+ unsigned int db_shift;
+ unsigned int as_shift;
+ unsigned int ss_shift;
+ unsigned int sm_shift;
+
+ /* Masks for dimensioning register. */
+ unsigned int si_mask;
+ unsigned int db_mask;
+ unsigned int as_mask;
+ unsigned int ss_mask;
+ unsigned int sm_mask;
};
struct tegra_hsp {
@@ -388,7 +396,6 @@ static void tegra_hsp_sm_recv32(struct tegra_hsp_channel *channel)
value = tegra_hsp_channel_readl(channel, HSP_SM_SHRD_MBOX);
value &= ~HSP_SM_SHRD_MBOX_FULL;
msg = (void *)(unsigned long)value;
- mbox_chan_received_data(channel->chan, msg);
/*
* Need to clear all bits here since some producers, such as TCU, depend
@@ -398,6 +405,8 @@ static void tegra_hsp_sm_recv32(struct tegra_hsp_channel *channel)
* explicitly, so we have to make sure we cover all possible cases.
*/
tegra_hsp_channel_writel(channel, 0x0, HSP_SM_SHRD_MBOX);
+
+ mbox_chan_received_data(channel->chan, msg);
}
static const struct tegra_hsp_sm_ops tegra_hsp_sm_32bit_ops = {
@@ -433,7 +442,6 @@ static void tegra_hsp_sm_recv128(struct tegra_hsp_channel *channel)
value[3] = tegra_hsp_channel_readl(channel, HSP_SHRD_MBOX_TYPE1_DATA3);
msg = (void *)(unsigned long)value;
- mbox_chan_received_data(channel->chan, msg);
/*
* Clear data registers and tag.
@@ -443,6 +451,8 @@ static void tegra_hsp_sm_recv128(struct tegra_hsp_channel *channel)
tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_DATA2);
tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_DATA3);
tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_TAG);
+
+ mbox_chan_received_data(channel->chan, msg);
}
static const struct tegra_hsp_sm_ops tegra_hsp_sm_128bit_ops = {
@@ -745,11 +755,11 @@ static int tegra_hsp_probe(struct platform_device *pdev)
return PTR_ERR(hsp->regs);
value = tegra_hsp_readl(hsp, HSP_INT_DIMENSIONING);
- hsp->num_sm = (value >> HSP_nSM_SHIFT) & HSP_nINT_MASK;
- hsp->num_ss = (value >> HSP_nSS_SHIFT) & HSP_nINT_MASK;
- hsp->num_as = (value >> HSP_nAS_SHIFT) & HSP_nINT_MASK;
- hsp->num_db = (value >> HSP_nDB_SHIFT) & HSP_nINT_MASK;
- hsp->num_si = (value >> HSP_nSI_SHIFT) & HSP_nINT_MASK;
+ hsp->num_sm = (value >> hsp->soc->sm_shift) & hsp->soc->sm_mask;
+ hsp->num_ss = (value >> hsp->soc->ss_shift) & hsp->soc->ss_mask;
+ hsp->num_as = (value >> hsp->soc->as_shift) & hsp->soc->as_mask;
+ hsp->num_db = (value >> hsp->soc->db_shift) & hsp->soc->db_mask;
+ hsp->num_si = (value >> hsp->soc->si_shift) & hsp->soc->si_mask;
err = platform_get_irq_byname_optional(pdev, "doorbell");
if (err >= 0)
@@ -913,6 +923,16 @@ static const struct tegra_hsp_soc tegra186_hsp_soc = {
.has_per_mb_ie = false,
.has_128_bit_mb = false,
.reg_stride = 0x100,
+ .si_shift = 16,
+ .db_shift = 12,
+ .as_shift = 8,
+ .ss_shift = 4,
+ .sm_shift = 0,
+ .si_mask = 0xf,
+ .db_mask = 0xf,
+ .as_mask = 0xf,
+ .ss_mask = 0xf,
+ .sm_mask = 0xf,
};
static const struct tegra_hsp_soc tegra194_hsp_soc = {
@@ -920,6 +940,16 @@ static const struct tegra_hsp_soc tegra194_hsp_soc = {
.has_per_mb_ie = true,
.has_128_bit_mb = false,
.reg_stride = 0x100,
+ .si_shift = 16,
+ .db_shift = 12,
+ .as_shift = 8,
+ .ss_shift = 4,
+ .sm_shift = 0,
+ .si_mask = 0xf,
+ .db_mask = 0xf,
+ .as_mask = 0xf,
+ .ss_mask = 0xf,
+ .sm_mask = 0xf,
};
static const struct tegra_hsp_soc tegra234_hsp_soc = {
@@ -927,6 +957,16 @@ static const struct tegra_hsp_soc tegra234_hsp_soc = {
.has_per_mb_ie = false,
.has_128_bit_mb = true,
.reg_stride = 0x100,
+ .si_shift = 16,
+ .db_shift = 12,
+ .as_shift = 8,
+ .ss_shift = 4,
+ .sm_shift = 0,
+ .si_mask = 0xf,
+ .db_mask = 0xf,
+ .as_mask = 0xf,
+ .ss_mask = 0xf,
+ .sm_mask = 0xf,
};
static const struct tegra_hsp_soc tegra264_hsp_soc = {
@@ -934,6 +974,16 @@ static const struct tegra_hsp_soc tegra264_hsp_soc = {
.has_per_mb_ie = false,
.has_128_bit_mb = true,
.reg_stride = 0x1000,
+ .si_shift = 17,
+ .db_shift = 12,
+ .as_shift = 8,
+ .ss_shift = 4,
+ .sm_shift = 0,
+ .si_mask = 0x1f,
+ .db_mask = 0x1f,
+ .as_mask = 0xf,
+ .ss_mask = 0xf,
+ .sm_mask = 0xf,
};
static const struct of_device_id tegra_hsp_match[] = {
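Worked example for the parameterized dimensioning fields (the register value is illustrative, not from a real chip): on Tegra264, si_shift = 17 and si_mask = 0x1f, so a HSP_INT_DIMENSIONING value of 0x00261488 decodes as num_si = (0x00261488 >> 17) & 0x1f = 0x13, num_db = (0x00261488 >> 12) & 0x1f = 0x1, num_as = 0x4, num_ss = 0x8 and num_sm = 0x8. Read through the Tegra186 layout (si_shift = 16, 4-bit masks) the same value would give num_si = 0x6, which is why the shifts and masks are now per-SoC.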
diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
index aa5249da59b2..967967b2b8a9 100644
--- a/drivers/mailbox/zynqmp-ipi-mailbox.c
+++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
@@ -62,7 +62,8 @@
#define DST_BIT_POS 9U
#define SRC_BITMASK GENMASK(11, 8)
-#define MAX_SGI 16
+/* Macro to represent SGI type for IPI IRQs */
+#define IPI_IRQ_TYPE_SGI 2
/*
* Module parameters
@@ -121,6 +122,7 @@ struct zynqmp_ipi_mbox {
* @dev: device pointer corresponding to the Xilinx ZynqMP
* IPI agent
* @irq: IPI agent interrupt ID
+ * @irq_type: IPI SGI or SPI IRQ type
* @method: IPI SMC or HVC is going to be used
* @local_id: local IPI agent ID
* @virq_sgi: IRQ number mapped to SGI
@@ -130,6 +132,7 @@ struct zynqmp_ipi_mbox {
struct zynqmp_ipi_pdata {
struct device *dev;
int irq;
+ unsigned int irq_type;
unsigned int method;
u32 local_id;
int virq_sgi;
@@ -887,17 +890,14 @@ static void zynqmp_ipi_free_mboxes(struct zynqmp_ipi_pdata *pdata)
struct zynqmp_ipi_mbox *ipi_mbox;
int i;
- if (pdata->irq < MAX_SGI)
+ if (pdata->irq_type == IPI_IRQ_TYPE_SGI)
xlnx_mbox_cleanup_sgi(pdata);
- i = pdata->num_mboxes;
+ i = pdata->num_mboxes - 1;
for (; i >= 0; i--) {
ipi_mbox = &pdata->ipi_mboxes[i];
- if (ipi_mbox->dev.parent) {
- mbox_controller_unregister(&ipi_mbox->mbox);
- if (device_is_registered(&ipi_mbox->dev))
- device_unregister(&ipi_mbox->dev);
- }
+ if (device_is_registered(&ipi_mbox->dev))
+ device_unregister(&ipi_mbox->dev);
}
}
@@ -905,7 +905,7 @@ static int zynqmp_ipi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *nc, *np = pdev->dev.of_node;
- struct zynqmp_ipi_pdata __percpu *pdata;
+ struct zynqmp_ipi_pdata *pdata;
struct of_phandle_args out_irq;
struct zynqmp_ipi_mbox *mbox;
int num_mboxes, ret = -EINVAL;
@@ -959,14 +959,16 @@ static int zynqmp_ipi_probe(struct platform_device *pdev)
dev_err(dev, "failed to parse interrupts\n");
goto free_mbox_dev;
}
- ret = out_irq.args[1];
+
+ /* Use interrupt type to distinguish SGI and SPI interrupts */
+ pdata->irq_type = out_irq.args[0];
/*
* If Interrupt number is in SGI range, then request SGI else request
* IPI system IRQ.
*/
- if (ret < MAX_SGI) {
- pdata->irq = ret;
+ if (pdata->irq_type == IPI_IRQ_TYPE_SGI) {
+ pdata->irq = out_irq.args[1];
ret = xlnx_mbox_init_sgi(pdev, pdata->irq, pdata);
if (ret)
goto free_mbox_dev;