Diffstat (limited to 'drivers/mailbox')
-rw-r--r--  drivers/mailbox/Kconfig                  |   21
-rw-r--r--  drivers/mailbox/Makefile                 |    4
-rw-r--r--  drivers/mailbox/arm_mhuv3.c              |    2
-rw-r--r--  drivers/mailbox/mailbox.c                |   65
-rw-r--r--  drivers/mailbox/mtk-cmdq-mailbox.c       |   12
-rw-r--r--  drivers/mailbox/mtk-gpueb-mailbox.c      |  319
-rw-r--r--  drivers/mailbox/qcom-apcs-ipc-mailbox.c  |    1
-rw-r--r--  drivers/mailbox/riscv-sbi-mpxy-mbox.c    | 1019
-rw-r--r--  drivers/mailbox/zynqmp-ipi-mailbox.c     |   24
9 files changed, 1418 insertions(+), 49 deletions(-)
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 02432d4a5ccd..29f16f220384 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -294,6 +294,16 @@ config MTK_CMDQ_MBOX
critical time limitation, such as updating display configuration
during the vblank.
+config MTK_GPUEB_MBOX
+ tristate "MediaTek GPUEB Mailbox Support"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ help
+ The MediaTek GPUEB mailbox is used to communicate with the embedded
+ controller in charge of GPU frequency and power management on some
+ MediaTek SoCs, such as the MT8196.
+ Say Y or M here if you want to support the MT8196 SoC in your kernel
+ build.
+
config ZYNQMP_IPI_MBOX
tristate "Xilinx ZynqMP IPI Mailbox"
depends on ARCH_ZYNQMP && OF
@@ -369,4 +379,15 @@ config BCM74110_MAILBOX
processor and coprocessor that handles various power management task
and more.
+config RISCV_SBI_MPXY_MBOX
+ tristate "RISC-V SBI Message Proxy (MPXY) Mailbox"
+ depends on RISCV_SBI
+ default RISCV
+ help
+ Mailbox driver implementation for RISC-V SBI Message Proxy (MPXY)
+ extension. This mailbox driver is used to send messages to the
+ remote processor through the SBI implementation (M-mode firmware
+ or HS-mode hypervisor). Say Y here if you want to have this support.
+ If unsure say N.
+
endif
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 98a68f838486..81820a4f5528 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -63,6 +63,8 @@ obj-$(CONFIG_MTK_ADSP_MBOX) += mtk-adsp-mailbox.o
obj-$(CONFIG_MTK_CMDQ_MBOX) += mtk-cmdq-mailbox.o
+obj-$(CONFIG_MTK_GPUEB_MBOX) += mtk-gpueb-mailbox.o
+
obj-$(CONFIG_ZYNQMP_IPI_MBOX) += zynqmp-ipi-mailbox.o
obj-$(CONFIG_SUN6I_MSGBOX) += sun6i-msgbox.o
@@ -78,3 +80,5 @@ obj-$(CONFIG_THEAD_TH1520_MBOX) += mailbox-th1520.o
obj-$(CONFIG_CIX_MBOX) += cix-mailbox.o
obj-$(CONFIG_BCM74110_MAILBOX) += bcm74110-mailbox.o
+
+obj-$(CONFIG_RISCV_SBI_MPXY_MBOX) += riscv-sbi-mpxy-mbox.o
diff --git a/drivers/mailbox/arm_mhuv3.c b/drivers/mailbox/arm_mhuv3.c
index b97e79a5870f..0910da67f8a1 100644
--- a/drivers/mailbox/arm_mhuv3.c
+++ b/drivers/mailbox/arm_mhuv3.c
@@ -945,7 +945,7 @@ static irqreturn_t mhuv3_mbx_comb_interrupt(int irq, void *arg)
if (IS_ERR(data)) {
dev_err(dev,
"Failed to read in-band data. err:%ld\n",
- PTR_ERR(no_free_ptr(data)));
+ PTR_ERR(data));
goto rx_ack;
}
}
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 5cd8ae222073..2acc6ec229a4 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
+#include <linux/property.h>
#include <linux/spinlock.h>
#include "mailbox.h"
@@ -383,34 +384,56 @@ EXPORT_SYMBOL_GPL(mbox_bind_client);
*/
struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
{
- struct device *dev = cl->dev;
+ struct fwnode_reference_args fwspec;
+ struct fwnode_handle *fwnode;
struct mbox_controller *mbox;
struct of_phandle_args spec;
struct mbox_chan *chan;
+ struct device *dev;
+ unsigned int i;
int ret;
- if (!dev || !dev->of_node) {
- pr_debug("%s: No owner device node\n", __func__);
+ dev = cl->dev;
+ if (!dev) {
+ pr_debug("No owner device\n");
return ERR_PTR(-ENODEV);
}
- ret = of_parse_phandle_with_args(dev->of_node, "mboxes", "#mbox-cells",
- index, &spec);
+ fwnode = dev_fwnode(dev);
+ if (!fwnode) {
+ dev_dbg(dev, "No owner fwnode\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ ret = fwnode_property_get_reference_args(fwnode, "mboxes", "#mbox-cells",
+ 0, index, &fwspec);
if (ret) {
- dev_err(dev, "%s: can't parse \"mboxes\" property\n", __func__);
+ dev_err(dev, "%s: can't parse \"%s\" property\n", __func__, "mboxes");
return ERR_PTR(ret);
}
+ spec.np = to_of_node(fwspec.fwnode);
+ spec.args_count = fwspec.nargs;
+ for (i = 0; i < spec.args_count; i++)
+ spec.args[i] = fwspec.args[i];
+
scoped_guard(mutex, &con_mutex) {
chan = ERR_PTR(-EPROBE_DEFER);
- list_for_each_entry(mbox, &mbox_cons, node)
- if (mbox->dev->of_node == spec.np) {
- chan = mbox->of_xlate(mbox, &spec);
- if (!IS_ERR(chan))
- break;
+ list_for_each_entry(mbox, &mbox_cons, node) {
+ if (device_match_fwnode(mbox->dev, fwspec.fwnode)) {
+ if (mbox->fw_xlate) {
+ chan = mbox->fw_xlate(mbox, &fwspec);
+ if (!IS_ERR(chan))
+ break;
+ } else if (mbox->of_xlate) {
+ chan = mbox->of_xlate(mbox, &spec);
+ if (!IS_ERR(chan))
+ break;
+ }
}
+ }
- of_node_put(spec.np);
+ fwnode_handle_put(fwspec.fwnode);
if (IS_ERR(chan))
return chan;
@@ -427,15 +450,8 @@ EXPORT_SYMBOL_GPL(mbox_request_channel);
struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
const char *name)
{
- struct device_node *np = cl->dev->of_node;
- int index;
-
- if (!np) {
- dev_err(cl->dev, "%s() currently only supports DT\n", __func__);
- return ERR_PTR(-EINVAL);
- }
+ int index = device_property_match_string(cl->dev, "mbox-names", name);
- index = of_property_match_string(np, "mbox-names", name);
if (index < 0) {
dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
__func__, name);
@@ -470,9 +486,8 @@ void mbox_free_channel(struct mbox_chan *chan)
}
EXPORT_SYMBOL_GPL(mbox_free_channel);
-static struct mbox_chan *
-of_mbox_index_xlate(struct mbox_controller *mbox,
- const struct of_phandle_args *sp)
+static struct mbox_chan *fw_mbox_index_xlate(struct mbox_controller *mbox,
+ const struct fwnode_reference_args *sp)
{
int ind = sp->args[0];
@@ -523,8 +538,8 @@ int mbox_controller_register(struct mbox_controller *mbox)
spin_lock_init(&chan->lock);
}
- if (!mbox->of_xlate)
- mbox->of_xlate = of_mbox_index_xlate;
+ if (!mbox->fw_xlate && !mbox->of_xlate)
+ mbox->fw_xlate = fw_mbox_index_xlate;
scoped_guard(mutex, &con_mutex)
list_add_tail(&mbox->node, &mbox_cons);
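
With the lookup now going through the fwnode property API, the same client
code resolves "mboxes"/"mbox-names" references regardless of whether the
device was described by DT or another firmware interface. A minimal consumer
sketch against the generic client API (hypothetical demo_* names, a channel
named "tx" assumed):

  #include <linux/mailbox_client.h>
  #include <linux/platform_device.h>

  static void demo_rx(struct mbox_client *cl, void *msg)
  {
  	dev_info(cl->dev, "received message from controller\n");
  }

  static int demo_probe(struct platform_device *pdev)
  {
  	struct mbox_client *cl;
  	struct mbox_chan *chan;

  	cl = devm_kzalloc(&pdev->dev, sizeof(*cl), GFP_KERNEL);
  	if (!cl)
  		return -ENOMEM;

  	cl->dev = &pdev->dev;
  	cl->rx_callback = demo_rx;
  	cl->tx_block = true;
  	cl->tx_tout = 100;	/* ms */

  	/* Resolved through device_property_match_string() on "mbox-names" */
  	chan = mbox_request_channel_byname(cl, "tx");
  	if (IS_ERR(chan))
  		return PTR_ERR(chan);

  	platform_set_drvdata(pdev, chan);
  	return 0;
  }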
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index 532929916e99..654a60f63756 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -379,20 +379,13 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
struct cmdq_task *task;
unsigned long curr_pa, end_pa;
- int ret;
/* Client should not flush new tasks if suspended. */
WARN_ON(cmdq->suspended);
- ret = pm_runtime_get_sync(cmdq->mbox.dev);
- if (ret < 0)
- return ret;
-
task = kzalloc(sizeof(*task), GFP_ATOMIC);
- if (!task) {
- pm_runtime_put_autosuspend(cmdq->mbox.dev);
+ if (!task)
return -ENOMEM;
- }
task->cmdq = cmdq;
INIT_LIST_HEAD(&task->list_entry);
@@ -439,9 +432,6 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
}
list_move_tail(&task->list_entry, &thread->task_busy_list);
- pm_runtime_mark_last_busy(cmdq->mbox.dev);
- pm_runtime_put_autosuspend(cmdq->mbox.dev);
-
return 0;
}
diff --git a/drivers/mailbox/mtk-gpueb-mailbox.c b/drivers/mailbox/mtk-gpueb-mailbox.c
new file mode 100644
index 000000000000..925bcf21f650
--- /dev/null
+++ b/drivers/mailbox/mtk-gpueb-mailbox.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * MediaTek GPUEB mailbox driver for SoCs such as the MT8196
+ *
+ * Copyright (C) 2025, Collabora Ltd.
+ *
+ * Developers harmed in the making of this driver:
+ * - Nicolas Frattaroli <nicolas.frattaroli@collabora.com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#define GPUEB_MBOX_CTL_TX_STS 0x00
+#define GPUEB_MBOX_CTL_IRQ_SET 0x04
+#define GPUEB_MBOX_CTL_IRQ_CLR 0x74
+#define GPUEB_MBOX_CTL_RX_STS 0x78
+
+#define GPUEB_MBOX_FULL BIT(0) /* i.e. we've received data */
+#define GPUEB_MBOX_BLOCKED BIT(1) /* i.e. the channel is shutdown */
+
+#define GPUEB_MBOX_MAX_RX_SIZE 32 /* in bytes */
+
+struct mtk_gpueb_mbox {
+ struct device *dev;
+ struct clk *clk;
+ void __iomem *mbox_mmio;
+ void __iomem *mbox_ctl;
+ struct mbox_controller mbox;
+ struct mtk_gpueb_mbox_chan *ch;
+ int irq;
+ const struct mtk_gpueb_mbox_variant *v;
+};
+
+/**
+ * struct mtk_gpueb_mbox_chan - per-channel runtime data
+ * @ebm: pointer to the parent &struct mtk_gpueb_mbox mailbox
+ * @full_name: descriptive name of channel for IRQ subsystem
+ * @num: channel number, starting at 0
+ * @rx_status: signifies whether channel reception is turned off or full
+ * @c: pointer to the constant &struct mtk_gpueb_mbox_chan_desc channel data
+ */
+struct mtk_gpueb_mbox_chan {
+ struct mtk_gpueb_mbox *ebm;
+ char *full_name;
+ u8 num;
+ atomic_t rx_status;
+ const struct mtk_gpueb_mbox_chan_desc *c;
+};
+
+/**
+ * struct mtk_gpueb_mbox_chan_desc - per-channel constant data
+ * @name: name of this channel
+ * @num: index of this channel, starting at 0
+ * @tx_offset: byte offset measured from mmio base for outgoing data
+ * @tx_len: size, in bytes, of the outgoing data on this channel
+ * @rx_offset: byte offset measured from mmio base for incoming data
+ * @rx_len: size, in bytes, of the incoming data on this channel
+ */
+struct mtk_gpueb_mbox_chan_desc {
+ const char *name;
+ const u8 num;
+ const u16 tx_offset;
+ const u8 tx_len;
+ const u16 rx_offset;
+ const u8 rx_len;
+};
+
+struct mtk_gpueb_mbox_variant {
+ const u8 num_channels;
+ const struct mtk_gpueb_mbox_chan_desc channels[] __counted_by(num_channels);
+};
+
+/**
+ * mtk_gpueb_mbox_read_rx - read RX buffer from MMIO into channel's RX buffer
+ * @buf: buffer to read into
+ * @chan: pointer to the channel to read
+ */
+static void mtk_gpueb_mbox_read_rx(void *buf, struct mtk_gpueb_mbox_chan *chan)
+{
+ memcpy_fromio(buf, chan->ebm->mbox_mmio + chan->c->rx_offset, chan->c->rx_len);
+}
+
+static irqreturn_t mtk_gpueb_mbox_isr(int irq, void *data)
+{
+ struct mtk_gpueb_mbox_chan *ch = data;
+ u32 rx_sts;
+
+ rx_sts = readl(ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_RX_STS);
+
+ if (rx_sts & BIT(ch->num)) {
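+ /*
+  * Claim an idle (0) channel: mark it full and blocked so that
+  * neither send_data() nor a second ISR can race with the
+  * reader thread.
+  */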
+ if (!atomic_cmpxchg(&ch->rx_status, 0, GPUEB_MBOX_FULL | GPUEB_MBOX_BLOCKED))
+ return IRQ_WAKE_THREAD;
+ }
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t mtk_gpueb_mbox_thread(int irq, void *data)
+{
+ struct mtk_gpueb_mbox_chan *ch = data;
+ int status;
+
+ status = atomic_cmpxchg(&ch->rx_status, GPUEB_MBOX_FULL | GPUEB_MBOX_BLOCKED,
+ GPUEB_MBOX_FULL);
+ if (status == (GPUEB_MBOX_FULL | GPUEB_MBOX_BLOCKED)) {
+ u8 buf[GPUEB_MBOX_MAX_RX_SIZE] = {};
+
+ mtk_gpueb_mbox_read_rx(buf, ch);
+ writel(BIT(ch->num), ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_IRQ_CLR);
+ mbox_chan_received_data(&ch->ebm->mbox.chans[ch->num], buf);
+ atomic_set(&ch->rx_status, 0);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int mtk_gpueb_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct mtk_gpueb_mbox_chan *ch = chan->con_priv;
+ u32 *values = data;
+ int i;
+
+ if (atomic_read(&ch->rx_status))
+ return -EBUSY;
+
+ /*
+ * We don't want any fancy nonsense, just write the 32-bit values in
+ * order. memcpy_toio/__iowrite32_copy don't work here, as they may use
+ * writes of different sizes or memory ordering characteristics depending
+ * on the architecture, alignment and the current phase of the moon.
+ */
+ for (i = 0; i < ch->c->tx_len; i += 4)
+ writel(values[i / 4], ch->ebm->mbox_mmio + ch->c->tx_offset + i);
+
+ writel(BIT(ch->num), ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_IRQ_SET);
+
+ return 0;
+}
+
+static int mtk_gpueb_mbox_startup(struct mbox_chan *chan)
+{
+ struct mtk_gpueb_mbox_chan *ch = chan->con_priv;
+ int ret;
+
+ atomic_set(&ch->rx_status, 0);
+
+ ret = clk_enable(ch->ebm->clk);
+ if (ret) {
+ dev_err(ch->ebm->dev, "Failed to enable EB clock: %pe\n",
+ ERR_PTR(ret));
+ goto err_block;
+ }
+
+ writel(BIT(ch->num), ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_IRQ_CLR);
+
+ ret = devm_request_threaded_irq(ch->ebm->dev, ch->ebm->irq, mtk_gpueb_mbox_isr,
+ mtk_gpueb_mbox_thread, IRQF_SHARED | IRQF_ONESHOT,
+ ch->full_name, ch);
+ if (ret) {
+ dev_err(ch->ebm->dev, "Failed to request IRQ: %pe\n",
+ ERR_PTR(ret));
+ goto err_unclk;
+ }
+
+ return 0;
+
+err_unclk:
+ clk_disable(ch->ebm->clk);
+err_block:
+ atomic_set(&ch->rx_status, GPUEB_MBOX_BLOCKED);
+
+ return ret;
+}
+
+static void mtk_gpueb_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct mtk_gpueb_mbox_chan *ch = chan->con_priv;
+
+ atomic_set(&ch->rx_status, GPUEB_MBOX_BLOCKED);
+
+ devm_free_irq(ch->ebm->dev, ch->ebm->irq, ch);
+
+ clk_disable(ch->ebm->clk);
+}
+
+static bool mtk_gpueb_mbox_last_tx_done(struct mbox_chan *chan)
+{
+ struct mtk_gpueb_mbox_chan *ch = chan->con_priv;
+
+ return !(readl(ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_TX_STS) & BIT(ch->num));
+}
+
+static const struct mbox_chan_ops mtk_gpueb_mbox_ops = {
+ .send_data = mtk_gpueb_mbox_send_data,
+ .startup = mtk_gpueb_mbox_startup,
+ .shutdown = mtk_gpueb_mbox_shutdown,
+ .last_tx_done = mtk_gpueb_mbox_last_tx_done,
+};
+
+static int mtk_gpueb_mbox_probe(struct platform_device *pdev)
+{
+ struct mtk_gpueb_mbox_chan *ch;
+ struct mtk_gpueb_mbox *ebm;
+ unsigned int i;
+
+ ebm = devm_kzalloc(&pdev->dev, sizeof(*ebm), GFP_KERNEL);
+ if (!ebm)
+ return -ENOMEM;
+
+ ebm->dev = &pdev->dev;
+ ebm->v = of_device_get_match_data(ebm->dev);
+
+ ebm->irq = platform_get_irq(pdev, 0);
+ if (ebm->irq < 0)
+ return ebm->irq;
+
+ ebm->clk = devm_clk_get_prepared(ebm->dev, NULL);
+ if (IS_ERR(ebm->clk))
+ return dev_err_probe(ebm->dev, PTR_ERR(ebm->clk),
+ "Failed to get 'eb' clock\n");
+
+ ebm->mbox_mmio = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ebm->mbox_mmio))
+ return dev_err_probe(ebm->dev, PTR_ERR(ebm->mbox_mmio),
+ "Couldn't map mailbox data registers\n");
+
+ ebm->mbox_ctl = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(ebm->mbox_ctl))
+ return dev_err_probe(
+ ebm->dev, PTR_ERR(ebm->mbox_ctl),
+ "Couldn't map mailbox control registers\n");
+
+ ebm->ch = devm_kmalloc_array(ebm->dev, ebm->v->num_channels,
+ sizeof(*ebm->ch), GFP_KERNEL);
+ if (!ebm->ch)
+ return -ENOMEM;
+
+ ebm->mbox.chans = devm_kcalloc(ebm->dev, ebm->v->num_channels,
+ sizeof(struct mbox_chan), GFP_KERNEL);
+ if (!ebm->mbox.chans)
+ return -ENOMEM;
+
+ for (i = 0; i < ebm->v->num_channels; i++) {
+ ch = &ebm->ch[i];
+ ch->c = &ebm->v->channels[i];
+ if (ch->c->rx_len > GPUEB_MBOX_MAX_RX_SIZE) {
+ dev_err(ebm->dev, "Channel %s RX size (%d) too large\n",
+ ch->c->name, ch->c->rx_len);
+ return -EINVAL;
+ }
+ ch->full_name = devm_kasprintf(ebm->dev, GFP_KERNEL, "%s:%s",
+ dev_name(ebm->dev), ch->c->name);
+ if (!ch->full_name)
+ return -ENOMEM;
+
+ ch->ebm = ebm;
+ ch->num = i;
+ spin_lock_init(&ebm->mbox.chans[i].lock);
+ ebm->mbox.chans[i].con_priv = ch;
+ atomic_set(&ch->rx_status, GPUEB_MBOX_BLOCKED);
+ }
+
+ ebm->mbox.dev = ebm->dev;
+ ebm->mbox.num_chans = ebm->v->num_channels;
+ ebm->mbox.txdone_poll = true;
+ ebm->mbox.txpoll_period = 0; /* minimum hrtimer interval */
+ ebm->mbox.ops = &mtk_gpueb_mbox_ops;
+
+ dev_set_drvdata(ebm->dev, ebm);
+
+ return devm_mbox_controller_register(ebm->dev, &ebm->mbox);
+}
+
+static const struct mtk_gpueb_mbox_variant mtk_gpueb_mbox_mt8196 = {
+ .num_channels = 12,
+ .channels = {
+ { "fast-dvfs-event", 0, 0x0000, 16, 0x00e0, 16 },
+ { "gpufreq", 1, 0x0010, 32, 0x00f0, 32 },
+ { "sleep", 2, 0x0030, 12, 0x0110, 4 },
+ { "timer", 3, 0x003c, 24, 0x0114, 4 },
+ { "fhctl", 4, 0x0054, 36, 0x0118, 4 },
+ { "ccf", 5, 0x0078, 16, 0x011c, 16 },
+ { "gpumpu", 6, 0x0088, 24, 0x012c, 4 },
+ { "fast-dvfs", 7, 0x00a0, 24, 0x0130, 24 },
+ { "ipir-c-met", 8, 0x00b8, 4, 0x0148, 16 },
+ { "ipis-c-met", 9, 0x00bc, 16, 0x0158, 4 },
+ { "brisket", 10, 0x00cc, 16, 0x015c, 16 },
+ { "ppb", 11, 0x00dc, 4, 0x016c, 4 },
+ },
+};
+
+static const struct of_device_id mtk_gpueb_mbox_of_ids[] = {
+ { .compatible = "mediatek,mt8196-gpueb-mbox", .data = &mtk_gpueb_mbox_mt8196 },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mtk_gpueb_mbox_of_ids);
+
+static struct platform_driver mtk_gpueb_mbox_drv = {
+ .probe = mtk_gpueb_mbox_probe,
+ .driver = {
+ .name = "mtk-gpueb-mbox",
+ .of_match_table = mtk_gpueb_mbox_of_ids,
+ }
+};
+module_platform_driver(mtk_gpueb_mbox_drv);
+
+MODULE_AUTHOR("Nicolas Frattaroli <nicolas.frattaroli@collabora.com>");
+MODULE_DESCRIPTION("MediaTek GPUEB mailbox driver");
+MODULE_LICENSE("GPL");
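
For reference, a consumer hands send_data() a buffer covering its channel's
tx_len, since the transmit path copies exactly tx_len bytes as 32-bit words.
A hedged sketch against the 32-byte "gpufreq" slot; the opcode and payload
layout are hypothetical, only the slot size comes from the table above:

  #include <linux/mailbox_client.h>

  struct demo_gpufreq_msg {
  	u32 cmd;	/* hypothetical opcode, not defined by this driver */
  	u32 arg[7];	/* pads the message to the 32-byte "gpufreq" tx_len */
  };

  static int demo_set_freq(struct mbox_chan *chan, u32 freq_khz)
  {
  	struct demo_gpufreq_msg msg = { .cmd = 1, .arg = { freq_khz } };

  	/*
  	 * send_data() writes the words into the TX slot and raises the
  	 * IRQ_SET bit; the core detects completion by polling
  	 * last_tx_done() because txdone_poll is set.
  	 */
  	return mbox_send_message(chan, &msg);
  }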
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
index 8b24ec0fa191..d3a8f6b4a03b 100644
--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -58,7 +58,6 @@ static const struct regmap_config apcs_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x1008,
- .fast_io = true,
};
static int qcom_apcs_ipc_send_data(struct mbox_chan *chan, void *data)
diff --git a/drivers/mailbox/riscv-sbi-mpxy-mbox.c b/drivers/mailbox/riscv-sbi-mpxy-mbox.c
new file mode 100644
index 000000000000..7c9c006b7244
--- /dev/null
+++ b/drivers/mailbox/riscv-sbi-mpxy-mbox.c
@@ -0,0 +1,1019 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RISC-V SBI Message Proxy (MPXY) mailbox controller driver
+ *
+ * Copyright (C) 2025 Ventana Micro Systems Inc.
+ */
+
+#include <linux/acpi.h>
+#include <linux/cpu.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/irqchip/riscv-imsic.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox/riscv-rpmi-message.h>
+#include <linux/minmax.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_irq.h>
+#include <linux/percpu.h>
+#include <linux/platform_device.h>
+#include <linux/smp.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/sbi.h>
+
+/* ====== SBI MPXY extension data structures ====== */
+
+/* SBI MPXY MSI related channel attributes */
+struct sbi_mpxy_msi_info {
+ /* Lower 32-bits of the MSI target address */
+ u32 msi_addr_lo;
+ /* Upper 32-bits of the MSI target address */
+ u32 msi_addr_hi;
+ /* MSI data value */
+ u32 msi_data;
+};
+
+/*
+ * SBI MPXY standard channel attributes.
+ *
+ * NOTE: The sequence of attribute fields is as per the
+ * sequence defined in the attribute table of the spec (or
+ * as per the enum sbi_mpxy_attribute_id).
+ */
+struct sbi_mpxy_channel_attrs {
+ /* Message protocol ID */
+ u32 msg_proto_id;
+ /* Message protocol version */
+ u32 msg_proto_version;
+ /* Message protocol maximum message length */
+ u32 msg_max_len;
+ /* Message protocol message send timeout in microseconds */
+ u32 msg_send_timeout;
+ /* Message protocol message completion timeout in microseconds */
+ u32 msg_completion_timeout;
+ /* Bit array for channel capabilities */
+ u32 capability;
+ /* SSE event ID */
+ u32 sse_event_id;
+ /* MSI enable/disable control knob */
+ u32 msi_control;
+ /* Channel MSI info */
+ struct sbi_mpxy_msi_info msi_info;
+ /* Events state control */
+ u32 events_state_ctrl;
+};
+
+/*
+ * RPMI specific SBI MPXY channel attributes.
+ *
+ * NOTE: The sequence of attribute fields is as per the
+ * sequence defined in the attribute table of the spec (or
+ * as per the enum sbi_mpxy_rpmi_attribute_id).
+ */
+struct sbi_mpxy_rpmi_channel_attrs {
+ /* RPMI service group ID */
+ u32 servicegroup_id;
+ /* RPMI service group version */
+ u32 servicegroup_version;
+ /* RPMI implementation ID */
+ u32 impl_id;
+ /* RPMI implementation version */
+ u32 impl_version;
+};
+
+/* SBI MPXY channel IDs data in shared memory */
+struct sbi_mpxy_channel_ids_data {
+ /* Remaining number of channel ids */
+ __le32 remaining;
+ /* Returned channel ids in current function call */
+ __le32 returned;
+ /* Returned channel id array */
+ __le32 channel_array[];
+};
+
+/* SBI MPXY notification data in shared memory */
+struct sbi_mpxy_notification_data {
+ /* Remaining number of notification events */
+ __le32 remaining;
+ /* Number of notification events returned */
+ __le32 returned;
+ /* Number of notification events lost */
+ __le32 lost;
+ /* Reserved for future use */
+ __le32 reserved;
+ /* Returned events data */
+ u8 events_data[];
+};
+
+/* ====== MPXY data structures & helper routines ====== */
+
+/* MPXY Per-CPU or local context */
+struct mpxy_local {
+ /* Shared memory base address */
+ void *shmem;
+ /* Shared memory physical address */
+ phys_addr_t shmem_phys_addr;
+ /* Flag representing whether shared memory is active or not */
+ bool shmem_active;
+};
+
+static DEFINE_PER_CPU(struct mpxy_local, mpxy_local);
+static unsigned long mpxy_shmem_size;
+static bool mpxy_shmem_init_done;
+
+static int mpxy_get_channel_count(u32 *channel_count)
+{
+ struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
+ struct sbi_mpxy_channel_ids_data *sdata = mpxy->shmem;
+ u32 remaining, returned;
+ struct sbiret sret;
+
+ if (!mpxy->shmem_active)
+ return -ENODEV;
+ if (!channel_count)
+ return -EINVAL;
+
+ get_cpu();
+
+ /* Get the remaining and returned fields to calculate total */
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_GET_CHANNEL_IDS,
+ 0, 0, 0, 0, 0, 0);
+ if (sret.error)
+ goto err_put_cpu;
+
+ remaining = le32_to_cpu(sdata->remaining);
+ returned = le32_to_cpu(sdata->returned);
+ *channel_count = remaining + returned;
+
+err_put_cpu:
+ put_cpu();
+ return sbi_err_map_linux_errno(sret.error);
+}
+
+static int mpxy_get_channel_ids(u32 channel_count, u32 *channel_ids)
+{
+ struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
+ struct sbi_mpxy_channel_ids_data *sdata = mpxy->shmem;
+ u32 remaining, returned, count, start_index = 0;
+ struct sbiret sret;
+
+ if (!mpxy->shmem_active)
+ return -ENODEV;
+ if (!channel_count || !channel_ids)
+ return -EINVAL;
+
+ get_cpu();
+
+ do {
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_GET_CHANNEL_IDS,
+ start_index, 0, 0, 0, 0, 0);
+ if (sret.error)
+ goto err_put_cpu;
+
+ remaining = le32_to_cpu(sdata->remaining);
+ returned = le32_to_cpu(sdata->returned);
+
+ count = returned < (channel_count - start_index) ?
+ returned : (channel_count - start_index);
+ memcpy_from_le32(&channel_ids[start_index], sdata->channel_array, count);
+ start_index += count;
+ } while (remaining && start_index < channel_count);
+
+err_put_cpu:
+ put_cpu();
+ return sbi_err_map_linux_errno(sret.error);
+}
+
+static int mpxy_read_attrs(u32 channel_id, u32 base_attrid, u32 attr_count,
+ u32 *attrs_buf)
+{
+ struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
+ struct sbiret sret;
+
+ if (!mpxy->shmem_active)
+ return -ENODEV;
+ if (!attr_count || !attrs_buf)
+ return -EINVAL;
+
+ get_cpu();
+
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_READ_ATTRS,
+ channel_id, base_attrid, attr_count, 0, 0, 0);
+ if (sret.error)
+ goto err_put_cpu;
+
+ memcpy_from_le32(attrs_buf, (__le32 *)mpxy->shmem, attr_count);
+
+err_put_cpu:
+ put_cpu();
+ return sbi_err_map_linux_errno(sret.error);
+}
+
+static int mpxy_write_attrs(u32 channel_id, u32 base_attrid, u32 attr_count,
+ u32 *attrs_buf)
+{
+ struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
+ struct sbiret sret;
+
+ if (!mpxy->shmem_active)
+ return -ENODEV;
+ if (!attr_count || !attrs_buf)
+ return -EINVAL;
+
+ get_cpu();
+
+ memcpy_to_le32((__le32 *)mpxy->shmem, attrs_buf, attr_count);
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_WRITE_ATTRS,
+ channel_id, base_attrid, attr_count, 0, 0, 0);
+
+ put_cpu();
+ return sbi_err_map_linux_errno(sret.error);
+}
+
+static int mpxy_send_message_with_resp(u32 channel_id, u32 msg_id,
+ void *tx, unsigned long tx_len,
+ void *rx, unsigned long max_rx_len,
+ unsigned long *rx_len)
+{
+ struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
+ unsigned long rx_bytes;
+ struct sbiret sret;
+
+ if (!mpxy->shmem_active)
+ return -ENODEV;
+ if (!tx && tx_len)
+ return -EINVAL;
+
+ get_cpu();
+
+ /* Message protocols are allowed to have messages with no data */
+ if (tx_len)
+ memcpy(mpxy->shmem, tx, tx_len);
+
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_SEND_MSG_WITH_RESP,
+ channel_id, msg_id, tx_len, 0, 0, 0);
+ if (rx && !sret.error) {
+ rx_bytes = sret.value;
+ if (rx_bytes > max_rx_len) {
+ put_cpu();
+ return -ENOSPC;
+ }
+
+ memcpy(rx, mpxy->shmem, rx_bytes);
+ if (rx_len)
+ *rx_len = rx_bytes;
+ }
+
+ put_cpu();
+ return sbi_err_map_linux_errno(sret.error);
+}
+
+static int mpxy_send_message_without_resp(u32 channel_id, u32 msg_id,
+ void *tx, unsigned long tx_len)
+{
+ struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
+ struct sbiret sret;
+
+ if (!mpxy->shmem_active)
+ return -ENODEV;
+ if (!tx && tx_len)
+ return -EINVAL;
+
+ get_cpu();
+
+ /* Message protocols are allowed to have messages with no data */
+ if (tx_len)
+ memcpy(mpxy->shmem, tx, tx_len);
+
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_SEND_MSG_WITHOUT_RESP,
+ channel_id, msg_id, tx_len, 0, 0, 0);
+
+ put_cpu();
+ return sbi_err_map_linux_errno(sret.error);
+}
+
+static int mpxy_get_notifications(u32 channel_id,
+ struct sbi_mpxy_notification_data *notif_data,
+ unsigned long *events_data_len)
+{
+ struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
+ struct sbiret sret;
+
+ if (!mpxy->shmem_active)
+ return -ENODEV;
+ if (!notif_data || !events_data_len)
+ return -EINVAL;
+
+ get_cpu();
+
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_GET_NOTIFICATION_EVENTS,
+ channel_id, 0, 0, 0, 0, 0);
+ if (sret.error)
+ goto err_put_cpu;
+
+ memcpy(notif_data, mpxy->shmem, sret.value + 16);
+ *events_data_len = sret.value;
+
+err_put_cpu:
+ put_cpu();
+ return sbi_err_map_linux_errno(sret.error);
+}
+
+static int mpxy_get_shmem_size(unsigned long *shmem_size)
+{
+ struct sbiret sret;
+
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_GET_SHMEM_SIZE,
+ 0, 0, 0, 0, 0, 0);
+ if (sret.error)
+ return sbi_err_map_linux_errno(sret.error);
+ if (shmem_size)
+ *shmem_size = sret.value;
+ return 0;
+}
+
+static int mpxy_setup_shmem(unsigned int cpu)
+{
+ struct page *shmem_page;
+ struct mpxy_local *mpxy;
+ struct sbiret sret;
+
+ mpxy = per_cpu_ptr(&mpxy_local, cpu);
+ if (mpxy->shmem_active)
+ return 0;
+
+ shmem_page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(mpxy_shmem_size));
+ if (!shmem_page)
+ return -ENOMEM;
+
+ /*
+ * Linux setup of shmem is done in mpxy OVERWRITE mode.
+ * flags[1:0] = 00b
+ */
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_SET_SHMEM,
+ page_to_phys(shmem_page), 0, 0, 0, 0, 0);
+ if (sret.error) {
+ free_pages((unsigned long)page_to_virt(shmem_page),
+ get_order(mpxy_shmem_size));
+ return sbi_err_map_linux_errno(sret.error);
+ }
+
+ mpxy->shmem = page_to_virt(shmem_page);
+ mpxy->shmem_phys_addr = page_to_phys(shmem_page);
+ mpxy->shmem_active = true;
+
+ return 0;
+}
+
+/* ====== MPXY mailbox data structures ====== */
+
+/* MPXY mailbox channel */
+struct mpxy_mbox_channel {
+ struct mpxy_mbox *mbox;
+ u32 channel_id;
+ struct sbi_mpxy_channel_attrs attrs;
+ struct sbi_mpxy_rpmi_channel_attrs rpmi_attrs;
+ struct sbi_mpxy_notification_data *notif;
+ u32 max_xfer_len;
+ bool have_events_state;
+ u32 msi_index;
+ u32 msi_irq;
+ bool started;
+};
+
+/* MPXY mailbox */
+struct mpxy_mbox {
+ struct device *dev;
+ u32 channel_count;
+ struct mpxy_mbox_channel *channels;
+ u32 msi_count;
+ struct mpxy_mbox_channel **msi_index_to_channel;
+ struct mbox_controller controller;
+};
+
+/* ====== MPXY RPMI processing ====== */
+
+static void mpxy_mbox_send_rpmi_data(struct mpxy_mbox_channel *mchan,
+ struct rpmi_mbox_message *msg)
+{
+ msg->error = 0;
+ switch (msg->type) {
+ case RPMI_MBOX_MSG_TYPE_GET_ATTRIBUTE:
+ switch (msg->attr.id) {
+ case RPMI_MBOX_ATTR_SPEC_VERSION:
+ msg->attr.value = mchan->attrs.msg_proto_version;
+ break;
+ case RPMI_MBOX_ATTR_MAX_MSG_DATA_SIZE:
+ msg->attr.value = mchan->max_xfer_len;
+ break;
+ case RPMI_MBOX_ATTR_SERVICEGROUP_ID:
+ msg->attr.value = mchan->rpmi_attrs.servicegroup_id;
+ break;
+ case RPMI_MBOX_ATTR_SERVICEGROUP_VERSION:
+ msg->attr.value = mchan->rpmi_attrs.servicegroup_version;
+ break;
+ case RPMI_MBOX_ATTR_IMPL_ID:
+ msg->attr.value = mchan->rpmi_attrs.impl_id;
+ break;
+ case RPMI_MBOX_ATTR_IMPL_VERSION:
+ msg->attr.value = mchan->rpmi_attrs.impl_version;
+ break;
+ default:
+ msg->error = -EOPNOTSUPP;
+ break;
+ }
+ break;
+ case RPMI_MBOX_MSG_TYPE_SET_ATTRIBUTE:
+ /* None of the RPMI linux mailbox attributes are writeable */
+ msg->error = -EOPNOTSUPP;
+ break;
+ case RPMI_MBOX_MSG_TYPE_SEND_WITH_RESPONSE:
+ if ((!msg->data.request && msg->data.request_len) ||
+ (msg->data.request && msg->data.request_len > mchan->max_xfer_len) ||
+ (!msg->data.response && msg->data.max_response_len)) {
+ msg->error = -EINVAL;
+ break;
+ }
+ if (!(mchan->attrs.capability & SBI_MPXY_CHAN_CAP_SEND_WITH_RESP)) {
+ msg->error = -EIO;
+ break;
+ }
+ msg->error = mpxy_send_message_with_resp(mchan->channel_id,
+ msg->data.service_id,
+ msg->data.request,
+ msg->data.request_len,
+ msg->data.response,
+ msg->data.max_response_len,
+ &msg->data.out_response_len);
+ break;
+ case RPMI_MBOX_MSG_TYPE_SEND_WITHOUT_RESPONSE:
+ if ((!msg->data.request && msg->data.request_len) ||
+ (msg->data.request && msg->data.request_len > mchan->max_xfer_len)) {
+ msg->error = -EINVAL;
+ break;
+ }
+ if (!(mchan->attrs.capability & SBI_MPXY_CHAN_CAP_SEND_WITHOUT_RESP)) {
+ msg->error = -EIO;
+ break;
+ }
+ msg->error = mpxy_send_message_without_resp(mchan->channel_id,
+ msg->data.service_id,
+ msg->data.request,
+ msg->data.request_len);
+ break;
+ default:
+ msg->error = -EOPNOTSUPP;
+ break;
+ }
+}
+
+static void mpxy_mbox_peek_rpmi_data(struct mbox_chan *chan,
+ struct mpxy_mbox_channel *mchan,
+ struct sbi_mpxy_notification_data *notif,
+ unsigned long events_data_len)
+{
+ struct rpmi_notification_event *event;
+ struct rpmi_mbox_message msg;
+ unsigned long pos = 0;
+
+ while (pos < events_data_len && (events_data_len - pos) >= sizeof(*event)) {
+ event = (struct rpmi_notification_event *)(notif->events_data + pos);
+
+ msg.type = RPMI_MBOX_MSG_TYPE_NOTIFICATION_EVENT;
+ msg.notif.event_datalen = le16_to_cpu(event->event_datalen);
+ msg.notif.event_id = event->event_id;
+ msg.notif.event_data = event->event_data;
+ msg.error = 0;
+
+ mbox_chan_received_data(chan, &msg);
+ pos += sizeof(*event) + msg.notif.event_datalen;
+ }
+}
+
+static int mpxy_mbox_read_rpmi_attrs(struct mpxy_mbox_channel *mchan)
+{
+ return mpxy_read_attrs(mchan->channel_id,
+ SBI_MPXY_ATTR_MSGPROTO_ATTR_START,
+ sizeof(mchan->rpmi_attrs) / sizeof(u32),
+ (u32 *)&mchan->rpmi_attrs);
+}
+
+/* ====== MPXY mailbox callbacks ====== */
+
+static int mpxy_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct mpxy_mbox_channel *mchan = chan->con_priv;
+
+ if (mchan->attrs.msg_proto_id == SBI_MPXY_MSGPROTO_RPMI_ID) {
+ mpxy_mbox_send_rpmi_data(mchan, data);
+ return 0;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static bool mpxy_mbox_peek_data(struct mbox_chan *chan)
+{
+ struct mpxy_mbox_channel *mchan = chan->con_priv;
+ struct sbi_mpxy_notification_data *notif = mchan->notif;
+ bool have_notifications = false;
+ unsigned long data_len;
+ int rc;
+
+ if (!(mchan->attrs.capability & SBI_MPXY_CHAN_CAP_GET_NOTIFICATIONS))
+ return false;
+
+ do {
+ rc = mpxy_get_notifications(mchan->channel_id, notif, &data_len);
+ if (rc || !data_len)
+ break;
+
+ if (mchan->attrs.msg_proto_id == SBI_MPXY_MSGPROTO_RPMI_ID)
+ mpxy_mbox_peek_rpmi_data(chan, mchan, notif, data_len);
+
+ have_notifications = true;
+ } while (1);
+
+ return have_notifications;
+}
+
+static irqreturn_t mpxy_mbox_irq_thread(int irq, void *dev_id)
+{
+ mpxy_mbox_peek_data(dev_id);
+ return IRQ_HANDLED;
+}
+
+static int mpxy_mbox_setup_msi(struct mbox_chan *chan,
+ struct mpxy_mbox_channel *mchan)
+{
+ struct device *dev = mchan->mbox->dev;
+ int rc;
+
+ /* Do nothing if MSI not supported */
+ if (mchan->msi_irq == U32_MAX)
+ return 0;
+
+ /* Fail if MSI already enabled */
+ if (mchan->attrs.msi_control)
+ return -EALREADY;
+
+ /* Request channel MSI handler */
+ rc = request_threaded_irq(mchan->msi_irq, NULL, mpxy_mbox_irq_thread,
+ 0, dev_name(dev), chan);
+ if (rc) {
+ dev_err(dev, "failed to request MPXY channel 0x%x IRQ\n",
+ mchan->channel_id);
+ return rc;
+ }
+
+ /* Enable channel MSI control */
+ mchan->attrs.msi_control = 1;
+ rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_MSI_CONTROL,
+ 1, &mchan->attrs.msi_control);
+ if (rc) {
+ dev_err(dev, "enable MSI control failed for MPXY channel 0x%x\n",
+ mchan->channel_id);
+ mchan->attrs.msi_control = 0;
+ free_irq(mchan->msi_irq, chan);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void mpxy_mbox_cleanup_msi(struct mbox_chan *chan,
+ struct mpxy_mbox_channel *mchan)
+{
+ struct device *dev = mchan->mbox->dev;
+ int rc;
+
+ /* Do nothing if MSI not supported */
+ if (mchan->msi_irq == U32_MAX)
+ return;
+
+ /* Do nothing if MSI already disabled */
+ if (!mchan->attrs.msi_control)
+ return;
+
+ /* Disable channel MSI control */
+ mchan->attrs.msi_control = 0;
+ rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_MSI_CONTROL,
+ 1, &mchan->attrs.msi_control);
+ if (rc) {
+ dev_err(dev, "disable MSI control failed for MPXY channel 0x%x\n",
+ mchan->channel_id);
+ }
+
+ /* Free channel MSI handler */
+ free_irq(mchan->msi_irq, chan);
+}
+
+static int mpxy_mbox_setup_events(struct mpxy_mbox_channel *mchan)
+{
+ struct device *dev = mchan->mbox->dev;
+ int rc;
+
+ /* Do nothing if events state not supported */
+ if (!mchan->have_events_state)
+ return 0;
+
+ /* Fail if events state already enabled */
+ if (mchan->attrs.events_state_ctrl)
+ return -EALREADY;
+
+ /* Enable channel events state */
+ mchan->attrs.events_state_ctrl = 1;
+ rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_EVENTS_STATE_CONTROL,
+ 1, &mchan->attrs.events_state_ctrl);
+ if (rc) {
+ dev_err(dev, "enable events state failed for MPXY channel 0x%x\n",
+ mchan->channel_id);
+ mchan->attrs.events_state_ctrl = 0;
+ return rc;
+ }
+
+ return 0;
+}
+
+static void mpxy_mbox_cleanup_events(struct mpxy_mbox_channel *mchan)
+{
+ struct device *dev = mchan->mbox->dev;
+ int rc;
+
+ /* Do nothing if events state not supported */
+ if (!mchan->have_events_state)
+ return;
+
+ /* Do nothing if events state already disabled */
+ if (!mchan->attrs.events_state_ctrl)
+ return;
+
+ /* Disable channel events state */
+ mchan->attrs.events_state_ctrl = 0;
+ rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_EVENTS_STATE_CONTROL,
+ 1, &mchan->attrs.events_state_ctrl);
+ if (rc)
+ dev_err(dev, "disable events state failed for MPXY channel 0x%x\n",
+ mchan->channel_id);
+}
+
+static int mpxy_mbox_startup(struct mbox_chan *chan)
+{
+ struct mpxy_mbox_channel *mchan = chan->con_priv;
+ int rc;
+
+ if (mchan->started)
+ return -EALREADY;
+
+ /* Setup channel MSI */
+ rc = mpxy_mbox_setup_msi(chan, mchan);
+ if (rc)
+ return rc;
+
+ /* Setup channel notification events */
+ rc = mpxy_mbox_setup_events(mchan);
+ if (rc) {
+ mpxy_mbox_cleanup_msi(chan, mchan);
+ return rc;
+ }
+
+ /* Mark the channel as started */
+ mchan->started = true;
+
+ return 0;
+}
+
+static void mpxy_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct mpxy_mbox_channel *mchan = chan->con_priv;
+
+ if (!mchan->started)
+ return;
+
+ /* Mark the channel as stopped */
+ mchan->started = false;
+
+ /* Cleanup channel notification events */
+ mpxy_mbox_cleanup_events(mchan);
+
+ /* Cleanup channel MSI */
+ mpxy_mbox_cleanup_msi(chan, mchan);
+}
+
+static const struct mbox_chan_ops mpxy_mbox_ops = {
+ .send_data = mpxy_mbox_send_data,
+ .peek_data = mpxy_mbox_peek_data,
+ .startup = mpxy_mbox_startup,
+ .shutdown = mpxy_mbox_shutdown,
+};
+
+/* ====== MPXY platform driver ===== */
+
+static void mpxy_mbox_msi_write(struct msi_desc *desc, struct msi_msg *msg)
+{
+ struct device *dev = msi_desc_to_dev(desc);
+ struct mpxy_mbox *mbox = dev_get_drvdata(dev);
+ struct mpxy_mbox_channel *mchan;
+ struct sbi_mpxy_msi_info *minfo;
+ int rc;
+
+ mchan = mbox->msi_index_to_channel[desc->msi_index];
+ if (!mchan) {
+ dev_warn(dev, "MPXY channel not available for MSI index %d\n",
+ desc->msi_index);
+ return;
+ }
+
+ minfo = &mchan->attrs.msi_info;
+ minfo->msi_addr_lo = msg->address_lo;
+ minfo->msi_addr_hi = msg->address_hi;
+ minfo->msi_data = msg->data;
+
+ rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_MSI_ADDR_LO,
+ sizeof(*minfo) / sizeof(u32), (u32 *)minfo);
+ if (rc) {
+ dev_warn(dev, "failed to write MSI info for MPXY channel 0x%x\n",
+ mchan->channel_id);
+ }
+}
+
+static struct mbox_chan *mpxy_mbox_fw_xlate(struct mbox_controller *ctlr,
+ const struct fwnode_reference_args *pa)
+{
+ struct mpxy_mbox *mbox = container_of(ctlr, struct mpxy_mbox, controller);
+ struct mpxy_mbox_channel *mchan;
+ u32 i;
+
+ if (pa->nargs != 2)
+ return ERR_PTR(-EINVAL);
+
+ for (i = 0; i < mbox->channel_count; i++) {
+ mchan = &mbox->channels[i];
+ if (mchan->channel_id == pa->args[0] &&
+ mchan->attrs.msg_proto_id == pa->args[1])
+ return &mbox->controller.chans[i];
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+
+static int mpxy_mbox_populate_channels(struct mpxy_mbox *mbox)
+{
+ u32 i, *channel_ids __free(kfree) = NULL;
+ struct mpxy_mbox_channel *mchan;
+ int rc;
+
+ /* Find out the number of channels */
+ rc = mpxy_get_channel_count(&mbox->channel_count);
+ if (rc)
+ return dev_err_probe(mbox->dev, rc, "failed to get number of MPXY channels\n");
+ if (!mbox->channel_count)
+ return dev_err_probe(mbox->dev, -ENODEV, "no MPXY channels available\n");
+
+ /* Allocate and fetch all channel IDs */
+ channel_ids = kcalloc(mbox->channel_count, sizeof(*channel_ids), GFP_KERNEL);
+ if (!channel_ids)
+ return -ENOMEM;
+ rc = mpxy_get_channel_ids(mbox->channel_count, channel_ids);
+ if (rc)
+ return dev_err_probe(mbox->dev, rc, "failed to get MPXY channel IDs\n");
+
+ /* Populate all channels */
+ mbox->channels = devm_kcalloc(mbox->dev, mbox->channel_count,
+ sizeof(*mbox->channels), GFP_KERNEL);
+ if (!mbox->channels)
+ return -ENOMEM;
+ for (i = 0; i < mbox->channel_count; i++) {
+ mchan = &mbox->channels[i];
+ mchan->mbox = mbox;
+ mchan->channel_id = channel_ids[i];
+
+ rc = mpxy_read_attrs(mchan->channel_id, SBI_MPXY_ATTR_MSG_PROT_ID,
+ sizeof(mchan->attrs) / sizeof(u32),
+ (u32 *)&mchan->attrs);
+ if (rc) {
+ return dev_err_probe(mbox->dev, rc,
+ "MPXY channel 0x%x read attrs failed\n",
+ mchan->channel_id);
+ }
+
+ if (mchan->attrs.msg_proto_id == SBI_MPXY_MSGPROTO_RPMI_ID) {
+ rc = mpxy_mbox_read_rpmi_attrs(mchan);
+ if (rc) {
+ return dev_err_probe(mbox->dev, rc,
+ "MPXY channel 0x%x read RPMI attrs failed\n",
+ mchan->channel_id);
+ }
+ }
+
+ mchan->notif = devm_kzalloc(mbox->dev, mpxy_shmem_size, GFP_KERNEL);
+ if (!mchan->notif)
+ return -ENOMEM;
+
+ mchan->max_xfer_len = min(mpxy_shmem_size, mchan->attrs.msg_max_len);
+
+ if ((mchan->attrs.capability & SBI_MPXY_CHAN_CAP_GET_NOTIFICATIONS) &&
+ (mchan->attrs.capability & SBI_MPXY_CHAN_CAP_EVENTS_STATE))
+ mchan->have_events_state = true;
+
+ if ((mchan->attrs.capability & SBI_MPXY_CHAN_CAP_GET_NOTIFICATIONS) &&
+ (mchan->attrs.capability & SBI_MPXY_CHAN_CAP_MSI))
+ mchan->msi_index = mbox->msi_count++;
+ else
+ mchan->msi_index = U32_MAX;
+ mchan->msi_irq = U32_MAX;
+ }
+
+ return 0;
+}
+
+static int mpxy_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mpxy_mbox_channel *mchan;
+ struct mpxy_mbox *mbox;
+ int msi_idx, rc;
+ u32 i;
+
+ /*
+ * Initialize MPXY shared memory only once. This also ensures
+ * that SBI MPXY mailbox is probed only once.
+ */
+ if (mpxy_shmem_init_done) {
+ dev_err(dev, "SBI MPXY mailbox already initialized\n");
+ return -EALREADY;
+ }
+
+ /* Probe for SBI MPXY extension */
+ if (sbi_spec_version < sbi_mk_version(1, 0) ||
+ sbi_probe_extension(SBI_EXT_MPXY) <= 0) {
+ dev_info(dev, "SBI MPXY extension not available\n");
+ return -ENODEV;
+ }
+
+ /* Find out the shared memory size */
+ rc = mpxy_get_shmem_size(&mpxy_shmem_size);
+ if (rc)
+ return dev_err_probe(dev, rc, "failed to get MPXY shared memory size\n");
+
+ /*
+ * Setup MPXY shared memory on each CPU
+ *
+ * Note: Don't cleanup MPXY shared memory upon CPU power-down
+ * because the RPMI System MSI irqchip driver needs it to be
+ * available when migrating IRQs in CPU power-down path.
+ */
+ cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "riscv/sbi-mpxy-shmem",
+ mpxy_setup_shmem, NULL);
+
+ /* Mark as MPXY shared memory initialization done */
+ mpxy_shmem_init_done = true;
+
+ /* Allocate mailbox instance */
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ mbox->dev = dev;
+ platform_set_drvdata(pdev, mbox);
+
+ /* Populate mailbox channels */
+ rc = mpxy_mbox_populate_channels(mbox);
+ if (rc)
+ return rc;
+
+ /* Initialize mailbox controller */
+ mbox->controller.txdone_irq = false;
+ mbox->controller.txdone_poll = false;
+ mbox->controller.ops = &mpxy_mbox_ops;
+ mbox->controller.dev = dev;
+ mbox->controller.num_chans = mbox->channel_count;
+ mbox->controller.fw_xlate = mpxy_mbox_fw_xlate;
+ mbox->controller.chans = devm_kcalloc(dev, mbox->channel_count,
+ sizeof(*mbox->controller.chans),
+ GFP_KERNEL);
+ if (!mbox->controller.chans)
+ return -ENOMEM;
+ for (i = 0; i < mbox->channel_count; i++)
+ mbox->controller.chans[i].con_priv = &mbox->channels[i];
+
+ /* Setup MSIs for mailbox (if required) */
+ if (mbox->msi_count) {
+ /*
+ * The device MSI domain for platform devices on the RISC-V architecture
+ * is only available after the MSI controller driver is probed, so
+ * explicitly configure it here.
+ */
+ if (!dev_get_msi_domain(dev)) {
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+
+ /*
+ * The device MSI domain for OF devices is only set at the
+ * time of populating/creating OF device. If the device MSI
+ * domain is discovered later after the OF device is created
+ * then we need to set it explicitly before using any platform
+ * MSI functions.
+ */
+ if (is_of_node(fwnode)) {
+ of_msi_configure(dev, dev_of_node(dev));
+ } else if (is_acpi_device_node(fwnode)) {
+ struct irq_domain *msi_domain;
+
+ msi_domain = irq_find_matching_fwnode(imsic_acpi_get_fwnode(dev),
+ DOMAIN_BUS_PLATFORM_MSI);
+ dev_set_msi_domain(dev, msi_domain);
+ }
+
+ if (!dev_get_msi_domain(dev))
+ return -EPROBE_DEFER;
+ }
+
+ mbox->msi_index_to_channel = devm_kcalloc(dev, mbox->msi_count,
+ sizeof(*mbox->msi_index_to_channel),
+ GFP_KERNEL);
+ if (!mbox->msi_index_to_channel)
+ return -ENOMEM;
+
+ for (msi_idx = 0; msi_idx < mbox->msi_count; msi_idx++) {
+ for (i = 0; i < mbox->channel_count; i++) {
+ mchan = &mbox->channels[i];
+ if (mchan->msi_index == msi_idx) {
+ mbox->msi_index_to_channel[msi_idx] = mchan;
+ break;
+ }
+ }
+ }
+
+ rc = platform_device_msi_init_and_alloc_irqs(dev, mbox->msi_count,
+ mpxy_mbox_msi_write);
+ if (rc) {
+ return dev_err_probe(dev, rc, "Failed to allocate %d MSIs\n",
+ mbox->msi_count);
+ }
+
+ for (i = 0; i < mbox->channel_count; i++) {
+ mchan = &mbox->channels[i];
+ if (mchan->msi_index == U32_MAX)
+ continue;
+ mchan->msi_irq = msi_get_virq(dev, mchan->msi_index);
+ }
+ }
+
+ /* Register mailbox controller */
+ rc = devm_mbox_controller_register(dev, &mbox->controller);
+ if (rc) {
+ dev_err_probe(dev, rc, "Registering SBI MPXY mailbox failed\n");
+ if (mbox->msi_count)
+ platform_device_msi_free_irqs_all(dev);
+ return rc;
+ }
+
+#ifdef CONFIG_ACPI
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+
+ if (adev)
+ acpi_dev_clear_dependencies(adev);
+#endif
+
+ dev_info(dev, "mailbox registered with %d channels\n",
+ mbox->channel_count);
+ return 0;
+}
+
+static void mpxy_mbox_remove(struct platform_device *pdev)
+{
+ struct mpxy_mbox *mbox = platform_get_drvdata(pdev);
+
+ if (mbox->msi_count)
+ platform_device_msi_free_irqs_all(mbox->dev);
+}
+
+static const struct of_device_id mpxy_mbox_of_match[] = {
+ { .compatible = "riscv,sbi-mpxy-mbox" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mpxy_mbox_of_match);
+
+static const struct acpi_device_id mpxy_mbox_acpi_match[] = {
+ { "RSCV0005" },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, mpxy_mbox_acpi_match);
+
+static struct platform_driver mpxy_mbox_driver = {
+ .driver = {
+ .name = "riscv-sbi-mpxy-mbox",
+ .of_match_table = mpxy_mbox_of_match,
+ .acpi_match_table = mpxy_mbox_acpi_match,
+ },
+ .probe = mpxy_mbox_probe,
+ .remove = mpxy_mbox_remove,
+};
+module_platform_driver(mpxy_mbox_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Anup Patel <apatel@ventanamicro.com>");
+MODULE_DESCRIPTION("RISC-V SBI MPXY mailbox controller driver");
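
Clients drive an RPMI channel by filling struct rpmi_mbox_message;
send_data() performs the SBI call synchronously and reports the protocol
result in msg->error. A minimal sketch, assuming an already-requested
channel and using only the fields this driver consumes:

  #include <linux/mailbox_client.h>
  #include <linux/mailbox/riscv-rpmi-message.h>

  static int demo_rpmi_xfer(struct mbox_chan *chan, u32 service_id,
  			    void *req, unsigned long req_len,
  			    void *rsp, unsigned long max_rsp_len)
  {
  	struct rpmi_mbox_message msg = {};
  	int rc;

  	msg.type = RPMI_MBOX_MSG_TYPE_SEND_WITH_RESPONSE;
  	msg.data.service_id = service_id;
  	msg.data.request = req;
  	msg.data.request_len = req_len;
  	msg.data.response = rsp;
  	msg.data.max_response_len = max_rsp_len;

  	/* mpxy_mbox_send_data() fills msg.error with the transfer result */
  	rc = mbox_send_message(chan, &msg);
  	if (rc < 0)
  		return rc;

  	return msg.error;
  }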
diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
index 0c143beaafda..967967b2b8a9 100644
--- a/drivers/mailbox/zynqmp-ipi-mailbox.c
+++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
@@ -62,7 +62,8 @@
#define DST_BIT_POS 9U
#define SRC_BITMASK GENMASK(11, 8)
-#define MAX_SGI 16
+/* Macro to represent SGI type for IPI IRQs */
+#define IPI_IRQ_TYPE_SGI 2
/*
* Module parameters
@@ -121,6 +122,7 @@ struct zynqmp_ipi_mbox {
* @dev: device pointer corresponding to the Xilinx ZynqMP
* IPI agent
* @irq: IPI agent interrupt ID
+ * @irq_type: IPI SGI or SPI IRQ type
* @method: IPI SMC or HVC is going to be used
* @local_id: local IPI agent ID
* @virq_sgi: IRQ number mapped to SGI
@@ -130,6 +132,7 @@ struct zynqmp_ipi_mbox {
struct zynqmp_ipi_pdata {
struct device *dev;
int irq;
+ unsigned int irq_type;
unsigned int method;
u32 local_id;
int virq_sgi;
@@ -887,17 +890,14 @@ static void zynqmp_ipi_free_mboxes(struct zynqmp_ipi_pdata *pdata)
struct zynqmp_ipi_mbox *ipi_mbox;
int i;
- if (pdata->irq < MAX_SGI)
+ if (pdata->irq_type == IPI_IRQ_TYPE_SGI)
xlnx_mbox_cleanup_sgi(pdata);
- i = pdata->num_mboxes;
+ i = pdata->num_mboxes - 1;
for (; i >= 0; i--) {
ipi_mbox = &pdata->ipi_mboxes[i];
- if (ipi_mbox->dev.parent) {
- mbox_controller_unregister(&ipi_mbox->mbox);
- if (device_is_registered(&ipi_mbox->dev))
- device_unregister(&ipi_mbox->dev);
- }
+ if (device_is_registered(&ipi_mbox->dev))
+ device_unregister(&ipi_mbox->dev);
}
}
@@ -959,14 +959,16 @@ static int zynqmp_ipi_probe(struct platform_device *pdev)
dev_err(dev, "failed to parse interrupts\n");
goto free_mbox_dev;
}
- ret = out_irq.args[1];
+
+ /* Use interrupt type to distinguish SGI and SPI interrupts */
+ pdata->irq_type = out_irq.args[0];
/*
* If Interrupt number is in SGI range, then request SGI else request
* IPI system IRQ.
*/
- if (ret < MAX_SGI) {
- pdata->irq = ret;
+ if (pdata->irq_type == IPI_IRQ_TYPE_SGI) {
+ pdata->irq = out_irq.args[1];
ret = xlnx_mbox_init_sgi(pdev, pdata->irq, pdata);
if (ret)
goto free_mbox_dev;