Diffstat (limited to 'drivers/soc')
-rw-r--r--  drivers/soc/apple/Kconfig             15
-rw-r--r--  drivers/soc/apple/Makefile             4
-rw-r--r--  drivers/soc/apple/mailbox.c          437
-rw-r--r--  drivers/soc/apple/mailbox.h           48
-rw-r--r--  drivers/soc/apple/rtkit-internal.h     8
-rw-r--r--  drivers/soc/apple/rtkit.c            133
-rw-r--r--  drivers/soc/fsl/qe/qmc.c             658
-rw-r--r--  drivers/soc/fsl/qe/tsa.c              22
-rw-r--r--  drivers/soc/hisilicon/kunpeng_hccs.c 152
-rw-r--r--  drivers/soc/hisilicon/kunpeng_hccs.h  15
10 files changed, 1178 insertions, 314 deletions
diff --git a/drivers/soc/apple/Kconfig b/drivers/soc/apple/Kconfig
index eff486a77337..6388cbe1e56b 100644
--- a/drivers/soc/apple/Kconfig
+++ b/drivers/soc/apple/Kconfig
@@ -4,9 +4,22 @@ if ARCH_APPLE || COMPILE_TEST
menu "Apple SoC drivers"
+config APPLE_MAILBOX
+ tristate "Apple SoC mailboxes"
+ depends on PM
+ depends on ARCH_APPLE || (64BIT && COMPILE_TEST)
+ default ARCH_APPLE
+ help
+ Apple SoCs have various co-processors required for certain
+ peripherals to work (NVMe, display controller, etc.). This
+ driver adds support for the mailbox controller used to
+ communicate with those.
+
+ Say Y here if you have an Apple SoC.
+
config APPLE_RTKIT
tristate "Apple RTKit co-processor IPC protocol"
- depends on MAILBOX
+ depends on APPLE_MAILBOX
depends on ARCH_APPLE || COMPILE_TEST
default ARCH_APPLE
help
diff --git a/drivers/soc/apple/Makefile b/drivers/soc/apple/Makefile
index b241e6a65e5b..4d9ab8f3037b 100644
--- a/drivers/soc/apple/Makefile
+++ b/drivers/soc/apple/Makefile
@@ -1,4 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_APPLE_MAILBOX) += apple-mailbox.o
+apple-mailbox-y = mailbox.o
+
obj-$(CONFIG_APPLE_RTKIT) += apple-rtkit.o
apple-rtkit-y = rtkit.o rtkit-crashlog.o
diff --git a/drivers/soc/apple/mailbox.c b/drivers/soc/apple/mailbox.c
new file mode 100644
index 000000000000..780199bf351e
--- /dev/null
+++ b/drivers/soc/apple/mailbox.c
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple mailbox driver
+ *
+ * Copyright The Asahi Linux Contributors
+ *
+ * This driver adds support for two mailbox variants (called ASC and M3 by
+ * Apple) found in Apple SoCs such as the M1. It consists of two FIFOs used to
+ * exchange 64+32 bit messages between the main CPU and a co-processor.
+ * Various coprocessors implement different IPC protocols based on these simple
+ * messages and shared memory buffers.
+ *
+ * Both the main CPU and the co-processor see the same set of registers but
+ * the first FIFO (A2I) is always used to transfer messages from the application
+ * processor (us) to the I/O processor and the second one (I2A) for the
+ * other direction.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include "mailbox.h"
+
+#define APPLE_ASC_MBOX_CONTROL_FULL BIT(16)
+#define APPLE_ASC_MBOX_CONTROL_EMPTY BIT(17)
+
+#define APPLE_ASC_MBOX_A2I_CONTROL 0x110
+#define APPLE_ASC_MBOX_A2I_SEND0 0x800
+#define APPLE_ASC_MBOX_A2I_SEND1 0x808
+#define APPLE_ASC_MBOX_A2I_RECV0 0x810
+#define APPLE_ASC_MBOX_A2I_RECV1 0x818
+
+#define APPLE_ASC_MBOX_I2A_CONTROL 0x114
+#define APPLE_ASC_MBOX_I2A_SEND0 0x820
+#define APPLE_ASC_MBOX_I2A_SEND1 0x828
+#define APPLE_ASC_MBOX_I2A_RECV0 0x830
+#define APPLE_ASC_MBOX_I2A_RECV1 0x838
+
+#define APPLE_M3_MBOX_CONTROL_FULL BIT(16)
+#define APPLE_M3_MBOX_CONTROL_EMPTY BIT(17)
+
+#define APPLE_M3_MBOX_A2I_CONTROL 0x50
+#define APPLE_M3_MBOX_A2I_SEND0 0x60
+#define APPLE_M3_MBOX_A2I_SEND1 0x68
+#define APPLE_M3_MBOX_A2I_RECV0 0x70
+#define APPLE_M3_MBOX_A2I_RECV1 0x78
+
+#define APPLE_M3_MBOX_I2A_CONTROL 0x80
+#define APPLE_M3_MBOX_I2A_SEND0 0x90
+#define APPLE_M3_MBOX_I2A_SEND1 0x98
+#define APPLE_M3_MBOX_I2A_RECV0 0xa0
+#define APPLE_M3_MBOX_I2A_RECV1 0xa8
+
+#define APPLE_M3_MBOX_IRQ_ENABLE 0x48
+#define APPLE_M3_MBOX_IRQ_ACK 0x4c
+#define APPLE_M3_MBOX_IRQ_A2I_EMPTY BIT(0)
+#define APPLE_M3_MBOX_IRQ_A2I_NOT_EMPTY BIT(1)
+#define APPLE_M3_MBOX_IRQ_I2A_EMPTY BIT(2)
+#define APPLE_M3_MBOX_IRQ_I2A_NOT_EMPTY BIT(3)
+
+#define APPLE_MBOX_MSG1_OUTCNT GENMASK(56, 52)
+#define APPLE_MBOX_MSG1_INCNT GENMASK(51, 48)
+#define APPLE_MBOX_MSG1_OUTPTR GENMASK(47, 44)
+#define APPLE_MBOX_MSG1_INPTR GENMASK(43, 40)
+#define APPLE_MBOX_MSG1_MSG GENMASK(31, 0)
+
+#define APPLE_MBOX_TX_TIMEOUT 500
+
+struct apple_mbox_hw {
+ unsigned int control_full;
+ unsigned int control_empty;
+
+ unsigned int a2i_control;
+ unsigned int a2i_send0;
+ unsigned int a2i_send1;
+
+ unsigned int i2a_control;
+ unsigned int i2a_recv0;
+ unsigned int i2a_recv1;
+
+ bool has_irq_controls;
+ unsigned int irq_enable;
+ unsigned int irq_ack;
+ unsigned int irq_bit_recv_not_empty;
+ unsigned int irq_bit_send_empty;
+};
+
+int apple_mbox_send(struct apple_mbox *mbox, const struct apple_mbox_msg msg,
+ bool atomic)
+{
+ unsigned long flags;
+ int ret;
+ u32 mbox_ctrl;
+ long t;
+
+ spin_lock_irqsave(&mbox->tx_lock, flags);
+ mbox_ctrl = readl_relaxed(mbox->regs + mbox->hw->a2i_control);
+
+ while (mbox_ctrl & mbox->hw->control_full) {
+ if (atomic) {
+ ret = readl_poll_timeout_atomic(
+ mbox->regs + mbox->hw->a2i_control, mbox_ctrl,
+ !(mbox_ctrl & mbox->hw->control_full), 100,
+ APPLE_MBOX_TX_TIMEOUT * 1000);
+
+ if (ret) {
+ spin_unlock_irqrestore(&mbox->tx_lock, flags);
+ return ret;
+ }
+
+ break;
+ }
+ /*
+ * The interrupt is level triggered and will keep firing as long as the
+ * FIFO is empty. It will also keep firing if the FIFO was empty
+ * at any point in the past until it has been acknowledged at the
+ * mailbox level. By acknowledging it here we can ensure that we will
+ * only get the interrupt once the FIFO has been cleared again.
+ * If the FIFO is already empty before the ack it will fire again
+ * immediately after the ack.
+ */
+ if (mbox->hw->has_irq_controls) {
+ writel_relaxed(mbox->hw->irq_bit_send_empty,
+ mbox->regs + mbox->hw->irq_ack);
+ }
+ enable_irq(mbox->irq_send_empty);
+ reinit_completion(&mbox->tx_empty);
+ spin_unlock_irqrestore(&mbox->tx_lock, flags);
+
+ t = wait_for_completion_interruptible_timeout(
+ &mbox->tx_empty,
+ msecs_to_jiffies(APPLE_MBOX_TX_TIMEOUT));
+ if (t < 0)
+ return t;
+ else if (t == 0)
+ return -ETIMEDOUT;
+
+ spin_lock_irqsave(&mbox->tx_lock, flags);
+ mbox_ctrl = readl_relaxed(mbox->regs + mbox->hw->a2i_control);
+ }
+
+ writeq_relaxed(msg.msg0, mbox->regs + mbox->hw->a2i_send0);
+ writeq_relaxed(FIELD_PREP(APPLE_MBOX_MSG1_MSG, msg.msg1),
+ mbox->regs + mbox->hw->a2i_send1);
+
+ spin_unlock_irqrestore(&mbox->tx_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(apple_mbox_send);
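For illustration, a minimal caller sketch (not part of this patch; example_send, "payload" and "ep" are invented names). It mirrors how the RTKit conversion further down in this diff uses the function:

    /* Send one 64+32 bit message; may sleep while the A2I FIFO is full. */
    static int example_send(struct apple_mbox *mbox, u64 payload, u32 ep)
    {
            struct apple_mbox_msg msg = {
                    .msg0 = payload,  /* 64-bit half, written to SEND0 */
                    .msg1 = ep,       /* 32-bit half, masked into SEND1[31:0] */
            };

            /* atomic=false: a full FIFO is handled via the send-empty IRQ */
            return apple_mbox_send(mbox, msg, false);
    }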
+
+static irqreturn_t apple_mbox_send_empty_irq(int irq, void *data)
+{
+ struct apple_mbox *mbox = data;
+
+ /*
+ * We don't need to acknowledge the interrupt at the mailbox level
+ * here even if supported by the hardware. It will keep firing but that
+ * doesn't matter since it's disabled at the main interrupt controller.
+ * apple_mbox_send will acknowledge it before enabling
+ * it at the main controller again.
+ */
+ spin_lock(&mbox->tx_lock);
+ disable_irq_nosync(mbox->irq_send_empty);
+ complete(&mbox->tx_empty);
+ spin_unlock(&mbox->tx_lock);
+
+ return IRQ_HANDLED;
+}
+
+static int apple_mbox_poll_locked(struct apple_mbox *mbox)
+{
+ struct apple_mbox_msg msg;
+ int ret = 0;
+
+ u32 mbox_ctrl = readl_relaxed(mbox->regs + mbox->hw->i2a_control);
+
+ while (!(mbox_ctrl & mbox->hw->control_empty)) {
+ msg.msg0 = readq_relaxed(mbox->regs + mbox->hw->i2a_recv0);
+ msg.msg1 = FIELD_GET(
+ APPLE_MBOX_MSG1_MSG,
+ readq_relaxed(mbox->regs + mbox->hw->i2a_recv1));
+
+ mbox->rx(mbox, msg, mbox->cookie);
+ ret++;
+ mbox_ctrl = readl_relaxed(mbox->regs + mbox->hw->i2a_control);
+ }
+
+ /*
+ * The interrupt will keep firing even if there are no more messages
+ * unless we also acknowledge it at the mailbox level here.
+ * There's no race if a message comes in between the check in the while
+ * loop above and the ack below: If a new message arrives in between
+ * those two the interrupt will just fire again immediately after the
+ * ack since it's level triggered.
+ */
+ if (mbox->hw->has_irq_controls) {
+ writel_relaxed(mbox->hw->irq_bit_recv_not_empty,
+ mbox->regs + mbox->hw->irq_ack);
+ }
+
+ return ret;
+}
+
+static irqreturn_t apple_mbox_recv_irq(int irq, void *data)
+{
+ struct apple_mbox *mbox = data;
+
+ spin_lock(&mbox->rx_lock);
+ apple_mbox_poll_locked(mbox);
+ spin_unlock(&mbox->rx_lock);
+
+ return IRQ_HANDLED;
+}
+
+int apple_mbox_poll(struct apple_mbox *mbox)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&mbox->rx_lock, flags);
+ ret = apple_mbox_poll_locked(mbox);
+ spin_unlock_irqrestore(&mbox->rx_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(apple_mbox_poll);
+
+int apple_mbox_start(struct apple_mbox *mbox)
+{
+ int ret;
+
+ if (mbox->active)
+ return 0;
+
+ ret = pm_runtime_resume_and_get(mbox->dev);
+ if (ret)
+ return ret;
+
+ /*
+ * Only some variants of this mailbox HW provide interrupt control
+ * at the mailbox level. We therefore need to handle enabling/disabling
+ * interrupts at the main interrupt controller anyway for hardware that
+ * doesn't. Just always keep the interrupts we care about enabled at
+ * the mailbox level so that both hardware revisions behave almost
+ * the same.
+ */
+ if (mbox->hw->has_irq_controls) {
+ writel_relaxed(mbox->hw->irq_bit_recv_not_empty |
+ mbox->hw->irq_bit_send_empty,
+ mbox->regs + mbox->hw->irq_enable);
+ }
+
+ enable_irq(mbox->irq_recv_not_empty);
+ mbox->active = true;
+ return 0;
+}
+EXPORT_SYMBOL(apple_mbox_start);
+
+void apple_mbox_stop(struct apple_mbox *mbox)
+{
+ if (!mbox->active)
+ return;
+
+ mbox->active = false;
+ disable_irq(mbox->irq_recv_not_empty);
+ pm_runtime_mark_last_busy(mbox->dev);
+ pm_runtime_put_autosuspend(mbox->dev);
+}
+EXPORT_SYMBOL(apple_mbox_stop);
+
+struct apple_mbox *apple_mbox_get(struct device *dev, int index)
+{
+ struct of_phandle_args args;
+ struct platform_device *pdev;
+ struct apple_mbox *mbox;
+ int ret;
+
+ ret = of_parse_phandle_with_args(dev->of_node, "mboxes", "#mbox-cells",
+ index, &args);
+ if (ret || !args.np)
+ return ERR_PTR(ret);
+
+ pdev = of_find_device_by_node(args.np);
+ of_node_put(args.np);
+
+ if (!pdev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ mbox = platform_get_drvdata(pdev);
+ if (!mbox)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ if (!device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_CONSUMER))
+ return ERR_PTR(-ENODEV);
+
+ return mbox;
+}
+EXPORT_SYMBOL(apple_mbox_get);
+
+struct apple_mbox *apple_mbox_get_byname(struct device *dev, const char *name)
+{
+ int index;
+
+ index = of_property_match_string(dev->of_node, "mbox-names", name);
+ if (index < 0)
+ return ERR_PTR(index);
+
+ return apple_mbox_get(dev, index);
+}
+EXPORT_SYMBOL(apple_mbox_get_byname);
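Putting the lookup helpers together, a consumer installs its callback and starts the controller from probe. A sketch only (the example_* names and the "mbox" mbox-names entry are assumptions; this is essentially what the RTKit conversion below does):

    /* Called in IRQ context, once per received message. */
    static void example_rx(struct apple_mbox *mbox,
                           struct apple_mbox_msg msg, void *cookie)
    {
            /* cookie carries the consumer context installed in probe */
            dev_dbg(mbox->dev, "msg0=%016llx msg1=%08x\n", msg.msg0, msg.msg1);
    }

    static int example_attach(struct device *dev, void *ctx)
    {
            struct apple_mbox *mbox;

            mbox = apple_mbox_get_byname(dev, "mbox");
            if (IS_ERR(mbox))
                    return PTR_ERR(mbox);

            mbox->rx = example_rx;
            mbox->cookie = ctx;

            /* Resumes the controller and unmasks the recv-not-empty IRQ */
            return apple_mbox_start(mbox);
    }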
+
+static int apple_mbox_probe(struct platform_device *pdev)
+{
+ int ret;
+ char *irqname;
+ struct apple_mbox *mbox;
+ struct device *dev = &pdev->dev;
+
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ mbox->dev = &pdev->dev;
+ mbox->hw = of_device_get_match_data(dev);
+ if (!mbox->hw)
+ return -EINVAL;
+
+ mbox->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mbox->regs))
+ return PTR_ERR(mbox->regs);
+
+ mbox->irq_recv_not_empty =
+ platform_get_irq_byname(pdev, "recv-not-empty");
+ if (mbox->irq_recv_not_empty < 0)
+ return -ENODEV;
+
+ mbox->irq_send_empty = platform_get_irq_byname(pdev, "send-empty");
+ if (mbox->irq_send_empty < 0)
+ return -ENODEV;
+
+ spin_lock_init(&mbox->rx_lock);
+ spin_lock_init(&mbox->tx_lock);
+ init_completion(&mbox->tx_empty);
+
+ irqname = devm_kasprintf(dev, GFP_KERNEL, "%s-recv", dev_name(dev));
+ if (!irqname)
+ return -ENOMEM;
+
+ ret = devm_request_irq(dev, mbox->irq_recv_not_empty,
+ apple_mbox_recv_irq,
+ IRQF_NO_AUTOEN | IRQF_NO_SUSPEND, irqname, mbox);
+ if (ret)
+ return ret;
+
+ irqname = devm_kasprintf(dev, GFP_KERNEL, "%s-send", dev_name(dev));
+ if (!irqname)
+ return -ENOMEM;
+
+ ret = devm_request_irq(dev, mbox->irq_send_empty,
+ apple_mbox_send_empty_irq,
+ IRQF_NO_AUTOEN | IRQF_NO_SUSPEND, irqname, mbox);
+ if (ret)
+ return ret;
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, mbox);
+ return 0;
+}
+
+static const struct apple_mbox_hw apple_mbox_asc_hw = {
+ .control_full = APPLE_ASC_MBOX_CONTROL_FULL,
+ .control_empty = APPLE_ASC_MBOX_CONTROL_EMPTY,
+
+ .a2i_control = APPLE_ASC_MBOX_A2I_CONTROL,
+ .a2i_send0 = APPLE_ASC_MBOX_A2I_SEND0,
+ .a2i_send1 = APPLE_ASC_MBOX_A2I_SEND1,
+
+ .i2a_control = APPLE_ASC_MBOX_I2A_CONTROL,
+ .i2a_recv0 = APPLE_ASC_MBOX_I2A_RECV0,
+ .i2a_recv1 = APPLE_ASC_MBOX_I2A_RECV1,
+
+ .has_irq_controls = false,
+};
+
+static const struct apple_mbox_hw apple_mbox_m3_hw = {
+ .control_full = APPLE_M3_MBOX_CONTROL_FULL,
+ .control_empty = APPLE_M3_MBOX_CONTROL_EMPTY,
+
+ .a2i_control = APPLE_M3_MBOX_A2I_CONTROL,
+ .a2i_send0 = APPLE_M3_MBOX_A2I_SEND0,
+ .a2i_send1 = APPLE_M3_MBOX_A2I_SEND1,
+
+ .i2a_control = APPLE_M3_MBOX_I2A_CONTROL,
+ .i2a_recv0 = APPLE_M3_MBOX_I2A_RECV0,
+ .i2a_recv1 = APPLE_M3_MBOX_I2A_RECV1,
+
+ .has_irq_controls = true,
+ .irq_enable = APPLE_M3_MBOX_IRQ_ENABLE,
+ .irq_ack = APPLE_M3_MBOX_IRQ_ACK,
+ .irq_bit_recv_not_empty = APPLE_M3_MBOX_IRQ_I2A_NOT_EMPTY,
+ .irq_bit_send_empty = APPLE_M3_MBOX_IRQ_A2I_EMPTY,
+};
+
+static const struct of_device_id apple_mbox_of_match[] = {
+ { .compatible = "apple,asc-mailbox-v4", .data = &apple_mbox_asc_hw },
+ { .compatible = "apple,m3-mailbox-v2", .data = &apple_mbox_m3_hw },
+ {}
+};
+MODULE_DEVICE_TABLE(of, apple_mbox_of_match);
+
+static struct platform_driver apple_mbox_driver = {
+ .driver = {
+ .name = "apple-mailbox",
+ .of_match_table = apple_mbox_of_match,
+ },
+ .probe = apple_mbox_probe,
+};
+module_platform_driver(apple_mbox_driver);
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_DESCRIPTION("Apple Mailbox driver");
diff --git a/drivers/soc/apple/mailbox.h b/drivers/soc/apple/mailbox.h
new file mode 100644
index 000000000000..f73a8913da95
--- /dev/null
+++ b/drivers/soc/apple/mailbox.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Apple mailbox message format
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+#ifndef _APPLE_MAILBOX_H_
+#define _APPLE_MAILBOX_H_
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+/* encodes a single 96-bit message sent over the single channel */
+struct apple_mbox_msg {
+ u64 msg0;
+ u32 msg1;
+};
+
+struct apple_mbox {
+ struct device *dev;
+ void __iomem *regs;
+ const struct apple_mbox_hw *hw;
+ bool active;
+
+ int irq_recv_not_empty;
+ int irq_send_empty;
+
+ spinlock_t rx_lock;
+ spinlock_t tx_lock;
+
+ struct completion tx_empty;
+
+ /** Receive callback for incoming messages */
+ void (*rx)(struct apple_mbox *mbox, struct apple_mbox_msg msg, void *cookie);
+ void *cookie;
+};
+
+struct apple_mbox *apple_mbox_get(struct device *dev, int index);
+struct apple_mbox *apple_mbox_get_byname(struct device *dev, const char *name);
+
+int apple_mbox_start(struct apple_mbox *mbox);
+void apple_mbox_stop(struct apple_mbox *mbox);
+int apple_mbox_poll(struct apple_mbox *mbox);
+int apple_mbox_send(struct apple_mbox *mbox, struct apple_mbox_msg msg,
+ bool atomic);
+
+#endif
diff --git a/drivers/soc/apple/rtkit-internal.h b/drivers/soc/apple/rtkit-internal.h
index 24bd619ec5e4..27c9fa745fd5 100644
--- a/drivers/soc/apple/rtkit-internal.h
+++ b/drivers/soc/apple/rtkit-internal.h
@@ -7,18 +7,17 @@
#ifndef _APPLE_RTKIT_INTERAL_H
#define _APPLE_RTKIT_INTERAL_H
-#include <linux/apple-mailbox.h>
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/soc/apple/rtkit.h>
#include <linux/workqueue.h>
+#include "mailbox.h"
#define APPLE_RTKIT_APP_ENDPOINT_START 0x20
#define APPLE_RTKIT_MAX_ENDPOINTS 0x100
@@ -28,10 +27,7 @@ struct apple_rtkit {
const struct apple_rtkit_ops *ops;
struct device *dev;
- const char *mbox_name;
- int mbox_idx;
- struct mbox_client mbox_cl;
- struct mbox_chan *mbox_chan;
+ struct apple_mbox *mbox;
struct completion epmap_completion;
struct completion iop_pwr_ack_completion;
diff --git a/drivers/soc/apple/rtkit.c b/drivers/soc/apple/rtkit.c
index d9f19dc99da5..e6d940292c9f 100644
--- a/drivers/soc/apple/rtkit.c
+++ b/drivers/soc/apple/rtkit.c
@@ -72,11 +72,6 @@ enum {
#define APPLE_RTKIT_MIN_SUPPORTED_VERSION 11
#define APPLE_RTKIT_MAX_SUPPORTED_VERSION 12
-struct apple_rtkit_msg {
- struct completion *completion;
- struct apple_mbox_msg mbox_msg;
-};
-
struct apple_rtkit_rx_work {
struct apple_rtkit *rtk;
u8 ep;
@@ -550,12 +545,12 @@ static void apple_rtkit_rx_work(struct work_struct *work)
kfree(rtk_work);
}
-static void apple_rtkit_rx(struct mbox_client *cl, void *mssg)
+static void apple_rtkit_rx(struct apple_mbox *mbox, struct apple_mbox_msg msg,
+ void *cookie)
{
- struct apple_rtkit *rtk = container_of(cl, struct apple_rtkit, mbox_cl);
- struct apple_mbox_msg *msg = mssg;
+ struct apple_rtkit *rtk = cookie;
struct apple_rtkit_rx_work *work;
- u8 ep = msg->msg1;
+ u8 ep = msg.msg1;
/*
* The message was read from a MMIO FIFO and we have to make
@@ -571,7 +566,7 @@ static void apple_rtkit_rx(struct mbox_client *cl, void *mssg)
if (ep >= APPLE_RTKIT_APP_ENDPOINT_START &&
rtk->ops->recv_message_early &&
- rtk->ops->recv_message_early(rtk->cookie, ep, msg->msg0))
+ rtk->ops->recv_message_early(rtk->cookie, ep, msg.msg0))
return;
work = kzalloc(sizeof(*work), GFP_ATOMIC);
@@ -580,30 +575,18 @@ static void apple_rtkit_rx(struct mbox_client *cl, void *mssg)
work->rtk = rtk;
work->ep = ep;
- work->msg = msg->msg0;
+ work->msg = msg.msg0;
INIT_WORK(&work->work, apple_rtkit_rx_work);
queue_work(rtk->wq, &work->work);
}
-static void apple_rtkit_tx_done(struct mbox_client *cl, void *mssg, int r)
-{
- struct apple_rtkit_msg *msg =
- container_of(mssg, struct apple_rtkit_msg, mbox_msg);
-
- if (r == -ETIME)
- return;
-
- if (msg->completion)
- complete(msg->completion);
- kfree(msg);
-}
-
int apple_rtkit_send_message(struct apple_rtkit *rtk, u8 ep, u64 message,
struct completion *completion, bool atomic)
{
- struct apple_rtkit_msg *msg;
- int ret;
- gfp_t flags;
+ struct apple_mbox_msg msg = {
+ .msg0 = message,
+ .msg1 = ep,
+ };
if (rtk->crashed)
return -EINVAL;
@@ -611,19 +594,6 @@ int apple_rtkit_send_message(struct apple_rtkit *rtk, u8 ep, u64 message,
!apple_rtkit_is_running(rtk))
return -EINVAL;
- if (atomic)
- flags = GFP_ATOMIC;
- else
- flags = GFP_KERNEL;
-
- msg = kzalloc(sizeof(*msg), flags);
- if (!msg)
- return -ENOMEM;
-
- msg->mbox_msg.msg0 = message;
- msg->mbox_msg.msg1 = ep;
- msg->completion = completion;
-
/*
* The message will be sent with a MMIO write. We need the barrier
* here to ensure any previous writes to buffers are visible to the
@@ -631,51 +601,13 @@ int apple_rtkit_send_message(struct apple_rtkit *rtk, u8 ep, u64 message,
*/
dma_wmb();
- ret = mbox_send_message(rtk->mbox_chan, &msg->mbox_msg);
- if (ret < 0) {
- kfree(msg);
- return ret;
- }
-
- return 0;
+ return apple_mbox_send(rtk->mbox, msg, atomic);
}
EXPORT_SYMBOL_GPL(apple_rtkit_send_message);
-int apple_rtkit_send_message_wait(struct apple_rtkit *rtk, u8 ep, u64 message,
- unsigned long timeout, bool atomic)
-{
- DECLARE_COMPLETION_ONSTACK(completion);
- int ret;
- long t;
-
- ret = apple_rtkit_send_message(rtk, ep, message, &completion, atomic);
- if (ret < 0)
- return ret;
-
- if (atomic) {
- ret = mbox_flush(rtk->mbox_chan, timeout);
- if (ret < 0)
- return ret;
-
- if (try_wait_for_completion(&completion))
- return 0;
-
- return -ETIME;
- } else {
- t = wait_for_completion_interruptible_timeout(
- &completion, msecs_to_jiffies(timeout));
- if (t < 0)
- return t;
- else if (t == 0)
- return -ETIME;
- return 0;
- }
-}
-EXPORT_SYMBOL_GPL(apple_rtkit_send_message_wait);
-
int apple_rtkit_poll(struct apple_rtkit *rtk)
{
- return mbox_client_peek_data(rtk->mbox_chan);
+ return apple_mbox_poll(rtk->mbox);
}
EXPORT_SYMBOL_GPL(apple_rtkit_poll);
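The dma_wmb() documented above is what keeps the shared-memory protocols coherent: buffer writes must be visible to the co-processor before the MMIO doorbell that announces them. A hedged sketch of the pattern (the shared buffer and example_publish are hypothetical):

    /* Publish a request placed in a shared DMA buffer, then ring the bell. */
    static int example_publish(struct apple_rtkit *rtk, void *shmem,
                               const void *req, size_t len, u8 ep, u64 tag)
    {
            memcpy(shmem, req, len);        /* 1. fill the shared buffer */
            /* 2. apple_rtkit_send_message() issues dma_wmb() before its
             *    MMIO write, ordering the buffer fill against the message */
            return apple_rtkit_send_message(rtk, ep, tag, NULL, false);
    }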
@@ -697,20 +629,6 @@ int apple_rtkit_start_ep(struct apple_rtkit *rtk, u8 endpoint)
}
EXPORT_SYMBOL_GPL(apple_rtkit_start_ep);
-static int apple_rtkit_request_mbox_chan(struct apple_rtkit *rtk)
-{
- if (rtk->mbox_name)
- rtk->mbox_chan = mbox_request_channel_byname(&rtk->mbox_cl,
- rtk->mbox_name);
- else
- rtk->mbox_chan =
- mbox_request_channel(&rtk->mbox_cl, rtk->mbox_idx);
-
- if (IS_ERR(rtk->mbox_chan))
- return PTR_ERR(rtk->mbox_chan);
- return 0;
-}
-
struct apple_rtkit *apple_rtkit_init(struct device *dev, void *cookie,
const char *mbox_name, int mbox_idx,
const struct apple_rtkit_ops *ops)
@@ -736,13 +654,18 @@ struct apple_rtkit *apple_rtkit_init(struct device *dev, void *cookie,
bitmap_zero(rtk->endpoints, APPLE_RTKIT_MAX_ENDPOINTS);
set_bit(APPLE_RTKIT_EP_MGMT, rtk->endpoints);
- rtk->mbox_name = mbox_name;
- rtk->mbox_idx = mbox_idx;
- rtk->mbox_cl.dev = dev;
- rtk->mbox_cl.tx_block = false;
- rtk->mbox_cl.knows_txdone = false;
- rtk->mbox_cl.rx_callback = &apple_rtkit_rx;
- rtk->mbox_cl.tx_done = &apple_rtkit_tx_done;
+ if (mbox_name)
+ rtk->mbox = apple_mbox_get_byname(dev, mbox_name);
+ else
+ rtk->mbox = apple_mbox_get(dev, mbox_idx);
+
+ if (IS_ERR(rtk->mbox)) {
+ ret = PTR_ERR(rtk->mbox);
+ goto free_rtk;
+ }
+
+ rtk->mbox->rx = apple_rtkit_rx;
+ rtk->mbox->cookie = rtk;
rtk->wq = alloc_ordered_workqueue("rtkit-%s", WQ_MEM_RECLAIM,
dev_name(rtk->dev));
@@ -751,7 +674,7 @@ struct apple_rtkit *apple_rtkit_init(struct device *dev, void *cookie,
goto free_rtk;
}
- ret = apple_rtkit_request_mbox_chan(rtk);
+ ret = apple_mbox_start(rtk->mbox);
if (ret)
goto destroy_wq;
@@ -782,7 +705,7 @@ static int apple_rtkit_wait_for_completion(struct completion *c)
int apple_rtkit_reinit(struct apple_rtkit *rtk)
{
/* make sure we don't handle any messages while reinitializing */
- mbox_free_channel(rtk->mbox_chan);
+ apple_mbox_stop(rtk->mbox);
flush_workqueue(rtk->wq);
apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer);
@@ -806,7 +729,7 @@ int apple_rtkit_reinit(struct apple_rtkit *rtk)
rtk->iop_power_state = APPLE_RTKIT_PWR_STATE_OFF;
rtk->ap_power_state = APPLE_RTKIT_PWR_STATE_OFF;
- return apple_rtkit_request_mbox_chan(rtk);
+ return apple_mbox_start(rtk->mbox);
}
EXPORT_SYMBOL_GPL(apple_rtkit_reinit);
@@ -962,7 +885,7 @@ EXPORT_SYMBOL_GPL(apple_rtkit_wake);
void apple_rtkit_free(struct apple_rtkit *rtk)
{
- mbox_free_channel(rtk->mbox_chan);
+ apple_mbox_stop(rtk->mbox);
destroy_workqueue(rtk->wq);
apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer);
diff --git a/drivers/soc/fsl/qe/qmc.c b/drivers/soc/fsl/qe/qmc.c
index 92ec76c03965..f498db9abe35 100644
--- a/drivers/soc/fsl/qe/qmc.c
+++ b/drivers/soc/fsl/qe/qmc.c
@@ -166,7 +166,7 @@
struct qmc_xfer_desc {
union {
void (*tx_complete)(void *context);
- void (*rx_complete)(void *context, size_t length);
+ void (*rx_complete)(void *context, size_t length, unsigned int flags);
};
void *context;
};
@@ -175,9 +175,12 @@ struct qmc_chan {
struct list_head list;
unsigned int id;
struct qmc *qmc;
- void *__iomem s_param;
+ void __iomem *s_param;
enum qmc_mode mode;
+ spinlock_t ts_lock; /* Protect timeslots */
+ u64 tx_ts_mask_avail;
u64 tx_ts_mask;
+ u64 rx_ts_mask_avail;
u64 rx_ts_mask;
bool is_reverse_data;
@@ -203,9 +206,9 @@ struct qmc_chan {
struct qmc {
struct device *dev;
struct tsa_serial *tsa_serial;
- void *__iomem scc_regs;
- void *__iomem scc_pram;
- void *__iomem dpram;
+ void __iomem *scc_regs;
+ void __iomem *scc_pram;
+ void __iomem *dpram;
u16 scc_pram_offset;
cbd_t __iomem *bd_table;
dma_addr_t bd_dma_addr;
@@ -214,41 +217,47 @@ struct qmc {
u16 __iomem *int_curr;
dma_addr_t int_dma_addr;
size_t int_size;
+ bool is_tsa_64rxtx;
struct list_head chan_head;
struct qmc_chan *chans[64];
};
-static inline void qmc_write16(void *__iomem addr, u16 val)
+static inline void qmc_write16(void __iomem *addr, u16 val)
{
iowrite16be(val, addr);
}
-static inline u16 qmc_read16(void *__iomem addr)
+static inline u16 qmc_read16(void __iomem *addr)
{
return ioread16be(addr);
}
-static inline void qmc_setbits16(void *__iomem addr, u16 set)
+static inline void qmc_setbits16(void __iomem *addr, u16 set)
{
qmc_write16(addr, qmc_read16(addr) | set);
}
-static inline void qmc_clrbits16(void *__iomem addr, u16 clr)
+static inline void qmc_clrbits16(void __iomem *addr, u16 clr)
{
qmc_write16(addr, qmc_read16(addr) & ~clr);
}
-static inline void qmc_write32(void *__iomem addr, u32 val)
+static inline void qmc_clrsetbits16(void __iomem *addr, u16 clr, u16 set)
+{
+ qmc_write16(addr, (qmc_read16(addr) & ~clr) | set);
+}
+
+static inline void qmc_write32(void __iomem *addr, u32 val)
{
iowrite32be(val, addr);
}
-static inline u32 qmc_read32(void *__iomem addr)
+static inline u32 qmc_read32(void __iomem *addr)
{
return ioread32be(addr);
}
-static inline void qmc_setbits32(void *__iomem addr, u32 set)
+static inline void qmc_setbits32(void __iomem *addr, u32 set)
{
qmc_write32(addr, qmc_read32(addr) | set);
}
@@ -257,6 +266,7 @@ static inline void qmc_setbits32(void *__iomem addr, u32 set)
int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info)
{
struct tsa_serial_info tsa_info;
+ unsigned long flags;
int ret;
/* Retrieve info from the TSA related serial */
@@ -264,6 +274,8 @@ int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info)
if (ret)
return ret;
+ spin_lock_irqsave(&chan->ts_lock, flags);
+
info->mode = chan->mode;
info->rx_fs_rate = tsa_info.rx_fs_rate;
info->rx_bit_rate = tsa_info.rx_bit_rate;
@@ -272,10 +284,63 @@ int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info)
info->tx_bit_rate = tsa_info.tx_bit_rate;
info->nb_rx_ts = hweight64(chan->rx_ts_mask);
+ spin_unlock_irqrestore(&chan->ts_lock, flags);
+
return 0;
}
EXPORT_SYMBOL(qmc_chan_get_info);
+int qmc_chan_get_ts_info(struct qmc_chan *chan, struct qmc_chan_ts_info *ts_info)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->ts_lock, flags);
+
+ ts_info->rx_ts_mask_avail = chan->rx_ts_mask_avail;
+ ts_info->tx_ts_mask_avail = chan->tx_ts_mask_avail;
+ ts_info->rx_ts_mask = chan->rx_ts_mask;
+ ts_info->tx_ts_mask = chan->tx_ts_mask;
+
+ spin_unlock_irqrestore(&chan->ts_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(qmc_chan_get_ts_info);
+
+int qmc_chan_set_ts_info(struct qmc_chan *chan, const struct qmc_chan_ts_info *ts_info)
+{
+ unsigned long flags;
+ int ret;
+
+ /* Only a subset of available timeslots is allowed */
+ if ((ts_info->rx_ts_mask & chan->rx_ts_mask_avail) != ts_info->rx_ts_mask)
+ return -EINVAL;
+ if ((ts_info->tx_ts_mask & chan->tx_ts_mask_avail) != ts_info->tx_ts_mask)
+ return -EINVAL;
+
+ /* In case of common rx/tx table, rx/tx masks must be identical */
+ if (chan->qmc->is_tsa_64rxtx) {
+ if (ts_info->rx_ts_mask != ts_info->tx_ts_mask)
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&chan->ts_lock, flags);
+
+ if ((chan->tx_ts_mask != ts_info->tx_ts_mask && !chan->is_tx_stopped) ||
+ (chan->rx_ts_mask != ts_info->rx_ts_mask && !chan->is_rx_stopped)) {
+ dev_err(chan->qmc->dev, "Channel rx and/or tx not stopped\n");
+ ret = -EBUSY;
+ } else {
+ chan->tx_ts_mask = ts_info->tx_ts_mask;
+ chan->rx_ts_mask = ts_info->rx_ts_mask;
+ ret = 0;
+ }
+ spin_unlock_irqrestore(&chan->ts_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(qmc_chan_set_ts_info);
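The intended calling sequence for the new timeslot API is stop, renegotiate, restart. A sketch under stated assumptions (the chosen masks are arbitrary; they must be subsets of the *_avail masks, and on a shared 64-entry table Rx and Tx must stay identical):

    static int example_shrink_ts(struct qmc_chan *chan)
    {
            struct qmc_chan_ts_info ts_info;
            int ret;

            ret = qmc_chan_stop(chan, QMC_CHAN_ALL);
            if (ret)
                    return ret;

            ret = qmc_chan_get_ts_info(chan, &ts_info);
            if (ret)
                    return ret;

            /* Keep only the lowest available timeslot (illustrative) */
            ts_info.rx_ts_mask = ts_info.rx_ts_mask_avail & -ts_info.rx_ts_mask_avail;
            ts_info.tx_ts_mask = ts_info.tx_ts_mask_avail & -ts_info.tx_ts_mask_avail;

            ret = qmc_chan_set_ts_info(chan, &ts_info);
            if (ret)
                    return ret;

            return qmc_chan_start(chan, QMC_CHAN_ALL);
    }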
+
int qmc_chan_set_param(struct qmc_chan *chan, const struct qmc_chan_param *param)
{
if (param->mode != chan->mode)
@@ -318,7 +383,7 @@ int qmc_chan_write_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
{
struct qmc_xfer_desc *xfer_desc;
unsigned long flags;
- cbd_t *__iomem bd;
+ cbd_t __iomem *bd;
u16 ctrl;
int ret;
@@ -374,7 +439,7 @@ static void qmc_chan_write_done(struct qmc_chan *chan)
void (*complete)(void *context);
unsigned long flags;
void *context;
- cbd_t *__iomem bd;
+ cbd_t __iomem *bd;
u16 ctrl;
/*
@@ -421,11 +486,12 @@ end:
}
int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
- void (*complete)(void *context, size_t length), void *context)
+ void (*complete)(void *context, size_t length, unsigned int flags),
+ void *context)
{
struct qmc_xfer_desc *xfer_desc;
unsigned long flags;
- cbd_t *__iomem bd;
+ cbd_t __iomem *bd;
u16 ctrl;
int ret;
@@ -454,6 +520,10 @@ int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
xfer_desc->rx_complete = complete;
xfer_desc->context = context;
+ /* Clear previous status flags */
+ ctrl &= ~(QMC_BD_RX_L | QMC_BD_RX_F | QMC_BD_RX_LG | QMC_BD_RX_NO |
+ QMC_BD_RX_AB | QMC_BD_RX_CR);
+
/* Activate the descriptor */
ctrl |= (QMC_BD_RX_E | QMC_BD_RX_UB);
wmb(); /* Be sure to flush data before descriptor activation */
@@ -485,10 +555,10 @@ EXPORT_SYMBOL(qmc_chan_read_submit);
static void qmc_chan_read_done(struct qmc_chan *chan)
{
- void (*complete)(void *context, size_t size);
+ void (*complete)(void *context, size_t size, unsigned int flags);
struct qmc_xfer_desc *xfer_desc;
unsigned long flags;
- cbd_t *__iomem bd;
+ cbd_t __iomem *bd;
void *context;
u16 datalen;
u16 ctrl;
@@ -527,7 +597,23 @@ static void qmc_chan_read_done(struct qmc_chan *chan)
if (complete) {
spin_unlock_irqrestore(&chan->rx_lock, flags);
- complete(context, datalen);
+
+ /*
+ * Avoid conversion between internal hardware flags and
+ * the software API flags.
+ * -> Be sure that the software API flags are consistent
+ * with the hardware flags
+ */
+ BUILD_BUG_ON(QMC_RX_FLAG_HDLC_LAST != QMC_BD_RX_L);
+ BUILD_BUG_ON(QMC_RX_FLAG_HDLC_FIRST != QMC_BD_RX_F);
+ BUILD_BUG_ON(QMC_RX_FLAG_HDLC_OVF != QMC_BD_RX_LG);
+ BUILD_BUG_ON(QMC_RX_FLAG_HDLC_UNA != QMC_BD_RX_NO);
+ BUILD_BUG_ON(QMC_RX_FLAG_HDLC_ABORT != QMC_BD_RX_AB);
+ BUILD_BUG_ON(QMC_RX_FLAG_HDLC_CRC != QMC_BD_RX_CR);
+
+ complete(context, datalen,
+ ctrl & (QMC_BD_RX_L | QMC_BD_RX_F | QMC_BD_RX_LG |
+ QMC_BD_RX_NO | QMC_BD_RX_AB | QMC_BD_RX_CR));
spin_lock_irqsave(&chan->rx_lock, flags);
}
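With the added flags argument, an HDLC consumer can classify frames directly in its completion callback. A hypothetical sketch (example_ctx and example_deliver_frame are invented; the QMC_RX_FLAG_* names are the ones pinned by the BUILD_BUG_ON checks above):

    static void example_rx_complete(void *context, size_t length,
                                    unsigned int flags)
    {
            struct example_ctx *ctx = context;  /* assumed consumer context */

            if (flags & (QMC_RX_FLAG_HDLC_ABORT | QMC_RX_FLAG_HDLC_CRC)) {
                    ctx->rx_errors++;            /* drop aborted/bad-CRC frames */
                    return;
            }

            if (flags & QMC_RX_FLAG_HDLC_LAST)
                    example_deliver_frame(ctx, length);  /* hypothetical helper */
    }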
@@ -539,6 +625,155 @@ end:
spin_unlock_irqrestore(&chan->rx_lock, flags);
}
+static int qmc_chan_setup_tsa_64rxtx(struct qmc_chan *chan, const struct tsa_serial_info *info,
+ bool enable)
+{
+ unsigned int i;
+ u16 curr;
+ u16 val;
+
+ /*
+ * Use a common Tx/Rx 64 entries table.
+ * Tx and Rx related stuff must be identical
+ */
+ if (chan->tx_ts_mask != chan->rx_ts_mask) {
+ dev_err(chan->qmc->dev, "chan %u uses different Rx and Tx TS\n", chan->id);
+ return -EINVAL;
+ }
+
+ val = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(chan->id);
+
+ /* Check entries based on Rx stuff */
+ for (i = 0; i < info->nb_rx_ts; i++) {
+ if (!(chan->rx_ts_mask & (((u64)1) << i)))
+ continue;
+
+ curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2));
+ if (curr & QMC_TSA_VALID && (curr & ~QMC_TSA_WRAP) != val) {
+ dev_err(chan->qmc->dev, "chan %u TxRx entry %d already used\n",
+ chan->id, i);
+ return -EBUSY;
+ }
+ }
+
+ /* Set entries based on Rx stuff */
+ for (i = 0; i < info->nb_rx_ts; i++) {
+ if (!(chan->rx_ts_mask & (((u64)1) << i)))
+ continue;
+
+ qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2),
+ ~QMC_TSA_WRAP, enable ? val : 0x0000);
+ }
+
+ return 0;
+}
+
+static int qmc_chan_setup_tsa_32rx(struct qmc_chan *chan, const struct tsa_serial_info *info,
+ bool enable)
+{
+ unsigned int i;
+ u16 curr;
+ u16 val;
+
+ /* Use a Rx 32 entries table */
+
+ val = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(chan->id);
+
+ /* Check entries based on Rx stuff */
+ for (i = 0; i < info->nb_rx_ts; i++) {
+ if (!(chan->rx_ts_mask & (((u64)1) << i)))
+ continue;
+
+ curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2));
+ if (curr & QMC_TSA_VALID && (curr & ~QMC_TSA_WRAP) != val) {
+ dev_err(chan->qmc->dev, "chan %u Rx entry %d already used\n",
+ chan->id, i);
+ return -EBUSY;
+ }
+ }
+
+ /* Set entries based on Rx stuff */
+ for (i = 0; i < info->nb_rx_ts; i++) {
+ if (!(chan->rx_ts_mask & (((u64)1) << i)))
+ continue;
+
+ qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2),
+ ~QMC_TSA_WRAP, enable ? val : 0x0000);
+ }
+
+ return 0;
+}
+
+static int qmc_chan_setup_tsa_32tx(struct qmc_chan *chan, const struct tsa_serial_info *info,
+ bool enable)
+{
+ unsigned int i;
+ u16 curr;
+ u16 val;
+
+ /* Use a Tx 32 entries table */
+
+ val = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(chan->id);
+
+ /* Check entries based on Tx stuff */
+ for (i = 0; i < info->nb_tx_ts; i++) {
+ if (!(chan->tx_ts_mask & (((u64)1) << i)))
+ continue;
+
+ curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATTX + (i * 2));
+ if (curr & QMC_TSA_VALID && (curr & ~QMC_TSA_WRAP) != val) {
+ dev_err(chan->qmc->dev, "chan %u Tx entry %d already used\n",
+ chan->id, i);
+ return -EBUSY;
+ }
+ }
+
+ /* Set entries based on Tx stuff */
+ for (i = 0; i < info->nb_tx_ts; i++) {
+ if (!(chan->tx_ts_mask & (((u64)1) << i)))
+ continue;
+
+ qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATTX + (i * 2),
+ ~QMC_TSA_WRAP, enable ? val : 0x0000);
+ }
+
+ return 0;
+}
+
+static int qmc_chan_setup_tsa_tx(struct qmc_chan *chan, bool enable)
+{
+ struct tsa_serial_info info;
+ int ret;
+
+ /* Retrieve info from the TSA related serial */
+ ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
+ if (ret)
+ return ret;
+
+ /* Setup entries */
+ if (chan->qmc->is_tsa_64rxtx)
+ return qmc_chan_setup_tsa_64rxtx(chan, &info, enable);
+
+ return qmc_chan_setup_tsa_32tx(chan, &info, enable);
+}
+
+static int qmc_chan_setup_tsa_rx(struct qmc_chan *chan, bool enable)
+{
+ struct tsa_serial_info info;
+ int ret;
+
+ /* Retrieve info from the TSA related serial */
+ ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
+ if (ret)
+ return ret;
+
+ /* Setup entries */
+ if (chan->qmc->is_tsa_64rxtx)
+ return qmc_chan_setup_tsa_64rxtx(chan, &info, enable);
+
+ return qmc_chan_setup_tsa_32rx(chan, &info, enable);
+}
+
static int qmc_chan_command(struct qmc_chan *chan, u8 qmc_opcode)
{
return cpm_command(chan->id << 2, (qmc_opcode << 4) | 0x0E);
@@ -551,6 +786,12 @@ static int qmc_chan_stop_rx(struct qmc_chan *chan)
spin_lock_irqsave(&chan->rx_lock, flags);
+ if (chan->is_rx_stopped) {
+ /* The channel is already stopped -> simply return ok */
+ ret = 0;
+ goto end;
+ }
+
/* Send STOP RECEIVE command */
ret = qmc_chan_command(chan, 0x0);
if (ret) {
@@ -561,6 +802,15 @@ static int qmc_chan_stop_rx(struct qmc_chan *chan)
chan->is_rx_stopped = true;
+ if (!chan->qmc->is_tsa_64rxtx || chan->is_tx_stopped) {
+ ret = qmc_chan_setup_tsa_rx(chan, false);
+ if (ret) {
+ dev_err(chan->qmc->dev, "chan %u: Disable tsa entries failed (%d)\n",
+ chan->id, ret);
+ goto end;
+ }
+ }
+
end:
spin_unlock_irqrestore(&chan->rx_lock, flags);
return ret;
@@ -573,6 +823,12 @@ static int qmc_chan_stop_tx(struct qmc_chan *chan)
spin_lock_irqsave(&chan->tx_lock, flags);
+ if (chan->is_tx_stopped) {
+ /* The channel is already stopped -> simply return ok */
+ ret = 0;
+ goto end;
+ }
+
/* Send STOP TRANSMIT command */
ret = qmc_chan_command(chan, 0x1);
if (ret) {
@@ -583,37 +839,114 @@ static int qmc_chan_stop_tx(struct qmc_chan *chan)
chan->is_tx_stopped = true;
+ if (!chan->qmc->is_tsa_64rxtx || chan->is_rx_stopped) {
+ ret = qmc_chan_setup_tsa_tx(chan, false);
+ if (ret) {
+ dev_err(chan->qmc->dev, "chan %u: Disable tsa entries failed (%d)\n",
+ chan->id, ret);
+ goto end;
+ }
+ }
+
end:
spin_unlock_irqrestore(&chan->tx_lock, flags);
return ret;
}
+static int qmc_chan_start_rx(struct qmc_chan *chan);
+
int qmc_chan_stop(struct qmc_chan *chan, int direction)
{
- int ret;
+ bool is_rx_rollback_needed = false;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&chan->ts_lock, flags);
if (direction & QMC_CHAN_READ) {
+ is_rx_rollback_needed = !chan->is_rx_stopped;
ret = qmc_chan_stop_rx(chan);
if (ret)
- return ret;
+ goto end;
}
if (direction & QMC_CHAN_WRITE) {
ret = qmc_chan_stop_tx(chan);
- if (ret)
- return ret;
+ if (ret) {
+ /* Restart rx if needed */
+ if (is_rx_rollback_needed)
+ qmc_chan_start_rx(chan);
+ goto end;
+ }
}
- return 0;
+end:
+ spin_unlock_irqrestore(&chan->ts_lock, flags);
+ return ret;
}
EXPORT_SYMBOL(qmc_chan_stop);
-static void qmc_chan_start_rx(struct qmc_chan *chan)
+static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan)
+{
+ struct tsa_serial_info info;
+ u16 first_rx, last_tx;
+ u16 trnsync;
+ int ret;
+
+ /* Retrieve info from the TSA related serial */
+ ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
+ if (ret)
+ return ret;
+
+ /* Find the first Rx TS allocated to the channel */
+ first_rx = chan->rx_ts_mask ? __ffs64(chan->rx_ts_mask) + 1 : 0;
+
+ /* Find the last Tx TS allocated to the channel */
+ last_tx = fls64(chan->tx_ts_mask);
+
+ trnsync = 0;
+ if (info.nb_rx_ts)
+ trnsync |= QMC_SPE_TRNSYNC_RX((first_rx % info.nb_rx_ts) * 2);
+ if (info.nb_tx_ts)
+ trnsync |= QMC_SPE_TRNSYNC_TX((last_tx % info.nb_tx_ts) * 2);
+
+ qmc_write16(chan->s_param + QMC_SPE_TRNSYNC, trnsync);
+
+ dev_dbg(qmc->dev, "chan %u: trnsync=0x%04x, rx %u/%u 0x%llx, tx %u/%u 0x%llx\n",
+ chan->id, trnsync,
+ first_rx, info.nb_rx_ts, chan->rx_ts_mask,
+ last_tx, info.nb_tx_ts, chan->tx_ts_mask);
+
+ return 0;
+}
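Worked example for the TRNSYNC computation above, assuming a serial with nb_rx_ts = nb_tx_ts = 8: with rx_ts_mask = 0x0c the first allocated Rx TS is bit 2, so first_rx = __ffs64(0x0c) + 1 = 3 and the Rx half becomes (3 % 8) * 2 = 6; with tx_ts_mask = 0x0c, last_tx = fls64(0x0c) = 4 and the Tx half becomes (4 % 8) * 2 = 8.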
+
+static int qmc_chan_start_rx(struct qmc_chan *chan)
{
unsigned long flags;
+ int ret;
spin_lock_irqsave(&chan->rx_lock, flags);
+ if (!chan->is_rx_stopped) {
+ /* The channel is already started -> simply return ok */
+ ret = 0;
+ goto end;
+ }
+
+ ret = qmc_chan_setup_tsa_rx(chan, true);
+ if (ret) {
+ dev_err(chan->qmc->dev, "chan %u: Enable tsa entries failed (%d)\n",
+ chan->id, ret);
+ goto end;
+ }
+
+ ret = qmc_setup_chan_trnsync(chan->qmc, chan);
+ if (ret) {
+ dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n",
+ chan->id, ret);
+ goto end;
+ }
+
/* Restart the receiver */
if (chan->mode == QMC_TRANSPARENT)
qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
@@ -624,15 +957,38 @@ static void qmc_chan_start_rx(struct qmc_chan *chan)
chan->is_rx_stopped = false;
+end:
spin_unlock_irqrestore(&chan->rx_lock, flags);
+ return ret;
}
-static void qmc_chan_start_tx(struct qmc_chan *chan)
+static int qmc_chan_start_tx(struct qmc_chan *chan)
{
unsigned long flags;
+ int ret;
spin_lock_irqsave(&chan->tx_lock, flags);
+ if (!chan->is_tx_stopped) {
+ /* The channel is already started -> simply return ok */
+ ret = 0;
+ goto end;
+ }
+
+ ret = qmc_chan_setup_tsa_tx(chan, true);
+ if (ret) {
+ dev_err(chan->qmc->dev, "chan %u: Enable tsa entries failed (%d)\n",
+ chan->id, ret);
+ goto end;
+ }
+
+ ret = qmc_setup_chan_trnsync(chan->qmc, chan);
+ if (ret) {
+ dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n",
+ chan->id, ret);
+ goto end;
+ }
+
/*
* Enable channel transmitter as it could be disabled if
* qmc_chan_reset() was called.
@@ -644,18 +1000,39 @@ static void qmc_chan_start_tx(struct qmc_chan *chan)
chan->is_tx_stopped = false;
+end:
spin_unlock_irqrestore(&chan->tx_lock, flags);
+ return ret;
}
int qmc_chan_start(struct qmc_chan *chan, int direction)
{
- if (direction & QMC_CHAN_READ)
- qmc_chan_start_rx(chan);
+ bool is_rx_rollback_needed = false;
+ unsigned long flags;
+ int ret = 0;
- if (direction & QMC_CHAN_WRITE)
- qmc_chan_start_tx(chan);
+ spin_lock_irqsave(&chan->ts_lock, flags);
- return 0;
+ if (direction & QMC_CHAN_READ) {
+ is_rx_rollback_needed = chan->is_rx_stopped;
+ ret = qmc_chan_start_rx(chan);
+ if (ret)
+ goto end;
+ }
+
+ if (direction & QMC_CHAN_WRITE) {
+ ret = qmc_chan_start_tx(chan);
+ if (ret) {
+ /* Restop rx if needed */
+ if (is_rx_rollback_needed)
+ qmc_chan_stop_rx(chan);
+ goto end;
+ }
+ }
+
+end:
+ spin_unlock_irqrestore(&chan->ts_lock, flags);
+ return ret;
}
EXPORT_SYMBOL(qmc_chan_start);
@@ -663,7 +1040,7 @@ static void qmc_chan_reset_rx(struct qmc_chan *chan)
{
struct qmc_xfer_desc *xfer_desc;
unsigned long flags;
- cbd_t *__iomem bd;
+ cbd_t __iomem *bd;
u16 ctrl;
spin_lock_irqsave(&chan->rx_lock, flags);
@@ -685,7 +1062,6 @@ static void qmc_chan_reset_rx(struct qmc_chan *chan)
qmc_read16(chan->s_param + QMC_SPE_RBASE));
chan->rx_pending = 0;
- chan->is_rx_stopped = false;
spin_unlock_irqrestore(&chan->rx_lock, flags);
}
@@ -694,7 +1070,7 @@ static void qmc_chan_reset_tx(struct qmc_chan *chan)
{
struct qmc_xfer_desc *xfer_desc;
unsigned long flags;
- cbd_t *__iomem bd;
+ cbd_t __iomem *bd;
u16 ctrl;
spin_lock_irqsave(&chan->tx_lock, flags);
@@ -741,10 +1117,7 @@ EXPORT_SYMBOL(qmc_chan_reset);
static int qmc_check_chans(struct qmc *qmc)
{
struct tsa_serial_info info;
- bool is_one_table = false;
struct qmc_chan *chan;
- u64 tx_ts_mask = 0;
- u64 rx_ts_mask = 0;
u64 tx_ts_assigned_mask;
u64 rx_ts_assigned_mask;
int ret;
@@ -768,38 +1141,21 @@ static int qmc_check_chans(struct qmc *qmc)
dev_err(qmc->dev, "Number of TSA Tx/Rx TS assigned are not equal\n");
return -EINVAL;
}
- is_one_table = true;
}
tx_ts_assigned_mask = info.nb_tx_ts == 64 ? U64_MAX : (((u64)1) << info.nb_tx_ts) - 1;
rx_ts_assigned_mask = info.nb_rx_ts == 64 ? U64_MAX : (((u64)1) << info.nb_rx_ts) - 1;
list_for_each_entry(chan, &qmc->chan_head, list) {
- if (chan->tx_ts_mask > tx_ts_assigned_mask) {
- dev_err(qmc->dev, "chan %u uses TSA unassigned Tx TS\n", chan->id);
- return -EINVAL;
- }
- if (tx_ts_mask & chan->tx_ts_mask) {
- dev_err(qmc->dev, "chan %u uses an already used Tx TS\n", chan->id);
+ if (chan->tx_ts_mask_avail > tx_ts_assigned_mask) {
+ dev_err(qmc->dev, "chan %u can use TSA unassigned Tx TS\n", chan->id);
return -EINVAL;
}
- if (chan->rx_ts_mask > rx_ts_assigned_mask) {
- dev_err(qmc->dev, "chan %u uses TSA unassigned Rx TS\n", chan->id);
- return -EINVAL;
- }
- if (rx_ts_mask & chan->rx_ts_mask) {
- dev_err(qmc->dev, "chan %u uses an already used Rx TS\n", chan->id);
+ if (chan->rx_ts_mask_avail > rx_ts_assigned_mask) {
+ dev_err(qmc->dev, "chan %u can use TSA unassigned Rx TS\n", chan->id);
return -EINVAL;
}
-
- if (is_one_table && (chan->tx_ts_mask != chan->rx_ts_mask)) {
- dev_err(qmc->dev, "chan %u uses different Rx and Tx TS\n", chan->id);
- return -EINVAL;
- }
-
- tx_ts_mask |= chan->tx_ts_mask;
- rx_ts_mask |= chan->rx_ts_mask;
}
return 0;
@@ -845,6 +1201,7 @@ static int qmc_of_parse_chans(struct qmc *qmc, struct device_node *np)
}
chan->id = chan_id;
+ spin_lock_init(&chan->ts_lock);
spin_lock_init(&chan->rx_lock);
spin_lock_init(&chan->tx_lock);
@@ -855,7 +1212,8 @@ static int qmc_of_parse_chans(struct qmc *qmc, struct device_node *np)
of_node_put(chan_np);
return ret;
}
- chan->tx_ts_mask = ts_mask;
+ chan->tx_ts_mask_avail = ts_mask;
+ chan->tx_ts_mask = chan->tx_ts_mask_avail;
ret = of_property_read_u64(chan_np, "fsl,rx-ts-mask", &ts_mask);
if (ret) {
@@ -864,7 +1222,8 @@ static int qmc_of_parse_chans(struct qmc *qmc, struct device_node *np)
of_node_put(chan_np);
return ret;
}
- chan->rx_ts_mask = ts_mask;
+ chan->rx_ts_mask_avail = ts_mask;
+ chan->rx_ts_mask = chan->rx_ts_mask_avail;
mode = "transparent";
ret = of_property_read_string(chan_np, "fsl,operational-mode", &mode);
@@ -895,9 +1254,8 @@ static int qmc_of_parse_chans(struct qmc *qmc, struct device_node *np)
return qmc_check_chans(qmc);
}
-static int qmc_setup_tsa_64rxtx(struct qmc *qmc, const struct tsa_serial_info *info)
+static int qmc_init_tsa_64rxtx(struct qmc *qmc, const struct tsa_serial_info *info)
{
- struct qmc_chan *chan;
unsigned int i;
u16 val;
@@ -906,23 +1264,12 @@ static int qmc_setup_tsa_64rxtx(struct qmc *qmc, const struct tsa_serial_info *i
* Everything was previously checked, Tx and Rx related stuffs are
* identical -> Used Rx related stuff to build the table
*/
+ qmc->is_tsa_64rxtx = true;
/* Invalidate all entries */
for (i = 0; i < 64; i++)
qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), 0x0000);
- /* Set entries based on Rx stuff*/
- list_for_each_entry(chan, &qmc->chan_head, list) {
- for (i = 0; i < info->nb_rx_ts; i++) {
- if (!(chan->rx_ts_mask & (((u64)1) << i)))
- continue;
-
- val = QMC_TSA_VALID | QMC_TSA_MASK |
- QMC_TSA_CHANNEL(chan->id);
- qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), val);
- }
- }
-
/* Set Wrap bit on last entry */
qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATRX + ((info->nb_rx_ts - 1) * 2),
QMC_TSA_WRAP);
@@ -937,9 +1284,8 @@ static int qmc_setup_tsa_64rxtx(struct qmc *qmc, const struct tsa_serial_info *i
return 0;
}
-static int qmc_setup_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info *info)
+static int qmc_init_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info *info)
{
- struct qmc_chan *chan;
unsigned int i;
u16 val;
@@ -947,6 +1293,7 @@ static int qmc_setup_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info
* Use a Tx 32 entries table and a Rx 32 entries table.
* Everything was previously checked.
*/
+ qmc->is_tsa_64rxtx = false;
/* Invalidate all entries */
for (i = 0; i < 32; i++) {
@@ -954,28 +1301,6 @@ static int qmc_setup_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info
qmc_write16(qmc->scc_pram + QMC_GBL_TSATTX + (i * 2), 0x0000);
}
- /* Set entries based on Rx and Tx stuff*/
- list_for_each_entry(chan, &qmc->chan_head, list) {
- /* Rx part */
- for (i = 0; i < info->nb_rx_ts; i++) {
- if (!(chan->rx_ts_mask & (((u64)1) << i)))
- continue;
-
- val = QMC_TSA_VALID | QMC_TSA_MASK |
- QMC_TSA_CHANNEL(chan->id);
- qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), val);
- }
- /* Tx part */
- for (i = 0; i < info->nb_tx_ts; i++) {
- if (!(chan->tx_ts_mask & (((u64)1) << i)))
- continue;
-
- val = QMC_TSA_VALID | QMC_TSA_MASK |
- QMC_TSA_CHANNEL(chan->id);
- qmc_write16(qmc->scc_pram + QMC_GBL_TSATTX + (i * 2), val);
- }
- }
-
/* Set Wrap bit on last entries */
qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATRX + ((info->nb_rx_ts - 1) * 2),
QMC_TSA_WRAP);
@@ -995,7 +1320,7 @@ static int qmc_setup_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info
return 0;
}
-static int qmc_setup_tsa(struct qmc *qmc)
+static int qmc_init_tsa(struct qmc *qmc)
{
struct tsa_serial_info info;
int ret;
@@ -1006,46 +1331,12 @@ static int qmc_setup_tsa(struct qmc *qmc)
return ret;
/*
- * Setup one common 64 entries table or two 32 entries (one for Tx and
- * one for Tx) according to assigned TS numbers.
+ * Initialize one common 64 entries table or two 32 entries (one for Tx
+ * and one for Rx) according to assigned TS numbers.
*/
return ((info.nb_tx_ts > 32) || (info.nb_rx_ts > 32)) ?
- qmc_setup_tsa_64rxtx(qmc, &info) :
- qmc_setup_tsa_32rx_32tx(qmc, &info);
-}
-
-static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan)
-{
- struct tsa_serial_info info;
- u16 first_rx, last_tx;
- u16 trnsync;
- int ret;
-
- /* Retrieve info from the TSA related serial */
- ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
- if (ret)
- return ret;
-
- /* Find the first Rx TS allocated to the channel */
- first_rx = chan->rx_ts_mask ? __ffs64(chan->rx_ts_mask) + 1 : 0;
-
- /* Find the last Tx TS allocated to the channel */
- last_tx = fls64(chan->tx_ts_mask);
-
- trnsync = 0;
- if (info.nb_rx_ts)
- trnsync |= QMC_SPE_TRNSYNC_RX((first_rx % info.nb_rx_ts) * 2);
- if (info.nb_tx_ts)
- trnsync |= QMC_SPE_TRNSYNC_TX((last_tx % info.nb_tx_ts) * 2);
-
- qmc_write16(chan->s_param + QMC_SPE_TRNSYNC, trnsync);
-
- dev_dbg(qmc->dev, "chan %u: trnsync=0x%04x, rx %u/%u 0x%llx, tx %u/%u 0x%llx\n",
- chan->id, trnsync,
- first_rx, info.nb_rx_ts, chan->rx_ts_mask,
- last_tx, info.nb_tx_ts, chan->tx_ts_mask);
-
- return 0;
+ qmc_init_tsa_64rxtx(qmc, &info) :
+ qmc_init_tsa_32rx_32tx(qmc, &info);
}
static int qmc_setup_chan(struct qmc *qmc, struct qmc_chan *chan)
@@ -1367,7 +1658,7 @@ static int qmc_probe(struct platform_device *pdev)
qmc_write32(qmc->scc_pram + QMC_GBL_C_MASK32, 0xDEBB20E3);
qmc_write16(qmc->scc_pram + QMC_GBL_C_MASK16, 0xF0B8);
- ret = qmc_setup_tsa(qmc);
+ ret = qmc_init_tsa(qmc);
if (ret)
goto err_tsa_serial_disconnect;
@@ -1405,8 +1696,16 @@ static int qmc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, qmc);
+ /* Populate channel related devices */
+ ret = devm_of_platform_populate(qmc->dev);
+ if (ret)
+ goto err_disable_txrx;
+
return 0;
+err_disable_txrx:
+ qmc_setbits32(qmc->scc_regs + SCC_GSMRL, 0);
+
err_disable_intr:
qmc_write16(qmc->scc_regs + SCC_SCCM, 0);
@@ -1445,26 +1744,16 @@ static struct platform_driver qmc_driver = {
};
module_platform_driver(qmc_driver);
-struct qmc_chan *qmc_chan_get_byphandle(struct device_node *np, const char *phandle_name)
+static struct qmc_chan *qmc_chan_get_from_qmc(struct device_node *qmc_np, unsigned int chan_index)
{
- struct of_phandle_args out_args;
struct platform_device *pdev;
struct qmc_chan *qmc_chan;
struct qmc *qmc;
- int ret;
-
- ret = of_parse_phandle_with_fixed_args(np, phandle_name, 1, 0,
- &out_args);
- if (ret < 0)
- return ERR_PTR(ret);
- if (!of_match_node(qmc_driver.driver.of_match_table, out_args.np)) {
- of_node_put(out_args.np);
+ if (!of_match_node(qmc_driver.driver.of_match_table, qmc_np))
return ERR_PTR(-EINVAL);
- }
- pdev = of_find_device_by_node(out_args.np);
- of_node_put(out_args.np);
+ pdev = of_find_device_by_node(qmc_np);
if (!pdev)
return ERR_PTR(-ENODEV);
@@ -1474,17 +1763,12 @@ struct qmc_chan *qmc_chan_get_byphandle(struct device_node *np, const char *phan
return ERR_PTR(-EPROBE_DEFER);
}
- if (out_args.args_count != 1) {
+ if (chan_index >= ARRAY_SIZE(qmc->chans)) {
platform_device_put(pdev);
return ERR_PTR(-EINVAL);
}
- if (out_args.args[0] >= ARRAY_SIZE(qmc->chans)) {
- platform_device_put(pdev);
- return ERR_PTR(-EINVAL);
- }
-
- qmc_chan = qmc->chans[out_args.args[0]];
+ qmc_chan = qmc->chans[chan_index];
if (!qmc_chan) {
platform_device_put(pdev);
return ERR_PTR(-ENOENT);
@@ -1492,8 +1776,44 @@ struct qmc_chan *qmc_chan_get_byphandle(struct device_node *np, const char *phan
return qmc_chan;
}
+
+struct qmc_chan *qmc_chan_get_byphandle(struct device_node *np, const char *phandle_name)
+{
+ struct of_phandle_args out_args;
+ struct qmc_chan *qmc_chan;
+ int ret;
+
+ ret = of_parse_phandle_with_fixed_args(np, phandle_name, 1, 0,
+ &out_args);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ if (out_args.args_count != 1) {
+ of_node_put(out_args.np);
+ return ERR_PTR(-EINVAL);
+ }
+
+ qmc_chan = qmc_chan_get_from_qmc(out_args.np, out_args.args[0]);
+ of_node_put(out_args.np);
+ return qmc_chan;
+}
EXPORT_SYMBOL(qmc_chan_get_byphandle);
+struct qmc_chan *qmc_chan_get_bychild(struct device_node *np)
+{
+ struct device_node *qmc_np;
+ u32 chan_index;
+ int ret;
+
+ qmc_np = np->parent;
+ ret = of_property_read_u32(np, "reg", &chan_index);
+ if (ret)
+ return ERR_PTR(-EINVAL);
+
+ return qmc_chan_get_from_qmc(qmc_np, chan_index);
+}
+EXPORT_SYMBOL(qmc_chan_get_bychild);
+
void qmc_chan_put(struct qmc_chan *chan)
{
put_device(chan->qmc->dev);
@@ -1530,6 +1850,28 @@ struct qmc_chan *devm_qmc_chan_get_byphandle(struct device *dev,
}
EXPORT_SYMBOL(devm_qmc_chan_get_byphandle);
+struct qmc_chan *devm_qmc_chan_get_bychild(struct device *dev,
+ struct device_node *np)
+{
+ struct qmc_chan *qmc_chan;
+ struct qmc_chan **dr;
+
+ dr = devres_alloc(devm_qmc_chan_release, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return ERR_PTR(-ENOMEM);
+
+ qmc_chan = qmc_chan_get_bychild(np);
+ if (!IS_ERR(qmc_chan)) {
+ *dr = qmc_chan;
+ devres_add(dev, dr);
+ } else {
+ devres_free(dr);
+ }
+
+ return qmc_chan;
+}
+EXPORT_SYMBOL(devm_qmc_chan_get_bychild);
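The byphandle and bychild lookups differ only in where the channel index comes from: a phandle argument versus the child node's own "reg" property. A consumer sketch for the new bychild variant (driver and binding are hypothetical):

    /* Probe of a driver bound directly to a QMC channel child node. */
    static int example_probe(struct platform_device *pdev)
    {
            struct qmc_chan *chan;

            /* pdev->dev.of_node is a child of the QMC node; its "reg"
             * property selects the channel index. */
            chan = devm_qmc_chan_get_bychild(&pdev->dev, pdev->dev.of_node);
            if (IS_ERR(chan))
                    return PTR_ERR(chan);

            return 0;
    }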
+
MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
MODULE_DESCRIPTION("CPM QMC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/soc/fsl/qe/tsa.c b/drivers/soc/fsl/qe/tsa.c
index 3f9981335590..6c5741cf5e9d 100644
--- a/drivers/soc/fsl/qe/tsa.c
+++ b/drivers/soc/fsl/qe/tsa.c
@@ -98,9 +98,9 @@
#define TSA_SIRP 0x10
struct tsa_entries_area {
- void *__iomem entries_start;
- void *__iomem entries_next;
- void *__iomem last_entry;
+ void __iomem *entries_start;
+ void __iomem *entries_next;
+ void __iomem *last_entry;
};
struct tsa_tdm {
@@ -117,8 +117,8 @@ struct tsa_tdm {
struct tsa {
struct device *dev;
- void *__iomem si_regs;
- void *__iomem si_ram;
+ void __iomem *si_regs;
+ void __iomem *si_ram;
resource_size_t si_ram_sz;
spinlock_t lock;
int tdms; /* TSA_TDMx ORed */
@@ -135,27 +135,27 @@ static inline struct tsa *tsa_serial_get_tsa(struct tsa_serial *tsa_serial)
return container_of(tsa_serial, struct tsa, serials[tsa_serial->id]);
}
-static inline void tsa_write32(void *__iomem addr, u32 val)
+static inline void tsa_write32(void __iomem *addr, u32 val)
{
iowrite32be(val, addr);
}
-static inline void tsa_write8(void *__iomem addr, u32 val)
+static inline void tsa_write8(void __iomem *addr, u32 val)
{
iowrite8(val, addr);
}
-static inline u32 tsa_read32(void *__iomem addr)
+static inline u32 tsa_read32(void __iomem *addr)
{
return ioread32be(addr);
}
-static inline void tsa_clrbits32(void *__iomem addr, u32 clr)
+static inline void tsa_clrbits32(void __iomem *addr, u32 clr)
{
tsa_write32(addr, tsa_read32(addr) & ~clr);
}
-static inline void tsa_clrsetbits32(void *__iomem addr, u32 clr, u32 set)
+static inline void tsa_clrsetbits32(void __iomem *addr, u32 clr, u32 set)
{
tsa_write32(addr, (tsa_read32(addr) & ~clr) | set);
}
@@ -313,7 +313,7 @@ static u32 tsa_serial_id2csel(struct tsa *tsa, u32 serial_id)
static int tsa_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
u32 count, u32 serial_id)
{
- void *__iomem addr;
+ void __iomem *addr;
u32 left;
u32 val;
u32 cnt;
diff --git a/drivers/soc/hisilicon/kunpeng_hccs.c b/drivers/soc/hisilicon/kunpeng_hccs.c
index e31791659560..9ff70b38e5e9 100644
--- a/drivers/soc/hisilicon/kunpeng_hccs.c
+++ b/drivers/soc/hisilicon/kunpeng_hccs.c
@@ -85,8 +85,10 @@ static int hccs_get_pcc_chan_id(struct hccs_dev *hdev)
struct hccs_register_ctx ctx = {0};
acpi_status status;
- if (!acpi_has_method(handle, METHOD_NAME__CRS))
+ if (!acpi_has_method(handle, METHOD_NAME__CRS)) {
+ dev_err(hdev->dev, "No _CRS method.\n");
return -ENODEV;
+ }
ctx.dev = hdev->dev;
status = acpi_walk_resources(handle, METHOD_NAME__CRS,
@@ -108,6 +110,14 @@ static void hccs_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
*(u8 *)msg, ret);
}
+static void hccs_pcc_rx_callback(struct mbox_client *cl, void *mssg)
+{
+ struct hccs_mbox_client_info *cl_info =
+ container_of(cl, struct hccs_mbox_client_info, client);
+
+ complete(&cl_info->done);
+}
+
static void hccs_unregister_pcc_channel(struct hccs_dev *hdev)
{
struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
@@ -129,6 +139,9 @@ static int hccs_register_pcc_channel(struct hccs_dev *hdev)
cl->tx_block = false;
cl->knows_txdone = true;
cl->tx_done = hccs_chan_tx_done;
+ cl->rx_callback = hdev->verspec_data->rx_callback;
+ init_completion(&cl_info->done);
+
pcc_chan = pcc_mbox_request_channel(cl, hdev->chan_id);
if (IS_ERR(pcc_chan)) {
dev_err(dev, "PPC channel request failed.\n");
@@ -145,17 +158,23 @@ static int hccs_register_pcc_channel(struct hccs_dev *hdev)
*/
cl_info->deadline_us =
HCCS_PCC_CMD_WAIT_RETRIES_NUM * pcc_chan->latency;
- if (cl_info->mbox_chan->mbox->txdone_irq) {
+ if (!hdev->verspec_data->has_txdone_irq &&
+ cl_info->mbox_chan->mbox->txdone_irq) {
dev_err(dev, "PCC IRQ in PCCT is enabled.\n");
rc = -EINVAL;
goto err_mbx_channel_free;
+ } else if (hdev->verspec_data->has_txdone_irq &&
+ !cl_info->mbox_chan->mbox->txdone_irq) {
+ dev_err(dev, "PCC IRQ in PCCT isn't supported.\n");
+ rc = -EINVAL;
+ goto err_mbx_channel_free;
}
if (pcc_chan->shmem_base_addr) {
cl_info->pcc_comm_addr = ioremap(pcc_chan->shmem_base_addr,
pcc_chan->shmem_size);
if (!cl_info->pcc_comm_addr) {
- dev_err(dev, "Failed to ioremap PCC communication region for channel-%d.\n",
+ dev_err(dev, "Failed to ioremap PCC communication region for channel-%u.\n",
hdev->chan_id);
rc = -ENOMEM;
goto err_mbx_channel_free;
@@ -170,7 +189,7 @@ out:
return rc;
}
-static int hccs_check_chan_cmd_complete(struct hccs_dev *hdev)
+static int hccs_wait_cmd_complete_by_poll(struct hccs_dev *hdev)
{
struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
struct acpi_pcct_shared_memory __iomem *comm_base =
@@ -192,30 +211,74 @@ static int hccs_check_chan_cmd_complete(struct hccs_dev *hdev)
return ret;
}
+static int hccs_wait_cmd_complete_by_irq(struct hccs_dev *hdev)
+{
+ struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
+
+ if (!wait_for_completion_timeout(&cl_info->done,
+ usecs_to_jiffies(cl_info->deadline_us))) {
+ dev_err(hdev->dev, "PCC command executed timeout!\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static inline void hccs_fill_pcc_shared_mem_region(struct hccs_dev *hdev,
+ u8 cmd,
+ struct hccs_desc *desc,
+ void __iomem *comm_space,
+ u16 space_size)
+{
+ struct acpi_pcct_shared_memory tmp = {
+ .signature = PCC_SIGNATURE | hdev->chan_id,
+ .command = cmd,
+ .status = 0,
+ };
+
+ memcpy_toio(hdev->cl_info.pcc_comm_addr, (void *)&tmp,
+ sizeof(struct acpi_pcct_shared_memory));
+
+ /* Copy the message to the PCC comm space */
+ memcpy_toio(comm_space, (void *)desc, space_size);
+}
+
+static inline void hccs_fill_ext_pcc_shared_mem_region(struct hccs_dev *hdev,
+ u8 cmd,
+ struct hccs_desc *desc,
+ void __iomem *comm_space,
+ u16 space_size)
+{
+ struct acpi_pcct_ext_pcc_shared_memory tmp = {
+ .signature = PCC_SIGNATURE | hdev->chan_id,
+ .flags = PCC_CMD_COMPLETION_NOTIFY,
+ .length = HCCS_PCC_SHARE_MEM_BYTES,
+ .command = cmd,
+ };
+
+ memcpy_toio(hdev->cl_info.pcc_comm_addr, (void *)&tmp,
+ sizeof(struct acpi_pcct_ext_pcc_shared_memory));
+
+ /* Copy the message to the PCC comm space */
+ memcpy_toio(comm_space, (void *)desc, space_size);
+}
+
static int hccs_pcc_cmd_send(struct hccs_dev *hdev, u8 cmd,
struct hccs_desc *desc)
{
+ const struct hccs_verspecific_data *verspec_data = hdev->verspec_data;
struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
- void __iomem *comm_space = cl_info->pcc_comm_addr +
- sizeof(struct acpi_pcct_shared_memory);
struct hccs_fw_inner_head *fw_inner_head;
- struct acpi_pcct_shared_memory tmp = {0};
- u16 comm_space_size;
+ void __iomem *comm_space;
+ u16 space_size;
int ret;
- /* Write signature for this subspace */
- tmp.signature = PCC_SIGNATURE | hdev->chan_id;
- /* Write to the shared command region */
- tmp.command = cmd;
- /* Clear cmd complete bit */
- tmp.status = 0;
- memcpy_toio(cl_info->pcc_comm_addr, (void *)&tmp,
- sizeof(struct acpi_pcct_shared_memory));
-
- /* Copy the message to the PCC comm space */
- comm_space_size = HCCS_PCC_SHARE_MEM_BYTES -
- sizeof(struct acpi_pcct_shared_memory);
- memcpy_toio(comm_space, (void *)desc, comm_space_size);
+ comm_space = cl_info->pcc_comm_addr + verspec_data->shared_mem_size;
+ space_size = HCCS_PCC_SHARE_MEM_BYTES - verspec_data->shared_mem_size;
+ verspec_data->fill_pcc_shared_mem(hdev, cmd, desc,
+ comm_space, space_size);
+ if (verspec_data->has_txdone_irq)
+ reinit_completion(&cl_info->done);
/* Ring doorbell */
ret = mbox_send_message(cl_info->mbox_chan, &cmd);
@@ -225,13 +288,12 @@ static int hccs_pcc_cmd_send(struct hccs_dev *hdev, u8 cmd,
goto end;
}
- /* Wait for completion */
- ret = hccs_check_chan_cmd_complete(hdev);
+ ret = verspec_data->wait_cmd_complete(hdev);
if (ret)
goto end;
/* Copy response data */
- memcpy_fromio((void *)desc, comm_space, comm_space_size);
+ memcpy_fromio((void *)desc, comm_space, space_size);
fw_inner_head = &desc->rsp.fw_inner_head;
if (fw_inner_head->retStatus) {
dev_err(hdev->dev, "Execute PCC command failed, error code = %u.\n",
@@ -240,7 +302,10 @@ static int hccs_pcc_cmd_send(struct hccs_dev *hdev, u8 cmd,
}
end:
- mbox_client_txdone(cl_info->mbox_chan, ret);
+ if (verspec_data->has_txdone_irq)
+ mbox_chan_txdone(cl_info->mbox_chan, ret);
+ else
+ mbox_client_txdone(cl_info->mbox_chan, ret);
return ret;
}
@@ -527,7 +592,6 @@ out:
static int hccs_query_all_port_info_on_platform(struct hccs_dev *hdev)
{
-
struct device *dev = hdev->dev;
struct hccs_chip_info *chip;
struct hccs_die_info *die;
@@ -1097,7 +1161,7 @@ static int hccs_create_hccs_dir(struct hccs_dev *hdev,
int ret;
ret = kobject_init_and_add(&port->kobj, &hccs_port_type,
- &die->kobj, "hccs%d", port->port_id);
+ &die->kobj, "hccs%u", port->port_id);
if (ret) {
kobject_put(&port->kobj);
return ret;
@@ -1115,7 +1179,7 @@ static int hccs_create_die_dir(struct hccs_dev *hdev,
u16 i;
ret = kobject_init_and_add(&die->kobj, &hccs_die_type,
- &chip->kobj, "die%d", die->die_id);
+ &chip->kobj, "die%u", die->die_id);
if (ret) {
kobject_put(&die->kobj);
return ret;
@@ -1125,7 +1189,7 @@ static int hccs_create_die_dir(struct hccs_dev *hdev,
port = &die->ports[i];
ret = hccs_create_hccs_dir(hdev, die, port);
if (ret) {
- dev_err(hdev->dev, "create hccs%d dir failed.\n",
+ dev_err(hdev->dev, "create hccs%u dir failed.\n",
port->port_id);
goto err;
}
@@ -1147,7 +1211,7 @@ static int hccs_create_chip_dir(struct hccs_dev *hdev,
u16 id;
ret = kobject_init_and_add(&chip->kobj, &hccs_chip_type,
- &hdev->dev->kobj, "chip%d", chip->chip_id);
+ &hdev->dev->kobj, "chip%u", chip->chip_id);
if (ret) {
kobject_put(&chip->kobj);
return ret;
@@ -1178,7 +1242,7 @@ static int hccs_create_topo_dirs(struct hccs_dev *hdev)
chip = &hdev->chips[id];
ret = hccs_create_chip_dir(hdev, chip);
if (ret) {
- dev_err(hdev->dev, "init chip%d dir failed!\n", id);
+ dev_err(hdev->dev, "init chip%u dir failed!\n", id);
goto err;
}
}
@@ -1212,6 +1276,11 @@ static int hccs_probe(struct platform_device *pdev)
hdev->dev = &pdev->dev;
platform_set_drvdata(pdev, hdev);
+ /*
+ * This can never fail because the driver and the device have
+ * already been matched.
+ */
+ hdev->verspec_data = acpi_device_get_match_data(hdev->dev);
+
mutex_init(&hdev->lock);
rc = hccs_get_pcc_chan_id(hdev);
if (rc)
@@ -1248,9 +1317,26 @@ static void hccs_remove(struct platform_device *pdev)
hccs_unregister_pcc_channel(hdev);
}
+static const struct hccs_verspecific_data hisi04b1_verspec_data = {
+ .rx_callback = NULL,
+ .wait_cmd_complete = hccs_wait_cmd_complete_by_poll,
+ .fill_pcc_shared_mem = hccs_fill_pcc_shared_mem_region,
+ .shared_mem_size = sizeof(struct acpi_pcct_shared_memory),
+ .has_txdone_irq = false,
+};
+
+static const struct hccs_verspecific_data hisi04b2_verspec_data = {
+ .rx_callback = hccs_pcc_rx_callback,
+ .wait_cmd_complete = hccs_wait_cmd_complete_by_irq,
+ .fill_pcc_shared_mem = hccs_fill_ext_pcc_shared_mem_region,
+ .shared_mem_size = sizeof(struct acpi_pcct_ext_pcc_shared_memory),
+ .has_txdone_irq = true,
+};
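With the version-specific callbacks factored out, supporting a further part reduces to one more ops table plus an ACPI ID. A purely hypothetical sketch ("HISI04B3" is invented) for a part that behaves like HISI04B2:

    static const struct hccs_verspecific_data hisi04b3_verspec_data = {
            .rx_callback = hccs_pcc_rx_callback,
            .wait_cmd_complete = hccs_wait_cmd_complete_by_irq,
            .fill_pcc_shared_mem = hccs_fill_ext_pcc_shared_mem_region,
            .shared_mem_size = sizeof(struct acpi_pcct_ext_pcc_shared_memory),
            .has_txdone_irq = true,
    };

    /* plus { "HISI04B3", (unsigned long)&hisi04b3_verspec_data } in
     * hccs_acpi_match[] (hypothetical ID). */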
+
static const struct acpi_device_id hccs_acpi_match[] = {
- { "HISI04B1"},
- { ""},
+ { "HISI04B1", (unsigned long)&hisi04b1_verspec_data},
+ { "HISI04B2", (unsigned long)&hisi04b2_verspec_data},
+ { }
};
MODULE_DEVICE_TABLE(acpi, hccs_acpi_match);
diff --git a/drivers/soc/hisilicon/kunpeng_hccs.h b/drivers/soc/hisilicon/kunpeng_hccs.h
index 6012d2776028..c3adbc01b471 100644
--- a/drivers/soc/hisilicon/kunpeng_hccs.h
+++ b/drivers/soc/hisilicon/kunpeng_hccs.h
@@ -51,11 +51,26 @@ struct hccs_mbox_client_info {
struct pcc_mbox_chan *pcc_chan;
u64 deadline_us;
void __iomem *pcc_comm_addr;
+ struct completion done;
+};
+
+struct hccs_desc;
+
+struct hccs_verspecific_data {
+ void (*rx_callback)(struct mbox_client *cl, void *mssg);
+ int (*wait_cmd_complete)(struct hccs_dev *hdev);
+ void (*fill_pcc_shared_mem)(struct hccs_dev *hdev,
+ u8 cmd, struct hccs_desc *desc,
+ void __iomem *comm_space,
+ u16 space_size);
+ u16 shared_mem_size;
+ bool has_txdone_irq;
};
struct hccs_dev {
struct device *dev;
struct acpi_device *acpi_dev;
+ const struct hccs_verspecific_data *verspec_data;
u64 caps;
u8 chip_num;
struct hccs_chip_info *chips;