author		Olof Johansson <olof@lixom.net>	2018-12-12 13:32:27 -0800
committer	Olof Johansson <olof@lixom.net>	2018-12-12 13:32:27 -0800
commit		8986f4c2172f9d31f3eaa4e0e4a1efb2a7cbefb5 (patch)
tree		ec5dd27c15f77c3b011400010ee3832a3bfc16f4 /drivers/soc
parent		bb7ece5fc43fb597ff965c79ddd62c3806f3e319 (diff)
parent		576f1b4bc80220e1f88f1de5ecb25d99a6e9fa04 (diff)
Merge tag 'v4.20-next-soc' of https://git.kernel.org/pub/scm/linux/kernel/git/matthias.bgg/linux into next/drivers
Add helper functions to create and send commands to the global command engine (GCE) device using the command queue driver (cmdq).

* tag 'v4.20-next-soc' of https://git.kernel.org/pub/scm/linux/kernel/git/matthias.bgg/linux:
  soc: mediatek: Add Mediatek CMDQ helper

Signed-off-by: Olof Johansson <olof@lixom.net>
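The new helpers give consumer drivers a small API: request a GCE mailbox channel, build a command packet, append write and wait-for-event instructions, then flush the packet to the hardware. A minimal, hypothetical consumer sketch (the subsys value 0x14, offset 0x100, and event ID 0 are placeholders, not values taken from this patch):

#include <linux/soc/mediatek/mtk-cmdq.h>

static int example_flush(struct device *dev)
{
	struct cmdq_client *client;
	struct cmdq_pkt *pkt;
	int err;

	/* channel index 0 of this device, no software watchdog */
	client = cmdq_mbox_create(dev, 0, CMDQ_NO_TIMEOUT);
	if (IS_ERR(client))
		return PTR_ERR(client);

	pkt = cmdq_pkt_create(client, PAGE_SIZE);
	if (IS_ERR(pkt)) {
		err = PTR_ERR(pkt);
		goto out_mbox;
	}

	err = cmdq_pkt_wfe(pkt, 0);	/* wait for a hardware event */
	if (!err)
		err = cmdq_pkt_write(pkt, 0x1, 0x14, 0x100);
	if (!err)
		err = cmdq_pkt_flush(pkt);	/* blocks until the GCE EOC IRQ */

	cmdq_pkt_destroy(pkt);
out_mbox:
	cmdq_mbox_destroy(client);
	return err;
}

If an append returns -ENOMEM, pkt->cmd_buf_size has kept growing past the allocated size and records how large a buffer to pass to a second cmdq_pkt_create() call.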
Diffstat (limited to 'drivers/soc')
-rw-r--r--	drivers/soc/mediatek/Kconfig		12
-rw-r--r--	drivers/soc/mediatek/Makefile		1
-rw-r--r--	drivers/soc/mediatek/mtk-cmdq-helper.c	300
3 files changed, 313 insertions(+), 0 deletions(-)
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index a7d0667338f2..17bd7590464f 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -4,6 +4,18 @@
menu "MediaTek SoC drivers"
depends on ARCH_MEDIATEK || COMPILE_TEST
+config MTK_CMDQ
+ tristate "MediaTek CMDQ Support"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select MAILBOX
+ select MTK_CMDQ_MBOX
+ select MTK_INFRACFG
+ help
+ Say yes here to add support for the MediaTek Command Queue (CMDQ)
+ driver. The CMDQ is used to help read/write registers under tight
+ timing constraints, such as updating the display configuration
+ during vblank.
+
config MTK_INFRACFG
bool "MediaTek INFRACFG Support"
select REGMAP
diff --git a/drivers/soc/mediatek/Makefile b/drivers/soc/mediatek/Makefile
index 12998b08819e..64ce5eeaba32 100644
--- a/drivers/soc/mediatek/Makefile
+++ b/drivers/soc/mediatek/Makefile
@@ -1,3 +1,4 @@
+obj-$(CONFIG_MTK_CMDQ) += mtk-cmdq-helper.o
obj-$(CONFIG_MTK_INFRACFG) += mtk-infracfg.o
obj-$(CONFIG_MTK_PMIC_WRAP) += mtk-pmic-wrap.o
obj-$(CONFIG_MTK_SCPSYS) += mtk-scpsys.o
diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c
new file mode 100644
index 000000000000..ff9fef5a032b
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-cmdq-helper.c
@@ -0,0 +1,300 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+
+#include <linux/completion.h>
+#include <linux/errno.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/mailbox_controller.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
+
+#define CMDQ_ARG_A_WRITE_MASK 0xffff
+#define CMDQ_WRITE_ENABLE_MASK BIT(0)
+#define CMDQ_EOC_IRQ_EN BIT(0)
+#define CMDQ_EOC_CMD ((u64)((CMDQ_CODE_EOC << CMDQ_OP_CODE_SHIFT)) \
+ << 32 | CMDQ_EOC_IRQ_EN)
+
+static void cmdq_client_timeout(struct timer_list *t)
+{
+ struct cmdq_client *client = from_timer(client, t, timer);
+
+ dev_err(client->client.dev, "cmdq timeout!\n");
+}
+
+struct cmdq_client *cmdq_mbox_create(struct device *dev, int index, u32 timeout)
+{
+ struct cmdq_client *client;
+
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return ERR_PTR(-ENOMEM);
+
+ client->timeout_ms = timeout;
+ if (timeout != CMDQ_NO_TIMEOUT) {
+ spin_lock_init(&client->lock);
+ timer_setup(&client->timer, cmdq_client_timeout, 0);
+ }
+ client->pkt_cnt = 0;
+ client->client.dev = dev;
+ client->client.tx_block = false;
+ client->chan = mbox_request_channel(&client->client, index);
+
+ if (IS_ERR(client->chan)) {
+ long err;
+
+ dev_err(dev, "failed to request channel\n");
+ err = PTR_ERR(client->chan);
+ kfree(client);
+
+ return ERR_PTR(err);
+ }
+
+ return client;
+}
+EXPORT_SYMBOL(cmdq_mbox_create);
+
+void cmdq_mbox_destroy(struct cmdq_client *client)
+{
+ if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
+ spin_lock(&client->lock);
+ del_timer_sync(&client->timer);
+ spin_unlock(&client->lock);
+ }
+ mbox_free_channel(client->chan);
+ kfree(client);
+}
+EXPORT_SYMBOL(cmdq_mbox_destroy);
+
+struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size)
+{
+ struct cmdq_pkt *pkt;
+ struct device *dev;
+ dma_addr_t dma_addr;
+
+ pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
+ if (!pkt)
+ return ERR_PTR(-ENOMEM);
+ pkt->va_base = kzalloc(size, GFP_KERNEL);
+ if (!pkt->va_base) {
+ kfree(pkt);
+ return ERR_PTR(-ENOMEM);
+ }
+ pkt->buf_size = size;
+ pkt->cl = (void *)client;
+
+ dev = client->chan->mbox->dev;
+ dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err(dev, "dma map failed, size=%zu\n", size);
+ kfree(pkt->va_base);
+ kfree(pkt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pkt->pa_base = dma_addr;
+
+ return pkt;
+}
+EXPORT_SYMBOL(cmdq_pkt_create);
+
+void cmdq_pkt_destroy(struct cmdq_pkt *pkt)
+{
+ struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
+
+ dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
+ DMA_TO_DEVICE);
+ kfree(pkt->va_base);
+ kfree(pkt);
+}
+EXPORT_SYMBOL(cmdq_pkt_destroy);
+
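+/*
+ * Each GCE instruction is a single 64-bit word: the opcode and arg_a are
+ * packed into the high 32 bits, arg_b into the low 32 bits.
+ */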
+static int cmdq_pkt_append_command(struct cmdq_pkt *pkt, enum cmdq_code code,
+ u32 arg_a, u32 arg_b)
+{
+ u64 *cmd_ptr;
+
+ if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) {
+ /*
+ * If the allocated buffer size (pkt->buf_size) is used up, the real
+ * required size (pkt->cmd_buf_size) still grows, so that the user
+ * knows how much memory is ultimately needed after appending all
+ * commands and flushing the command packet. Therefore, the user can
+ * call cmdq_pkt_create() again with the real required buffer size.
+ */
+ pkt->cmd_buf_size += CMDQ_INST_SIZE;
+ WARN_ONCE(1, "%s: buffer size %zu is too small!\n",
+ __func__, pkt->buf_size);
+ return -ENOMEM;
+ }
+ cmd_ptr = pkt->va_base + pkt->cmd_buf_size;
+ (*cmd_ptr) = (u64)((code << CMDQ_OP_CODE_SHIFT) | arg_a) << 32 | arg_b;
+ pkt->cmd_buf_size += CMDQ_INST_SIZE;
+
+ return 0;
+}
+
+int cmdq_pkt_write(struct cmdq_pkt *pkt, u32 value, u32 subsys, u32 offset)
+{
+ u32 arg_a = (offset & CMDQ_ARG_A_WRITE_MASK) |
+ (subsys << CMDQ_SUBSYS_SHIFT);
+
+ return cmdq_pkt_append_command(pkt, CMDQ_CODE_WRITE, arg_a, value);
+}
+EXPORT_SYMBOL(cmdq_pkt_write);
+
+int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u32 value,
+ u32 subsys, u32 offset, u32 mask)
+{
+ u32 offset_mask = offset;
+ int err = 0;
+
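+ /*
+ * A masked write is encoded as two instructions: a MASK instruction
+ * carrying the inverted mask, then a WRITE with CMDQ_WRITE_ENABLE_MASK
+ * (bit 0 of the offset) set so the GCE applies the preceding mask.
+ */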
+ if (mask != 0xffffffff) {
+ err = cmdq_pkt_append_command(pkt, CMDQ_CODE_MASK, 0, ~mask);
+ offset_mask |= CMDQ_WRITE_ENABLE_MASK;
+ }
+ err |= cmdq_pkt_write(pkt, value, subsys, offset_mask);
+
+ return err;
+}
+EXPORT_SYMBOL(cmdq_pkt_write_mask);
+
+int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u32 event)
+{
+ u32 arg_b;
+
+ if (event >= CMDQ_MAX_EVENT)
+ return -EINVAL;
+
+ /*
+ * WFE arg_b
+ * bit 0-11: wait value
+ * bit 15: 1 - wait, 0 - no wait
+ * bit 16-27: update value
+ * bit 31: 1 - update, 0 - no update
+ */
+ arg_b = CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE;
+
+ return cmdq_pkt_append_command(pkt, CMDQ_CODE_WFE, event, arg_b);
+}
+EXPORT_SYMBOL(cmdq_pkt_wfe);
+
+int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u32 event)
+{
+ if (event >= CMDQ_MAX_EVENT)
+ return -EINVAL;
+
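+ /* WFE with only the update bit set: writes update value 0, clearing the event */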
+ return cmdq_pkt_append_command(pkt, CMDQ_CODE_WFE, event,
+ CMDQ_WFE_UPDATE);
+}
+EXPORT_SYMBOL(cmdq_pkt_clear_event);
+
+static int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
+{
+ int err;
+
+ /* insert EOC and generate IRQ for each command iteration */
+ err = cmdq_pkt_append_command(pkt, CMDQ_CODE_EOC, 0, CMDQ_EOC_IRQ_EN);
+
+ /* JUMP to end */
+ err |= cmdq_pkt_append_command(pkt, CMDQ_CODE_JUMP, 0, CMDQ_JUMP_PASS);
+
+ return err;
+}
+
+static void cmdq_pkt_flush_async_cb(struct cmdq_cb_data data)
+{
+ struct cmdq_pkt *pkt = (struct cmdq_pkt *)data.data;
+ struct cmdq_task_cb *cb = &pkt->cb;
+ struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
+
+ if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&client->lock, flags);
+ if (--client->pkt_cnt == 0)
+ del_timer(&client->timer);
+ else
+ mod_timer(&client->timer, jiffies +
+ msecs_to_jiffies(client->timeout_ms));
+ spin_unlock_irqrestore(&client->lock, flags);
+ }
+
+ dma_sync_single_for_cpu(client->chan->mbox->dev, pkt->pa_base,
+ pkt->cmd_buf_size, DMA_TO_DEVICE);
+ if (cb->cb) {
+ data.data = cb->data;
+ cb->cb(data);
+ }
+}
+
+int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
+ void *data)
+{
+ int err;
+ unsigned long flags = 0;
+ struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
+
+ err = cmdq_pkt_finalize(pkt);
+ if (err < 0)
+ return err;
+
+ pkt->cb.cb = cb;
+ pkt->cb.data = data;
+ pkt->async_cb.cb = cmdq_pkt_flush_async_cb;
+ pkt->async_cb.data = pkt;
+
+ dma_sync_single_for_device(client->chan->mbox->dev, pkt->pa_base,
+ pkt->cmd_buf_size, DMA_TO_DEVICE);
+
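+ /*
+ * The client timer is a coarse watchdog: armed when the first packet
+ * goes in flight, then re-armed or deleted from the completion
+ * callback as packets retire (see cmdq_pkt_flush_async_cb()).
+ */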
+ if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
+ spin_lock_irqsave(&client->lock, flags);
+ if (client->pkt_cnt++ == 0)
+ mod_timer(&client->timer, jiffies +
+ msecs_to_jiffies(client->timeout_ms));
+ spin_unlock_irqrestore(&client->lock, flags);
+ }
+
+ mbox_send_message(client->chan, pkt);
+ /* We can send the next packet immediately, so just call txdone. */
+ mbox_client_txdone(client->chan, 0);
+
+ return 0;
+}
+EXPORT_SYMBOL(cmdq_pkt_flush_async);
+
+struct cmdq_flush_completion {
+ struct completion cmplt;
+ bool err;
+};
+
+static void cmdq_pkt_flush_cb(struct cmdq_cb_data data)
+{
+ struct cmdq_flush_completion *cmplt;
+
+ cmplt = (struct cmdq_flush_completion *)data.data;
+ cmplt->err = data.sta != CMDQ_CB_NORMAL;
+ complete(&cmplt->cmplt);
+}
+
+int cmdq_pkt_flush(struct cmdq_pkt *pkt)
+{
+ struct cmdq_flush_completion cmplt;
+ int err;
+
+ init_completion(&cmplt.cmplt);
+ err = cmdq_pkt_flush_async(pkt, cmdq_pkt_flush_cb, &cmplt);
+ if (err < 0)
+ return err;
+ wait_for_completion(&cmplt.cmplt);
+
+ return cmplt.err ? -EFAULT : 0;
+}
+EXPORT_SYMBOL(cmdq_pkt_flush);
+
+MODULE_LICENSE("GPL v2");
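For non-blocking submission, cmdq_pkt_flush_async() takes a callback instead. A hypothetical variant of the sketch above (example_cb is a placeholder name; this assumes the helper is done with the packet once the callback runs, which holds here since invoking cb->cb() is the last thing cmdq_pkt_flush_async_cb() does):

static void example_cb(struct cmdq_cb_data data)
{
	/* data.data is the last argument passed to cmdq_pkt_flush_async() */
	struct cmdq_pkt *pkt = data.data;

	if (data.sta != CMDQ_CB_NORMAL)
		pr_err("cmdq: flush failed\n");
	cmdq_pkt_destroy(pkt);
}

	/* in place of cmdq_pkt_flush(pkt): */
	err = cmdq_pkt_flush_async(pkt, example_cb, pkt);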