author     Linus Torvalds <torvalds@linux-foundation.org>  2017-11-15 13:39:18 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-11-15 13:39:18 -0800
commit     3c18767a45650009d02537677ffb7997bd402a2c (patch)
tree       bfe4d355208b0e0156367d9ca4dfa4a2c6d8d17f /drivers/mailbox
parent     19b9aaf8a5fa634b2f16c3a2cfa819b74991273e (diff)
parent     1f90a2162fb3cdfd9c44380bf16209af00f7acbe (diff)
Merge tag 'mailbox-v4.15' of git://git.linaro.org/landing-teams/working/fujitsu/integration
Pull mailbox updates from Jassi Brar:
 "Change to POLL api and fixes for FlexRM and OMAP driver.

  Summary:

   - Core: Prefer ACK method over POLL, if both supported

   - Test: use flag instead of special character

   - FlexRM: Usual driver internal minor churn

   - Omap: fix error path"

* tag 'mailbox-v4.15' of git://git.linaro.org/landing-teams/working/fujitsu/integration:
  mailbox/omap: unregister mbox class
  mailbox: mailbox-test: don't rely on rx_buffer content to signal data ready
  mailbox: reset txdone_method TXDONE_BY_POLL if client knows_txdone
  mailbox: Build Broadcom FlexRM driver as loadable module for iProc SOCs
  mailbox: bcm-flexrm-mailbox: Use common GPL comment header
  mailbox: bcm-flexrm-mailbox: add depends on ARCH_BCM_IPROC
  mailbox: bcm-flexrm-mailbox: Print ring number in errors and warnings
  mailbox: bcm-flexrm-mailbox: Fix FlexRM ring flush sequence
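The core change ("Prefer ACK method over POLL, if both supported") matters for clients that set knows_txdone: mbox_request_channel() now switches such a channel's txdone_method from TXDONE_BY_POLL to TXDONE_BY_ACK outright instead of OR-ing the two flags, and mbox_free_channel() restores TXDONE_BY_POLL when the channel is released. Below is a minimal client sketch of the knows_txdone / mbox_client_txdone() pairing the core keys off; it assumes a controller that would otherwise use TXDONE_BY_POLL, and the names example_cl / example_send and the use of channel index 0 are illustrative, not part of this series.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mailbox_client.h>

/*
 * Illustrative client: it tells the framework it will report TX completion
 * itself, so the core picks TXDONE_BY_ACK even though the controller
 * advertises TXDONE_BY_POLL.
 */
static struct mbox_client example_cl = {
	.tx_block	= false,	/* don't wait inside mbox_send_message() */
	.knows_txdone	= true,		/* completion reported via mbox_client_txdone() */
};

static int example_send(struct device *dev, void *msg)
{
	struct mbox_chan *chan;
	int ret;

	example_cl.dev = dev;
	chan = mbox_request_channel(&example_cl, 0);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = mbox_send_message(chan, msg);

	/*
	 * In a real driver this would run from wherever the remote's ACK is
	 * actually observed (an IRQ handler, a doorbell read, ...); calling
	 * it immediately here only keeps the sketch short.
	 */
	if (ret >= 0)
		mbox_client_txdone(chan, 0);

	mbox_free_channel(chan);
	return ret < 0 ? ret : 0;
}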
Diffstat (limited to 'drivers/mailbox')
-rw-r--r--  drivers/mailbox/Kconfig               |  3
-rw-r--r--  drivers/mailbox/bcm-flexrm-mailbox.c  | 66
-rw-r--r--  drivers/mailbox/mailbox-test.c        | 11
-rw-r--r--  drivers/mailbox/mailbox.c             |  4
-rw-r--r--  drivers/mailbox/omap-mailbox.c        |  6
-rw-r--r--  drivers/mailbox/pcc.c                 |  4
6 files changed, 66 insertions, 28 deletions
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index c5731e5e3c6c..ba2f1525f4ee 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -163,9 +163,10 @@ config BCM_PDC_MBOX
config BCM_FLEXRM_MBOX
tristate "Broadcom FlexRM Mailbox"
depends on ARM64
+ depends on ARCH_BCM_IPROC || COMPILE_TEST
depends on HAS_DMA
select GENERIC_MSI_IRQ_DOMAIN
- default ARCH_BCM_IPROC
+ default m if ARCH_BCM_IPROC
help
Mailbox implementation of the Broadcom FlexRM ring manager,
which provides access to various offload engines on Broadcom
diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
index ae6146311934..a8cf4333a68f 100644
--- a/drivers/mailbox/bcm-flexrm-mailbox.c
+++ b/drivers/mailbox/bcm-flexrm-mailbox.c
@@ -1,10 +1,18 @@
-/* Broadcom FlexRM Mailbox Driver
- *
+/*
* Copyright (C) 2017 Broadcom
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Broadcom FlexRM Mailbox Driver
*
* Each Broadcom FlexSparx4 offload engine is implemented as an
* extension to Broadcom FlexRM ring manager. The FlexRM ring
@@ -1116,8 +1124,8 @@ static int flexrm_process_completions(struct flexrm_ring *ring)
err = flexrm_cmpl_desc_to_error(desc);
if (err < 0) {
dev_warn(ring->mbox->dev,
- "got completion desc=0x%lx with error %d",
- (unsigned long)desc, err);
+ "ring%d got completion desc=0x%lx with error %d\n",
+ ring->num, (unsigned long)desc, err);
}
/* Determine request id from completion descriptor */
@@ -1127,8 +1135,8 @@ static int flexrm_process_completions(struct flexrm_ring *ring)
msg = ring->requests[reqid];
if (!msg) {
dev_warn(ring->mbox->dev,
- "null msg pointer for completion desc=0x%lx",
- (unsigned long)desc);
+ "ring%d null msg pointer for completion desc=0x%lx\n",
+ ring->num, (unsigned long)desc);
continue;
}
@@ -1238,7 +1246,9 @@ static int flexrm_startup(struct mbox_chan *chan)
ring->bd_base = dma_pool_alloc(ring->mbox->bd_pool,
GFP_KERNEL, &ring->bd_dma_base);
if (!ring->bd_base) {
- dev_err(ring->mbox->dev, "can't allocate BD memory\n");
+ dev_err(ring->mbox->dev,
+ "can't allocate BD memory for ring%d\n",
+ ring->num);
ret = -ENOMEM;
goto fail;
}
@@ -1261,7 +1271,9 @@ static int flexrm_startup(struct mbox_chan *chan)
ring->cmpl_base = dma_pool_alloc(ring->mbox->cmpl_pool,
GFP_KERNEL, &ring->cmpl_dma_base);
if (!ring->cmpl_base) {
- dev_err(ring->mbox->dev, "can't allocate completion memory\n");
+ dev_err(ring->mbox->dev,
+ "can't allocate completion memory for ring%d\n",
+ ring->num);
ret = -ENOMEM;
goto fail_free_bd_memory;
}
@@ -1269,7 +1281,8 @@ static int flexrm_startup(struct mbox_chan *chan)
/* Request IRQ */
if (ring->irq == UINT_MAX) {
- dev_err(ring->mbox->dev, "ring IRQ not available\n");
+ dev_err(ring->mbox->dev,
+ "ring%d IRQ not available\n", ring->num);
ret = -ENODEV;
goto fail_free_cmpl_memory;
}
@@ -1278,7 +1291,8 @@ static int flexrm_startup(struct mbox_chan *chan)
flexrm_irq_thread,
0, dev_name(ring->mbox->dev), ring);
if (ret) {
- dev_err(ring->mbox->dev, "failed to request ring IRQ\n");
+ dev_err(ring->mbox->dev,
+ "failed to request ring%d IRQ\n", ring->num);
goto fail_free_cmpl_memory;
}
ring->irq_requested = true;
@@ -1291,7 +1305,9 @@ static int flexrm_startup(struct mbox_chan *chan)
&ring->irq_aff_hint);
ret = irq_set_affinity_hint(ring->irq, &ring->irq_aff_hint);
if (ret) {
- dev_err(ring->mbox->dev, "failed to set IRQ affinity hint\n");
+ dev_err(ring->mbox->dev,
+ "failed to set IRQ affinity hint for ring%d\n",
+ ring->num);
goto fail_free_irq;
}
@@ -1365,8 +1381,8 @@ static void flexrm_shutdown(struct mbox_chan *chan)
/* Disable/inactivate ring */
writel_relaxed(0x0, ring->regs + RING_CONTROL);
- /* Flush ring with timeout of 1s */
- timeout = 1000;
+ /* Set ring flush state */
+ timeout = 1000; /* timeout of 1s */
writel_relaxed(BIT(CONTROL_FLUSH_SHIFT),
ring->regs + RING_CONTROL);
do {
@@ -1374,7 +1390,23 @@ static void flexrm_shutdown(struct mbox_chan *chan)
FLUSH_DONE_MASK)
break;
mdelay(1);
- } while (timeout--);
+ } while (--timeout);
+ if (!timeout)
+ dev_err(ring->mbox->dev,
+ "setting ring%d flush state timedout\n", ring->num);
+
+ /* Clear ring flush state */
+ timeout = 1000; /* timeout of 1s */
+ writel_relaxed(0x0, ring->regs + RING_CONTROL);
+ do {
+ if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) &
+ FLUSH_DONE_MASK))
+ break;
+ mdelay(1);
+ } while (--timeout);
+ if (!timeout)
+ dev_err(ring->mbox->dev,
+ "clearing ring%d flush state timedout\n", ring->num);
/* Abort all in-flight requests */
for (reqid = 0; reqid < RING_MAX_REQ_COUNT; reqid++) {
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
index 97fb956bb6e0..93f3d4d61fa7 100644
--- a/drivers/mailbox/mailbox-test.c
+++ b/drivers/mailbox/mailbox-test.c
@@ -30,6 +30,7 @@
#define MBOX_HEXDUMP_MAX_LEN (MBOX_HEXDUMP_LINE_LEN * \
(MBOX_MAX_MSG_LEN / MBOX_BYTES_PER_LINE))
+static bool mbox_data_ready;
static struct dentry *root_debugfs_dir;
struct mbox_test_device {
@@ -152,16 +153,14 @@ out:
static bool mbox_test_message_data_ready(struct mbox_test_device *tdev)
{
- unsigned char data;
+ bool data_ready;
unsigned long flags;
spin_lock_irqsave(&tdev->lock, flags);
- data = tdev->rx_buffer[0];
+ data_ready = mbox_data_ready;
spin_unlock_irqrestore(&tdev->lock, flags);
- if (data != '\0')
- return true;
- return false;
+ return data_ready;
}
static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf,
@@ -223,6 +222,7 @@ static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf,
*(touser + l) = '\0';
memset(tdev->rx_buffer, 0, MBOX_MAX_MSG_LEN);
+ mbox_data_ready = false;
spin_unlock_irqrestore(&tdev->lock, flags);
@@ -292,6 +292,7 @@ static void mbox_test_receive_message(struct mbox_client *client, void *message)
message, MBOX_MAX_MSG_LEN);
memcpy(tdev->rx_buffer, message, MBOX_MAX_MSG_LEN);
}
+ mbox_data_ready = true;
spin_unlock_irqrestore(&tdev->lock, flags);
wake_up_interruptible(&tdev->waitq);
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 537f4f6d009b..674b35f402f5 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -351,7 +351,7 @@ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
init_completion(&chan->tx_complete);
if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
- chan->txdone_method |= TXDONE_BY_ACK;
+ chan->txdone_method = TXDONE_BY_ACK;
spin_unlock_irqrestore(&chan->lock, flags);
@@ -418,7 +418,7 @@ void mbox_free_channel(struct mbox_chan *chan)
spin_lock_irqsave(&chan->lock, flags);
chan->cl = NULL;
chan->active_req = NULL;
- if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK))
+ if (chan->txdone_method == TXDONE_BY_ACK)
chan->txdone_method = TXDONE_BY_POLL;
module_put(chan->mbox->dev->driver->owner);
diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
index c5e8b9cb170d..2517038a8452 100644
--- a/drivers/mailbox/omap-mailbox.c
+++ b/drivers/mailbox/omap-mailbox.c
@@ -906,7 +906,11 @@ static int __init omap_mbox_init(void)
mbox_kfifo_size = max_t(unsigned int, mbox_kfifo_size,
sizeof(mbox_msg_t));
- return platform_driver_register(&omap_mbox_driver);
+ err = platform_driver_register(&omap_mbox_driver);
+ if (err)
+ class_unregister(&omap_mbox_class);
+
+ return err;
}
subsys_initcall(omap_mbox_init);
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index e5a69679cfa2..3ef7f036ceea 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -265,7 +265,7 @@ struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
init_completion(&chan->tx_complete);
if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
- chan->txdone_method |= TXDONE_BY_ACK;
+ chan->txdone_method = TXDONE_BY_ACK;
spin_unlock_irqrestore(&chan->lock, flags);
@@ -311,7 +311,7 @@ void pcc_mbox_free_channel(struct mbox_chan *chan)
spin_lock_irqsave(&chan->lock, flags);
chan->cl = NULL;
chan->active_req = NULL;
- if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK))
+ if (chan->txdone_method == TXDONE_BY_ACK)
chan->txdone_method = TXDONE_BY_POLL;
spin_unlock_irqrestore(&chan->lock, flags);