Diffstat (limited to 'drivers/mailbox/bcm-flexrm-mailbox.c')
-rw-r--r--  drivers/mailbox/bcm-flexrm-mailbox.c  349
1 file changed, 219 insertions(+), 130 deletions(-)
diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
index da67882caa7b..41f79e51d9e5 100644
--- a/drivers/mailbox/bcm-flexrm-mailbox.c
+++ b/drivers/mailbox/bcm-flexrm-mailbox.c
@@ -1,10 +1,8 @@
-/* Broadcom FlexRM Mailbox Driver
- *
- * Copyright (C) 2017 Broadcom
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2017 Broadcom
+
+/*
+ * Broadcom FlexRM Mailbox Driver
*
* Each Broadcom FlexSparx4 offload engine is implemented as an
* extension to Broadcom FlexRM ring manager. The FlexRM ring
@@ -17,12 +15,14 @@
#include <asm/barrier.h>
#include <asm/byteorder.h>
+#include <linux/atomic.h>
+#include <linux/bitmap.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
-#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
@@ -95,7 +95,7 @@
/* Register RING_CMPL_START_ADDR fields */
#define CMPL_START_ADDR_VALUE(pa) \
- ((u32)((((u64)(pa)) >> RING_CMPL_ALIGN_ORDER) & 0x03ffffff))
+ ((u32)((((u64)(pa)) >> RING_CMPL_ALIGN_ORDER) & 0x07ffffff))
/* Register RING_CONTROL fields */
#define CONTROL_MASK_DISABLE_CONTROL 12
@@ -260,18 +260,21 @@ struct flexrm_ring {
void __iomem *regs;
bool irq_requested;
unsigned int irq;
+ cpumask_t irq_aff_hint;
unsigned int msi_timer_val;
unsigned int msi_count_threshold;
- struct ida requests_ida;
struct brcm_message *requests[RING_MAX_REQ_COUNT];
void *bd_base;
dma_addr_t bd_dma_base;
u32 bd_write_offset;
void *cmpl_base;
dma_addr_t cmpl_dma_base;
+ /* Atomic stats */
+ atomic_t msg_send_count;
+ atomic_t msg_cmpl_count;
/* Protected members */
spinlock_t lock;
- struct brcm_message *last_pending_msg;
+ DECLARE_BITMAP(requests_bmap, RING_MAX_REQ_COUNT);
u32 cmpl_read_offset;
};
@@ -282,6 +285,7 @@ struct flexrm_mbox {
struct flexrm_ring *rings;
struct dma_pool *bd_pool;
struct dma_pool *cmpl_pool;
+ struct dentry *root;
struct mbox_controller controller;
};
@@ -359,7 +363,7 @@ static u32 flexrm_estimate_header_desc_count(u32 nhcnt)
return hcnt;
}
-static void flexrm_flip_header_toogle(void *desc_ptr)
+static void flexrm_flip_header_toggle(void *desc_ptr)
{
u64 desc = flexrm_read_desc(desc_ptr);
@@ -409,7 +413,7 @@ static void flexrm_enqueue_desc(u32 nhpos, u32 nhcnt, u32 reqid,
*
* In general use, number of non-HEADER descriptors can easily go
* beyond 31. To tackle this situation, we have packet (or request)
- * extenstion bits (STARTPKT and ENDPKT) in the HEADER descriptor.
+ * extension bits (STARTPKT and ENDPKT) in the HEADER descriptor.
*
* To use packet extension, the first HEADER descriptor of request
* (or packet) will have STARTPKT=1 and ENDPKT=0. The intermediate
@@ -618,15 +622,15 @@ static int flexrm_spu_dma_map(struct device *dev, struct brcm_message *msg)
rc = dma_map_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
DMA_TO_DEVICE);
- if (rc < 0)
- return rc;
+ if (!rc)
+ return -EIO;
rc = dma_map_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst),
DMA_FROM_DEVICE);
- if (rc < 0) {
+ if (!rc) {
dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
DMA_TO_DEVICE);
- return rc;
+ return -EIO;
}
return 0;
@@ -693,7 +697,7 @@ static void *flexrm_spu_write_descs(struct brcm_message *msg, u32 nhcnt,
wmb();
/* Flip toggle bit in header */
- flexrm_flip_header_toogle(orig_desc_ptr);
+ flexrm_flip_header_toggle(orig_desc_ptr);
return desc_ptr;
}
@@ -822,7 +826,7 @@ static void *flexrm_sba_write_descs(struct brcm_message *msg, u32 nhcnt,
wmb();
/* Flip toggle bit in header */
- flexrm_flip_header_toogle(orig_desc_ptr);
+ flexrm_flip_header_toggle(orig_desc_ptr);
return desc_ptr;
}
@@ -912,6 +916,62 @@ static void *flexrm_write_descs(struct brcm_message *msg, u32 nhcnt,
/* ====== FlexRM driver helper routines ===== */
+static void flexrm_write_config_in_seqfile(struct flexrm_mbox *mbox,
+ struct seq_file *file)
+{
+ int i;
+ const char *state;
+ struct flexrm_ring *ring;
+
+ seq_printf(file, "%-5s %-9s %-18s %-10s %-18s %-10s\n",
+ "Ring#", "State", "BD_Addr", "BD_Size",
+ "Cmpl_Addr", "Cmpl_Size");
+
+ for (i = 0; i < mbox->num_rings; i++) {
+ ring = &mbox->rings[i];
+ if (readl(ring->regs + RING_CONTROL) &
+ BIT(CONTROL_ACTIVE_SHIFT))
+ state = "active";
+ else
+ state = "inactive";
+ seq_printf(file,
+ "%-5d %-9s 0x%016llx 0x%08x 0x%016llx 0x%08x\n",
+ ring->num, state,
+ (unsigned long long)ring->bd_dma_base,
+ (u32)RING_BD_SIZE,
+ (unsigned long long)ring->cmpl_dma_base,
+ (u32)RING_CMPL_SIZE);
+ }
+}
+
+static void flexrm_write_stats_in_seqfile(struct flexrm_mbox *mbox,
+ struct seq_file *file)
+{
+ int i;
+ u32 val, bd_read_offset;
+ struct flexrm_ring *ring;
+
+ seq_printf(file, "%-5s %-10s %-10s %-10s %-11s %-11s\n",
+ "Ring#", "BD_Read", "BD_Write",
+ "Cmpl_Read", "Submitted", "Completed");
+
+ for (i = 0; i < mbox->num_rings; i++) {
+ ring = &mbox->rings[i];
+ bd_read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR);
+ val = readl_relaxed(ring->regs + RING_BD_START_ADDR);
+ bd_read_offset *= RING_DESC_SIZE;
+ bd_read_offset += (u32)(BD_START_ADDR_DECODE(val) -
+ ring->bd_dma_base);
+ seq_printf(file, "%-5d 0x%08x 0x%08x 0x%08x %-11d %-11d\n",
+ ring->num,
+ (u32)bd_read_offset,
+ (u32)ring->bd_write_offset,
+ (u32)ring->cmpl_read_offset,
+ (u32)atomic_read(&ring->msg_send_count),
+ (u32)atomic_read(&ring->msg_cmpl_count));
+ }
+}
+
static int flexrm_new_request(struct flexrm_ring *ring,
struct brcm_message *batch_msg,
struct brcm_message *msg)
@@ -929,38 +989,24 @@ static int flexrm_new_request(struct flexrm_ring *ring,
msg->error = 0;
/* If no requests possible then save data pointer and goto done. */
- reqid = ida_simple_get(&ring->requests_ida, 0,
- RING_MAX_REQ_COUNT, GFP_KERNEL);
- if (reqid < 0) {
- spin_lock_irqsave(&ring->lock, flags);
- if (batch_msg)
- ring->last_pending_msg = batch_msg;
- else
- ring->last_pending_msg = msg;
- spin_unlock_irqrestore(&ring->lock, flags);
- return 0;
- }
+ spin_lock_irqsave(&ring->lock, flags);
+ reqid = bitmap_find_free_region(ring->requests_bmap,
+ RING_MAX_REQ_COUNT, 0);
+ spin_unlock_irqrestore(&ring->lock, flags);
+ if (reqid < 0)
+ return -ENOSPC;
ring->requests[reqid] = msg;
/* Do DMA mappings for the message */
ret = flexrm_dma_map(ring->mbox->dev, msg);
if (ret < 0) {
ring->requests[reqid] = NULL;
- ida_simple_remove(&ring->requests_ida, reqid);
+ spin_lock_irqsave(&ring->lock, flags);
+ bitmap_release_region(ring->requests_bmap, reqid, 0);
+ spin_unlock_irqrestore(&ring->lock, flags);
return ret;
}
- /* If last_pending_msg is already set then goto done with error */
- spin_lock_irqsave(&ring->lock, flags);
- if (ring->last_pending_msg)
- ret = -ENOSPC;
- spin_unlock_irqrestore(&ring->lock, flags);
- if (ret < 0) {
- dev_warn(ring->mbox->dev, "no space in ring %d\n", ring->num);
- exit_cleanup = true;
- goto exit;
- }
-
/* Determine current HW BD read offset */
read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR);
val = readl_relaxed(ring->regs + RING_BD_START_ADDR);
@@ -987,13 +1033,7 @@ static int flexrm_new_request(struct flexrm_ring *ring,
break;
}
if (count) {
- spin_lock_irqsave(&ring->lock, flags);
- if (batch_msg)
- ring->last_pending_msg = batch_msg;
- else
- ring->last_pending_msg = msg;
- spin_unlock_irqrestore(&ring->lock, flags);
- ret = 0;
+ ret = -ENOSPC;
exit_cleanup = true;
goto exit;
}
@@ -1012,6 +1052,9 @@ static int flexrm_new_request(struct flexrm_ring *ring,
/* Save ring BD write offset */
ring->bd_write_offset = (unsigned long)(next - ring->bd_base);
+ /* Increment number of messages sent */
+ atomic_inc_return(&ring->msg_send_count);
+
exit:
/* Update error status in message */
msg->error = ret;
@@ -1020,7 +1063,9 @@ exit:
if (exit_cleanup) {
flexrm_dma_unmap(ring->mbox->dev, msg);
ring->requests[reqid] = NULL;
- ida_simple_remove(&ring->requests_ida, reqid);
+ spin_lock_irqsave(&ring->lock, flags);
+ bitmap_release_region(ring->requests_bmap, reqid, 0);
+ spin_unlock_irqrestore(&ring->lock, flags);
}
return ret;
@@ -1037,16 +1082,10 @@ static int flexrm_process_completions(struct flexrm_ring *ring)
spin_lock_irqsave(&ring->lock, flags);
- /* Check last_pending_msg */
- if (ring->last_pending_msg) {
- msg = ring->last_pending_msg;
- ring->last_pending_msg = NULL;
- }
-
/*
* Get current completion read and write offset
*
- * Note: We should read completion write pointer atleast once
+ * Note: We should read completion write pointer at least once
* after we get a MSI interrupt because HW maintains internal
* MSI status which will allow next MSI interrupt only after
* completion write pointer is read.
@@ -1058,10 +1097,6 @@ static int flexrm_process_completions(struct flexrm_ring *ring)
spin_unlock_irqrestore(&ring->lock, flags);
- /* If last_pending_msg was set then queue it back */
- if (msg)
- mbox_send_message(chan, msg);
-
/* For each completed request notify mailbox clients */
reqid = 0;
while (cmpl_read_offset != cmpl_write_offset) {
@@ -1077,8 +1112,8 @@ static int flexrm_process_completions(struct flexrm_ring *ring)
err = flexrm_cmpl_desc_to_error(desc);
if (err < 0) {
dev_warn(ring->mbox->dev,
- "got completion desc=0x%lx with error %d",
- (unsigned long)desc, err);
+ "ring%d got completion desc=0x%lx with error %d\n",
+ ring->num, (unsigned long)desc, err);
}
/* Determine request id from completion descriptor */
@@ -1088,14 +1123,16 @@ static int flexrm_process_completions(struct flexrm_ring *ring)
msg = ring->requests[reqid];
if (!msg) {
dev_warn(ring->mbox->dev,
- "null msg pointer for completion desc=0x%lx",
- (unsigned long)desc);
+ "ring%d null msg pointer for completion desc=0x%lx\n",
+ ring->num, (unsigned long)desc);
continue;
}
/* Release reqid for recycling */
ring->requests[reqid] = NULL;
- ida_simple_remove(&ring->requests_ida, reqid);
+ spin_lock_irqsave(&ring->lock, flags);
+ bitmap_release_region(ring->requests_bmap, reqid, 0);
+ spin_unlock_irqrestore(&ring->lock, flags);
/* Unmap DMA mappings */
flexrm_dma_unmap(ring->mbox->dev, msg);
@@ -1105,12 +1142,35 @@ static int flexrm_process_completions(struct flexrm_ring *ring)
mbox_chan_received_data(chan, msg);
/* Increment number of completions processed */
+ atomic_inc_return(&ring->msg_cmpl_count);
count++;
}
return count;
}
+/* ====== FlexRM Debugfs callbacks ====== */
+
+static int flexrm_debugfs_conf_show(struct seq_file *file, void *offset)
+{
+ struct flexrm_mbox *mbox = dev_get_drvdata(file->private);
+
+ /* Write config in file */
+ flexrm_write_config_in_seqfile(mbox, file);
+
+ return 0;
+}
+
+static int flexrm_debugfs_stats_show(struct seq_file *file, void *offset)
+{
+ struct flexrm_mbox *mbox = dev_get_drvdata(file->private);
+
+ /* Write stats in file */
+ flexrm_write_stats_in_seqfile(mbox, file);
+
+ return 0;
+}
+
/* ====== FlexRM interrupt handler ===== */
static irqreturn_t flexrm_irq_event(int irq, void *dev_id)
@@ -1172,7 +1232,9 @@ static int flexrm_startup(struct mbox_chan *chan)
ring->bd_base = dma_pool_alloc(ring->mbox->bd_pool,
GFP_KERNEL, &ring->bd_dma_base);
if (!ring->bd_base) {
- dev_err(ring->mbox->dev, "can't allocate BD memory\n");
+ dev_err(ring->mbox->dev,
+ "can't allocate BD memory for ring%d\n",
+ ring->num);
ret = -ENOMEM;
goto fail;
}
@@ -1192,18 +1254,20 @@ static int flexrm_startup(struct mbox_chan *chan)
}
/* Allocate completion memory */
- ring->cmpl_base = dma_pool_alloc(ring->mbox->cmpl_pool,
+ ring->cmpl_base = dma_pool_zalloc(ring->mbox->cmpl_pool,
GFP_KERNEL, &ring->cmpl_dma_base);
if (!ring->cmpl_base) {
- dev_err(ring->mbox->dev, "can't allocate completion memory\n");
+ dev_err(ring->mbox->dev,
+ "can't allocate completion memory for ring%d\n",
+ ring->num);
ret = -ENOMEM;
goto fail_free_bd_memory;
}
- memset(ring->cmpl_base, 0, RING_CMPL_SIZE);
/* Request IRQ */
if (ring->irq == UINT_MAX) {
- dev_err(ring->mbox->dev, "ring IRQ not available\n");
+ dev_err(ring->mbox->dev,
+ "ring%d IRQ not available\n", ring->num);
ret = -ENODEV;
goto fail_free_cmpl_memory;
}
@@ -1212,11 +1276,26 @@ static int flexrm_startup(struct mbox_chan *chan)
flexrm_irq_thread,
0, dev_name(ring->mbox->dev), ring);
if (ret) {
- dev_err(ring->mbox->dev, "failed to request ring IRQ\n");
+ dev_err(ring->mbox->dev,
+ "failed to request ring%d IRQ\n", ring->num);
goto fail_free_cmpl_memory;
}
ring->irq_requested = true;
+ /* Set IRQ affinity hint */
+ ring->irq_aff_hint = CPU_MASK_NONE;
+ val = ring->mbox->num_rings;
+ val = (num_online_cpus() < val) ? val / num_online_cpus() : 1;
+ cpumask_set_cpu((ring->num / val) % num_online_cpus(),
+ &ring->irq_aff_hint);
+ ret = irq_update_affinity_hint(ring->irq, &ring->irq_aff_hint);
+ if (ret) {
+ dev_err(ring->mbox->dev,
+ "failed to set IRQ affinity hint for ring%d\n",
+ ring->num);
+ goto fail_free_irq;
+ }
+
/* Disable/inactivate ring */
writel_relaxed(0x0, ring->regs + RING_CONTROL);
@@ -1233,9 +1312,6 @@ static int flexrm_startup(struct mbox_chan *chan)
val = CMPL_START_ADDR_VALUE(ring->cmpl_dma_base);
writel_relaxed(val, ring->regs + RING_CMPL_START_ADDR);
- /* Ensure last pending message is cleared */
- ring->last_pending_msg = NULL;
-
/* Completion read pointer will be same as HW write pointer */
ring->cmpl_read_offset =
readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR);
@@ -1259,8 +1335,15 @@ static int flexrm_startup(struct mbox_chan *chan)
val = BIT(CONTROL_ACTIVE_SHIFT);
writel_relaxed(val, ring->regs + RING_CONTROL);
+ /* Reset stats to zero */
+ atomic_set(&ring->msg_send_count, 0);
+ atomic_set(&ring->msg_cmpl_count, 0);
+
return 0;
+fail_free_irq:
+ free_irq(ring->irq, ring);
+ ring->irq_requested = false;
fail_free_cmpl_memory:
dma_pool_free(ring->mbox->cmpl_pool,
ring->cmpl_base, ring->cmpl_dma_base);
@@ -1283,8 +1366,8 @@ static void flexrm_shutdown(struct mbox_chan *chan)
/* Disable/inactivate ring */
writel_relaxed(0x0, ring->regs + RING_CONTROL);
- /* Flush ring with timeout of 1s */
- timeout = 1000;
+ /* Set ring flush state */
+ timeout = 1000; /* timeout of 1s */
writel_relaxed(BIT(CONTROL_FLUSH_SHIFT),
ring->regs + RING_CONTROL);
do {
@@ -1292,7 +1375,23 @@ static void flexrm_shutdown(struct mbox_chan *chan)
FLUSH_DONE_MASK)
break;
mdelay(1);
- } while (timeout--);
+ } while (--timeout);
+ if (!timeout)
+ dev_err(ring->mbox->dev,
+ "setting ring%d flush state timedout\n", ring->num);
+
+ /* Clear ring flush state */
+ timeout = 1000; /* timeout of 1s */
+ writel_relaxed(0x0, ring->regs + RING_CONTROL);
+ do {
+ if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) &
+ FLUSH_DONE_MASK))
+ break;
+ mdelay(1);
+ } while (--timeout);
+ if (!timeout)
+ dev_err(ring->mbox->dev,
+ "clearing ring%d flush state timedout\n", ring->num);
/* Abort all in-flight requests */
for (reqid = 0; reqid < RING_MAX_REQ_COUNT; reqid++) {
@@ -1302,7 +1401,6 @@ static void flexrm_shutdown(struct mbox_chan *chan)
/* Release reqid for recycling */
ring->requests[reqid] = NULL;
- ida_simple_remove(&ring->requests_ida, reqid);
/* Unmap DMA mappings */
flexrm_dma_unmap(ring->mbox->dev, msg);
@@ -1312,8 +1410,12 @@ static void flexrm_shutdown(struct mbox_chan *chan)
mbox_chan_received_data(chan, msg);
}
+ /* Clear requests bitmap */
+ bitmap_zero(ring->requests_bmap, RING_MAX_REQ_COUNT);
+
/* Release IRQ */
if (ring->irq_requested) {
+ irq_update_affinity_hint(ring->irq, NULL);
free_irq(ring->irq, ring);
ring->irq_requested = false;
}
@@ -1333,24 +1435,10 @@ static void flexrm_shutdown(struct mbox_chan *chan)
}
}
-static bool flexrm_last_tx_done(struct mbox_chan *chan)
-{
- bool ret;
- unsigned long flags;
- struct flexrm_ring *ring = chan->con_priv;
-
- spin_lock_irqsave(&ring->lock, flags);
- ret = (ring->last_pending_msg) ? false : true;
- spin_unlock_irqrestore(&ring->lock, flags);
-
- return ret;
-}
-
static const struct mbox_chan_ops flexrm_mbox_chan_ops = {
.send_data = flexrm_send_data,
.startup = flexrm_startup,
.shutdown = flexrm_shutdown,
- .last_tx_done = flexrm_last_tx_done,
.peek_data = flexrm_peek_data,
};
@@ -1386,7 +1474,7 @@ static void flexrm_mbox_msi_write(struct msi_desc *desc, struct msi_msg *msg)
{
struct device *dev = msi_desc_to_dev(desc);
struct flexrm_mbox *mbox = dev_get_drvdata(dev);
- struct flexrm_ring *ring = &mbox->rings[desc->platform.msi_index];
+ struct flexrm_ring *ring = &mbox->rings[desc->msi_index];
/* Configure per-Ring MSI registers */
writel_relaxed(msg->address_lo, ring->regs + RING_MSI_ADDR_LS);
@@ -1399,7 +1487,6 @@ static int flexrm_mbox_probe(struct platform_device *pdev)
int index, ret = 0;
void __iomem *regs;
void __iomem *regs_end;
- struct msi_desc *desc;
struct resource *iomem;
struct flexrm_ring *ring;
struct flexrm_mbox *mbox;
@@ -1414,18 +1501,13 @@ static int flexrm_mbox_probe(struct platform_device *pdev)
mbox->dev = dev;
platform_set_drvdata(pdev, mbox);
- /* Get resource for registers */
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ /* Get resource for registers and map registers of all rings */
+ mbox->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &iomem);
if (!iomem || (resource_size(iomem) < RING_REGS_SIZE)) {
ret = -ENODEV;
goto fail;
- }
-
- /* Map registers of all rings */
- mbox->regs = devm_ioremap_resource(&pdev->dev, iomem);
- if (IS_ERR(mbox->regs)) {
+ } else if (IS_ERR(mbox->regs)) {
ret = PTR_ERR(mbox->regs);
- dev_err(&pdev->dev, "Failed to remap mailbox regs: %d\n", ret);
goto fail;
}
regs_end = mbox->regs + resource_size(iomem);
@@ -1468,14 +1550,15 @@ static int flexrm_mbox_probe(struct platform_device *pdev)
ring->irq_requested = false;
ring->msi_timer_val = MSI_TIMER_VAL_MASK;
ring->msi_count_threshold = 0x1;
- ida_init(&ring->requests_ida);
memset(ring->requests, 0, sizeof(ring->requests));
ring->bd_base = NULL;
ring->bd_dma_base = 0;
ring->cmpl_base = NULL;
ring->cmpl_dma_base = 0;
+ atomic_set(&ring->msg_send_count, 0);
+ atomic_set(&ring->msg_cmpl_count, 0);
spin_lock_init(&ring->lock);
- ring->last_pending_msg = NULL;
+ bitmap_zero(ring->requests_bmap, RING_MAX_REQ_COUNT);
ring->cmpl_read_offset = 0;
}
@@ -1504,21 +1587,35 @@ static int flexrm_mbox_probe(struct platform_device *pdev)
}
/* Allocate platform MSIs for each ring */
- ret = platform_msi_domain_alloc_irqs(dev, mbox->num_rings,
- flexrm_mbox_msi_write);
+ ret = platform_device_msi_init_and_alloc_irqs(dev, mbox->num_rings,
+ flexrm_mbox_msi_write);
if (ret)
goto fail_destroy_cmpl_pool;
/* Save alloced IRQ numbers for each ring */
- for_each_msi_entry(desc, dev) {
- ring = &mbox->rings[desc->platform.msi_index];
- ring->irq = desc->irq;
- }
+ for (index = 0; index < mbox->num_rings; index++)
+ mbox->rings[index].irq = msi_get_virq(dev, index);
+
+ /* Check availability of debugfs */
+ if (!debugfs_initialized())
+ goto skip_debugfs;
+
+ /* Create debugfs root entry */
+ mbox->root = debugfs_create_dir(dev_name(mbox->dev), NULL);
+
+ /* Create debugfs config entry */
+ debugfs_create_devm_seqfile(mbox->dev, "config", mbox->root,
+ flexrm_debugfs_conf_show);
+
+ /* Create debugfs stats entry */
+ debugfs_create_devm_seqfile(mbox->dev, "stats", mbox->root,
+ flexrm_debugfs_stats_show);
+
+skip_debugfs:
/* Initialize mailbox controller */
mbox->controller.txdone_irq = false;
- mbox->controller.txdone_poll = true;
- mbox->controller.txpoll_period = 1;
+ mbox->controller.txdone_poll = false;
mbox->controller.ops = &flexrm_mbox_chan_ops;
mbox->controller.dev = dev;
mbox->controller.num_chans = mbox->num_rings;
@@ -1527,23 +1624,24 @@ static int flexrm_mbox_probe(struct platform_device *pdev)
sizeof(*mbox->controller.chans), GFP_KERNEL);
if (!mbox->controller.chans) {
ret = -ENOMEM;
- goto fail_free_msis;
+ goto fail_free_debugfs_root;
}
for (index = 0; index < mbox->num_rings; index++)
mbox->controller.chans[index].con_priv = &mbox->rings[index];
/* Register mailbox controller */
- ret = mbox_controller_register(&mbox->controller);
+ ret = devm_mbox_controller_register(dev, &mbox->controller);
if (ret)
- goto fail_free_msis;
+ goto fail_free_debugfs_root;
dev_info(dev, "registered flexrm mailbox with %d channels\n",
mbox->controller.num_chans);
return 0;
-fail_free_msis:
- platform_msi_domain_free_irqs(dev);
+fail_free_debugfs_root:
+ debugfs_remove_recursive(mbox->root);
+ platform_device_msi_free_irqs_all(dev);
fail_destroy_cmpl_pool:
dma_pool_destroy(mbox->cmpl_pool);
fail_destroy_bd_pool:
@@ -1552,26 +1650,17 @@ fail:
return ret;
}
-static int flexrm_mbox_remove(struct platform_device *pdev)
+static void flexrm_mbox_remove(struct platform_device *pdev)
{
- int index;
struct device *dev = &pdev->dev;
- struct flexrm_ring *ring;
struct flexrm_mbox *mbox = platform_get_drvdata(pdev);
- mbox_controller_unregister(&mbox->controller);
+ debugfs_remove_recursive(mbox->root);
- platform_msi_domain_free_irqs(dev);
+ platform_device_msi_free_irqs_all(dev);
dma_pool_destroy(mbox->cmpl_pool);
dma_pool_destroy(mbox->bd_pool);
-
- for (index = 0; index < mbox->num_rings; index++) {
- ring = &mbox->rings[index];
- ida_destroy(&ring->requests_ida);
- }
-
- return 0;
}
static const struct of_device_id flexrm_mbox_of_match[] = {