Diffstat (limited to 'drivers/hwtracing')
-rw-r--r--  drivers/hwtracing/coresight/Kconfig                  |   11
-rw-r--r--  drivers/hwtracing/coresight/Makefile                 |    1
-rw-r--r--  drivers/hwtracing/coresight/coresight-catu.c         |  577
-rw-r--r--  drivers/hwtracing/coresight/coresight-catu.h         |  119
-rw-r--r--  drivers/hwtracing/coresight/coresight-etb10.c        |   12
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm.h          |    3
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm3x-sysfs.c  |   43
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm3x.c        |    4
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x-sysfs.c  |   47
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x.c        |   31
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x.h        |    3
-rw-r--r--  drivers/hwtracing/coresight/coresight-priv.h         |   10
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc-etf.c      |   45
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc-etr.c      | 1074
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc.c          |   83
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc.h          |  113
-rw-r--r--  drivers/hwtracing/coresight/coresight-tpiu.c         |    7
-rw-r--r--  drivers/hwtracing/coresight/coresight.c              |   53
18 files changed, 2033 insertions(+), 203 deletions(-)
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index ef9cb3c164e1..ad34380cac49 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -31,6 +31,17 @@ config CORESIGHT_LINK_AND_SINK_TMC
complies with the generic implementation of the component without
special enhancement or added features.
+config CORESIGHT_CATU
+ bool "Coresight Address Translation Unit (CATU) driver"
+ depends on CORESIGHT_LINK_AND_SINK_TMC
+ help
+ Enable support for the Coresight Address Translation Unit (CATU).
+ CATU supports a scatter-gather table of 4K pages, with forward/backward
+ lookup. CATU helps the TMC ETR use a large, physically non-contiguous
+ trace buffer by translating the addresses used by the ETR into physical
+ addresses via a lookup in the provided table. CATU can also be used in
+ pass-through mode, where addresses are not translated.
+
config CORESIGHT_SINK_TPIU
bool "Coresight generic TPIU driver"
depends on CORESIGHT_LINKS_AND_SINKS
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index 61db9dd0d571..41870ded51a3 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -18,3 +18,4 @@ obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o \
obj-$(CONFIG_CORESIGHT_DYNAMIC_REPLICATOR) += coresight-dynamic-replicator.o
obj-$(CONFIG_CORESIGHT_STM) += coresight-stm.o
obj-$(CONFIG_CORESIGHT_CPU_DEBUG) += coresight-cpu-debug.o
+obj-$(CONFIG_CORESIGHT_CATU) += coresight-catu.o
diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c
new file mode 100644
index 000000000000..ff94e58845b7
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-catu.c
@@ -0,0 +1,577 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Arm Limited. All rights reserved.
+ *
+ * Coresight Address Translation Unit support
+ *
+ * Author: Suzuki K Poulose <suzuki.poulose@arm.com>
+ */
+
+#include <linux/amba/bus.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "coresight-catu.h"
+#include "coresight-priv.h"
+#include "coresight-tmc.h"
+
+#define csdev_to_catu_drvdata(csdev) \
+ dev_get_drvdata(csdev->dev.parent)
+
+/* Verbose output for CATU table contents */
+#ifdef CATU_DEBUG
+#define catu_dbg(x, ...) dev_dbg(x, __VA_ARGS__)
+#else
+#define catu_dbg(x, ...) do {} while (0)
+#endif
+
+struct catu_etr_buf {
+ struct tmc_sg_table *catu_table;
+ dma_addr_t sladdr;
+};
+
+/*
+ * CATU uses a page size of 4KB for page tables as well as data pages.
+ * Each 64bit entry in the table has the following format.
+ *
+ * 63 12 1 0
+ * ------------------------------------
+ * | Address [63-12] | SBZ | V|
+ * ------------------------------------
+ *
+ * Where bit[0] V indicates if the address is valid or not.
+ * Each 4K table page holds up to 256 data page pointers, taking up to 2K.
+ * At the end of the 4K page there are two link pointers, pointing to the
+ * previous and the next table pages respectively (i.e., entries 510
+ * and 511).
+ * E.g., a table of two pages could look like:
+ *
+ * Table Page 0 Table Page 1
+ * SLADDR ===> x------------------x x--> x-----------------x
+ * INADDR ->| Page 0 | V | | | Page 256 | V | <- INADDR+1M
+ * |------------------| | |-----------------|
+ * INADDR+4K ->| Page 1 | V | | | |
+ * |------------------| | |-----------------|
+ * | Page 2 | V | | | |
+ * |------------------| | |-----------------|
+ * | ... | V | | | ... |
+ * |------------------| | |-----------------|
+ * INADDR+1020K| Page 255 | V | | | Page 511 | V |
+ * SLADDR+2K==>|------------------| | |-----------------|
+ * | UNUSED | | | | |
+ * |------------------| | | |
+ * | UNUSED | | | | |
+ * |------------------| | | |
+ * | ... | | | | |
+ * |------------------| | |-----------------|
+ * | IGNORED | 0 | | | Table Page 0| 1 |
+ * |------------------| | |-----------------|
+ * | Table Page 1| 1 |--x | IGNORED | 0 |
+ * x------------------x x-----------------x
+ * SLADDR+4K==>
+ *
+ * The base input address (used by the ETR, programmed in INADDR_{LO,HI})
+ * must be aligned to 1MB (the size addressable by a single page table).
+ * The CATU maps INADDR{LO:HI} to the first page in the table pointed
+ * to by SLADDR{LO:HI} and so on.
+ *
+ */
+typedef u64 cate_t;
+
+#define CATU_PAGE_SHIFT 12
+#define CATU_PAGE_SIZE (1UL << CATU_PAGE_SHIFT)
+#define CATU_PAGES_PER_SYSPAGE (PAGE_SIZE / CATU_PAGE_SIZE)
+
+/* Page pointers are only allocated in the first 2K half */
+#define CATU_PTRS_PER_PAGE ((CATU_PAGE_SIZE >> 1) / sizeof(cate_t))
+#define CATU_PTRS_PER_SYSPAGE (CATU_PAGES_PER_SYSPAGE * CATU_PTRS_PER_PAGE)
+#define CATU_LINK_PREV ((CATU_PAGE_SIZE / sizeof(cate_t)) - 2)
+#define CATU_LINK_NEXT ((CATU_PAGE_SIZE / sizeof(cate_t)) - 1)
+
+#define CATU_ADDR_SHIFT 12
+#define CATU_ADDR_MASK ~(((cate_t)1 << CATU_ADDR_SHIFT) - 1)
+#define CATU_ENTRY_VALID ((cate_t)0x1)
+#define CATU_VALID_ENTRY(addr) \
+ (((cate_t)(addr) & CATU_ADDR_MASK) | CATU_ENTRY_VALID)
+#define CATU_ENTRY_ADDR(entry) ((cate_t)(entry) & ~((cate_t)CATU_ENTRY_VALID))
+
+/* CATU expects the INADDR to be aligned to 1M. */
+#define CATU_DEFAULT_INADDR (1ULL << 20)
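To make the encoding concrete, here is a small worked example using the macros above (illustrative values only, not lines from the patch):

	dma_addr_t page  = 0x812345000ULL;         /* 4K-aligned data page   */
	cate_t     entry = CATU_VALID_ENTRY(page); /* 0x812345001, V bit set */
	dma_addr_t addr  = CATU_ENTRY_ADDR(entry); /* 0x812345000 recovered  */

With CATU_DEFAULT_INADDR = 0x100000, offset 0 of the trace buffer is presented to the ETR at input address 1MB, offset 4K at 1MB + 4K, and so on; each table page covers 1MB (256 entries of 4K) of the buffer.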
+
+/*
+ * catu_get_table : Retrieve the table pointers for the given @offset
+ * within the buffer. The buffer is wrapped around to a valid offset.
+ *
+ * Returns : The CPU virtual address for the beginning of the table
+ * containing the data page pointer for @offset. If @daddrp is not NULL,
+ * @daddrp is set to the DMA address of the beginning of the table.
+ */
+static inline cate_t *catu_get_table(struct tmc_sg_table *catu_table,
+ unsigned long offset,
+ dma_addr_t *daddrp)
+{
+ unsigned long buf_size = tmc_sg_table_buf_size(catu_table);
+ unsigned int table_nr, pg_idx, pg_offset;
+ struct tmc_pages *table_pages = &catu_table->table_pages;
+ void *ptr;
+
+ /* Make sure offset is within the range */
+ offset %= buf_size;
+
+ /*
+ * Each table can address 1MB and a single kernel page can
+ * contain "CATU_PAGES_PER_SYSPAGE" CATU tables.
+ */
+ table_nr = offset >> 20;
+ /* Find the system table page in which table_nr lies */
+ pg_idx = table_nr / CATU_PAGES_PER_SYSPAGE;
+ pg_offset = (table_nr % CATU_PAGES_PER_SYSPAGE) * CATU_PAGE_SIZE;
+ if (daddrp)
+ *daddrp = table_pages->daddrs[pg_idx] + pg_offset;
+ ptr = page_address(table_pages->pages[pg_idx]);
+ return (cate_t *)((unsigned long)ptr + pg_offset);
+}
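The index arithmetic can be checked with assumed numbers: on a 64K-page kernel, CATU_PAGES_PER_SYSPAGE is 16, so sixteen 4K CATU tables share one system page. For an offset of 5MB into the buffer:

	table_nr  = (5 * SZ_1M) >> 20;                 /* 5            */
	pg_idx    = table_nr / CATU_PAGES_PER_SYSPAGE; /* 5 / 16 = 0   */
	pg_offset = (table_nr % CATU_PAGES_PER_SYSPAGE)
		    * CATU_PAGE_SIZE;                  /* 5 * 4K = 20K */

i.e., the table covering the sixth megabyte of the buffer sits 20KB into the first table page.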
+
+#ifdef CATU_DEBUG
+static void catu_dump_table(struct tmc_sg_table *catu_table)
+{
+ int i;
+ cate_t *table;
+ unsigned long table_end, buf_size, offset = 0;
+
+ buf_size = tmc_sg_table_buf_size(catu_table);
+ dev_dbg(catu_table->dev,
+ "Dump table %p, tdaddr: %llx\n",
+ catu_table, catu_table->table_daddr);
+
+ while (offset < buf_size) {
+ table_end = offset + SZ_1M < buf_size ?
+ offset + SZ_1M : buf_size;
+ table = catu_get_table(catu_table, offset, NULL);
+ for (i = 0; offset < table_end; i++, offset += CATU_PAGE_SIZE)
+ dev_dbg(catu_table->dev, "%d: %llx\n", i, table[i]);
+ dev_dbg(catu_table->dev, "Prev : %llx, Next: %llx\n",
+ table[CATU_LINK_PREV], table[CATU_LINK_NEXT]);
+ dev_dbg(catu_table->dev, "== End of sub-table ===");
+ }
+ dev_dbg(catu_table->dev, "== End of Table ===");
+}
+
+#else
+static inline void catu_dump_table(struct tmc_sg_table *catu_table)
+{
+}
+#endif
+
+static inline cate_t catu_make_entry(dma_addr_t addr)
+{
+ return addr ? CATU_VALID_ENTRY(addr) : 0;
+}
+
+/*
+ * catu_populate_table : Populate the given CATU table.
+ * The table is always populated as a circular table.
+ * i.e, the "prev" link of the "first" table points to the "last"
+ * table and the "next" link of the "last" table points to the
+ * "first" table. The buffer should be made linear by calling
+ * catu_set_table().
+ */
+static void
+catu_populate_table(struct tmc_sg_table *catu_table)
+{
+ int i;
+ int sys_pidx; /* Index to current system data page */
+ int catu_pidx; /* Index of CATU page within the system data page */
+ unsigned long offset, buf_size, table_end;
+ dma_addr_t data_daddr;
+ dma_addr_t prev_taddr, next_taddr, cur_taddr;
+ cate_t *table_ptr, *next_table;
+
+ buf_size = tmc_sg_table_buf_size(catu_table);
+ sys_pidx = catu_pidx = 0;
+ offset = 0;
+
+ table_ptr = catu_get_table(catu_table, 0, &cur_taddr);
+ prev_taddr = 0; /* Prev link for the first table */
+
+ while (offset < buf_size) {
+ /*
+ * The @offset is always 1M aligned here and we have an
+ * empty table @table_ptr to fill. Each table can address
+ * up to 1MB of the data buffer. The last table may have fewer
+ * entries if the buffer size is not 1MB aligned.
+ */
+ table_end = (offset + SZ_1M) < buf_size ?
+ (offset + SZ_1M) : buf_size;
+ for (i = 0; offset < table_end;
+ i++, offset += CATU_PAGE_SIZE) {
+
+ data_daddr = catu_table->data_pages.daddrs[sys_pidx] +
+ catu_pidx * CATU_PAGE_SIZE;
+ catu_dbg(catu_table->dev,
+ "[table %5ld:%03d] 0x%llx\n",
+ (offset >> 20), i, data_daddr);
+ table_ptr[i] = catu_make_entry(data_daddr);
+ /* Move the pointers for data pages */
+ catu_pidx = (catu_pidx + 1) % CATU_PAGES_PER_SYSPAGE;
+ if (catu_pidx == 0)
+ sys_pidx++;
+ }
+
+ /*
+ * If we have finished all the valid entries, fill the rest of
+ * the table (i.e., the last table page) with invalid entries,
+ * so that any such lookups fail.
+ */
+ if (offset == buf_size) {
+ memset(&table_ptr[i], 0,
+ sizeof(cate_t) * (CATU_PTRS_PER_PAGE - i));
+ next_taddr = 0;
+ } else {
+ next_table = catu_get_table(catu_table,
+ offset, &next_taddr);
+ }
+
+ table_ptr[CATU_LINK_PREV] = catu_make_entry(prev_taddr);
+ table_ptr[CATU_LINK_NEXT] = catu_make_entry(next_taddr);
+
+ catu_dbg(catu_table->dev,
+ "[table%5ld]: Cur: 0x%llx Prev: 0x%llx, Next: 0x%llx\n",
+ (offset >> 20) - 1, cur_taddr, prev_taddr, next_taddr);
+
+ /* Update the prev/next addresses */
+ if (next_taddr) {
+ prev_taddr = cur_taddr;
+ cur_taddr = next_taddr;
+ table_ptr = next_table;
+ }
+ }
+
+ /* Sync the table for device */
+ tmc_sg_table_sync_table(catu_table);
+}
+
+static struct tmc_sg_table *
+catu_init_sg_table(struct device *catu_dev, int node,
+ ssize_t size, void **pages)
+{
+ int nr_tpages;
+ struct tmc_sg_table *catu_table;
+
+ /*
+ * Each table can address up to 1MB and we can have
+ * CATU_PAGES_PER_SYSPAGE tables in a system page.
+ */
+ nr_tpages = DIV_ROUND_UP(size, SZ_1M) / CATU_PAGES_PER_SYSPAGE;
+ catu_table = tmc_alloc_sg_table(catu_dev, node, nr_tpages,
+ size >> PAGE_SHIFT, pages);
+ if (IS_ERR(catu_table))
+ return catu_table;
+
+ catu_populate_table(catu_table);
+ dev_dbg(catu_dev,
+ "Setup table %p, size %ldKB, %d table pages\n",
+ catu_table, (unsigned long)size >> 10, nr_tpages);
+ catu_dump_table(catu_table);
+ return catu_table;
+}
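As a quick sizing check (worked numbers, assuming a 16MB buffer): DIV_ROUND_UP(16MB, 1MB) gives 16 CATU tables. On a 4K-page kernel (CATU_PAGES_PER_SYSPAGE = 1) that is 16 system pages of tables; on a 64K-page kernel (CATU_PAGES_PER_SYSPAGE = 16) all 16 tables fit in a single system page.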
+
+static void catu_free_etr_buf(struct etr_buf *etr_buf)
+{
+ struct catu_etr_buf *catu_buf;
+
+ if (!etr_buf || etr_buf->mode != ETR_MODE_CATU || !etr_buf->private)
+ return;
+
+ catu_buf = etr_buf->private;
+ tmc_free_sg_table(catu_buf->catu_table);
+ kfree(catu_buf);
+}
+
+static ssize_t catu_get_data_etr_buf(struct etr_buf *etr_buf, u64 offset,
+ size_t len, char **bufpp)
+{
+ struct catu_etr_buf *catu_buf = etr_buf->private;
+
+ return tmc_sg_table_get_data(catu_buf->catu_table, offset, len, bufpp);
+}
+
+static void catu_sync_etr_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
+{
+ struct catu_etr_buf *catu_buf = etr_buf->private;
+ struct tmc_sg_table *catu_table = catu_buf->catu_table;
+ u64 r_offset, w_offset;
+
+ /*
+ * ETR started off at etr_buf->hwaddr. Convert the RRP/RWP to
+ * offsets within the trace buffer.
+ */
+ r_offset = rrp - etr_buf->hwaddr;
+ w_offset = rwp - etr_buf->hwaddr;
+
+ if (!etr_buf->full) {
+ etr_buf->len = w_offset - r_offset;
+ if (w_offset < r_offset)
+ etr_buf->len += etr_buf->size;
+ } else {
+ etr_buf->len = etr_buf->size;
+ }
+
+ etr_buf->offset = r_offset;
+ tmc_sg_table_sync_data_range(catu_table, r_offset, etr_buf->len);
+}
+
+static int catu_alloc_etr_buf(struct tmc_drvdata *tmc_drvdata,
+ struct etr_buf *etr_buf, int node, void **pages)
+{
+ struct coresight_device *csdev;
+ struct device *catu_dev;
+ struct tmc_sg_table *catu_table;
+ struct catu_etr_buf *catu_buf;
+
+ csdev = tmc_etr_get_catu_device(tmc_drvdata);
+ if (!csdev)
+ return -ENODEV;
+ catu_dev = csdev->dev.parent;
+ catu_buf = kzalloc(sizeof(*catu_buf), GFP_KERNEL);
+ if (!catu_buf)
+ return -ENOMEM;
+
+ catu_table = catu_init_sg_table(catu_dev, node, etr_buf->size, pages);
+ if (IS_ERR(catu_table)) {
+ kfree(catu_buf);
+ return PTR_ERR(catu_table);
+ }
+
+ etr_buf->mode = ETR_MODE_CATU;
+ etr_buf->private = catu_buf;
+ etr_buf->hwaddr = CATU_DEFAULT_INADDR;
+
+ catu_buf->catu_table = catu_table;
+ /* Get the table base address */
+ catu_buf->sladdr = catu_table->table_daddr;
+
+ return 0;
+}
+
+const struct etr_buf_operations etr_catu_buf_ops = {
+ .alloc = catu_alloc_etr_buf,
+ .free = catu_free_etr_buf,
+ .sync = catu_sync_etr_buf,
+ .get_data = catu_get_data_etr_buf,
+};
+
+coresight_simple_reg32(struct catu_drvdata, devid, CORESIGHT_DEVID);
+coresight_simple_reg32(struct catu_drvdata, control, CATU_CONTROL);
+coresight_simple_reg32(struct catu_drvdata, status, CATU_STATUS);
+coresight_simple_reg32(struct catu_drvdata, mode, CATU_MODE);
+coresight_simple_reg32(struct catu_drvdata, axictrl, CATU_AXICTRL);
+coresight_simple_reg32(struct catu_drvdata, irqen, CATU_IRQEN);
+coresight_simple_reg64(struct catu_drvdata, sladdr,
+ CATU_SLADDRLO, CATU_SLADDRHI);
+coresight_simple_reg64(struct catu_drvdata, inaddr,
+ CATU_INADDRLO, CATU_INADDRHI);
+
+static struct attribute *catu_mgmt_attrs[] = {
+ &dev_attr_devid.attr,
+ &dev_attr_control.attr,
+ &dev_attr_status.attr,
+ &dev_attr_mode.attr,
+ &dev_attr_axictrl.attr,
+ &dev_attr_irqen.attr,
+ &dev_attr_sladdr.attr,
+ &dev_attr_inaddr.attr,
+ NULL,
+};
+
+static const struct attribute_group catu_mgmt_group = {
+ .attrs = catu_mgmt_attrs,
+ .name = "mgmt",
+};
+
+static const struct attribute_group *catu_groups[] = {
+ &catu_mgmt_group,
+ NULL,
+};
+
+
+static inline int catu_wait_for_ready(struct catu_drvdata *drvdata)
+{
+ return coresight_timeout(drvdata->base,
+ CATU_STATUS, CATU_STATUS_READY, 1);
+}
+
+static int catu_enable_hw(struct catu_drvdata *drvdata, void *data)
+{
+ u32 control, mode;
+ struct etr_buf *etr_buf = data;
+
+ if (catu_wait_for_ready(drvdata))
+ dev_warn(drvdata->dev, "Timeout while waiting for READY\n");
+
+ control = catu_read_control(drvdata);
+ if (control & BIT(CATU_CONTROL_ENABLE)) {
+ dev_warn(drvdata->dev, "CATU is already enabled\n");
+ return -EBUSY;
+ }
+
+ control |= BIT(CATU_CONTROL_ENABLE);
+
+ if (etr_buf && etr_buf->mode == ETR_MODE_CATU) {
+ struct catu_etr_buf *catu_buf = etr_buf->private;
+
+ mode = CATU_MODE_TRANSLATE;
+ catu_write_axictrl(drvdata, CATU_OS_AXICTRL);
+ catu_write_sladdr(drvdata, catu_buf->sladdr);
+ catu_write_inaddr(drvdata, CATU_DEFAULT_INADDR);
+ } else {
+ mode = CATU_MODE_PASS_THROUGH;
+ catu_write_sladdr(drvdata, 0);
+ catu_write_inaddr(drvdata, 0);
+ }
+
+ catu_write_irqen(drvdata, 0);
+ catu_write_mode(drvdata, mode);
+ catu_write_control(drvdata, control);
+ dev_dbg(drvdata->dev, "Enabled in %s mode\n",
+ (mode == CATU_MODE_PASS_THROUGH) ?
+ "Pass through" :
+ "Translate");
+ return 0;
+}
+
+static int catu_enable(struct coresight_device *csdev, void *data)
+{
+ int rc;
+ struct catu_drvdata *catu_drvdata = csdev_to_catu_drvdata(csdev);
+
+ CS_UNLOCK(catu_drvdata->base);
+ rc = catu_enable_hw(catu_drvdata, data);
+ CS_LOCK(catu_drvdata->base);
+ return rc;
+}
+
+static int catu_disable_hw(struct catu_drvdata *drvdata)
+{
+ int rc = 0;
+
+ catu_write_control(drvdata, 0);
+ if (catu_wait_for_ready(drvdata)) {
+ dev_info(drvdata->dev, "Timeout while waiting for READY\n");
+ rc = -EAGAIN;
+ }
+
+ dev_dbg(drvdata->dev, "Disabled\n");
+ return rc;
+}
+
+static int catu_disable(struct coresight_device *csdev, void *__unused)
+{
+ int rc;
+ struct catu_drvdata *catu_drvdata = csdev_to_catu_drvdata(csdev);
+
+ CS_UNLOCK(catu_drvdata->base);
+ rc = catu_disable_hw(catu_drvdata);
+ CS_LOCK(catu_drvdata->base);
+ return rc;
+}
+
+const struct coresight_ops_helper catu_helper_ops = {
+ .enable = catu_enable,
+ .disable = catu_disable,
+};
+
+const struct coresight_ops catu_ops = {
+ .helper_ops = &catu_helper_ops,
+};
+
+static int catu_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ int ret = 0;
+ u32 dma_mask;
+ struct catu_drvdata *drvdata;
+ struct coresight_desc catu_desc;
+ struct coresight_platform_data *pdata = NULL;
+ struct device *dev = &adev->dev;
+ struct device_node *np = dev->of_node;
+ void __iomem *base;
+
+ if (np) {
+ pdata = of_get_coresight_platform_data(dev, np);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ goto out;
+ }
+ dev->platform_data = pdata;
+ }
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ drvdata->dev = dev;
+ dev_set_drvdata(dev, drvdata);
+ base = devm_ioremap_resource(dev, &adev->res);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
+ goto out;
+ }
+
+ /* Setup dma mask for the device */
+ dma_mask = readl_relaxed(base + CORESIGHT_DEVID) & 0x3f;
+ switch (dma_mask) {
+ case 32:
+ case 40:
+ case 44:
+ case 48:
+ case 52:
+ case 56:
+ case 64:
+ break;
+ default:
+ /* Default to 40 bits, as supported by the TMC-ETR */
+ dma_mask = 40;
+ }
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_mask));
+ if (ret)
+ goto out;
+
+ drvdata->base = base;
+ catu_desc.pdata = pdata;
+ catu_desc.dev = dev;
+ catu_desc.groups = catu_groups;
+ catu_desc.type = CORESIGHT_DEV_TYPE_HELPER;
+ catu_desc.subtype.helper_subtype = CORESIGHT_DEV_SUBTYPE_HELPER_CATU;
+ catu_desc.ops = &catu_ops;
+ drvdata->csdev = coresight_register(&catu_desc);
+ if (IS_ERR(drvdata->csdev))
+ ret = PTR_ERR(drvdata->csdev);
+out:
+ pm_runtime_put(&adev->dev);
+ return ret;
+}
+
+static struct amba_id catu_ids[] = {
+ {
+ .id = 0x000bb9ee,
+ .mask = 0x000fffff,
+ },
+ {},
+};
+
+static struct amba_driver catu_driver = {
+ .drv = {
+ .name = "coresight-catu",
+ .owner = THIS_MODULE,
+ .suppress_bind_attrs = true,
+ },
+ .probe = catu_probe,
+ .id_table = catu_ids,
+};
+
+builtin_amba_driver(catu_driver);
diff --git a/drivers/hwtracing/coresight/coresight-catu.h b/drivers/hwtracing/coresight/coresight-catu.h
new file mode 100644
index 000000000000..1b281f0dcccc
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-catu.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Arm Limited. All rights reserved.
+ *
+ * Author: Suzuki K Poulose <suzuki.poulose@arm.com>
+ */
+
+#ifndef _CORESIGHT_CATU_H
+#define _CORESIGHT_CATU_H
+
+#include "coresight-priv.h"
+
+/* Register offset from base */
+#define CATU_CONTROL 0x000
+#define CATU_MODE 0x004
+#define CATU_AXICTRL 0x008
+#define CATU_IRQEN 0x00c
+#define CATU_SLADDRLO 0x020
+#define CATU_SLADDRHI 0x024
+#define CATU_INADDRLO 0x028
+#define CATU_INADDRHI 0x02c
+#define CATU_STATUS 0x100
+#define CATU_DEVARCH 0xfbc
+
+#define CATU_CONTROL_ENABLE 0
+
+#define CATU_MODE_PASS_THROUGH 0U
+#define CATU_MODE_TRANSLATE 1U
+
+#define CATU_AXICTRL_ARCACHE_SHIFT 4
+#define CATU_AXICTRL_ARCACHE_MASK 0xf
+#define CATU_AXICTRL_ARPROT_MASK 0x3
+#define CATU_AXICTRL_ARCACHE(arcache) \
+ (((arcache) & CATU_AXICTRL_ARCACHE_MASK) << CATU_AXICTRL_ARCACHE_SHIFT)
+
+#define CATU_AXICTRL_VAL(arcache, arprot) \
+ (CATU_AXICTRL_ARCACHE(arcache) | ((arprot) & CATU_AXICTRL_ARPROT_MASK))
+
+#define AXI3_AxCACHE_WB_READ_ALLOC 0x7
+/*
+ * AXI - ARPROT bits:
+ * See AMBA AXI & ACE Protocol specification (ARM IHI 0022E)
+ * section A4.7 Access Permissions.
+ *
+ * Bit 0: 0 - Unprivileged access, 1 - Privileged access
+ * Bit 1: 0 - Secure access, 1 - Non-secure access.
+ * Bit 2: 0 - Data access, 1 - instruction access.
+ *
+ * CATU AXICTRL:ARPROT[2] is res0 as we always access data.
+ */
+#define CATU_OS_ARPROT 0x2
+
+#define CATU_OS_AXICTRL \
+ CATU_AXICTRL_VAL(AXI3_AxCACHE_WB_READ_ALLOC, CATU_OS_ARPROT)
+
+#define CATU_STATUS_READY 8
+#define CATU_STATUS_ADRERR 0
+#define CATU_STATUS_AXIERR 4
+
+#define CATU_IRQEN_ON 0x1
+#define CATU_IRQEN_OFF 0x0
+
+struct catu_drvdata {
+ struct device *dev;
+ void __iomem *base;
+ struct coresight_device *csdev;
+ int irq;
+};
+
+#define CATU_REG32(name, offset) \
+static inline u32 \
+catu_read_##name(struct catu_drvdata *drvdata) \
+{ \
+ return coresight_read_reg_pair(drvdata->base, offset, -1); \
+} \
+static inline void \
+catu_write_##name(struct catu_drvdata *drvdata, u32 val) \
+{ \
+ coresight_write_reg_pair(drvdata->base, val, offset, -1); \
+}
+
+#define CATU_REG_PAIR(name, lo_off, hi_off) \
+static inline u64 \
+catu_read_##name(struct catu_drvdata *drvdata) \
+{ \
+ return coresight_read_reg_pair(drvdata->base, lo_off, hi_off); \
+} \
+static inline void \
+catu_write_##name(struct catu_drvdata *drvdata, u64 val) \
+{ \
+ coresight_write_reg_pair(drvdata->base, val, lo_off, hi_off); \
+}
+
+CATU_REG32(control, CATU_CONTROL);
+CATU_REG32(mode, CATU_MODE);
+CATU_REG32(irqen, CATU_IRQEN);
+CATU_REG32(axictrl, CATU_AXICTRL);
+CATU_REG_PAIR(sladdr, CATU_SLADDRLO, CATU_SLADDRHI)
+CATU_REG_PAIR(inaddr, CATU_INADDRLO, CATU_INADDRHI)
+
+static inline bool coresight_is_catu_device(struct coresight_device *csdev)
+{
+ if (!IS_ENABLED(CONFIG_CORESIGHT_CATU))
+ return false;
+ if (csdev->type != CORESIGHT_DEV_TYPE_HELPER)
+ return false;
+ if (csdev->subtype.helper_subtype != CORESIGHT_DEV_SUBTYPE_HELPER_CATU)
+ return false;
+ return true;
+}
+
+#ifdef CONFIG_CORESIGHT_CATU
+extern const struct etr_buf_operations etr_catu_buf_ops;
+#else
+/* Dummy declaration for the CATU ops */
+static const struct etr_buf_operations etr_catu_buf_ops;
+#endif
+
+#endif
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 320d29df17e1..306119eaf16a 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -195,7 +195,6 @@ static void etb_dump_hw(struct etb_drvdata *drvdata)
bool lost = false;
int i;
u8 *buf_ptr;
- const u32 *barrier;
u32 read_data, depth;
u32 read_ptr, write_ptr;
u32 frame_off, frame_endoff;
@@ -226,19 +225,16 @@ static void etb_dump_hw(struct etb_drvdata *drvdata)
depth = drvdata->buffer_depth;
buf_ptr = drvdata->buf;
- barrier = barrier_pkt;
for (i = 0; i < depth; i++) {
read_data = readl_relaxed(drvdata->base +
ETB_RAM_READ_DATA_REG);
- if (lost && *barrier) {
- read_data = *barrier;
- barrier++;
- }
-
*(u32 *)buf_ptr = read_data;
buf_ptr += 4;
}
+ if (lost)
+ coresight_insert_barrier_packet(drvdata->buf);
+
if (frame_off) {
buf_ptr -= (frame_endoff * 4);
for (i = 0; i < frame_endoff; i++) {
@@ -447,7 +443,7 @@ static void etb_update_buffer(struct coresight_device *csdev,
buf_ptr = buf->data_pages[cur] + offset;
read_data = readl_relaxed(drvdata->base +
ETB_RAM_READ_DATA_REG);
- if (lost && *barrier) {
+ if (lost && i < CORESIGHT_BARRIER_PKT_SIZE) {
read_data = *barrier;
barrier++;
}
diff --git a/drivers/hwtracing/coresight/coresight-etm.h b/drivers/hwtracing/coresight/coresight-etm.h
index e8b4549e30e2..79e1ad860d8a 100644
--- a/drivers/hwtracing/coresight/coresight-etm.h
+++ b/drivers/hwtracing/coresight/coresight-etm.h
@@ -168,8 +168,6 @@
* @seq_curr_state: current value of the sequencer register.
* @ctxid_idx: index for the context ID registers.
* @ctxid_pid: value for the context ID to trigger on.
- * @ctxid_vpid: Virtual PID seen by users if PID namespace is enabled, otherwise
- * the same value of ctxid_pid.
* @ctxid_mask: mask applicable to all the context IDs.
* @sync_freq: Synchronisation frequency.
* @timestamp_event: Defines an event that requests the insertion
@@ -202,7 +200,6 @@ struct etm_config {
u32 seq_curr_state;
u8 ctxid_idx;
u32 ctxid_pid[ETM_MAX_CTXID_CMP];
- u32 ctxid_vpid[ETM_MAX_CTXID_CMP];
u32 ctxid_mask;
u32 sync_freq;
u32 timestamp_event;
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
index 9435c1481f61..75487b3fad86 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
@@ -4,6 +4,7 @@
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*/
+#include <linux/pid_namespace.h>
#include <linux/pm_runtime.h>
#include <linux/sysfs.h>
#include "coresight-etm.h"
@@ -1025,8 +1026,15 @@ static ssize_t ctxid_pid_show(struct device *dev,
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
+ /*
+ * Don't use contextID tracing if coming from a PID namespace. See
+ * comment in ctxid_pid_store().
+ */
+ if (task_active_pid_ns(current) != &init_pid_ns)
+ return -EINVAL;
+
spin_lock(&drvdata->spinlock);
- val = config->ctxid_vpid[config->ctxid_idx];
+ val = config->ctxid_pid[config->ctxid_idx];
spin_unlock(&drvdata->spinlock);
return sprintf(buf, "%#lx\n", val);
@@ -1037,19 +1045,28 @@ static ssize_t ctxid_pid_store(struct device *dev,
const char *buf, size_t size)
{
int ret;
- unsigned long vpid, pid;
+ unsigned long pid;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
- ret = kstrtoul(buf, 16, &vpid);
+ /*
+ * When contextID tracing is enabled the tracers will insert the
+ * value found in the contextID register in the trace stream. But if
+ * a process is in a namespace the PID of that process as seen from the
+ * namespace won't be what the kernel sees, something that makes the
+ * feature confusing and can potentially leak kernel only information.
+ * As such refuse to use the feature if @current is not in the initial
+ * PID namespace.
+ */
+ if (task_active_pid_ns(current) != &init_pid_ns)
+ return -EINVAL;
+
+ ret = kstrtoul(buf, 16, &pid);
if (ret)
return ret;
- pid = coresight_vpid_to_pid(vpid);
-
spin_lock(&drvdata->spinlock);
config->ctxid_pid[config->ctxid_idx] = pid;
- config->ctxid_vpid[config->ctxid_idx] = vpid;
spin_unlock(&drvdata->spinlock);
return size;
@@ -1063,6 +1080,13 @@ static ssize_t ctxid_mask_show(struct device *dev,
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
+ /*
+ * Don't use contextID tracing if coming from a PID namespace. See
+ * comment in ctxid_pid_store().
+ */
+ if (task_active_pid_ns(current) != &init_pid_ns)
+ return -EINVAL;
+
val = config->ctxid_mask;
return sprintf(buf, "%#lx\n", val);
}
@@ -1076,6 +1100,13 @@ static ssize_t ctxid_mask_store(struct device *dev,
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
+ /*
+ * Don't use contextID tracing if coming from a PID namespace. See
+ * comment in ctxid_pid_store().
+ */
+ if (task_active_pid_ns(current) != &init_pid_ns)
+ return -EINVAL;
+
ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index 15ed64d51a5b..7c74263c333d 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -230,10 +230,8 @@ void etm_set_default(struct etm_config *config)
config->seq_curr_state = 0x0;
config->ctxid_idx = 0x0;
- for (i = 0; i < ETM_MAX_CTXID_CMP; i++) {
+ for (i = 0; i < ETM_MAX_CTXID_CMP; i++)
config->ctxid_pid[i] = 0x0;
- config->ctxid_vpid[i] = 0x0;
- }
config->ctxid_mask = 0x0;
/* Setting default to 1024 as per TRM recommendation */
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index 4eb8da785ce0..a0365e23678e 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -4,6 +4,7 @@
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*/
+#include <linux/pid_namespace.h>
#include <linux/pm_runtime.h>
#include <linux/sysfs.h>
#include "coresight-etm4x.h"
@@ -250,10 +251,8 @@ static ssize_t reset_store(struct device *dev,
}
config->ctxid_idx = 0x0;
- for (i = 0; i < drvdata->numcidc; i++) {
+ for (i = 0; i < drvdata->numcidc; i++)
config->ctxid_pid[i] = 0x0;
- config->ctxid_vpid[i] = 0x0;
- }
config->ctxid_mask0 = 0x0;
config->ctxid_mask1 = 0x0;
@@ -1637,9 +1636,16 @@ static ssize_t ctxid_pid_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
+ /*
+ * Don't use contextID tracing if coming from a PID namespace. See
+ * comment in ctxid_pid_store().
+ */
+ if (task_active_pid_ns(current) != &init_pid_ns)
+ return -EINVAL;
+
spin_lock(&drvdata->spinlock);
idx = config->ctxid_idx;
- val = (unsigned long)config->ctxid_vpid[idx];
+ val = (unsigned long)config->ctxid_pid[idx];
spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -1649,26 +1655,35 @@ static ssize_t ctxid_pid_store(struct device *dev,
const char *buf, size_t size)
{
u8 idx;
- unsigned long vpid, pid;
+ unsigned long pid;
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
/*
+ * When contextID tracing is enabled the tracers will insert the
+ * value found in the contextID register in the trace stream. But if
+ * a process is in a namespace the PID of that process as seen from the
+ * namespace won't be what the kernel sees, something that makes the
+ * feature confusing and can potentially leak kernel only information.
+ * As such refuse to use the feature if @current is not in the initial
+ * PID namespace.
+ */
+ if (task_active_pid_ns(current) != &init_pid_ns)
+ return -EINVAL;
+
+ /*
* only implemented when ctxid tracing is enabled, i.e. at least one
* ctxid comparator is implemented and ctxid is greater than 0 bits
* in length
*/
if (!drvdata->ctxid_size || !drvdata->numcidc)
return -EINVAL;
- if (kstrtoul(buf, 16, &vpid))
+ if (kstrtoul(buf, 16, &pid))
return -EINVAL;
- pid = coresight_vpid_to_pid(vpid);
-
spin_lock(&drvdata->spinlock);
idx = config->ctxid_idx;
config->ctxid_pid[idx] = (u64)pid;
- config->ctxid_vpid[idx] = (u64)vpid;
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -1682,6 +1697,13 @@ static ssize_t ctxid_masks_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
+ /*
+ * Don't use contextID tracing if coming from a PID namespace. See
+ * comment in ctxid_pid_store().
+ */
+ if (task_active_pid_ns(current) != &init_pid_ns)
+ return -EINVAL;
+
spin_lock(&drvdata->spinlock);
val1 = config->ctxid_mask0;
val2 = config->ctxid_mask1;
@@ -1699,6 +1721,13 @@ static ssize_t ctxid_masks_store(struct device *dev,
struct etmv4_config *config = &drvdata->config;
/*
+ * Don't use contextID tracing if coming from a PID namespace. See
+ * comment in ctxid_pid_store().
+ */
+ if (task_active_pid_ns(current) != &init_pid_ns)
+ return -EINVAL;
+
+ /*
* only implemented when ctxid tracing is enabled, i.e. at least one
* ctxid comparator is implemented and ctxid is greater than 0 bits
* in length
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 9bc04c50d45b..1d94ebec027b 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -1027,7 +1027,8 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
}
pm_runtime_put(&adev->dev);
- dev_info(dev, "%s initialized\n", (char *)id->data);
+ dev_info(dev, "CPU%d: ETM v%d.%d initialized\n",
+ drvdata->cpu, drvdata->arch >> 4, drvdata->arch & 0xf);
if (boot_enable) {
coresight_enable(drvdata->csdev);
@@ -1045,23 +1046,19 @@ err_arch_supported:
return ret;
}
+#define ETM4x_AMBA_ID(pid) \
+ { \
+ .id = pid, \
+ .mask = 0x000fffff, \
+ }
+
static const struct amba_id etm4_ids[] = {
- { /* ETM 4.0 - Cortex-A53 */
- .id = 0x000bb95d,
- .mask = 0x000fffff,
- .data = "ETM 4.0",
- },
- { /* ETM 4.0 - Cortex-A57 */
- .id = 0x000bb95e,
- .mask = 0x000fffff,
- .data = "ETM 4.0",
- },
- { /* ETM 4.0 - A72, Maia, HiSilicon */
- .id = 0x000bb95a,
- .mask = 0x000fffff,
- .data = "ETM 4.0",
- },
- { 0, 0},
+ ETM4x_AMBA_ID(0x000bb95d), /* Cortex-A53 */
+ ETM4x_AMBA_ID(0x000bb95e), /* Cortex-A57 */
+ ETM4x_AMBA_ID(0x000bb95a), /* Cortex-A72 */
+ ETM4x_AMBA_ID(0x000bb959), /* Cortex-A73 */
+ ETM4x_AMBA_ID(0x000bb9da), /* Cortex-A35 */
+ {},
};
static struct amba_driver etm4x_driver = {
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index b7c4a6f6c6b9..52786e9d8926 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -230,8 +230,6 @@
* @addr_type: Current status of the comparator register.
* @ctxid_idx: Context ID index selector.
* @ctxid_pid: Value of the context ID comparator.
- * @ctxid_vpid: Virtual PID seen by users if PID namespace is enabled, otherwise
- * the same value of ctxid_pid.
* @ctxid_mask0:Context ID comparator mask for comparator 0-3.
* @ctxid_mask1:Context ID comparator mask for comparator 4-7.
* @vmid_idx: VM ID index selector.
@@ -274,7 +272,6 @@ struct etmv4_config {
u8 addr_type[ETM_MAX_SINGLE_ADDR_CMP];
u8 ctxid_idx;
u64 ctxid_pid[ETMv4_MAX_CTXID_CMP];
- u64 ctxid_vpid[ETMv4_MAX_CTXID_CMP];
u32 ctxid_mask0;
u32 ctxid_mask1;
u8 vmid_idx;
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 0e5a74dae6a6..1a6cf3589866 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -57,7 +57,8 @@ static DEVICE_ATTR_RO(name)
#define coresight_simple_reg64(type, name, lo_off, hi_off) \
__coresight_simple_func(type, NULL, name, lo_off, hi_off)
-extern const u32 barrier_pkt[5];
+extern const u32 barrier_pkt[4];
+#define CORESIGHT_BARRIER_PKT_SIZE (sizeof(barrier_pkt))
enum etm_addr_type {
ETM_ADDR_TYPE_NONE,
@@ -91,6 +92,13 @@ struct cs_buffers {
void **data_pages;
};
+static inline void coresight_insert_barrier_packet(void *buf)
+{
+ if (buf)
+ memcpy(buf, barrier_pkt, CORESIGHT_BARRIER_PKT_SIZE);
+}
+
+
static inline void CS_LOCK(void __iomem *addr)
{
do {
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 61d849b11c26..0549249f4b39 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -32,39 +32,28 @@ static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
{
- bool lost = false;
char *bufp;
- const u32 *barrier;
- u32 read_data, status;
+ u32 read_data, lost;
int i;
- /*
- * Get a hold of the status register and see if a wrap around
- * has occurred.
- */
- status = readl_relaxed(drvdata->base + TMC_STS);
- if (status & TMC_STS_FULL)
- lost = true;
-
+ /* Check if the buffer wrapped around. */
+ lost = readl_relaxed(drvdata->base + TMC_STS) & TMC_STS_FULL;
bufp = drvdata->buf;
drvdata->len = 0;
- barrier = barrier_pkt;
while (1) {
for (i = 0; i < drvdata->memwidth; i++) {
read_data = readl_relaxed(drvdata->base + TMC_RRD);
if (read_data == 0xFFFFFFFF)
- return;
-
- if (lost && *barrier) {
- read_data = *barrier;
- barrier++;
- }
-
+ goto done;
memcpy(bufp, &read_data, 4);
bufp += 4;
drvdata->len += 4;
}
}
+done:
+ if (lost)
+ coresight_insert_barrier_packet(drvdata->buf);
+ return;
}
static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
@@ -109,6 +98,24 @@ static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
CS_LOCK(drvdata->base);
}
+/*
+ * Return the available trace data in the buffer from @pos, with
+ * a maximum limit of @len, updating @bufpp with the location where
+ * it can be found.
+ */
+ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
+ loff_t pos, size_t len, char **bufpp)
+{
+ ssize_t actual = len;
+
+ /* Adjust the len to available size @pos */
+ if (pos + actual > drvdata->len)
+ actual = drvdata->len - pos;
+ if (actual > 0)
+ *bufpp = drvdata->buf + pos;
+ return actual;
+}
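A caller is expected to consume the buffer in chunks, roughly along these lines (a usage sketch under that assumption, not code from the patch):

	char *p;
	loff_t pos = 0;
	ssize_t n;

	while ((n = tmc_etb_get_sysfs_trace(drvdata, pos, PAGE_SIZE, &p)) > 0) {
		/* copy n bytes from p to the reader */
		pos += n;
	}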
+
static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
{
int ret = 0;
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 02f747afa2ba..2eda5de304c2 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -6,22 +6,912 @@
#include <linux/coresight.h>
#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "coresight-catu.h"
#include "coresight-priv.h"
#include "coresight-tmc.h"
+struct etr_flat_buf {
+ struct device *dev;
+ dma_addr_t daddr;
+ void *vaddr;
+ size_t size;
+};
+
+/*
+ * The TMC ETR SG mechanism has a page size of 4K. The SG table contains
+ * pointers to 4KB buffers. However, the OS may use a PAGE_SIZE different
+ * from 4K (e.g., 16KB or 64KB). This implies that a single OS page could
+ * contain more than one SG buffer and more than one table.
+ *
+ * A table entry has the following format:
+ *
+ * ---Bit31------------Bit4-------Bit1-----Bit0--
+ * | Address[39:12] | SBZ | Entry Type |
+ * ----------------------------------------------
+ *
+ * Address: Bits [39:12] of a physical page address. Bits [11:0] are
+ * always zero.
+ *
+ * Entry type:
+ * b00 - Reserved.
+ * b01 - Last entry in the tables, points to 4K page buffer.
+ * b10 - Normal entry, points to 4K page buffer.
+ * b11 - Link. The address points to the base of next table.
+ */
+
+typedef u32 sgte_t;
+
+#define ETR_SG_PAGE_SHIFT 12
+#define ETR_SG_PAGE_SIZE (1UL << ETR_SG_PAGE_SHIFT)
+#define ETR_SG_PAGES_PER_SYSPAGE (PAGE_SIZE / ETR_SG_PAGE_SIZE)
+#define ETR_SG_PTRS_PER_PAGE (ETR_SG_PAGE_SIZE / sizeof(sgte_t))
+#define ETR_SG_PTRS_PER_SYSPAGE (PAGE_SIZE / sizeof(sgte_t))
+
+#define ETR_SG_ET_MASK 0x3
+#define ETR_SG_ET_LAST 0x1
+#define ETR_SG_ET_NORMAL 0x2
+#define ETR_SG_ET_LINK 0x3
+
+#define ETR_SG_ADDR_SHIFT 4
+
+#define ETR_SG_ENTRY(addr, type) \
+ (sgte_t)((((addr) >> ETR_SG_PAGE_SHIFT) << ETR_SG_ADDR_SHIFT) | \
+ (type & ETR_SG_ET_MASK))
+
+#define ETR_SG_ADDR(entry) \
+ (((dma_addr_t)(entry) >> ETR_SG_ADDR_SHIFT) << ETR_SG_PAGE_SHIFT)
+#define ETR_SG_ET(entry) ((entry) & ETR_SG_ET_MASK)
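As with the CATU entries earlier, a short worked example of the encoding (illustrative values, using the macros above):

	dma_addr_t page   = 0x812345000ULL;                       /* 4K-aligned page */
	sgte_t     normal = ETR_SG_ENTRY(page, ETR_SG_ET_NORMAL); /* 0x08123452      */
	sgte_t     last   = ETR_SG_ENTRY(page, ETR_SG_ET_LAST);   /* 0x08123451      */
	dma_addr_t addr   = ETR_SG_ADDR(normal);                  /* 0x812345000     */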
+
+/*
+ * struct etr_sg_table : ETR SG Table
+ * @sg_table: Generic SG Table holding the data/table pages.
+ * @hwaddr: hwaddress used by the TMC, which is the base
+ * address of the table.
+ */
+struct etr_sg_table {
+ struct tmc_sg_table *sg_table;
+ dma_addr_t hwaddr;
+};
+
+/*
+ * tmc_etr_sg_table_entries: Total number of table entries required to map
+ * @nr_pages system pages.
+ *
+ * We need to map @nr_pages * ETR_SG_PAGES_PER_SYSPAGE data pages.
+ * Each TMC page can map (ETR_SG_PTRS_PER_PAGE - 1) buffer pointers,
+ * with the last entry pointing to another page of table entries.
+ * If we spill over to a new page for mapping 1 entry, we could as
+ * well replace the link entry of the previous page with the last entry.
+ */
+static inline unsigned long __attribute_const__
+tmc_etr_sg_table_entries(int nr_pages)
+{
+ unsigned long nr_sgpages = nr_pages * ETR_SG_PAGES_PER_SYSPAGE;
+ unsigned long nr_sglinks = nr_sgpages / (ETR_SG_PTRS_PER_PAGE - 1);
+ /*
+ * If we spill over to a new page for 1 entry, we could as well
+ * make it the LAST entry in the previous page, skipping the Link
+ * address.
+ */
+ if (nr_sglinks && (nr_sgpages % (ETR_SG_PTRS_PER_PAGE - 1) < 2))
+ nr_sglinks--;
+ return nr_sgpages + nr_sglinks;
+}
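For a feel of the numbers (a worked example with assumed sizes, on a 4K-page kernel where ETR_SG_PAGES_PER_SYSPAGE is 1 and ETR_SG_PTRS_PER_PAGE is 1024): a 16MB buffer needs 4096 data page pointers; 4096 / 1023 gives 4 link entries, and 4096 % 1023 = 4 (not less than 2), so no link is folded away. That makes 4100 entries in total, occupying 5 table pages.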
+
+/*
+ * tmc_pages_get_offset: Go through all the pages in the tmc_pages
+ * and map the device address @addr to an offset within the virtually
+ * contiguous buffer.
+ */
+static long
+tmc_pages_get_offset(struct tmc_pages *tmc_pages, dma_addr_t addr)
+{
+ int i;
+ dma_addr_t page_start;
+
+ for (i = 0; i < tmc_pages->nr_pages; i++) {
+ page_start = tmc_pages->daddrs[i];
+ if (addr >= page_start && addr < (page_start + PAGE_SIZE))
+ return i * PAGE_SIZE + (addr - page_start);
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * tmc_pages_free : Unmap and free the pages used by tmc_pages.
+ * If the pages were not allocated in tmc_pages_alloc() (i.e., provided
+ * by the caller), we simply drop the refcount taken on them.
+ */
+static void tmc_pages_free(struct tmc_pages *tmc_pages,
+ struct device *dev, enum dma_data_direction dir)
+{
+ int i;
+
+ for (i = 0; i < tmc_pages->nr_pages; i++) {
+ if (tmc_pages->daddrs && tmc_pages->daddrs[i])
+ dma_unmap_page(dev, tmc_pages->daddrs[i],
+ PAGE_SIZE, dir);
+ if (tmc_pages->pages && tmc_pages->pages[i])
+ __free_page(tmc_pages->pages[i]);
+ }
+
+ kfree(tmc_pages->pages);
+ kfree(tmc_pages->daddrs);
+ tmc_pages->pages = NULL;
+ tmc_pages->daddrs = NULL;
+ tmc_pages->nr_pages = 0;
+}
+
+/*
+ * tmc_pages_alloc : Allocate and map pages for a given @tmc_pages.
+ * If @pages is not NULL, the list of page virtual addresses is
+ * used as the data pages. The pages are then dma_map'ed for @dev
+ * with dma_direction @dir.
+ *
+ * Returns 0 upon success, else the error number.
+ */
+static int tmc_pages_alloc(struct tmc_pages *tmc_pages,
+ struct device *dev, int node,
+ enum dma_data_direction dir, void **pages)
+{
+ int i, nr_pages;
+ dma_addr_t paddr;
+ struct page *page;
+
+ nr_pages = tmc_pages->nr_pages;
+ tmc_pages->daddrs = kcalloc(nr_pages, sizeof(*tmc_pages->daddrs),
+ GFP_KERNEL);
+ if (!tmc_pages->daddrs)
+ return -ENOMEM;
+ tmc_pages->pages = kcalloc(nr_pages, sizeof(*tmc_pages->pages),
+ GFP_KERNEL);
+ if (!tmc_pages->pages) {
+ kfree(tmc_pages->daddrs);
+ tmc_pages->daddrs = NULL;
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < nr_pages; i++) {
+ if (pages && pages[i]) {
+ page = virt_to_page(pages[i]);
+ /* Hold a refcount on the page */
+ get_page(page);
+ } else {
+ page = alloc_pages_node(node,
+ GFP_KERNEL | __GFP_ZERO, 0);
+ }
+ paddr = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
+ if (dma_mapping_error(dev, paddr))
+ goto err;
+ tmc_pages->daddrs[i] = paddr;
+ tmc_pages->pages[i] = page;
+ }
+ return 0;
+err:
+ tmc_pages_free(tmc_pages, dev, dir);
+ return -ENOMEM;
+}
+
+static inline long
+tmc_sg_get_data_page_offset(struct tmc_sg_table *sg_table, dma_addr_t addr)
+{
+ return tmc_pages_get_offset(&sg_table->data_pages, addr);
+}
+
+static inline void tmc_free_table_pages(struct tmc_sg_table *sg_table)
+{
+ if (sg_table->table_vaddr)
+ vunmap(sg_table->table_vaddr);
+ tmc_pages_free(&sg_table->table_pages, sg_table->dev, DMA_TO_DEVICE);
+}
+
+static void tmc_free_data_pages(struct tmc_sg_table *sg_table)
+{
+ if (sg_table->data_vaddr)
+ vunmap(sg_table->data_vaddr);
+ tmc_pages_free(&sg_table->data_pages, sg_table->dev, DMA_FROM_DEVICE);
+}
+
+void tmc_free_sg_table(struct tmc_sg_table *sg_table)
+{
+ tmc_free_table_pages(sg_table);
+ tmc_free_data_pages(sg_table);
+}
+
+/*
+ * Alloc pages for the table. Since this will be used by the device,
+ * allocate the pages closer to the device (i.e, dev_to_node(dev)
+ * rather than the CPU node).
+ */
+static int tmc_alloc_table_pages(struct tmc_sg_table *sg_table)
+{
+ int rc;
+ struct tmc_pages *table_pages = &sg_table->table_pages;
+
+ rc = tmc_pages_alloc(table_pages, sg_table->dev,
+ dev_to_node(sg_table->dev),
+ DMA_TO_DEVICE, NULL);
+ if (rc)
+ return rc;
+ sg_table->table_vaddr = vmap(table_pages->pages,
+ table_pages->nr_pages,
+ VM_MAP,
+ PAGE_KERNEL);
+ if (!sg_table->table_vaddr)
+ rc = -ENOMEM;
+ else
+ sg_table->table_daddr = table_pages->daddrs[0];
+ return rc;
+}
+
+static int tmc_alloc_data_pages(struct tmc_sg_table *sg_table, void **pages)
+{
+ int rc;
+
+ /* Allocate data pages on the node requested by the caller */
+ rc = tmc_pages_alloc(&sg_table->data_pages,
+ sg_table->dev, sg_table->node,
+ DMA_FROM_DEVICE, pages);
+ if (!rc) {
+ sg_table->data_vaddr = vmap(sg_table->data_pages.pages,
+ sg_table->data_pages.nr_pages,
+ VM_MAP,
+ PAGE_KERNEL);
+ if (!sg_table->data_vaddr)
+ rc = -ENOMEM;
+ }
+ return rc;
+}
+
+/*
+ * tmc_alloc_sg_table: Allocate and setup dma pages for the TMC SG table
+ * and data buffers. TMC writes to the data buffers and reads from the SG
+ * Table pages.
+ *
+ * @dev - Device to which page should be DMA mapped.
+ * @node - Numa node for mem allocations
+ * @nr_tpages - Number of pages for the table entries.
+ * @nr_dpages - Number of pages for Data buffer.
+ * @pages - Optional list of virtual address of pages.
+ */
+struct tmc_sg_table *tmc_alloc_sg_table(struct device *dev,
+ int node,
+ int nr_tpages,
+ int nr_dpages,
+ void **pages)
+{
+ long rc;
+ struct tmc_sg_table *sg_table;
+
+ sg_table = kzalloc(sizeof(*sg_table), GFP_KERNEL);
+ if (!sg_table)
+ return ERR_PTR(-ENOMEM);
+ sg_table->data_pages.nr_pages = nr_dpages;
+ sg_table->table_pages.nr_pages = nr_tpages;
+ sg_table->node = node;
+ sg_table->dev = dev;
+
+ rc = tmc_alloc_data_pages(sg_table, pages);
+ if (!rc)
+ rc = tmc_alloc_table_pages(sg_table);
+ if (rc) {
+ tmc_free_sg_table(sg_table);
+ kfree(sg_table);
+ return ERR_PTR(rc);
+ }
+
+ return sg_table;
+}
+
+/*
+ * tmc_sg_table_sync_data_range: Sync the data buffer written
+ * by the device from @offset upto a @size bytes.
+ */
+void tmc_sg_table_sync_data_range(struct tmc_sg_table *table,
+ u64 offset, u64 size)
+{
+ int i, index, start;
+ int npages = DIV_ROUND_UP(size, PAGE_SIZE);
+ struct device *dev = table->dev;
+ struct tmc_pages *data = &table->data_pages;
+
+ start = offset >> PAGE_SHIFT;
+ for (i = start; i < (start + npages); i++) {
+ index = i % data->nr_pages;
+ dma_sync_single_for_cpu(dev, data->daddrs[index],
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ }
+}
+
+/* tmc_sg_table_sync_table: Sync the page table */
+void tmc_sg_table_sync_table(struct tmc_sg_table *sg_table)
+{
+ int i;
+ struct device *dev = sg_table->dev;
+ struct tmc_pages *table_pages = &sg_table->table_pages;
+
+ for (i = 0; i < table_pages->nr_pages; i++)
+ dma_sync_single_for_device(dev, table_pages->daddrs[i],
+ PAGE_SIZE, DMA_TO_DEVICE);
+}
+
+/*
+ * tmc_sg_table_get_data: Get the buffer pointer for data @offset
+ * in the SG buffer. The @bufpp is updated to point to the buffer.
+ * Returns :
+ * the length of linear data available at @offset.
+ * or
+ * <= 0 if no data is available.
+ */
+ssize_t tmc_sg_table_get_data(struct tmc_sg_table *sg_table,
+ u64 offset, size_t len, char **bufpp)
+{
+ size_t size;
+ int pg_idx = offset >> PAGE_SHIFT;
+ int pg_offset = offset & (PAGE_SIZE - 1);
+ struct tmc_pages *data_pages = &sg_table->data_pages;
+
+ size = tmc_sg_table_buf_size(sg_table);
+ if (offset >= size)
+ return -EINVAL;
+
+ /* Make sure we don't go beyond the end */
+ len = (len < (size - offset)) ? len : size - offset;
+ /* Respect the page boundaries */
+ len = (len < (PAGE_SIZE - pg_offset)) ? len : (PAGE_SIZE - pg_offset);
+ if (len > 0)
+ *bufpp = page_address(data_pages->pages[pg_idx]) + pg_offset;
+ return len;
+}
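Because the returned chunk never crosses a page boundary, a reader would typically call this in a loop (a usage sketch, not code from the patch):

	char *ptr;
	u64 off = 0;
	ssize_t len;

	while ((len = tmc_sg_table_get_data(sg_table, off, SZ_4K, &ptr)) > 0) {
		/* process len bytes at ptr */
		off += len;
	}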
+
+#ifdef ETR_SG_DEBUG
+/* Map a dma address to virtual address */
+static unsigned long
+tmc_sg_daddr_to_vaddr(struct tmc_sg_table *sg_table,
+ dma_addr_t addr, bool table)
+{
+ long offset;
+ unsigned long base;
+ struct tmc_pages *tmc_pages;
+
+ if (table) {
+ tmc_pages = &sg_table->table_pages;
+ base = (unsigned long)sg_table->table_vaddr;
+ } else {
+ tmc_pages = &sg_table->data_pages;
+ base = (unsigned long)sg_table->data_vaddr;
+ }
+
+ offset = tmc_pages_get_offset(tmc_pages, addr);
+ if (offset < 0)
+ return 0;
+ return base + offset;
+}
+
+/* Dump the given sg_table */
+static void tmc_etr_sg_table_dump(struct etr_sg_table *etr_table)
+{
+ sgte_t *ptr;
+ int i = 0;
+ dma_addr_t addr;
+ struct tmc_sg_table *sg_table = etr_table->sg_table;
+
+ ptr = (sgte_t *)tmc_sg_daddr_to_vaddr(sg_table,
+ etr_table->hwaddr, true);
+ while (ptr) {
+ addr = ETR_SG_ADDR(*ptr);
+ switch (ETR_SG_ET(*ptr)) {
+ case ETR_SG_ET_NORMAL:
+ dev_dbg(sg_table->dev,
+ "%05d: %p\t:[N] 0x%llx\n", i, ptr, addr);
+ ptr++;
+ break;
+ case ETR_SG_ET_LINK:
+ dev_dbg(sg_table->dev,
+ "%05d: *** %p\t:{L} 0x%llx ***\n",
+ i, ptr, addr);
+ ptr = (sgte_t *)tmc_sg_daddr_to_vaddr(sg_table,
+ addr, true);
+ break;
+ case ETR_SG_ET_LAST:
+ dev_dbg(sg_table->dev,
+ "%05d: ### %p\t:[L] 0x%llx ###\n",
+ i, ptr, addr);
+ return;
+ default:
+ dev_dbg(sg_table->dev,
+ "%05d: xxx %p\t:[INVALID] 0x%llx xxx\n",
+ i, ptr, addr);
+ return;
+ }
+ i++;
+ }
+ dev_dbg(sg_table->dev, "******* End of Table *****\n");
+}
+#else
+static inline void tmc_etr_sg_table_dump(struct etr_sg_table *etr_table) {}
+#endif
+
+/*
+ * Populate the SG Table page table entries from table/data
+ * pages allocated. Each system data page holds ETR_SG_PAGES_PER_SYSPAGE SG
+ * pages, and so does each table page. We therefore keep track of the indices
+ * of the tables within each system page and move the pointers accordingly.
+ */
+#define INC_IDX_ROUND(idx, size) ((idx) = ((idx) + 1) % (size))
+static void tmc_etr_sg_table_populate(struct etr_sg_table *etr_table)
+{
+ dma_addr_t paddr;
+ int i, type, nr_entries;
+ int tpidx = 0; /* index to the current system table_page */
+ int sgtidx = 0; /* index to the sg_table within the current syspage */
+ int sgtentry = 0; /* the entry within the sg_table */
+ int dpidx = 0; /* index to the current system data_page */
+ int spidx = 0; /* index to the SG page within the current data page */
+ sgte_t *ptr; /* pointer to the table entry to fill */
+ struct tmc_sg_table *sg_table = etr_table->sg_table;
+ dma_addr_t *table_daddrs = sg_table->table_pages.daddrs;
+ dma_addr_t *data_daddrs = sg_table->data_pages.daddrs;
+
+ nr_entries = tmc_etr_sg_table_entries(sg_table->data_pages.nr_pages);
+ /*
+ * Use the contiguous virtual address of the table to update entries.
+ */
+ ptr = sg_table->table_vaddr;
+ /*
+ * Fill all the entries, except the last entry to avoid special
+ * checks within the loop.
+ */
+ for (i = 0; i < nr_entries - 1; i++) {
+ if (sgtentry == ETR_SG_PTRS_PER_PAGE - 1) {
+ /*
+ * Last entry in a sg_table page is a link address to
+ * the next table page. If this sg_table is the last
+ * one in the system page, it links to the first
+ * sg_table in the next system page. Otherwise, it
+ * links to the next sg_table page within the system
+ * page.
+ */
+ if (sgtidx == ETR_SG_PAGES_PER_SYSPAGE - 1) {
+ paddr = table_daddrs[tpidx + 1];
+ } else {
+ paddr = table_daddrs[tpidx] +
+ (ETR_SG_PAGE_SIZE * (sgtidx + 1));
+ }
+ type = ETR_SG_ET_LINK;
+ } else {
+ /*
+ * Update the indices to the data_pages to point to the
+ * next sg_page in the data buffer.
+ */
+ type = ETR_SG_ET_NORMAL;
+ paddr = data_daddrs[dpidx] + spidx * ETR_SG_PAGE_SIZE;
+ if (!INC_IDX_ROUND(spidx, ETR_SG_PAGES_PER_SYSPAGE))
+ dpidx++;
+ }
+ *ptr++ = ETR_SG_ENTRY(paddr, type);
+ /*
+ * Move to the next table pointer, moving the table page index
+ * if necessary
+ */
+ if (!INC_IDX_ROUND(sgtentry, ETR_SG_PTRS_PER_PAGE)) {
+ if (!INC_IDX_ROUND(sgtidx, ETR_SG_PAGES_PER_SYSPAGE))
+ tpidx++;
+ }
+ }
+
+ /* Set up the last entry, which is always a data pointer */
+ paddr = data_daddrs[dpidx] + spidx * ETR_SG_PAGE_SIZE;
+ *ptr++ = ETR_SG_ENTRY(paddr, ETR_SG_ET_LAST);
+}
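Continuing the 16MB example from tmc_etr_sg_table_entries() above, the populated table would be laid out as follows (sketch):

	table page 0: entries 0..1022 NORMAL, entry 1023 LINK -> table page 1
	table page 1: entries 0..1022 NORMAL, entry 1023 LINK -> table page 2
	table page 2: entries 0..1022 NORMAL, entry 1023 LINK -> table page 3
	table page 3: entries 0..1022 NORMAL, entry 1023 LINK -> table page 4
	table page 4: entries 0..2    NORMAL, entry 3 LAST

giving 4 x 1023 + 4 = 4096 data pointers, matching the 4096 data pages.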
+
+/*
+ * tmc_init_etr_sg_table: Allocate a TMC ETR SG table, data buffer of @size and
+ * populate the table.
+ *
+ * @dev - Device pointer for the TMC
+ * @node - NUMA node where the memory should be allocated
+ * @size - Total size of the data buffer
+ * @pages - Optional list of page virtual address
+ */
+static struct etr_sg_table *
+tmc_init_etr_sg_table(struct device *dev, int node,
+ unsigned long size, void **pages)
+{
+ int nr_entries, nr_tpages;
+ int nr_dpages = size >> PAGE_SHIFT;
+ struct tmc_sg_table *sg_table;
+ struct etr_sg_table *etr_table;
+
+ etr_table = kzalloc(sizeof(*etr_table), GFP_KERNEL);
+ if (!etr_table)
+ return ERR_PTR(-ENOMEM);
+ nr_entries = tmc_etr_sg_table_entries(nr_dpages);
+ nr_tpages = DIV_ROUND_UP(nr_entries, ETR_SG_PTRS_PER_SYSPAGE);
+
+ sg_table = tmc_alloc_sg_table(dev, node, nr_tpages, nr_dpages, pages);
+ if (IS_ERR(sg_table)) {
+ kfree(etr_table);
+ return ERR_PTR(PTR_ERR(sg_table));
+ }
+
+ etr_table->sg_table = sg_table;
+ /* TMC should use table base address for DBA */
+ etr_table->hwaddr = sg_table->table_daddr;
+ tmc_etr_sg_table_populate(etr_table);
+ /* Sync the table pages for the HW */
+ tmc_sg_table_sync_table(sg_table);
+ tmc_etr_sg_table_dump(etr_table);
+
+ return etr_table;
+}
+
+/*
+ * tmc_etr_alloc_flat_buf: Allocate a contiguous DMA buffer.
+ */
+static int tmc_etr_alloc_flat_buf(struct tmc_drvdata *drvdata,
+ struct etr_buf *etr_buf, int node,
+ void **pages)
+{
+ struct etr_flat_buf *flat_buf;
+
+ /* We cannot reuse existing pages for flat buf */
+ if (pages)
+ return -EINVAL;
+
+ flat_buf = kzalloc(sizeof(*flat_buf), GFP_KERNEL);
+ if (!flat_buf)
+ return -ENOMEM;
+
+ flat_buf->vaddr = dma_alloc_coherent(drvdata->dev, etr_buf->size,
+ &flat_buf->daddr, GFP_KERNEL);
+ if (!flat_buf->vaddr) {
+ kfree(flat_buf);
+ return -ENOMEM;
+ }
+
+ flat_buf->size = etr_buf->size;
+ flat_buf->dev = drvdata->dev;
+ etr_buf->hwaddr = flat_buf->daddr;
+ etr_buf->mode = ETR_MODE_FLAT;
+ etr_buf->private = flat_buf;
+ return 0;
+}
+
+static void tmc_etr_free_flat_buf(struct etr_buf *etr_buf)
+{
+ struct etr_flat_buf *flat_buf = etr_buf->private;
+
+ if (flat_buf && flat_buf->daddr)
+ dma_free_coherent(flat_buf->dev, flat_buf->size,
+ flat_buf->vaddr, flat_buf->daddr);
+ kfree(flat_buf);
+}
+
+static void tmc_etr_sync_flat_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
+{
+ /*
+ * Adjust the buffer to point to the beginning of the trace data
+ * and update the available trace data.
+ */
+ etr_buf->offset = rrp - etr_buf->hwaddr;
+ if (etr_buf->full)
+ etr_buf->len = etr_buf->size;
+ else
+ etr_buf->len = rwp - rrp;
+}
+
+static ssize_t tmc_etr_get_data_flat_buf(struct etr_buf *etr_buf,
+ u64 offset, size_t len, char **bufpp)
+{
+ struct etr_flat_buf *flat_buf = etr_buf->private;
+
+ *bufpp = (char *)flat_buf->vaddr + offset;
+ /*
+ * tmc_etr_buf_get_data already adjusts the length to handle
+ * buffer wrapping around.
+ */
+ return len;
+}
+
+static const struct etr_buf_operations etr_flat_buf_ops = {
+ .alloc = tmc_etr_alloc_flat_buf,
+ .free = tmc_etr_free_flat_buf,
+ .sync = tmc_etr_sync_flat_buf,
+ .get_data = tmc_etr_get_data_flat_buf,
+};
+
+/*
+ * tmc_etr_alloc_sg_buf: Allocate an SG buffer for @etr_buf and set up the
+ * parameters appropriately.
+ */
+static int tmc_etr_alloc_sg_buf(struct tmc_drvdata *drvdata,
+ struct etr_buf *etr_buf, int node,
+ void **pages)
+{
+ struct etr_sg_table *etr_table;
+
+ etr_table = tmc_init_etr_sg_table(drvdata->dev, node,
+ etr_buf->size, pages);
+ if (IS_ERR(etr_table))
+ return -ENOMEM;
+ etr_buf->hwaddr = etr_table->hwaddr;
+ etr_buf->mode = ETR_MODE_ETR_SG;
+ etr_buf->private = etr_table;
+ return 0;
+}
+
+static void tmc_etr_free_sg_buf(struct etr_buf *etr_buf)
+{
+ struct etr_sg_table *etr_table = etr_buf->private;
+
+ if (etr_table) {
+ tmc_free_sg_table(etr_table->sg_table);
+ kfree(etr_table);
+ }
+}
+
+static ssize_t tmc_etr_get_data_sg_buf(struct etr_buf *etr_buf, u64 offset,
+ size_t len, char **bufpp)
+{
+ struct etr_sg_table *etr_table = etr_buf->private;
+
+ return tmc_sg_table_get_data(etr_table->sg_table, offset, len, bufpp);
+}
+
+static void tmc_etr_sync_sg_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
+{
+ long r_offset, w_offset;
+ struct etr_sg_table *etr_table = etr_buf->private;
+ struct tmc_sg_table *table = etr_table->sg_table;
+
+ /* Convert hw address to offset in the buffer */
+ r_offset = tmc_sg_get_data_page_offset(table, rrp);
+ if (r_offset < 0) {
+ dev_warn(table->dev,
+ "Unable to map RRP %llx to offset\n", rrp);
+ etr_buf->len = 0;
+ return;
+ }
+
+ w_offset = tmc_sg_get_data_page_offset(table, rwp);
+ if (w_offset < 0) {
+ dev_warn(table->dev,
+ "Unable to map RWP %llx to offset\n", rwp);
+ etr_buf->len = 0;
+ return;
+ }
+
+ etr_buf->offset = r_offset;
+ if (etr_buf->full)
+ etr_buf->len = etr_buf->size;
+ else
+ etr_buf->len = ((w_offset < r_offset) ? etr_buf->size : 0) +
+ w_offset - r_offset;
+ tmc_sg_table_sync_data_range(table, r_offset, etr_buf->len);
+}
+
+static const struct etr_buf_operations etr_sg_buf_ops = {
+ .alloc = tmc_etr_alloc_sg_buf,
+ .free = tmc_etr_free_sg_buf,
+ .sync = tmc_etr_sync_sg_buf,
+ .get_data = tmc_etr_get_data_sg_buf,
+};
+
+/*
+ * A TMC ETR could be connected to a CATU device, which provides an address
+ * translation service. This is represented by the output port of the TMC
+ * (ETR) being connected to the input port of the CATU.
+ *
+ * Returns : the coresight_device pointer for the CATU device if one is
+ *           found, NULL otherwise.
+ */
+struct coresight_device *
+tmc_etr_get_catu_device(struct tmc_drvdata *drvdata)
+{
+ int i;
+ struct coresight_device *tmp, *etr = drvdata->csdev;
+
+ if (!IS_ENABLED(CONFIG_CORESIGHT_CATU))
+ return NULL;
+
+ for (i = 0; i < etr->nr_outport; i++) {
+ tmp = etr->conns[i].child_dev;
+ if (tmp && coresight_is_catu_device(tmp))
+ return tmp;
+ }
+
+ return NULL;
+}
+
+static inline void tmc_etr_enable_catu(struct tmc_drvdata *drvdata)
+{
+ struct coresight_device *catu = tmc_etr_get_catu_device(drvdata);
+
+ if (catu && helper_ops(catu)->enable)
+ helper_ops(catu)->enable(catu, drvdata->etr_buf);
+}
+
+static inline void tmc_etr_disable_catu(struct tmc_drvdata *drvdata)
+{
+ struct coresight_device *catu = tmc_etr_get_catu_device(drvdata);
+
+ if (catu && helper_ops(catu)->disable)
+ helper_ops(catu)->disable(catu, drvdata->etr_buf);
+}
+
+static const struct etr_buf_operations *etr_buf_ops[] = {
+ [ETR_MODE_FLAT] = &etr_flat_buf_ops,
+ [ETR_MODE_ETR_SG] = &etr_sg_buf_ops,
+ [ETR_MODE_CATU] = &etr_catu_buf_ops,
+};
+
+static inline int tmc_etr_mode_alloc_buf(int mode,
+ struct tmc_drvdata *drvdata,
+ struct etr_buf *etr_buf, int node,
+ void **pages)
+{
+ int rc = -EINVAL;
+
+ switch (mode) {
+ case ETR_MODE_FLAT:
+ case ETR_MODE_ETR_SG:
+ case ETR_MODE_CATU:
+ if (etr_buf_ops[mode]->alloc)
+ rc = etr_buf_ops[mode]->alloc(drvdata, etr_buf,
+ node, pages);
+ if (!rc)
+ etr_buf->ops = etr_buf_ops[mode];
+ return rc;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * tmc_alloc_etr_buf: Allocate a buffer to be used by the ETR.
+ * @drvdata : ETR device details.
+ * @size : size of the requested buffer.
+ * @flags : Required properties for the buffer.
+ * @node : Node for memory allocations.
+ * @pages : An optional list of pages.
+ */
+static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata,
+ ssize_t size, int flags,
+ int node, void **pages)
+{
+ int rc = -ENOMEM;
+ bool has_etr_sg, has_iommu;
+ bool has_sg, has_catu;
+ struct etr_buf *etr_buf;
+
+ has_etr_sg = tmc_etr_has_cap(drvdata, TMC_ETR_SG);
+ has_iommu = iommu_get_domain_for_dev(drvdata->dev);
+ has_catu = !!tmc_etr_get_catu_device(drvdata);
+
+ has_sg = has_catu || has_etr_sg;
+
+ etr_buf = kzalloc(sizeof(*etr_buf), GFP_KERNEL);
+ if (!etr_buf)
+ return ERR_PTR(-ENOMEM);
+
+ etr_buf->size = size;
+
+ /*
+ * If we have to use an existing list of pages, we cannot reliably
+ * use contiguous DMA memory (even if we have an IOMMU). Otherwise,
+ * we use contiguous DMA memory if at least one of the following
+ * conditions is true:
+ * a) The ETR cannot use Scatter-Gather.
+ * b) We have a backing IOMMU.
+ * c) The requested memory size is small (< 1M).
+ *
+ * Fall back to the other available mechanisms otherwise.
+ */
+ if (!pages &&
+ (!has_sg || has_iommu || size < SZ_1M))
+ rc = tmc_etr_mode_alloc_buf(ETR_MODE_FLAT, drvdata,
+ etr_buf, node, pages);
+ if (rc && has_etr_sg)
+ rc = tmc_etr_mode_alloc_buf(ETR_MODE_ETR_SG, drvdata,
+ etr_buf, node, pages);
+ if (rc && has_catu)
+ rc = tmc_etr_mode_alloc_buf(ETR_MODE_CATU, drvdata,
+ etr_buf, node, pages);
+ if (rc) {
+ kfree(etr_buf);
+ return ERR_PTR(rc);
+ }
+
+ dev_dbg(drvdata->dev, "allocated buffer of size %ldKB in mode %d\n",
+ (unsigned long)size >> 10, etr_buf->mode);
+ return etr_buf;
+}
+
+static void tmc_free_etr_buf(struct etr_buf *etr_buf)
+{
+ WARN_ON(!etr_buf->ops || !etr_buf->ops->free);
+ etr_buf->ops->free(etr_buf);
+ kfree(etr_buf);
+}
+
+/*
+ * tmc_etr_buf_get_data: Get a pointer to the trace data at @offset,
+ * limited to a maximum of @len bytes.
+ * Returns: The size of the linear data available at @offset, with *bufpp
+ * updated to point to the buffer.
+ */
+static ssize_t tmc_etr_buf_get_data(struct etr_buf *etr_buf,
+ u64 offset, size_t len, char **bufpp)
+{
+ /* Adjust the length to limit this transaction to the end of the buffer */
+ len = (len < (etr_buf->size - offset)) ? len : etr_buf->size - offset;
+
+ return etr_buf->ops->get_data(etr_buf, (u64)offset, len, bufpp);
+}
+
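+/*
+ * Insert a barrier packet at @offset so that a decoder knows to look for
+ * a new synchronisation sequence after an overflow. Returns the offset
+ * immediately after the inserted packet.
+ */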
+static inline s64
+tmc_etr_buf_insert_barrier_packet(struct etr_buf *etr_buf, u64 offset)
+{
+ ssize_t len;
+ char *bufp;
+
+ len = tmc_etr_buf_get_data(etr_buf, offset,
+ CORESIGHT_BARRIER_PKT_SIZE, &bufp);
+ if (WARN_ON(len < CORESIGHT_BARRIER_PKT_SIZE))
+ return -EINVAL;
+ coresight_insert_barrier_packet(bufp);
+ return offset + CORESIGHT_BARRIER_PKT_SIZE;
+}
+
+/*
+ * tmc_sync_etr_buf: Sync the trace buffer availability with drvdata.
+ * Makes sure the trace data is synced to memory for consumption.
+ * @etr_buf->offset will hold the offset to the beginning of the trace data
+ * within the buffer, with @etr_buf->len bytes to consume.
+ */
+static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata)
+{
+ struct etr_buf *etr_buf = drvdata->etr_buf;
+ u64 rrp, rwp;
+ u32 status;
+
+ rrp = tmc_read_rrp(drvdata);
+ rwp = tmc_read_rwp(drvdata);
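+ /* The FULL status bit is set once the write pointer has wrapped around */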
+ status = readl_relaxed(drvdata->base + TMC_STS);
+ etr_buf->full = status & TMC_STS_FULL;
+
+ WARN_ON(!etr_buf->ops || !etr_buf->ops->sync);
+
+ etr_buf->ops->sync(etr_buf, rrp, rwp);
+
+ /* Insert barrier packets at the beginning, if there was an overflow */
+ if (etr_buf->full)
+ tmc_etr_buf_insert_barrier_packet(etr_buf, etr_buf->offset);
+}
+
static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
u32 axictl, sts;
+ struct etr_buf *etr_buf = drvdata->etr_buf;
- /* Zero out the memory to help with debug */
- memset(drvdata->vaddr, 0, drvdata->size);
+ /*
+ * If this ETR is connected to a CATU, enable the CATU before we
+ * turn the ETR on.
+ */
+ tmc_etr_enable_catu(drvdata);
CS_UNLOCK(drvdata->base);
/* Wait for TMCSReady bit to be set */
tmc_wait_for_tmcready(drvdata);
- writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ);
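+ /* The RAM Size register (RSZ) is programmed in units of 32-bit words */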
+ writel_relaxed(etr_buf->size / 4, drvdata->base + TMC_RSZ);
writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
@@ -34,16 +924,22 @@ static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
axictl |= TMC_AXICTL_ARCACHE_OS;
}
+ if (etr_buf->mode == ETR_MODE_ETR_SG) {
+ if (WARN_ON(!tmc_etr_has_cap(drvdata, TMC_ETR_SG)))
+ return;
+ axictl |= TMC_AXICTL_SCT_GAT_MODE;
+ }
+
writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
- tmc_write_dba(drvdata, drvdata->paddr);
+ tmc_write_dba(drvdata, etr_buf->hwaddr);
/*
* If the TMC pointers must be programmed before the session,
* we have to set it properly (i.e, RRP/RWP to base address and
* STS to "not full").
*/
if (tmc_etr_has_cap(drvdata, TMC_ETR_SAVE_RESTORE)) {
- tmc_write_rrp(drvdata, drvdata->paddr);
- tmc_write_rwp(drvdata, drvdata->paddr);
+ tmc_write_rrp(drvdata, etr_buf->hwaddr);
+ tmc_write_rwp(drvdata, etr_buf->hwaddr);
sts = readl_relaxed(drvdata->base + TMC_STS) & ~TMC_STS_FULL;
writel_relaxed(sts, drvdata->base + TMC_STS);
}
@@ -58,37 +954,49 @@ static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
CS_LOCK(drvdata->base);
}
-static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
+/*
+ * Return the available trace data in the buffer (starting at
+ * etr_buf->offset, limited by etr_buf->len) from @pos, with a maximum
+ * of @len bytes, updating @bufpp to point to the data. Since the trace
+ * data can start anywhere in the buffer, depending on the RRP, the
+ * length returned is adjusted to handle the buffer wrapping around.
+ */
+ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata,
+ loff_t pos, size_t len, char **bufpp)
{
- const u32 *barrier;
- u32 val;
- u32 *temp;
- u64 rwp;
+ s64 offset;
+ ssize_t actual = len;
+ struct etr_buf *etr_buf = drvdata->etr_buf;
- rwp = tmc_read_rwp(drvdata);
- val = readl_relaxed(drvdata->base + TMC_STS);
+ if (pos + actual > etr_buf->len)
+ actual = etr_buf->len - pos;
+ if (actual <= 0)
+ return actual;
- /*
- * Adjust the buffer to point to the beginning of the trace data
- * and update the available trace data.
- */
- if (val & TMC_STS_FULL) {
- drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
- drvdata->len = drvdata->size;
+ /* Compute the offset from which we read the data */
+ offset = etr_buf->offset + pos;
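+ /* Wrap around to the start of the buffer if the offset runs past the end */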
+ if (offset >= etr_buf->size)
+ offset -= etr_buf->size;
+ return tmc_etr_buf_get_data(etr_buf, offset, actual, bufpp);
+}
- barrier = barrier_pkt;
- temp = (u32 *)drvdata->buf;
+static struct etr_buf *
+tmc_etr_setup_sysfs_buf(struct tmc_drvdata *drvdata)
+{
+ return tmc_alloc_etr_buf(drvdata, drvdata->size,
+ 0, cpu_to_node(0), NULL);
+}
- while (*barrier) {
- *temp = *barrier;
- temp++;
- barrier++;
- }
+static void
+tmc_etr_free_sysfs_buf(struct etr_buf *buf)
+{
+ if (buf)
+ tmc_free_etr_buf(buf);
+}
- } else {
- drvdata->buf = drvdata->vaddr;
- drvdata->len = rwp - drvdata->paddr;
- }
+static void tmc_etr_sync_sysfs_buf(struct tmc_drvdata *drvdata)
+{
+ tmc_sync_etr_buf(drvdata);
}
static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
@@ -101,44 +1009,45 @@ static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
* read before the TMC is disabled.
*/
if (drvdata->mode == CS_MODE_SYSFS)
- tmc_etr_dump_hw(drvdata);
+ tmc_etr_sync_sysfs_buf(drvdata);
+
tmc_disable_hw(drvdata);
CS_LOCK(drvdata->base);
+
+ /* Disable CATU device if this ETR is connected to one */
+ tmc_etr_disable_catu(drvdata);
}
static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
{
int ret = 0;
- bool used = false;
unsigned long flags;
- void __iomem *vaddr = NULL;
- dma_addr_t paddr = 0;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct etr_buf *new_buf = NULL, *free_buf = NULL;
/*
- * If we don't have a buffer release the lock and allocate memory.
- * Otherwise keep the lock and move along.
+ * If we are enabling the ETR from disabled state, we need to make
+ * sure we have a buffer with the right size. The etr_buf is not reset
+ * immediately after we stop the tracing in SYSFS mode as we wait for
+ * the user to collect the data. We may be able to reuse the existing
+ * buffer, provided the size matches. Any allocation has to be done
+ * with the lock released.
*/
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (!drvdata->vaddr) {
+ if (!drvdata->etr_buf || (drvdata->etr_buf->size != drvdata->size)) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- /*
- * Contiguous memory can't be allocated while a spinlock is
- * held. As such allocate memory here and free it if a buffer
- * has already been allocated (from a previous session).
- */
- vaddr = dma_alloc_coherent(drvdata->dev, drvdata->size,
- &paddr, GFP_KERNEL);
- if (!vaddr)
- return -ENOMEM;
+ /* Allocate memory with the locks released */
+ free_buf = new_buf = tmc_etr_setup_sysfs_buf(drvdata);
+ if (IS_ERR(new_buf))
+ return PTR_ERR(new_buf);
/* Let's try again */
spin_lock_irqsave(&drvdata->spinlock, flags);
}
- if (drvdata->reading) {
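+ /* The sink is busy while it is being read or is claimed by perf */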
+ if (drvdata->reading || drvdata->mode == CS_MODE_PERF) {
ret = -EBUSY;
goto out;
}
@@ -146,21 +1055,19 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
/*
* In sysFS mode we can have multiple writers per sink. Since this
* sink is already enabled no memory is needed and the HW need not be
- * touched.
+ * touched, even if the buffer size has changed.
*/
if (drvdata->mode == CS_MODE_SYSFS)
goto out;
/*
- * If drvdata::vaddr == NULL, use the memory allocated above.
- * Otherwise a buffer still exists from a previous session, so
- * simply use that.
+ * If we don't have a buffer or it doesn't match the requested size,
+ * use the buffer allocated above. Otherwise reuse the existing buffer.
*/
- if (drvdata->vaddr == NULL) {
- used = true;
- drvdata->vaddr = vaddr;
- drvdata->paddr = paddr;
- drvdata->buf = drvdata->vaddr;
+ if (!drvdata->etr_buf ||
+ (new_buf && drvdata->etr_buf->size != new_buf->size)) {
+ free_buf = drvdata->etr_buf;
+ drvdata->etr_buf = new_buf;
}
drvdata->mode = CS_MODE_SYSFS;
@@ -169,8 +1076,8 @@ out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
/* Free memory outside the spinlock if need be */
- if (!used && vaddr)
- dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);
+ if (free_buf)
+ tmc_etr_free_sysfs_buf(free_buf);
if (!ret)
dev_info(drvdata->dev, "TMC-ETR enabled\n");
@@ -180,32 +1087,8 @@ out:
static int tmc_enable_etr_sink_perf(struct coresight_device *csdev)
{
- int ret = 0;
- unsigned long flags;
- struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
- spin_lock_irqsave(&drvdata->spinlock, flags);
- if (drvdata->reading) {
- ret = -EINVAL;
- goto out;
- }
-
- /*
- * In Perf mode there can be only one writer per sink. There
- * is also no need to continue if the ETR is already operated
- * from sysFS.
- */
- if (drvdata->mode != CS_MODE_DISABLED) {
- ret = -EINVAL;
- goto out;
- }
-
- drvdata->mode = CS_MODE_PERF;
- tmc_etr_enable_hw(drvdata);
-out:
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
-
- return ret;
+ /* We don't support perf mode yet! */
+ return -EINVAL;
}
static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
@@ -273,8 +1156,8 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
goto out;
}
- /* If drvdata::buf is NULL the trace data has been read already */
- if (drvdata->buf == NULL) {
+ /* If drvdata::etr_buf is NULL the trace data has been read already */
+ if (drvdata->etr_buf == NULL) {
ret = -EINVAL;
goto out;
}
@@ -293,8 +1176,7 @@ out:
int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
{
unsigned long flags;
- dma_addr_t paddr;
- void __iomem *vaddr = NULL;
+ struct etr_buf *etr_buf = NULL;
 /* config types are set at boot time and never change */
if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
@@ -306,9 +1188,8 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
if (drvdata->mode == CS_MODE_SYSFS) {
/*
* The trace run will continue with the same allocated trace
- * buffer. The trace buffer is cleared in tmc_etr_enable_hw(),
- * so we don't have to explicitly clear it. Also, since the
- * tracer is still enabled drvdata::buf can't be NULL.
+ * buffer. Since the tracer is still enabled, drvdata::etr_buf can't
+ * be NULL.
*/
tmc_etr_enable_hw(drvdata);
} else {
@@ -316,17 +1197,16 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
* The ETR is not tracing and the buffer was just read.
* As such prepare to free the trace buffer.
*/
- vaddr = drvdata->vaddr;
- paddr = drvdata->paddr;
- drvdata->buf = drvdata->vaddr = NULL;
+ etr_buf = drvdata->etr_buf;
+ drvdata->etr_buf = NULL;
}
drvdata->reading = false;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
 /* Free allocated memory outside of the spinlock */
- if (vaddr)
- dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);
+ if (etr_buf)
+ tmc_free_etr_buf(etr_buf);
return 0;
}
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 456f122df74f..1b817ec1192c 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -12,6 +12,7 @@
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
+#include <linux/property.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
@@ -123,35 +124,40 @@ static int tmc_open(struct inode *inode, struct file *file)
return 0;
}
+static inline ssize_t tmc_get_sysfs_trace(struct tmc_drvdata *drvdata,
+ loff_t pos, size_t len, char **bufpp)
+{
+ switch (drvdata->config_type) {
+ case TMC_CONFIG_TYPE_ETB:
+ case TMC_CONFIG_TYPE_ETF:
+ return tmc_etb_get_sysfs_trace(drvdata, pos, len, bufpp);
+ case TMC_CONFIG_TYPE_ETR:
+ return tmc_etr_get_sysfs_trace(drvdata, pos, len, bufpp);
+ }
+
+ return -EINVAL;
+}
+
static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
loff_t *ppos)
{
+ char *bufp;
+ ssize_t actual;
struct tmc_drvdata *drvdata = container_of(file->private_data,
struct tmc_drvdata, miscdev);
- char *bufp = drvdata->buf + *ppos;
+ actual = tmc_get_sysfs_trace(drvdata, *ppos, len, &bufp);
+ if (actual <= 0)
+ return 0;
- if (*ppos + len > drvdata->len)
- len = drvdata->len - *ppos;
-
- if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
- if (bufp == (char *)(drvdata->vaddr + drvdata->size))
- bufp = drvdata->vaddr;
- else if (bufp > (char *)(drvdata->vaddr + drvdata->size))
- bufp -= drvdata->size;
- if ((bufp + len) > (char *)(drvdata->vaddr + drvdata->size))
- len = (char *)(drvdata->vaddr + drvdata->size) - bufp;
- }
-
- if (copy_to_user(data, bufp, len)) {
+ if (copy_to_user(data, bufp, actual)) {
dev_dbg(drvdata->dev, "%s: copy_to_user failed\n", __func__);
return -EFAULT;
}
- *ppos += len;
+ *ppos += actual;
+ dev_dbg(drvdata->dev, "%zu bytes copied\n", actual);
- dev_dbg(drvdata->dev, "%s: %zu bytes copied, %d bytes left\n",
- __func__, len, (int)(drvdata->len - *ppos));
- return len;
+ return actual;
}
static int tmc_release(struct inode *inode, struct file *file)
@@ -271,8 +277,41 @@ static ssize_t trigger_cntr_store(struct device *dev,
}
static DEVICE_ATTR_RW(trigger_cntr);
+static ssize_t buffer_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ return sprintf(buf, "%#x\n", drvdata->size);
+}
+
+static ssize_t buffer_size_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ /* Only permitted for TMC-ETRs */
+ if (drvdata->config_type != TMC_CONFIG_TYPE_ETR)
+ return -EPERM;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret)
+ return ret;
+ /* The buffer size should be page aligned */
+ if (val & (PAGE_SIZE - 1))
+ return -EINVAL;
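+ /*
+ * The new size only takes effect the next time the ETR is enabled from
+ * a disabled state; a previously allocated buffer of a different size
+ * is then replaced.
+ */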
+ drvdata->size = val;
+ return size;
+}
+
+static DEVICE_ATTR_RW(buffer_size);
+
static struct attribute *coresight_tmc_attrs[] = {
&dev_attr_trigger_cntr.attr,
+ &dev_attr_buffer_size.attr,
NULL,
};
@@ -291,6 +330,12 @@ const struct attribute_group *coresight_tmc_groups[] = {
NULL,
};
+static inline bool tmc_etr_can_use_sg(struct tmc_drvdata *drvdata)
+{
+ return fwnode_property_present(drvdata->dev->fwnode,
+ "arm,scatter-gather");
+}
+
/* Detect and initialise the capabilities of a TMC ETR */
static int tmc_etr_setup_caps(struct tmc_drvdata *drvdata,
u32 devid, void *dev_caps)
@@ -300,7 +345,7 @@ static int tmc_etr_setup_caps(struct tmc_drvdata *drvdata,
/* Set the unadvertised capabilities */
tmc_etr_init_caps(drvdata, (u32)(unsigned long)dev_caps);
- if (!(devid & TMC_DEVID_NOSCAT))
+ if (!(devid & TMC_DEVID_NOSCAT) && tmc_etr_can_use_sg(drvdata))
tmc_etr_set_cap(drvdata, TMC_ETR_SG);
/* Check if the AXI address width is available */
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index dfaff077a7fc..7027bd60c4cc 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -7,6 +7,7 @@
#ifndef _CORESIGHT_TMC_H
#define _CORESIGHT_TMC_H
+#include <linux/dma-mapping.h>
#include <linux/miscdevice.h>
#define TMC_RSZ 0x004
@@ -122,6 +123,36 @@ enum tmc_mem_intf_width {
#define CORESIGHT_SOC_600_ETR_CAPS \
(TMC_ETR_SAVE_RESTORE | TMC_ETR_AXI_ARCACHE)
+enum etr_mode {
+ ETR_MODE_FLAT, /* Uses contiguous flat buffer */
+ ETR_MODE_ETR_SG, /* Uses in-built TMC ETR SG mechanism */
+ ETR_MODE_CATU, /* Use SG mechanism in CATU */
+};
+
+struct etr_buf_operations;
+
+/**
+ * struct etr_buf - Details of the buffer used by ETR
+ * @mode : Mode of the ETR buffer, contiguous, Scatter Gather etc.
+ * @full : Trace data overflow
+ * @size : Size of the buffer.
+ * @hwaddr : Address to be programmed in the TMC:DBA{LO,HI}
+ * @offset : Offset of the trace data in the buffer for consumption.
+ * @len : Available trace data in the buffer (may wrap around to the beginning).
+ * @ops : ETR buffer operations for the mode.
+ * @private : Backend specific information for the buf
+ */
+struct etr_buf {
+ enum etr_mode mode;
+ bool full;
+ ssize_t size;
+ dma_addr_t hwaddr;
+ unsigned long offset;
+ s64 len;
+ const struct etr_buf_operations *ops;
+ void *private;
+};
+
/**
* struct tmc_drvdata - specifics associated to an TMC component
* @base: memory mapped base address for this component.
@@ -129,11 +160,10 @@ enum tmc_mem_intf_width {
* @csdev: component vitals needed by the framework.
* @miscdev: specifics to handle "/dev/xyz.tmc" entry.
* @spinlock: only one at a time pls.
- * @buf: area of memory where trace data get sent.
- * @paddr: DMA start location in RAM.
- * @vaddr: virtual representation of @paddr.
- * @size: trace buffer size.
- * @len: size of the available trace.
+ * @buf: Snapshot of the trace data for ETF/ETB.
+ * @etr_buf: details of buffer used in TMC-ETR
+ * @len: size of the available trace for ETF/ETB.
+ * @size: trace buffer size for this TMC (common for all modes).
* @mode: how this TMC is being used.
* @config_type: TMC variant, must be of type @tmc_config_type.
* @memwidth: width of the memory interface databus, in bytes.
@@ -148,11 +178,12 @@ struct tmc_drvdata {
struct miscdevice miscdev;
spinlock_t spinlock;
bool reading;
- char *buf;
- dma_addr_t paddr;
- void __iomem *vaddr;
- u32 size;
+ union {
+ char *buf; /* TMC ETB */
+ struct etr_buf *etr_buf; /* TMC ETR */
+ };
u32 len;
+ u32 size;
u32 mode;
enum tmc_config_type config_type;
enum tmc_mem_intf_width memwidth;
@@ -160,6 +191,47 @@ struct tmc_drvdata {
u32 etr_caps;
};
+struct etr_buf_operations {
+ int (*alloc)(struct tmc_drvdata *drvdata, struct etr_buf *etr_buf,
+ int node, void **pages);
+ void (*sync)(struct etr_buf *etr_buf, u64 rrp, u64 rwp);
+ ssize_t (*get_data)(struct etr_buf *etr_buf, u64 offset, size_t len,
+ char **bufpp);
+ void (*free)(struct etr_buf *etr_buf);
+};
+
+/**
+ * struct tmc_pages - Collection of pages used for SG.
+ * @nr_pages: Number of pages in the list.
+ * @daddrs: Array of DMA'able page addresses.
+ * @pages: Array of pages for the buffer.
+ */
+struct tmc_pages {
+ int nr_pages;
+ dma_addr_t *daddrs;
+ struct page **pages;
+};
+
+/*
+ * struct tmc_sg_table - Generic SG table for TMC
+ * @dev: Device for DMA allocations
+ * @table_vaddr: Contiguous Virtual address for PageTable
+ * @data_vaddr: Contiguous Virtual address for Data Buffer
+ * @table_daddr: DMA address of the PageTable base
+ * @node: Node for Page allocations
+ * @table_pages: List of pages & dma address for Table
+ * @data_pages: List of pages & dma address for Data
+ */
+struct tmc_sg_table {
+ struct device *dev;
+ void *table_vaddr;
+ void *data_vaddr;
+ dma_addr_t table_daddr;
+ int node;
+ struct tmc_pages table_pages;
+ struct tmc_pages data_pages;
+};
+
/* Generic functions */
void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata);
void tmc_flush_and_stop(struct tmc_drvdata *drvdata);
@@ -172,10 +244,14 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata);
extern const struct coresight_ops tmc_etb_cs_ops;
extern const struct coresight_ops tmc_etf_cs_ops;
+ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
+ loff_t pos, size_t len, char **bufpp);
/* ETR functions */
int tmc_read_prepare_etr(struct tmc_drvdata *drvdata);
int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata);
extern const struct coresight_ops tmc_etr_cs_ops;
+ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata,
+ loff_t pos, size_t len, char **bufpp);
#define TMC_REG_PAIR(name, lo_off, hi_off) \
@@ -211,4 +287,23 @@ static inline bool tmc_etr_has_cap(struct tmc_drvdata *drvdata, u32 cap)
return !!(drvdata->etr_caps & cap);
}
+struct tmc_sg_table *tmc_alloc_sg_table(struct device *dev,
+ int node,
+ int nr_tpages,
+ int nr_dpages,
+ void **pages);
+void tmc_free_sg_table(struct tmc_sg_table *sg_table);
+void tmc_sg_table_sync_table(struct tmc_sg_table *sg_table);
+void tmc_sg_table_sync_data_range(struct tmc_sg_table *table,
+ u64 offset, u64 size);
+ssize_t tmc_sg_table_get_data(struct tmc_sg_table *sg_table,
+ u64 offset, size_t len, char **bufpp);
+static inline unsigned long
+tmc_sg_table_buf_size(struct tmc_sg_table *sg_table)
+{
+ return sg_table->data_pages.nr_pages << PAGE_SHIFT;
+}
+
+struct coresight_device *tmc_etr_get_catu_device(struct tmc_drvdata *drvdata);
+
#endif
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 01b7457fe8fc..459ef930d98c 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -40,8 +40,9 @@
/** register definition **/
/* FFSR - 0x300 */
-#define FFSR_FT_STOPPED BIT(1)
+#define FFSR_FT_STOPPED_BIT 1
/* FFCR - 0x304 */
+#define FFCR_FON_MAN_BIT 6
#define FFCR_FON_MAN BIT(6)
#define FFCR_STOP_FI BIT(12)
@@ -86,9 +87,9 @@ static void tpiu_disable_hw(struct tpiu_drvdata *drvdata)
/* Generate manual flush */
writel_relaxed(FFCR_STOP_FI | FFCR_FON_MAN, drvdata->base + TPIU_FFCR);
/* Wait for flush to complete */
- coresight_timeout(drvdata->base, TPIU_FFCR, FFCR_FON_MAN, 0);
+ coresight_timeout(drvdata->base, TPIU_FFCR, FFCR_FON_MAN_BIT, 0);
/* Wait for formatter to stop */
- coresight_timeout(drvdata->base, TPIU_FFSR, FFSR_FT_STOPPED, 1);
+ coresight_timeout(drvdata->base, TPIU_FFSR, FFSR_FT_STOPPED_BIT, 1);
CS_LOCK(drvdata->base);
}
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 29e834aab539..3e07fd335f8c 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -51,8 +51,7 @@ static struct list_head *stm_path;
* beginning of the data collected in a buffer. That way the decoder knows that
* it needs to look for another sync sequence.
*/
-const u32 barrier_pkt[5] = {0x7fffffff, 0x7fffffff,
- 0x7fffffff, 0x7fffffff, 0x0};
+const u32 barrier_pkt[4] = {0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff};
static int coresight_id_match(struct device *dev, void *data)
{
@@ -108,7 +107,7 @@ static int coresight_find_link_inport(struct coresight_device *csdev,
dev_err(&csdev->dev, "couldn't find inport, parent: %s, child: %s\n",
dev_name(&parent->dev), dev_name(&csdev->dev));
- return 0;
+ return -ENODEV;
}
static int coresight_find_link_outport(struct coresight_device *csdev,
@@ -126,7 +125,7 @@ static int coresight_find_link_outport(struct coresight_device *csdev,
dev_err(&csdev->dev, "couldn't find outport, parent: %s, child: %s\n",
dev_name(&csdev->dev), dev_name(&child->dev));
- return 0;
+ return -ENODEV;
}
static int coresight_enable_sink(struct coresight_device *csdev, u32 mode)
@@ -179,6 +178,9 @@ static int coresight_enable_link(struct coresight_device *csdev,
else
refport = 0;
+ if (refport < 0)
+ return refport;
+
if (atomic_inc_return(&csdev->refcnt[refport]) == 1) {
if (link_ops(csdev)->enable) {
ret = link_ops(csdev)->enable(csdev, inport, outport);
@@ -423,6 +425,42 @@ struct coresight_device *coresight_get_enabled_sink(bool deactivate)
return dev ? to_coresight_device(dev) : NULL;
}
+/*
+ * coresight_grab_device - Power up this device and any of the helper
+ * devices connected to it for trace operation. Since the helper devices
+ * don't appear on the trace path, they should be handled along with
+ * the master device.
+ */
+static void coresight_grab_device(struct coresight_device *csdev)
+{
+ int i;
+
+ for (i = 0; i < csdev->nr_outport; i++) {
+ struct coresight_device *child = csdev->conns[i].child_dev;
+
+ if (child && child->type == CORESIGHT_DEV_TYPE_HELPER)
+ pm_runtime_get_sync(child->dev.parent);
+ }
+ pm_runtime_get_sync(csdev->dev.parent);
+}
+
+/*
+ * coresight_drop_device - Release this device and any of the helper
+ * devices connected to it.
+ */
+static void coresight_drop_device(struct coresight_device *csdev)
+{
+ int i;
+
+ pm_runtime_put(csdev->dev.parent);
+ for (i = 0; i < csdev->nr_outport; i++) {
+ struct coresight_device *child = csdev->conns[i].child_dev;
+
+ if (child && child->type == CORESIGHT_DEV_TYPE_HELPER)
+ pm_runtime_put(child->dev.parent);
+ }
+}
+
/**
* _coresight_build_path - recursively build a path from a @csdev to a sink.
* @csdev: The device to start from.
@@ -471,9 +509,9 @@ out:
if (!node)
return -ENOMEM;
+ coresight_grab_device(csdev);
node->csdev = csdev;
list_add(&node->link, path);
- pm_runtime_get_sync(csdev->dev.parent);
return 0;
}
@@ -517,7 +555,7 @@ void coresight_release_path(struct list_head *path)
list_for_each_entry_safe(nd, next, path, link) {
csdev = nd->csdev;
- pm_runtime_put_sync(csdev->dev.parent);
+ coresight_drop_device(csdev);
list_del(&nd->link);
kfree(nd);
}
@@ -768,6 +806,9 @@ static struct device_type coresight_dev_type[] = {
.name = "source",
.groups = coresight_source_groups,
},
+ {
+ .name = "helper",
+ },
};
static void coresight_device_release(struct device *dev)