Diffstat (limited to 'drivers/hwtracing/coresight/coresight-tmc-etr.c')
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc-etr.c  |  257
1 file changed, 220 insertions(+), 37 deletions(-)
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index e75428fa1592..b07fcdb3fe1a 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -30,13 +30,15 @@ struct etr_buf_hw {
bool has_iommu;
bool has_etr_sg;
bool has_catu;
+ bool has_resrv;
};
/*
* etr_perf_buffer - Perf buffer used for ETR
 * @drvdata - The ETR drvdata this buffer has been allocated for.
* @etr_buf - Actual buffer used by the ETR
- * @pid - The PID this etr_perf_buffer belongs to.
+ * @pid - The PID of the session owner that this etr_perf_buffer
+ * belongs to.
 * @snapshot - Perf session mode
* @nr_pages - Number of pages in the ring buffer.
* @pages - Array of Pages in the ring buffer.
@@ -123,7 +125,7 @@ struct etr_sg_table {
* If we spill over to a new page for mapping 1 entry, we could as
* well replace the link entry of the previous page with the last entry.
*/
-static inline unsigned long __attribute_const__
+static unsigned long __attribute_const__
tmc_etr_sg_table_entries(int nr_pages)
{
unsigned long nr_sgpages = nr_pages * ETR_SG_PAGES_PER_SYSPAGE;
@@ -237,13 +239,13 @@ err:
return -ENOMEM;
}
-static inline long
+static long
tmc_sg_get_data_page_offset(struct tmc_sg_table *sg_table, dma_addr_t addr)
{
return tmc_pages_get_offset(&sg_table->data_pages, addr);
}
-static inline void tmc_free_table_pages(struct tmc_sg_table *sg_table)
+static void tmc_free_table_pages(struct tmc_sg_table *sg_table)
{
if (sg_table->table_vaddr)
vunmap(sg_table->table_vaddr);
@@ -261,6 +263,7 @@ void tmc_free_sg_table(struct tmc_sg_table *sg_table)
{
tmc_free_table_pages(sg_table);
tmc_free_data_pages(sg_table);
+ kfree(sg_table);
}
EXPORT_SYMBOL_GPL(tmc_free_sg_table);
@@ -342,7 +345,6 @@ struct tmc_sg_table *tmc_alloc_sg_table(struct device *dev,
rc = tmc_alloc_table_pages(sg_table);
if (rc) {
tmc_free_sg_table(sg_table);
- kfree(sg_table);
return ERR_PTR(rc);
}
@@ -479,7 +481,7 @@ static void tmc_etr_sg_table_dump(struct etr_sg_table *etr_table)
dev_dbg(sg_table->dev, "******* End of Table *****\n");
}
#else
-static inline void tmc_etr_sg_table_dump(struct etr_sg_table *etr_table) {}
+static void tmc_etr_sg_table_dump(struct etr_sg_table *etr_table) {}
#endif
/*
@@ -695,6 +697,75 @@ static const struct etr_buf_operations etr_flat_buf_ops = {
};
/*
+ * tmc_etr_alloc_resrv_buf: Allocate a contiguous DMA buffer from the reserved region.
+ */
+static int tmc_etr_alloc_resrv_buf(struct tmc_drvdata *drvdata,
+ struct etr_buf *etr_buf, int node,
+ void **pages)
+{
+ struct etr_flat_buf *resrv_buf;
+ struct device *real_dev = drvdata->csdev->dev.parent;
+
+ /* We cannot reuse existing pages for resrv buf */
+ if (pages)
+ return -EINVAL;
+
+ resrv_buf = kzalloc(sizeof(*resrv_buf), GFP_KERNEL);
+ if (!resrv_buf)
+ return -ENOMEM;
+
+ resrv_buf->daddr = dma_map_resource(real_dev, drvdata->resrv_buf.paddr,
+ drvdata->resrv_buf.size,
+ DMA_FROM_DEVICE, 0);
+ if (dma_mapping_error(real_dev, resrv_buf->daddr)) {
+ dev_err(real_dev, "failed to map source buffer address\n");
+ kfree(resrv_buf);
+ return -ENOMEM;
+ }
+
+ resrv_buf->vaddr = drvdata->resrv_buf.vaddr;
+ resrv_buf->size = etr_buf->size = drvdata->resrv_buf.size;
+ resrv_buf->dev = &drvdata->csdev->dev;
+ etr_buf->hwaddr = resrv_buf->daddr;
+ etr_buf->mode = ETR_MODE_RESRV;
+ etr_buf->private = resrv_buf;
+ return 0;
+}
+
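+/* Unmap the reserved region from the device and free the descriptor */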
+static void tmc_etr_free_resrv_buf(struct etr_buf *etr_buf)
+{
+ struct etr_flat_buf *resrv_buf = etr_buf->private;
+
+ if (resrv_buf && resrv_buf->daddr) {
+ struct device *real_dev = resrv_buf->dev->parent;
+
+ dma_unmap_resource(real_dev, resrv_buf->daddr,
+ resrv_buf->size, DMA_FROM_DEVICE, 0);
+ }
+ kfree(resrv_buf);
+}
+
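+/*
+ * tmc_etr_sync_resrv_buf: Derive the offset and length of the trace
+ * data in the reserved buffer from the ETR read/write pointers.
+ */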
+static void tmc_etr_sync_resrv_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
+{
+ /*
+ * Adjust the buffer to point to the beginning of the trace data
+ * and update the available trace data.
+ */
+ etr_buf->offset = rrp - etr_buf->hwaddr;
+ if (etr_buf->full)
+ etr_buf->len = etr_buf->size;
+ else
+ etr_buf->len = rwp - rrp;
+}
+
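+/* Buffer operations for the reserved memory (resrv) ETR buffer mode */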
+static const struct etr_buf_operations etr_resrv_buf_ops = {
+ .alloc = tmc_etr_alloc_resrv_buf,
+ .free = tmc_etr_free_resrv_buf,
+ .sync = tmc_etr_sync_resrv_buf,
+ .get_data = tmc_etr_get_data_flat_buf,
+};
+
+/*
* tmc_etr_alloc_sg_buf: Allocate an SG buf @etr_buf. Setup the parameters
* appropriately.
*/
@@ -800,6 +871,7 @@ static const struct etr_buf_operations *etr_buf_ops[] = {
[ETR_MODE_FLAT] = &etr_flat_buf_ops,
[ETR_MODE_ETR_SG] = &etr_sg_buf_ops,
[ETR_MODE_CATU] = NULL,
+	[ETR_MODE_RESRV] = &etr_resrv_buf_ops,
};
void tmc_etr_set_catu_ops(const struct etr_buf_operations *catu)
@@ -814,10 +886,8 @@ void tmc_etr_remove_catu_ops(void)
}
EXPORT_SYMBOL_GPL(tmc_etr_remove_catu_ops);
-static inline int tmc_etr_mode_alloc_buf(int mode,
- struct tmc_drvdata *drvdata,
- struct etr_buf *etr_buf, int node,
- void **pages)
+static int tmc_etr_mode_alloc_buf(int mode, struct tmc_drvdata *drvdata, struct etr_buf *etr_buf,
+ int node, void **pages)
{
int rc = -EINVAL;
@@ -825,6 +895,7 @@ static inline int tmc_etr_mode_alloc_buf(int mode,
case ETR_MODE_FLAT:
case ETR_MODE_ETR_SG:
case ETR_MODE_CATU:
+ case ETR_MODE_RESRV:
if (etr_buf_ops[mode] && etr_buf_ops[mode]->alloc)
rc = etr_buf_ops[mode]->alloc(drvdata, etr_buf,
node, pages);
@@ -843,6 +914,7 @@ static void get_etr_buf_hw(struct device *dev, struct etr_buf_hw *buf_hw)
buf_hw->has_iommu = iommu_get_domain_for_dev(dev->parent);
buf_hw->has_etr_sg = tmc_etr_has_cap(drvdata, TMC_ETR_SG);
buf_hw->has_catu = !!tmc_etr_get_catu_device(drvdata);
+ buf_hw->has_resrv = tmc_has_reserved_buffer(drvdata);
}
static bool etr_can_use_flat_mode(struct etr_buf_hw *buf_hw, ssize_t etr_buf_size)
@@ -935,7 +1007,7 @@ static ssize_t tmc_etr_buf_get_data(struct etr_buf *etr_buf,
return etr_buf->ops->get_data(etr_buf, (u64)offset, len, bufpp);
}
-static inline s64
+static s64
tmc_etr_buf_insert_barrier_packet(struct etr_buf *etr_buf, u64 offset)
{
ssize_t len;
@@ -986,7 +1058,7 @@ static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata)
static int __tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
- u32 axictl, sts;
+ u32 axictl, sts, ffcr;
struct etr_buf *etr_buf = drvdata->etr_buf;
int rc = 0;
@@ -1032,10 +1104,12 @@ static int __tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
writel_relaxed(sts, drvdata->base + TMC_STS);
}
- writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
- TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
- TMC_FFCR_TRIGON_TRIGIN,
- drvdata->base + TMC_FFCR);
+ ffcr = TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI | TMC_FFCR_FON_FLIN |
+ TMC_FFCR_FON_TRIG_EVT | TMC_FFCR_TRIGON_TRIGIN;
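+	/* Optionally stop trace capture once the flush completes */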
+ if (drvdata->stop_on_flush)
+ ffcr |= TMC_FFCR_STOP_ON_FLUSH;
+ writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
+
writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
tmc_enable_hw(drvdata);
@@ -1175,10 +1249,10 @@ static struct etr_buf *tmc_etr_get_sysfs_buffer(struct coresight_device *csdev)
* buffer, provided the size matches. Any allocation has to be done
* with the lock released.
*/
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
sysfs_buf = READ_ONCE(drvdata->sysfs_buf);
if (!sysfs_buf || (sysfs_buf->size != drvdata->size)) {
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
/* Allocate memory with the locks released */
free_buf = new_buf = tmc_etr_setup_sysfs_buf(drvdata);
@@ -1186,7 +1260,7 @@ static struct etr_buf *tmc_etr_get_sysfs_buffer(struct coresight_device *csdev)
return new_buf;
/* Let's try again */
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
}
if (drvdata->reading || coresight_get_mode(csdev) == CS_MODE_PERF) {
@@ -1205,7 +1279,7 @@ static struct etr_buf *tmc_etr_get_sysfs_buffer(struct coresight_device *csdev)
}
out:
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
/* Free memory outside the spinlock if need be */
if (free_buf)
@@ -1223,7 +1297,7 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
if (IS_ERR(sysfs_buf))
return PTR_ERR(sysfs_buf);
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
/*
* In sysFS mode we can have multiple writers per sink. Since this
@@ -1242,7 +1316,7 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
}
out:
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (!ret)
dev_dbg(&csdev->dev, "TMC-ETR enabled\n");
@@ -1560,18 +1634,19 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
struct etr_perf_buffer *etr_perf = config;
struct etr_buf *etr_buf = etr_perf->etr_buf;
+ struct perf_event *event = handle->event;
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
/* Don't do anything if another tracer is using this sink */
if (csdev->refcnt != 1) {
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
goto out;
}
if (WARN_ON(drvdata->perf_buf != etr_buf)) {
lost = true;
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
goto out;
}
@@ -1581,7 +1656,7 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
tmc_sync_etr_buf(drvdata);
CS_LOCK(drvdata->base);
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
lost = etr_buf->full;
offset = etr_buf->offset;
@@ -1629,6 +1704,15 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
*/
smp_wmb();
+ /*
+	 * If the event is still active, this update was triggered by an AUX pause.
+ * Re-enable the sink so that it is ready when AUX resume is invoked.
+ */
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (csdev->refcnt && !event->hw.state)
+ __tmc_etr_enable_hw(drvdata);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
out:
/*
* Don't set the TRUNCATED flag in snapshot mode because 1) the
@@ -1650,7 +1734,7 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
struct perf_output_handle *handle = data;
struct etr_perf_buffer *etr_perf = etm_perf_sink_config(handle);
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
/* Don't use this sink if it is already claimed by sysFS */
if (coresight_get_mode(csdev) == CS_MODE_SYSFS) {
rc = -EBUSY;
@@ -1662,7 +1746,7 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
goto unlock_out;
}
- /* Get a handle on the pid of the process to monitor */
+ /* Get a handle on the pid of the session owner */
pid = etr_perf->pid;
/* Do not proceed if this device is associated with another session */
@@ -1690,7 +1774,7 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
}
unlock_out:
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
return rc;
}
@@ -1712,16 +1796,16 @@ static int tmc_disable_etr_sink(struct coresight_device *csdev)
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
return -EBUSY;
}
csdev->refcnt--;
if (csdev->refcnt) {
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
return -EBUSY;
}
@@ -1734,12 +1818,80 @@ static int tmc_disable_etr_sink(struct coresight_device *csdev)
/* Reset perf specific data */
drvdata->perf_buf = NULL;
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
dev_dbg(&csdev->dev, "TMC-ETR disabled\n");
return 0;
}
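+/*
+ * tmc_panic_sync_etr: On a kernel panic, flush and stop the ETR, then
+ * save its state registers into the crash metadata region so that the
+ * trace data in the reserved buffer can be recovered after a reboot.
+ */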
+static int tmc_panic_sync_etr(struct coresight_device *csdev)
+{
+ u32 val;
+ struct tmc_crash_metadata *mdata;
+ struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ mdata = (struct tmc_crash_metadata *)drvdata->crash_mdata.vaddr;
+
+ if (!drvdata->etr_buf)
+ return 0;
+
+ /* Being in RESRV mode implies valid reserved memory as well */
+ if (drvdata->etr_buf->mode != ETR_MODE_RESRV)
+ return 0;
+
+ if (!tmc_has_crash_mdata_buffer(drvdata))
+ return 0;
+
+ CS_UNLOCK(drvdata->base);
+
+ /* Proceed only if ETR is enabled */
+ val = readl(drvdata->base + TMC_CTL);
+ if (!(val & TMC_CTL_CAPT_EN))
+ goto out;
+
+ val = readl(drvdata->base + TMC_FFSR);
+	/* Do a manual flush and stop only if it is not auto-stopped */
+ if (!(val & TMC_FFSR_FT_STOPPED)) {
+ dev_dbg(&csdev->dev,
+ "%s: Triggering manual flush\n", __func__);
+ tmc_flush_and_stop(drvdata);
+	} else {
+		tmc_wait_for_tmcready(drvdata);
+	}
+
+ /* Sync registers from hardware to metadata region */
+ mdata->tmc_ram_size = readl(drvdata->base + TMC_RSZ);
+ mdata->tmc_sts = readl(drvdata->base + TMC_STS);
+ mdata->tmc_mode = readl(drvdata->base + TMC_MODE);
+ mdata->tmc_ffcr = readl(drvdata->base + TMC_FFCR);
+ mdata->tmc_ffsr = readl(drvdata->base + TMC_FFSR);
+ mdata->tmc_rrp = tmc_read_rrp(drvdata);
+ mdata->tmc_rwp = tmc_read_rwp(drvdata);
+ mdata->tmc_dba = tmc_read_dba(drvdata);
+ mdata->trace_paddr = drvdata->resrv_buf.paddr;
+ mdata->version = CS_CRASHDATA_VERSION;
+
+	/*
+	 * Make sure all previous writes are ordered
+	 * before we mark the metadata valid.
+	 */
+ dmb(sy);
+ mdata->valid = true;
+	/*
+	 * The order below must be maintained, since the CRC of the
+	 * metadata depends on the trace data CRC being computed first.
+	 */
+ mdata->crc32_tdata = find_crash_tracedata_crc(drvdata, mdata);
+ mdata->crc32_mdata = find_crash_metadata_crc(mdata);
+
+ tmc_disable_hw(drvdata);
+
+ dev_dbg(&csdev->dev, "%s: success\n", __func__);
+out:
+	CS_LOCK(drvdata->base);
+
+ return 0;
+}
+
static const struct coresight_ops_sink tmc_etr_sink_ops = {
.enable = tmc_enable_etr_sink,
.disable = tmc_disable_etr_sink,
@@ -1748,8 +1900,13 @@ static const struct coresight_ops_sink tmc_etr_sink_ops = {
.free_buffer = tmc_free_etr_buffer,
};
+static const struct coresight_ops_panic tmc_etr_sync_ops = {
+ .sync = tmc_panic_sync_etr,
+};
+
const struct coresight_ops tmc_etr_cs_ops = {
.sink_ops = &tmc_etr_sink_ops,
+ .panic_ops = &tmc_etr_sync_ops,
};
int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
@@ -1761,7 +1918,7 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
return -EINVAL;
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
ret = -EBUSY;
goto out;
@@ -1783,7 +1940,7 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
drvdata->reading = true;
out:
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
return ret;
}
@@ -1797,7 +1954,7 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
return -EINVAL;
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
/* Re-enable the TMC if need be */
if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) {
@@ -1817,7 +1974,7 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
}
drvdata->reading = false;
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
/* Free allocated memory outside of the spinlock */
if (sysfs_buf)
@@ -1830,6 +1987,7 @@ static const char *const buf_modes_str[] = {
[ETR_MODE_FLAT] = "flat",
[ETR_MODE_ETR_SG] = "tmc-sg",
[ETR_MODE_CATU] = "catu",
+ [ETR_MODE_RESRV] = "resrv",
[ETR_MODE_AUTO] = "auto",
};
@@ -1848,6 +2006,9 @@ static ssize_t buf_modes_available_show(struct device *dev,
if (buf_hw.has_catu)
size += sysfs_emit_at(buf, size, "%s ", buf_modes_str[ETR_MODE_CATU]);
+ if (buf_hw.has_resrv)
+ size += sysfs_emit_at(buf, size, "%s ", buf_modes_str[ETR_MODE_RESRV]);
+
size += sysfs_emit_at(buf, size, "\n");
return size;
}
@@ -1861,6 +2022,26 @@ static ssize_t buf_mode_preferred_show(struct device *dev,
return sysfs_emit(buf, "%s\n", buf_modes_str[drvdata->etr_mode]);
}
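+/*
+ * buf_mode_set_resrv: Switch the preferred ETR buffer mode to the
+ * reserved region, invalidating any stale crashdata first.
+ */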
+static int buf_mode_set_resrv(struct tmc_drvdata *drvdata)
+{
+ int err = -EBUSY;
+ unsigned long flags;
+ struct tmc_resrv_buf *rbuf;
+
+ rbuf = &drvdata->resrv_buf;
+
+ /* Ensure there are no active crashdata read sessions */
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (!rbuf->reading) {
+ tmc_crashdata_set_invalid(drvdata);
+ rbuf->len = 0;
+ drvdata->etr_mode = ETR_MODE_RESRV;
+ err = 0;
+ }
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ return err;
+}
+
static ssize_t buf_mode_preferred_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
@@ -1875,6 +2056,8 @@ static ssize_t buf_mode_preferred_store(struct device *dev,
drvdata->etr_mode = ETR_MODE_ETR_SG;
else if (sysfs_streq(buf, buf_modes_str[ETR_MODE_CATU]) && buf_hw.has_catu)
drvdata->etr_mode = ETR_MODE_CATU;
+ else if (sysfs_streq(buf, buf_modes_str[ETR_MODE_RESRV]) && buf_hw.has_resrv)
+ return buf_mode_set_resrv(drvdata) ? : size;
else if (sysfs_streq(buf, buf_modes_str[ETR_MODE_AUTO]))
drvdata->etr_mode = ETR_MODE_AUTO;
else