Diffstat (limited to 'drivers/hwtracing/coresight/coresight-tmc-etr.c')
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc-etr.c | 537
1 file changed, 413 insertions(+), 124 deletions(-)
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 918d461fcf4a..e0d83ee01b77 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -26,11 +26,19 @@ struct etr_flat_buf {
         size_t          size;
 };
 
+struct etr_buf_hw {
+        bool    has_iommu;
+        bool    has_etr_sg;
+        bool    has_catu;
+        bool    has_resrv;
+};
+
 /*
  * etr_perf_buffer - Perf buffer used for ETR
  * @drvdata     - The ETR drvdaga this buffer has been allocated for.
  * @etr_buf     - Actual buffer used by the ETR
- * @pid         - The PID this etr_perf_buffer belongs to.
+ * @pid         - The PID of the session owner that etr_perf_buffer
+ *                belongs to.
  * @snaphost    - Perf session mode
  * @nr_pages    - Number of pages in the ring buffer.
  * @pages       - Array of Pages in the ring buffer.
@@ -45,7 +53,8 @@ struct etr_perf_buffer {
 };
 
 /* Convert the perf index to an offset within the ETR buffer */
-#define PERF_IDX2OFF(idx, buf)  ((idx) % ((buf)->nr_pages << PAGE_SHIFT))
+#define PERF_IDX2OFF(idx, buf)  \
+        ((idx) % ((unsigned long)(buf)->nr_pages << PAGE_SHIFT))
 
 /* Lower limit for ETR hardware buffer */
 #define TMC_ETR_PERF_MIN_BUF_SIZE       SZ_1M
@@ -116,7 +125,7 @@ struct etr_sg_table {
  * If we spill over to a new page for mapping 1 entry, we could as
  * well replace the link entry of the previous page with the last entry.
  */
-static inline unsigned long __attribute_const__
+static unsigned long __attribute_const__
 tmc_etr_sg_table_entries(int nr_pages)
 {
         unsigned long nr_sgpages = nr_pages * ETR_SG_PAGES_PER_SYSPAGE;
@@ -230,13 +239,13 @@ err:
         return -ENOMEM;
 }
 
-static inline long
+static long
 tmc_sg_get_data_page_offset(struct tmc_sg_table *sg_table, dma_addr_t addr)
 {
         return tmc_pages_get_offset(&sg_table->data_pages, addr);
 }
 
-static inline void tmc_free_table_pages(struct tmc_sg_table *sg_table)
+static void tmc_free_table_pages(struct tmc_sg_table *sg_table)
 {
         if (sg_table->table_vaddr)
                 vunmap(sg_table->table_vaddr);
@@ -254,6 +263,7 @@ void tmc_free_sg_table(struct tmc_sg_table *sg_table)
 {
         tmc_free_table_pages(sg_table);
         tmc_free_data_pages(sg_table);
+        kfree(sg_table);
 }
 EXPORT_SYMBOL_GPL(tmc_free_sg_table);
 
@@ -335,7 +345,6 @@ struct tmc_sg_table *tmc_alloc_sg_table(struct device *dev,
                 rc = tmc_alloc_table_pages(sg_table);
         if (rc) {
                 tmc_free_sg_table(sg_table);
-                kfree(sg_table);
                 return ERR_PTR(rc);
         }
 
@@ -472,7 +481,7 @@ static void tmc_etr_sg_table_dump(struct etr_sg_table *etr_table)
         dev_dbg(sg_table->dev, "******* End of Table *****\n");
 }
 #else
-static inline void tmc_etr_sg_table_dump(struct etr_sg_table *etr_table) {}
+static void tmc_etr_sg_table_dump(struct etr_sg_table *etr_table) {}
 #endif
 
 /*
@@ -609,7 +618,8 @@ static int tmc_etr_alloc_flat_buf(struct tmc_drvdata *drvdata,
         flat_buf->vaddr = dma_alloc_noncoherent(real_dev, etr_buf->size,
                                                 &flat_buf->daddr,
-                                                DMA_FROM_DEVICE, GFP_KERNEL);
+                                                DMA_FROM_DEVICE,
+                                                GFP_KERNEL | __GFP_NOWARN);
         if (!flat_buf->vaddr) {
                 kfree(flat_buf);
                 return -ENOMEM;
         }
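The PERF_IDX2OFF hunk above is an overflow fix, not a cosmetic one: nr_pages is an int, so (buf)->nr_pages << PAGE_SHIFT is evaluated in 32 bits and overflows once the perf ring reaches 2 GiB. A minimal user-space sketch of the corrected arithmetic; page_shift and the sample values are hypothetical stand-ins, not driver state:

#include <stdio.h>

int main(void)
{
        const int page_shift = 12;              /* stand-in for PAGE_SHIFT */
        int nr_pages = 524288;                  /* a 2 GiB ring: 2^31 bytes */
        unsigned long idx = 0x80001000UL;       /* perf write index */

        /*
         * (nr_pages << page_shift) would be a signed 32-bit shift and
         * overflow here; widening before the shift keeps the modulus
         * correct, exactly as the cast in the macro does.
         */
        unsigned long off = idx % ((unsigned long)nr_pages << page_shift);

        printf("offset = %#lx\n", off);         /* prints 0x1000 */
        return 0;
}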
@@ -687,6 +697,75 @@ static const struct etr_buf_operations etr_flat_buf_ops = {
 };
 
 /*
+ * tmc_etr_alloc_resrv_buf: Allocate a contiguous DMA buffer from reserved region.
+ */
+static int tmc_etr_alloc_resrv_buf(struct tmc_drvdata *drvdata,
+                                   struct etr_buf *etr_buf, int node,
+                                   void **pages)
+{
+        struct etr_flat_buf *resrv_buf;
+        struct device *real_dev = drvdata->csdev->dev.parent;
+
+        /* We cannot reuse existing pages for resrv buf */
+        if (pages)
+                return -EINVAL;
+
+        resrv_buf = kzalloc(sizeof(*resrv_buf), GFP_KERNEL);
+        if (!resrv_buf)
+                return -ENOMEM;
+
+        resrv_buf->daddr = dma_map_resource(real_dev, drvdata->resrv_buf.paddr,
+                                            drvdata->resrv_buf.size,
+                                            DMA_FROM_DEVICE, 0);
+        if (dma_mapping_error(real_dev, resrv_buf->daddr)) {
+                dev_err(real_dev, "failed to map source buffer address\n");
+                kfree(resrv_buf);
+                return -ENOMEM;
+        }
+
+        resrv_buf->vaddr = drvdata->resrv_buf.vaddr;
+        resrv_buf->size = etr_buf->size = drvdata->resrv_buf.size;
+        resrv_buf->dev = &drvdata->csdev->dev;
+        etr_buf->hwaddr = resrv_buf->daddr;
+        etr_buf->mode = ETR_MODE_RESRV;
+        etr_buf->private = resrv_buf;
+        return 0;
+}
+
+static void tmc_etr_free_resrv_buf(struct etr_buf *etr_buf)
+{
+        struct etr_flat_buf *resrv_buf = etr_buf->private;
+
+        if (resrv_buf && resrv_buf->daddr) {
+                struct device *real_dev = resrv_buf->dev->parent;
+
+                dma_unmap_resource(real_dev, resrv_buf->daddr,
+                                   resrv_buf->size, DMA_FROM_DEVICE, 0);
+        }
+        kfree(resrv_buf);
+}
+
+static void tmc_etr_sync_resrv_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
+{
+        /*
+         * Adjust the buffer to point to the beginning of the trace data
+         * and update the available trace data.
+         */
+        etr_buf->offset = rrp - etr_buf->hwaddr;
+        if (etr_buf->full)
+                etr_buf->len = etr_buf->size;
+        else
+                etr_buf->len = rwp - rrp;
+}
+
+static const struct etr_buf_operations etr_resrv_buf_ops = {
+        .alloc = tmc_etr_alloc_resrv_buf,
+        .free = tmc_etr_free_resrv_buf,
+        .sync = tmc_etr_sync_resrv_buf,
+        .get_data = tmc_etr_get_data_flat_buf,
+};
+
+/*
  * tmc_etr_alloc_sg_buf: Allocate an SG buf @etr_buf. Setup the parameters
  * appropriately.
  */
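tmc_etr_sync_resrv_buf() in the hunk above derives the readable window from the two hardware pointers. A worked restatement of that rule with hypothetical values, assuming a 1 MiB reserved region at 0x80000000 with RRP = 0x80000400 and RWP = 0x80001400; this helper is illustrative, not driver code:

#include <linux/types.h>

/*
 * With the values above: *offset = 0x400 (trace data starts 1 KiB into
 * the region) and *len = 0x1000 while the buffer has not wrapped, or
 * the full 1 MiB once the ETR has flagged it full.
 */
static void resrv_window(u64 hwaddr, u64 size, bool full,
                         u64 rrp, u64 rwp, u64 *offset, u64 *len)
{
        *offset = rrp - hwaddr;                 /* first valid byte */
        *len = full ? size : rwp - rrp;         /* valid bytes after it */
}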
@@ -775,44 +854,24 @@ static const struct etr_buf_operations etr_sg_buf_ops = {
 struct coresight_device *
 tmc_etr_get_catu_device(struct tmc_drvdata *drvdata)
 {
-        int i;
-        struct coresight_device *tmp, *etr = drvdata->csdev;
+        struct coresight_device *etr = drvdata->csdev;
+        union coresight_dev_subtype catu_subtype = {
+                .helper_subtype = CORESIGHT_DEV_SUBTYPE_HELPER_CATU
+        };
 
         if (!IS_ENABLED(CONFIG_CORESIGHT_CATU))
                 return NULL;
 
-        for (i = 0; i < etr->pdata->nr_outport; i++) {
-                tmp = etr->pdata->conns[i].child_dev;
-                if (tmp && coresight_is_catu_device(tmp))
-                        return tmp;
-        }
-
-        return NULL;
+        return coresight_find_output_type(etr->pdata, CORESIGHT_DEV_TYPE_HELPER,
+                                          catu_subtype);
 }
 EXPORT_SYMBOL_GPL(tmc_etr_get_catu_device);
 
-static inline int tmc_etr_enable_catu(struct tmc_drvdata *drvdata,
-                                      struct etr_buf *etr_buf)
-{
-        struct coresight_device *catu = tmc_etr_get_catu_device(drvdata);
-
-        if (catu && helper_ops(catu)->enable)
-                return helper_ops(catu)->enable(catu, etr_buf);
-        return 0;
-}
-
-static inline void tmc_etr_disable_catu(struct tmc_drvdata *drvdata)
-{
-        struct coresight_device *catu = tmc_etr_get_catu_device(drvdata);
-
-        if (catu && helper_ops(catu)->disable)
-                helper_ops(catu)->disable(catu, drvdata->etr_buf);
-}
-
 static const struct etr_buf_operations *etr_buf_ops[] = {
         [ETR_MODE_FLAT] = &etr_flat_buf_ops,
         [ETR_MODE_ETR_SG] = &etr_sg_buf_ops,
         [ETR_MODE_CATU] = NULL,
+        [ETR_MODE_RESRV] = &etr_resrv_buf_ops
 };
 
 void tmc_etr_set_catu_ops(const struct etr_buf_operations *catu)
@@ -827,10 +886,8 @@ void tmc_etr_remove_catu_ops(void)
 }
 EXPORT_SYMBOL_GPL(tmc_etr_remove_catu_ops);
 
-static inline int tmc_etr_mode_alloc_buf(int mode,
-                                         struct tmc_drvdata *drvdata,
-                                         struct etr_buf *etr_buf, int node,
-                                         void **pages)
+static int tmc_etr_mode_alloc_buf(int mode, struct tmc_drvdata *drvdata, struct etr_buf *etr_buf,
+                                  int node, void **pages)
 {
         int rc = -EINVAL;
 
@@ -838,6 +895,7 @@ static inline int tmc_etr_mode_alloc_buf(int mode,
         case ETR_MODE_FLAT:
         case ETR_MODE_ETR_SG:
         case ETR_MODE_CATU:
+        case ETR_MODE_RESRV:
                 if (etr_buf_ops[mode] && etr_buf_ops[mode]->alloc)
                         rc = etr_buf_ops[mode]->alloc(drvdata, etr_buf,
                                                       node, pages);
@@ -849,6 +907,23 @@ static inline int tmc_etr_mode_alloc_buf(int mode,
         }
 }
 
+static void get_etr_buf_hw(struct device *dev, struct etr_buf_hw *buf_hw)
+{
+        struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+        buf_hw->has_iommu = iommu_get_domain_for_dev(dev->parent);
+        buf_hw->has_etr_sg = tmc_etr_has_cap(drvdata, TMC_ETR_SG);
+        buf_hw->has_catu = !!tmc_etr_get_catu_device(drvdata);
+        buf_hw->has_resrv = tmc_has_reserved_buffer(drvdata);
+}
+
+static bool etr_can_use_flat_mode(struct etr_buf_hw *buf_hw, ssize_t etr_buf_size)
+{
+        bool has_sg = buf_hw->has_catu || buf_hw->has_etr_sg;
+
+        return !has_sg || buf_hw->has_iommu || etr_buf_size < SZ_1M;
+}
+
 /*
  * tmc_alloc_etr_buf: Allocate a buffer use by ETR.
  * @drvdata : ETR device details.
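The two helpers just added, together with the user-preference check in the next hunk, give buffer allocation a fixed order: an explicit etr_mode is tried first, then flat DMA where it is safe, then the TMC's own scatter-gather, then CATU. A condensed sketch of that policy; the function name and return strings are illustrative, and unlike the driver it does not fall through when a chosen mode's allocation fails:

#include <linux/sizes.h>

static const char *pick_mode(const struct etr_buf_hw *hw,
                             bool have_pages, ssize_t size)
{
        bool has_sg = hw->has_catu || hw->has_etr_sg;

        /* flat DMA: no SG hardware needed, or an IOMMU / small buffer */
        if (!have_pages && (!has_sg || hw->has_iommu || size < SZ_1M))
                return "flat";
        if (hw->has_etr_sg)
                return "tmc-sg";        /* built-in scatter-gather */
        if (hw->has_catu)
                return "catu";          /* external address translation */
        return NULL;                    /* allocation will fail */
}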
@@ -862,23 +937,22 @@ static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata,
                                          int node, void **pages)
 {
         int rc = -ENOMEM;
-        bool has_etr_sg, has_iommu;
-        bool has_sg, has_catu;
         struct etr_buf *etr_buf;
+        struct etr_buf_hw buf_hw;
         struct device *dev = &drvdata->csdev->dev;
 
-        has_etr_sg = tmc_etr_has_cap(drvdata, TMC_ETR_SG);
-        has_iommu = iommu_get_domain_for_dev(dev->parent);
-        has_catu = !!tmc_etr_get_catu_device(drvdata);
-
-        has_sg = has_catu || has_etr_sg;
-
+        get_etr_buf_hw(dev, &buf_hw);
         etr_buf = kzalloc(sizeof(*etr_buf), GFP_KERNEL);
         if (!etr_buf)
                 return ERR_PTR(-ENOMEM);
 
         etr_buf->size = size;
 
+        /* If there is user directive for buffer mode, try that first */
+        if (drvdata->etr_mode != ETR_MODE_AUTO)
+                rc = tmc_etr_mode_alloc_buf(drvdata->etr_mode, drvdata,
+                                            etr_buf, node, pages);
+
         /*
          * If we have to use an existing list of pages, we cannot reliably
          * use a contiguous DMA memory (even if we have an IOMMU). Otherwise,
@@ -891,14 +965,13 @@ static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata,
          * Fallback to available mechanisms.
          *
          */
-        if (!pages &&
-            (!has_sg || has_iommu || size < SZ_1M))
+        if (rc && !pages && etr_can_use_flat_mode(&buf_hw, size))
                 rc = tmc_etr_mode_alloc_buf(ETR_MODE_FLAT, drvdata,
                                             etr_buf, node, pages);
-        if (rc && has_etr_sg)
+        if (rc && buf_hw.has_etr_sg)
                 rc = tmc_etr_mode_alloc_buf(ETR_MODE_ETR_SG, drvdata,
                                             etr_buf, node, pages);
-        if (rc && has_catu)
+        if (rc && buf_hw.has_catu)
                 rc = tmc_etr_mode_alloc_buf(ETR_MODE_CATU, drvdata,
                                             etr_buf, node, pages);
         if (rc) {
@@ -934,7 +1007,7 @@ static ssize_t tmc_etr_buf_get_data(struct etr_buf *etr_buf,
         return etr_buf->ops->get_data(etr_buf, (u64)offset, len, bufpp);
 }
 
-static inline s64
+static s64
 tmc_etr_buf_insert_barrier_packet(struct etr_buf *etr_buf, u64 offset)
 {
         ssize_t len;
@@ -942,7 +1015,7 @@ tmc_etr_buf_insert_barrier_packet(struct etr_buf *etr_buf, u64 offset)
         len = tmc_etr_buf_get_data(etr_buf, offset,
                                    CORESIGHT_BARRIER_PKT_SIZE, &bufp);
-        if (WARN_ON(len < CORESIGHT_BARRIER_PKT_SIZE))
+        if (WARN_ON(len < 0 || len < CORESIGHT_BARRIER_PKT_SIZE))
                 return -EINVAL;
         coresight_insert_barrier_packet(bufp);
         return offset + CORESIGHT_BARRIER_PKT_SIZE;
@@ -985,7 +1058,7 @@ static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata)
 
 static int __tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
 {
-        u32 axictl, sts;
+        u32 axictl, sts, ffcr;
         struct etr_buf *etr_buf = drvdata->etr_buf;
         int rc = 0;
 
@@ -1031,10 +1104,12 @@ static int __tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
                 writel_relaxed(sts, drvdata->base + TMC_STS);
         }
 
-        writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
-                       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
-                       TMC_FFCR_TRIGON_TRIGIN,
-                       drvdata->base + TMC_FFCR);
+        ffcr = TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI | TMC_FFCR_FON_FLIN |
+               TMC_FFCR_FON_TRIG_EVT | TMC_FFCR_TRIGON_TRIGIN;
+        if (drvdata->stop_on_flush)
+                ffcr |= TMC_FFCR_STOP_ON_FLUSH;
+        writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
+
         writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
         tmc_enable_hw(drvdata);
 
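The barrier-packet hunk above adds an explicit len < 0 test. If CORESIGHT_BARRIER_PKT_SIZE is an unsigned quantity (for instance sizeof()-based, which is an assumption here), the usual arithmetic conversions turn a negative ssize_t into a huge unsigned value, so the old single comparison would miss the error return. A user-space illustration of the pitfall:

#include <stdio.h>

int main(void)
{
        long len = -22;                 /* an error code, e.g. -EINVAL */
        unsigned long pkt_size = 16;    /* stand-in for a sizeof() value */

        /* -22 converts to a huge unsigned value, so this check passes: */
        printf("naive check misses the error: %d\n", !(len < pkt_size));
        /* testing the sign first catches it: */
        printf("fixed check catches it:       %d\n",
               len < 0 || len < pkt_size);
        return 0;
}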
@@ -1058,13 +1133,6 @@ static int tmc_etr_enable_hw(struct tmc_drvdata *drvdata,
         if (WARN_ON(drvdata->etr_buf))
                 return -EBUSY;
 
-        /*
-         * If this ETR is connected to a CATU, enable it before we turn
-         * this on.
-         */
-        rc = tmc_etr_enable_catu(drvdata, etr_buf);
-        if (rc)
-                return rc;
         rc = coresight_claim_device(drvdata->csdev);
         if (!rc) {
                 drvdata->etr_buf = etr_buf;
@@ -1072,7 +1140,6 @@ static int tmc_etr_enable_hw(struct tmc_drvdata *drvdata,
                 if (rc) {
                         drvdata->etr_buf = NULL;
                         coresight_disclaim_device(drvdata->csdev);
-                        tmc_etr_disable_catu(drvdata);
                 }
         }
 
@@ -1150,7 +1217,7 @@ static void __tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
          * When operating in sysFS mode the content of the buffer needs to be
          * read before the TMC is disabled.
          */
-        if (drvdata->mode == CS_MODE_SYSFS)
+        if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS)
                 tmc_etr_sync_sysfs_buf(drvdata);
 
         tmc_disable_hw(drvdata);
@@ -1162,14 +1229,12 @@ static void __tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
 void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
 {
         __tmc_etr_disable_hw(drvdata);
-        /* Disable CATU device if this ETR is connected to one */
-        tmc_etr_disable_catu(drvdata);
         coresight_disclaim_device(drvdata->csdev);
         /* Reset the ETR buf used by hardware */
         drvdata->etr_buf = NULL;
 }
 
-static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
+static struct etr_buf *tmc_etr_get_sysfs_buffer(struct coresight_device *csdev)
 {
         int ret = 0;
         unsigned long flags;
@@ -1184,36 +1249,33 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
          * buffer, provided the size matches. Any allocation has to be done
          * with the lock released.
          */
-        spin_lock_irqsave(&drvdata->spinlock, flags);
+        raw_spin_lock_irqsave(&drvdata->spinlock, flags);
+
+        /*
+         * If the ETR is already enabled, continue with the existing buffer.
+         */
+        if (coresight_get_mode(csdev) == CS_MODE_SYSFS)
+                goto out;
+
         sysfs_buf = READ_ONCE(drvdata->sysfs_buf);
         if (!sysfs_buf || (sysfs_buf->size != drvdata->size)) {
-                spin_unlock_irqrestore(&drvdata->spinlock, flags);
+                raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
                 /* Allocate memory with the locks released */
                 free_buf = new_buf = tmc_etr_setup_sysfs_buf(drvdata);
                 if (IS_ERR(new_buf))
-                        return PTR_ERR(new_buf);
+                        return new_buf;
 
                 /* Let's try again */
-                spin_lock_irqsave(&drvdata->spinlock, flags);
+                raw_spin_lock_irqsave(&drvdata->spinlock, flags);
         }
 
-        if (drvdata->reading || drvdata->mode == CS_MODE_PERF) {
+        if (drvdata->reading || coresight_get_mode(csdev) == CS_MODE_PERF) {
                 ret = -EBUSY;
                 goto out;
         }
 
         /*
-         * In sysFS mode we can have multiple writers per sink. Since this
-         * sink is already enabled no memory is needed and the HW need not be
-         * touched, even if the buffer size has changed.
-         */
-        if (drvdata->mode == CS_MODE_SYSFS) {
-                atomic_inc(csdev->refcnt);
-                goto out;
-        }
-
-        /*
          * If we don't have a buffer or it doesn't match the requested size,
          * use the buffer allocated above. Otherwise reuse the existing buffer.
          */
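A pattern running through all of these hunks: drvdata->spinlock becomes a raw spinlock (raw_spin_lock_irqsave and friends). On PREEMPT_RT a regular spinlock_t is sleepable, which is incompatible with the truly atomic contexts this driver can now run in, such as the panic-time sync added further down. A minimal sketch of the idiom, with hypothetical demo_* names:

#include <linux/spinlock.h>

struct demo_data {
        raw_spinlock_t  lock;   /* never sleeps, even on PREEMPT_RT */
        int             refcnt;
};

static void demo_get(struct demo_data *d)
{
        unsigned long flags;

        /* Disables local interrupts and spins; safe in atomic context. */
        raw_spin_lock_irqsave(&d->lock, flags);
        d->refcnt++;
        raw_spin_unlock_irqrestore(&d->lock, flags);
}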
@@ -1223,17 +1285,45 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
                 drvdata->sysfs_buf = new_buf;
         }
 
-        ret = tmc_etr_enable_hw(drvdata, drvdata->sysfs_buf);
-        if (!ret) {
-                drvdata->mode = CS_MODE_SYSFS;
-                atomic_inc(csdev->refcnt);
-        }
 out:
-        spin_unlock_irqrestore(&drvdata->spinlock, flags);
+        raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
         /* Free memory outside the spinlock if need be */
         if (free_buf)
                 tmc_etr_free_sysfs_buf(free_buf);
 
+        return ret ? ERR_PTR(ret) : drvdata->sysfs_buf;
+}
+
+static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
+{
+        int ret = 0;
+        unsigned long flags;
+        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+        struct etr_buf *sysfs_buf = tmc_etr_get_sysfs_buffer(csdev);
+
+        if (IS_ERR(sysfs_buf))
+                return PTR_ERR(sysfs_buf);
+
+        raw_spin_lock_irqsave(&drvdata->spinlock, flags);
+
+        /*
+         * In sysFS mode we can have multiple writers per sink. Since this
+         * sink is already enabled no memory is needed and the HW need not be
+         * touched, even if the buffer size has changed.
+         */
+        if (coresight_get_mode(csdev) == CS_MODE_SYSFS) {
+                csdev->refcnt++;
+                goto out;
+        }
+
+        ret = tmc_etr_enable_hw(drvdata, sysfs_buf);
+        if (!ret) {
+                coresight_set_mode(csdev, CS_MODE_SYSFS);
+                csdev->refcnt++;
+        }
+
+out:
+        raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
         if (!ret)
                 dev_dbg(&csdev->dev, "TMC-ETR enabled\n");
@@ -1241,6 +1331,27 @@ out:
         return ret;
 }
 
+struct etr_buf *tmc_etr_get_buffer(struct coresight_device *csdev,
+                                   enum cs_mode mode,
+                                   struct coresight_path *path)
+{
+        struct perf_output_handle *handle = path->handle;
+        struct etr_perf_buffer *etr_perf;
+
+        switch (mode) {
+        case CS_MODE_SYSFS:
+                return tmc_etr_get_sysfs_buffer(csdev);
+        case CS_MODE_PERF:
+                etr_perf = etm_perf_sink_config(handle);
+                if (WARN_ON(!etr_perf || !etr_perf->etr_buf))
+                        return ERR_PTR(-EINVAL);
+                return etr_perf->etr_buf;
+        default:
+                return ERR_PTR(-EINVAL);
+        }
+}
+EXPORT_SYMBOL_GPL(tmc_etr_get_buffer);
+
 /*
  * alloc_etr_buf: Allocate ETR buffer for use by perf.
  * The size of the hardware buffer is dependent on the size configured
@@ -1262,7 +1373,7 @@ alloc_etr_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
          * than the size requested via sysfs.
          */
         if ((nr_pages << PAGE_SHIFT) > drvdata->size) {
-                etr_buf = tmc_alloc_etr_buf(drvdata, (nr_pages << PAGE_SHIFT),
+                etr_buf = tmc_alloc_etr_buf(drvdata, ((ssize_t)nr_pages << PAGE_SHIFT),
                                             0, node, NULL);
                 if (!IS_ERR(etr_buf))
                         goto done;
@@ -1531,18 +1642,19 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
         struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
         struct etr_perf_buffer *etr_perf = config;
         struct etr_buf *etr_buf = etr_perf->etr_buf;
+        struct perf_event *event = handle->event;
 
-        spin_lock_irqsave(&drvdata->spinlock, flags);
+        raw_spin_lock_irqsave(&drvdata->spinlock, flags);
 
         /* Don't do anything if another tracer is using this sink */
-        if (atomic_read(csdev->refcnt) != 1) {
-                spin_unlock_irqrestore(&drvdata->spinlock, flags);
+        if (csdev->refcnt != 1) {
+                raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
                 goto out;
         }
 
         if (WARN_ON(drvdata->perf_buf != etr_buf)) {
                 lost = true;
-                spin_unlock_irqrestore(&drvdata->spinlock, flags);
+                raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
                 goto out;
         }
 
@@ -1552,7 +1664,7 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
         tmc_sync_etr_buf(drvdata);
 
         CS_LOCK(drvdata->base);
-        spin_unlock_irqrestore(&drvdata->spinlock, flags);
+        raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
         lost = etr_buf->full;
         offset = etr_buf->offset;
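The hunk that follows handles perf AUX pause/resume: when the buffer is collected while the event is still active (event->hw.state reads zero), the collection was triggered by an AUX pause rather than a final stop, so the sink is re-armed for the coming resume. A hedged sketch of that test; the driver checks the whole hw.state word, and narrowing it to perf's PERF_HES_STOPPED bit here is an assumption for illustration:

#include <linux/perf_event.h>

/* Illustrative helper only: during an AUX pause the event has not been
 * stopped, so hw.state carries no PERF_HES_STOPPED bit. */
static bool demo_is_aux_pause(struct perf_event *event)
{
        return !(event->hw.state & PERF_HES_STOPPED);
}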
@@ -1600,6 +1712,15 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
          */
         smp_wmb();
 
+        /*
+         * If the event is active, it is triggered during an AUX pause.
+         * Re-enable the sink so that it is ready when AUX resume is invoked.
+         */
+        raw_spin_lock_irqsave(&drvdata->spinlock, flags);
+        if (csdev->refcnt && !event->hw.state)
+                __tmc_etr_enable_hw(drvdata);
+        raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
 out:
         /*
          * Don't set the TRUNCATED flag in snapshot mode because 1) the
@@ -1612,18 +1733,19 @@ out:
         return size;
 }
 
-static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
+static int tmc_enable_etr_sink_perf(struct coresight_device *csdev,
+                                    struct coresight_path *path)
 {
         int rc = 0;
         pid_t pid;
         unsigned long flags;
         struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-        struct perf_output_handle *handle = data;
+        struct perf_output_handle *handle = path->handle;
         struct etr_perf_buffer *etr_perf = etm_perf_sink_config(handle);
 
-        spin_lock_irqsave(&drvdata->spinlock, flags);
+        raw_spin_lock_irqsave(&drvdata->spinlock, flags);
          /* Don't use this sink if it is already claimed by sysFS */
-        if (drvdata->mode == CS_MODE_SYSFS) {
+        if (coresight_get_mode(csdev) == CS_MODE_SYSFS) {
                 rc = -EBUSY;
                 goto unlock_out;
         }
@@ -1633,7 +1755,7 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
                 goto unlock_out;
         }
 
-        /* Get a handle on the pid of the process to monitor */
+        /* Get a handle on the pid of the session owner */
         pid = etr_perf->pid;
 
         /* Do not proceed if this device is associated with another session */
@@ -1647,7 +1769,7 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
          * use for this session.
          */
         if (drvdata->pid == pid) {
-                atomic_inc(csdev->refcnt);
+                csdev->refcnt++;
                 goto unlock_out;
         }
 
@@ -1655,28 +1777,28 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
         if (!rc) {
                 /* Associate with monitored process. */
                 drvdata->pid = pid;
-                drvdata->mode = CS_MODE_PERF;
+                coresight_set_mode(csdev, CS_MODE_PERF);
                 drvdata->perf_buf = etr_perf->etr_buf;
-                atomic_inc(csdev->refcnt);
+                csdev->refcnt++;
         }
 
 unlock_out:
-        spin_unlock_irqrestore(&drvdata->spinlock, flags);
+        raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
         return rc;
 }
 
 static int tmc_enable_etr_sink(struct coresight_device *csdev,
-                               u32 mode, void *data)
+                               enum cs_mode mode,
+                               struct coresight_path *path)
 {
         switch (mode) {
         case CS_MODE_SYSFS:
                 return tmc_enable_etr_sink_sysfs(csdev);
         case CS_MODE_PERF:
-                return tmc_enable_etr_sink_perf(csdev, data);
+                return tmc_enable_etr_sink_perf(csdev, path);
+        default:
+                return -EINVAL;
         }
-
-        /* We shouldn't be here */
-        return -EINVAL;
 }
 
 static int tmc_disable_etr_sink(struct coresight_device *csdev)
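Another recurring change above: csdev->refcnt drops from an atomic_t to a plain int because every access now happens under the raw spinlock, and disable only tears the hardware down when the last session goes away. A condensed sketch of that discipline; the demo_* names are hypothetical stand-ins for the driver's own functions:

#include <linux/coresight.h>
#include <linux/spinlock.h>

/* Hypothetical last-user hook, standing in for tmc_etr_disable_hw(). */
static void demo_teardown(struct coresight_device *csdev) { }

static int demo_disable_sink(struct coresight_device *csdev,
                             raw_spinlock_t *lock)
{
        unsigned long flags;

        raw_spin_lock_irqsave(lock, flags);
        csdev->refcnt--;
        if (csdev->refcnt) {
                /* Other sessions still attached: keep the sink running. */
                raw_spin_unlock_irqrestore(lock, flags);
                return -EBUSY;
        }
        demo_teardown(csdev);           /* last user: tear down hardware */
        raw_spin_unlock_irqrestore(lock, flags);
        return 0;
}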
@@ -1684,33 +1806,102 @@ static int tmc_disable_etr_sink(struct coresight_device *csdev)
         unsigned long flags;
         struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-        spin_lock_irqsave(&drvdata->spinlock, flags);
+        raw_spin_lock_irqsave(&drvdata->spinlock, flags);
 
         if (drvdata->reading) {
-                spin_unlock_irqrestore(&drvdata->spinlock, flags);
+                raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
                 return -EBUSY;
         }
 
-        if (atomic_dec_return(csdev->refcnt)) {
-                spin_unlock_irqrestore(&drvdata->spinlock, flags);
+        csdev->refcnt--;
+        if (csdev->refcnt) {
+                raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
                 return -EBUSY;
         }
 
         /* Complain if we (somehow) got out of sync */
-        WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
+        WARN_ON_ONCE(coresight_get_mode(csdev) == CS_MODE_DISABLED);
         tmc_etr_disable_hw(drvdata);
         /* Dissociate from monitored process. */
         drvdata->pid = -1;
-        drvdata->mode = CS_MODE_DISABLED;
+        coresight_set_mode(csdev, CS_MODE_DISABLED);
         /* Reset perf specific data */
         drvdata->perf_buf = NULL;
 
-        spin_unlock_irqrestore(&drvdata->spinlock, flags);
+        raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
         dev_dbg(&csdev->dev, "TMC-ETR disabled\n");
         return 0;
 }
 
+static int tmc_panic_sync_etr(struct coresight_device *csdev)
+{
+        u32 val;
+        struct tmc_crash_metadata *mdata;
+        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+        mdata = (struct tmc_crash_metadata *)drvdata->crash_mdata.vaddr;
+
+        if (!drvdata->etr_buf)
+                return 0;
+
+        /* Being in RESRV mode implies valid reserved memory as well */
+        if (drvdata->etr_buf->mode != ETR_MODE_RESRV)
+                return 0;
+
+        if (!tmc_has_crash_mdata_buffer(drvdata))
+                return 0;
+
+        CS_UNLOCK(drvdata->base);
+
+        /* Proceed only if ETR is enabled */
+        val = readl(drvdata->base + TMC_CTL);
+        if (!(val & TMC_CTL_CAPT_EN))
+                goto out;
+
+        val = readl(drvdata->base + TMC_FFSR);
+        /* Do manual flush and stop only if its not auto-stopped */
+        if (!(val & TMC_FFSR_FT_STOPPED)) {
+                dev_dbg(&csdev->dev,
+                        "%s: Triggering manual flush\n", __func__);
+                tmc_flush_and_stop(drvdata);
+        } else
+                tmc_wait_for_tmcready(drvdata);
+
+        /* Sync registers from hardware to metadata region */
+        mdata->tmc_ram_size = readl(drvdata->base + TMC_RSZ);
+        mdata->tmc_sts = readl(drvdata->base + TMC_STS);
+        mdata->tmc_mode = readl(drvdata->base + TMC_MODE);
+        mdata->tmc_ffcr = readl(drvdata->base + TMC_FFCR);
+        mdata->tmc_ffsr = readl(drvdata->base + TMC_FFSR);
+        mdata->tmc_rrp = tmc_read_rrp(drvdata);
+        mdata->tmc_rwp = tmc_read_rwp(drvdata);
+        mdata->tmc_dba = tmc_read_dba(drvdata);
+        mdata->trace_paddr = drvdata->resrv_buf.paddr;
+        mdata->version = CS_CRASHDATA_VERSION;
+
+        /*
+         * Make sure all previous writes are ordered,
+         * before we mark valid
+         */
+        dmb(sy);
+        mdata->valid = true;
+        /*
+         * Below order need to maintained, since crc of metadata
+         * is dependent on first
+         */
+        mdata->crc32_tdata = find_crash_tracedata_crc(drvdata, mdata);
+        mdata->crc32_mdata = find_crash_metadata_crc(mdata);
+
+        tmc_disable_hw(drvdata);
+
+        dev_dbg(&csdev->dev, "%s: success\n", __func__);
+out:
+        CS_UNLOCK(drvdata->base);
+
+        return 0;
+}
+
 static const struct coresight_ops_sink tmc_etr_sink_ops = {
         .enable         = tmc_enable_etr_sink,
         .disable        = tmc_disable_etr_sink,
@@ -1719,8 +1910,13 @@ static const struct coresight_ops_sink tmc_etr_sink_ops = {
         .free_buffer    = tmc_free_etr_buffer,
 };
 
+static const struct coresight_ops_panic tmc_etr_sync_ops = {
+        .sync           = tmc_panic_sync_etr,
+};
+
 const struct coresight_ops tmc_etr_cs_ops = {
         .sink_ops       = &tmc_etr_sink_ops,
+        .panic_ops      = &tmc_etr_sync_ops,
 };
 
 int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
@@ -1732,7 +1928,7 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
         if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
                 return -EINVAL;
 
-        spin_lock_irqsave(&drvdata->spinlock, flags);
+        raw_spin_lock_irqsave(&drvdata->spinlock, flags);
         if (drvdata->reading) {
                 ret = -EBUSY;
                 goto out;
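The panic handler above publishes its metadata in a strict order: payload fields first, a dmb(sy) barrier, then the valid flag, then the trace-data CRC before the metadata CRC (the latter covers the crc32_tdata field). A consumer would naturally check it in reverse; a hedged sketch, reusing the driver's CRC helpers but with an illustrative function name:

static bool demo_crashdata_usable(struct tmc_drvdata *drvdata,
                                  struct tmc_crash_metadata *mdata)
{
        if (!mdata->valid)
                return false;
        /* The metadata CRC covers crc32_tdata, so verify it first. */
        if (mdata->crc32_mdata != find_crash_metadata_crc(mdata))
                return false;
        return mdata->crc32_tdata == find_crash_tracedata_crc(drvdata, mdata);
}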
@@ -1749,12 +1945,12 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
         }
 
         /* Disable the TMC if we are trying to read from a running session. */
-        if (drvdata->mode == CS_MODE_SYSFS)
+        if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS)
                 __tmc_etr_disable_hw(drvdata);
 
         drvdata->reading = true;
 out:
-        spin_unlock_irqrestore(&drvdata->spinlock, flags);
+        raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
         return ret;
 }
@@ -1768,10 +1964,10 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
         if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
                 return -EINVAL;
 
-        spin_lock_irqsave(&drvdata->spinlock, flags);
+        raw_spin_lock_irqsave(&drvdata->spinlock, flags);
 
         /* RE-enable the TMC if need be */
-        if (drvdata->mode == CS_MODE_SYSFS) {
+        if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) {
                 /*
                  * The trace run will continue with the same allocated trace
                  * buffer. Since the tracer is still enabled drvdata::buf can't
@@ -1788,7 +1984,7 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
         }
 
         drvdata->reading = false;
-        spin_unlock_irqrestore(&drvdata->spinlock, flags);
+        raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
         /* Free allocated memory out side of the spinlock */
         if (sysfs_buf)
@@ -1796,3 +1992,96 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
 
         return 0;
 }
+
+static const char *const buf_modes_str[] = {
+        [ETR_MODE_FLAT]         = "flat",
+        [ETR_MODE_ETR_SG]       = "tmc-sg",
+        [ETR_MODE_CATU]         = "catu",
+        [ETR_MODE_RESRV]        = "resrv",
+        [ETR_MODE_AUTO]         = "auto",
+};
+
+static ssize_t buf_modes_available_show(struct device *dev,
+                                        struct device_attribute *attr, char *buf)
+{
+        struct etr_buf_hw buf_hw;
+        ssize_t size = 0;
+
+        get_etr_buf_hw(dev, &buf_hw);
+        size += sysfs_emit(buf, "%s ", buf_modes_str[ETR_MODE_AUTO]);
+        size += sysfs_emit_at(buf, size, "%s ", buf_modes_str[ETR_MODE_FLAT]);
+        if (buf_hw.has_etr_sg)
+                size += sysfs_emit_at(buf, size, "%s ", buf_modes_str[ETR_MODE_ETR_SG]);
+
+        if (buf_hw.has_catu)
+                size += sysfs_emit_at(buf, size, "%s ", buf_modes_str[ETR_MODE_CATU]);
+
+        if (buf_hw.has_resrv)
+                size += sysfs_emit_at(buf, size, "%s ", buf_modes_str[ETR_MODE_RESRV]);
+
+        size += sysfs_emit_at(buf, size, "\n");
+        return size;
+}
+static DEVICE_ATTR_RO(buf_modes_available);
+
+static ssize_t buf_mode_preferred_show(struct device *dev,
+                                       struct device_attribute *attr, char *buf)
+{
+        struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+        return sysfs_emit(buf, "%s\n", buf_modes_str[drvdata->etr_mode]);
+}
+
+static int buf_mode_set_resrv(struct tmc_drvdata *drvdata)
+{
+        int err = -EBUSY;
+        unsigned long flags;
+        struct tmc_resrv_buf *rbuf;
+
+        rbuf = &drvdata->resrv_buf;
+
+        /* Ensure there are no active crashdata read sessions */
+        raw_spin_lock_irqsave(&drvdata->spinlock, flags);
+        if (!rbuf->reading) {
+                tmc_crashdata_set_invalid(drvdata);
+                rbuf->len = 0;
+                drvdata->etr_mode = ETR_MODE_RESRV;
+                err = 0;
+        }
+        raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
+        return err;
+}
+
+static ssize_t buf_mode_preferred_store(struct device *dev,
+                                        struct device_attribute *attr,
+                                        const char *buf, size_t size)
+{
+        struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+        struct etr_buf_hw buf_hw;
+
+        get_etr_buf_hw(dev, &buf_hw);
+        if (sysfs_streq(buf, buf_modes_str[ETR_MODE_FLAT]))
+                drvdata->etr_mode = ETR_MODE_FLAT;
+        else if (sysfs_streq(buf, buf_modes_str[ETR_MODE_ETR_SG]) && buf_hw.has_etr_sg)
+                drvdata->etr_mode = ETR_MODE_ETR_SG;
+        else if (sysfs_streq(buf, buf_modes_str[ETR_MODE_CATU]) && buf_hw.has_catu)
+                drvdata->etr_mode = ETR_MODE_CATU;
+        else if (sysfs_streq(buf, buf_modes_str[ETR_MODE_RESRV]) && buf_hw.has_resrv)
+                return buf_mode_set_resrv(drvdata) ? : size;
+        else if (sysfs_streq(buf, buf_modes_str[ETR_MODE_AUTO]))
+                drvdata->etr_mode = ETR_MODE_AUTO;
+        else
+                return -EINVAL;
+        return size;
+}
+static DEVICE_ATTR_RW(buf_mode_preferred);
+
+static struct attribute *coresight_etr_attrs[] = {
+        &dev_attr_buf_modes_available.attr,
+        &dev_attr_buf_mode_preferred.attr,
+        NULL,
+};
+
+const struct attribute_group coresight_etr_group = {
+        .attrs = coresight_etr_attrs,
+};
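The two new attributes land in the ETR's sysfs directory. A hypothetical user-space snippet exercising them; the device name tmc_etr0 and the sysfs path are assumptions, while the attribute names and mode strings come from the hunk above:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define ETR_SYSFS "/sys/bus/coresight/devices/tmc_etr0"

int main(void)
{
        char modes[128] = { 0 };
        int fd;

        fd = open(ETR_SYSFS "/buf_modes_available", O_RDONLY);
        if (fd < 0)
                return 1;
        if (read(fd, modes, sizeof(modes) - 1) > 0)
                printf("available: %s", modes);  /* e.g. "auto flat tmc-sg resrv" */
        close(fd);

        fd = open(ETR_SYSFS "/buf_mode_preferred", O_WRONLY);
        if (fd < 0)
                return 1;
        /* Route the next session through the reserved region. */
        if (write(fd, "resrv", 5) != 5)
                perror("write");
        close(fd);
        return 0;
}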
