Diffstat (limited to 'drivers/hwtracing/coresight/coresight-etm4x-core.c')
 drivers/hwtracing/coresight/coresight-etm4x-core.c | 1038
 1 file changed, 706 insertions(+), 332 deletions(-)
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index bf18128cf5de..560975b70474 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -3,8 +3,11 @@
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
*/
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
+#include <linux/kvm_host.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -22,7 +25,6 @@
#include <linux/cpu_pm.h>
#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
-#include <linux/pm_wakeup.h>
#include <linux/amba/bus.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
@@ -30,6 +32,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
+#include <linux/clk/clk-conf.h>
#include <asm/barrier.h>
#include <asm/sections.h>
@@ -42,6 +45,7 @@
#include "coresight-etm4x-cfg.h"
#include "coresight-self-hosted-trace.h"
#include "coresight-syscfg.h"
+#include "coresight-trace-id.h"
static int boot_enable;
module_param(boot_enable, int, 0444);
@@ -65,11 +69,13 @@ static u64 etm4_get_access_type(struct etmv4_config *config);
static enum cpuhp_state hp_online;
struct etm4_init_arg {
- unsigned int pid;
- struct etmv4_drvdata *drvdata;
+ struct device *dev;
struct csdev_access *csa;
};
+static DEFINE_PER_CPU(struct etm4_init_arg *, delayed_probe);
+static int etm4_probe_cpu(unsigned int cpu);
+
/*
* Check if TRCSSPCICRn(i) is implemented for a given instance.
*
@@ -79,7 +85,7 @@ struct etm4_init_arg {
* TRCIDR4.NUMPC > 0b0000 .
* TRCSSCSR<n>.PC == 0b1
*/
-static inline bool etm4x_sspcicrn_present(struct etmv4_drvdata *drvdata, int n)
+static bool etm4x_sspcicrn_present(struct etmv4_drvdata *drvdata, int n)
{
return (n < drvdata->nr_ss_cmp) &&
drvdata->nr_pe &&
@@ -98,7 +104,7 @@ u64 etm4x_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
}
if (!_relaxed)
- __iormb(res); /* Imitate the !relaxed I/O helpers */
+ __io_ar(res); /* Imitate the !relaxed I/O helpers */
return res;
}
@@ -106,7 +112,7 @@ u64 etm4x_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
void etm4x_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit)
{
if (!_relaxed)
- __iowmb(); /* Imitate the !relaxed I/O helpers */
+ __io_bw(); /* Imitate the !relaxed I/O helpers */
if (!_64bit)
val &= GENMASK(31, 0);
@@ -130,7 +136,7 @@ static u64 ete_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
}
if (!_relaxed)
- __iormb(res); /* Imitate the !relaxed I/O helpers */
+ __io_ar(res); /* Imitate the !relaxed I/O helpers */
return res;
}
@@ -138,7 +144,7 @@ static u64 ete_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
static void ete_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit)
{
if (!_relaxed)
- __iowmb(); /* Imitate the !relaxed I/O helpers */
+ __io_bw(); /* Imitate the !relaxed I/O helpers */
if (!_64bit)
val &= GENMASK(31, 0);
@@ -180,7 +186,7 @@ static void etm_write_os_lock(struct etmv4_drvdata *drvdata,
isb();
}
-static inline void etm4_os_unlock_csa(struct etmv4_drvdata *drvdata,
+static void etm4_os_unlock_csa(struct etmv4_drvdata *drvdata,
struct csdev_access *csa)
{
WARN_ON(drvdata->cpu != smp_processor_id());
@@ -227,11 +233,9 @@ static int etm4_cpu_id(struct coresight_device *csdev)
return drvdata->cpu;
}
-static int etm4_trace_id(struct coresight_device *csdev)
+void etm4_release_trace_id(struct etmv4_drvdata *drvdata)
{
- struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
- return drvdata->trcid;
+ coresight_trace_id_put_cpu_id(drvdata->cpu);
}
struct etm4_enable_arg {
@@ -246,10 +250,28 @@ struct etm4_enable_arg {
*/
static void etm4x_prohibit_trace(struct etmv4_drvdata *drvdata)
{
+ u64 trfcr;
+
/* If the CPU doesn't support FEAT_TRF, nothing to do */
if (!drvdata->trfcr)
return;
- cpu_prohibit_trace();
+
+ trfcr = drvdata->trfcr & ~(TRFCR_EL1_ExTRE | TRFCR_EL1_E0TRE);
+
+ write_trfcr(trfcr);
+ kvm_tracing_set_el1_configuration(trfcr);
+}
+
+static u64 etm4x_get_kern_user_filter(struct etmv4_drvdata *drvdata)
+{
+ u64 trfcr = drvdata->trfcr;
+
+ if (drvdata->config.mode & ETM_MODE_EXCL_KERN)
+ trfcr &= ~TRFCR_EL1_ExTRE;
+ if (drvdata->config.mode & ETM_MODE_EXCL_USER)
+ trfcr &= ~TRFCR_EL1_E0TRE;
+
+ return trfcr;
}
/*
@@ -264,18 +286,28 @@ static void etm4x_prohibit_trace(struct etmv4_drvdata *drvdata)
*/
static void etm4x_allow_trace(struct etmv4_drvdata *drvdata)
{
- u64 trfcr = drvdata->trfcr;
+ u64 trfcr, guest_trfcr;
/* If the CPU doesn't support FEAT_TRF, nothing to do */
- if (!trfcr)
+ if (!drvdata->trfcr)
return;
- if (drvdata->config.mode & ETM_MODE_EXCL_KERN)
- trfcr &= ~TRFCR_ELx_ExTRE;
- if (drvdata->config.mode & ETM_MODE_EXCL_USER)
- trfcr &= ~TRFCR_ELx_E0TRE;
+ if (drvdata->config.mode & ETM_MODE_EXCL_HOST)
+ trfcr = drvdata->trfcr & ~(TRFCR_EL1_ExTRE | TRFCR_EL1_E0TRE);
+ else
+ trfcr = etm4x_get_kern_user_filter(drvdata);
write_trfcr(trfcr);
+
+ /* Set filters for guests and pass to KVM */
+ if (drvdata->config.mode & ETM_MODE_EXCL_GUEST)
+ guest_trfcr = drvdata->trfcr & ~(TRFCR_EL1_ExTRE | TRFCR_EL1_E0TRE);
+ else
+ guest_trfcr = etm4x_get_kern_user_filter(drvdata);
+
+ /* TRFCR_EL1 doesn't have CX so mask it out. */
+ guest_trfcr &= ~TRFCR_EL2_CX;
+ kvm_tracing_set_el1_configuration(guest_trfcr);
}
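
/*
 * Illustrative sketch, for reference only, of how the host and guest TRFCR
 * values are derived from the perf exclude_* bits: the SKETCH_* masks and
 * sketch_filter() are stand-ins, not the real TRFCR_EL1 encodings.
 */
#include <stdint.h>

#define SKETCH_ExTRE	(1u << 1)	/* kernel/ELx trace enable (stand-in) */
#define SKETCH_E0TRE	(1u << 0)	/* user/EL0 trace enable (stand-in) */

static uint64_t sketch_filter(uint64_t trfcr, int excl_kern, int excl_user, int excl_all)
{
	/* exclude_host / exclude_guest clear both enables for that world */
	if (excl_all)
		return trfcr & ~(SKETCH_ExTRE | SKETCH_E0TRE);
	if (excl_kern)
		trfcr &= ~SKETCH_ExTRE;
	if (excl_user)
		trfcr &= ~SKETCH_E0TRE;
	return trfcr;
}
/* The host value goes to write_trfcr(); the guest value is handed to KVM. */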
#ifdef CONFIG_ETM4X_IMPDEF_FEATURE
@@ -349,9 +381,17 @@ static void etm4_disable_arch_specific(struct etmv4_drvdata *drvdata)
}
static void etm4_check_arch_features(struct etmv4_drvdata *drvdata,
- unsigned int id)
+ struct csdev_access *csa)
{
- if (etm4_hisi_match_pid(id))
+ /*
+ * TRCPIDR* registers are not required for ETMs with system
+ * instructions. They must be identified by the MIDR+REVIDRs.
+ * Skip the TRCPID checks for now.
+ */
+ if (!csa->io_mem)
+ return;
+
+ if (etm4_hisi_match_pid(coresight_get_pid(csa)))
set_bit(ETM4_IMPDEF_HISI_CORE_COMMIT, drvdata->arch_features);
}
#else
@@ -364,11 +404,92 @@ static void etm4_disable_arch_specific(struct etmv4_drvdata *drvdata)
}
static void etm4_check_arch_features(struct etmv4_drvdata *drvdata,
- unsigned int id)
+ struct csdev_access *csa)
{
}
#endif /* CONFIG_ETM4X_IMPDEF_FEATURE */
+static void etm4x_sys_ins_barrier(struct csdev_access *csa, u32 offset, int pos, int val)
+{
+ if (!csa->io_mem)
+ isb();
+}
+
+/*
+ * etm4x_wait_status: Poll for TRCSTATR.<pos> == <val>. When using system
+ * instructions to access the trace unit, each access must be separated by a
+ * synchronization barrier. See ARM IHI0064H.b section "4.3.7 Synchronization of
+ * register updates", for system instructions section, in "Notes":
+ *
+ * "In particular, whenever disabling or enabling the trace unit, a poll of
+ * TRCSTATR needs explicit synchronization between each read of TRCSTATR"
+ */
+static int etm4x_wait_status(struct csdev_access *csa, int pos, int val)
+{
+ if (!csa->io_mem)
+ return coresight_timeout_action(csa, TRCSTATR, pos, val,
+ etm4x_sys_ins_barrier);
+ return coresight_timeout(csa, TRCSTATR, pos, val);
+}
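
/*
 * For reference, a minimal sketch of the poll-with-barrier pattern that
 * etm4x_wait_status() relies on for the system-instruction interface;
 * sketch_read_trcstatr() and sketch_isb() are hypothetical helpers.
 */
static int sketch_poll_trcstatr(int pos, int val, bool sysreg_access)
{
	int tries;

	for (tries = 0; tries < 100; tries++) {
		if (((sketch_read_trcstatr() >> pos) & 0x1) == val)
			return 0;
		/* separate successive TRCSTATR reads with a barrier */
		if (sysreg_access)
			sketch_isb();
	}
	return -ETIME;
}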
+
+static int etm4_enable_trace_unit(struct etmv4_drvdata *drvdata)
+{
+ struct coresight_device *csdev = drvdata->csdev;
+ struct device *etm_dev = &csdev->dev;
+ struct csdev_access *csa = &csdev->access;
+
+ /*
+ * ETE mandates that the TRCRSR is written to before
+ * enabling it.
+ */
+ if (etm4x_is_ete(drvdata))
+ etm4x_relaxed_write32(csa, TRCRSR_TA, TRCRSR);
+
+ etm4x_allow_trace(drvdata);
+
+ /*
+ * According to software usage PKLXF in Arm ARM (ARM DDI 0487 L.a),
+ * execute a Context synchronization event to guarantee the trace unit
+ * will observe the new values of the System registers.
+ */
+ if (!csa->io_mem)
+ isb();
+
+ /* Enable the trace unit */
+ etm4x_relaxed_write32(csa, 1, TRCPRGCTLR);
+
+ /*
+ * As recommended by section 4.3.7 ("Synchronization when using system
+	 * instructions to program the trace unit") of ARM IHI 0064H.b, the
+ * self-hosted trace analyzer must perform a Context synchronization
+ * event between writing to the TRCPRGCTLR and reading the TRCSTATR.
+ */
+ if (!csa->io_mem)
+ isb();
+
+ /* wait for TRCSTATR.IDLE to go back down to '0' */
+ if (etm4x_wait_status(csa, TRCSTATR_IDLE_BIT, 0)) {
+ dev_err(etm_dev,
+ "timeout while waiting for Idle Trace Status\n");
+ return -ETIME;
+ }
+
+ /*
+ * As recommended in section 4.3.7 (Synchronization of register updates)
+ * of ARM IHI 0064H.b, the self-hosted trace analyzer always executes an
+ * ISB instruction after programming the trace unit registers.
+ *
+ * For the memory-mapped interface, the registers are mapped as Device
+ * type (Device-nGnRE). Reading back the value of any register in the
+ * trace unit ensures that all writes have completed. Therefore, polling
+	 * on TRCSTATR guarantees that the write to TRCPRGCTLR is complete, and
+	 * no explicit dsb() is required here.
+ */
+ isb();
+
+ return 0;
+}
+
static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
{
int i, rc;
@@ -400,7 +521,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
isb();
/* wait for TRCSTATR.IDLE to go up */
- if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 1))
+ if (etm4x_wait_status(csa, TRCSTATR_IDLE_BIT, 1))
dev_err(etm_dev,
"timeout while waiting for Idle Trace Status\n");
if (drvdata->nr_pe)
@@ -424,9 +545,12 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
etm4x_relaxed_write32(csa, config->vipcssctlr, TRCVIPCSSCTLR);
for (i = 0; i < drvdata->nrseqstate - 1; i++)
etm4x_relaxed_write32(csa, config->seq_ctrl[i], TRCSEQEVRn(i));
- etm4x_relaxed_write32(csa, config->seq_rst, TRCSEQRSTEVR);
- etm4x_relaxed_write32(csa, config->seq_state, TRCSEQSTR);
- etm4x_relaxed_write32(csa, config->ext_inp, TRCEXTINSELR);
+ if (drvdata->nrseqstate) {
+ etm4x_relaxed_write32(csa, config->seq_rst, TRCSEQRSTEVR);
+ etm4x_relaxed_write32(csa, config->seq_state, TRCSEQSTR);
+ }
+ if (drvdata->numextinsel)
+ etm4x_relaxed_write32(csa, config->ext_inp, TRCEXTINSELR);
for (i = 0; i < drvdata->nr_cntr; i++) {
etm4x_relaxed_write32(csa, config->cntrldvr[i], TRCCNTRLDVRn(i));
etm4x_relaxed_write32(csa, config->cntr_ctrl[i], TRCCNTCTLRn(i));
@@ -443,13 +567,13 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
/* always clear status bit on restart if using single-shot */
if (config->ss_ctrl[i] || config->ss_pe_cmp[i])
- config->ss_status[i] &= ~BIT(31);
+ config->ss_status[i] &= ~TRCSSCSRn_STATUS;
etm4x_relaxed_write32(csa, config->ss_ctrl[i], TRCSSCCRn(i));
etm4x_relaxed_write32(csa, config->ss_status[i], TRCSSCSRn(i));
if (etm4x_sspcicrn_present(drvdata, i))
etm4x_relaxed_write32(csa, config->ss_pe_cmp[i], TRCSSPCICRn(i));
}
- for (i = 0; i < drvdata->nr_addr_cmp; i++) {
+ for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
etm4x_relaxed_write64(csa, config->addr_val[i], TRCACVRn(i));
etm4x_relaxed_write64(csa, config->addr_acc[i], TRCACATRn(i));
}
@@ -475,33 +599,8 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
etm4x_relaxed_write32(csa, trcpdcr | TRCPDCR_PU, TRCPDCR);
}
- /*
- * ETE mandates that the TRCRSR is written to before
- * enabling it.
- */
- if (etm4x_is_ete(drvdata))
- etm4x_relaxed_write32(csa, TRCRSR_TA, TRCRSR);
-
- etm4x_allow_trace(drvdata);
- /* Enable the trace unit */
- etm4x_relaxed_write32(csa, 1, TRCPRGCTLR);
-
- /* Synchronize the register updates for sysreg access */
- if (!csa->io_mem)
- isb();
-
- /* wait for TRCSTATR.IDLE to go back down to '0' */
- if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
- dev_err(etm_dev,
- "timeout while waiting for Idle Trace Status\n");
-
- /*
- * As recommended by section 4.3.7 ("Synchronization when using the
- * memory-mapped interface") of ARM IHI 0064D
- */
- dsb(sy);
- isb();
-
+ if (!drvdata->paused)
+ rc = etm4_enable_trace_unit(drvdata);
done:
etm4_cs_lock(drvdata, csa);
@@ -510,13 +609,26 @@ done:
return rc;
}
-static void etm4_enable_hw_smp_call(void *info)
+static void etm4_enable_sysfs_smp_call(void *info)
{
struct etm4_enable_arg *arg = info;
+ struct coresight_device *csdev;
if (WARN_ON(!arg))
return;
+
+ csdev = arg->drvdata->csdev;
+ if (!coresight_take_mode(csdev, CS_MODE_SYSFS)) {
+ /* Someone is already using the tracer */
+ arg->rc = -EBUSY;
+ return;
+ }
+
arg->rc = etm4_enable_hw(arg->drvdata);
+
+ /* The tracer didn't start */
+ if (arg->rc)
+ coresight_set_mode(csdev, CS_MODE_DISABLED);
}
/*
@@ -612,7 +724,7 @@ static int etm4_parse_event_config(struct coresight_device *csdev,
struct etmv4_config *config = &drvdata->config;
struct perf_event_attr *attr = &event->attr;
unsigned long cfg_hash;
- int preset;
+ int preset, cc_threshold;
/* Clear configuration from previous run */
memset(config, 0, sizeof(struct etmv4_config));
@@ -623,6 +735,12 @@ static int etm4_parse_event_config(struct coresight_device *csdev,
if (attr->exclude_user)
config->mode = ETM_MODE_EXCL_USER;
+ if (attr->exclude_host)
+ config->mode |= ETM_MODE_EXCL_HOST;
+
+ if (attr->exclude_guest)
+ config->mode |= ETM_MODE_EXCL_GUEST;
+
/* Always start from the default config */
etm4_set_default_config(config);
@@ -633,9 +751,14 @@ static int etm4_parse_event_config(struct coresight_device *csdev,
/* Go from generic option to ETMv4 specifics */
if (attr->config & BIT(ETM_OPT_CYCACC)) {
- config->cfg |= BIT(4);
+ config->cfg |= TRCCONFIGR_CCI;
/* TRM: Must program this for cycacc to work */
- config->ccctlr = ETM_CYC_THRESHOLD_DEFAULT;
+ cc_threshold = attr->config3 & ETM_CYC_THRESHOLD_MASK;
+ if (!cc_threshold)
+ cc_threshold = ETM_CYC_THRESHOLD_DEFAULT;
+ if (cc_threshold < drvdata->ccitmin)
+ cc_threshold = drvdata->ccitmin;
+ config->ccctlr = cc_threshold;
}
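
/*
 * Worked sketch of the clamp above: the requested cycle-count threshold from
 * attr->config3 is bounded below by the default and by the hardware minimum
 * (TRCIDR3.CCITMIN). Function and parameter names here are illustrative.
 */
static unsigned int sketch_cc_threshold(unsigned int requested,
					unsigned int hw_min, unsigned int dflt)
{
	if (!requested)
		requested = dflt;
	return requested < hw_min ? hw_min : requested;
}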
if (attr->config & BIT(ETM_OPT_TS)) {
/*
@@ -653,12 +776,14 @@ static int etm4_parse_event_config(struct coresight_device *csdev,
goto out;
/* bit[11], Global timestamp tracing bit */
- config->cfg |= BIT(11);
+ config->cfg |= TRCCONFIGR_TS;
}
- if (attr->config & BIT(ETM_OPT_CTXTID))
+	/* Only trace contextID when running in the root PID namespace */
+ if ((attr->config & BIT(ETM_OPT_CTXTID)) &&
+ task_is_in_init_pid_ns(current))
/* bit[6], Context ID tracing bit */
- config->cfg |= BIT(ETM4_CFG_BIT_CTXTID);
+ config->cfg |= TRCCONFIGR_CID;
/*
* If set bit ETM_OPT_CTXTID2 in perf config, this asks to trace VMID
@@ -670,13 +795,15 @@ static int etm4_parse_event_config(struct coresight_device *csdev,
ret = -EINVAL;
goto out;
}
- config->cfg |= BIT(ETM4_CFG_BIT_VMID) | BIT(ETM4_CFG_BIT_VMID_OPT);
+	/* Only trace virtual contextID when running in the root PID namespace */
+ if (task_is_in_init_pid_ns(current))
+ config->cfg |= TRCCONFIGR_VMID | TRCCONFIGR_VMIDOPT;
}
/* return stack - enable if selected and supported */
if ((attr->config & BIT(ETM_OPT_RETSTK)) && drvdata->retstack)
/* bit[12], Return stack enable bit */
- config->cfg |= BIT(12);
+ config->cfg |= TRCCONFIGR_RS;
/*
* Set any selected configuration and preset.
@@ -692,33 +819,58 @@ static int etm4_parse_event_config(struct coresight_device *csdev,
ret = cscfg_csdev_enable_active_config(csdev, cfg_hash, preset);
}
+ /* branch broadcast - enable if selected and supported */
+ if (attr->config & BIT(ETM_OPT_BRANCH_BROADCAST)) {
+ if (!drvdata->trcbb) {
+ /*
+ * Missing BB support could cause silent decode errors
+ * so fail to open if it's not supported.
+ */
+ ret = -EINVAL;
+ goto out;
+ } else {
+ config->cfg |= BIT(ETM4_CFG_BIT_BB);
+ }
+ }
+
out:
return ret;
}
static int etm4_enable_perf(struct coresight_device *csdev,
- struct perf_event *event)
+ struct perf_event *event,
+ struct coresight_path *path)
{
- int ret = 0;
struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ int ret;
- if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id())) {
- ret = -EINVAL;
- goto out;
- }
+ if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
+ return -EINVAL;
+
+ if (!coresight_take_mode(csdev, CS_MODE_PERF))
+ return -EBUSY;
/* Configure the tracer based on the session's specifics */
ret = etm4_parse_event_config(csdev, event);
if (ret)
goto out;
+
+ drvdata->trcid = path->trace_id;
+
+ /* Populate pause state */
+ drvdata->paused = !!READ_ONCE(event->hw.aux_paused);
+
/* And enable it */
ret = etm4_enable_hw(drvdata);
out:
+ /* Failed to start tracer; roll back to DISABLED mode */
+ if (ret)
+ coresight_set_mode(csdev, CS_MODE_DISABLED);
return ret;
}
-static int etm4_enable_sysfs(struct coresight_device *csdev)
+static int etm4_enable_sysfs(struct coresight_device *csdev, struct coresight_path *path)
{
struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
struct etm4_enable_arg arg = { };
@@ -733,7 +885,12 @@ static int etm4_enable_sysfs(struct coresight_device *csdev)
return ret;
}
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
+
+ drvdata->trcid = path->trace_id;
+
+ /* Tracer will never be paused in sysfs mode */
+ drvdata->paused = false;
/*
* Executing etm4_enable_hw on the cpu whose ETM is being enabled
@@ -741,68 +898,47 @@ static int etm4_enable_sysfs(struct coresight_device *csdev)
*/
arg.drvdata = drvdata;
ret = smp_call_function_single(drvdata->cpu,
- etm4_enable_hw_smp_call, &arg, 1);
+ etm4_enable_sysfs_smp_call, &arg, 1);
if (!ret)
ret = arg.rc;
if (!ret)
drvdata->sticky_enable = true;
- spin_unlock(&drvdata->spinlock);
+
+ if (ret)
+ etm4_release_trace_id(drvdata);
+
+ raw_spin_unlock(&drvdata->spinlock);
if (!ret)
dev_dbg(&csdev->dev, "ETM tracing enabled\n");
return ret;
}
-static int etm4_enable(struct coresight_device *csdev,
- struct perf_event *event, u32 mode)
+static int etm4_enable(struct coresight_device *csdev, struct perf_event *event,
+ enum cs_mode mode, struct coresight_path *path)
{
int ret;
- u32 val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
- val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);
-
- /* Someone is already using the tracer */
- if (val)
- return -EBUSY;
switch (mode) {
case CS_MODE_SYSFS:
- ret = etm4_enable_sysfs(csdev);
+ ret = etm4_enable_sysfs(csdev, path);
break;
case CS_MODE_PERF:
- ret = etm4_enable_perf(csdev, event);
+ ret = etm4_enable_perf(csdev, event, path);
break;
default:
ret = -EINVAL;
}
- /* The tracer didn't start */
- if (ret)
- local_set(&drvdata->mode, CS_MODE_DISABLED);
-
return ret;
}
-static void etm4_disable_hw(void *info)
+static void etm4_disable_trace_unit(struct etmv4_drvdata *drvdata)
{
u32 control;
- struct etmv4_drvdata *drvdata = info;
- struct etmv4_config *config = &drvdata->config;
struct coresight_device *csdev = drvdata->csdev;
struct device *etm_dev = &csdev->dev;
struct csdev_access *csa = &csdev->access;
- int i;
-
- etm4_cs_unlock(drvdata, csa);
- etm4_disable_arch_specific(drvdata);
-
- if (!drvdata->skip_power_up) {
- /* power can be removed from the trace unit now */
- control = etm4x_relaxed_read32(csa, TRCPDCR);
- control &= ~TRCPDCR_PU;
- etm4x_relaxed_write32(csa, control, TRCPDCR);
- }
control = etm4x_relaxed_read32(csa, TRCPRGCTLR);
@@ -815,20 +951,68 @@ static void etm4_disable_hw(void *info)
*/
etm4x_prohibit_trace(drvdata);
/*
- * Make sure everything completes before disabling, as recommended
- * by section 7.3.77 ("TRCVICTLR, ViewInst Main Control Register,
- * SSTATUS") of ARM IHI 0064D
+ * Prevent being speculative at the point of disabling the trace unit,
+ * as recommended by section 7.3.77 ("TRCVICTLR, ViewInst Main Control
+ * Register, SSTATUS") of ARM IHI 0064D
*/
dsb(sy);
+ /*
+ * According to software usage VKHHY in Arm ARM (ARM DDI 0487 L.a),
+ * execute a Context synchronization event to guarantee no new
+ * program-flow trace is generated.
+ */
isb();
/* Trace synchronization barrier, is a nop if not supported */
tsb_csync();
etm4x_relaxed_write32(csa, control, TRCPRGCTLR);
+ /*
+ * As recommended by section 4.3.7 ("Synchronization when using system
+	 * instructions to program the trace unit") of ARM IHI 0064H.b, the
+ * self-hosted trace analyzer must perform a Context synchronization
+ * event between writing to the TRCPRGCTLR and reading the TRCSTATR.
+ */
+ if (!csa->io_mem)
+ isb();
+
/* wait for TRCSTATR.PMSTABLE to go to '1' */
- if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1))
+ if (etm4x_wait_status(csa, TRCSTATR_PMSTABLE_BIT, 1))
dev_err(etm_dev,
"timeout while waiting for PM stable Trace Status\n");
+ /*
+ * As recommended in section 4.3.7 (Synchronization of register updates)
+ * of ARM IHI 0064H.b, the self-hosted trace analyzer always executes an
+ * ISB instruction after programming the trace unit registers.
+ *
+ * For the memory-mapped interface, the registers are mapped as Device
+ * type (Device-nGnRE). Reading back the value of any register in the
+ * trace unit ensures that all writes have completed. Therefore, polling
+	 * on TRCSTATR guarantees that the write to TRCPRGCTLR is complete, and
+	 * no explicit dsb() is required here.
+ */
+ isb();
+}
+
+static void etm4_disable_hw(struct etmv4_drvdata *drvdata)
+{
+ u32 control;
+ struct etmv4_config *config = &drvdata->config;
+ struct coresight_device *csdev = drvdata->csdev;
+ struct csdev_access *csa = &csdev->access;
+ int i;
+
+ etm4_cs_unlock(drvdata, csa);
+ etm4_disable_arch_specific(drvdata);
+
+ if (!drvdata->skip_power_up) {
+ /* power can be removed from the trace unit now */
+ control = etm4x_relaxed_read32(csa, TRCPDCR);
+ control &= ~TRCPDCR_PU;
+ etm4x_relaxed_write32(csa, control, TRCPDCR);
+ }
+
+ etm4_disable_trace_unit(drvdata);
+
/* read the status of the single shot comparators */
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
config->ss_status[i] =
@@ -848,6 +1032,15 @@ static void etm4_disable_hw(void *info)
"cpu: %d disable smp call done\n", drvdata->cpu);
}
+static void etm4_disable_sysfs_smp_call(void *info)
+{
+ struct etmv4_drvdata *drvdata = info;
+
+ etm4_disable_hw(drvdata);
+
+ coresight_set_mode(drvdata->csdev, CS_MODE_DISABLED);
+}
+
static int etm4_disable_perf(struct coresight_device *csdev,
struct perf_event *event)
{
@@ -877,6 +1070,13 @@ static int etm4_disable_perf(struct coresight_device *csdev,
/* TRCVICTLR::SSSTATUS, bit[9] */
filters->ssstatus = (control & BIT(9));
+ coresight_set_mode(drvdata->csdev, CS_MODE_DISABLED);
+
+ /*
+ * perf will release trace ids when _free_aux() is
+ * called at the end of the session.
+ */
+
return 0;
}
@@ -891,32 +1091,42 @@ static void etm4_disable_sysfs(struct coresight_device *csdev)
* DYING hotplug callback is serviced by the ETM driver.
*/
cpus_read_lock();
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
/*
* Executing etm4_disable_hw on the cpu whose ETM is being disabled
* ensures that register writes occur when cpu is powered.
*/
- smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1);
+ smp_call_function_single(drvdata->cpu, etm4_disable_sysfs_smp_call,
+ drvdata, 1);
+
+ raw_spin_unlock(&drvdata->spinlock);
+
+ cscfg_csdev_disable_active_config(csdev);
- spin_unlock(&drvdata->spinlock);
cpus_read_unlock();
+ /*
+	 * We only release trace IDs when resetting sysfs.
+	 * This permits sysfs users to read the trace ID after the trace
+	 * session has completed, and maintains operational behaviour with
+	 * the prior trace ID allocation method.
+ */
+
dev_dbg(&csdev->dev, "ETM tracing disabled\n");
}
static void etm4_disable(struct coresight_device *csdev,
struct perf_event *event)
{
- u32 mode;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ enum cs_mode mode;
/*
* For as long as the tracer isn't disabled another entity can't
* change its status. As such we can read the status here without
* fearing it will change under us.
*/
- mode = local_read(&drvdata->mode);
+ mode = coresight_get_mode(csdev);
switch (mode) {
case CS_MODE_DISABLED:
@@ -928,27 +1138,57 @@ static void etm4_disable(struct coresight_device *csdev,
etm4_disable_perf(csdev, event);
break;
}
+}
- if (mode)
- local_set(&drvdata->mode, CS_MODE_DISABLED);
+static int etm4_resume_perf(struct coresight_device *csdev)
+{
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct csdev_access *csa = &csdev->access;
+
+ if (coresight_get_mode(csdev) != CS_MODE_PERF)
+ return -EINVAL;
+
+ etm4_cs_unlock(drvdata, csa);
+ etm4_enable_trace_unit(drvdata);
+ etm4_cs_lock(drvdata, csa);
+
+ drvdata->paused = false;
+ return 0;
+}
+
+static void etm4_pause_perf(struct coresight_device *csdev)
+{
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct csdev_access *csa = &csdev->access;
+
+ if (coresight_get_mode(csdev) != CS_MODE_PERF)
+ return;
+
+ etm4_cs_unlock(drvdata, csa);
+ etm4_disable_trace_unit(drvdata);
+ etm4_cs_lock(drvdata, csa);
+
+ drvdata->paused = true;
}
static const struct coresight_ops_source etm4_source_ops = {
.cpu_id = etm4_cpu_id,
- .trace_id = etm4_trace_id,
.enable = etm4_enable,
.disable = etm4_disable,
+ .resume_perf = etm4_resume_perf,
+ .pause_perf = etm4_pause_perf,
};
static const struct coresight_ops etm4_cs_ops = {
+ .trace_id = coresight_etm_get_trace_id,
.source_ops = &etm4_source_ops,
};
-static inline bool cpu_supports_sysreg_trace(void)
+static bool cpu_supports_sysreg_trace(void)
{
u64 dfr0 = read_sysreg_s(SYS_ID_AA64DFR0_EL1);
- return ((dfr0 >> ID_AA64DFR0_TRACEVER_SHIFT) & 0xfUL) > 0;
+ return ((dfr0 >> ID_AA64DFR0_EL1_TraceVer_SHIFT) & 0xfUL) > 0;
}
static bool etm4_init_sysreg_access(struct etmv4_drvdata *drvdata,
@@ -986,29 +1226,35 @@ static bool etm4_init_sysreg_access(struct etmv4_drvdata *drvdata,
return true;
}
+static bool is_devtype_cpu_trace(void __iomem *base)
+{
+ u32 devtype = readl(base + TRCDEVTYPE);
+
+ return (devtype == CS_DEVTYPE_PE_TRACE);
+}
+
static bool etm4_init_iomem_access(struct etmv4_drvdata *drvdata,
struct csdev_access *csa)
{
u32 devarch = readl_relaxed(drvdata->base + TRCDEVARCH);
- u32 idr1 = readl_relaxed(drvdata->base + TRCIDR1);
+
+ if (!is_coresight_device(drvdata->base) || !is_devtype_cpu_trace(drvdata->base))
+ return false;
/*
* All ETMs must implement TRCDEVARCH to indicate that
- * the component is an ETMv4. To support any broken
- * implementations we fall back to TRCIDR1 check, which
- * is not really reliable.
+ * the component is an ETMv4. Even though TRCIDR1 also
+ * contains the information, it is part of the "Trace"
+ * register and must be accessed with the OSLK cleared,
+ * with MMIO. But we cannot touch the OSLK until we are
+ * sure this is an ETM. So rely only on the TRCDEVARCH.
*/
- if ((devarch & ETM_DEVARCH_ID_MASK) == ETM_DEVARCH_ETMv4x_ARCH) {
- drvdata->arch = etm_devarch_to_arch(devarch);
- } else {
- pr_warn("CPU%d: ETM4x incompatible TRCDEVARCH: %x, falling back to TRCIDR1\n",
- smp_processor_id(), devarch);
-
- if (ETM_TRCIDR1_ARCH_MAJOR(idr1) != ETM_TRCIDR1_ARCH_ETMv4)
- return false;
- drvdata->arch = etm_trcidr_to_arch(idr1);
+ if ((devarch & ETM_DEVARCH_ID_MASK) != ETM_DEVARCH_ETMv4x_ARCH) {
+ pr_warn_once("TRCDEVARCH doesn't match ETMv4 architecture\n");
+ return false;
}
+ drvdata->arch = etm_devarch_to_arch(devarch);
*csa = CSDEV_ACCESS_IOMEM(drvdata->base);
return true;
}
@@ -1036,7 +1282,7 @@ static void cpu_detect_trace_filtering(struct etmv4_drvdata *drvdata)
u64 trfcr;
drvdata->trfcr = 0;
- if (!cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TRACE_FILT_SHIFT))
+ if (!cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceFilt_SHIFT))
return;
/*
@@ -1044,9 +1290,9 @@ static void cpu_detect_trace_filtering(struct etmv4_drvdata *drvdata)
* tracing at the kernel EL and EL0, forcing to use the
* virtual time as the timestamp.
*/
- trfcr = (TRFCR_ELx_TS_VIRTUAL |
- TRFCR_ELx_ExTRE |
- TRFCR_ELx_E0TRE);
+ trfcr = (FIELD_PREP(TRFCR_EL1_TS_MASK, TRFCR_EL1_TS_VIRTUAL) |
+ TRFCR_EL1_ExTRE |
+ TRFCR_EL1_E0TRE);
/* If we are running at EL2, allow tracing the CONTEXTIDR_EL2. */
if (is_kernel_in_hyp_mode())
@@ -1055,6 +1301,41 @@ static void cpu_detect_trace_filtering(struct etmv4_drvdata *drvdata)
drvdata->trfcr = trfcr;
}
+/*
+ * The following errata, on the applicable CPU ranges, affect the CCITMIN field
+ * in the TRCIDR3 register. A software read of the field returns 0x100, limiting
+ * the cycle threshold granularity, whereas the correct value is 0x4, which is
+ * well supported in the hardware.
+ */
+static struct midr_range etm_wrong_ccitmin_cpus[] = {
+ /* Erratum #1490853 - Cortex-A76 */
+ MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 4, 0),
+ /* Erratum #1490853 - Neoverse-N1 */
+ MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 4, 0),
+ /* Erratum #1491015 - Cortex-A77 */
+ MIDR_RANGE(MIDR_CORTEX_A77, 0, 0, 1, 0),
+ /* Erratum #1502854 - Cortex-X1 */
+ MIDR_REV(MIDR_CORTEX_X1, 0, 0),
+ /* Erratum #1619801 - Neoverse-V1 */
+ MIDR_REV(MIDR_NEOVERSE_V1, 0, 0),
+ {},
+};
+
+static void etm4_fixup_wrong_ccitmin(struct etmv4_drvdata *drvdata)
+{
+ /*
+	 * Erratum-affected CPUs read 256 as the minimum
+	 * instruction trace cycle counting threshold, whereas
+	 * the correct value should be 4 instead. Override the
+	 * recorded value of 'drvdata->ccitmin' to work around
+	 * this problem.
+ */
+ if (is_midr_in_range_list(etm_wrong_ccitmin_cpus)) {
+ if (drvdata->ccitmin == 256)
+ drvdata->ccitmin = 4;
+ }
+}
+
static void etm4_init_arch_data(void *info)
{
u32 etmidr0;
@@ -1065,9 +1346,10 @@ static void etm4_init_arch_data(void *info)
struct etm4_init_arg *init_arg = info;
struct etmv4_drvdata *drvdata;
struct csdev_access *csa;
+ struct device *dev = init_arg->dev;
int i;
- drvdata = init_arg->drvdata;
+ drvdata = dev_get_drvdata(init_arg->dev);
csa = init_arg->csa;
/*
@@ -1078,6 +1360,10 @@ static void etm4_init_arch_data(void *info)
if (!etm4_init_csdev_access(drvdata, csa))
return;
+ if (!csa->io_mem ||
+ fwnode_property_present(dev_fwnode(dev), "qcom,skip-power-up"))
+ drvdata->skip_power_up = true;
+
/* Detect the support for OS Lock before we actually use it */
etm_detect_os_lock(drvdata, csa);
@@ -1085,113 +1371,77 @@ static void etm4_init_arch_data(void *info)
etm4_os_unlock_csa(drvdata, csa);
etm4_cs_unlock(drvdata, csa);
- etm4_check_arch_features(drvdata, init_arg->pid);
+ etm4_check_arch_features(drvdata, csa);
/* find all capabilities of the tracing unit */
etmidr0 = etm4x_relaxed_read32(csa, TRCIDR0);
/* INSTP0, bits[2:1] P0 tracing support field */
- if (BMVAL(etmidr0, 1, 1) && BMVAL(etmidr0, 2, 2))
- drvdata->instrp0 = true;
- else
- drvdata->instrp0 = false;
-
+ drvdata->instrp0 = !!(FIELD_GET(TRCIDR0_INSTP0_MASK, etmidr0) == 0b11);
/* TRCBB, bit[5] Branch broadcast tracing support bit */
- if (BMVAL(etmidr0, 5, 5))
- drvdata->trcbb = true;
- else
- drvdata->trcbb = false;
-
+ drvdata->trcbb = !!(etmidr0 & TRCIDR0_TRCBB);
/* TRCCOND, bit[6] Conditional instruction tracing support bit */
- if (BMVAL(etmidr0, 6, 6))
- drvdata->trccond = true;
- else
- drvdata->trccond = false;
-
+ drvdata->trccond = !!(etmidr0 & TRCIDR0_TRCCOND);
/* TRCCCI, bit[7] Cycle counting instruction bit */
- if (BMVAL(etmidr0, 7, 7))
- drvdata->trccci = true;
- else
- drvdata->trccci = false;
-
+ drvdata->trccci = !!(etmidr0 & TRCIDR0_TRCCCI);
/* RETSTACK, bit[9] Return stack bit */
- if (BMVAL(etmidr0, 9, 9))
- drvdata->retstack = true;
- else
- drvdata->retstack = false;
-
+ drvdata->retstack = !!(etmidr0 & TRCIDR0_RETSTACK);
/* NUMEVENT, bits[11:10] Number of events field */
- drvdata->nr_event = BMVAL(etmidr0, 10, 11);
+ drvdata->nr_event = FIELD_GET(TRCIDR0_NUMEVENT_MASK, etmidr0);
/* QSUPP, bits[16:15] Q element support field */
- drvdata->q_support = BMVAL(etmidr0, 15, 16);
+ drvdata->q_support = FIELD_GET(TRCIDR0_QSUPP_MASK, etmidr0);
+ if (drvdata->q_support)
+ drvdata->q_filt = !!(etmidr0 & TRCIDR0_QFILT);
/* TSSIZE, bits[28:24] Global timestamp size field */
- drvdata->ts_size = BMVAL(etmidr0, 24, 28);
+ drvdata->ts_size = FIELD_GET(TRCIDR0_TSSIZE_MASK, etmidr0);
/* maximum size of resources */
etmidr2 = etm4x_relaxed_read32(csa, TRCIDR2);
/* CIDSIZE, bits[9:5] Indicates the Context ID size */
- drvdata->ctxid_size = BMVAL(etmidr2, 5, 9);
+ drvdata->ctxid_size = FIELD_GET(TRCIDR2_CIDSIZE_MASK, etmidr2);
/* VMIDSIZE, bits[14:10] Indicates the VMID size */
- drvdata->vmid_size = BMVAL(etmidr2, 10, 14);
+ drvdata->vmid_size = FIELD_GET(TRCIDR2_VMIDSIZE_MASK, etmidr2);
/* CCSIZE, bits[28:25] size of the cycle counter in bits minus 12 */
- drvdata->ccsize = BMVAL(etmidr2, 25, 28);
+ drvdata->ccsize = FIELD_GET(TRCIDR2_CCSIZE_MASK, etmidr2);
etmidr3 = etm4x_relaxed_read32(csa, TRCIDR3);
/* CCITMIN, bits[11:0] minimum threshold value that can be programmed */
- drvdata->ccitmin = BMVAL(etmidr3, 0, 11);
+ drvdata->ccitmin = FIELD_GET(TRCIDR3_CCITMIN_MASK, etmidr3);
+ etm4_fixup_wrong_ccitmin(drvdata);
+
/* EXLEVEL_S, bits[19:16] Secure state instruction tracing */
- drvdata->s_ex_level = BMVAL(etmidr3, 16, 19);
+ drvdata->s_ex_level = FIELD_GET(TRCIDR3_EXLEVEL_S_MASK, etmidr3);
drvdata->config.s_ex_level = drvdata->s_ex_level;
/* EXLEVEL_NS, bits[23:20] Non-secure state instruction tracing */
- drvdata->ns_ex_level = BMVAL(etmidr3, 20, 23);
-
+ drvdata->ns_ex_level = FIELD_GET(TRCIDR3_EXLEVEL_NS_MASK, etmidr3);
/*
* TRCERR, bit[24] whether a trace unit can trace a
* system error exception.
*/
- if (BMVAL(etmidr3, 24, 24))
- drvdata->trc_error = true;
- else
- drvdata->trc_error = false;
-
+ drvdata->trc_error = !!(etmidr3 & TRCIDR3_TRCERR);
/* SYNCPR, bit[25] implementation has a fixed synchronization period? */
- if (BMVAL(etmidr3, 25, 25))
- drvdata->syncpr = true;
- else
- drvdata->syncpr = false;
-
+ drvdata->syncpr = !!(etmidr3 & TRCIDR3_SYNCPR);
/* STALLCTL, bit[26] is stall control implemented? */
- if (BMVAL(etmidr3, 26, 26))
- drvdata->stallctl = true;
- else
- drvdata->stallctl = false;
-
+ drvdata->stallctl = !!(etmidr3 & TRCIDR3_STALLCTL);
/* SYSSTALL, bit[27] implementation can support stall control? */
- if (BMVAL(etmidr3, 27, 27))
- drvdata->sysstall = true;
- else
- drvdata->sysstall = false;
-
+ drvdata->sysstall = !!(etmidr3 & TRCIDR3_SYSSTALL);
/*
* NUMPROC - the number of PEs available for tracing, 5 bits
*	= TRCIDR3.bits[13:12] : TRCIDR3.bits[30:28]
*	bits[4:3] = TRCIDR3.bits[13:12] (since etm-v4.2, otherwise RES0)
*	bits[2:0] = TRCIDR3.bits[30:28]
*/
- drvdata->nr_pe = (BMVAL(etmidr3, 12, 13) << 3) | BMVAL(etmidr3, 28, 30);
-
+ drvdata->nr_pe = (FIELD_GET(TRCIDR3_NUMPROC_HI_MASK, etmidr3) << 3) |
+ FIELD_GET(TRCIDR3_NUMPROC_LO_MASK, etmidr3);
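
/*
 * Illustrative decode of the split NUMPROC field above, with the shifts
 * written out instead of FIELD_GET() (masks mirror TRCIDR3[30:28] and
 * TRCIDR3[13:12]):
 */
static unsigned int sketch_numproc(uint32_t trcidr3)
{
	unsigned int lo = (trcidr3 >> 28) & 0x7;	/* TRCIDR3[30:28] */
	unsigned int hi = (trcidr3 >> 12) & 0x3;	/* TRCIDR3[13:12] */

	return (hi << 3) | lo;
}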
/* NOOVERFLOW, bit[31] is trace overflow prevention supported */
- if (BMVAL(etmidr3, 31, 31))
- drvdata->nooverflow = true;
- else
- drvdata->nooverflow = false;
+ drvdata->nooverflow = !!(etmidr3 & TRCIDR3_NOOVERFLOW);
/* number of resources trace unit supports */
etmidr4 = etm4x_relaxed_read32(csa, TRCIDR4);
/* NUMACPAIRS, bits[0:3] number of addr comparator pairs for tracing */
- drvdata->nr_addr_cmp = BMVAL(etmidr4, 0, 3);
+ drvdata->nr_addr_cmp = FIELD_GET(TRCIDR4_NUMACPAIRS_MASK, etmidr4);
/* NUMPC, bits[15:12] number of PE comparator inputs for tracing */
- drvdata->nr_pe_cmp = BMVAL(etmidr4, 12, 15);
+ drvdata->nr_pe_cmp = FIELD_GET(TRCIDR4_NUMPC_MASK, etmidr4);
/*
* NUMRSPAIR, bits[19:16]
* The number of resource pairs conveyed by the HW starts at 0, i.e. a
@@ -1202,7 +1452,7 @@ static void etm4_init_arch_data(void *info)
* the default TRUE and FALSE resource selectors are omitted.
* Otherwise for values 0x1 and above the number is N + 1 as per v4.2.
*/
- drvdata->nr_resource = BMVAL(etmidr4, 16, 19);
+ drvdata->nr_resource = FIELD_GET(TRCIDR4_NUMRSPAIR_MASK, etmidr4);
if ((drvdata->arch < ETM_ARCH_V4_3) || (drvdata->nr_resource > 0))
drvdata->nr_resource += 1;
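
/*
 * Sketch of the NUMRSPAIR adjustment above: the raw field starts at 0, so it
 * normally encodes N + 1 resource pairs; from ETMv4.3 a raw value of 0 means
 * only the fixed TRUE/FALSE selectors exist. Names here are illustrative.
 */
static unsigned int sketch_nr_resource_pairs(unsigned int raw, bool v4_3_or_later)
{
	if (v4_3_or_later && raw == 0)
		return 0;
	return raw + 1;
}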
/*
@@ -1210,45 +1460,42 @@ static void etm4_init_arch_data(void *info)
* comparator control for tracing. Read any status regs as these
* also contain RO capability data.
*/
- drvdata->nr_ss_cmp = BMVAL(etmidr4, 20, 23);
+ drvdata->nr_ss_cmp = FIELD_GET(TRCIDR4_NUMSSCC_MASK, etmidr4);
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
drvdata->config.ss_status[i] =
etm4x_relaxed_read32(csa, TRCSSCSRn(i));
}
/* NUMCIDC, bits[27:24] number of Context ID comparators for tracing */
- drvdata->numcidc = BMVAL(etmidr4, 24, 27);
+ drvdata->numcidc = FIELD_GET(TRCIDR4_NUMCIDC_MASK, etmidr4);
/* NUMVMIDC, bits[31:28] number of VMID comparators for tracing */
- drvdata->numvmidc = BMVAL(etmidr4, 28, 31);
+ drvdata->numvmidc = FIELD_GET(TRCIDR4_NUMVMIDC_MASK, etmidr4);
etmidr5 = etm4x_relaxed_read32(csa, TRCIDR5);
/* NUMEXTIN, bits[8:0] number of external inputs implemented */
- drvdata->nr_ext_inp = BMVAL(etmidr5, 0, 8);
+ drvdata->nr_ext_inp = FIELD_GET(TRCIDR5_NUMEXTIN_MASK, etmidr5);
+ drvdata->numextinsel = FIELD_GET(TRCIDR5_NUMEXTINSEL_MASK, etmidr5);
/* TRACEIDSIZE, bits[21:16] indicates the trace ID width */
- drvdata->trcid_size = BMVAL(etmidr5, 16, 21);
+ drvdata->trcid_size = FIELD_GET(TRCIDR5_TRACEIDSIZE_MASK, etmidr5);
/* ATBTRIG, bit[22] implementation can support ATB triggers? */
- if (BMVAL(etmidr5, 22, 22))
- drvdata->atbtrig = true;
- else
- drvdata->atbtrig = false;
+ drvdata->atbtrig = !!(etmidr5 & TRCIDR5_ATBTRIG);
/*
* LPOVERRIDE, bit[23] implementation supports
* low-power state override
*/
- if (BMVAL(etmidr5, 23, 23) && (!drvdata->skip_power_up))
- drvdata->lpoverride = true;
- else
- drvdata->lpoverride = false;
+ drvdata->lpoverride = (etmidr5 & TRCIDR5_LPOVERRIDE) && (!drvdata->skip_power_up);
/* NUMSEQSTATE, bits[27:25] number of sequencer states implemented */
- drvdata->nrseqstate = BMVAL(etmidr5, 25, 27);
+ drvdata->nrseqstate = FIELD_GET(TRCIDR5_NUMSEQSTATE_MASK, etmidr5);
/* NUMCNTR, bits[30:28] number of counters available for tracing */
- drvdata->nr_cntr = BMVAL(etmidr5, 28, 30);
+ drvdata->nr_cntr = FIELD_GET(TRCIDR5_NUMCNTR_MASK, etmidr5);
+
+ coresight_clear_self_claim_tag_unlocked(csa);
etm4_cs_lock(drvdata, csa);
cpu_detect_trace_filtering(drvdata);
}
-static inline u32 etm4_get_victlr_access_type(struct etmv4_config *config)
+static u32 etm4_get_victlr_access_type(struct etmv4_config *config)
{
- return etm4_get_access_type(config) << TRCVICTLR_EXLEVEL_SHIFT;
+ return etm4_get_access_type(config) << __bf_shf(TRCVICTLR_EXLEVEL_MASK);
}
/* Set ELx trace filter access in the TRCVICTLR register */
@@ -1274,7 +1521,7 @@ static void etm4_set_default_config(struct etmv4_config *config)
config->ts_ctrl = 0x0;
/* TRCVICTLR::EVENT = 0x01, select the always on logic */
- config->vinst_ctrl = BIT(0);
+ config->vinst_ctrl = FIELD_PREP(TRCVICTLR_EVENT_MASK, 0x01);
/* TRCVICTLR::EXLEVEL_NS:EXLEVELS: Set kernel / user filtering */
etm4_set_victlr_access(config);
@@ -1383,7 +1630,7 @@ static void etm4_set_default_filter(struct etmv4_config *config)
* TRCVICTLR::SSSTATUS == 1, the start-stop logic is
* in the started state
*/
- config->vinst_ctrl |= BIT(9);
+ config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
/* No start-stop filtering for ViewInst */
@@ -1487,7 +1734,7 @@ static int etm4_set_event_filters(struct etmv4_drvdata *drvdata,
* TRCVICTLR::SSSTATUS == 1, the start-stop logic is
* in the started state
*/
- config->vinst_ctrl |= BIT(9);
+ config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
/* No start-stop filtering for ViewInst */
config->vissctlr = 0x0;
@@ -1506,7 +1753,7 @@ static int etm4_set_event_filters(struct etmv4_drvdata *drvdata,
/*
* If filters::ssstatus == 1, trace acquisition was
* started but the process was yanked away before the
- * the stop address was hit. As such the start/stop
+ * stop address was hit. As such the start/stop
* logic needs to be re-started so that tracing can
* resume where it left.
*
@@ -1515,7 +1762,7 @@ static int etm4_set_event_filters(struct etmv4_drvdata *drvdata,
* etm4_disable_perf().
*/
if (filters->ssstatus)
- config->vinst_ctrl |= BIT(9);
+ config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
/* No include/exclude filtering for ViewInst */
config->viiectlr = 0x0;
@@ -1556,10 +1803,10 @@ void etm4_config_trace_mode(struct etmv4_config *config)
static int etm4_online_cpu(unsigned int cpu)
{
if (!etmdrvdata[cpu])
- return 0;
+ return etm4_probe_cpu(cpu);
if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable)
- coresight_enable(etmdrvdata[cpu]->csdev);
+ coresight_enable_sysfs(etmdrvdata[cpu]->csdev);
return 0;
}
@@ -1568,13 +1815,13 @@ static int etm4_starting_cpu(unsigned int cpu)
if (!etmdrvdata[cpu])
return 0;
- spin_lock(&etmdrvdata[cpu]->spinlock);
+ raw_spin_lock(&etmdrvdata[cpu]->spinlock);
if (!etmdrvdata[cpu]->os_unlock)
etm4_os_unlock(etmdrvdata[cpu]);
- if (local_read(&etmdrvdata[cpu]->mode))
+ if (coresight_get_mode(etmdrvdata[cpu]->csdev))
etm4_enable_hw(etmdrvdata[cpu]);
- spin_unlock(&etmdrvdata[cpu]->spinlock);
+ raw_spin_unlock(&etmdrvdata[cpu]->spinlock);
return 0;
}
@@ -1583,18 +1830,13 @@ static int etm4_dying_cpu(unsigned int cpu)
if (!etmdrvdata[cpu])
return 0;
- spin_lock(&etmdrvdata[cpu]->spinlock);
- if (local_read(&etmdrvdata[cpu]->mode))
+ raw_spin_lock(&etmdrvdata[cpu]->spinlock);
+ if (coresight_get_mode(etmdrvdata[cpu]->csdev))
etm4_disable_hw(etmdrvdata[cpu]);
- spin_unlock(&etmdrvdata[cpu]->spinlock);
+ raw_spin_unlock(&etmdrvdata[cpu]->spinlock);
return 0;
}
-static void etm4_init_trace_id(struct etmv4_drvdata *drvdata)
-{
- drvdata->trcid = coresight_get_trace_id(drvdata->cpu);
-}
-
static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
{
int i, ret = 0;
@@ -1621,7 +1863,7 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
etm4_os_lock(drvdata);
/* wait for TRCSTATR.PMSTABLE to go up */
- if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1)) {
+ if (etm4x_wait_status(csa, TRCSTATR_PMSTABLE_BIT, 1)) {
dev_err(etm_dev,
"timeout while waiting for PM Stable Status\n");
etm4_os_unlock(drvdata);
@@ -1629,9 +1871,11 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
goto out;
}
+ if (!drvdata->paused)
+ etm4_disable_trace_unit(drvdata);
+
state = drvdata->save_state;
- state->trcprgctlr = etm4x_read32(csa, TRCPRGCTLR);
if (drvdata->nr_pe)
state->trcprocselr = etm4x_read32(csa, TRCPROCSELR);
state->trcconfigr = etm4x_read32(csa, TRCCONFIGR);
@@ -1645,23 +1889,25 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
state->trcccctlr = etm4x_read32(csa, TRCCCCTLR);
state->trcbbctlr = etm4x_read32(csa, TRCBBCTLR);
state->trctraceidr = etm4x_read32(csa, TRCTRACEIDR);
- state->trcqctlr = etm4x_read32(csa, TRCQCTLR);
+ if (drvdata->q_filt)
+ state->trcqctlr = etm4x_read32(csa, TRCQCTLR);
state->trcvictlr = etm4x_read32(csa, TRCVICTLR);
state->trcviiectlr = etm4x_read32(csa, TRCVIIECTLR);
state->trcvissctlr = etm4x_read32(csa, TRCVISSCTLR);
if (drvdata->nr_pe_cmp)
state->trcvipcssctlr = etm4x_read32(csa, TRCVIPCSSCTLR);
- state->trcvdctlr = etm4x_read32(csa, TRCVDCTLR);
- state->trcvdsacctlr = etm4x_read32(csa, TRCVDSACCTLR);
- state->trcvdarcctlr = etm4x_read32(csa, TRCVDARCCTLR);
for (i = 0; i < drvdata->nrseqstate - 1; i++)
state->trcseqevr[i] = etm4x_read32(csa, TRCSEQEVRn(i));
- state->trcseqrstevr = etm4x_read32(csa, TRCSEQRSTEVR);
- state->trcseqstr = etm4x_read32(csa, TRCSEQSTR);
- state->trcextinselr = etm4x_read32(csa, TRCEXTINSELR);
+ if (drvdata->nrseqstate) {
+ state->trcseqrstevr = etm4x_read32(csa, TRCSEQRSTEVR);
+ state->trcseqstr = etm4x_read32(csa, TRCSEQSTR);
+ }
+
+ if (drvdata->numextinsel)
+ state->trcextinselr = etm4x_read32(csa, TRCEXTINSELR);
for (i = 0; i < drvdata->nr_cntr; i++) {
state->trccntrldvr[i] = etm4x_read32(csa, TRCCNTRLDVRn(i));
@@ -1669,7 +1915,8 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
state->trccntvr[i] = etm4x_read32(csa, TRCCNTVRn(i));
}
- for (i = 0; i < drvdata->nr_resource * 2; i++)
+ /* Resource selector pair 0 is reserved */
+ for (i = 2; i < drvdata->nr_resource * 2; i++)
state->trcrsctlr[i] = etm4x_read32(csa, TRCRSCTLRn(i));
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
@@ -1711,7 +1958,7 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
state->trcpdcr = etm4x_read32(csa, TRCPDCR);
/* wait for TRCSTATR.IDLE to go up */
- if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 1)) {
+ if (etm4x_wait_status(csa, TRCSTATR_IDLE_BIT, 1)) {
dev_err(etm_dev,
"timeout while waiting for Idle Trace Status\n");
etm4_os_unlock(drvdata);
@@ -1719,8 +1966,6 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
goto out;
}
- drvdata->state_needs_restore = true;
-
/*
* Power can be removed from the trace unit now. We do this to
* potentially save power on systems that respect the TRCPDCR_PU
@@ -1738,14 +1983,14 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
{
int ret = 0;
- /* Save the TRFCR irrespective of whether the ETM is ON */
- if (drvdata->trfcr)
- drvdata->save_trfcr = read_trfcr();
+ if (pm_save_enable != PARAM_PM_SAVE_SELF_HOSTED)
+ return 0;
+
/*
* Save and restore the ETM Trace registers only if
* the ETM is active.
*/
- if (local_read(&drvdata->mode) && drvdata->save_state)
+ if (coresight_get_mode(drvdata->csdev))
ret = __etm4_cpu_save(drvdata);
return ret;
}
@@ -1754,13 +1999,14 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata)
{
int i;
struct etmv4_save_state *state = drvdata->save_state;
- struct csdev_access tmp_csa = CSDEV_ACCESS_IOMEM(drvdata->base);
- struct csdev_access *csa = &tmp_csa;
+ struct csdev_access *csa = &drvdata->csdev->access;
+
+ if (WARN_ON(!drvdata->csdev))
+ return;
etm4_cs_unlock(drvdata, csa);
etm4x_relaxed_write32(csa, state->trcclaimset, TRCCLAIMSET);
- etm4x_relaxed_write32(csa, state->trcprgctlr, TRCPRGCTLR);
if (drvdata->nr_pe)
etm4x_relaxed_write32(csa, state->trcprocselr, TRCPROCSELR);
etm4x_relaxed_write32(csa, state->trcconfigr, TRCCONFIGR);
@@ -1774,23 +2020,24 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata)
etm4x_relaxed_write32(csa, state->trcccctlr, TRCCCCTLR);
etm4x_relaxed_write32(csa, state->trcbbctlr, TRCBBCTLR);
etm4x_relaxed_write32(csa, state->trctraceidr, TRCTRACEIDR);
- etm4x_relaxed_write32(csa, state->trcqctlr, TRCQCTLR);
+ if (drvdata->q_filt)
+ etm4x_relaxed_write32(csa, state->trcqctlr, TRCQCTLR);
etm4x_relaxed_write32(csa, state->trcvictlr, TRCVICTLR);
etm4x_relaxed_write32(csa, state->trcviiectlr, TRCVIIECTLR);
etm4x_relaxed_write32(csa, state->trcvissctlr, TRCVISSCTLR);
if (drvdata->nr_pe_cmp)
etm4x_relaxed_write32(csa, state->trcvipcssctlr, TRCVIPCSSCTLR);
- etm4x_relaxed_write32(csa, state->trcvdctlr, TRCVDCTLR);
- etm4x_relaxed_write32(csa, state->trcvdsacctlr, TRCVDSACCTLR);
- etm4x_relaxed_write32(csa, state->trcvdarcctlr, TRCVDARCCTLR);
for (i = 0; i < drvdata->nrseqstate - 1; i++)
etm4x_relaxed_write32(csa, state->trcseqevr[i], TRCSEQEVRn(i));
- etm4x_relaxed_write32(csa, state->trcseqrstevr, TRCSEQRSTEVR);
- etm4x_relaxed_write32(csa, state->trcseqstr, TRCSEQSTR);
- etm4x_relaxed_write32(csa, state->trcextinselr, TRCEXTINSELR);
+ if (drvdata->nrseqstate) {
+ etm4x_relaxed_write32(csa, state->trcseqrstevr, TRCSEQRSTEVR);
+ etm4x_relaxed_write32(csa, state->trcseqstr, TRCSEQSTR);
+ }
+ if (drvdata->numextinsel)
+ etm4x_relaxed_write32(csa, state->trcextinselr, TRCEXTINSELR);
for (i = 0; i < drvdata->nr_cntr; i++) {
etm4x_relaxed_write32(csa, state->trccntrldvr[i], TRCCNTRLDVRn(i));
@@ -1798,7 +2045,8 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata)
etm4x_relaxed_write32(csa, state->trccntvr[i], TRCCNTVRn(i));
}
- for (i = 0; i < drvdata->nr_resource * 2; i++)
+ /* Resource selector pair 0 is reserved */
+ for (i = 2; i < drvdata->nr_resource * 2; i++)
etm4x_relaxed_write32(csa, state->trcrsctlr[i], TRCRSCTLRn(i));
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
@@ -1832,8 +2080,6 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata)
if (!drvdata->skip_power_up)
etm4x_relaxed_write32(csa, state->trcpdcr, TRCPDCR);
- drvdata->state_needs_restore = false;
-
/*
* As recommended by section 4.3.7 ("Synchronization when using the
* memory-mapped interface") of ARM IHI 0064D
@@ -1843,14 +2089,19 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata)
/* Unlock the OS lock to re-enable trace and external debug access */
etm4_os_unlock(drvdata);
+
+ if (!drvdata->paused)
+ etm4_enable_trace_unit(drvdata);
+
etm4_cs_lock(drvdata, csa);
}
static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
{
- if (drvdata->trfcr)
- write_trfcr(drvdata->save_trfcr);
- if (drvdata->state_needs_restore)
+ if (pm_save_enable != PARAM_PM_SAVE_SELF_HOSTED)
+ return;
+
+ if (coresight_get_mode(drvdata->csdev))
__etm4_cpu_restore(drvdata);
}
@@ -1932,57 +2183,24 @@ static void etm4_pm_clear(void)
}
}
-static int etm4_probe(struct device *dev, void __iomem *base, u32 etm_pid)
+static int etm4_add_coresight_dev(struct etm4_init_arg *init_arg)
{
int ret;
struct coresight_platform_data *pdata = NULL;
- struct etmv4_drvdata *drvdata;
+ struct device *dev = init_arg->dev;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
struct coresight_desc desc = { 0 };
- struct etm4_init_arg init_arg = { 0 };
u8 major, minor;
char *type_name;
- drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
- return -ENOMEM;
-
- dev_set_drvdata(dev, drvdata);
-
- if (pm_save_enable == PARAM_PM_SAVE_FIRMWARE)
- pm_save_enable = coresight_loses_context_with_cpu(dev) ?
- PARAM_PM_SAVE_SELF_HOSTED : PARAM_PM_SAVE_NEVER;
-
- if (pm_save_enable != PARAM_PM_SAVE_NEVER) {
- drvdata->save_state = devm_kmalloc(dev,
- sizeof(struct etmv4_save_state), GFP_KERNEL);
- if (!drvdata->save_state)
- return -ENOMEM;
- }
-
- drvdata->base = base;
-
- spin_lock_init(&drvdata->spinlock);
-
- drvdata->cpu = coresight_get_cpu(dev);
- if (drvdata->cpu < 0)
- return drvdata->cpu;
-
- init_arg.drvdata = drvdata;
- init_arg.csa = &desc.access;
- init_arg.pid = etm_pid;
+ return -EINVAL;
- if (smp_call_function_single(drvdata->cpu,
- etm4_init_arch_data, &init_arg, 1))
- dev_err(dev, "ETM arch init failed\n");
+ desc.access = *init_arg->csa;
if (!drvdata->arch)
return -EINVAL;
- /* TRCPDCR is not accessible with system instructions. */
- if (!desc.access.io_mem ||
- fwnode_property_present(dev_fwnode(dev), "qcom,skip-power-up"))
- drvdata->skip_power_up = true;
-
major = ETM_ARCH_MAJOR_VERSION(drvdata->arch);
minor = ETM_ARCH_MINOR_VERSION(drvdata->arch);
@@ -1999,7 +2217,6 @@ static int etm4_probe(struct device *dev, void __iomem *base, u32 etm_pid)
if (!desc.name)
return -ENOMEM;
- etm4_init_trace_id(drvdata);
etm4_set_default(&drvdata->config);
pdata = coresight_get_platform_data(dev);
@@ -2037,15 +2254,77 @@ static int etm4_probe(struct device *dev, void __iomem *base, u32 etm_pid)
drvdata->cpu, type_name, major, minor);
if (boot_enable) {
- coresight_enable(drvdata->csdev);
+ coresight_enable_sysfs(drvdata->csdev);
drvdata->boot_enable = true;
}
return 0;
}
+static int etm4_probe(struct device *dev)
+{
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
+ struct csdev_access access = { 0 };
+ struct etm4_init_arg init_arg = { 0 };
+ struct etm4_init_arg *delayed;
+ int ret;
+
+ if (WARN_ON(!drvdata))
+ return -ENOMEM;
+
+ ret = coresight_get_enable_clocks(dev, &drvdata->pclk, &drvdata->atclk);
+ if (ret)
+ return ret;
+
+ if (pm_save_enable == PARAM_PM_SAVE_FIRMWARE)
+ pm_save_enable = coresight_loses_context_with_cpu(dev) ?
+ PARAM_PM_SAVE_SELF_HOSTED : PARAM_PM_SAVE_NEVER;
+
+ if (pm_save_enable != PARAM_PM_SAVE_NEVER) {
+ drvdata->save_state = devm_kmalloc(dev,
+ sizeof(struct etmv4_save_state), GFP_KERNEL);
+ if (!drvdata->save_state)
+ return -ENOMEM;
+ }
+
+ raw_spin_lock_init(&drvdata->spinlock);
+
+ drvdata->cpu = coresight_get_cpu(dev);
+ if (drvdata->cpu < 0)
+ return drvdata->cpu;
+
+ init_arg.dev = dev;
+ init_arg.csa = &access;
+
+ /*
+	 * Serialize against CPUHP callbacks to avoid a race condition
+ * between the smp call and saving the delayed probe.
+ */
+ cpus_read_lock();
+ if (smp_call_function_single(drvdata->cpu,
+ etm4_init_arch_data, &init_arg, 1)) {
+ /* The CPU was offline, try again once it comes online. */
+ delayed = devm_kmalloc(dev, sizeof(*delayed), GFP_KERNEL);
+ if (!delayed) {
+ cpus_read_unlock();
+ return -ENOMEM;
+ }
+
+ *delayed = init_arg;
+
+ per_cpu(delayed_probe, drvdata->cpu) = delayed;
+
+ cpus_read_unlock();
+ return 0;
+ }
+ cpus_read_unlock();
+
+ return etm4_add_coresight_dev(&init_arg);
+}
+
static int etm4_probe_amba(struct amba_device *adev, const struct amba_id *id)
{
+ struct etmv4_drvdata *drvdata;
void __iomem *base;
struct device *dev = &adev->dev;
struct resource *res = &adev->res;
@@ -2056,7 +2335,13 @@ static int etm4_probe_amba(struct amba_device *adev, const struct amba_id *id)
if (IS_ERR(base))
return PTR_ERR(base);
- ret = etm4_probe(dev, base, id->id);
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->base = base;
+ dev_set_drvdata(dev, drvdata);
+ ret = etm4_probe(dev);
if (!ret)
pm_runtime_put(&adev->dev);
@@ -2065,29 +2350,69 @@ static int etm4_probe_amba(struct amba_device *adev, const struct amba_id *id)
static int etm4_probe_platform_dev(struct platform_device *pdev)
{
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct etmv4_drvdata *drvdata;
int ret;
+ drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ if (res) {
+ drvdata->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(drvdata->base))
+ return PTR_ERR(drvdata->base);
+ }
+
+ dev_set_drvdata(&pdev->dev, drvdata);
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- /*
- * System register based devices could match the
- * HW by reading appropriate registers on the HW
- * and thus we could skip the PID.
- */
- ret = etm4_probe(&pdev->dev, NULL, 0);
+ ret = etm4_probe(&pdev->dev);
pm_runtime_put(&pdev->dev);
+ if (ret)
+ pm_runtime_disable(&pdev->dev);
+
return ret;
}
+static int etm4_probe_cpu(unsigned int cpu)
+{
+ int ret;
+ struct etm4_init_arg init_arg;
+ struct csdev_access access = { 0 };
+ struct etm4_init_arg *iap = *this_cpu_ptr(&delayed_probe);
+
+ if (!iap)
+ return 0;
+
+ init_arg = *iap;
+ devm_kfree(init_arg.dev, iap);
+ *this_cpu_ptr(&delayed_probe) = NULL;
+
+ ret = pm_runtime_resume_and_get(init_arg.dev);
+ if (ret < 0) {
+ dev_err(init_arg.dev, "Failed to get PM runtime!\n");
+ return 0;
+ }
+
+ init_arg.csa = &access;
+ etm4_init_arch_data(&init_arg);
+
+ etm4_add_coresight_dev(&init_arg);
+
+ pm_runtime_put(init_arg.dev);
+ return 0;
+}
+
static struct amba_cs_uci_id uci_id_etm4[] = {
{
/* ETMv4 UCI data */
.devarch = ETM_DEVARCH_ETMv4x_ARCH,
.devarch_mask = ETM_DEVARCH_ID_MASK,
- .devtype = 0x00000013,
+ .devtype = CS_DEVTYPE_PE_TRACE,
}
};
@@ -2096,16 +2421,20 @@ static void clear_etmdrvdata(void *info)
int cpu = *(int *)info;
etmdrvdata[cpu] = NULL;
+ per_cpu(delayed_probe, cpu) = NULL;
}
-static int __exit etm4_remove_dev(struct etmv4_drvdata *drvdata)
+static void etm4_remove_dev(struct etmv4_drvdata *drvdata)
{
- etm_perf_symlink(drvdata->csdev, false);
+ bool had_delayed_probe;
/*
* Taking hotplug lock here to avoid racing between etm4_remove_dev()
* and CPU hotplug call backs.
*/
cpus_read_lock();
+
+ had_delayed_probe = per_cpu(delayed_probe, drvdata->cpu);
+
/*
* The readers for etmdrvdata[] are CPU hotplug call backs
* and PM notification call backs. Change etmdrvdata[i] on
@@ -2113,17 +2442,18 @@ static int __exit etm4_remove_dev(struct etmv4_drvdata *drvdata)
* inside one call back function.
*/
if (smp_call_function_single(drvdata->cpu, clear_etmdrvdata, &drvdata->cpu, 1))
- etmdrvdata[drvdata->cpu] = NULL;
+ clear_etmdrvdata(&drvdata->cpu);
cpus_read_unlock();
- cscfg_unregister_csdev(drvdata->csdev);
- coresight_unregister(drvdata->csdev);
-
- return 0;
+ if (!had_delayed_probe) {
+ etm_perf_symlink(drvdata->csdev, false);
+ cscfg_unregister_csdev(drvdata->csdev);
+ coresight_unregister(drvdata->csdev);
+ }
}
-static void __exit etm4_remove_amba(struct amba_device *adev)
+static void etm4_remove_amba(struct amba_device *adev)
{
struct etmv4_drvdata *drvdata = dev_get_drvdata(&adev->dev);
@@ -2131,15 +2461,13 @@ static void __exit etm4_remove_amba(struct amba_device *adev)
etm4_remove_dev(drvdata);
}
-static int __exit etm4_remove_platform_dev(struct platform_device *pdev)
+static void etm4_remove_platform_dev(struct platform_device *pdev)
{
- int ret = 0;
struct etmv4_drvdata *drvdata = dev_get_drvdata(&pdev->dev);
if (drvdata)
- ret = etm4_remove_dev(drvdata);
+ etm4_remove_dev(drvdata);
pm_runtime_disable(&pdev->dev);
- return ret;
}
static const struct amba_id etm4_ids[] = {
@@ -2162,6 +2490,11 @@ static const struct amba_id etm4_ids[] = {
CS_AMBA_UCI_ID(0x000cc0af, uci_id_etm4),/* Marvell ThunderX2 */
CS_AMBA_UCI_ID(0x000b6d01, uci_id_etm4),/* HiSilicon-Hip08 */
CS_AMBA_UCI_ID(0x000b6d02, uci_id_etm4),/* HiSilicon-Hip09 */
+ /*
+	 * Match all PIDs with ETM4 DEVARCH. No need to add any of the new
+	 * CPUs to the list here.
+ */
+ CS_AMBA_MATCH_ALL_UCI(uci_id_etm4),
{},
};
@@ -2170,7 +2503,6 @@ MODULE_DEVICE_TABLE(amba, etm4_ids);
static struct amba_driver etm4x_amba_driver = {
.drv = {
.name = "coresight-etm4x",
- .owner = THIS_MODULE,
.suppress_bind_attrs = true,
},
.probe = etm4_probe_amba,
@@ -2178,19 +2510,61 @@ static struct amba_driver etm4x_amba_driver = {
.id_table = etm4_ids,
};
+#ifdef CONFIG_PM
+static int etm4_runtime_suspend(struct device *dev)
+{
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(drvdata->atclk);
+ clk_disable_unprepare(drvdata->pclk);
+
+ return 0;
+}
+
+static int etm4_runtime_resume(struct device *dev)
+{
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(drvdata->pclk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(drvdata->atclk);
+ if (ret)
+ clk_disable_unprepare(drvdata->pclk);
+
+ return ret;
+}
+#endif
+
+static const struct dev_pm_ops etm4_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(etm4_runtime_suspend, etm4_runtime_resume, NULL)
+};
+
static const struct of_device_id etm4_sysreg_match[] = {
{ .compatible = "arm,coresight-etm4x-sysreg" },
{ .compatible = "arm,embedded-trace-extension" },
{}
};
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id etm4x_acpi_ids[] = {
+ {"ARMHC500", 0, 0, 0}, /* ARM CoreSight ETM4x */
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, etm4x_acpi_ids);
+#endif
+
static struct platform_driver etm4_platform_driver = {
.probe = etm4_probe_platform_dev,
.remove = etm4_remove_platform_dev,
.driver = {
.name = "coresight-etm4x",
.of_match_table = etm4_sysreg_match,
+ .acpi_match_table = ACPI_PTR(etm4x_acpi_ids),
.suppress_bind_attrs = true,
+ .pm = &etm4_dev_pm_ops,
},
};