author     Linus Torvalds <torvalds@linux-foundation.org>  2023-02-21 15:27:48 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-02-21 15:27:48 -0800
commit     8bf1a529cd664c8e5268381f1e24fe67aa611dd3 (patch)
tree       c5cec84941923778e4e2ec5a5d65e2fc8ea71b58 /drivers/perf
parent     b327dfe05258e09c8db6e1e091c2e6d84dd426a6 (diff)
parent     d54170812ef1c80e0fa3ed3e554a0bbfc2920d9d (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Catalin Marinas:

 - Support for arm64 SME 2 and 2.1. SME2 introduces a new 512-bit architectural register (ZT0, for the look-up table feature) that Linux needs to save/restore

 - Include TPIDR2 in the signal context and add the corresponding kselftests

 - Perf updates: Arm SPEv1.2 support, HiSilicon uncore PMU updates, ACPI support for the Marvell DDR and TAD PMU drivers, reset DTM_PMU_CONFIG (ARM CMN) at probe time

 - Support for DYNAMIC_FTRACE_WITH_CALL_OPS on arm64

 - Permit EFI boot with MMU and caches on. Instead of cleaning the entire loaded kernel image to the PoC and disabling the MMU and caches before branching to the kernel bare-metal entry point, leave the MMU and caches enabled and rely on EFI's cacheable 1:1 mapping of all of system RAM to populate the initial page tables

 - Expose the AArch32 (compat) ELF_HWCAP features to userspace in an arm64 kernel (the arm32 kernel only defines the values)

 - Harden the arm64 shadow call stack pointer handling: stash the shadow stack pointer in the task struct on interrupt and load it directly from this structure

 - Signal handling cleanups to remove redundant validation of size information and avoid reading the same data from userspace twice

 - Refactor the hwcap macros to make use of the automatically generated ID registers; this should make writing new hwcaps less error-prone

 - Further arm64 sysreg conversion and some fixes

 - arm64 kselftest fixes and improvements

 - Pointer authentication cleanups: don't sign leaf functions, unify asm-arch manipulation

 - Pseudo-NMI code generation optimisations

 - Minor fixes for SME and TPIDR2 handling

 - Miscellaneous updates: ARCH_FORCE_MAX_ORDER is now selectable, replace strtobool() with kstrtobool() in the cpufeature.c code, apply dynamic shadow call stack patching in two passes, catch pfn changes in set_pte_at() that lack the required break-before-make sequence, attempt to dump all instructions on unhandled kernel faults

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (130 commits)
  arm64: fix .idmap.text assertion for large kernels
  kselftest/arm64: Don't require FA64 for streaming SVE+ZA tests
  kselftest/arm64: Copy whole EXTRA context
  arm64: kprobes: Drop ID map text from kprobes blacklist
  perf: arm_spe: Print the version of SPE detected
  perf: arm_spe: Add support for SPEv1.2 inverted event filtering
  perf: Add perf_event_attr::config3
  arm64/sme: Fix __finalise_el2 SMEver check
  drivers/perf: fsl_imx8_ddr_perf: Remove set-but-not-used variable
  arm64/signal: Only read new data when parsing the ZT context
  arm64/signal: Only read new data when parsing the ZA context
  arm64/signal: Only read new data when parsing the SVE context
  arm64/signal: Avoid rereading context frame sizes
  arm64/signal: Make interface for restore_fpsimd_context() consistent
  arm64/signal: Remove redundant size validation from parse_user_sigframe()
  arm64/signal: Don't redundantly verify FPSIMD magic
  arm64/cpufeature: Use helper macros to specify hwcaps
  arm64/cpufeature: Always use symbolic name for feature value in hwcaps
  arm64/sysreg: Initial unsigned annotations for ID registers
  arm64/sysreg: Initial annotation of signed ID registers
  ...
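Of the drivers/perf changes below, the SPEv1.2 inverted event filtering is the one with a new user-visible interface: the arm_spe PMU gains an inv_event_filter format field mapped onto the new perf_event_attr::config3 word (see the arm_spe_pmu.c diff). A minimal userspace sketch of opening such an event follows; the sysfs lookup of the dynamic PMU type is omitted and the filter value is a placeholder, so treat it as an illustration under those assumptions rather than a tested recipe.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>

/*
 * Hypothetical example: open an arm_spe event with the SPEv1.2 inverted
 * event filter. Requires UAPI headers that already have
 * perf_event_attr::config3 (added by this series) and an SPE PMU that
 * advertises the inv_event_filter format attribute.
 */
static int open_spe_event(int spe_pmu_type, int cpu)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = spe_pmu_type;	/* dynamic type read from sysfs (not shown) */
	attr.sample_period = 4096;	/* must be >= the PMU's advertised min_period */
	attr.config3 = 1ULL << 7;	/* placeholder PMSNEVFR_EL1 event bit to filter out */

	/* pid = -1, one CPU, no group, no flags: system-wide per-CPU profiling */
	return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
}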
Diffstat (limited to 'drivers/perf')
-rw-r--r--  drivers/perf/arm-cmn.c                         |   1
-rw-r--r--  drivers/perf/arm_spe_pmu.c                     | 160
-rw-r--r--  drivers/perf/fsl_imx8_ddr_perf.c               |   3
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c   |  16
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c  |   2
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_hha_pmu.c   |   2
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c   |   2
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_pa_pmu.c    |   2
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_pmu.c       |   9
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_pmu.h       |   4
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c  |   2
-rw-r--r--  drivers/perf/marvell_cn10k_ddr_pmu.c           |  10
-rw-r--r--  drivers/perf/marvell_cn10k_tad_pmu.c           |  22
13 files changed, 143 insertions, 92 deletions
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index 1deb61b22bc7..c9689861be3f 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -1870,6 +1870,7 @@ static void arm_cmn_init_dtm(struct arm_cmn_dtm *dtm, struct arm_cmn_node *xp, i
dtm->base = xp->pmu_base + CMN_DTM_OFFSET(idx);
dtm->pmu_config_low = CMN_DTM_PMU_CONFIG_PMU_EN;
+ writeq_relaxed(dtm->pmu_config_low, dtm->base + CMN_DTM_PMU_CONFIG);
for (i = 0; i < 4; i++) {
dtm->wp_event[i] = -1;
writeq_relaxed(0, dtm->base + CMN_DTM_WPn_MASK(i));
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index 00e3a637f7b6..b9ba4c4fe5a2 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -12,6 +12,7 @@
#define DRVNAME PMUNAME "_pmu"
#define pr_fmt(fmt) DRVNAME ": " fmt
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/capability.h>
@@ -84,6 +85,7 @@ struct arm_spe_pmu {
#define SPE_PMU_FEAT_ARCH_INST (1UL << 3)
#define SPE_PMU_FEAT_LDS (1UL << 4)
#define SPE_PMU_FEAT_ERND (1UL << 5)
+#define SPE_PMU_FEAT_INV_FILT_EVT (1UL << 6)
#define SPE_PMU_FEAT_DEV_PROBED (1UL << 63)
u64 features;
@@ -201,6 +203,10 @@ static const struct attribute_group arm_spe_pmu_cap_group = {
#define ATTR_CFG_FLD_min_latency_LO 0
#define ATTR_CFG_FLD_min_latency_HI 11
+#define ATTR_CFG_FLD_inv_event_filter_CFG config3 /* PMSNEVFR_EL1 */
+#define ATTR_CFG_FLD_inv_event_filter_LO 0
+#define ATTR_CFG_FLD_inv_event_filter_HI 63
+
/* Why does everything I do descend into this? */
#define __GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \
(lo) == (hi) ? #cfg ":" #lo "\n" : #cfg ":" #lo "-" #hi
@@ -231,6 +237,7 @@ GEN_PMU_FORMAT_ATTR(branch_filter);
GEN_PMU_FORMAT_ATTR(load_filter);
GEN_PMU_FORMAT_ATTR(store_filter);
GEN_PMU_FORMAT_ATTR(event_filter);
+GEN_PMU_FORMAT_ATTR(inv_event_filter);
GEN_PMU_FORMAT_ATTR(min_latency);
static struct attribute *arm_spe_pmu_formats_attr[] = {
@@ -242,12 +249,27 @@ static struct attribute *arm_spe_pmu_formats_attr[] = {
&format_attr_load_filter.attr,
&format_attr_store_filter.attr,
&format_attr_event_filter.attr,
+ &format_attr_inv_event_filter.attr,
&format_attr_min_latency.attr,
NULL,
};
+static umode_t arm_spe_pmu_format_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr,
+ int unused)
+ {
+ struct device *dev = kobj_to_dev(kobj);
+ struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev);
+
+ if (attr == &format_attr_inv_event_filter.attr && !(spe_pmu->features & SPE_PMU_FEAT_INV_FILT_EVT))
+ return 0;
+
+ return attr->mode;
+}
+
static const struct attribute_group arm_spe_pmu_format_group = {
.name = "format",
+ .is_visible = arm_spe_pmu_format_attr_is_visible,
.attrs = arm_spe_pmu_formats_attr,
};
@@ -282,18 +304,18 @@ static u64 arm_spe_event_to_pmscr(struct perf_event *event)
struct perf_event_attr *attr = &event->attr;
u64 reg = 0;
- reg |= ATTR_CFG_GET_FLD(attr, ts_enable) << SYS_PMSCR_EL1_TS_SHIFT;
- reg |= ATTR_CFG_GET_FLD(attr, pa_enable) << SYS_PMSCR_EL1_PA_SHIFT;
- reg |= ATTR_CFG_GET_FLD(attr, pct_enable) << SYS_PMSCR_EL1_PCT_SHIFT;
+ reg |= FIELD_PREP(PMSCR_EL1_TS, ATTR_CFG_GET_FLD(attr, ts_enable));
+ reg |= FIELD_PREP(PMSCR_EL1_PA, ATTR_CFG_GET_FLD(attr, pa_enable));
+ reg |= FIELD_PREP(PMSCR_EL1_PCT, ATTR_CFG_GET_FLD(attr, pct_enable));
if (!attr->exclude_user)
- reg |= BIT(SYS_PMSCR_EL1_E0SPE_SHIFT);
+ reg |= PMSCR_EL1_E0SPE;
if (!attr->exclude_kernel)
- reg |= BIT(SYS_PMSCR_EL1_E1SPE_SHIFT);
+ reg |= PMSCR_EL1_E1SPE;
if (get_spe_event_has_cx(event))
- reg |= BIT(SYS_PMSCR_EL1_CX_SHIFT);
+ reg |= PMSCR_EL1_CX;
return reg;
}
@@ -302,8 +324,7 @@ static void arm_spe_event_sanitise_period(struct perf_event *event)
{
struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
u64 period = event->hw.sample_period;
- u64 max_period = SYS_PMSIRR_EL1_INTERVAL_MASK
- << SYS_PMSIRR_EL1_INTERVAL_SHIFT;
+ u64 max_period = PMSIRR_EL1_INTERVAL_MASK;
if (period < spe_pmu->min_period)
period = spe_pmu->min_period;
@@ -322,7 +343,7 @@ static u64 arm_spe_event_to_pmsirr(struct perf_event *event)
arm_spe_event_sanitise_period(event);
- reg |= ATTR_CFG_GET_FLD(attr, jitter) << SYS_PMSIRR_EL1_RND_SHIFT;
+ reg |= FIELD_PREP(PMSIRR_EL1_RND, ATTR_CFG_GET_FLD(attr, jitter));
reg |= event->hw.sample_period;
return reg;
@@ -333,18 +354,21 @@ static u64 arm_spe_event_to_pmsfcr(struct perf_event *event)
struct perf_event_attr *attr = &event->attr;
u64 reg = 0;
- reg |= ATTR_CFG_GET_FLD(attr, load_filter) << SYS_PMSFCR_EL1_LD_SHIFT;
- reg |= ATTR_CFG_GET_FLD(attr, store_filter) << SYS_PMSFCR_EL1_ST_SHIFT;
- reg |= ATTR_CFG_GET_FLD(attr, branch_filter) << SYS_PMSFCR_EL1_B_SHIFT;
+ reg |= FIELD_PREP(PMSFCR_EL1_LD, ATTR_CFG_GET_FLD(attr, load_filter));
+ reg |= FIELD_PREP(PMSFCR_EL1_ST, ATTR_CFG_GET_FLD(attr, store_filter));
+ reg |= FIELD_PREP(PMSFCR_EL1_B, ATTR_CFG_GET_FLD(attr, branch_filter));
if (reg)
- reg |= BIT(SYS_PMSFCR_EL1_FT_SHIFT);
+ reg |= PMSFCR_EL1_FT;
if (ATTR_CFG_GET_FLD(attr, event_filter))
- reg |= BIT(SYS_PMSFCR_EL1_FE_SHIFT);
+ reg |= PMSFCR_EL1_FE;
+
+ if (ATTR_CFG_GET_FLD(attr, inv_event_filter))
+ reg |= PMSFCR_EL1_FnE;
if (ATTR_CFG_GET_FLD(attr, min_latency))
- reg |= BIT(SYS_PMSFCR_EL1_FL_SHIFT);
+ reg |= PMSFCR_EL1_FL;
return reg;
}
@@ -355,11 +379,16 @@ static u64 arm_spe_event_to_pmsevfr(struct perf_event *event)
return ATTR_CFG_GET_FLD(attr, event_filter);
}
+static u64 arm_spe_event_to_pmsnevfr(struct perf_event *event)
+{
+ struct perf_event_attr *attr = &event->attr;
+ return ATTR_CFG_GET_FLD(attr, inv_event_filter);
+}
+
static u64 arm_spe_event_to_pmslatfr(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
- return ATTR_CFG_GET_FLD(attr, min_latency)
- << SYS_PMSLATFR_EL1_MINLAT_SHIFT;
+ return FIELD_PREP(PMSLATFR_EL1_MINLAT, ATTR_CFG_GET_FLD(attr, min_latency));
}
static void arm_spe_pmu_pad_buf(struct perf_output_handle *handle, int len)
@@ -511,7 +540,7 @@ static void arm_spe_perf_aux_output_begin(struct perf_output_handle *handle,
limit = buf->snapshot ? arm_spe_pmu_next_snapshot_off(handle)
: arm_spe_pmu_next_off(handle);
if (limit)
- limit |= BIT(SYS_PMBLIMITR_EL1_E_SHIFT);
+ limit |= PMBLIMITR_EL1_E;
limit += (u64)buf->base;
base = (u64)buf->base + PERF_IDX2OFF(handle->head, buf);
@@ -570,28 +599,28 @@ arm_spe_pmu_buf_get_fault_act(struct perf_output_handle *handle)
/* Service required? */
pmbsr = read_sysreg_s(SYS_PMBSR_EL1);
- if (!(pmbsr & BIT(SYS_PMBSR_EL1_S_SHIFT)))
+ if (!FIELD_GET(PMBSR_EL1_S, pmbsr))
return SPE_PMU_BUF_FAULT_ACT_SPURIOUS;
/*
* If we've lost data, disable profiling and also set the PARTIAL
* flag to indicate that the last record is corrupted.
*/
- if (pmbsr & BIT(SYS_PMBSR_EL1_DL_SHIFT))
+ if (FIELD_GET(PMBSR_EL1_DL, pmbsr))
perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED |
PERF_AUX_FLAG_PARTIAL);
/* Report collisions to userspace so that it can up the period */
- if (pmbsr & BIT(SYS_PMBSR_EL1_COLL_SHIFT))
+ if (FIELD_GET(PMBSR_EL1_COLL, pmbsr))
perf_aux_output_flag(handle, PERF_AUX_FLAG_COLLISION);
/* We only expect buffer management events */
- switch (pmbsr & (SYS_PMBSR_EL1_EC_MASK << SYS_PMBSR_EL1_EC_SHIFT)) {
- case SYS_PMBSR_EL1_EC_BUF:
+ switch (FIELD_GET(PMBSR_EL1_EC, pmbsr)) {
+ case PMBSR_EL1_EC_BUF:
/* Handled below */
break;
- case SYS_PMBSR_EL1_EC_FAULT_S1:
- case SYS_PMBSR_EL1_EC_FAULT_S2:
+ case PMBSR_EL1_EC_FAULT_S1:
+ case PMBSR_EL1_EC_FAULT_S2:
err_str = "Unexpected buffer fault";
goto out_err;
default:
@@ -600,9 +629,8 @@ arm_spe_pmu_buf_get_fault_act(struct perf_output_handle *handle)
}
/* Buffer management event */
- switch (pmbsr &
- (SYS_PMBSR_EL1_BUF_BSC_MASK << SYS_PMBSR_EL1_BUF_BSC_SHIFT)) {
- case SYS_PMBSR_EL1_BUF_BSC_FULL:
+ switch (FIELD_GET(PMBSR_EL1_BUF_BSC_MASK, pmbsr)) {
+ case PMBSR_EL1_BUF_BSC_FULL:
ret = SPE_PMU_BUF_FAULT_ACT_OK;
goto out_stop;
default:
@@ -677,11 +705,13 @@ static u64 arm_spe_pmsevfr_res0(u16 pmsver)
{
switch (pmsver) {
case ID_AA64DFR0_EL1_PMSVer_IMP:
- return SYS_PMSEVFR_EL1_RES0_8_2;
+ return PMSEVFR_EL1_RES0_IMP;
case ID_AA64DFR0_EL1_PMSVer_V1P1:
+ return PMSEVFR_EL1_RES0_V1P1;
+ case ID_AA64DFR0_EL1_PMSVer_V1P2:
/* Return the highest version we support in default */
default:
- return SYS_PMSEVFR_EL1_RES0_8_3;
+ return PMSEVFR_EL1_RES0_V1P2;
}
}
@@ -703,6 +733,9 @@ static int arm_spe_pmu_event_init(struct perf_event *event)
if (arm_spe_event_to_pmsevfr(event) & arm_spe_pmsevfr_res0(spe_pmu->pmsver))
return -EOPNOTSUPP;
+ if (arm_spe_event_to_pmsnevfr(event) & arm_spe_pmsevfr_res0(spe_pmu->pmsver))
+ return -EOPNOTSUPP;
+
if (attr->exclude_idle)
return -EOPNOTSUPP;
@@ -717,23 +750,26 @@ static int arm_spe_pmu_event_init(struct perf_event *event)
return -EINVAL;
reg = arm_spe_event_to_pmsfcr(event);
- if ((reg & BIT(SYS_PMSFCR_EL1_FE_SHIFT)) &&
+ if ((FIELD_GET(PMSFCR_EL1_FE, reg)) &&
!(spe_pmu->features & SPE_PMU_FEAT_FILT_EVT))
return -EOPNOTSUPP;
- if ((reg & BIT(SYS_PMSFCR_EL1_FT_SHIFT)) &&
+ if ((FIELD_GET(PMSFCR_EL1_FnE, reg)) &&
+ !(spe_pmu->features & SPE_PMU_FEAT_INV_FILT_EVT))
+ return -EOPNOTSUPP;
+
+ if ((FIELD_GET(PMSFCR_EL1_FT, reg)) &&
!(spe_pmu->features & SPE_PMU_FEAT_FILT_TYP))
return -EOPNOTSUPP;
- if ((reg & BIT(SYS_PMSFCR_EL1_FL_SHIFT)) &&
+ if ((FIELD_GET(PMSFCR_EL1_FL, reg)) &&
!(spe_pmu->features & SPE_PMU_FEAT_FILT_LAT))
return -EOPNOTSUPP;
set_spe_event_has_cx(event);
reg = arm_spe_event_to_pmscr(event);
if (!perfmon_capable() &&
- (reg & (BIT(SYS_PMSCR_EL1_PA_SHIFT) |
- BIT(SYS_PMSCR_EL1_PCT_SHIFT))))
+ (reg & (PMSCR_EL1_PA | PMSCR_EL1_PCT)))
return -EACCES;
return 0;
@@ -757,6 +793,11 @@ static void arm_spe_pmu_start(struct perf_event *event, int flags)
reg = arm_spe_event_to_pmsevfr(event);
write_sysreg_s(reg, SYS_PMSEVFR_EL1);
+ if (spe_pmu->features & SPE_PMU_FEAT_INV_FILT_EVT) {
+ reg = arm_spe_event_to_pmsnevfr(event);
+ write_sysreg_s(reg, SYS_PMSNEVFR_EL1);
+ }
+
reg = arm_spe_event_to_pmslatfr(event);
write_sysreg_s(reg, SYS_PMSLATFR_EL1);
@@ -971,14 +1012,14 @@ static void __arm_spe_pmu_dev_probe(void *info)
/* Read PMBIDR first to determine whether or not we have access */
reg = read_sysreg_s(SYS_PMBIDR_EL1);
- if (reg & BIT(SYS_PMBIDR_EL1_P_SHIFT)) {
+ if (FIELD_GET(PMBIDR_EL1_P, reg)) {
dev_err(dev,
"profiling buffer owned by higher exception level\n");
return;
}
/* Minimum alignment. If it's out-of-range, then fail the probe */
- fld = reg >> SYS_PMBIDR_EL1_ALIGN_SHIFT & SYS_PMBIDR_EL1_ALIGN_MASK;
+ fld = FIELD_GET(PMBIDR_EL1_ALIGN, reg);
spe_pmu->align = 1 << fld;
if (spe_pmu->align > SZ_2K) {
dev_err(dev, "unsupported PMBIDR.Align [%d] on CPU %d\n",
@@ -988,58 +1029,61 @@ static void __arm_spe_pmu_dev_probe(void *info)
/* It's now safe to read PMSIDR and figure out what we've got */
reg = read_sysreg_s(SYS_PMSIDR_EL1);
- if (reg & BIT(SYS_PMSIDR_EL1_FE_SHIFT))
+ if (FIELD_GET(PMSIDR_EL1_FE, reg))
spe_pmu->features |= SPE_PMU_FEAT_FILT_EVT;
- if (reg & BIT(SYS_PMSIDR_EL1_FT_SHIFT))
+ if (FIELD_GET(PMSIDR_EL1_FnE, reg))
+ spe_pmu->features |= SPE_PMU_FEAT_INV_FILT_EVT;
+
+ if (FIELD_GET(PMSIDR_EL1_FT, reg))
spe_pmu->features |= SPE_PMU_FEAT_FILT_TYP;
- if (reg & BIT(SYS_PMSIDR_EL1_FL_SHIFT))
+ if (FIELD_GET(PMSIDR_EL1_FL, reg))
spe_pmu->features |= SPE_PMU_FEAT_FILT_LAT;
- if (reg & BIT(SYS_PMSIDR_EL1_ARCHINST_SHIFT))
+ if (FIELD_GET(PMSIDR_EL1_ARCHINST, reg))
spe_pmu->features |= SPE_PMU_FEAT_ARCH_INST;
- if (reg & BIT(SYS_PMSIDR_EL1_LDS_SHIFT))
+ if (FIELD_GET(PMSIDR_EL1_LDS, reg))
spe_pmu->features |= SPE_PMU_FEAT_LDS;
- if (reg & BIT(SYS_PMSIDR_EL1_ERND_SHIFT))
+ if (FIELD_GET(PMSIDR_EL1_ERND, reg))
spe_pmu->features |= SPE_PMU_FEAT_ERND;
/* This field has a spaced out encoding, so just use a look-up */
- fld = reg >> SYS_PMSIDR_EL1_INTERVAL_SHIFT & SYS_PMSIDR_EL1_INTERVAL_MASK;
+ fld = FIELD_GET(PMSIDR_EL1_INTERVAL, reg);
switch (fld) {
- case 0:
+ case PMSIDR_EL1_INTERVAL_256:
spe_pmu->min_period = 256;
break;
- case 2:
+ case PMSIDR_EL1_INTERVAL_512:
spe_pmu->min_period = 512;
break;
- case 3:
+ case PMSIDR_EL1_INTERVAL_768:
spe_pmu->min_period = 768;
break;
- case 4:
+ case PMSIDR_EL1_INTERVAL_1024:
spe_pmu->min_period = 1024;
break;
- case 5:
+ case PMSIDR_EL1_INTERVAL_1536:
spe_pmu->min_period = 1536;
break;
- case 6:
+ case PMSIDR_EL1_INTERVAL_2048:
spe_pmu->min_period = 2048;
break;
- case 7:
+ case PMSIDR_EL1_INTERVAL_3072:
spe_pmu->min_period = 3072;
break;
default:
dev_warn(dev, "unknown PMSIDR_EL1.Interval [%d]; assuming 8\n",
fld);
fallthrough;
- case 8:
+ case PMSIDR_EL1_INTERVAL_4096:
spe_pmu->min_period = 4096;
}
/* Maximum record size. If it's out-of-range, then fail the probe */
- fld = reg >> SYS_PMSIDR_EL1_MAXSIZE_SHIFT & SYS_PMSIDR_EL1_MAXSIZE_MASK;
+ fld = FIELD_GET(PMSIDR_EL1_MAXSIZE, reg);
spe_pmu->max_record_sz = 1 << fld;
if (spe_pmu->max_record_sz > SZ_2K || spe_pmu->max_record_sz < 16) {
dev_err(dev, "unsupported PMSIDR_EL1.MaxSize [%d] on CPU %d\n",
@@ -1047,22 +1091,22 @@ static void __arm_spe_pmu_dev_probe(void *info)
return;
}
- fld = reg >> SYS_PMSIDR_EL1_COUNTSIZE_SHIFT & SYS_PMSIDR_EL1_COUNTSIZE_MASK;
+ fld = FIELD_GET(PMSIDR_EL1_COUNTSIZE, reg);
switch (fld) {
default:
dev_warn(dev, "unknown PMSIDR_EL1.CountSize [%d]; assuming 2\n",
fld);
fallthrough;
- case 2:
+ case PMSIDR_EL1_COUNTSIZE_12_BIT_SAT:
spe_pmu->counter_sz = 12;
break;
- case 3:
+ case PMSIDR_EL1_COUNTSIZE_16_BIT_SAT:
spe_pmu->counter_sz = 16;
}
dev_info(dev,
- "probed for CPUs %*pbl [max_record_sz %u, align %u, features 0x%llx]\n",
- cpumask_pr_args(&spe_pmu->supported_cpus),
+ "probed SPEv1.%d for CPUs %*pbl [max_record_sz %u, align %u, features 0x%llx]\n",
+ spe_pmu->pmsver - 1, cpumask_pr_args(&spe_pmu->supported_cpus),
spe_pmu->max_record_sz, spe_pmu->align, spe_pmu->features);
spe_pmu->features |= SPE_PMU_FEAT_DEV_PROBED;
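Aside from the inverted-filter support, most of the arm_spe_pmu.c hunks above are a mechanical conversion from open-coded shift/mask arithmetic to the <linux/bitfield.h> helpers, which derive the shift from the field mask at compile time. A small standalone sketch of that pattern, using a made-up two-bit field rather than any real PMS* register layout:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Hypothetical 2-bit field occupying bits [5:4] of a 64-bit register. */
#define EXAMPLE_FIELD	GENMASK(5, 4)

static inline u64 pack_example(u64 reg, u64 val)
{
	/* FIELD_PREP(mask, val): shift val into position under mask. */
	return reg | FIELD_PREP(EXAMPLE_FIELD, val);
}

static inline u64 unpack_example(u64 reg)
{
	/* FIELD_GET(mask, reg): extract the field and right-justify it. */
	return FIELD_GET(EXAMPLE_FIELD, reg);
}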
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index 8e058e08fe81..5222ba1e79d0 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -97,7 +97,6 @@ struct ddr_pmu {
struct hlist_node node;
struct device *dev;
struct perf_event *events[NUM_COUNTERS];
- int active_events;
enum cpuhp_state cpuhp_state;
const struct fsl_ddr_devtype_data *devtype_data;
int irq;
@@ -530,7 +529,6 @@ static int ddr_perf_event_add(struct perf_event *event, int flags)
}
pmu->events[counter] = event;
- pmu->active_events++;
hwc->idx = counter;
hwc->state |= PERF_HES_STOPPED;
@@ -562,7 +560,6 @@ static void ddr_perf_event_del(struct perf_event *event, int flags)
ddr_perf_event_stop(event, PERF_EF_UPDATE);
ddr_perf_free_counter(pmu, counter);
- pmu->active_events--;
hwc->idx = -1;
}
diff --git a/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c
index a9bb73f76be4..4c67d57217a7 100644
--- a/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c
@@ -316,21 +316,7 @@ static int hisi_cpa_pmu_probe(struct platform_device *pdev)
if (!name)
return -ENOMEM;
- cpa_pmu->pmu = (struct pmu) {
- .name = name,
- .module = THIS_MODULE,
- .task_ctx_nr = perf_invalid_context,
- .event_init = hisi_uncore_pmu_event_init,
- .pmu_enable = hisi_uncore_pmu_enable,
- .pmu_disable = hisi_uncore_pmu_disable,
- .add = hisi_uncore_pmu_add,
- .del = hisi_uncore_pmu_del,
- .start = hisi_uncore_pmu_start,
- .stop = hisi_uncore_pmu_stop,
- .read = hisi_uncore_pmu_read,
- .attr_groups = cpa_pmu->pmu_events.attr_groups,
- .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
- };
+ hisi_pmu_init(cpa_pmu, name, THIS_MODULE);
/* Power Management should be disabled before using CPA PMU. */
hisi_cpa_pmu_disable_pm(cpa_pmu);
diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
index 50d0c0a2f1fe..8c3ffcbfd4c0 100644
--- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
@@ -516,7 +516,7 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
"hisi_sccl%u_ddrc%u", ddrc_pmu->sccl_id,
ddrc_pmu->index_id);
- hisi_pmu_init(&ddrc_pmu->pmu, name, ddrc_pmu->pmu_events.attr_groups, THIS_MODULE);
+ hisi_pmu_init(ddrc_pmu, name, THIS_MODULE);
ret = perf_pmu_register(&ddrc_pmu->pmu, name, -1);
if (ret) {
diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
index 13017b3412a5..806698b9eabf 100644
--- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
@@ -519,7 +519,7 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev)
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_hha%u",
hha_pmu->sccl_id, hha_pmu->index_id);
- hisi_pmu_init(&hha_pmu->pmu, name, hha_pmu->pmu_events.attr_groups, THIS_MODULE);
+ hisi_pmu_init(hha_pmu, name, THIS_MODULE);
ret = perf_pmu_register(&hha_pmu->pmu, name, -1);
if (ret) {
diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
index 2995f3630d49..5b2c35f1658a 100644
--- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
@@ -557,7 +557,7 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev)
*/
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_l3c%u",
l3c_pmu->sccl_id, l3c_pmu->ccl_id);
- hisi_pmu_init(&l3c_pmu->pmu, name, l3c_pmu->pmu_events.attr_groups, THIS_MODULE);
+ hisi_pmu_init(l3c_pmu, name, THIS_MODULE);
ret = perf_pmu_register(&l3c_pmu->pmu, name, -1);
if (ret) {
diff --git a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
index 47d3cc9b6eec..afe3419f3f6d 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
@@ -412,7 +412,7 @@ static int hisi_pa_pmu_probe(struct platform_device *pdev)
return ret;
}
- hisi_pmu_init(&pa_pmu->pmu, name, pa_pmu->pmu_events.attr_groups, THIS_MODULE);
+ hisi_pmu_init(pa_pmu, name, THIS_MODULE);
ret = perf_pmu_register(&pa_pmu->pmu, name, -1);
if (ret) {
dev_err(pa_pmu->dev, "PMU register failed, ret = %d\n", ret);
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c
index fbc8a93d5eac..f1b0f5e1a28f 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -531,9 +531,11 @@ int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_offline_cpu);
-void hisi_pmu_init(struct pmu *pmu, const char *name,
- const struct attribute_group **attr_groups, struct module *module)
+void hisi_pmu_init(struct hisi_pmu *hisi_pmu, const char *name,
+ struct module *module)
{
+ struct pmu *pmu = &hisi_pmu->pmu;
+
pmu->name = name;
pmu->module = module;
pmu->task_ctx_nr = perf_invalid_context;
@@ -545,7 +547,8 @@ void hisi_pmu_init(struct pmu *pmu, const char *name,
pmu->start = hisi_uncore_pmu_start;
pmu->stop = hisi_uncore_pmu_stop;
pmu->read = hisi_uncore_pmu_read;
- pmu->attr_groups = attr_groups;
+ pmu->attr_groups = hisi_pmu->pmu_events.attr_groups;
+ pmu->capabilities = PERF_PMU_CAP_NO_EXCLUDE;
}
EXPORT_SYMBOL_GPL(hisi_pmu_init);
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.h b/drivers/perf/hisilicon/hisi_uncore_pmu.h
index b59de33cd059..f8e3cc6903d7 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.h
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.h
@@ -121,6 +121,6 @@ ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev,
int hisi_uncore_pmu_init_irq(struct hisi_pmu *hisi_pmu,
struct platform_device *pdev);
-void hisi_pmu_init(struct pmu *pmu, const char *name,
- const struct attribute_group **attr_groups, struct module *module);
+void hisi_pmu_init(struct hisi_pmu *hisi_pmu, const char *name,
+ struct module *module);
#endif /* __HISI_UNCORE_PMU_H__ */
diff --git a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
index b9c79f17230c..1e354433776a 100644
--- a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
@@ -445,7 +445,7 @@ static int hisi_sllc_pmu_probe(struct platform_device *pdev)
return ret;
}
- hisi_pmu_init(&sllc_pmu->pmu, name, sllc_pmu->pmu_events.attr_groups, THIS_MODULE);
+ hisi_pmu_init(sllc_pmu, name, THIS_MODULE);
ret = perf_pmu_register(&sllc_pmu->pmu, name, -1);
if (ret) {
diff --git a/drivers/perf/marvell_cn10k_ddr_pmu.c b/drivers/perf/marvell_cn10k_ddr_pmu.c
index 665b382a0ee3..b94a5f6cc22b 100644
--- a/drivers/perf/marvell_cn10k_ddr_pmu.c
+++ b/drivers/perf/marvell_cn10k_ddr_pmu.c
@@ -12,6 +12,7 @@
#include <linux/of_device.h>
#include <linux/perf_event.h>
#include <linux/hrtimer.h>
+#include <linux/acpi.h>
/* Performance Counters Operating Mode Control Registers */
#define DDRC_PERF_CNT_OP_MODE_CTRL 0x8020
@@ -717,10 +718,19 @@ static const struct of_device_id cn10k_ddr_pmu_of_match[] = {
MODULE_DEVICE_TABLE(of, cn10k_ddr_pmu_of_match);
#endif
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cn10k_ddr_pmu_acpi_match[] = {
+ {"MRVL000A", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, cn10k_ddr_pmu_acpi_match);
+#endif
+
static struct platform_driver cn10k_ddr_pmu_driver = {
.driver = {
.name = "cn10k-ddr-pmu",
.of_match_table = of_match_ptr(cn10k_ddr_pmu_of_match),
+ .acpi_match_table = ACPI_PTR(cn10k_ddr_pmu_acpi_match),
.suppress_bind_attrs = true,
},
.probe = cn10k_ddr_perf_probe,
diff --git a/drivers/perf/marvell_cn10k_tad_pmu.c b/drivers/perf/marvell_cn10k_tad_pmu.c
index a1166afb3702..3972197e2210 100644
--- a/drivers/perf/marvell_cn10k_tad_pmu.c
+++ b/drivers/perf/marvell_cn10k_tad_pmu.c
@@ -13,6 +13,7 @@
#include <linux/cpuhotplug.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
+#include <linux/acpi.h>
#define TAD_PFC_OFFSET 0x800
#define TAD_PFC(counter) (TAD_PFC_OFFSET | (counter << 3))
@@ -254,7 +255,7 @@ static const struct attribute_group *tad_pmu_attr_groups[] = {
static int tad_pmu_probe(struct platform_device *pdev)
{
- struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
struct tad_region *regions;
struct tad_pmu *tad_pmu;
struct resource *res;
@@ -276,21 +277,21 @@ static int tad_pmu_probe(struct platform_device *pdev)
return -ENODEV;
}
- ret = of_property_read_u32(node, "marvell,tad-page-size",
- &tad_page_size);
+ ret = device_property_read_u32(dev, "marvell,tad-page-size",
+ &tad_page_size);
if (ret) {
dev_err(&pdev->dev, "Can't find tad-page-size property\n");
return ret;
}
- ret = of_property_read_u32(node, "marvell,tad-pmu-page-size",
- &tad_pmu_page_size);
+ ret = device_property_read_u32(dev, "marvell,tad-pmu-page-size",
+ &tad_pmu_page_size);
if (ret) {
dev_err(&pdev->dev, "Can't find tad-pmu-page-size property\n");
return ret;
}
- ret = of_property_read_u32(node, "marvell,tad-cnt", &tad_cnt);
+ ret = device_property_read_u32(dev, "marvell,tad-cnt", &tad_cnt);
if (ret) {
dev_err(&pdev->dev, "Can't find tad-cnt property\n");
return ret;
@@ -369,10 +370,19 @@ static const struct of_device_id tad_pmu_of_match[] = {
};
#endif
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id tad_pmu_acpi_match[] = {
+ {"MRVL000B", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, tad_pmu_acpi_match);
+#endif
+
static struct platform_driver tad_pmu_driver = {
.driver = {
.name = "cn10k_tad_pmu",
.of_match_table = of_match_ptr(tad_pmu_of_match),
+ .acpi_match_table = ACPI_PTR(tad_pmu_acpi_match),
.suppress_bind_attrs = true,
},
.probe = tad_pmu_probe,