Diffstat (limited to 'arch/alpha/kernel/perf_event.c'):
 arch/alpha/kernel/perf_event.c | 66
 1 file changed, 32 insertions(+), 34 deletions(-)
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index d821b17047e0..a3eaab094ece 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Hardware performance events for the Alpha.
*
@@ -83,6 +84,8 @@ struct alpha_pmu_t {
long pmc_left[3];
/* Subroutine for allocation of PMCs. Enforces constraints. */
int (*check_constraints)(struct perf_event **, unsigned long *, int);
+ /* Subroutine for checking validity of a raw event for this PMU. */
+ int (*raw_event_valid)(u64 config);
};
/*
@@ -203,6 +206,12 @@ success:
}
+static int ev67_raw_event_valid(u64 config)
+{
+ return config >= EV67_CYCLES && config < EV67_LAST_ET;
+}
+
+
static const struct alpha_pmu_t ev67_pmu = {
.event_map = ev67_perfmon_event_map,
.max_events = ARRAY_SIZE(ev67_perfmon_event_map),
@@ -211,7 +220,8 @@ static const struct alpha_pmu_t ev67_pmu = {
.pmc_count_mask = {EV67_PCTR_0_COUNT_MASK, EV67_PCTR_1_COUNT_MASK, 0},
.pmc_max_period = {(1UL<<20) - 1, (1UL<<20) - 1, 0},
.pmc_left = {16, 4, 0},
- .check_constraints = ev67_check_constraints
+ .check_constraints = ev67_check_constraints,
+ .raw_event_valid = ev67_raw_event_valid,
};
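
The new hook relies on the EV67 monitoring events forming one contiguous enum: EV67_CYCLES is the first valid entry and EV67_LAST_ET a sentinel one past the last, so a half-open range check suffices. A self-contained sketch of the pattern (the enum values are illustrative stand-ins, not necessarily the driver's actual encodings):

    #include <linux/types.h>	/* u64 */

    /* Illustrative stand-in for the driver's EV67 event enumeration. */
    enum ev67_pmc_event {
            EV67_CYCLES = 1,	/* first valid raw event */
            EV67_INSTRUCTIONS,
            EV67_BCACHEMISS,
            EV67_MBOXREPLAY,
            EV67_LAST_ET,		/* sentinel: one past the last event */
    };

    static int ev67_raw_event_valid(u64 config)
    {
            /* Valid raw configs lie in [EV67_CYCLES, EV67_LAST_ET). */
            return config >= EV67_CYCLES && config < EV67_LAST_ET;
    }

Keeping the check behind a function pointer in struct alpha_pmu_t means a future descriptor for another CPU variant can supply its own validator without touching the generic event-init path.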
@@ -341,7 +351,7 @@ static int collect_events(struct perf_event *group, int max_count,
evtype[n] = group->hw.event_base;
current_idx[n++] = PMC_NO_INDEX;
}
- list_for_each_entry(pe, &group->sibling_list, group_entry) {
+ for_each_sibling_event(pe, group) {
if (!is_software_event(pe) && pe->state != PERF_EVENT_STATE_OFF) {
if (n >= max_count)
return -1;
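
for_each_sibling_event() is the core helper that replaced open-coded walks of sibling_list after the separate group_entry linkage was folded into it; it also refuses to iterate when handed a non-leader. Approximately, from include/linux/perf_event.h (details may vary by kernel version):

    #define for_each_sibling_event(sibling, event)			\
            if ((event)->group_leader == (event))			\
                    list_for_each_entry((sibling), &(event)->sibling_list, sibling_list)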
@@ -422,7 +432,7 @@ static void maybe_change_configuration(struct cpu_hw_events *cpuc)
*/
static int alpha_pmu_add(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int n0;
int ret;
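
This hunk and the matching ones below are a single mechanical conversion: __get_cpu_var() was removed during the kernel-wide this_cpu accessor cleanup, so taking the address of the lvalue it returned becomes a direct pointer request. Side by side:

    /* Old idiom: macro yields an lvalue, caller takes its address. */
    struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

    /* New idiom: ask directly for a pointer to this CPU's instance;
     * depending on the architecture this can use dedicated per-CPU
     * addressing instead of an explicit per_cpu_offset() computation. */
    struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

Both forms require preemption to be disabled, which holds on these paths: pmu callbacks and the PMI handler run with interrupts off. The same conversion turns __get_cpu_var(irq_pmi_count)++ into __this_cpu_inc(irq_pmi_count) in the interrupt handler further down.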
@@ -474,7 +484,7 @@ static int alpha_pmu_add(struct perf_event *event, int flags)
*/
static void alpha_pmu_del(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
unsigned long irq_flags;
int j;
@@ -522,7 +532,7 @@ static void alpha_pmu_read(struct perf_event *event)
static void alpha_pmu_stop(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
if (!(hwc->state & PERF_HES_STOPPED)) {
cpuc->idx_mask &= ~(1UL<<hwc->idx);
@@ -542,7 +552,7 @@ static void alpha_pmu_stop(struct perf_event *event, int flags)
static void alpha_pmu_start(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
return;
@@ -564,7 +574,7 @@ static void alpha_pmu_start(struct perf_event *event, int flags)
* Check that CPU performance counters are supported.
* - currently support EV67 and later CPUs.
* - actually some later revisions of the EV6 have the same PMC model as the
- * EV67 but we don't do suffiently deep CPU detection to detect them.
+ * EV67 but we don't do sufficiently deep CPU detection to detect them.
* Bad luck to the very few people who might have one, I guess.
*/
static int supported_cpu(void)
@@ -609,7 +619,9 @@ static int __hw_perf_event_init(struct perf_event *event)
} else if (attr->type == PERF_TYPE_HW_CACHE) {
return -EOPNOTSUPP;
} else if (attr->type == PERF_TYPE_RAW) {
- ev = attr->config & 0xff;
+ if (!alpha_pmu->raw_event_valid(attr->config))
+ return -EINVAL;
+ ev = attr->config;
} else {
return -EOPNOTSUPP;
}
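
Previously any raw config was silently truncated to its low byte and accepted, so an out-of-range value programmed some unintended event; now the open fails up front. From userspace the change is visible through perf_event_open(); a minimal sketch (the helper and its argument are hypothetical):

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Open a raw hardware counter on the calling thread, any CPU.
     * With the validation above, an invalid .config now fails with
     * EINVAL instead of silently counting the wrong event. */
    static int open_raw_counter(unsigned long long config)
    {
            struct perf_event_attr attr = {
                    .size   = sizeof(attr),
                    .type   = PERF_TYPE_RAW,
                    .config = config,	/* e.g. an EV67 event code */
            };
            return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
    }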
@@ -618,12 +630,6 @@ static int __hw_perf_event_init(struct perf_event *event)
return ev;
}
- /* The EV67 does not support mode exclusion */
- if (attr->exclude_kernel || attr->exclude_user
- || attr->exclude_hv || attr->exclude_idle) {
- return -EPERM;
- }
-
/*
* We place the event type in event_base here and leave calculation
* of the codes to programme the PMU for alpha_pmu_enable() because
@@ -683,8 +689,6 @@ static int __hw_perf_event_init(struct perf_event *event)
*/
static int alpha_pmu_event_init(struct perf_event *event)
{
- int err;
-
/* does not support taken branch sampling */
if (has_branch_stack(event))
return -EOPNOTSUPP;
@@ -703,9 +707,7 @@ static int alpha_pmu_event_init(struct perf_event *event)
return -ENODEV;
/* Do the real initialisation work. */
- err = __hw_perf_event_init(event);
-
- return err;
+ return __hw_perf_event_init(event);
}
/*
@@ -713,7 +715,7 @@ static int alpha_pmu_event_init(struct perf_event *event)
*/
static void alpha_pmu_enable(struct pmu *pmu)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
if (cpuc->enabled)
return;
@@ -739,7 +741,7 @@ static void alpha_pmu_enable(struct pmu *pmu)
static void alpha_pmu_disable(struct pmu *pmu)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
if (!cpuc->enabled)
return;
@@ -759,6 +761,7 @@ static struct pmu pmu = {
.start = alpha_pmu_start,
.stop = alpha_pmu_stop,
.read = alpha_pmu_read,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
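
Advertising PERF_PMU_CAP_NO_EXCLUDE is what makes the hand-rolled exclusion check deleted from __hw_perf_event_init() above safe to drop: the perf core now rejects any event carrying exclude_* bits before the driver ever has to care. Paraphrased from kernel/events/core.c (exact placement varies by version):

    /* In perf_try_init_event(), after ->event_init() succeeds: */
    if ((pmu->capabilities & PERF_PMU_CAP_NO_EXCLUDE) &&
        event_has_any_exclude_flag(event))
            ret = -EINVAL;		/* undo init, reject the event */

One behavioural nit: the driver used to return -EPERM for excluded modes, whereas the core check returns -EINVAL, consistent with other PMUs that cannot filter by privilege mode.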
@@ -803,8 +806,8 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
struct hw_perf_event *hwc;
int idx, j;
- __get_cpu_var(irq_pmi_count)++;
- cpuc = &__get_cpu_var(cpu_hw_events);
+ __this_cpu_inc(irq_pmi_count);
+ cpuc = this_cpu_ptr(&cpu_hw_events);
/* Completely counting through the PMC's period to trigger a new PMC
* overflow interrupt while in this interrupt routine is utterly
@@ -817,7 +820,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) {
/* This should never occur! */
irq_err_count++;
- pr_warning("PMI: silly index %ld\n", la_ptr);
+ pr_warn("PMI: silly index %ld\n", la_ptr);
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
return;
}
@@ -840,7 +843,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
if (unlikely(!event)) {
/* This should never occur! */
irq_err_count++;
- pr_warning("PMI: No event at index %d!\n", idx);
+ pr_warn("PMI: No event at index %d!\n", idx);
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
return;
}
@@ -849,14 +852,9 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1);
perf_sample_data_init(&data, 0, hwc->last_period);
- if (alpha_perf_event_set_period(event, hwc, idx)) {
- if (perf_event_overflow(event, &data, regs)) {
- /* Interrupts coming too quickly; "throttle" the
- * counter, i.e., disable it for a little while.
- */
- alpha_pmu_stop(event, 0);
- }
- }
+ if (alpha_perf_event_set_period(event, hwc, idx))
+ perf_event_overflow(event, &data, regs);
+
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
return;
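
The deleted throttle branch reflects a perf-core change: perf_event_overflow() now enforces the interrupt-rate limit itself, stopping an event through its pmu->stop() callback when samples arrive faster than perf_event_max_sample_rate allows and restarting it from a later timer tick, so arch handlers no longer act on its return value. A heavily condensed paraphrase (sampling and wakeup logic elided; details vary by kernel version):

    static int __perf_event_overflow(struct perf_event *event, int throttle,
                                     struct perf_sample_data *data,
                                     struct pt_regs *regs)
    {
            /* Accounts the interrupt against the allowed rate and, when
             * the event overflows too often, throttles it in the core
             * rather than asking the caller to stop the counter. */
            int ret = __perf_event_account_interrupt(event, throttle);

            /* ... sample emission and fasync/wakeup delivery elided ... */
            return ret;
    }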
@@ -867,7 +865,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
/*
* Init call to initialise performance events at kernel startup.
*/
-int __init init_hw_perf_events(void)
+static int __init init_hw_perf_events(void)
{
pr_info("Performance events: ");