Diffstat (limited to 'arch/loongarch/kernel/perf_event.c')
-rw-r--r--  arch/loongarch/kernel/perf_event.c | 10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/loongarch/kernel/perf_event.c b/arch/loongarch/kernel/perf_event.c
index f86a4b838dd7..9d257c8519c9 100644
--- a/arch/loongarch/kernel/perf_event.c
+++ b/arch/loongarch/kernel/perf_event.c
@@ -479,8 +479,7 @@ static void handle_associated_event(struct cpu_hw_events *cpuc, int idx,
 	if (!loongarch_pmu_event_set_period(event, hwc, idx))
 		return;
 
-	if (perf_event_overflow(event, data, regs))
-		loongarch_pmu_disable_event(idx);
+	perf_event_overflow(event, data, regs);
 }
 
 static irqreturn_t pmu_handle_irq(int irq, void *dev)
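The first hunk drops the arch-side disabling of the counter when perf_event_overflow() requests a stop: its return value is now ignored. Below is a small, hypothetical user-space model of that control-flow change; the stub functions are invented for illustration and only the shape of the call site is taken from the hunk above, not the underlying reason for the change.

```c
/* Toy model of the call-site change; the stubs below are hypothetical. */
#include <stdio.h>

/* Stand-in for perf_event_overflow(): pretend the core requests a stop. */
static int overflow_stub(void) { return 1; }

/* Stand-in for loongarch_pmu_disable_event(). */
static void disable_stub(int idx) { printf("disable counter %d\n", idx); }

/* Pre-patch shape: the handler disabled the counter itself. */
static void handle_old(int idx)
{
	if (overflow_stub())
		disable_stub(idx);
}

/* Post-patch shape: the handler only reports the overflow. */
static void handle_new(int idx)
{
	(void)idx;
	overflow_stub();
}

int main(void)
{
	handle_old(0);   /* prints "disable counter 0" */
	handle_new(0);   /* prints nothing */
	return 0;
}
```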
@@ -846,13 +845,14 @@ static const struct loongarch_perf_event *loongarch_pmu_map_raw_event(u64 config
 
 static int __init init_hw_perf_events(void)
 {
-	int counters;
+	int bits, counters;
 
 	if (!cpu_has_pmp)
 		return -ENODEV;
 
 	pr_info("Performance counters: ");
-	counters = ((read_cpucfg(LOONGARCH_CPUCFG6) & CPUCFG6_PMNUM) >> 4) + 1;
+	bits = ((read_cpucfg(LOONGARCH_CPUCFG6) & CPUCFG6_PMBITS) >> CPUCFG6_PMBITS_SHIFT) + 1;
+	counters = ((read_cpucfg(LOONGARCH_CPUCFG6) & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT) + 1;
 
 	loongarch_pmu.num_counters = counters;
 	loongarch_pmu.max_period = (1ULL << 63) - 1;
@@ -868,7 +868,7 @@ static int __init init_hw_perf_events(void)
 	on_each_cpu(reset_counters, NULL, 1);
 
 	pr_cont("%s PMU enabled, %d %d-bit counters available to each CPU.\n",
-			loongarch_pmu.name, counters, 64);
+			loongarch_pmu.name, counters, bits);
 
 	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
 