Diffstat (limited to 'arch/sh/kernel/perf_event.c')
-rw-r--r--	arch/sh/kernel/perf_event.c	73
1 file changed, 18 insertions(+), 55 deletions(-)
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
index 068b8a2759b5..1d2507f22437 100644
--- a/arch/sh/kernel/perf_event.c
+++ b/arch/sh/kernel/perf_event.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Performance event support framework for SuperH hardware counters.
  *
@@ -10,15 +11,11 @@
  * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  * Copyright (C) 2009 Jaswinder Singh Rajput
  * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
  * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
  *
  * ppc:
  * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
  */
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -60,24 +57,6 @@ static inline int sh_pmu_initialized(void)
 	return !!sh_pmu;
 }
 
-const char *perf_pmu_name(void)
-{
-	if (!sh_pmu)
-		return NULL;
-
-	return sh_pmu->name;
-}
-EXPORT_SYMBOL_GPL(perf_pmu_name);
-
-int perf_num_counters(void)
-{
-	if (!sh_pmu)
-		return 0;
-
-	return sh_pmu->num_events;
-}
-EXPORT_SYMBOL_GPL(perf_num_counters);
-
 /*
  * Release the PMU if this is the last perf_event.
  */
@@ -129,14 +108,6 @@ static int __hw_perf_event_init(struct perf_event *event)
 		return -ENODEV;
 
 	/*
-	 * All of the on-chip counters are "limited", in that they have
-	 * no interrupts, and are therefore unable to do sampling without
-	 * further work and timer assistance.
-	 */
-	if (hwc->sample_period)
-		return -EINVAL;
-
-	/*
 	 * See if we need to reserve the counter.
 	 *
 	 * If no events are currently in use, then we have to take a
@@ -227,7 +198,7 @@ again:
 
 static void sh_pmu_stop(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
 
@@ -245,7 +216,7 @@ static void sh_pmu_stop(struct perf_event *event, int flags)
 
 static void sh_pmu_start(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
 
@@ -262,7 +233,7 @@ static void sh_pmu_start(struct perf_event *event, int flags)
 
 static void sh_pmu_del(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	sh_pmu_stop(event, PERF_EF_UPDATE);
 	__clear_bit(event->hw.idx, cpuc->used_mask);
@@ -272,7 +243,7 @@ static void sh_pmu_del(struct perf_event *event, int flags)
 
 static int sh_pmu_add(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
 	int ret = -EAGAIN;
@@ -360,31 +331,15 @@ static struct pmu pmu = {
 	.read		= sh_pmu_read,
 };
 
-static void sh_pmu_setup(int cpu)
+static int sh_pmu_prepare_cpu(unsigned int cpu)
 {
 	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
 
 	memset(cpuhw, 0, sizeof(struct cpu_hw_events));
+	return 0;
 }
 
-static int __cpuinit
-sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
-{
-	unsigned int cpu = (long)hcpu;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		sh_pmu_setup(cpu);
-		break;
-
-	default:
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-int __cpuinit register_sh_pmu(struct sh_pmu *_pmu)
+int register_sh_pmu(struct sh_pmu *_pmu)
 {
 	if (sh_pmu)
 		return -EBUSY;
@@ -392,9 +347,17 @@ int __cpuinit register_sh_pmu(struct sh_pmu *_pmu)
 
 	pr_info("Performance Events: %s support registered\n", _pmu->name);
 
+	/*
+	 * All of the on-chip counters are "limited", in that they have
+	 * no interrupts, and are therefore unable to do sampling without
+	 * further work and timer assistance.
+	 */
+	pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+
 	WARN_ON(_pmu->num_events > MAX_HWEVENTS);
 
 	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
-	perf_cpu_notifier(sh_pmu_notifier);
+	cpuhp_setup_state(CPUHP_PERF_SUPERH, "PERF_SUPERH", sh_pmu_prepare_cpu,
+			  NULL);
 	return 0;
 }
