From a6c93afed38c242ccf4ec5bcb5ff26ff2521cf36 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Fri, 15 Apr 2011 11:14:38 +0100
Subject: ARM: perf: de-const struct arm_pmu

This patch removes const qualifiers from instances of struct arm_pmu,
and functions initialising them, in preparation for generalising
arm_pmu usage to system (AKA uncore) PMUs.

This will allow for dynamically modifiable structures (locks, struct
pmu) to be added as members of struct arm_pmu.

Acked-by: Jamie Iles
Reviewed-by: Jean Pihet
Signed-off-by: Mark Rutland
Signed-off-by: Will Deacon
---
 arch/arm/kernel/perf_event_v7.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 4c851834f68e..74f9119906d0 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -1188,7 +1188,7 @@ static u32 __init armv7_read_num_pmnc_events(void)
 	return nb_cnt + 1;
 }
 
-static const struct arm_pmu *__init armv7_a8_pmu_init(void)
+static struct arm_pmu *__init armv7_a8_pmu_init(void)
 {
 	armv7pmu.id = ARM_PERF_PMU_ID_CA8;
 	armv7pmu.name = "ARMv7 Cortex-A8";
@@ -1198,7 +1198,7 @@ static const struct arm_pmu *__init armv7_a8_pmu_init(void)
 	return &armv7pmu;
 }
 
-static const struct arm_pmu *__init armv7_a9_pmu_init(void)
+static struct arm_pmu *__init armv7_a9_pmu_init(void)
 {
 	armv7pmu.id = ARM_PERF_PMU_ID_CA9;
 	armv7pmu.name = "ARMv7 Cortex-A9";
@@ -1208,7 +1208,7 @@ static const struct arm_pmu *__init armv7_a9_pmu_init(void)
 	return &armv7pmu;
 }
 
-static const struct arm_pmu *__init armv7_a5_pmu_init(void)
+static struct arm_pmu *__init armv7_a5_pmu_init(void)
 {
 	armv7pmu.id = ARM_PERF_PMU_ID_CA5;
 	armv7pmu.name = "ARMv7 Cortex-A5";
@@ -1218,7 +1218,7 @@ static const struct arm_pmu *__init armv7_a5_pmu_init(void)
 	return &armv7pmu;
 }
 
-static const struct arm_pmu *__init armv7_a15_pmu_init(void)
+static struct arm_pmu *__init armv7_a15_pmu_init(void)
 {
 	armv7pmu.id = ARM_PERF_PMU_ID_CA15;
 	armv7pmu.name = "ARMv7 Cortex-A15";
@@ -1228,22 +1228,22 @@ static const struct arm_pmu *__init armv7_a15_pmu_init(void)
 	return &armv7pmu;
 }
 #else
-static const struct arm_pmu *__init armv7_a8_pmu_init(void)
+static struct arm_pmu *__init armv7_a8_pmu_init(void)
 {
 	return NULL;
 }
 
-static const struct arm_pmu *__init armv7_a9_pmu_init(void)
+static struct arm_pmu *__init armv7_a9_pmu_init(void)
 {
 	return NULL;
 }
 
-static const struct arm_pmu *__init armv7_a5_pmu_init(void)
+static struct arm_pmu *__init armv7_a5_pmu_init(void)
 {
 	return NULL;
 }
 
-static const struct arm_pmu *__init armv7_a15_pmu_init(void)
+static struct arm_pmu *__init armv7_a15_pmu_init(void)
 {
 	return NULL;
 }
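To see concretely what the const was preventing: an object declared const
cannot carry members that must be written at runtime, such as a lock or an
embedded struct pmu. A minimal user-space sketch of the same situation (a
pthread mutex stands in for the kernel's raw_spinlock_t; this is an
illustration, not kernel code):

    #include <pthread.h>
    #include <stdio.h>

    struct pmu_like {
            const char *name;
            pthread_mutex_t lock;   /* runtime-modifiable state */
    };

    /*
     * Were this instance declared 'const struct pmu_like', the lock could
     * never be taken: pthread_mutex_lock() requires a pointer to a
     * non-const mutex, just as the kernel's locking primitives require
     * writable storage.
     */
    static struct pmu_like pmu = {
            .name = "example-pmu",
            .lock = PTHREAD_MUTEX_INITIALIZER,
    };

    int main(void)
    {
            pthread_mutex_lock(&pmu.lock);
            printf("%s: lock taken and released\n", pmu.name);
            pthread_mutex_unlock(&pmu.lock);
            return 0;
    }

Dropping the const on struct arm_pmu opens the door to exactly this kind of
member, which later patches in the series add.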
From 6330aae7dcd54df893813392e310141be7aa5323 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Tue, 19 Jul 2011 14:55:57 +0100
Subject: ARM: perf: use u32 instead of unsigned long for PMNC register

The ARMv7 perf backend mixes up u32 and unsigned long, which is rather
ugly. This patch makes the ARMv7 PMU code consistently use the u32 type
instead.

Acked-by: Jamie Iles
Reviewed-by: Jean Pihet
Signed-off-by: Will Deacon
---
 arch/arm/kernel/perf_event_v7.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 74f9119906d0..f4f260dabee3 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -758,26 +758,26 @@ enum armv7_counters {
 #define ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
 #define ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK
 
-static inline unsigned long armv7_pmnc_read(void)
+static inline u32 armv7_pmnc_read(void)
 {
 	u32 val;
 	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
 	return val;
 }
 
-static inline void armv7_pmnc_write(unsigned long val)
+static inline void armv7_pmnc_write(u32 val)
 {
 	val &= ARMV7_PMNC_MASK;
 	isb();
 	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
 }
 
-static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
+static inline int armv7_pmnc_has_overflowed(u32 pmnc)
 {
 	return pmnc & ARMV7_OVERFLOWED_MASK;
 }
 
-static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
+static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc,
 					enum armv7_counters counter)
 {
 	int ret = 0;
@@ -812,7 +812,7 @@ static inline int armv7_pmnc_select_counter(unsigned int idx)
 
 static inline u32 armv7pmu_read_counter(int idx)
 {
-	unsigned long value = 0;
+	u32 value = 0;
 
 	if (idx == ARMV7_CYCLE_COUNTER)
 		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
@@ -1044,7 +1044,7 @@ static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
 
 static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 {
-	unsigned long pmnc;
+	u32 pmnc;
 	struct perf_sample_data data;
 	struct cpu_hw_events *cpuc;
 	struct pt_regs *regs;

From 25e29c7c0f4fcbe911b77a69f015bd6424cedcd0 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Tue, 19 Jul 2011 22:17:48 +0100
Subject: ARM: perf: use integers for ARMv7 event indices

This patch ensures that integers are used to represent event indices in
the ARMv7 PMU backend. This ensures consistency between functions and
also with the arm_pmu structure.
Acked-by: Jamie Iles
Reviewed-by: Jean Pihet
Signed-off-by: Will Deacon
---
 arch/arm/kernel/perf_event_v7.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index f4f260dabee3..e39bc8935cbe 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -793,7 +793,7 @@ static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc,
 	return ret;
 }
 
-static inline int armv7_pmnc_select_counter(unsigned int idx)
+static inline int armv7_pmnc_select_counter(int idx)
 {
 	u32 val;
 
@@ -840,7 +840,7 @@ static inline void armv7pmu_write_counter(int idx, u32 value)
 		smp_processor_id(), idx);
 }
 
-static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val)
+static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
 {
 	if (armv7_pmnc_select_counter(idx) == idx) {
 		val &= ARMV7_EVTSEL_MASK;
@@ -848,7 +848,7 @@ static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
 	}
 }
 
-static inline u32 armv7_pmnc_enable_counter(unsigned int idx)
+static inline int armv7_pmnc_enable_counter(int idx)
 {
 	u32 val;
 
@@ -869,7 +869,7 @@ static inline u32 armv7_pmnc_enable_counter(unsigned int idx)
 	return idx;
 }
 
-static inline u32 armv7_pmnc_disable_counter(unsigned int idx)
+static inline int armv7_pmnc_disable_counter(int idx)
 {
 	u32 val;
 
@@ -891,7 +891,7 @@ static inline u32 armv7_pmnc_disable_counter(unsigned int idx)
 	return idx;
 }
 
-static inline u32 armv7_pmnc_enable_intens(unsigned int idx)
+static inline int armv7_pmnc_enable_intens(int idx)
 {
 	u32 val;
 
@@ -912,7 +912,7 @@ static inline u32 armv7_pmnc_enable_intens(unsigned int idx)
 	return idx;
 }
 
-static inline u32 armv7_pmnc_disable_intens(unsigned int idx)
+static inline int armv7_pmnc_disable_intens(int idx)
 {
 	u32 val;
 

From c691bb6249b25104fcb6dad31bd772c139ce4a50 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Tue, 19 Jul 2011 22:25:55 +0100
Subject: ARM: perf: index ARMv7 event counters starting from zero

The current ARMv7 PMU backend indexes event counters from two, with
index zero being reserved and index one being used to represent the
cycle counter. This patch tidies up the code by indexing from one
instead (with zero for the cycle counter). This allows us to remove
many of the accessor macros along with the counter enumeration and
makes the code much more readable.

Acked-by: Jamie Iles
Reviewed-by: Jean Pihet
Signed-off-by: Will Deacon
---
 arch/arm/kernel/perf_event_v7.c | 239 +++++++++++++++-------------------------
 1 file changed, 88 insertions(+), 151 deletions(-)

diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index e39bc8935cbe..0934c8214304 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -676,23 +676,24 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 };
 
 /*
- * Perf Events counters
+ * Perf Events' indices
  */
-enum armv7_counters {
-	ARMV7_CYCLE_COUNTER	= 1,	/* Cycle counter */
-	ARMV7_COUNTER0		= 2,	/* First event counter */
-};
+#define ARMV7_IDX_CYCLE_COUNTER	0
+#define ARMV7_IDX_COUNTER0	1
+#define ARMV7_IDX_COUNTER_LAST	(ARMV7_IDX_CYCLE_COUNTER + armpmu->num_events - 1)
+
+#define ARMV7_MAX_COUNTERS	32
+#define ARMV7_COUNTER_MASK	(ARMV7_MAX_COUNTERS - 1)
 
 /*
- * The cycle counter is ARMV7_CYCLE_COUNTER.
- * The first event counter is ARMV7_COUNTER0.
- * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1).
+ * ARMv7 low level PMNC access
  */
-#define	ARMV7_COUNTER_LAST	(ARMV7_COUNTER0 + armpmu->num_events - 1)
 
 /*
- * ARMv7 low level PMNC access
+ * Perf Event to low level counters mapping
  */
+#define	ARMV7_IDX_TO_COUNTER(x)	\
+	(((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
 
 /*
  * Per-CPU PMNC: config reg
@@ -707,54 +708,14 @@ enum armv7_counters {
 #define ARMV7_PMNC_N_MASK	0x1f
 #define ARMV7_PMNC_MASK		0x3f	/* Mask for writable bits */
 
-/*
- * Available counters
- */
-#define ARMV7_CNT0		0	/* First event counter */
-#define ARMV7_CCNT		31	/* Cycle counter */
-
-/* Perf Event to low level counters mapping */
-#define ARMV7_EVENT_CNT_TO_CNTx	(ARMV7_COUNTER0 - ARMV7_CNT0)
-
-/*
- * CNTENS: counters enable reg
- */
-#define ARMV7_CNTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_CNTENS_C		(1 << ARMV7_CCNT)
-
-/*
- * CNTENC: counters disable reg
- */
-#define ARMV7_CNTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_CNTENC_C		(1 << ARMV7_CCNT)
-
-/*
- * INTENS: counters overflow interrupt enable reg
- */
-#define ARMV7_INTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_INTENS_C		(1 << ARMV7_CCNT)
-
-/*
- * INTENC: counters overflow interrupt disable reg
- */
-#define ARMV7_INTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_INTENC_C		(1 << ARMV7_CCNT)
-
 /*
  * EVTSEL: Event selection reg
  */
 #define ARMV7_EVTSEL_MASK	0xff	/* Mask for writable bits */
 
-/*
- * SELECT: Counter selection reg
- */
-#define ARMV7_SELECT_MASK	0x1f	/* Mask for writable bits */
-
 /*
  * FLAG: counters overflow flag status reg
  */
-#define ARMV7_FLAG_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_FLAG_C		(1 << ARMV7_CCNT)
 #define ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
 #define ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK
 
@@ -777,34 +738,39 @@ static inline int armv7_pmnc_has_overflowed(u32 pmnc)
 	return pmnc & ARMV7_OVERFLOWED_MASK;
 }
 
-static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc,
-					enum armv7_counters counter)
+static inline int armv7_pmnc_counter_valid(int idx)
+{
+	return idx >= ARMV7_IDX_CYCLE_COUNTER && idx <= ARMV7_IDX_COUNTER_LAST;
+}
+
+static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
 {
 	int ret = 0;
+	u32 counter;
 
-	if (counter == ARMV7_CYCLE_COUNTER)
-		ret = pmnc & ARMV7_FLAG_C;
-	else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST))
-		ret = pmnc & ARMV7_FLAG_P(counter);
-	else
+	if (!armv7_pmnc_counter_valid(idx)) {
 		pr_err("CPU%u checking wrong counter %d overflow status\n",
-			smp_processor_id(), counter);
+			smp_processor_id(), idx);
+	} else {
+		counter = ARMV7_IDX_TO_COUNTER(idx);
+		ret = pmnc & BIT(counter);
+	}
 
 	return ret;
 }
 
 static inline int armv7_pmnc_select_counter(int idx)
 {
-	u32 val;
+	u32 counter;
 
-	if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) {
-		pr_err("CPU%u selecting wrong PMNC counter"
-			" %d\n", smp_processor_id(), idx);
-		return -1;
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u selecting wrong PMNC counter %d\n",
+			smp_processor_id(), idx);
+		return -EINVAL;
 	}
 
-	val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
-	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
 	isb();
 
 	return idx;
@@ -814,30 +780,26 @@ static inline int armv7_pmnc_select_counter(int idx)
 
 static inline u32 armv7pmu_read_counter(int idx)
 {
 	u32 value = 0;
 
-	if (idx == ARMV7_CYCLE_COUNTER)
-		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
-	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
-		if (armv7_pmnc_select_counter(idx) == idx)
-			asm volatile("mrc p15, 0, %0, c9, c13, 2"
-				     : "=r" (value));
-	} else
+	if (!armv7_pmnc_counter_valid(idx))
 		pr_err("CPU%u reading wrong counter %d\n",
 			smp_processor_id(), idx);
+	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
+		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
+	else if (armv7_pmnc_select_counter(idx) == idx)
+		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
 
 	return value;
 }
 
 static inline void armv7pmu_write_counter(int idx, u32 value)
 {
-	if (idx == ARMV7_CYCLE_COUNTER)
-		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
-	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
-		if (armv7_pmnc_select_counter(idx) == idx)
-			asm volatile("mcr p15, 0, %0, c9, c13, 2"
-				     : : "r" (value));
-	} else
+	if (!armv7_pmnc_counter_valid(idx))
 		pr_err("CPU%u writing wrong counter %d\n",
 			smp_processor_id(), idx);
+	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
+		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
+	else if (armv7_pmnc_select_counter(idx) == idx)
+		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
 }
 
 static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
@@ -850,86 +812,61 @@ static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
 
 static inline int armv7_pmnc_enable_counter(int idx)
 {
-	u32 val;
+	u32 counter;
 
-	if ((idx != ARMV7_CYCLE_COUNTER) &&
-	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
-		pr_err("CPU%u enabling wrong PMNC counter"
-			" %d\n", smp_processor_id(), idx);
-		return -1;
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u enabling wrong PMNC counter %d\n",
+			smp_processor_id(), idx);
+		return -EINVAL;
 	}
 
-	if (idx == ARMV7_CYCLE_COUNTER)
-		val = ARMV7_CNTENS_C;
-	else
-		val = ARMV7_CNTENS_P(idx);
-
-	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
-
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
 	return idx;
 }
 
 static inline int armv7_pmnc_disable_counter(int idx)
 {
-	u32 val;
+	u32 counter;
 
-
-	if ((idx != ARMV7_CYCLE_COUNTER) &&
-	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
-		pr_err("CPU%u disabling wrong PMNC counter"
-			" %d\n", smp_processor_id(), idx);
-		return -1;
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u disabling wrong PMNC counter %d\n",
+			smp_processor_id(), idx);
+		return -EINVAL;
 	}
 
-	if (idx == ARMV7_CYCLE_COUNTER)
-		val = ARMV7_CNTENC_C;
-	else
-		val = ARMV7_CNTENC_P(idx);
-
-	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
-
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
 	return idx;
 }
 
 static inline int armv7_pmnc_enable_intens(int idx)
 {
-	u32 val;
+	u32 counter;
 
-	if ((idx != ARMV7_CYCLE_COUNTER) &&
-	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
-		pr_err("CPU%u enabling wrong PMNC counter"
-			" interrupt enable %d\n", smp_processor_id(), idx);
-		return -1;
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
+			smp_processor_id(), idx);
+		return -EINVAL;
 	}
 
-	if (idx == ARMV7_CYCLE_COUNTER)
-		val = ARMV7_INTENS_C;
-	else
-		val = ARMV7_INTENS_P(idx);
-
-	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));
-
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
 	return idx;
 }
 
 static inline int armv7_pmnc_disable_intens(int idx)
 {
-	u32 val;
+	u32 counter;
 
-	if ((idx != ARMV7_CYCLE_COUNTER) &&
-	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
-		pr_err("CPU%u disabling wrong PMNC counter"
-			" interrupt enable %d\n", smp_processor_id(), idx);
-		return -1;
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
+			smp_processor_id(), idx);
+		return -EINVAL;
 	}
 
-	if (idx == ARMV7_CYCLE_COUNTER)
-		val = ARMV7_INTENC_C;
-	else
-		val = ARMV7_INTENC_P(idx);
-
-	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));
-
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
 	return idx;
 }
 
@@ -973,14 +910,14 @@ static void armv7_pmnc_dump_regs(void)
 	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
 	printk(KERN_INFO "CCNT  =0x%08x\n", val);
 
-	for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) {
+	for (cnt = ARMV7_IDX_COUNTER0; cnt <= ARMV7_IDX_COUNTER_LAST; cnt++) {
 		armv7_pmnc_select_counter(cnt);
 		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
 		printk(KERN_INFO "CNT[%d] count =0x%08x\n",
-			cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
+			ARMV7_IDX_TO_COUNTER(cnt), val);
 		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
 		printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
-			cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
+			ARMV7_IDX_TO_COUNTER(cnt), val);
 	}
 }
 #endif
@@ -1004,7 +941,7 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 	 * Set event (if destined for PMNx counters)
 	 * We don't need to set the event if it's a cycle count
 	 */
-	if (idx != ARMV7_CYCLE_COUNTER)
+	if (idx != ARMV7_IDX_CYCLE_COUNTER)
 		armv7_pmnc_write_evtsel(idx, hwc->config_base);
 
 	/*
@@ -1069,7 +1006,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 	perf_sample_data_init(&data, 0);
 	cpuc = &__get_cpu_var(cpu_hw_events);
 
-	for (idx = 0; idx <= armpmu->num_events; ++idx) {
+	for (idx = 0; idx < armpmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
@@ -1132,23 +1069,23 @@ static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
 
 	/* Always place a cycle counter into the cycle counter. */
 	if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) {
-		if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask))
+		if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
 			return -EAGAIN;
 
-		return ARMV7_CYCLE_COUNTER;
-	} else {
-		/*
-		 * For anything other than a cycle counter, try and use
-		 * the events counters
-		 */
-		for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) {
-			if (!test_and_set_bit(idx, cpuc->used_mask))
-				return idx;
-		}
+		return ARMV7_IDX_CYCLE_COUNTER;
+	}
 
-		/* The counters are all in use. */
-		return -EAGAIN;
+	/*
+	 * For anything other than a cycle counter, try and use
+	 * the events counters
+	 */
+	for (idx = ARMV7_IDX_COUNTER0; idx < armpmu->num_events; ++idx) {
+		if (!test_and_set_bit(idx, cpuc->used_mask))
+			return idx;
 	}
+
+	/* The counters are all in use. */
+	return -EAGAIN;
 }
@@ -1156,7 +1093,7 @@ static void armv7pmu_reset(void *info)
 {
 	u32 idx, nb_cnt = armpmu->num_events;
 
 	/* The counter and interrupt enable registers are unknown at reset. */
-	for (idx = 1; idx < nb_cnt; ++idx)
+	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx)
 		armv7pmu_disable_event(NULL, idx);
 
 	/* Initialize & Reset PMNC: C and P bits */
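The arithmetic behind the new numbering is worth spelling out: perf index 0
is the cycle counter, perf index 1 maps to hardware event counter 0, and the
bit to poke in CNTENS/CNTENC/INTENS/INTENC/FLAG is simply BIT(hardware
counter). A standalone sketch mirroring the macros introduced above
(user-space C, illustrative only):

    #include <stdio.h>

    #define ARMV7_IDX_CYCLE_COUNTER 0
    #define ARMV7_IDX_COUNTER0      1
    #define ARMV7_MAX_COUNTERS      32
    #define ARMV7_COUNTER_MASK      (ARMV7_MAX_COUNTERS - 1)
    #define BIT(n)                  (1U << (n))

    #define ARMV7_IDX_TO_COUNTER(x) \
            (((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)

    int main(void)
    {
            /* Index 1 (first event counter) -> counter 0 -> mask 0x1;
             * index 2 -> counter 1 -> mask 0x2; and so on. */
            for (int idx = ARMV7_IDX_CYCLE_COUNTER; idx <= 4; idx++)
                    printf("idx %d -> counter %2u -> mask 0x%08x\n", idx,
                           ARMV7_IDX_TO_COUNTER(idx),
                           BIT(ARMV7_IDX_TO_COUNTER(idx)));
            return 0;
    }

Note the deliberate wrap-around: the cycle counter (index 0) maps to
(0 - 1) & 31 = 31, exactly the bit position the old ARMV7_*_C macros
hard-coded as ARMV7_CCNT, so one expression now covers both cases.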
From a505addc366525cd8d9358298be0dc8655be5953 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Tue, 19 Jul 2011 13:53:36 +0100
Subject: ARM: perf: add mode exclusion for Cortex-A15 PMU

The Cortex-A15 PMU implements the PMUv2 specification and therefore
has support for some mode exclusion. This patch adds support for
excluding user, kernel and hypervisor counts from a given event.

Acked-by: Jamie Iles
Reviewed-by: Jean Pihet
Signed-off-by: Will Deacon
---
 arch/arm/kernel/perf_event_v7.c | 58 ++++++++++++++++++++++++++++++++++-------
 1 file changed, 49 insertions(+), 9 deletions(-)

diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 0934c8214304..fe6c931d2c4b 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -17,6 +17,9 @@
  */
 
 #ifdef CONFIG_CPU_V7
+
+static struct arm_pmu armv7pmu;
+
 /*
  * Common ARMv7 event types
  *
@@ -708,17 +711,25 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 #define ARMV7_PMNC_N_MASK	0x1f
 #define ARMV7_PMNC_MASK		0x3f	/* Mask for writable bits */
 
-/*
- * EVTSEL: Event selection reg
- */
-#define ARMV7_EVTSEL_MASK	0xff	/* Mask for writable bits */
-
 /*
  * FLAG: counters overflow flag status reg
  */
 #define ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
 #define ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK
 
+/*
+ * PMXEVTYPER: Event selection reg
+ */
+#define ARMV7_EVTYPE_MASK	0xc00000ff	/* Mask for writable bits */
+#define ARMV7_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */
+
+/*
+ * Event filters for PMUv2
+ */
+#define ARMV7_EXCLUDE_PL1	(1 << 31)
+#define ARMV7_EXCLUDE_USER	(1 << 30)
+#define ARMV7_INCLUDE_HYP	(1 << 27)
+
 static inline u32 armv7_pmnc_read(void)
 {
 	u32 val;
@@ -805,7 +816,7 @@ static inline void armv7pmu_write_counter(int idx, u32 value)
 static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
 {
 	if (armv7_pmnc_select_counter(idx) == idx) {
-		val &= ARMV7_EVTSEL_MASK;
+		val &= ARMV7_EVTYPE_MASK;
 		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
 	}
 }
@@ -939,9 +950,10 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 
 	/*
 	 * Set event (if destined for PMNx counters)
-	 * We don't need to set the event if it's a cycle count
+	 * We only need to set the event for the cycle counter if we
+	 * have the ability to perform event filtering.
 	 */
-	if (idx != ARMV7_IDX_CYCLE_COUNTER)
+	if (armv7pmu.set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
 		armv7_pmnc_write_evtsel(idx, hwc->config_base);
 
 	/*
@@ -1066,9 +1078,10 @@
 static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
 				  struct hw_perf_event *event)
 {
 	int idx;
+	unsigned long evtype = event->config_base & ARMV7_EVTYPE_EVENT;
 
 	/* Always place a cycle counter into the cycle counter. */
-	if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) {
+	if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
 		if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
 			return -EAGAIN;
@@ -1088,6 +1101,32 @@ static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
 	return -EAGAIN;
 }
 
+/*
+ * Add an event filter to a given event. This will only work for PMUv2 PMUs.
+ */
+static int armv7pmu_set_event_filter(struct hw_perf_event *event,
+				     struct perf_event_attr *attr)
+{
+	unsigned long config_base = 0;
+
+	if (attr->exclude_idle)
+		return -EPERM;
+	if (attr->exclude_user)
+		config_base |= ARMV7_EXCLUDE_USER;
+	if (attr->exclude_kernel)
+		config_base |= ARMV7_EXCLUDE_PL1;
+	if (!attr->exclude_hv)
+		config_base |= ARMV7_INCLUDE_HYP;
+
+	/*
+	 * Install the filter into config_base as this is used to
+	 * construct the event type.
+	 */
+	event->config_base = config_base;
+
+	return 0;
+}
+
 static void armv7pmu_reset(void *info)
 {
 	u32 idx, nb_cnt = armpmu->num_events;
@@ -1162,6 +1201,7 @@ static struct arm_pmu *__init armv7_a15_pmu_init(void)
 	armv7pmu.cache_map = &armv7_a15_perf_cache_map;
 	armv7pmu.event_map = &armv7_a15_perf_map;
 	armv7pmu.num_events = armv7_read_num_pmnc_events();
+	armv7pmu.set_event_filter = armv7pmu_set_event_filter;
 	return &armv7pmu;
 }
 #else
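What the new callback amounts to: the perf_event_attr exclude_* bits are
translated into PMUv2 PMXEVTYPER filter bits and parked in config_base,
which armv7pmu_enable_event() later writes via armv7_pmnc_write_evtsel().
A self-contained sketch of that translation (a cut-down bitfield struct
stands in for the real struct perf_event_attr; illustrative only):

    #include <stdio.h>

    #define ARMV7_EXCLUDE_PL1  (1U << 31)
    #define ARMV7_EXCLUDE_USER (1U << 30)
    #define ARMV7_INCLUDE_HYP  (1U << 27)

    /* Stand-in for the exclusion bits of struct perf_event_attr. */
    struct attr_bits {
            unsigned exclude_user : 1;
            unsigned exclude_kernel : 1;
            unsigned exclude_hv : 1;
    };

    static unsigned int filter_bits(const struct attr_bits *attr)
    {
            unsigned int config_base = 0;

            if (attr->exclude_user)
                    config_base |= ARMV7_EXCLUDE_USER;
            if (attr->exclude_kernel)
                    config_base |= ARMV7_EXCLUDE_PL1;   /* PL1 = kernel */
            if (!attr->exclude_hv)
                    config_base |= ARMV7_INCLUDE_HYP;
            return config_base;
    }

    int main(void)
    {
            /* Count in the kernel only: exclude user and hypervisor. */
            struct attr_bits kernel_only = {
                    .exclude_user = 1,
                    .exclude_hv = 1,
            };

            printf("config_base = 0x%08x\n", filter_bits(&kernel_only));
            return 0;   /* prints 0x40000000: ARMV7_EXCLUDE_USER */
    }

With exclude_kernel set instead, bit 31 (ARMV7_EXCLUDE_PL1) would be set and
PL1 counts suppressed; exclude_idle has no hardware support, hence -EPERM.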
From c47f8684baefa2bf52c4320f894e73db08dc8a0a Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Tue, 19 Jul 2011 09:37:10 +0100
Subject: ARM: perf: remove active_mask

Currently, pmu_hw_events::active_mask is used to keep track of which
events are active in hardware. As we can stop counters and their
interrupts, this is unnecessary.

Signed-off-by: Mark Rutland
Reviewed-by: Will Deacon
Reviewed-by: Jamie Iles
Reviewed-by: Ashwin Chaugule
Signed-off-by: Will Deacon
---
 arch/arm/kernel/perf_event_v7.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index fe6c931d2c4b..f4170fc228b6 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -1022,9 +1022,6 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
-		if (!test_bit(idx, cpuc->active_mask))
-			continue;
-
 		/*
 		 * We have a single interrupt for all counters. Check that
 		 * each counter has overflowed before we process it.

From 0f78d2d5ccf72ec834da6901886a40fd8e3b7615 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Thu, 28 Apr 2011 10:17:04 +0100
Subject: ARM: perf: lock PMU registers per-CPU

Currently, a single lock serialises access to CPU PMU registers. This
global locking is unnecessary as PMU registers are local to the CPU
they monitor.

This patch replaces the global lock with a per-CPU lock. As the lock is
in struct cpu_hw_events, PMUs providing a single cpu_hw_events instance
can be locked globally.

Signed-off-by: Mark Rutland
Reviewed-by: Will Deacon
Reviewed-by: Jamie Iles
Reviewed-by: Ashwin Chaugule
Signed-off-by: Will Deacon
---
 arch/arm/kernel/perf_event_v7.c | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index f4170fc228b6..68ac522fd940 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -936,12 +936,13 @@ static void armv7_pmnc_dump_regs(void)
 static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long flags;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
 	/*
 	 * Enable counter and interrupt, and set the counter to count
 	 * the event that we're interested in.
 	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 
 	/*
 	 * Disable counter
@@ -966,17 +967,18 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 	 */
 	armv7_pmnc_enable_counter(idx);
 
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long flags;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
 	/*
	 * Disable counter and interrupt
 	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 
 	/*
 	 * Disable counter
@@ -988,7 +990,7 @@ static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
 	 */
 	armv7_pmnc_disable_intens(idx);
 
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
@@ -1054,21 +1056,23 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 static void armv7pmu_start(void)
 {
 	unsigned long flags;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	/* Enable all counters */
 	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void armv7pmu_stop(void)
 {
 	unsigned long flags;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	/* Disable all counters */
 	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
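The shape of this change is the classic per-CPU-data pattern: every CPU owns
an instance of the events structure, including its own lock, and callers
lock whichever instance get_hw_events() returns. A miniature user-space
analogue (pthread mutexes instead of raw_spinlock_t, a fixed array instead
of DEFINE_PER_CPU; the range designator is the same GCC extension kernel
code uses):

    #include <pthread.h>
    #include <stdio.h>

    #define NR_CPUS 4

    /* Stand-in for struct cpu_hw_events: per-CPU state plus its own lock. */
    struct hw_events {
            unsigned long used_mask;
            pthread_mutex_t pmu_lock;
    };

    static struct hw_events per_cpu_events[NR_CPUS] = {
            [0 ... NR_CPUS - 1] = { .pmu_lock = PTHREAD_MUTEX_INITIALIZER },
    };

    /* Analogue of armpmu->get_hw_events(): hand back this CPU's instance. */
    static struct hw_events *get_hw_events(int cpu)
    {
            return &per_cpu_events[cpu];
    }

    static void enable_event(int cpu, int idx)
    {
            struct hw_events *events = get_hw_events(cpu);

            /* Serialises against this CPU's PMU only; other CPUs'
             * PMUs can be programmed concurrently. */
            pthread_mutex_lock(&events->pmu_lock);
            events->used_mask |= 1UL << idx;
            pthread_mutex_unlock(&events->pmu_lock);
    }

    int main(void)
    {
            enable_event(0, 1);
            enable_event(1, 2);
            printf("cpu0 mask=%#lx cpu1 mask=%#lx\n",
                   per_cpu_events[0].used_mask,
                   per_cpu_events[1].used_mask);
            return 0;
    }

A PMU with a single, system-wide register file can still return one shared
instance from get_hw_events(), in which case the same code degenerates to
global locking, which is exactly the flexibility the commit message notes.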
Signed-off-by: Mark Rutland
Reviewed-by: Will Deacon
Reviewed-by: Jamie Iles
Reviewed-by: Ashwin Chaugule
Signed-off-by: Will Deacon
---
 arch/arm/kernel/perf_event_v7.c | 37 ++++++++++++++++++++++++++++---------
 1 file changed, 28 insertions(+), 9 deletions(-)

diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 68ac522fd940..be7b58a2cc6f 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -1140,6 +1140,30 @@ static void armv7pmu_reset(void *info)
 	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
 }
 
+static int armv7_a8_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv7_a8_perf_map,
+				&armv7_a8_perf_cache_map, 0xFF);
+}
+
+static int armv7_a9_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv7_a9_perf_map,
+				&armv7_a9_perf_cache_map, 0xFF);
+}
+
+static int armv7_a5_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv7_a5_perf_map,
+				&armv7_a5_perf_cache_map, 0xFF);
+}
+
+static int armv7_a15_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv7_a15_perf_map,
+				&armv7_a15_perf_cache_map, 0xFF);
+}
+
 static struct arm_pmu armv7pmu = {
 	.handle_irq		= armv7pmu_handle_irq,
 	.enable			= armv7pmu_enable_event,
@@ -1150,7 +1174,6 @@ static struct arm_pmu armv7pmu = {
 	.start			= armv7pmu_start,
 	.stop			= armv7pmu_stop,
 	.reset			= armv7pmu_reset,
-	.raw_event_mask		= 0xFF,
 	.max_period		= (1LLU << 32) - 1,
 };
 
@@ -1169,8 +1192,7 @@ static struct arm_pmu *__init armv7_a8_pmu_init(void)
 {
 	armv7pmu.id = ARM_PERF_PMU_ID_CA8;
 	armv7pmu.name = "ARMv7 Cortex-A8";
-	armv7pmu.cache_map = &armv7_a8_perf_cache_map;
-	armv7pmu.event_map = &armv7_a8_perf_map;
+	armv7pmu.map_event = armv7_a8_map_event;
 	armv7pmu.num_events = armv7_read_num_pmnc_events();
 	return &armv7pmu;
 }
@@ -1179,8 +1201,7 @@ static struct arm_pmu *__init armv7_a9_pmu_init(void)
 {
 	armv7pmu.id = ARM_PERF_PMU_ID_CA9;
 	armv7pmu.name = "ARMv7 Cortex-A9";
-	armv7pmu.cache_map = &armv7_a9_perf_cache_map;
-	armv7pmu.event_map = &armv7_a9_perf_map;
+	armv7pmu.map_event = armv7_a9_map_event;
 	armv7pmu.num_events = armv7_read_num_pmnc_events();
 	return &armv7pmu;
 }
@@ -1189,8 +1210,7 @@ static struct arm_pmu *__init armv7_a5_pmu_init(void)
 {
 	armv7pmu.id = ARM_PERF_PMU_ID_CA5;
 	armv7pmu.name = "ARMv7 Cortex-A5";
-	armv7pmu.cache_map = &armv7_a5_perf_cache_map;
-	armv7pmu.event_map = &armv7_a5_perf_map;
+	armv7pmu.map_event = armv7_a5_map_event;
 	armv7pmu.num_events = armv7_read_num_pmnc_events();
 	return &armv7pmu;
 }
@@ -1199,8 +1219,7 @@ static struct arm_pmu *__init armv7_a15_pmu_init(void)
 {
 	armv7pmu.id = ARM_PERF_PMU_ID_CA15;
 	armv7pmu.name = "ARMv7 Cortex-A15";
-	armv7pmu.cache_map = &armv7_a15_perf_cache_map;
-	armv7pmu.event_map = &armv7_a15_perf_map;
+	armv7pmu.map_event = armv7_a15_map_event;
 	armv7pmu.num_events = armv7_read_num_pmnc_events();
 	armv7pmu.set_event_filter = armv7pmu_set_event_filter;
 	return &armv7pmu;
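map_cpu_event() itself lives in the shared ARM perf code rather than in this
file, so the diff only shows its callers. The pattern, one generic mapper
taking lookup tables as parameters with trivial per-CPU shims binding the
tables, can be shown in miniature (user-space analogue; names, sizes and
event numbers here are made up for illustration, not the kernel's):

    #include <stdio.h>

    #define HW_MAX      4
    #define UNSUPPORTED 0xffff

    /* Per-CPU event tables, playing the role of armv7_a8_perf_map etc. */
    static const unsigned a8_map[HW_MAX] = { 0x10, 0x11, 0x12, UNSUPPORTED };
    static const unsigned a9_map[HW_MAX] = { 0x10, 0x11, 0x13, 0x14 };

    /* Generic mapper: all CPU-specific knowledge arrives as parameters. */
    static int map_event(const unsigned map[HW_MAX], unsigned config)
    {
            if (config >= HW_MAX || map[config] == UNSUPPORTED)
                    return -1;
            return (int)map[config];
    }

    /* Shims bind one table each, yielding a uniform callback signature. */
    static int a8_map_event(unsigned config) { return map_event(a8_map, config); }
    static int a9_map_event(unsigned config) { return map_event(a9_map, config); }

    int main(void)
    {
            printf("A8 event 3 -> %d (unsupported)\n", a8_map_event(3));
            printf("A9 event 3 -> %#x\n", (unsigned)a9_map_event(3));
            return 0;
    }

Because generic code now sees only a function pointer, a system PMU can
supply a map_event() that ignores CPU tables entirely, which is the
generalisation this series is building towards.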
From 8be3f9a2385f91f7bf5c58f351e24b9247898e8f Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Tue, 17 May 2011 11:20:11 +0100
Subject: ARM: perf: remove cpu-related misnomers

Currently struct cpu_hw_events stores data on events running on a PMU
associated with a CPU. As this data is general enough to be used for
system PMUs, this name is a misnomer, and may cause confusion when it
is used for system PMUs. Additionally, 'armpmu' is commonly used as a
parameter name for an instance of struct arm_pmu. The name is also
used for a global instance which represents the CPU's PMU.

As cpu_hw_events is now not tied to CPU PMUs, it is renamed to
pmu_hw_events, with instances of it renamed similarly. As the global
'armpmu' is CPU-specific, it is renamed to cpu_pmu. This should make
it clearer which code is generic, and which is coupled with the CPU.

Signed-off-by: Mark Rutland
Reviewed-by: Will Deacon
Reviewed-by: Jamie Iles
Reviewed-by: Ashwin Chaugule
Signed-off-by: Will Deacon
---
 arch/arm/kernel/perf_event_v7.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index be7b58a2cc6f..98b75738345e 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -683,7 +683,7 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
  */
 #define ARMV7_IDX_CYCLE_COUNTER	0
 #define ARMV7_IDX_COUNTER0	1
-#define ARMV7_IDX_COUNTER_LAST	(ARMV7_IDX_CYCLE_COUNTER + armpmu->num_events - 1)
+#define ARMV7_IDX_COUNTER_LAST	(ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
 
 #define ARMV7_MAX_COUNTERS	32
 #define ARMV7_COUNTER_MASK	(ARMV7_MAX_COUNTERS - 1)
@@ -936,7 +936,7 @@ static void armv7_pmnc_dump_regs(void)
 static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long flags;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	/*
 	 * Enable counter and interrupt, and set the counter to count
@@ -973,7 +973,7 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long flags;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	/*
 	 * Disable counter and interrupt
@@ -997,7 +997,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 {
 	u32 pmnc;
 	struct perf_sample_data data;
-	struct cpu_hw_events *cpuc;
+	struct pmu_hw_events *cpuc;
 	struct pt_regs *regs;
 	int idx;
@@ -1020,7 +1020,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 	perf_sample_data_init(&data, 0);
 	cpuc = &__get_cpu_var(cpu_hw_events);
-	for (idx = 0; idx < armpmu->num_events; ++idx) {
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
@@ -1038,7 +1038,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 			continue;
 
 		if (perf_event_overflow(event, &data, regs))
-			armpmu->disable(hwc, idx);
+			cpu_pmu->disable(hwc, idx);
 	}
 
 	/*
@@ -1056,7 +1056,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 static void armv7pmu_start(void)
 {
 	unsigned long flags;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	/* Enable all counters */
@@ -1067,7 +1067,7 @@ static void armv7pmu_start(void)
 static void armv7pmu_stop(void)
 {
 	unsigned long flags;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	/* Disable all counters */
@@ -1075,7 +1075,7 @@ static void armv7pmu_stop(void)
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
+static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
 				  struct hw_perf_event *event)
 {
 	int idx;
@@ -1093,7 +1093,7 @@ static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
 	 * For anything other than a cycle counter, try and use
 	 * the events counters
 	 */
-	for (idx = ARMV7_IDX_COUNTER0; idx < armpmu->num_events; ++idx) {
+	for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
 		if (!test_and_set_bit(idx, cpuc->used_mask))
 			return idx;
 	}
@@ -1130,7 +1130,7 @@ static int armv7pmu_set_event_filter(struct hw_perf_event *event,
 
 static void armv7pmu_reset(void *info)
 {
-	u32 idx, nb_cnt = armpmu->num_events;
+	u32 idx, nb_cnt = cpu_pmu->num_events;
 
 	/* The counter and interrupt enable registers are unknown at reset. */
 	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx)
 		armv7pmu_disable_event(NULL, idx);