From 65bab45113a2c5e9f13bc8cc3f6fea92f467d417 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Sat, 28 Feb 2015 00:11:33 +0000 Subject: ARM: perf: Preparatory work for Scorpion PMU support Do some things to make the Krait PMU support code generic enough to be used by the Scorpion PMU support code. * Rename the venum register functions to be venum instead of krait specific because the same registers exist on Scorpion * Add some macros to decode our Krait specific event encoding that's the same on Scorpion (modulo an extra region). * Drop 'krait' from krait_clear_pmresrn_group() so it can be used by Scorpion code Signed-off-by: Stephen Boyd Signed-off-by: Will Deacon --- arch/arm/kernel/perf_event_v7.c | 100 +++++++++++++++++----------------------- 1 file changed, 43 insertions(+), 57 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index 8993770c47de..97a7eda8831a 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c @@ -1103,6 +1103,12 @@ static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu) #define KRAIT_EVENT_MASK (KRAIT_EVENT | VENUM_EVENT) #define PMRESRn_EN BIT(31) +#define EVENT_REGION(event) (((event) >> 12) & 0xf) /* R */ +#define EVENT_GROUP(event) ((event) & 0xf) /* G */ +#define EVENT_CODE(event) (((event) >> 4) & 0xff) /* CC */ +#define EVENT_VENUM(event) (!!(event & VENUM_EVENT)) /* N=2 */ +#define EVENT_CPU(event) (!!(event & KRAIT_EVENT)) /* N=1 */ + static u32 krait_read_pmresrn(int n) { u32 val; @@ -1141,19 +1147,19 @@ static void krait_write_pmresrn(int n, u32 val) } } -static u32 krait_read_vpmresr0(void) +static u32 venum_read_pmresr(void) { u32 val; asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val)); return val; } -static void krait_write_vpmresr0(u32 val) +static void venum_write_pmresr(u32 val) { asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val)); } -static void krait_pre_vpmresr0(u32 *venum_orig_val, u32 *fp_orig_val) +static void venum_pre_pmresr(u32 *venum_orig_val, u32 *fp_orig_val) { u32 venum_new_val; u32 fp_new_val; @@ -1170,7 +1176,7 @@ static void krait_pre_vpmresr0(u32 *venum_orig_val, u32 *fp_orig_val) fmxr(FPEXC, fp_new_val); } -static void krait_post_vpmresr0(u32 venum_orig_val, u32 fp_orig_val) +static void venum_post_pmresr(u32 venum_orig_val, u32 fp_orig_val) { BUG_ON(preemptible()); /* Restore FPEXC */ @@ -1193,16 +1199,11 @@ static void krait_evt_setup(int idx, u32 config_base) u32 val; u32 mask; u32 vval, fval; - unsigned int region; - unsigned int group; - unsigned int code; + unsigned int region = EVENT_REGION(config_base); + unsigned int group = EVENT_GROUP(config_base); + unsigned int code = EVENT_CODE(config_base); unsigned int group_shift; - bool venum_event; - - venum_event = !!(config_base & VENUM_EVENT); - region = (config_base >> 12) & 0xf; - code = (config_base >> 4) & 0xff; - group = (config_base >> 0) & 0xf; + bool venum_event = EVENT_VENUM(config_base); group_shift = group * 8; mask = 0xff << group_shift; @@ -1220,13 +1221,13 @@ static void krait_evt_setup(int idx, u32 config_base) asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0)); if (venum_event) { - krait_pre_vpmresr0(&vval, &fval); - val = krait_read_vpmresr0(); + venum_pre_pmresr(&vval, &fval); + val = venum_read_pmresr(); val &= ~mask; val |= code << group_shift; val |= PMRESRn_EN; - krait_write_vpmresr0(val); - krait_post_vpmresr0(vval, fval); + venum_write_pmresr(val); + venum_post_pmresr(vval, fval); } else { val = krait_read_pmresrn(region); val &= ~mask; 
@@ -1236,7 +1237,7 @@ static void krait_evt_setup(int idx, u32 config_base) } } -static u32 krait_clear_pmresrn_group(u32 val, int group) +static u32 clear_pmresrn_group(u32 val, int group) { u32 mask; int group_shift; @@ -1256,23 +1257,19 @@ static void krait_clearpmu(u32 config_base) { u32 val; u32 vval, fval; - unsigned int region; - unsigned int group; - bool venum_event; - - venum_event = !!(config_base & VENUM_EVENT); - region = (config_base >> 12) & 0xf; - group = (config_base >> 0) & 0xf; + unsigned int region = EVENT_REGION(config_base); + unsigned int group = EVENT_GROUP(config_base); + bool venum_event = EVENT_VENUM(config_base); if (venum_event) { - krait_pre_vpmresr0(&vval, &fval); - val = krait_read_vpmresr0(); - val = krait_clear_pmresrn_group(val, group); - krait_write_vpmresr0(val); - krait_post_vpmresr0(vval, fval); + venum_pre_pmresr(&vval, &fval); + val = venum_read_pmresr(); + val = clear_pmresrn_group(val, group); + venum_write_pmresr(val); + venum_post_pmresr(vval, fval); } else { val = krait_read_pmresrn(region); - val = krait_clear_pmresrn_group(val, group); + val = clear_pmresrn_group(val, group); krait_write_pmresrn(region, val); } } @@ -1350,9 +1347,9 @@ static void krait_pmu_reset(void *info) krait_write_pmresrn(1, 0); krait_write_pmresrn(2, 0); - krait_pre_vpmresr0(&vval, &fval); - krait_write_vpmresr0(0); - krait_post_vpmresr0(vval, fval); + venum_pre_pmresr(&vval, &fval); + venum_write_pmresr(0); + venum_post_pmresr(vval, fval); } static int krait_event_to_bit(struct perf_event *event, unsigned int region, @@ -1386,26 +1383,18 @@ static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc, { int idx; int bit = -1; - unsigned int prefix; - unsigned int region; - unsigned int code; - unsigned int group; - bool krait_event; struct hw_perf_event *hwc = &event->hw; + unsigned int region = EVENT_REGION(hwc->config_base); + unsigned int code = EVENT_CODE(hwc->config_base); + unsigned int group = EVENT_GROUP(hwc->config_base); + bool venum_event = EVENT_VENUM(hwc->config_base); + bool krait_event = EVENT_CPU(hwc->config_base); - region = (hwc->config_base >> 12) & 0xf; - code = (hwc->config_base >> 4) & 0xff; - group = (hwc->config_base >> 0) & 0xf; - krait_event = !!(hwc->config_base & KRAIT_EVENT_MASK); - - if (krait_event) { + if (venum_event || krait_event) { /* Ignore invalid events */ if (group > 3 || region > 2) return -EINVAL; - prefix = hwc->config_base & KRAIT_EVENT_MASK; - if (prefix != KRAIT_EVENT && prefix != VENUM_EVENT) - return -EINVAL; - if (prefix == VENUM_EVENT && (code & 0xe0)) + if (venum_event && (code & 0xe0)) return -EINVAL; bit = krait_event_to_bit(event, region, group); @@ -1425,15 +1414,12 @@ static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc, { int bit; struct hw_perf_event *hwc = &event->hw; - unsigned int region; - unsigned int group; - bool krait_event; - - region = (hwc->config_base >> 12) & 0xf; - group = (hwc->config_base >> 0) & 0xf; - krait_event = !!(hwc->config_base & KRAIT_EVENT_MASK); + unsigned int region = EVENT_REGION(hwc->config_base); + unsigned int group = EVENT_GROUP(hwc->config_base); + bool venum_event = EVENT_VENUM(hwc->config_base); + bool krait_event = EVENT_CPU(hwc->config_base); - if (krait_event) { + if (venum_event || krait_event) { bit = krait_event_to_bit(event, region, group); clear_bit(bit, cpuc->used_mask); } -- cgit From 934999185edd613ca80916d238ba7393b84ae53c Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Sat, 28 Feb 2015 00:11:34 +0000 Subject: ARM: perf: Only reset PMxEVCNTCR 
registers on reset The Krait specific PMxEVCNTCR register is unpredictable upon reset. Currently we clear the register before we setup an event, but we don't need to do that. Instead, we can iterate through all the events and clear them once when we reset the PMU, saving a write in the event setup path. Cc: Neil Leeder Cc: Ashwin Chaugule Cc: Sheetal Sahasrabudhe Signed-off-by: Stephen Boyd Signed-off-by: Will Deacon --- arch/arm/kernel/perf_event_v7.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index 97a7eda8831a..fae6c4ea52e8 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c @@ -1218,8 +1218,6 @@ static void krait_evt_setup(int idx, u32 config_base) val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1); armv7_pmnc_write_evtsel(idx, val); - asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0)); - if (venum_event) { venum_pre_pmresr(&vval, &fval); val = venum_read_pmresr(); @@ -1339,6 +1337,8 @@ static void krait_pmu_enable_event(struct perf_event *event) static void krait_pmu_reset(void *info) { u32 vval, fval; + struct arm_pmu *cpu_pmu = info; + u32 idx, nb_cnt = cpu_pmu->num_events; armv7pmu_reset(info); @@ -1350,6 +1350,13 @@ static void krait_pmu_reset(void *info) venum_pre_pmresr(&vval, &fval); venum_write_pmresr(0); venum_post_pmresr(vval, fval); + + /* Reset PMxEVNCTCR to sane default */ + for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) { + armv7_pmnc_select_counter(idx); + asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0)); + } + } static int krait_event_to_bit(struct perf_event *event, unsigned int region, -- cgit From 341e42c4e3f97af9bbeada64c3e1a41f65ce086a Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Fri, 27 Feb 2015 16:11:35 -0800 Subject: ARM: perf: Add support for Scorpion PMUs Scorpion supports a set of local performance monitor event selection registers (LPM) sitting behind a cp15 based interface that extend the architected PMU events to include Scorpion CPU and Venum VFP specific events. To use these events the user is expected to program the lpm register with the event code shifted into the group they care about and then point the PMNx event at that region+group combo by writing a LPMn_GROUPx event. Add support for this hardware. Note: the raw event number is a pure software construct that allows us to map the multi-dimensional number space of regions, groups, and event codes into a flat event number space suitable for use by the perf framework. This is based on code originally written by Sheetal Sahasrabudhe, Ashwin Chaugule, and Neil Leeder [1]. 
[1] https://www.codeaurora.org/cgit/quic/la/kernel/msm/tree/arch/arm/kernel/perf_event_msm.c?h=msm-3.4 Cc: Mark Rutland Cc: Neil Leeder Cc: Ashwin Chaugule Cc: Sheetal Sahasrabudhe Cc: Signed-off-by: Stephen Boyd Signed-off-by: Will Deacon --- arch/arm/kernel/perf_event_cpu.c | 2 + arch/arm/kernel/perf_event_v7.c | 414 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 416 insertions(+) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c index 61b53c46edfa..7eb86e294c68 100644 --- a/arch/arm/kernel/perf_event_cpu.c +++ b/arch/arm/kernel/perf_event_cpu.c @@ -243,6 +243,8 @@ static const struct of_device_id cpu_pmu_of_device_ids[] = { {.compatible = "arm,arm1176-pmu", .data = armv6_1176_pmu_init}, {.compatible = "arm,arm1136-pmu", .data = armv6_1136_pmu_init}, {.compatible = "qcom,krait-pmu", .data = krait_pmu_init}, + {.compatible = "qcom,scorpion-pmu", .data = scorpion_pmu_init}, + {.compatible = "qcom,scorpion-mp-pmu", .data = scorpion_mp_pmu_init}, {}, }; diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index fae6c4ea52e8..f4207a4dcb01 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c @@ -140,6 +140,23 @@ enum krait_perf_types { KRAIT_PERFCTR_L1_DTLB_ACCESS = 0x12210, }; +/* ARMv7 Scorpion specific event types */ +enum scorpion_perf_types { + SCORPION_LPM0_GROUP0 = 0x4c, + SCORPION_LPM1_GROUP0 = 0x50, + SCORPION_LPM2_GROUP0 = 0x54, + SCORPION_L2LPM_GROUP0 = 0x58, + SCORPION_VLPM_GROUP0 = 0x5c, + + SCORPION_ICACHE_ACCESS = 0x10053, + SCORPION_ICACHE_MISS = 0x10052, + + SCORPION_DTLB_ACCESS = 0x12013, + SCORPION_DTLB_MISS = 0x12012, + + SCORPION_ITLB_MISS = 0x12021, +}; + /* * Cortex-A8 HW events mapping * @@ -481,6 +498,49 @@ static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, }; +/* + * Scorpion HW events mapping + */ +static const unsigned scorpion_perf_map[PERF_COUNT_HW_MAX] = { + PERF_MAP_ALL_UNSUPPORTED, + [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, + [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES, +}; + +static const unsigned scorpion_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + PERF_CACHE_MAP_ALL_UNSUPPORTED, + /* + * The performance counters don't differentiate between read and write + * accesses/misses so this isn't strictly correct, but it's the best we + * can do. Writes and reads get combined. + */ + [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, + [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, + [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, + [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, + [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_ICACHE_ACCESS, + [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ICACHE_MISS, + /* + * Only ITLB misses and DTLB refills are supported. If users want the + * DTLB refills misses a raw counter must be used. 
+ */ + [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS, + [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_DTLB_MISS, + [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS, + [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_DTLB_MISS, + [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ITLB_MISS, + [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_ITLB_MISS, + [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, + [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, + [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, +}; + /* * Perf Events' indices */ @@ -976,6 +1036,12 @@ static int krait_map_event_no_branch(struct perf_event *event) &krait_perf_cache_map, 0xFFFFF); } +static int scorpion_map_event(struct perf_event *event) +{ + return armpmu_map_event(event, &scorpion_perf_map, + &scorpion_perf_cache_map, 0xFFFFF); +} + static void armv7pmu_init(struct arm_pmu *cpu_pmu) { cpu_pmu->handle_irq = armv7pmu_handle_irq; @@ -1451,6 +1517,344 @@ static int krait_pmu_init(struct arm_pmu *cpu_pmu) cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx; return 0; } + +/* + * Scorpion Local Performance Monitor Register (LPMn) + * + * 31 30 24 16 8 0 + * +--------------------------------+ + * LPM0 | EN | CC | CC | CC | CC | N = 1, R = 0 + * +--------------------------------+ + * LPM1 | EN | CC | CC | CC | CC | N = 1, R = 1 + * +--------------------------------+ + * LPM2 | EN | CC | CC | CC | CC | N = 1, R = 2 + * +--------------------------------+ + * L2LPM | EN | CC | CC | CC | CC | N = 1, R = 3 + * +--------------------------------+ + * VLPM | EN | CC | CC | CC | CC | N = 2, R = ? + * +--------------------------------+ + * EN | G=3 | G=2 | G=1 | G=0 + * + * + * Event Encoding: + * + * hwc->config_base = 0xNRCCG + * + * N = prefix, 1 for Scorpion CPU (LPMn/L2LPM), 2 for Venum VFP (VLPM) + * R = region register + * CC = class of events the group G is choosing from + * G = group or particular event + * + * Example: 0x12021 is a Scorpion CPU event in LPM2's group 1 with code 2 + * + * A region (R) corresponds to a piece of the CPU (execution unit, instruction + * unit, etc.) while the event code (CC) corresponds to a particular class of + * events (interrupts for example). An event code is broken down into + * groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for + * example). 
+ */ + +static u32 scorpion_read_pmresrn(int n) +{ + u32 val; + + switch (n) { + case 0: + asm volatile("mrc p15, 0, %0, c15, c0, 0" : "=r" (val)); + break; + case 1: + asm volatile("mrc p15, 1, %0, c15, c0, 0" : "=r" (val)); + break; + case 2: + asm volatile("mrc p15, 2, %0, c15, c0, 0" : "=r" (val)); + break; + case 3: + asm volatile("mrc p15, 3, %0, c15, c2, 0" : "=r" (val)); + break; + default: + BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */ + } + + return val; +} + +static void scorpion_write_pmresrn(int n, u32 val) +{ + switch (n) { + case 0: + asm volatile("mcr p15, 0, %0, c15, c0, 0" : : "r" (val)); + break; + case 1: + asm volatile("mcr p15, 1, %0, c15, c0, 0" : : "r" (val)); + break; + case 2: + asm volatile("mcr p15, 2, %0, c15, c0, 0" : : "r" (val)); + break; + case 3: + asm volatile("mcr p15, 3, %0, c15, c2, 0" : : "r" (val)); + break; + default: + BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */ + } +} + +static u32 scorpion_get_pmresrn_event(unsigned int region) +{ + static const u32 pmresrn_table[] = { SCORPION_LPM0_GROUP0, + SCORPION_LPM1_GROUP0, + SCORPION_LPM2_GROUP0, + SCORPION_L2LPM_GROUP0 }; + return pmresrn_table[region]; +} + +static void scorpion_evt_setup(int idx, u32 config_base) +{ + u32 val; + u32 mask; + u32 vval, fval; + unsigned int region = EVENT_REGION(config_base); + unsigned int group = EVENT_GROUP(config_base); + unsigned int code = EVENT_CODE(config_base); + unsigned int group_shift; + bool venum_event = EVENT_VENUM(config_base); + + group_shift = group * 8; + mask = 0xff << group_shift; + + /* Configure evtsel for the region and group */ + if (venum_event) + val = SCORPION_VLPM_GROUP0; + else + val = scorpion_get_pmresrn_event(region); + val += group; + /* Mix in mode-exclusion bits */ + val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1); + armv7_pmnc_write_evtsel(idx, val); + + asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0)); + + if (venum_event) { + venum_pre_pmresr(&vval, &fval); + val = venum_read_pmresr(); + val &= ~mask; + val |= code << group_shift; + val |= PMRESRn_EN; + venum_write_pmresr(val); + venum_post_pmresr(vval, fval); + } else { + val = scorpion_read_pmresrn(region); + val &= ~mask; + val |= code << group_shift; + val |= PMRESRn_EN; + scorpion_write_pmresrn(region, val); + } +} + +static void scorpion_clearpmu(u32 config_base) +{ + u32 val; + u32 vval, fval; + unsigned int region = EVENT_REGION(config_base); + unsigned int group = EVENT_GROUP(config_base); + bool venum_event = EVENT_VENUM(config_base); + + if (venum_event) { + venum_pre_pmresr(&vval, &fval); + val = venum_read_pmresr(); + val = clear_pmresrn_group(val, group); + venum_write_pmresr(val); + venum_post_pmresr(vval, fval); + } else { + val = scorpion_read_pmresrn(region); + val = clear_pmresrn_group(val, group); + scorpion_write_pmresrn(region, val); + } +} + +static void scorpion_pmu_disable_event(struct perf_event *event) +{ + unsigned long flags; + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); + + /* Disable counter and interrupt */ + raw_spin_lock_irqsave(&events->pmu_lock, flags); + + /* Disable counter */ + armv7_pmnc_disable_counter(idx); + + /* + * Clear pmresr code (if destined for PMNx counters) + */ + if (hwc->config_base & KRAIT_EVENT_MASK) + scorpion_clearpmu(hwc->config_base); + + /* Disable interrupt for this counter */ + armv7_pmnc_disable_intens(idx); + + 
raw_spin_unlock_irqrestore(&events->pmu_lock, flags); +} + +static void scorpion_pmu_enable_event(struct perf_event *event) +{ + unsigned long flags; + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); + + /* + * Enable counter and interrupt, and set the counter to count + * the event that we're interested in. + */ + raw_spin_lock_irqsave(&events->pmu_lock, flags); + + /* Disable counter */ + armv7_pmnc_disable_counter(idx); + + /* + * Set event (if destined for PMNx counters) + * We don't set the event for the cycle counter because we + * don't have the ability to perform event filtering. + */ + if (hwc->config_base & KRAIT_EVENT_MASK) + scorpion_evt_setup(idx, hwc->config_base); + else if (idx != ARMV7_IDX_CYCLE_COUNTER) + armv7_pmnc_write_evtsel(idx, hwc->config_base); + + /* Enable interrupt for this counter */ + armv7_pmnc_enable_intens(idx); + + /* Enable counter */ + armv7_pmnc_enable_counter(idx); + + raw_spin_unlock_irqrestore(&events->pmu_lock, flags); +} + +static void scorpion_pmu_reset(void *info) +{ + u32 vval, fval; + struct arm_pmu *cpu_pmu = info; + u32 idx, nb_cnt = cpu_pmu->num_events; + + armv7pmu_reset(info); + + /* Clear all pmresrs */ + scorpion_write_pmresrn(0, 0); + scorpion_write_pmresrn(1, 0); + scorpion_write_pmresrn(2, 0); + scorpion_write_pmresrn(3, 0); + + venum_pre_pmresr(&vval, &fval); + venum_write_pmresr(0); + venum_post_pmresr(vval, fval); + + /* Reset PMxEVNCTCR to sane default */ + for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) { + armv7_pmnc_select_counter(idx); + asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0)); + } +} + +static int scorpion_event_to_bit(struct perf_event *event, unsigned int region, + unsigned int group) +{ + int bit; + struct hw_perf_event *hwc = &event->hw; + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + + if (hwc->config_base & VENUM_EVENT) + bit = SCORPION_VLPM_GROUP0; + else + bit = scorpion_get_pmresrn_event(region); + bit -= scorpion_get_pmresrn_event(0); + bit += group; + /* + * Lower bits are reserved for use by the counters (see + * armv7pmu_get_event_idx() for more info) + */ + bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1; + + return bit; +} + +/* + * We check for column exclusion constraints here. + * Two events cant use the same group within a pmresr register. 
+ */ +static int scorpion_pmu_get_event_idx(struct pmu_hw_events *cpuc, + struct perf_event *event) +{ + int idx; + int bit = -1; + struct hw_perf_event *hwc = &event->hw; + unsigned int region = EVENT_REGION(hwc->config_base); + unsigned int group = EVENT_GROUP(hwc->config_base); + bool venum_event = EVENT_VENUM(hwc->config_base); + bool scorpion_event = EVENT_CPU(hwc->config_base); + + if (venum_event || scorpion_event) { + /* Ignore invalid events */ + if (group > 3 || region > 3) + return -EINVAL; + + bit = scorpion_event_to_bit(event, region, group); + if (test_and_set_bit(bit, cpuc->used_mask)) + return -EAGAIN; + } + + idx = armv7pmu_get_event_idx(cpuc, event); + if (idx < 0 && bit >= 0) + clear_bit(bit, cpuc->used_mask); + + return idx; +} + +static void scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc, + struct perf_event *event) +{ + int bit; + struct hw_perf_event *hwc = &event->hw; + unsigned int region = EVENT_REGION(hwc->config_base); + unsigned int group = EVENT_GROUP(hwc->config_base); + bool venum_event = EVENT_VENUM(hwc->config_base); + bool scorpion_event = EVENT_CPU(hwc->config_base); + + if (venum_event || scorpion_event) { + bit = scorpion_event_to_bit(event, region, group); + clear_bit(bit, cpuc->used_mask); + } +} + +static int scorpion_pmu_init(struct arm_pmu *cpu_pmu) +{ + armv7pmu_init(cpu_pmu); + cpu_pmu->name = "armv7_scorpion"; + cpu_pmu->map_event = scorpion_map_event; + cpu_pmu->num_events = armv7_read_num_pmnc_events(); + cpu_pmu->reset = scorpion_pmu_reset; + cpu_pmu->enable = scorpion_pmu_enable_event; + cpu_pmu->disable = scorpion_pmu_disable_event; + cpu_pmu->get_event_idx = scorpion_pmu_get_event_idx; + cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx; + return 0; +} + +static int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu) +{ + armv7pmu_init(cpu_pmu); + cpu_pmu->name = "armv7_scorpion_mp"; + cpu_pmu->map_event = scorpion_map_event; + cpu_pmu->num_events = armv7_read_num_pmnc_events(); + cpu_pmu->reset = scorpion_pmu_reset; + cpu_pmu->enable = scorpion_pmu_enable_event; + cpu_pmu->disable = scorpion_pmu_disable_event; + cpu_pmu->get_event_idx = scorpion_pmu_get_event_idx; + cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx; + return 0; +} #else static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu) { @@ -1491,4 +1895,14 @@ static inline int krait_pmu_init(struct arm_pmu *cpu_pmu) { return -ENODEV; } + +static inline int scorpion_pmu_init(struct arm_pmu *cpu_pmu) +{ + return -ENODEV; +} + +static inline int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu) +{ + return -ENODEV; +} #endif /* CONFIG_CPU_V7 */ -- cgit From e429817b401f095ac483fcb02524b01faf45dad6 Mon Sep 17 00:00:00 2001 From: "Suzuki K. Poulose" Date: Tue, 17 Mar 2015 18:14:58 +0000 Subject: ARM: perf: reject groups spanning multiple hardware PMUs The perf core implicitly rejects events spanning multiple HW PMUs, as in these cases the event->ctx will differ. However this validation is performed after pmu::event_init() is called in perf_init_event(), and thus pmu::event_init() may be called with a group leader from a different HW PMU. The ARM PMU driver does not take this fact into account, and when validating groups assumes that it can call to_arm_pmu(event->pmu) for any HW event. When the event in question is from another HW PMU this is wrong, and results in dereferencing garbage. This patch updates the ARM PMU driver to first test for and reject events from other PMUs, moving the to_arm_pmu and related logic after this test. 
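
For reference, the reordering described above amounts to checking event->pmu before any arm_pmu-specific cast. The condensed sketch below paraphrases the diff that follows; the comments are added for illustration and are not part of the patch itself:

        static int
        validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
                       struct perf_event *event)
        {
                struct arm_pmu *armpmu;

                /* Software events can always share a group with HW events. */
                if (is_software_event(event))
                        return 1;

                /*
                 * Reject events owned by another HW PMU (e.g. a CCI PMU)
                 * before doing anything arm_pmu-specific; the core only
                 * checks pmu->ctx == leader->ctx after pmu->event_init().
                 */
                if (event->pmu != pmu)
                        return 0;

                if (event->state < PERF_EVENT_STATE_OFF)
                        return 1;

                if (event->state == PERF_EVENT_STATE_OFF &&
                    !event->attr.enable_on_exec)
                        return 1;

                /* Only reached for events that really belong to this arm_pmu. */
                armpmu = to_arm_pmu(event->pmu);
                return armpmu->get_event_idx(hw_events, event) >= 0;
        }

This shows the final shape of the function in one place; the actual change is in the diff below.
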
Fixes a crash triggered by perf_fuzzer on Linux-4.0-rc2, with a CCI PMU present: --- CPU: 0 PID: 1527 Comm: perf_fuzzer Not tainted 4.0.0-rc2 #57 Hardware name: ARM-Versatile Express task: bd8484c0 ti: be676000 task.ti: be676000 PC is at 0xbf1bbc90 LR is at validate_event+0x34/0x5c pc : [] lr : [<80016060>] psr: 00000013 ... [<80016060>] (validate_event) from [<80016198>] (validate_group+0x28/0x90) [<80016198>] (validate_group) from [<80016398>] (armpmu_event_init+0x150/0x218) [<80016398>] (armpmu_event_init) from [<800882e4>] (perf_try_init_event+0x30/0x48) [<800882e4>] (perf_try_init_event) from [<8008f544>] (perf_init_event+0x5c/0xf4) [<8008f544>] (perf_init_event) from [<8008f8a8>] (perf_event_alloc+0x2cc/0x35c) [<8008f8a8>] (perf_event_alloc) from [<8009015c>] (SyS_perf_event_open+0x498/0xa70) [<8009015c>] (SyS_perf_event_open) from [<8000e420>] (ret_fast_syscall+0x0/0x34) Code: bf1be000 bf1bb380 802a2664 00000000 (00000002) ---[ end trace 01aff0ff00926a0a ]--- Also cleans up the code to use the arm_pmu only when we know that we are dealing with an arm pmu event. Cc: Will Deacon Acked-by: Mark Rutland Acked-by: Peter Ziljstra (Intel) Signed-off-by: Suzuki K. Poulose Signed-off-by: Will Deacon --- arch/arm/kernel/perf_event.c | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 557e128e4df0..4a86a0133ac3 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -259,20 +259,29 @@ out: } static int -validate_event(struct pmu_hw_events *hw_events, - struct perf_event *event) +validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events, + struct perf_event *event) { - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); + struct arm_pmu *armpmu; if (is_software_event(event)) return 1; + /* + * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The + * core perf code won't check that the pmu->ctx == leader->ctx + * until after pmu->event_init(event). + */ + if (event->pmu != pmu) + return 0; + if (event->state < PERF_EVENT_STATE_OFF) return 1; if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) return 1; + armpmu = to_arm_pmu(event->pmu); return armpmu->get_event_idx(hw_events, event) >= 0; } @@ -288,15 +297,15 @@ validate_group(struct perf_event *event) */ memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask)); - if (!validate_event(&fake_pmu, leader)) + if (!validate_event(event->pmu, &fake_pmu, leader)) return -EINVAL; list_for_each_entry(sibling, &leader->sibling_list, group_entry) { - if (!validate_event(&fake_pmu, sibling)) + if (!validate_event(event->pmu, &fake_pmu, sibling)) return -EINVAL; } - if (!validate_event(&fake_pmu, event)) + if (!validate_event(event->pmu, &fake_pmu, event)) return -EINVAL; return 0; -- cgit From 9fd85eb502a78bd812db58bd1f668b2a06ee30a5 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 6 Mar 2015 11:54:09 +0000 Subject: ARM: pmu: add support for interrupt-affinity property Historically, the PMU devicetree bindings have expected SPIs to be listed in order of *logical* CPU number. This is problematic for bootloaders, especially when the boot CPU (logical ID 0) isn't listed first in the devicetree. This patch adds a new optional property, interrupt-affinity, to the PMU node which allows the interrupt affinity to be described using a list of phandled to CPU nodes, with each entry in the list corresponding to the SPI at the same index in the interrupts property. 
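
To illustrate how an entry in the new property is resolved, here is a minimal sketch of the phandle-to-logical-CPU lookup. The helper name is hypothetical; the real driver open-codes this logic in of_pmu_irq_cfg() in the diff below, using the same kernel APIs:

        /*
         * Hypothetical helper: resolve the i-th "interrupt-affinity" phandle
         * to a logical CPU number. This only restates what of_pmu_irq_cfg()
         * does inline.
         */
        static int pmu_parse_irq_affinity(struct device_node *node, int i)
        {
                struct device_node *dn;
                int cpu;

                dn = of_parse_phandle(node, "interrupt-affinity", i);
                if (!dn)
                        return -EINVAL;

                /* Find the logical CPU matching the referenced CPU node. */
                for_each_possible_cpu(cpu)
                        if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
                                break;

                of_node_put(dn);
                return cpu < nr_cpu_ids ? cpu : -ENODEV;
        }

The resulting CPU number is what lands in cpu_pmu->irq_affinity[i], so the i-th SPI in "interrupts" is requested and affined on that CPU rather than on logical CPU i.
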
Cc: Mark Rutland Signed-off-by: Will Deacon --- arch/arm/kernel/perf_event_cpu.c | 69 ++++++++++++++++++++++++++++++++++++---- 1 file changed, 62 insertions(+), 7 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c index 7eb86e294c68..91c7ba182dcd 100644 --- a/arch/arm/kernel/perf_event_cpu.c +++ b/arch/arm/kernel/perf_event_cpu.c @@ -92,11 +92,16 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu) free_percpu_irq(irq, &hw_events->percpu_pmu); } else { for (i = 0; i < irqs; ++i) { - if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs)) + int cpu = i; + + if (cpu_pmu->irq_affinity) + cpu = cpu_pmu->irq_affinity[i]; + + if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs)) continue; irq = platform_get_irq(pmu_device, i); if (irq >= 0) - free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, i)); + free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu)); } } } @@ -128,32 +133,37 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1); } else { for (i = 0; i < irqs; ++i) { + int cpu = i; + err = 0; irq = platform_get_irq(pmu_device, i); if (irq < 0) continue; + if (cpu_pmu->irq_affinity) + cpu = cpu_pmu->irq_affinity[i]; + /* * If we have a single PMU interrupt that we can't shift, * assume that we're running on a uniprocessor machine and * continue. Otherwise, continue without this interrupt. */ - if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) { + if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) { pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n", - irq, i); + irq, cpu); continue; } err = request_irq(irq, handler, IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu", - per_cpu_ptr(&hw_events->percpu_pmu, i)); + per_cpu_ptr(&hw_events->percpu_pmu, cpu)); if (err) { pr_err("unable to request IRQ%d for ARM PMU counters\n", irq); return err; } - cpumask_set_cpu(i, &cpu_pmu->active_irqs); + cpumask_set_cpu(cpu, &cpu_pmu->active_irqs); } } @@ -291,6 +301,48 @@ static int probe_current_pmu(struct arm_pmu *pmu) return ret; } +static int of_pmu_irq_cfg(struct platform_device *pdev) +{ + int i; + int *irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL); + + if (!irqs) + return -ENOMEM; + + for (i = 0; i < pdev->num_resources; ++i) { + struct device_node *dn; + int cpu; + + dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity", + i); + if (!dn) { + pr_warn("Failed to parse %s/interrupt-affinity[%d]\n", + of_node_full_name(dn), i); + break; + } + + for_each_possible_cpu(cpu) + if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL)) + break; + + of_node_put(dn); + if (cpu >= nr_cpu_ids) { + pr_warn("Failed to find logical CPU for %s\n", + dn->name); + break; + } + + irqs[i] = cpu; + } + + if (i == pdev->num_resources) + cpu_pmu->irq_affinity = irqs; + else + kfree(irqs); + + return 0; +} + static int cpu_pmu_device_probe(struct platform_device *pdev) { const struct of_device_id *of_id; @@ -315,7 +367,10 @@ static int cpu_pmu_device_probe(struct platform_device *pdev) if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) { init_fn = of_id->data; - ret = init_fn(pmu); + + ret = of_pmu_irq_cfg(pdev); + if (!ret) + ret = init_fn(pmu); } else { ret = probe_current_pmu(pmu); } -- cgit
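
As a worked example of the 0xNRCCG raw-event encoding used by the Krait and Scorpion patches above, the standalone user-space sketch below decodes SCORPION_ITLB_MISS (0x12021) with the EVENT_* macros from the first patch. The KRAIT_EVENT/VENUM_EVENT prefix values are reproduced from the driver source, since only KRAIT_EVENT_MASK is visible in the excerpt above:

        /* Demo of the 0xNRCCG encoding described in the LPM comment above. */
        #include <stdio.h>

        #define KRAIT_EVENT             (1 << 16)
        #define VENUM_EVENT             (2 << 16)

        #define EVENT_REGION(event)     (((event) >> 12) & 0xf)         /* R */
        #define EVENT_GROUP(event)      ((event) & 0xf)                 /* G */
        #define EVENT_CODE(event)       (((event) >> 4) & 0xff)         /* CC */
        #define EVENT_VENUM(event)      (!!(event & VENUM_EVENT))       /* N=2 */
        #define EVENT_CPU(event)        (!!(event & KRAIT_EVENT))       /* N=1 */

        int main(void)
        {
                unsigned int ev = 0x12021;      /* SCORPION_ITLB_MISS */

                /* Prints: region=2 code=0x02 group=1 cpu=1 venum=0 */
                printf("region=%u code=0x%02x group=%u cpu=%d venum=%d\n",
                       EVENT_REGION(ev), EVENT_CODE(ev), EVENT_GROUP(ev),
                       EVENT_CPU(ev), EVENT_VENUM(ev));
                return 0;
        }

The output matches the comment in the Scorpion patch: 0x12021 is a Scorpion CPU event in LPM2's group 1 with code 2.
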