From bc8c9da5a24bb5087197820a2ccabd9c37627035 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 20 Jun 2018 01:04:38 +0900 Subject: Documentation/kprobes: Remove jprobes related leftover Remove 'jps' from the document, since jprobe got removed. Signed-off-by: Masami Hiramatsu Acked-by: Thomas Gleixner Cc: Ananth N Mavinakayanahalli Cc: Andrew Morton Cc: Jonathan Corbet Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Steven Rostedt Cc: linux-arch@vger.kernel.org Cc: linux-doc@vger.kernel.org Link: https://lore.kernel.org/lkml/152942427814.15209.6650206464370123166.stgit@devbox Signed-off-by: Ingo Molnar --- Documentation/kprobes.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt index cb3b0de83fc6..33dd6a81fb64 100644 --- a/Documentation/kprobes.txt +++ b/Documentation/kprobes.txt @@ -474,7 +474,7 @@ error occurs during registration, all probes in the array, up to the bad probe, are safely unregistered before the register_*probes function returns. -- kps/rps/jps: an array of pointers to ``*probe`` data structures +- kps/rps: an array of pointers to ``*probe`` data structures - num: the number of the array entries. .. note:: -- cgit From 5a6cf77f5e35e7af35d36a1e7dc21a42f6412e4f Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 20 Jun 2018 01:05:07 +0900 Subject: kprobes: Remove jprobe API implementation Remove functionally empty jprobe API implementations and test cases. Signed-off-by: Masami Hiramatsu Acked-by: Thomas Gleixner Cc: Ananth N Mavinakayanahalli Cc: Andrew Morton Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Steven Rostedt Cc: linux-arch@vger.kernel.org Link: https://lore.kernel.org/lkml/152942430705.15209.2307050500995264322.stgit@devbox Signed-off-by: Ingo Molnar --- include/linux/kprobes.h | 3 -- kernel/kprobes.c | 78 +--------------------------------------- kernel/test_kprobes.c | 94 ------------------------------------------------- lib/Kconfig.debug | 2 +- 4 files changed, 2 insertions(+), 175 deletions(-) diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 9440a2fc8893..b520baa65682 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -389,9 +389,6 @@ int register_kprobe(struct kprobe *p); void unregister_kprobe(struct kprobe *p); int register_kprobes(struct kprobe **kps, int num); void unregister_kprobes(struct kprobe **kps, int num); -int setjmp_pre_handler(struct kprobe *, struct pt_regs *); -int longjmp_break_handler(struct kprobe *, struct pt_regs *); -void jprobe_return(void); unsigned long arch_deref_entry_point(void *); int register_kretprobe(struct kretprobe *rp); diff --git a/kernel/kprobes.c b/kernel/kprobes.c index ea619021d901..69de130595f7 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -1272,7 +1272,7 @@ NOKPROBE_SYMBOL(cleanup_rp_inst); /* * Add the new probe to ap->list. 
Fail if this is the -* second jprobe at the address - two jprobes can't coexist +* second break_handler at the address */ static int add_new_kprobe(struct kprobe *ap, struct kprobe *p) { @@ -1812,77 +1812,6 @@ unsigned long __weak arch_deref_entry_point(void *entry) return (unsigned long)entry; } -#if 0 -int register_jprobes(struct jprobe **jps, int num) -{ - int ret = 0, i; - - if (num <= 0) - return -EINVAL; - - for (i = 0; i < num; i++) { - ret = register_jprobe(jps[i]); - - if (ret < 0) { - if (i > 0) - unregister_jprobes(jps, i); - break; - } - } - - return ret; -} -EXPORT_SYMBOL_GPL(register_jprobes); - -int register_jprobe(struct jprobe *jp) -{ - unsigned long addr, offset; - struct kprobe *kp = &jp->kp; - - /* - * Verify probepoint as well as the jprobe handler are - * valid function entry points. - */ - addr = arch_deref_entry_point(jp->entry); - - if (kallsyms_lookup_size_offset(addr, NULL, &offset) && offset == 0 && - kprobe_on_func_entry(kp->addr, kp->symbol_name, kp->offset)) { - kp->pre_handler = setjmp_pre_handler; - kp->break_handler = longjmp_break_handler; - return register_kprobe(kp); - } - - return -EINVAL; -} -EXPORT_SYMBOL_GPL(register_jprobe); - -void unregister_jprobe(struct jprobe *jp) -{ - unregister_jprobes(&jp, 1); -} -EXPORT_SYMBOL_GPL(unregister_jprobe); - -void unregister_jprobes(struct jprobe **jps, int num) -{ - int i; - - if (num <= 0) - return; - mutex_lock(&kprobe_mutex); - for (i = 0; i < num; i++) - if (__unregister_kprobe_top(&jps[i]->kp) < 0) - jps[i]->kp.addr = NULL; - mutex_unlock(&kprobe_mutex); - - synchronize_sched(); - for (i = 0; i < num; i++) { - if (jps[i]->kp.addr) - __unregister_kprobe_bottom(&jps[i]->kp); - } -} -EXPORT_SYMBOL_GPL(unregister_jprobes); -#endif - #ifdef CONFIG_KRETPROBES /* * This kprobe pre_handler is registered with every kretprobe. When probe @@ -2329,8 +2258,6 @@ static void report_probe(struct seq_file *pi, struct kprobe *p, if (p->pre_handler == pre_handler_kretprobe) kprobe_type = "r"; - else if (p->pre_handler == setjmp_pre_handler) - kprobe_type = "j"; else kprobe_type = "k"; @@ -2637,6 +2564,3 @@ late_initcall(debugfs_kprobe_init); #endif /* CONFIG_DEBUG_FS */ module_init(init_kprobes); - -/* defined in arch/.../kernel/kprobes.c */ -EXPORT_SYMBOL_GPL(jprobe_return); diff --git a/kernel/test_kprobes.c b/kernel/test_kprobes.c index dd53e354f630..7bca480151b0 100644 --- a/kernel/test_kprobes.c +++ b/kernel/test_kprobes.c @@ -162,90 +162,6 @@ static int test_kprobes(void) } -#if 0 -static u32 jph_val; - -static u32 j_kprobe_target(u32 value) -{ - if (preemptible()) { - handler_errors++; - pr_err("jprobe-handler is preemptible\n"); - } - if (value != rand1) { - handler_errors++; - pr_err("incorrect value in jprobe handler\n"); - } - - jph_val = rand1; - jprobe_return(); - return 0; -} - -static struct jprobe jp = { - .entry = j_kprobe_target, - .kp.symbol_name = "kprobe_target" -}; - -static int test_jprobe(void) -{ - int ret; - - ret = register_jprobe(&jp); - if (ret < 0) { - pr_err("register_jprobe returned %d\n", ret); - return ret; - } - - ret = target(rand1); - unregister_jprobe(&jp); - if (jph_val == 0) { - pr_err("jprobe handler not called\n"); - handler_errors++; - } - - return 0; -} - -static struct jprobe jp2 = { - .entry = j_kprobe_target, - .kp.symbol_name = "kprobe_target2" -}; - -static int test_jprobes(void) -{ - int ret; - struct jprobe *jps[2] = {&jp, &jp2}; - - /* addr and flags should be cleard for reusing kprobe. 
*/ - jp.kp.addr = NULL; - jp.kp.flags = 0; - ret = register_jprobes(jps, 2); - if (ret < 0) { - pr_err("register_jprobes returned %d\n", ret); - return ret; - } - - jph_val = 0; - ret = target(rand1); - if (jph_val == 0) { - pr_err("jprobe handler not called\n"); - handler_errors++; - } - - jph_val = 0; - ret = target2(rand1); - if (jph_val == 0) { - pr_err("jprobe handler2 not called\n"); - handler_errors++; - } - unregister_jprobes(jps, 2); - - return 0; -} -#else -#define test_jprobe() (0) -#define test_jprobes() (0) -#endif #ifdef CONFIG_KRETPROBES static u32 krph_val; @@ -383,16 +299,6 @@ int init_test_probes(void) if (ret < 0) errors++; - num_tests++; - ret = test_jprobe(); - if (ret < 0) - errors++; - - num_tests++; - ret = test_jprobes(); - if (ret < 0) - errors++; - #ifdef CONFIG_KRETPROBES num_tests++; ret = test_kretprobe(); diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 8838d1158d19..0b066b3c9284 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1718,7 +1718,7 @@ config KPROBES_SANITY_TEST default n help This option provides for testing basic kprobes functionality on - boot. A sample kprobe, jprobe and kretprobe are inserted and + boot. Samples of kprobe and kretprobe are inserted and verified for functionality. Say N if you are unsure. -- cgit From 80006dbee674f9fa7c20a799d208657a18b5dabf Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 20 Jun 2018 01:05:35 +0900 Subject: kprobes/x86: Remove jprobe implementation Remove arch dependent setjump/longjump functions and unused fields in kprobe_ctlblk for jprobes from arch/x86. Signed-off-by: Masami Hiramatsu Acked-by: Thomas Gleixner Cc: Ananth N Mavinakayanahalli Cc: Andrew Morton Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Steven Rostedt Cc: linux-arch@vger.kernel.org Link: https://lore.kernel.org/lkml/152942433578.15209.14034551799624757792.stgit@devbox Signed-off-by: Ingo Molnar --- arch/x86/include/asm/kprobes.h | 3 -- arch/x86/kernel/kprobes/core.c | 96 ++---------------------------------------- 2 files changed, 3 insertions(+), 96 deletions(-) diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h index 367d99cff426..06782c2efa04 100644 --- a/arch/x86/include/asm/kprobes.h +++ b/arch/x86/include/asm/kprobes.h @@ -111,9 +111,6 @@ struct kprobe_ctlblk { unsigned long kprobe_status; unsigned long kprobe_old_flags; unsigned long kprobe_saved_flags; - unsigned long *jprobe_saved_sp; - struct pt_regs jprobe_saved_regs; - kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE]; struct prev_kprobe prev_kprobe; }; diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 6f4d42377fe5..1b2d1acba810 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -66,8 +66,6 @@ #include "common.h" -void jprobe_return_end(void); - DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); @@ -690,10 +688,9 @@ int kprobe_int3_handler(struct pt_regs *regs) /* * If we have no pre-handler or it returned 0, we * continue with normal processing. If we have a - * pre-handler and it returned non-zero, it prepped - * for calling the break_handler below on re-entry - * for jprobe processing, so get out doing nothing - * more here. + * pre-handler and it returned non-zero, that means + * user handler setup registers to exit to another + * instruction, we must skip the single stepping. 
*/ if (!p->pre_handler || !p->pre_handler(p, regs)) setup_singlestep(p, regs, kcb, 0); @@ -1083,93 +1080,6 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, } NOKPROBE_SYMBOL(kprobe_exceptions_notify); -int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct jprobe *jp = container_of(p, struct jprobe, kp); - unsigned long addr; - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - - kcb->jprobe_saved_regs = *regs; - kcb->jprobe_saved_sp = stack_addr(regs); - addr = (unsigned long)(kcb->jprobe_saved_sp); - - /* - * As Linus pointed out, gcc assumes that the callee - * owns the argument space and could overwrite it, e.g. - * tailcall optimization. So, to be absolutely safe - * we also save and restore enough stack bytes to cover - * the argument area. - * Use __memcpy() to avoid KASAN stack out-of-bounds reports as we copy - * raw stack chunk with redzones: - */ - __memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, MIN_STACK_SIZE(addr)); - regs->ip = (unsigned long)(jp->entry); - - /* - * jprobes use jprobe_return() which skips the normal return - * path of the function, and this messes up the accounting of the - * function graph tracer to get messed up. - * - * Pause function graph tracing while performing the jprobe function. - */ - pause_graph_tracing(); - return 1; -} -NOKPROBE_SYMBOL(setjmp_pre_handler); - -void jprobe_return(void) -{ - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - - /* Unpoison stack redzones in the frames we are going to jump over. */ - kasan_unpoison_stack_above_sp_to(kcb->jprobe_saved_sp); - - asm volatile ( -#ifdef CONFIG_X86_64 - " xchg %%rbx,%%rsp \n" -#else - " xchgl %%ebx,%%esp \n" -#endif - " int3 \n" - " .globl jprobe_return_end\n" - " jprobe_return_end: \n" - " nop \n"::"b" - (kcb->jprobe_saved_sp):"memory"); -} -NOKPROBE_SYMBOL(jprobe_return); -NOKPROBE_SYMBOL(jprobe_return_end); - -int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - u8 *addr = (u8 *) (regs->ip - 1); - struct jprobe *jp = container_of(p, struct jprobe, kp); - void *saved_sp = kcb->jprobe_saved_sp; - - if ((addr > (u8 *) jprobe_return) && - (addr < (u8 *) jprobe_return_end)) { - if (stack_addr(regs) != saved_sp) { - struct pt_regs *saved_regs = &kcb->jprobe_saved_regs; - printk(KERN_ERR - "current sp %p does not match saved sp %p\n", - stack_addr(regs), saved_sp); - printk(KERN_ERR "Saved registers for jprobe %p\n", jp); - show_regs(saved_regs); - printk(KERN_ERR "Current registers\n"); - show_regs(regs); - BUG(); - } - /* It's OK to start function graph tracing again */ - unpause_graph_tracing(); - *regs = kcb->jprobe_saved_regs; - __memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp)); - preempt_enable_no_resched(); - return 1; - } - return 0; -} -NOKPROBE_SYMBOL(longjmp_break_handler); - bool arch_within_kprobe_blacklist(unsigned long addr) { bool is_in_entry_trampoline_section = false; -- cgit From e00f1993e98b464bf530ba6ac5763b2ceccd2a1e Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 20 Jun 2018 01:06:04 +0900 Subject: ARC/kprobes: Remove jprobe implementation Remove arch dependent setjump/longjump functions and unused fields in kprobe_ctlblk for jprobes from arch/arc. 
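[ Illustrative note, not part of any patch in this series: with register_jprobe(), jprobe_return(), setjmp_pre_handler() and longjmp_break_handler() deleted by the patches above, the remaining way to run code at function entry in probe context is a plain kprobe whose pre_handler reads the arguments from pt_regs. A minimal sketch follows; the probed symbol "do_sys_open" and the x86-64 argument register (regs->di) are assumptions, not anything this series prescribes. ]

  #include <linux/kprobes.h>
  #include <linux/module.h>

  static int entry_pre_handler(struct kprobe *p, struct pt_regs *regs)
  {
          /* x86-64 calling convention: first integer argument in %rdi. */
          pr_info("%s entered, arg0=0x%lx\n", p->symbol_name, regs->di);
          return 0;       /* 0: let the arch code single-step as usual */
  }

  static struct kprobe entry_kp = {
          .symbol_name    = "do_sys_open",        /* assumed example target */
          .pre_handler    = entry_pre_handler,
  };

  static int __init entry_probe_init(void)
  {
          return register_kprobe(&entry_kp);
  }

  static void __exit entry_probe_exit(void)
  {
          unregister_kprobe(&entry_kp);
  }

  module_init(entry_probe_init);
  module_exit(entry_probe_exit);
  MODULE_LICENSE("GPL");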
Signed-off-by: Masami Hiramatsu Acked-by: Thomas Gleixner Cc: Ananth N Mavinakayanahalli Cc: Andrew Morton Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Steven Rostedt Cc: Vineet Gupta Cc: linux-arch@vger.kernel.org Cc: linux-snps-arc@lists.infradead.org Link: https://lore.kernel.org/lkml/152942436460.15209.3038881268172249579.stgit@devbox Signed-off-by: Ingo Molnar --- arch/arc/include/asm/kprobes.h | 2 -- arch/arc/kernel/kprobes.c | 38 ++------------------------------------ 2 files changed, 2 insertions(+), 38 deletions(-) diff --git a/arch/arc/include/asm/kprobes.h b/arch/arc/include/asm/kprobes.h index 2e52d18e6bc7..2c1b479d5aea 100644 --- a/arch/arc/include/asm/kprobes.h +++ b/arch/arc/include/asm/kprobes.h @@ -45,8 +45,6 @@ struct prev_kprobe { struct kprobe_ctlblk { unsigned int kprobe_status; - struct pt_regs jprobe_saved_regs; - char jprobes_stack[MAX_STACK_SIZE]; struct prev_kprobe prev_kprobe; }; diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c index 42b05046fad9..7811a6bbe8f0 100644 --- a/arch/arc/kernel/kprobes.c +++ b/arch/arc/kernel/kprobes.c @@ -225,10 +225,8 @@ int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs) /* If we have no pre-handler or it returned 0, we continue with * normal processing. If we have a pre-handler and it returned - * non-zero - which is expected from setjmp_pre_handler for - * jprobe, we return without single stepping and leave that to - * the break-handler which is invoked by a kprobe from - * jprobe_return + * non-zero - which means user handler setup registers to exit + * to another instruction, we must skip the single stepping. */ if (!p->pre_handler || !p->pre_handler(p, regs)) { setup_singlestep(p, regs); @@ -386,38 +384,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, return ret; } -int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct jprobe *jp = container_of(p, struct jprobe, kp); - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - unsigned long sp_addr = regs->sp; - - kcb->jprobe_saved_regs = *regs; - memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr)); - regs->ret = (unsigned long)(jp->entry); - - return 1; -} - -void __kprobes jprobe_return(void) -{ - __asm__ __volatile__("unimp_s"); - return; -} - -int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - unsigned long sp_addr; - - *regs = kcb->jprobe_saved_regs; - sp_addr = regs->sp; - memcpy((void *)sp_addr, kcb->jprobes_stack, MIN_STACK_SIZE(sp_addr)); - preempt_enable_no_resched(); - - return 1; -} - static void __used kretprobe_trampoline_holder(void) { __asm__ __volatile__(".global kretprobe_trampoline\n" -- cgit From 454f3e132d0520472558518df61ee14db1dbef88 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 20 Jun 2018 01:06:34 +0900 Subject: ARM/kprobes: Remove jprobe arm implementation Remove arch dependent setjump/longjump functions and unused fields in kprobe_ctlblk for jprobes from arch/arm. 
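[ Illustrative note, not part of the patches: where a former jprobe user also wants the return value, a kretprobe with an entry_handler covers the same ground. The entry_handler samples the arguments from pt_regs into the per-instance data area, and the return handler pairs them with regs_return_value(). The symbol name and the x86-64 register usage below are assumptions. ]

  #include <linux/kprobes.h>
  #include <linux/module.h>
  #include <linux/ptrace.h>

  struct entry_data {
          unsigned long arg0;     /* filled at entry, read at return */
  };

  static int rp_entry(struct kretprobe_instance *ri, struct pt_regs *regs)
  {
          struct entry_data *d = (struct entry_data *)ri->data;

          d->arg0 = regs->di;     /* x86-64 assumption: arg0 in %rdi */
          return 0;               /* 0: also invoke the return handler */
  }

  static int rp_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
  {
          struct entry_data *d = (struct entry_data *)ri->data;

          pr_info("arg0=0x%lx returned %ld\n", d->arg0,
                  (long)regs_return_value(regs));
          return 0;
  }

  static struct kretprobe my_rp = {
          .kp.symbol_name = "do_sys_open",        /* assumed example target */
          .entry_handler  = rp_entry,
          .handler        = rp_ret,
          .data_size      = sizeof(struct entry_data),
          .maxactive      = 16,
  };

  static int __init rp_init(void)
  {
          return register_kretprobe(&my_rp);
  }

  static void __exit rp_exit(void)
  {
          unregister_kretprobe(&my_rp);
  }

  module_init(rp_init);
  module_exit(rp_exit);
  MODULE_LICENSE("GPL");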
Signed-off-by: Masami Hiramatsu Acked-by: Thomas Gleixner Cc: Ananth N Mavinakayanahalli Cc: Andrew Morton Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Russell King Cc: Steven Rostedt Cc: linux-arch@vger.kernel.org Cc: linux-arm-kernel@lists.infradead.org Link: https://lore.kernel.org/lkml/152942439350.15209.11127640848082283736.stgit@devbox Signed-off-by: Ingo Molnar --- arch/arm/include/asm/kprobes.h | 2 - arch/arm/include/asm/probes.h | 1 - arch/arm/probes/kprobes/core.c | 114 ----------------------------------------- 3 files changed, 117 deletions(-) diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h index 59655459da59..82290f212d8e 100644 --- a/arch/arm/include/asm/kprobes.h +++ b/arch/arm/include/asm/kprobes.h @@ -44,8 +44,6 @@ struct prev_kprobe { struct kprobe_ctlblk { unsigned int kprobe_status; struct prev_kprobe prev_kprobe; - struct pt_regs jprobe_saved_regs; - char jprobes_stack[MAX_STACK_SIZE]; }; void arch_remove_kprobe(struct kprobe *); diff --git a/arch/arm/include/asm/probes.h b/arch/arm/include/asm/probes.h index 1e5b9bb92270..991c9127c650 100644 --- a/arch/arm/include/asm/probes.h +++ b/arch/arm/include/asm/probes.h @@ -51,7 +51,6 @@ struct arch_probes_insn { * We assume one instruction can consume at most 64 bytes stack, which is * 'push {r0-r15}'. Instructions consume more or unknown stack space like * 'str r0, [sp, #-80]' and 'str r0, [sp, r1]' should be prohibit to probe. - * Both kprobe and jprobe use this macro. */ #define MAX_STACK_SIZE 64 diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c index e90cc8a08186..23562111c511 100644 --- a/arch/arm/probes/kprobes/core.c +++ b/arch/arm/probes/kprobes/core.c @@ -47,9 +47,6 @@ (unsigned long)(addr) + \ (size)) -/* Used as a marker in ARM_pc to note when we're in a jprobe. */ -#define JPROBE_MAGIC_ADDR 0xffffffff - DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); @@ -521,117 +518,6 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, regs->ARM_lr = (unsigned long)&kretprobe_trampoline; } -int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct jprobe *jp = container_of(p, struct jprobe, kp); - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - long sp_addr = regs->ARM_sp; - long cpsr; - - kcb->jprobe_saved_regs = *regs; - memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr)); - regs->ARM_pc = (long)jp->entry; - - cpsr = regs->ARM_cpsr | PSR_I_BIT; -#ifdef CONFIG_THUMB2_KERNEL - /* Set correct Thumb state in cpsr */ - if (regs->ARM_pc & 1) - cpsr |= PSR_T_BIT; - else - cpsr &= ~PSR_T_BIT; -#endif - regs->ARM_cpsr = cpsr; - - preempt_disable(); - return 1; -} - -void __kprobes jprobe_return(void) -{ - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - - __asm__ __volatile__ ( - /* - * Setup an empty pt_regs. Fill SP and PC fields as - * they're needed by longjmp_break_handler. - * - * We allocate some slack between the original SP and start of - * our fabricated regs. To be precise we want to have worst case - * covered which is STMFD with all 16 regs so we allocate 2 * - * sizeof(struct_pt_regs)). - * - * This is to prevent any simulated instruction from writing - * over the regs when they are accessing the stack. 
- */ -#ifdef CONFIG_THUMB2_KERNEL - "sub r0, %0, %1 \n\t" - "mov sp, r0 \n\t" -#else - "sub sp, %0, %1 \n\t" -#endif - "ldr r0, ="__stringify(JPROBE_MAGIC_ADDR)"\n\t" - "str %0, [sp, %2] \n\t" - "str r0, [sp, %3] \n\t" - "mov r0, sp \n\t" - "bl kprobe_handler \n\t" - - /* - * Return to the context saved by setjmp_pre_handler - * and restored by longjmp_break_handler. - */ -#ifdef CONFIG_THUMB2_KERNEL - "ldr lr, [sp, %2] \n\t" /* lr = saved sp */ - "ldrd r0, r1, [sp, %5] \n\t" /* r0,r1 = saved lr,pc */ - "ldr r2, [sp, %4] \n\t" /* r2 = saved psr */ - "stmdb lr!, {r0, r1, r2} \n\t" /* push saved lr and */ - /* rfe context */ - "ldmia sp, {r0 - r12} \n\t" - "mov sp, lr \n\t" - "ldr lr, [sp], #4 \n\t" - "rfeia sp! \n\t" -#else - "ldr r0, [sp, %4] \n\t" - "msr cpsr_cxsf, r0 \n\t" - "ldmia sp, {r0 - pc} \n\t" -#endif - : - : "r" (kcb->jprobe_saved_regs.ARM_sp), - "I" (sizeof(struct pt_regs) * 2), - "J" (offsetof(struct pt_regs, ARM_sp)), - "J" (offsetof(struct pt_regs, ARM_pc)), - "J" (offsetof(struct pt_regs, ARM_cpsr)), - "J" (offsetof(struct pt_regs, ARM_lr)) - : "memory", "cc"); -} - -int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - long stack_addr = kcb->jprobe_saved_regs.ARM_sp; - long orig_sp = regs->ARM_sp; - struct jprobe *jp = container_of(p, struct jprobe, kp); - - if (regs->ARM_pc == JPROBE_MAGIC_ADDR) { - if (orig_sp != stack_addr) { - struct pt_regs *saved_regs = - (struct pt_regs *)kcb->jprobe_saved_regs.ARM_sp; - printk("current sp %lx does not match saved sp %lx\n", - orig_sp, stack_addr); - printk("Saved registers for jprobe %p\n", jp); - show_regs(saved_regs); - printk("Current registers\n"); - show_regs(regs); - BUG(); - } - *regs = kcb->jprobe_saved_regs; - memcpy((void *)stack_addr, kcb->jprobes_stack, - MIN_STACK_SIZE(stack_addr)); - preempt_enable_no_resched(); - return 1; - } - return 0; -} - int __kprobes arch_trampoline_kprobe(struct kprobe *p) { return 0; -- cgit From 2efb75cd712d8311a2d0c1ba041ded191398bca7 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 20 Jun 2018 01:07:03 +0900 Subject: arm64/kprobes: Remove jprobe implementation Remove arch dependent setjump/longjump functions and unused fields in kprobe_ctlblk for jprobes from arch/arm64. 
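[ Illustrative note, not part of the patches: the first patch in this series fixes the documentation of the batch registration API, register_kprobes()/unregister_kprobes(), which takes an array of probe pointers and, per that documentation, safely unregisters the already-registered entries if a later one fails. A hedged sketch of that API; both symbol names are made-up examples. ]

  #include <linux/kernel.h>
  #include <linux/kprobes.h>
  #include <linux/module.h>

  static int trace_pre(struct kprobe *p, struct pt_regs *regs)
  {
          pr_info("hit %s\n", p->symbol_name);
          return 0;
  }

  static struct kprobe kp_open = {
          .symbol_name    = "do_sys_open",        /* assumed */
          .pre_handler    = trace_pre,
  };

  static struct kprobe kp_filp = {
          .symbol_name    = "do_filp_open",       /* assumed */
          .pre_handler    = trace_pre,
  };

  static struct kprobe *kps[] = { &kp_open, &kp_filp };

  static int __init batch_init(void)
  {
          /* Registers all entries, or leaves none armed if any fails. */
          return register_kprobes(kps, ARRAY_SIZE(kps));
  }

  static void __exit batch_exit(void)
  {
          unregister_kprobes(kps, ARRAY_SIZE(kps));
  }

  module_init(batch_init);
  module_exit(batch_exit);
  MODULE_LICENSE("GPL");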
Signed-off-by: Masami Hiramatsu Acked-by: Thomas Gleixner Acked-by: Will Deacon Cc: Ananth N Mavinakayanahalli Cc: Andrew Morton Cc: Catalin Marinas Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Steven Rostedt Cc: linux-arch@vger.kernel.org Cc: linux-arm-kernel@lists.infradead.org Link: https://lore.kernel.org/lkml/152942442318.15209.17767976282305601884.stgit@devbox Signed-off-by: Ingo Molnar --- arch/arm64/include/asm/kprobes.h | 1 - arch/arm64/kernel/probes/kprobes.c | 68 -------------------------------------- 2 files changed, 69 deletions(-) diff --git a/arch/arm64/include/asm/kprobes.h b/arch/arm64/include/asm/kprobes.h index 6deb8d726041..d5a44cf859e9 100644 --- a/arch/arm64/include/asm/kprobes.h +++ b/arch/arm64/include/asm/kprobes.h @@ -48,7 +48,6 @@ struct kprobe_ctlblk { unsigned long saved_irqflag; struct prev_kprobe prev_kprobe; struct kprobe_step_ctx ss_ctx; - struct pt_regs jprobe_saved_regs; }; void arch_remove_kprobe(struct kprobe *); diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index d849d9804011..3ca2351109a6 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -465,74 +465,6 @@ kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr) return DBG_HOOK_HANDLED; } -int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct jprobe *jp = container_of(p, struct jprobe, kp); - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - - kcb->jprobe_saved_regs = *regs; - /* - * Since we can't be sure where in the stack frame "stacked" - * pass-by-value arguments are stored we just don't try to - * duplicate any of the stack. Do not use jprobes on functions that - * use more than 64 bytes (after padding each to an 8 byte boundary) - * of arguments, or pass individual arguments larger than 16 bytes. - */ - - instruction_pointer_set(regs, (unsigned long) jp->entry); - preempt_disable(); - pause_graph_tracing(); - return 1; -} - -void __kprobes jprobe_return(void) -{ - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - - /* - * Jprobe handler return by entering break exception, - * encoded same as kprobe, but with following conditions - * -a special PC to identify it from the other kprobes. 
- * -restore stack addr to original saved pt_regs - */ - asm volatile(" mov sp, %0 \n" - "jprobe_return_break: brk %1 \n" - : - : "r" (kcb->jprobe_saved_regs.sp), - "I" (BRK64_ESR_KPROBES) - : "memory"); - - unreachable(); -} - -int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - long stack_addr = kcb->jprobe_saved_regs.sp; - long orig_sp = kernel_stack_pointer(regs); - struct jprobe *jp = container_of(p, struct jprobe, kp); - extern const char jprobe_return_break[]; - - if (instruction_pointer(regs) != (u64) jprobe_return_break) - return 0; - - if (orig_sp != stack_addr) { - struct pt_regs *saved_regs = - (struct pt_regs *)kcb->jprobe_saved_regs.sp; - pr_err("current sp %lx does not match saved sp %lx\n", - orig_sp, stack_addr); - pr_err("Saved registers for jprobe %p\n", jp); - __show_regs(saved_regs); - pr_err("Current registers\n"); - __show_regs(regs); - BUG(); - } - unpause_graph_tracing(); - *regs = kcb->jprobe_saved_regs; - preempt_enable_no_resched(); - return 1; -} - bool arch_within_kprobe_blacklist(unsigned long addr) { if ((addr >= (unsigned long)__kprobes_text_start && -- cgit From c530e2f02ef8700ec59fed175e800a2a97695350 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 20 Jun 2018 01:07:32 +0900 Subject: powerpc/kprobes: Remove jprobe powerpc implementation Remove arch dependent setjump/longjump functions and unused fields in kprobe_ctlblk for jprobes from arch/powerpc. This also reverts commits related __is_active_jprobe() function. Signed-off-by: Masami Hiramatsu Acked-by: Thomas Gleixner Acked-by: Naveen N. Rao Cc: Ananth N Mavinakayanahalli Cc: Andrew Morton Cc: Benjamin Herrenschmidt Cc: Linus Torvalds Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Steven Rostedt Cc: linux-arch@vger.kernel.org Cc: linuxppc-dev@lists.ozlabs.org Link: https://lore.kernel.org/lkml/152942445234.15209.12868722778364739753.stgit@devbox Signed-off-by: Ingo Molnar --- arch/powerpc/include/asm/kprobes.h | 2 - arch/powerpc/kernel/kprobes-ftrace.c | 15 ------- arch/powerpc/kernel/kprobes.c | 54 -------------------------- arch/powerpc/kernel/trace/ftrace_64_mprofile.S | 39 +++---------------- 4 files changed, 5 insertions(+), 105 deletions(-) diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h index 9f3be5c8a4a3..674036db558b 100644 --- a/arch/powerpc/include/asm/kprobes.h +++ b/arch/powerpc/include/asm/kprobes.h @@ -88,7 +88,6 @@ struct prev_kprobe { struct kprobe_ctlblk { unsigned long kprobe_status; unsigned long kprobe_saved_msr; - struct pt_regs jprobe_saved_regs; struct prev_kprobe prev_kprobe; }; @@ -104,7 +103,6 @@ extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr); extern int kprobe_handler(struct pt_regs *regs); extern int kprobe_post_handler(struct pt_regs *regs); #ifdef CONFIG_KPROBES_ON_FTRACE -extern int __is_active_jprobe(unsigned long addr); extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb); #else diff --git a/arch/powerpc/kernel/kprobes-ftrace.c b/arch/powerpc/kernel/kprobes-ftrace.c index 7a1f99f1b47f..1b316331c2d9 100644 --- a/arch/powerpc/kernel/kprobes-ftrace.c +++ b/arch/powerpc/kernel/kprobes-ftrace.c @@ -25,21 +25,6 @@ #include #include -/* - * This is called from ftrace code after invoking registered handlers to - * disambiguate regs->nip changes done by jprobes and livepatch. We check if - * there is an active jprobe at the provided address (mcount location). 
- */ -int __is_active_jprobe(unsigned long addr) -{ - if (!preemptible()) { - struct kprobe *p = raw_cpu_read(current_kprobe); - return (p && (unsigned long)p->addr == addr) ? 1 : 0; - } - - return 0; -} - static nokprobe_inline int __skip_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb, unsigned long orig_nip) diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index e4c5bf33970b..600678fce0a8 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -611,60 +611,6 @@ unsigned long arch_deref_entry_point(void *entry) } NOKPROBE_SYMBOL(arch_deref_entry_point); -int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct jprobe *jp = container_of(p, struct jprobe, kp); - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - - memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs)); - - /* setup return addr to the jprobe handler routine */ - regs->nip = arch_deref_entry_point(jp->entry); -#ifdef PPC64_ELF_ABI_v2 - regs->gpr[12] = (unsigned long)jp->entry; -#elif defined(PPC64_ELF_ABI_v1) - regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc); -#endif - - /* - * jprobes use jprobe_return() which skips the normal return - * path of the function, and this messes up the accounting of the - * function graph tracer. - * - * Pause function graph tracing while performing the jprobe function. - */ - pause_graph_tracing(); - - return 1; -} -NOKPROBE_SYMBOL(setjmp_pre_handler); - -void __used jprobe_return(void) -{ - asm volatile("jprobe_return_trap:\n" - "trap\n" - ::: "memory"); -} -NOKPROBE_SYMBOL(jprobe_return); - -int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - - if (regs->nip != ppc_kallsyms_lookup_name("jprobe_return_trap")) { - pr_debug("longjmp_break_handler NIP (0x%lx) does not match jprobe_return_trap (0x%lx)\n", - regs->nip, ppc_kallsyms_lookup_name("jprobe_return_trap")); - return 0; - } - - memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs)); - /* It's OK to start function graph tracing again */ - unpause_graph_tracing(); - preempt_enable_no_resched(); - return 1; -} -NOKPROBE_SYMBOL(longjmp_break_handler); - static struct kprobe trampoline_p = { .addr = (kprobe_opcode_t *) &kretprobe_trampoline, .pre_handler = trampoline_probe_handler diff --git a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S index 9a5b5a513604..32476a6e4e9c 100644 --- a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S +++ b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S @@ -104,39 +104,13 @@ ftrace_regs_call: bl ftrace_stub nop - /* Load the possibly modified NIP */ - ld r15, _NIP(r1) - + /* Load ctr with the possibly modified NIP */ + ld r3, _NIP(r1) + mtctr r3 #ifdef CONFIG_LIVEPATCH - cmpd r14, r15 /* has NIP been altered? */ + cmpd r14, r3 /* has NIP been altered? */ #endif -#if defined(CONFIG_LIVEPATCH) && defined(CONFIG_KPROBES_ON_FTRACE) - /* NIP has not been altered, skip over further checks */ - beq 1f - - /* Check if there is an active jprobe on us */ - subi r3, r14, 4 - bl __is_active_jprobe - nop - - /* - * If r3 == 1, then this is a kprobe/jprobe. - * else, this is livepatched function. - * - * The conditional branch for livepatch_handler below will use the - * result of this comparison. For kprobe/jprobe, we just need to branch to - * the new NIP, not call livepatch_handler. The branch below is bne, so we - * want CR0[EQ] to be true if this is a kprobe/jprobe. 
Which means we want - * CR0[EQ] = (r3 == 1). - */ - cmpdi r3, 1 -1: -#endif - - /* Load CTR with the possibly modified NIP */ - mtctr r15 - /* Restore gprs */ REST_GPR(0,r1) REST_10GPRS(2,r1) @@ -154,10 +128,7 @@ ftrace_regs_call: addi r1, r1, SWITCH_FRAME_SIZE #ifdef CONFIG_LIVEPATCH - /* - * Based on the cmpd or cmpdi above, if the NIP was altered and we're - * not on a kprobe/jprobe, then handle livepatch. - */ + /* Based on the cmpd above, if the NIP was altered handle livepatch */ bne- livepatch_handler #endif -- cgit From 0aeaf6b3a34588050a32d88177e18a1909bbb994 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 20 Jun 2018 01:08:01 +0900 Subject: ia64/kprobes: Remove jprobe implementation Remove arch dependent setjump/longjump functions and unused fields in kprobe_ctlblk for jprobes from arch/ia64. Note that since ia64 jprobes code is a bit different from other architectures, this keeps __IA64_BREAK_JPROBE for checking the ->break_handler(). It will be removed with the break_handler() calls afterwards. Signed-off-by: Masami Hiramatsu Acked-by: Thomas Gleixner Cc: Ananth N Mavinakayanahalli Cc: Andrew Morton Cc: Fenghua Yu Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Steven Rostedt Cc: Tony Luck Cc: linux-arch@vger.kernel.org Cc: linux-ia64@vger.kernel.org Link: https://lore.kernel.org/lkml/152942448152.15209.2026051332977587306.stgit@devbox Signed-off-by: Ingo Molnar --- arch/ia64/include/asm/kprobes.h | 2 - arch/ia64/kernel/Makefile | 2 +- arch/ia64/kernel/jprobes.S | 90 ----------------------------------------- arch/ia64/kernel/kprobes.c | 70 -------------------------------- 4 files changed, 1 insertion(+), 163 deletions(-) delete mode 100644 arch/ia64/kernel/jprobes.S diff --git a/arch/ia64/include/asm/kprobes.h b/arch/ia64/include/asm/kprobes.h index 0302b3664789..580356a2eea6 100644 --- a/arch/ia64/include/asm/kprobes.h +++ b/arch/ia64/include/asm/kprobes.h @@ -82,8 +82,6 @@ struct prev_kprobe { #define ARCH_PREV_KPROBE_SZ 2 struct kprobe_ctlblk { unsigned long kprobe_status; - struct pt_regs jprobe_saved_regs; - unsigned long jprobes_saved_stacked_regs[MAX_PARAM_RSE_SIZE]; unsigned long *bsp; unsigned long cfm; atomic_t prev_kprobe_index; diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index 498f3da3f225..d0c0ccdd656a 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile @@ -25,7 +25,7 @@ obj-$(CONFIG_NUMA) += numa.o obj-$(CONFIG_PERFMON) += perfmon_default_smpl.o obj-$(CONFIG_IA64_CYCLONE) += cyclone.o obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o -obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o +obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o diff --git a/arch/ia64/kernel/jprobes.S b/arch/ia64/kernel/jprobes.S deleted file mode 100644 index f69389c7be1d..000000000000 --- a/arch/ia64/kernel/jprobes.S +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Jprobe specific operations - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * Copyright (C) Intel Corporation, 2005 - * - * 2005-May Rusty Lynch and Anil S Keshavamurthy - * initial implementation - * - * Jprobes (a.k.a. "jump probes" which is built on-top of kprobes) allow a - * probe to be inserted into the beginning of a function call. The fundamental - * difference between a jprobe and a kprobe is the jprobe handler is executed - * in the same context as the target function, while the kprobe handlers - * are executed in interrupt context. - * - * For jprobes we initially gain control by placing a break point in the - * first instruction of the targeted function. When we catch that specific - * break, we: - * * set the return address to our jprobe_inst_return() function - * * jump to the jprobe handler function - * - * Since we fixed up the return address, the jprobe handler will return to our - * jprobe_inst_return() function, giving us control again. At this point we - * are back in the parents frame marker, so we do yet another call to our - * jprobe_break() function to fix up the frame marker as it would normally - * exist in the target function. - * - * Our jprobe_return function then transfers control back to kprobes.c by - * executing a break instruction using one of our reserved numbers. When we - * catch that break in kprobes.c, we continue like we do for a normal kprobe - * by single stepping the emulated instruction, and then returning execution - * to the correct location. - */ -#include -#include - - /* - * void jprobe_break(void) - */ - .section .kprobes.text, "ax" -ENTRY(jprobe_break) - break.m __IA64_BREAK_JPROBE -END(jprobe_break) - - /* - * void jprobe_inst_return(void) - */ -GLOBAL_ENTRY(jprobe_inst_return) - br.call.sptk.many b0=jprobe_break -END(jprobe_inst_return) - -GLOBAL_ENTRY(invalidate_stacked_regs) - movl r16=invalidate_restore_cfm - ;; - mov b6=r16 - ;; - br.ret.sptk.many b6 - ;; -invalidate_restore_cfm: - mov r16=ar.rsc - ;; - mov ar.rsc=r0 - ;; - loadrs - ;; - mov ar.rsc=r16 - ;; - br.cond.sptk.many rp -END(invalidate_stacked_regs) - -GLOBAL_ENTRY(flush_register_stack) - // flush dirty regs to backing store (must be first in insn group) - flushrs - ;; - br.ret.sptk.many rp -END(flush_register_stack) - diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index f5f3a5e6fcd1..823e4bd03a18 100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c @@ -35,8 +35,6 @@ #include #include -extern void jprobe_inst_return(void); - DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); @@ -1040,74 +1038,6 @@ unsigned long arch_deref_entry_point(void *entry) return ((struct fnptr *)entry)->ip; } -int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct jprobe *jp = container_of(p, struct jprobe, kp); - unsigned long addr = arch_deref_entry_point(jp->entry); - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - struct param_bsp_cfm pa; - int bytes; - - /* - * Callee owns the argument space and could overwrite it, eg - * tail call optimization. 
So to be absolutely safe - * we save the argument space before transferring the control - * to instrumented jprobe function which runs in - * the process context - */ - pa.ip = regs->cr_iip; - unw_init_running(ia64_get_bsp_cfm, &pa); - bytes = (char *)ia64_rse_skip_regs(pa.bsp, pa.cfm & 0x3f) - - (char *)pa.bsp; - memcpy( kcb->jprobes_saved_stacked_regs, - pa.bsp, - bytes ); - kcb->bsp = pa.bsp; - kcb->cfm = pa.cfm; - - /* save architectural state */ - kcb->jprobe_saved_regs = *regs; - - /* after rfi, execute the jprobe instrumented function */ - regs->cr_iip = addr & ~0xFULL; - ia64_psr(regs)->ri = addr & 0xf; - regs->r1 = ((struct fnptr *)(jp->entry))->gp; - - /* - * fix the return address to our jprobe_inst_return() function - * in the jprobes.S file - */ - regs->b0 = ((struct fnptr *)(jprobe_inst_return))->ip; - - return 1; -} - -/* ia64 does not need this */ -void __kprobes jprobe_return(void) -{ -} - -int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - int bytes; - - /* restoring architectural state */ - *regs = kcb->jprobe_saved_regs; - - /* restoring the original argument space */ - flush_register_stack(); - bytes = (char *)ia64_rse_skip_regs(kcb->bsp, kcb->cfm & 0x3f) - - (char *)kcb->bsp; - memcpy( kcb->bsp, - kcb->jprobes_saved_stacked_regs, - bytes ); - invalidate_stacked_regs(); - - preempt_enable_no_resched(); - return 1; -} - static struct kprobe trampoline_p = { .pre_handler = trampoline_probe_handler }; -- cgit From 8c2c3f2d40b6c4f40eddc099916dffac6171bf2e Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 20 Jun 2018 01:08:30 +0900 Subject: MIPS/kprobes: Remove jprobe implementation Remove arch dependent setjump/longjump functions and unused fields in kprobe_ctlblk for jprobes from arch/mips. Signed-off-by: Masami Hiramatsu Acked-by: Thomas Gleixner Cc: Ananth N Mavinakayanahalli Cc: Andrew Morton Cc: James Hogan Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Ralf Baechle Cc: Steven Rostedt Cc: linux-arch@vger.kernel.org Cc: linux-mips@linux-mips.org Link: https://lore.kernel.org/lkml/152942451058.15209.3459785416221980965.stgit@devbox Signed-off-by: Ingo Molnar --- arch/mips/include/asm/kprobes.h | 13 ------------ arch/mips/kernel/kprobes.c | 45 ----------------------------------------- 2 files changed, 58 deletions(-) diff --git a/arch/mips/include/asm/kprobes.h b/arch/mips/include/asm/kprobes.h index ad1a99948f27..a72dfbf1babb 100644 --- a/arch/mips/include/asm/kprobes.h +++ b/arch/mips/include/asm/kprobes.h @@ -68,16 +68,6 @@ struct prev_kprobe { unsigned long saved_epc; }; -#define MAX_JPROBES_STACK_SIZE 128 -#define MAX_JPROBES_STACK_ADDR \ - (((unsigned long)current_thread_info()) + THREAD_SIZE - 32 - sizeof(struct pt_regs)) - -#define MIN_JPROBES_STACK_SIZE(ADDR) \ - ((((ADDR) + MAX_JPROBES_STACK_SIZE) > MAX_JPROBES_STACK_ADDR) \ - ? 
MAX_JPROBES_STACK_ADDR - (ADDR) \ - : MAX_JPROBES_STACK_SIZE) - - #define SKIP_DELAYSLOT 0x0001 /* per-cpu kprobe control block */ @@ -86,12 +76,9 @@ struct kprobe_ctlblk { unsigned long kprobe_old_SR; unsigned long kprobe_saved_SR; unsigned long kprobe_saved_epc; - unsigned long jprobe_saved_sp; - struct pt_regs jprobe_saved_regs; /* Per-thread fields, used while emulating branches */ unsigned long flags; unsigned long target_epc; - u8 jprobes_stack[MAX_JPROBES_STACK_SIZE]; struct prev_kprobe prev_kprobe; }; diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c index f5c8bce70db2..efdcd0b1ce12 100644 --- a/arch/mips/kernel/kprobes.c +++ b/arch/mips/kernel/kprobes.c @@ -468,51 +468,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, return ret; } -int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct jprobe *jp = container_of(p, struct jprobe, kp); - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - - kcb->jprobe_saved_regs = *regs; - kcb->jprobe_saved_sp = regs->regs[29]; - - memcpy(kcb->jprobes_stack, (void *)kcb->jprobe_saved_sp, - MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp)); - - regs->cp0_epc = (unsigned long)(jp->entry); - - return 1; -} - -/* Defined in the inline asm below. */ -void jprobe_return_end(void); - -void __kprobes jprobe_return(void) -{ - /* Assembler quirk necessitates this '0,code' business. */ - asm volatile( - "break 0,%0\n\t" - ".globl jprobe_return_end\n" - "jprobe_return_end:\n" - : : "n" (BRK_KPROBE_BP) : "memory"); -} - -int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - - if (regs->cp0_epc >= (unsigned long)jprobe_return && - regs->cp0_epc <= (unsigned long)jprobe_return_end) { - *regs = kcb->jprobe_saved_regs; - memcpy((void *)kcb->jprobe_saved_sp, kcb->jprobes_stack, - MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp)); - preempt_enable_no_resched(); - - return 1; - } - return 0; -} - /* * Function return probe trampoline: * - init_kprobes() establishes a probepoint here -- cgit From fc682f7ba30e89aa49386fd7e427e3d083b7ae58 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 20 Jun 2018 01:08:59 +0900 Subject: s390/kprobes: Remove jprobe implementation Remove arch dependent setjump/longjump functions and unused fields in kprobe_ctlblk for jprobes from arch/s390. 
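[ Illustrative note, not part of the patches: the handlers that survive these removals stay tagged with NOKPROBE_SYMBOL() or __kprobes so that code reachable from probe context is blacklisted from being probed itself. Handler modules can use the same annotation for helpers invoked from a pre_handler; the helper and counter below are invented for the example. ]

  #include <linux/kprobes.h>

  static unsigned long nhits;

  static void record_hit(unsigned long ip)
  {
          nhits++;        /* illustrative bookkeeping only */
  }
  /* Runs in probe context: keep it off-limits for other kprobes. */
  NOKPROBE_SYMBOL(record_hit);

  static int counting_pre(struct kprobe *p, struct pt_regs *regs)
  {
          record_hit((unsigned long)p->addr);
          return 0;
  }
  NOKPROBE_SYMBOL(counting_pre);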
Signed-off-by: Masami Hiramatsu Acked-by: Thomas Gleixner Cc: Ananth N Mavinakayanahalli Cc: Andrew Morton Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Peter Zijlstra Cc: Steven Rostedt Cc: linux-arch@vger.kernel.org Cc: linux-s390@vger.kernel.org Link: https://lore.kernel.org/lkml/152942453967.15209.8535227381029256754.stgit@devbox Signed-off-by: Ingo Molnar --- arch/s390/include/asm/kprobes.h | 2 -- arch/s390/kernel/kprobes.c | 59 ++--------------------------------------- 2 files changed, 2 insertions(+), 59 deletions(-) diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h index 13de80cf741c..b106aa29bf55 100644 --- a/arch/s390/include/asm/kprobes.h +++ b/arch/s390/include/asm/kprobes.h @@ -68,8 +68,6 @@ struct kprobe_ctlblk { unsigned long kprobe_saved_imask; unsigned long kprobe_saved_ctl[3]; struct prev_kprobe prev_kprobe; - struct pt_regs jprobe_saved_regs; - kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE]; }; void arch_remove_kprobe(struct kprobe *p); diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 60f60afa645c..0967de19f53d 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -321,9 +321,8 @@ static int kprobe_handler(struct pt_regs *regs) * If we have no pre-handler or it returned 0, we * continue with single stepping. If we have a * pre-handler and it returned non-zero, it prepped - * for calling the break_handler below on re-entry - * for jprobe processing, so get out doing nothing - * more here. + * for changing execution path, so get out doing + * nothing more here. */ push_kprobe(kcb, p); kcb->kprobe_status = KPROBE_HIT_ACTIVE; @@ -661,60 +660,6 @@ int kprobe_exceptions_notify(struct notifier_block *self, } NOKPROBE_SYMBOL(kprobe_exceptions_notify); -int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct jprobe *jp = container_of(p, struct jprobe, kp); - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - unsigned long stack; - - memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs)); - - /* setup return addr to the jprobe handler routine */ - regs->psw.addr = (unsigned long) jp->entry; - regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT); - - /* r15 is the stack pointer */ - stack = (unsigned long) regs->gprs[15]; - - memcpy(kcb->jprobes_stack, (void *) stack, MIN_STACK_SIZE(stack)); - - /* - * jprobes use jprobe_return() which skips the normal return - * path of the function, and this messes up the accounting of the - * function graph tracer to get messed up. - * - * Pause function graph tracing while performing the jprobe function. 
- */ - pause_graph_tracing(); - return 1; -} -NOKPROBE_SYMBOL(setjmp_pre_handler); - -void jprobe_return(void) -{ - asm volatile(".word 0x0002"); -} -NOKPROBE_SYMBOL(jprobe_return); - -int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - unsigned long stack; - - /* It's OK to start function graph tracing again */ - unpause_graph_tracing(); - - stack = (unsigned long) kcb->jprobe_saved_regs.gprs[15]; - - /* Put the regs back */ - memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs)); - /* put the stack back */ - memcpy((void *) stack, kcb->jprobes_stack, MIN_STACK_SIZE(stack)); - preempt_enable_no_resched(); - return 1; -} -NOKPROBE_SYMBOL(longjmp_break_handler); - static struct kprobe trampoline = { .addr = (kprobe_opcode_t *) &kretprobe_trampoline, .pre_handler = trampoline_probe_handler -- cgit From 8a4b11a21eaf2b7bae91e6de7ac0543e0342e955 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 20 Jun 2018 01:09:28 +0900 Subject: sh/kprobes: Remove jprobe implementation Remove arch dependent setjump/longjump functions and unused fields in kprobe_ctlblk for jprobes from arch/sh. Signed-off-by: Masami Hiramatsu Acked-by: Thomas Gleixner Cc: Ananth N Mavinakayanahalli Cc: Andrew Morton Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rich Felker Cc: Steven Rostedt Cc: Yoshinori Sato Cc: linux-arch@vger.kernel.org Cc: linux-sh@vger.kernel.org Link: https://lore.kernel.org/lkml/152942456877.15209.5482537226935729621.stgit@devbox Signed-off-by: Ingo Molnar --- arch/sh/include/asm/kprobes.h | 4 ---- arch/sh/kernel/kprobes.c | 51 ------------------------------------------- 2 files changed, 55 deletions(-) diff --git a/arch/sh/include/asm/kprobes.h b/arch/sh/include/asm/kprobes.h index 85d8bcaa8493..6171682f7798 100644 --- a/arch/sh/include/asm/kprobes.h +++ b/arch/sh/include/asm/kprobes.h @@ -27,7 +27,6 @@ struct kprobe; void arch_remove_kprobe(struct kprobe *); void kretprobe_trampoline(void); -void jprobe_return_end(void); /* Architecture specific copy of original instruction*/ struct arch_specific_insn { @@ -43,9 +42,6 @@ struct prev_kprobe { /* per-cpu kprobe control block */ struct kprobe_ctlblk { unsigned long kprobe_status; - unsigned long jprobe_saved_r15; - struct pt_regs jprobe_saved_regs; - kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE]; struct prev_kprobe prev_kprobe; }; diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c index 52a5e11247d1..2b5e58873b96 100644 --- a/arch/sh/kernel/kprobes.c +++ b/arch/sh/kernel/kprobes.c @@ -523,57 +523,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, return ret; } -int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct jprobe *jp = container_of(p, struct jprobe, kp); - unsigned long addr; - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - - kcb->jprobe_saved_regs = *regs; - kcb->jprobe_saved_r15 = regs->regs[15]; - addr = kcb->jprobe_saved_r15; - - /* - * TBD: As Linus pointed out, gcc assumes that the callee - * owns the argument space and could overwrite it, e.g. - * tailcall optimization. So, to be absolutely safe - * we also save and restore enough stack bytes to cover - * the argument area. 
- */ - memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr, - MIN_STACK_SIZE(addr)); - - regs->pc = (unsigned long)(jp->entry); - - return 1; -} - -void __kprobes jprobe_return(void) -{ - asm volatile ("trapa #0x3a\n\t" "jprobe_return_end:\n\t" "nop\n\t"); -} - -int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - unsigned long stack_addr = kcb->jprobe_saved_r15; - u8 *addr = (u8 *)regs->pc; - - if ((addr >= (u8 *)jprobe_return) && - (addr <= (u8 *)jprobe_return_end)) { - *regs = kcb->jprobe_saved_regs; - - memcpy((kprobe_opcode_t *)stack_addr, kcb->jprobes_stack, - MIN_STACK_SIZE(stack_addr)); - - kcb->kprobe_status = KPROBE_HIT_SS; - preempt_enable_no_resched(); - return 1; - } - - return 0; -} - static struct kprobe trampoline_p = { .addr = (kprobe_opcode_t *)&kretprobe_trampoline, .pre_handler = trampoline_probe_handler -- cgit From e9bf11a5a68b39e41f2113b7d3cf5125d6816e23 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 20 Jun 2018 01:09:58 +0900 Subject: sparc64/kprobes: Remove jprobe implementation Remove arch dependent setjump/longjump functions and unused fields in kprobe_ctlblk for jprobes from arch/sparc. Signed-off-by: Masami Hiramatsu Acked-by: Thomas Gleixner Cc: Ananth N Mavinakayanahalli Cc: Andrew Morton Cc: David S. Miller Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Steven Rostedt Cc: linux-arch@vger.kernel.org Cc: sparclinux@vger.kernel.org Link: https://lore.kernel.org/lkml/152942459795.15209.9736720668494241853.stgit@devbox Signed-off-by: Ingo Molnar --- arch/sparc/include/asm/kprobes.h | 1 - arch/sparc/kernel/kprobes.c | 47 ---------------------------------------- 2 files changed, 48 deletions(-) diff --git a/arch/sparc/include/asm/kprobes.h b/arch/sparc/include/asm/kprobes.h index 3704490b4488..bfcaa6326c20 100644 --- a/arch/sparc/include/asm/kprobes.h +++ b/arch/sparc/include/asm/kprobes.h @@ -44,7 +44,6 @@ struct kprobe_ctlblk { unsigned long kprobe_status; unsigned long kprobe_orig_tnpc; unsigned long kprobe_orig_tstate_pil; - struct pt_regs jprobe_saved_regs; struct prev_kprobe prev_kprobe; }; diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c index ab4ba4347941..f555711da8f1 100644 --- a/arch/sparc/kernel/kprobes.c +++ b/arch/sparc/kernel/kprobes.c @@ -441,53 +441,6 @@ out: exception_exit(prev_state); } -/* Jprobes support. 
*/ -int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct jprobe *jp = container_of(p, struct jprobe, kp); - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - - memcpy(&(kcb->jprobe_saved_regs), regs, sizeof(*regs)); - - regs->tpc = (unsigned long) jp->entry; - regs->tnpc = ((unsigned long) jp->entry) + 0x4UL; - regs->tstate |= TSTATE_PIL; - - return 1; -} - -void __kprobes jprobe_return(void) -{ - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - register unsigned long orig_fp asm("g1"); - - orig_fp = kcb->jprobe_saved_regs.u_regs[UREG_FP]; - __asm__ __volatile__("\n" -"1: cmp %%sp, %0\n\t" - "blu,a,pt %%xcc, 1b\n\t" - " restore\n\t" - ".globl jprobe_return_trap_instruction\n" -"jprobe_return_trap_instruction:\n\t" - "ta 0x70" - : /* no outputs */ - : "r" (orig_fp)); -} - -extern void jprobe_return_trap_instruction(void); - -int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) -{ - u32 *addr = (u32 *) regs->tpc; - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - - if (addr == (u32 *) jprobe_return_trap_instruction) { - memcpy(regs, &(kcb->jprobe_saved_regs), sizeof(*regs)); - preempt_enable_no_resched(); - return 1; - } - return 0; -} - /* The value stored in the return address register is actually 2 * instructions before where the callee will return to. * Sequences usually look something like this -- cgit From 059053a275b56eff1db11605c68988a6e9818561 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 20 Jun 2018 01:10:27 +0900 Subject: kprobes: Don't check the ->break_handler() in generic kprobes code Don't check the ->break_handler() from the core kprobes code, because it was only used by jprobes which got removed. ( In followup patches we'll remove the remaining calls in low level arch handlers as well and remove the callback altogether. ) Signed-off-by: Masami Hiramatsu Acked-by: Thomas Gleixner Cc: Ananth N Mavinakayanahalli Cc: Andrew Morton Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Steven Rostedt Cc: linux-arch@vger.kernel.org Link: https://lore.kernel.org/lkml/152942462686.15209.6324404940493598980.stgit@devbox Signed-off-by: Ingo Molnar --- Documentation/kprobes.txt | 2 +- kernel/kprobes.c | 39 +++++---------------------------------- 2 files changed, 6 insertions(+), 35 deletions(-) diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt index 33dd6a81fb64..ab2c7307f123 100644 --- a/Documentation/kprobes.txt +++ b/Documentation/kprobes.txt @@ -262,7 +262,7 @@ is optimized, that modification is ignored. Thus, if you want to tweak the kernel's execution path, you need to suppress optimization, using one of the following techniques: -- Specify an empty function for the kprobe's post_handler or break_handler. +- Specify an empty function for the kprobe's post_handler. or diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 69de130595f7..536ab451e96d 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -627,8 +627,8 @@ static void optimize_kprobe(struct kprobe *p) (kprobe_disabled(p) || kprobes_all_disarmed)) return; - /* Both of break_handler and post_handler are not supported. 
*/ - if (p->break_handler || p->post_handler) + /* kprobes with post_handler can not be optimized */ + if (p->post_handler) return; op = container_of(p, struct optimized_kprobe, kp); @@ -1116,20 +1116,6 @@ static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, } NOKPROBE_SYMBOL(aggr_fault_handler); -static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct kprobe *cur = __this_cpu_read(kprobe_instance); - int ret = 0; - - if (cur && cur->break_handler) { - if (cur->break_handler(cur, regs)) - ret = 1; - } - reset_kprobe_instance(); - return ret; -} -NOKPROBE_SYMBOL(aggr_break_handler); - /* Walks the list and increments nmissed count for multiprobe case */ void kprobes_inc_nmissed_count(struct kprobe *p) { @@ -1270,24 +1256,15 @@ static void cleanup_rp_inst(struct kretprobe *rp) } NOKPROBE_SYMBOL(cleanup_rp_inst); -/* -* Add the new probe to ap->list. Fail if this is the -* second break_handler at the address -*/ +/* Add the new probe to ap->list */ static int add_new_kprobe(struct kprobe *ap, struct kprobe *p) { BUG_ON(kprobe_gone(ap) || kprobe_gone(p)); - if (p->break_handler || p->post_handler) + if (p->post_handler) unoptimize_kprobe(ap, true); /* Fall back to normal kprobe */ - if (p->break_handler) { - if (ap->break_handler) - return -EEXIST; - list_add_tail_rcu(&p->list, &ap->list); - ap->break_handler = aggr_break_handler; - } else - list_add_rcu(&p->list, &ap->list); + list_add_rcu(&p->list, &ap->list); if (p->post_handler && !ap->post_handler) ap->post_handler = aggr_post_handler; @@ -1310,8 +1287,6 @@ static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p) /* We don't care the kprobe which has gone. */ if (p->post_handler && !kprobe_gone(p)) ap->post_handler = aggr_post_handler; - if (p->break_handler && !kprobe_gone(p)) - ap->break_handler = aggr_break_handler; INIT_LIST_HEAD(&ap->list); INIT_HLIST_NODE(&ap->hlist); @@ -1706,8 +1681,6 @@ static int __unregister_kprobe_top(struct kprobe *p) goto disarmed; else { /* If disabling probe has special handlers, update aggrprobe */ - if (p->break_handler && !kprobe_gone(p)) - ap->break_handler = NULL; if (p->post_handler && !kprobe_gone(p)) { list_for_each_entry_rcu(list_p, &ap->list, list) { if ((list_p != p) && (list_p->post_handler)) @@ -1911,7 +1884,6 @@ int register_kretprobe(struct kretprobe *rp) rp->kp.pre_handler = pre_handler_kretprobe; rp->kp.post_handler = NULL; rp->kp.fault_handler = NULL; - rp->kp.break_handler = NULL; /* Pre-allocate memory for max kretprobe instances */ if (rp->maxactive <= 0) { @@ -2034,7 +2006,6 @@ static void kill_kprobe(struct kprobe *p) list_for_each_entry_rcu(kp, &p->list, list) kp->flags |= KPROBE_FLAG_GONE; p->post_handler = NULL; - p->break_handler = NULL; kill_optimized_kprobe(p); } /* -- cgit From e704e34cd0bbd1c69eb4ca724935a23f6440502e Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 20 Jun 2018 01:10:55 +0900 Subject: kprobes/x86: Don't call the ->break_handler() in x86 kprobes Don't call the ->break_handler() and remove break_handler related code from x86 since that was only used by jprobe which got removed. 
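[ Illustrative note, not part of the patches: the generic patch above also trims Documentation/kprobes.txt so that specifying an empty post_handler remains one documented way to suppress jump optimization, and optimize_kprobe() now checks exactly that (a kprobe with a post_handler is not optimized). A sketch of that idiom; the symbol name is an assumption. It is registered and unregistered with register_kprobe()/unregister_kprobe() as usual. ]

  #include <linux/kprobes.h>

  static int noop_pre(struct kprobe *p, struct pt_regs *regs)
  {
          return 0;
  }

  /* Presence of a post_handler keeps this kprobe on the breakpoint path. */
  static void noop_post(struct kprobe *p, struct pt_regs *regs,
                        unsigned long flags)
  {
  }

  static struct kprobe unopt_kp = {
          .symbol_name    = "do_sys_open",        /* assumed example target */
          .pre_handler    = noop_pre,
          .post_handler   = noop_post,
  };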
Signed-off-by: Masami Hiramatsu Acked-by: Thomas Gleixner Cc: Ananth N Mavinakayanahalli Cc: Andrew Morton Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Steven Rostedt Cc: linux-arch@vger.kernel.org Link: https://lore.kernel.org/lkml/152942465549.15209.15889693025972771135.stgit@devbox Signed-off-by: Ingo Molnar --- arch/x86/include/asm/kprobes.h | 2 +- arch/x86/kernel/kprobes/common.h | 10 ---------- arch/x86/kernel/kprobes/core.c | 7 ------- arch/x86/kernel/kprobes/ftrace.c | 42 +++++++++++----------------------------- 4 files changed, 12 insertions(+), 49 deletions(-) diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h index 06782c2efa04..c8cec1b39b88 100644 --- a/arch/x86/include/asm/kprobes.h +++ b/arch/x86/include/asm/kprobes.h @@ -78,7 +78,7 @@ struct arch_specific_insn { * boostable = true: This instruction has been boosted: we have * added a relative jump after the instruction copy in insn, * so no single-step and fixup are needed (unless there's - * a post_handler or break_handler). + * a post_handler). */ bool boostable; bool if_modifier; diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h index ae38dccf0c8f..2b949f4fd4d8 100644 --- a/arch/x86/kernel/kprobes/common.h +++ b/arch/x86/kernel/kprobes/common.h @@ -105,14 +105,4 @@ static inline unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsig } #endif -#ifdef CONFIG_KPROBES_ON_FTRACE -extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs, - struct kprobe_ctlblk *kcb); -#else -static inline int skip_singlestep(struct kprobe *p, struct pt_regs *regs, - struct kprobe_ctlblk *kcb) -{ - return 0; -} -#endif #endif diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 1b2d1acba810..0ac16a0d93e5 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -709,13 +709,6 @@ int kprobe_int3_handler(struct pt_regs *regs) regs->ip = (unsigned long)addr; preempt_enable_no_resched(); return 1; - } else if (kprobe_running()) { - p = __this_cpu_read(current_kprobe); - if (p->break_handler && p->break_handler(p, regs)) { - if (!skip_singlestep(p, regs, kcb)) - setup_singlestep(p, regs, kcb, 0); - return 1; - } } /* else: not a kprobe fault; let the kernel handle it */ preempt_enable_no_resched(); diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c index 8dc0161cec8f..02a6dd1b6bd0 100644 --- a/arch/x86/kernel/kprobes/ftrace.c +++ b/arch/x86/kernel/kprobes/ftrace.c @@ -25,36 +25,6 @@ #include "common.h" -static nokprobe_inline -void __skip_singlestep(struct kprobe *p, struct pt_regs *regs, - struct kprobe_ctlblk *kcb, unsigned long orig_ip) -{ - /* - * Emulate singlestep (and also recover regs->ip) - * as if there is a 5byte nop - */ - regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE; - if (unlikely(p->post_handler)) { - kcb->kprobe_status = KPROBE_HIT_SSDONE; - p->post_handler(p, regs, 0); - } - __this_cpu_write(current_kprobe, NULL); - if (orig_ip) - regs->ip = orig_ip; -} - -int skip_singlestep(struct kprobe *p, struct pt_regs *regs, - struct kprobe_ctlblk *kcb) -{ - if (kprobe_ftrace(p)) { - __skip_singlestep(p, regs, kcb, 0); - preempt_enable_no_resched(); - return 1; - } - return 0; -} -NOKPROBE_SYMBOL(skip_singlestep); - /* Ftrace callback handler for kprobes -- called under preepmt disabed */ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *ops, struct pt_regs *regs) @@ -80,7 +50,17 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned 
long parent_ip, __this_cpu_write(current_kprobe, p); kcb->kprobe_status = KPROBE_HIT_ACTIVE; if (!p->pre_handler || !p->pre_handler(p, regs)) { - __skip_singlestep(p, regs, kcb, orig_ip); + /* + * Emulate singlestep (and also recover regs->ip) + * as if there is a 5byte nop + */ + regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE; + if (unlikely(p->post_handler)) { + kcb->kprobe_status = KPROBE_HIT_SSDONE; + p->post_handler(p, regs, 0); + } + regs->ip = orig_ip; + __this_cpu_write(current_kprobe, NULL); preempt_enable_no_resched(); } /* -- cgit From 5ed4b0c5d17671160c0ab74a260ae57e9352b585 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 20 Jun 2018 01:11:24 +0900 Subject: ARC/kprobes: Don't call the ->break_handler() in ARC kprobes code Don't call the ->break_handler() from the ARC kprobes code, because it was only used by jprobes which got removed. Signed-off-by: Masami Hiramatsu Acked-by: Thomas Gleixner Cc: Ananth N Mavinakayanahalli Cc: Andrew Morton Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Steven Rostedt Cc: Vineet Gupta Cc: linux-arch@vger.kernel.org Cc: linux-snps-arc@lists.infradead.org Link: https://lore.kernel.org/lkml/152942468446.15209.13773902741600803798.stgit@devbox Signed-off-by: Ingo Molnar --- arch/arc/kernel/kprobes.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c index 7811a6bbe8f0..465365696c91 100644 --- a/arch/arc/kernel/kprobes.c +++ b/arch/arc/kernel/kprobes.c @@ -234,13 +234,6 @@ int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs) } return 1; - } else if (kprobe_running()) { - p = __this_cpu_read(current_kprobe); - if (p->break_handler && p->break_handler(p, regs)) { - setup_singlestep(p, regs); - kcb->kprobe_status = KPROBE_HIT_SS; - return 1; - } } /* no_kprobe: */ -- cgit From 75feff53a2011e8954fab02bdcef49f36a01b256 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 20 Jun 2018 01:11:53 +0900 Subject: ARM/kprobes: Don't call the ->break_handler() in arm kprobes code Don't call the ->break_handler() from the arm kprobes code, because it was only used by jprobes which got removed. Signed-off-by: Masami Hiramatsu Acked-by: Thomas Gleixner Cc: Ananth N Mavinakayanahalli Cc: Andrew Morton Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Russell King Cc: Steven Rostedt Cc: linux-arch@vger.kernel.org Cc: linux-arm-kernel@lists.infradead.org Link: https://lore.kernel.org/lkml/152942471328.15209.10625693210204476080.stgit@devbox Signed-off-by: Ingo Molnar --- arch/arm/probes/kprobes/core.c | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c index 23562111c511..3192350f389d 100644 --- a/arch/arm/probes/kprobes/core.c +++ b/arch/arm/probes/kprobes/core.c @@ -315,17 +315,6 @@ void __kprobes kprobe_handler(struct pt_regs *regs) reset_current_kprobe(); } } - } else if (cur) { - /* We probably hit a jprobe. Call its break handler. */ - if (cur->break_handler && cur->break_handler(cur, regs)) { - kcb->kprobe_status = KPROBE_HIT_SS; - singlestep(cur, regs, kcb); - if (cur->post_handler) { - kcb->kprobe_status = KPROBE_HIT_SSDONE; - cur->post_handler(cur, regs, 0); - } - } - reset_current_kprobe(); } else { /* * The probe was removed and a race is in progress. 
-- cgit From c9abd554aa1231df2ad1dd5b30114c1552c2d956 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 20 Jun 2018 01:12:22 +0900 Subject: arm64/kprobes: Don't call the ->break_handler() in arm64 kprobes code Don't call the ->break_handler() from the arm64 kprobes code, because it was only used by jprobes which got removed. Signed-off-by: Masami Hiramatsu Acked-by: Thomas Gleixner Acked-by: Will Deacon Cc: Ananth N Mavinakayanahalli Cc: Andrew Morton Cc: Catalin Marinas Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Steven Rostedt Cc: linux-arch@vger.kernel.org Cc: linux-arm-kernel@lists.infradead.org Link: https://lore.kernel.org/lkml/152942474231.15209.17684808374429473004.stgit@devbox Signed-off-by: Ingo Molnar --- arch/arm64/kernel/probes/kprobes.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index 3ca2351109a6..076c3c0775a6 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -408,14 +408,6 @@ static void __kprobes kprobe_handler(struct pt_regs *regs) return; } } - } else if ((le32_to_cpu(*(kprobe_opcode_t *) addr) == - BRK64_OPCODE_KPROBES) && cur_kprobe) { - /* We probably hit a jprobe. Call its break handler. */ - if (cur_kprobe->break_handler && - cur_kprobe->break_handler(cur_kprobe, regs)) { - setup_singlestep(cur_kprobe, regs, kcb, 0); - return; - } } /* * The breakpoint instruction was removed right -- cgit From 6e5fd3a298e9ff74e5bf60f161e31cdb72f2e6bd Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 20 Jun 2018 01:12:51 +0900 Subject: powerpc/kprobes: Don't call the ->break_handler() in powerpc kprobes code Don't call the ->break_handler() from the powerpc kprobes code, because it was only used by jprobes which got removed. This also removes skip_singlestep() and embeds it in the caller, kprobe_ftrace_handler(), which simplifies regs->nip operation around there. Signed-off-by: Masami Hiramatsu Acked-by: Thomas Gleixner Acked-by: Naveen N. 
Rao Cc: Ananth N Mavinakayanahalli Cc: Andrew Morton Cc: Benjamin Herrenschmidt Cc: Linus Torvalds Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Steven Rostedt Cc: linux-arch@vger.kernel.org Cc: linuxppc-dev@lists.ozlabs.org Link: https://lore.kernel.org/lkml/152942477127.15209.8982613703787878618.stgit@devbox Signed-off-by: Ingo Molnar --- arch/powerpc/include/asm/kprobes.h | 10 -------- arch/powerpc/kernel/kprobes-ftrace.c | 46 ++++++++++-------------------------- arch/powerpc/kernel/kprobes.c | 31 +++++++++--------------- 3 files changed, 23 insertions(+), 64 deletions(-) diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h index 674036db558b..785c464b6588 100644 --- a/arch/powerpc/include/asm/kprobes.h +++ b/arch/powerpc/include/asm/kprobes.h @@ -102,16 +102,6 @@ extern int kprobe_exceptions_notify(struct notifier_block *self, extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr); extern int kprobe_handler(struct pt_regs *regs); extern int kprobe_post_handler(struct pt_regs *regs); -#ifdef CONFIG_KPROBES_ON_FTRACE -extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs, - struct kprobe_ctlblk *kcb); -#else -static inline int skip_singlestep(struct kprobe *p, struct pt_regs *regs, - struct kprobe_ctlblk *kcb) -{ - return 0; -} -#endif #else static inline int kprobe_handler(struct pt_regs *regs) { return 0; } static inline int kprobe_post_handler(struct pt_regs *regs) { return 0; } diff --git a/arch/powerpc/kernel/kprobes-ftrace.c b/arch/powerpc/kernel/kprobes-ftrace.c index 1b316331c2d9..070d1d862444 100644 --- a/arch/powerpc/kernel/kprobes-ftrace.c +++ b/arch/powerpc/kernel/kprobes-ftrace.c @@ -25,35 +25,6 @@ #include #include -static nokprobe_inline -int __skip_singlestep(struct kprobe *p, struct pt_regs *regs, - struct kprobe_ctlblk *kcb, unsigned long orig_nip) -{ - /* - * Emulate singlestep (and also recover regs->nip) - * as if there is a nop - */ - regs->nip = (unsigned long)p->addr + MCOUNT_INSN_SIZE; - if (unlikely(p->post_handler)) { - kcb->kprobe_status = KPROBE_HIT_SSDONE; - p->post_handler(p, regs, 0); - } - __this_cpu_write(current_kprobe, NULL); - if (orig_nip) - regs->nip = orig_nip; - return 1; -} - -int skip_singlestep(struct kprobe *p, struct pt_regs *regs, - struct kprobe_ctlblk *kcb) -{ - if (kprobe_ftrace(p)) - return __skip_singlestep(p, regs, kcb, 0); - else - return 0; -} -NOKPROBE_SYMBOL(skip_singlestep); - /* Ftrace callback handler for kprobes */ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip, struct ftrace_ops *ops, struct pt_regs *regs) @@ -71,8 +42,6 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip, if (kprobe_running()) { kprobes_inc_nmissed_count(p); } else { - unsigned long orig_nip = regs->nip; - /* * On powerpc, NIP is *before* this instruction for the * pre handler @@ -81,9 +50,18 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip, __this_cpu_write(current_kprobe, p); kcb->kprobe_status = KPROBE_HIT_ACTIVE; - if (!p->pre_handler || !p->pre_handler(p, regs)) - __skip_singlestep(p, regs, kcb, orig_nip); - else { + if (!p->pre_handler || !p->pre_handler(p, regs)) { + /* + * Emulate singlestep (and also recover regs->nip) + * as if there is a nop + */ + regs->nip += MCOUNT_INSN_SIZE; + if (unlikely(p->post_handler)) { + kcb->kprobe_status = KPROBE_HIT_SSDONE; + p->post_handler(p, regs, 0); + } + __this_cpu_write(current_kprobe, NULL); + } else { /* * If pre_handler returns !0, it sets regs->nip and * resets 
current kprobe. In this case, we should not diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index 600678fce0a8..f06747e2e70d 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -317,25 +317,17 @@ int kprobe_handler(struct pt_regs *regs) } prepare_singlestep(p, regs); return 1; - } else { - if (*addr != BREAKPOINT_INSTRUCTION) { - /* If trap variant, then it belongs not to us */ - kprobe_opcode_t cur_insn = *addr; - if (is_trap(cur_insn)) - goto no_kprobe; - /* The breakpoint instruction was removed by - * another cpu right after we hit, no further - * handling of this interrupt is appropriate - */ - ret = 1; + } else if (*addr != BREAKPOINT_INSTRUCTION) { + /* If trap variant, then it belongs not to us */ + kprobe_opcode_t cur_insn = *addr; + + if (is_trap(cur_insn)) goto no_kprobe; - } - p = __this_cpu_read(current_kprobe); - if (p->break_handler && p->break_handler(p, regs)) { - if (!skip_singlestep(p, regs, kcb)) - goto ss_probe; - ret = 1; - } + /* The breakpoint instruction was removed by + * another cpu right after we hit, no further + * handling of this interrupt is appropriate + */ + ret = 1; } goto no_kprobe; } @@ -350,7 +342,7 @@ int kprobe_handler(struct pt_regs *regs) */ kprobe_opcode_t cur_insn = *addr; if (is_trap(cur_insn)) - goto no_kprobe; + goto no_kprobe; /* * The breakpoint instruction was removed right * after we hit it. Another cpu has removed @@ -370,7 +362,6 @@ int kprobe_handler(struct pt_regs *regs) /* handler has already set things up, so skip ss setup */ return 1; -ss_probe: if (p->ainsn.boostable >= 0) { ret = try_to_emulate(p, regs); -- cgit From cf3bb032fd753424e48d1bc580634768fc54aabe Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 20 Jun 2018 01:13:20 +0900 Subject: ia64/kprobes: Don't call the ->break_handler() in ia64 kprobes code Don't call the ->break_handler() from the ia64 kprobes code, because it was only used by jprobes which got removed. Signed-off-by: Masami Hiramatsu Acked-by: Thomas Gleixner Cc: Ananth N Mavinakayanahalli Cc: Andrew Morton Cc: Fenghua Yu Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Steven Rostedt Cc: Tony Luck Cc: linux-arch@vger.kernel.org Cc: linux-ia64@vger.kernel.org Link: https://lore.kernel.org/lkml/152942480045.15209.10715901482832337704.stgit@devbox Signed-off-by: Ingo Molnar --- arch/ia64/include/uapi/asm/break.h | 1 - arch/ia64/kernel/kprobes.c | 10 ---------- 2 files changed, 11 deletions(-) diff --git a/arch/ia64/include/uapi/asm/break.h b/arch/ia64/include/uapi/asm/break.h index 5d742bcb0018..4ca110f0a94b 100644 --- a/arch/ia64/include/uapi/asm/break.h +++ b/arch/ia64/include/uapi/asm/break.h @@ -14,7 +14,6 @@ */ #define __IA64_BREAK_KDB 0x80100 #define __IA64_BREAK_KPROBE 0x81000 /* .. 
0x81fff */ -#define __IA64_BREAK_JPROBE 0x82000 /* * OS-specific break numbers: diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index 823e4bd03a18..74c8524e6309 100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c @@ -817,14 +817,6 @@ static int __kprobes pre_kprobes_handler(struct die_args *args) prepare_ss(p, regs); kcb->kprobe_status = KPROBE_REENTER; return 1; - } else if (args->err == __IA64_BREAK_JPROBE) { - /* - * jprobe instrumented function just completed - */ - p = __this_cpu_read(current_kprobe); - if (p->break_handler && p->break_handler(p, regs)) { - goto ss_probe; - } } else if (!is_ia64_break_inst(regs)) { /* The breakpoint instruction was removed by * another cpu right after we hit, no further @@ -867,7 +859,6 @@ static int __kprobes pre_kprobes_handler(struct die_args *args) */ return 1; -ss_probe: #if !defined(CONFIG_PREEMPT) if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) { /* Boost up -- we can execute copied instructions directly */ @@ -990,7 +981,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, case DIE_BREAK: /* err is break number from ia64_bad_break() */ if ((args->err >> 12) == (__IA64_BREAK_KPROBE >> 12) - || args->err == __IA64_BREAK_JPROBE || args->err == 0) if (pre_kprobes_handler(args)) ret = NOTIFY_STOP; -- cgit From 9b85753da73c5c10a9bee4d573be9503ad726b45 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 20 Jun 2018 01:13:49 +0900 Subject: MIPS/kprobes: Don't call the ->break_handler() in MIPS kprobes code Don't call the ->break_handler() from the MIPS kprobes code, because it was only used by jprobes which got removed. Signed-off-by: Masami Hiramatsu Acked-by: Thomas Gleixner Cc: Ananth N Mavinakayanahalli Cc: Andrew Morton Cc: James Hogan Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Ralf Baechle Cc: Steven Rostedt Cc: linux-arch@vger.kernel.org Cc: linux-mips@linux-mips.org Link: https://lore.kernel.org/lkml/152942482953.15209.843924518200700137.stgit@devbox Signed-off-by: Ingo Molnar --- arch/mips/kernel/kprobes.c | 21 +++++++-------------- 1 file changed, 7 insertions(+), 14 deletions(-) diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c index efdcd0b1ce12..7fd277bc59b9 100644 --- a/arch/mips/kernel/kprobes.c +++ b/arch/mips/kernel/kprobes.c @@ -326,19 +326,13 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) preempt_enable_no_resched(); } return 1; - } else { - if (addr->word != breakpoint_insn.word) { - /* - * The breakpoint instruction was removed by - * another cpu right after we hit, no further - * handling of this interrupt is appropriate - */ - ret = 1; - goto no_kprobe; - } - p = __this_cpu_read(current_kprobe); - if (p->break_handler && p->break_handler(p, regs)) - goto ss_probe; + } else if (addr->word != breakpoint_insn.word) { + /* + * The breakpoint instruction was removed by + * another cpu right after we hit, no further + * handling of this interrupt is appropriate + */ + ret = 1; } goto no_kprobe; } @@ -367,7 +361,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) return 1; } -ss_probe: prepare_singlestep(p, regs, kcb); if (kcb->flags & SKIP_DELAYSLOT) { kcb->kprobe_status = KPROBE_HIT_SSDONE; -- cgit From cba5ec980ca14b204bdb128be397ed1ddc1c7249 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 20 Jun 2018 01:14:18 +0900 Subject: s390/kprobes: Don't call the ->break_handler() in s390 kprobes code Don't call the ->break_handler() from the s390 kprobes code, because it was only used by jprobes which 
got removed. Signed-off-by: Masami Hiramatsu Acked-by: Thomas Gleixner Cc: Ananth N Mavinakayanahalli Cc: Andrew Morton Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Peter Zijlstra Cc: Steven Rostedt Cc: linux-arch@vger.kernel.org Cc: linux-s390@vger.kernel.org Link: https://lore.kernel.org/lkml/152942485849.15209.16608277783809018031.stgit@devbox Signed-off-by: Ingo Molnar --- arch/s390/kernel/kprobes.c | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 0967de19f53d..3e34018960b5 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -332,26 +332,6 @@ static int kprobe_handler(struct pt_regs *regs) } enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn); return 1; - } else if (kprobe_running()) { - p = __this_cpu_read(current_kprobe); - if (p->break_handler && p->break_handler(p, regs)) { - /* - * Continuation after the jprobe completed and - * caused the jprobe_return trap. The jprobe - * break_handler "returns" to the original - * function that still has the kprobe breakpoint - * installed. We continue with single stepping. - */ - kcb->kprobe_status = KPROBE_HIT_SS; - enable_singlestep(kcb, regs, - (unsigned long) p->ainsn.insn); - return 1; - } /* else: - * No kprobe at this address and the current kprobe - * has no break handler (no jprobe!). The kernel just - * exploded, let the standard trap handler pick up the - * pieces. - */ } /* else: * No kprobe at this address and no active kprobe. The trap has * not been caused by a kprobe breakpoint. The race of breakpoint -- cgit From fa5a24b16f94415d0a5da40bd479c26cfc6c15e3 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 20 Jun 2018 01:14:47 +0900 Subject: sh/kprobes: Don't call the ->break_handler() in SH kprobes code Don't call the ->break_handler() from the SH kprobes code, because it was only used by jprobes which got removed. 
Signed-off-by: Masami Hiramatsu Acked-by: Thomas Gleixner Cc: Ananth N Mavinakayanahalli Cc: Andrew Morton Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rich Felker Cc: Steven Rostedt Cc: Yoshinori Sato Cc: linux-arch@vger.kernel.org Cc: linux-sh@vger.kernel.org Link: https://lore.kernel.org/lkml/152942488763.15209.7791710414058030809.stgit@devbox Signed-off-by: Ingo Molnar --- arch/sh/kernel/kprobes.c | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c index 2b5e58873b96..4fafe0cd12c6 100644 --- a/arch/sh/kernel/kprobes.c +++ b/arch/sh/kernel/kprobes.c @@ -248,11 +248,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) prepare_singlestep(p, regs); kcb->kprobe_status = KPROBE_REENTER; return 1; - } else { - p = __this_cpu_read(current_kprobe); - if (p->break_handler && p->break_handler(p, regs)) { - goto ss_probe; - } } goto no_kprobe; } @@ -281,7 +276,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) /* handler has already set things up, so skip ss setup */ return 1; -ss_probe: prepare_singlestep(p, regs); kcb->kprobe_status = KPROBE_HIT_SS; return 1; @@ -508,14 +502,8 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, if (post_kprobe_handler(args->regs)) ret = NOTIFY_STOP; } else { - if (kprobe_handler(args->regs)) { + if (kprobe_handler(args->regs)) ret = NOTIFY_STOP; - } else { - p = __this_cpu_read(current_kprobe); - if (p->break_handler && - p->break_handler(p, args->regs)) - ret = NOTIFY_STOP; - } } } } -- cgit From d5ad85b6d9ac286ae7fa8718a8a1c28561349c07 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 20 Jun 2018 01:15:17 +0900 Subject: sparc64/kprobes: Don't call the ->break_handler() in sparc64 kprobes code Don't call the ->break_handler() from the sparc64 kprobes code, because it was only used by jprobes which got removed. Signed-off-by: Masami Hiramatsu Acked-by: Thomas Gleixner Cc: Ananth N Mavinakayanahalli Cc: Andrew Morton Cc: David S. 
Miller Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Steven Rostedt Cc: linux-arch@vger.kernel.org Cc: sparclinux@vger.kernel.org Link: https://lore.kernel.org/lkml/152942491684.15209.8114703769135529984.stgit@devbox Signed-off-by: Ingo Molnar --- arch/sparc/kernel/kprobes.c | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c index f555711da8f1..c684c96ef2e9 100644 --- a/arch/sparc/kernel/kprobes.c +++ b/arch/sparc/kernel/kprobes.c @@ -147,18 +147,12 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) kcb->kprobe_status = KPROBE_REENTER; prepare_singlestep(p, regs, kcb); return 1; - } else { - if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) { + } else if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) { /* The breakpoint instruction was removed by * another cpu right after we hit, no further * handling of this interrupt is appropriate */ - ret = 1; - goto no_kprobe; - } - p = __this_cpu_read(current_kprobe); - if (p->break_handler && p->break_handler(p, regs)) - goto ss_probe; + ret = 1; } goto no_kprobe; } @@ -184,7 +178,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) if (p->pre_handler && p->pre_handler(p, regs)) return 1; -ss_probe: prepare_singlestep(p, regs, kcb); kcb->kprobe_status = KPROBE_HIT_SS; return 1; -- cgit From cce188bd58cfbd603b904dbce75f34de2eff959a Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 20 Jun 2018 01:15:45 +0900 Subject: bpf/error-inject/kprobes: Clear current_kprobe and enable preempt in kprobe Clear current_kprobe and enable preemption in kprobe even if pre_handler returns !0. This simplifies function override using kprobes. Jprobe used to require to keep the preemption disabled and keep current_kprobe until it returned to original function entry. For this reason kprobe_int3_handler() and similar arch dependent kprobe handers checks pre_handler result and exit without enabling preemption if the result is !0. After removing the jprobe, Kprobes does not need to keep preempt disabled even if user handler returns !0 anymore. But since the function override handler in error-inject and bpf is also returns !0 if it overrides a function, to balancing the preempt count, it enables preemption and reset current kprobe by itself. That is a bad design that is very buggy. This fixes such unbalanced preempt-count and current_kprobes setting in kprobes, bpf and error-inject. Note: for powerpc and x86, this removes all preempt_disable from kprobe_ftrace_handler because ftrace callbacks are called under preempt disabled. Signed-off-by: Masami Hiramatsu Acked-by: Thomas Gleixner Acked-by: Naveen N. Rao Cc: Alexei Starovoitov Cc: Ananth N Mavinakayanahalli Cc: Andrew Morton Cc: Benjamin Herrenschmidt Cc: Catalin Marinas Cc: David S. 
Miller Cc: Fenghua Yu Cc: Heiko Carstens Cc: James Hogan Cc: Josef Bacik Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Ralf Baechle Cc: Rich Felker Cc: Russell King Cc: Steven Rostedt Cc: Tony Luck Cc: Vineet Gupta Cc: Will Deacon Cc: Yoshinori Sato Cc: linux-arch@vger.kernel.org Cc: linux-arm-kernel@lists.infradead.org Cc: linux-ia64@vger.kernel.org Cc: linux-mips@linux-mips.org Cc: linux-s390@vger.kernel.org Cc: linux-sh@vger.kernel.org Cc: linux-snps-arc@lists.infradead.org Cc: linuxppc-dev@lists.ozlabs.org Cc: sparclinux@vger.kernel.org Link: https://lore.kernel.org/lkml/152942494574.15209.12323837825873032258.stgit@devbox Signed-off-by: Ingo Molnar --- arch/arc/kernel/kprobes.c | 5 +++-- arch/arm/probes/kprobes/core.c | 10 +++++----- arch/arm64/kernel/probes/kprobes.c | 10 +++++----- arch/ia64/kernel/kprobes.c | 13 ++++--------- arch/mips/kernel/kprobes.c | 4 ++-- arch/powerpc/kernel/kprobes-ftrace.c | 19 ++++++------------- arch/powerpc/kernel/kprobes.c | 7 +++++-- arch/s390/kernel/kprobes.c | 7 ++++--- arch/sh/kernel/kprobes.c | 7 ++++--- arch/sparc/kernel/kprobes.c | 7 ++++--- arch/x86/kernel/kprobes/core.c | 4 ++++ arch/x86/kernel/kprobes/ftrace.c | 9 +++------ kernel/fail_function.c | 3 --- kernel/trace/trace_kprobe.c | 11 +++-------- 14 files changed, 52 insertions(+), 64 deletions(-) diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c index 465365696c91..df35d4c0b0b8 100644 --- a/arch/arc/kernel/kprobes.c +++ b/arch/arc/kernel/kprobes.c @@ -231,6 +231,9 @@ int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs) if (!p->pre_handler || !p->pre_handler(p, regs)) { setup_singlestep(p, regs); kcb->kprobe_status = KPROBE_HIT_SS; + } else { + reset_current_kprobe(); + preempt_enable_no_resched(); } return 1; @@ -442,9 +445,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, kretprobe_assert(ri, orig_ret_address, trampoline_address); regs->ret = orig_ret_address; - reset_current_kprobe(); kretprobe_hash_unlock(current, &flags); - preempt_enable_no_resched(); hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { hlist_del(&ri->hlist); diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c index 3192350f389d..8d37601fdb20 100644 --- a/arch/arm/probes/kprobes/core.c +++ b/arch/arm/probes/kprobes/core.c @@ -300,10 +300,10 @@ void __kprobes kprobe_handler(struct pt_regs *regs) /* * If we have no pre-handler or it returned 0, we - * continue with normal processing. If we have a - * pre-handler and it returned non-zero, it prepped - * for calling the break_handler below on re-entry, - * so get out doing nothing more here. + * continue with normal processing. If we have a + * pre-handler and it returned non-zero, it will + * modify the execution path and no need to single + * stepping. Let's just reset current kprobe and exit. 
*/ if (!p->pre_handler || !p->pre_handler(p, regs)) { kcb->kprobe_status = KPROBE_HIT_SS; @@ -312,8 +312,8 @@ void __kprobes kprobe_handler(struct pt_regs *regs) kcb->kprobe_status = KPROBE_HIT_SSDONE; p->post_handler(p, regs, 0); } - reset_current_kprobe(); } + reset_current_kprobe(); } } else { /* diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index 076c3c0775a6..5daf3d721cb7 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -395,9 +395,9 @@ static void __kprobes kprobe_handler(struct pt_regs *regs) /* * If we have no pre-handler or it returned 0, we * continue with normal processing. If we have a - * pre-handler and it returned non-zero, it prepped - * for calling the break_handler below on re-entry, - * so get out doing nothing more here. + * pre-handler and it returned non-zero, it will + * modify the execution path and no need to single + * stepping. Let's just reset current kprobe and exit. * * pre_handler can hit a breakpoint and can step thru * before return, keep PSTATE D-flag enabled until @@ -405,8 +405,8 @@ static void __kprobes kprobe_handler(struct pt_regs *regs) */ if (!p->pre_handler || !p->pre_handler(p, regs)) { setup_singlestep(p, regs, kcb, 0); - return; - } + } else + reset_current_kprobe(); } } /* diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index 74c8524e6309..aa41bd5cf9b7 100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c @@ -478,12 +478,9 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) */ break; } - kretprobe_assert(ri, orig_ret_address, trampoline_address); - reset_current_kprobe(); kretprobe_hash_unlock(current, &flags); - preempt_enable_no_resched(); hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { hlist_del(&ri->hlist); @@ -851,13 +848,11 @@ static int __kprobes pre_kprobes_handler(struct die_args *args) set_current_kprobe(p, kcb); kcb->kprobe_status = KPROBE_HIT_ACTIVE; - if (p->pre_handler && p->pre_handler(p, regs)) - /* - * Our pre-handler is specifically requesting that we just - * do a return. 
This is used for both the jprobe pre-handler - * and the kretprobe trampoline - */ + if (p->pre_handler && p->pre_handler(p, regs)) { + reset_current_kprobe(); + preempt_enable_no_resched(); return 1; + } #if !defined(CONFIG_PREEMPT) if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) { diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c index 7fd277bc59b9..54cd675c5d1d 100644 --- a/arch/mips/kernel/kprobes.c +++ b/arch/mips/kernel/kprobes.c @@ -358,6 +358,8 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) if (p->pre_handler && p->pre_handler(p, regs)) { /* handler has already set things up, so skip ss setup */ + reset_current_kprobe(); + preempt_enable_no_resched(); return 1; } @@ -543,9 +545,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, kretprobe_assert(ri, orig_ret_address, trampoline_address); instruction_pointer(regs) = orig_ret_address; - reset_current_kprobe(); kretprobe_hash_unlock(current, &flags); - preempt_enable_no_resched(); hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { hlist_del(&ri->hlist); diff --git a/arch/powerpc/kernel/kprobes-ftrace.c b/arch/powerpc/kernel/kprobes-ftrace.c index 070d1d862444..e4a49c051325 100644 --- a/arch/powerpc/kernel/kprobes-ftrace.c +++ b/arch/powerpc/kernel/kprobes-ftrace.c @@ -32,11 +32,9 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip, struct kprobe *p; struct kprobe_ctlblk *kcb; - preempt_disable(); - p = get_kprobe((kprobe_opcode_t *)nip); if (unlikely(!p) || kprobe_disabled(p)) - goto end; + return; kcb = get_kprobe_ctlblk(); if (kprobe_running()) { @@ -60,18 +58,13 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip, kcb->kprobe_status = KPROBE_HIT_SSDONE; p->post_handler(p, regs, 0); } - __this_cpu_write(current_kprobe, NULL); - } else { - /* - * If pre_handler returns !0, it sets regs->nip and - * resets current kprobe. In this case, we should not - * re-enable preemption. - */ - return; } + /* + * If pre_handler returns !0, it changes regs->nip. We have to + * skip emulating post_handler. 
+ */ + __this_cpu_write(current_kprobe, NULL); } -end: - preempt_enable_no_resched(); } NOKPROBE_SYMBOL(kprobe_ftrace_handler); diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index f06747e2e70d..5c60bb0f927f 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -358,9 +358,12 @@ int kprobe_handler(struct pt_regs *regs) kcb->kprobe_status = KPROBE_HIT_ACTIVE; set_current_kprobe(p, regs, kcb); - if (p->pre_handler && p->pre_handler(p, regs)) - /* handler has already set things up, so skip ss setup */ + if (p->pre_handler && p->pre_handler(p, regs)) { + /* handler changed execution path, so skip ss setup */ + reset_current_kprobe(); + preempt_enable_no_resched(); return 1; + } if (p->ainsn.boostable >= 0) { ret = try_to_emulate(p, regs); diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 3e34018960b5..7c0a095e9c5f 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -326,8 +326,11 @@ static int kprobe_handler(struct pt_regs *regs) */ push_kprobe(kcb, p); kcb->kprobe_status = KPROBE_HIT_ACTIVE; - if (p->pre_handler && p->pre_handler(p, regs)) + if (p->pre_handler && p->pre_handler(p, regs)) { + pop_kprobe(kcb); + preempt_enable_no_resched(); return 1; + } kcb->kprobe_status = KPROBE_HIT_SS; } enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn); @@ -431,9 +434,7 @@ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) regs->psw.addr = orig_ret_address; - pop_kprobe(get_kprobe_ctlblk()); kretprobe_hash_unlock(current, &flags); - preempt_enable_no_resched(); hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { hlist_del(&ri->hlist); diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c index 4fafe0cd12c6..241e903dd3ee 100644 --- a/arch/sh/kernel/kprobes.c +++ b/arch/sh/kernel/kprobes.c @@ -272,9 +272,12 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) set_current_kprobe(p, regs, kcb); kcb->kprobe_status = KPROBE_HIT_ACTIVE; - if (p->pre_handler && p->pre_handler(p, regs)) + if (p->pre_handler && p->pre_handler(p, regs)) { /* handler has already set things up, so skip ss setup */ + reset_current_kprobe(); + preempt_enable_no_resched(); return 1; + } prepare_singlestep(p, regs); kcb->kprobe_status = KPROBE_HIT_SS; @@ -352,8 +355,6 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) regs->pc = orig_ret_address; kretprobe_hash_unlock(current, &flags); - preempt_enable_no_resched(); - hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { hlist_del(&ri->hlist); kfree(ri); diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c index c684c96ef2e9..dfbca2470536 100644 --- a/arch/sparc/kernel/kprobes.c +++ b/arch/sparc/kernel/kprobes.c @@ -175,8 +175,11 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) set_current_kprobe(p, regs, kcb); kcb->kprobe_status = KPROBE_HIT_ACTIVE; - if (p->pre_handler && p->pre_handler(p, regs)) + if (p->pre_handler && p->pre_handler(p, regs)) { + reset_current_kprobe(); + preempt_enable_no_resched(); return 1; + } prepare_singlestep(p, regs, kcb); kcb->kprobe_status = KPROBE_HIT_SS; @@ -508,9 +511,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, regs->tpc = orig_ret_address; regs->tnpc = orig_ret_address + 4; - reset_current_kprobe(); kretprobe_hash_unlock(current, &flags); - preempt_enable_no_resched(); hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { hlist_del(&ri->hlist); diff --git a/arch/x86/kernel/kprobes/core.c 
b/arch/x86/kernel/kprobes/core.c index 0ac16a0d93e5..814e26b7c8a2 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -694,6 +694,10 @@ int kprobe_int3_handler(struct pt_regs *regs) */ if (!p->pre_handler || !p->pre_handler(p, regs)) setup_singlestep(p, regs, kcb, 0); + else { + reset_current_kprobe(); + preempt_enable_no_resched(); + } return 1; } } else if (*addr != BREAKPOINT_INSTRUCTION) { diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c index 02a6dd1b6bd0..ef819e19650b 100644 --- a/arch/x86/kernel/kprobes/ftrace.c +++ b/arch/x86/kernel/kprobes/ftrace.c @@ -45,8 +45,6 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */ regs->ip = ip + sizeof(kprobe_opcode_t); - /* To emulate trap based kprobes, preempt_disable here */ - preempt_disable(); __this_cpu_write(current_kprobe, p); kcb->kprobe_status = KPROBE_HIT_ACTIVE; if (!p->pre_handler || !p->pre_handler(p, regs)) { @@ -60,13 +58,12 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, p->post_handler(p, regs, 0); } regs->ip = orig_ip; - __this_cpu_write(current_kprobe, NULL); - preempt_enable_no_resched(); } /* - * If pre_handler returns !0, it sets regs->ip and - * resets current kprobe, and keep preempt count +1. + * If pre_handler returns !0, it changes regs->ip. We have to + * skip emulating post_handler. */ + __this_cpu_write(current_kprobe, NULL); } } NOKPROBE_SYMBOL(kprobe_ftrace_handler); diff --git a/kernel/fail_function.c b/kernel/fail_function.c index 5349c91c2298..bc80a4e268c0 100644 --- a/kernel/fail_function.c +++ b/kernel/fail_function.c @@ -184,9 +184,6 @@ static int fei_kprobe_handler(struct kprobe *kp, struct pt_regs *regs) if (should_fail(&fei_fault_attr, 1)) { regs_set_return_value(regs, attr->retval); override_function_with_return(regs); - /* Kprobe specific fixup */ - reset_current_kprobe(); - preempt_enable_no_resched(); return 1; } diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index daa81571b22a..7e3b944b6ac1 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1217,16 +1217,11 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs) /* * We need to check and see if we modified the pc of the - * pt_regs, and if so clear the kprobe and return 1 so that we - * don't do the single stepping. - * The ftrace kprobe handler leaves it up to us to re-enable - * preemption here before returning if we've modified the ip. + * pt_regs, and if so return 1 so that we don't do the + * single stepping. */ - if (orig_ip != instruction_pointer(regs)) { - reset_current_kprobe(); - preempt_enable_no_resched(); + if (orig_ip != instruction_pointer(regs)) return 1; - } if (!ret) return 0; } -- cgit From 2bbda764d720aacabaad38374d26fcccb7843f17 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 20 Jun 2018 01:16:17 +0900 Subject: kprobes/x86: Do not disable preempt on int3 path Since int3 and debug exception(for singlestep) are run with IRQ disabled and while running single stepping we drop IF from regs->flags, that path must not be preemptible. So we can remove the preempt disable/enable calls from that path. 
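For illustration, a minimal sketch of what a function-override style kprobe handler can look like once the clean-up above is applied. The handler name is hypothetical; the two helpers are the ones already used by kernel/fail_function.c in this series and need error-injection support for the target function. The point is only that the handler changes the register state and returns 1, while the kprobes core now resets current_kprobe and the preempt count by itself:

    /* Hypothetical sketch of a function-override pre_handler after this series. */
    static int my_override_handler(struct kprobe *kp, struct pt_regs *regs)
    {
    	/* Make the probed function return -EINVAL without running its body. */
    	regs_set_return_value(regs, -EINVAL);
    	override_function_with_return(regs);

    	/*
    	 * Returning !0 signals that the execution path was changed; the
    	 * kprobes core resets current_kprobe and re-enables preemption,
    	 * so no reset_current_kprobe()/preempt_enable_no_resched() calls
    	 * are needed here anymore.
    	 */
    	return 1;
    }
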
Suggested-by: Ingo Molnar Signed-off-by: Masami Hiramatsu Acked-by: Thomas Gleixner Cc: Ananth N Mavinakayanahalli Cc: Andrew Morton Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Steven Rostedt Cc: linux-arch@vger.kernel.org Cc: linux-doc@vger.kernel.org Link: https://lore.kernel.org/lkml/152942497779.15209.2879580696589868291.stgit@devbox Signed-off-by: Ingo Molnar --- Documentation/kprobes.txt | 11 +++++------ arch/x86/kernel/kprobes/core.c | 18 ++++-------------- arch/x86/kernel/kprobes/opt.c | 1 - 3 files changed, 9 insertions(+), 21 deletions(-) diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt index ab2c7307f123..cbb545910634 100644 --- a/Documentation/kprobes.txt +++ b/Documentation/kprobes.txt @@ -566,12 +566,11 @@ the same handler) may run concurrently on different CPUs. Kprobes does not use mutexes or allocate memory except during registration and unregistration. -Probe handlers are run with preemption disabled. Depending on the -architecture and optimization state, handlers may also run with -interrupts disabled (e.g., kretprobe handlers and optimized kprobe -handlers run without interrupt disabled on x86/x86-64). In any case, -your handler should not yield the CPU (e.g., by attempting to acquire -a semaphore). +Probe handlers are run with preemption disabled or interrupt disabled, +which depends on the architecture and optimization state. (e.g., +kretprobe handlers and optimized kprobe handlers run without interrupt +disabled on x86/x86-64). In any case, your handler should not yield +the CPU (e.g., by attempting to acquire a semaphore, or waiting I/O). Since a return probe is implemented by replacing the return address with the trampoline's address, stack backtraces and calls diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 814e26b7c8a2..f7104b256de7 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -594,7 +594,6 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs, * stepping. */ regs->ip = (unsigned long)p->ainsn.insn; - preempt_enable_no_resched(); return; } #endif @@ -667,12 +666,10 @@ int kprobe_int3_handler(struct pt_regs *regs) addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t)); /* - * We don't want to be preempted for the entire - * duration of kprobe processing. We conditionally - * re-enable preemption at the end of this function, - * and also in reenter_kprobe() and setup_singlestep(). + * We don't want to be preempted for the entire duration of kprobe + * processing. Since int3 and debug trap disables irqs and we clear + * IF while singlestepping, it must be no preemptible. */ - preempt_disable(); kcb = get_kprobe_ctlblk(); p = get_kprobe(addr); @@ -694,10 +691,8 @@ int kprobe_int3_handler(struct pt_regs *regs) */ if (!p->pre_handler || !p->pre_handler(p, regs)) setup_singlestep(p, regs, kcb, 0); - else { + else reset_current_kprobe(); - preempt_enable_no_resched(); - } return 1; } } else if (*addr != BREAKPOINT_INSTRUCTION) { @@ -711,11 +706,9 @@ int kprobe_int3_handler(struct pt_regs *regs) * the original instruction. 
*/ regs->ip = (unsigned long)addr; - preempt_enable_no_resched(); return 1; } /* else: not a kprobe fault; let the kernel handle it */ - preempt_enable_no_resched(); return 0; } NOKPROBE_SYMBOL(kprobe_int3_handler); @@ -966,8 +959,6 @@ int kprobe_debug_handler(struct pt_regs *regs) } reset_current_kprobe(); out: - preempt_enable_no_resched(); - /* * if somebody else is singlestepping across a probe point, flags * will have TF set, in which case, continue the remaining processing @@ -1014,7 +1005,6 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr) restore_previous_kprobe(kcb); else reset_current_kprobe(); - preempt_enable_no_resched(); } else if (kcb->kprobe_status == KPROBE_HIT_ACTIVE || kcb->kprobe_status == KPROBE_HIT_SSDONE) { /* diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index 203d398802a3..eaf02f2e7300 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -491,7 +491,6 @@ int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter) regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX; if (!reenter) reset_current_kprobe(); - preempt_enable_no_resched(); return 1; } return 0; -- cgit From dcce32d952eddcd427f648ebd04339cfbf305e23 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 20 Jun 2018 01:16:46 +0900 Subject: Documentation/kprobes: Add how to change the execution path Add a section that explaining how to change the execution path with kprobes and warnings for some arch. Signed-off-by: Masami Hiramatsu Acked-by: Thomas Gleixner Cc: Ananth N Mavinakayanahalli Cc: Andrew Morton Cc: Jonathan Corbet Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Steven Rostedt Cc: linux-arch@vger.kernel.org Cc: linux-doc@vger.kernel.org Link: https://lore.kernel.org/lkml/152942500680.15209.12374262914863044775.stgit@devbox Signed-off-by: Ingo Molnar --- Documentation/kprobes.txt | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt index cbb545910634..13d8efdb9718 100644 --- a/Documentation/kprobes.txt +++ b/Documentation/kprobes.txt @@ -80,6 +80,26 @@ After the instruction is single-stepped, Kprobes executes the "post_handler," if any, that is associated with the kprobe. Execution then continues with the instruction following the probepoint. +Changing Execution Path +----------------------- + +Since the kprobes can probe into a running kernel code, it can change +the register set, including instruction pointer. This operation +requires maximum attention, such as keeping the stack frame, recovering +execution path etc. Since it is operated on running kernel and need deep +knowladge of the archtecture and concurrent computing, you can easily +shot your foot. + +If you change the instruction pointer (and set up other related +registers) in pre_handler, you must return !0 so that the kprobes +stops single stepping and just returns to given address. +This also means post_handler should not be called anymore. + +Note that this operation may be harder on some architectures which +use TOC (Table of Contents) for function call, since you have to +setup new TOC for your function in your module, and recover old +one after back from it. + Return Probes ------------- -- cgit From 4de58696de076d9bd2745d1cbe0930635c3f5ac9 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 20 Jun 2018 01:17:15 +0900 Subject: kprobes: Remove jprobe stub API Remove jprobe stub APIs from linux/kprobes.h since the jprobe implementation was completely gone. 
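To make the new "Changing Execution Path" documentation above concrete, here is a minimal, x86-flavoured sketch of a pre_handler that diverts the probed function to a replacement with the same prototype. All names here are illustrative, regs->ip is x86-specific, and a real module still has to handle the architecture details (such as the TOC handling mentioned in the documentation) on its own:

    /* Hypothetical sketch: divert the probed function to my_replacement() on x86. */
    static long my_replacement(long arg)
    {
    	return -ENODEV;		/* whatever the replacement should return */
    }

    static int divert_pre_handler(struct kprobe *p, struct pt_regs *regs)
    {
    	/* Point the saved instruction pointer at the replacement function. */
    	regs->ip = (unsigned long)my_replacement;

    	/*
    	 * Returning !0 tells the kprobes core that the execution path was
    	 * changed: single-stepping is skipped and post_handler is not called.
    	 */
    	return 1;
    }

    static struct kprobe divert_kp = {
    	.symbol_name	= "probed_function",	/* illustrative symbol name */
    	.pre_handler	= divert_pre_handler,
    };

Such a probe is registered with register_kprobe(&divert_kp) as usual; only the non-zero return from the pre_handler distinguishes it from an ordinary observing probe.
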
Signed-off-by: Masami Hiramatsu Acked-by: Thomas Gleixner Cc: Ananth N Mavinakayanahalli Cc: Andrew Morton Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Steven Rostedt Cc: linux-arch@vger.kernel.org Link: https://lore.kernel.org/lkml/152942503572.15209.1652552217914694917.stgit@devbox Signed-off-by: Ingo Molnar --- include/linux/kprobes.h | 50 ------------------------------------------------- 1 file changed, 50 deletions(-) diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index b520baa65682..e909413e4e38 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -63,7 +63,6 @@ struct pt_regs; struct kretprobe; struct kretprobe_instance; typedef int (*kprobe_pre_handler_t) (struct kprobe *, struct pt_regs *); -typedef int (*kprobe_break_handler_t) (struct kprobe *, struct pt_regs *); typedef void (*kprobe_post_handler_t) (struct kprobe *, struct pt_regs *, unsigned long flags); typedef int (*kprobe_fault_handler_t) (struct kprobe *, struct pt_regs *, @@ -101,12 +100,6 @@ struct kprobe { */ kprobe_fault_handler_t fault_handler; - /* - * ... called if breakpoint trap occurs in probe handler. - * Return 1 if it handled break, otherwise kernel will see it. - */ - kprobe_break_handler_t break_handler; - /* Saved opcode (which has been replaced with breakpoint) */ kprobe_opcode_t opcode; @@ -154,24 +147,6 @@ static inline int kprobe_ftrace(struct kprobe *p) return p->flags & KPROBE_FLAG_FTRACE; } -/* - * Special probe type that uses setjmp-longjmp type tricks to resume - * execution at a specified entry with a matching prototype corresponding - * to the probed function - a trick to enable arguments to become - * accessible seamlessly by probe handling logic. - * Note: - * Because of the way compilers allocate stack space for local variables - * etc upfront, regardless of sub-scopes within a function, this mirroring - * principle currently works only for probes placed on function entry points. - */ -struct jprobe { - struct kprobe kp; - void *entry; /* probe handling code to jump to */ -}; - -/* For backward compatibility with old code using JPROBE_ENTRY() */ -#define JPROBE_ENTRY(handler) (handler) - /* * Function-return probe - * Note: @@ -436,9 +411,6 @@ static inline void unregister_kprobe(struct kprobe *p) static inline void unregister_kprobes(struct kprobe **kps, int num) { } -static inline void jprobe_return(void) -{ -} static inline int register_kretprobe(struct kretprobe *rp) { return -ENOSYS; @@ -465,20 +437,6 @@ static inline int enable_kprobe(struct kprobe *kp) return -ENOSYS; } #endif /* CONFIG_KPROBES */ -static inline int register_jprobe(struct jprobe *p) -{ - return -ENOSYS; -} -static inline int register_jprobes(struct jprobe **jps, int num) -{ - return -ENOSYS; -} -static inline void unregister_jprobe(struct jprobe *p) -{ -} -static inline void unregister_jprobes(struct jprobe **jps, int num) -{ -} static inline int disable_kretprobe(struct kretprobe *rp) { return disable_kprobe(&rp->kp); @@ -487,14 +445,6 @@ static inline int enable_kretprobe(struct kretprobe *rp) { return enable_kprobe(&rp->kp); } -static inline int disable_jprobe(struct jprobe *jp) -{ - return -ENOSYS; -} -static inline int enable_jprobe(struct jprobe *jp) -{ - return -ENOSYS; -} #ifndef CONFIG_KPROBES static inline bool is_kprobe_insn_slot(unsigned long addr) -- cgit From 0592e57b24e7e05ec1f4c50b9666c013abff7017 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Tue, 5 Jun 2018 08:38:45 -0700 Subject: perf/x86/intel/lbr: Fix incomplete LBR call stack LBR has a limited stack size. 
If a task has a deeper call stack than LBR's stack size, only the overflowed part is reported. A complete call stack may not be reconstructed by perf tool. Current code doesn't access all LBR registers. It only read the ones below the TOS. The LBR registers above the TOS will be discarded unconditionally. When a CALL is captured, the TOS is incremented by 1 , modulo max LBR stack size. The LBR HW only records the call stack information to the register which the TOS points to. It will not touch other LBR registers. So the registers above the TOS probably still store the valid call stack information for an overflowed call stack, which need to be reported. To retrieve complete call stack information, we need to start from TOS, read all LBR registers until an invalid entry is detected. 0s can be used to detect the invalid entry, because: - When a RET is captured, the HW zeros the LBR register which TOS points to, then decreases the TOS. - The LBR registers are reset to 0 when adding a new LBR event or scheduling an existing LBR event. - A taken branch at IP 0 is not expected The context switch code is also modified to save/restore all valid LBR registers. Furthermore, the LBR registers, which don't have valid call stack information, need to be reset in restore, because they may be polluted while swapped out. Here is a small test program, tchain_deep. Its call stack is deeper than 32. noinline void f33(void) { int i; for (i = 0; i < 10000000;) { if (i%2) i++; else i++; } } noinline void f32(void) { f33(); } noinline void f31(void) { f32(); } ... ... noinline void f1(void) { f2(); } int main() { f1(); } Here is the test result on SKX. The max stack size of SKX is 32. Without the patch: $ perf record -e cycles --call-graph lbr -- ./tchain_deep $ perf report --stdio # # Children Self Command Shared Object Symbol # ........ ........ ........... ................ ................. # 100.00% 99.99% tchain_deep tchain_deep [.] f33 | --99.99%--f30 f31 f32 f33 With the patch: $ perf record -e cycles --call-graph lbr -- ./tchain_deep $ perf report --stdio # Children Self Command Shared Object Symbol # ........ ........ ........... ................ .................. # 99.99% 0.00% tchain_deep tchain_deep [.] 
f1 | ---f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13 f14 f15 f16 f17 f18 f19 f20 f21 f22 f23 f24 f25 f26 f27 f28 f29 f30 f31 f32 f33 Signed-off-by: Kan Liang Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Stephane Eranian Cc: Vince Weaver Cc: Alexander Shishkin Cc: Thomas Gleixner Cc: acme@kernel.org Cc: eranian@google.com Link: https://lore.kernel.org/lkml/1528213126-4312-1-git-send-email-kan.liang@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/events/intel/lbr.c | 32 ++++++++++++++++++++++++++------ arch/x86/events/perf_event.h | 1 + 2 files changed, 27 insertions(+), 6 deletions(-) diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index cf372b90557e..a4170048a30b 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -346,7 +346,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx) mask = x86_pmu.lbr_nr - 1; tos = task_ctx->tos; - for (i = 0; i < tos; i++) { + for (i = 0; i < task_ctx->valid_lbrs; i++) { lbr_idx = (tos - i) & mask; wrlbr_from(lbr_idx, task_ctx->lbr_from[i]); wrlbr_to (lbr_idx, task_ctx->lbr_to[i]); @@ -354,6 +354,15 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx) if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO) wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]); } + + for (; i < x86_pmu.lbr_nr; i++) { + lbr_idx = (tos - i) & mask; + wrlbr_from(lbr_idx, 0); + wrlbr_to(lbr_idx, 0); + if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO) + wrmsrl(MSR_LBR_INFO_0 + lbr_idx, 0); + } + wrmsrl(x86_pmu.lbr_tos, tos); task_ctx->lbr_stack_state = LBR_NONE; } @@ -361,7 +370,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx) static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx) { unsigned lbr_idx, mask; - u64 tos; + u64 tos, from; int i; if (task_ctx->lbr_callstack_users == 0) { @@ -371,13 +380,17 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx) mask = x86_pmu.lbr_nr - 1; tos = intel_pmu_lbr_tos(); - for (i = 0; i < tos; i++) { + for (i = 0; i < x86_pmu.lbr_nr; i++) { lbr_idx = (tos - i) & mask; - task_ctx->lbr_from[i] = rdlbr_from(lbr_idx); + from = rdlbr_from(lbr_idx); + if (!from) + break; + task_ctx->lbr_from[i] = from; task_ctx->lbr_to[i] = rdlbr_to(lbr_idx); if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO) rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]); } + task_ctx->valid_lbrs = i; task_ctx->tos = tos; task_ctx->lbr_stack_state = LBR_VALID; } @@ -531,7 +544,7 @@ static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc) */ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc) { - bool need_info = false; + bool need_info = false, call_stack = false; unsigned long mask = x86_pmu.lbr_nr - 1; int lbr_format = x86_pmu.intel_cap.lbr_format; u64 tos = intel_pmu_lbr_tos(); @@ -542,7 +555,7 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc) if (cpuc->lbr_sel) { need_info = !(cpuc->lbr_sel->config & LBR_NO_INFO); if (cpuc->lbr_sel->config & LBR_CALL_STACK) - num = tos; + call_stack = true; } for (i = 0; i < num; i++) { @@ -555,6 +568,13 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc) from = rdlbr_from(lbr_idx); to = rdlbr_to(lbr_idx); + /* + * Read LBR call stack entries + * until invalid entry (0s) is detected. 
+ */ + if (call_stack && !from) + break; + if (lbr_format == LBR_FORMAT_INFO && need_info) { u64 info; diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 9f3711470ec1..6b72a92069fd 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -648,6 +648,7 @@ struct x86_perf_task_context { u64 lbr_to[MAX_LBR_ENTRIES]; u64 lbr_info[MAX_LBR_ENTRIES]; int tos; + int valid_lbrs; int lbr_callstack_users; int lbr_stack_state; }; -- cgit From 8b077e4a69bef5c4121426e99497975860191e53 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Tue, 5 Jun 2018 08:38:46 -0700 Subject: perf/x86/intel/lbr: Optimize context switches for the LBR call stack Context switches with perf LBR call stack context are fairly expensive because they do a lot of MSR writes. Currently we unconditionally do the expensive operation when LBR call stack is enabled. It's not necessary for some common cases, e.g task -> other kernel thread -> same task. The LBR registers are not changed, hence they don't need to be rewritten/restored. Introduce per-CPU variables to track the last LBR call stack context. If the same context is scheduled in, the rewrite/restore is not required, with the following two exceptions: - The LBR registers may be modified by a normal LBR event, i.e., adding a new LBR event or scheduling an existing LBR event. In both cases, the LBR registers are reset first. The last LBR call stack information is cleared in intel_pmu_lbr_reset(). Restoring the LBR registers is required. - The LBR registers are initialized to zero in C6. If the LBR registers which TOS points is cleared, C6 must be entered while swapped out. Restoring the LBR registers is required as well. These exceptions are not common. Signed-off-by: Kan Liang Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Stephane Eranian Cc: Vince Weaver Cc: Alexander Shishkin Cc: acme@kernel.org Cc: eranian@google.com Link: https://lore.kernel.org/lkml/1528213126-4312-2-git-send-email-kan.liang@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/events/intel/lbr.c | 24 +++++++++++++++++++++++- arch/x86/events/perf_event.h | 4 ++++ 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index a4170048a30b..f3e006bed9a7 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -216,6 +216,8 @@ static void intel_pmu_lbr_reset_64(void) void intel_pmu_lbr_reset(void) { + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + if (!x86_pmu.lbr_nr) return; @@ -223,6 +225,9 @@ void intel_pmu_lbr_reset(void) intel_pmu_lbr_reset_32(); else intel_pmu_lbr_reset_64(); + + cpuc->last_task_ctx = NULL; + cpuc->last_log_id = 0; } /* @@ -334,6 +339,7 @@ static inline u64 rdlbr_to(unsigned int idx) static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx) { + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); int i; unsigned lbr_idx, mask; u64 tos; @@ -344,8 +350,20 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx) return; } - mask = x86_pmu.lbr_nr - 1; tos = task_ctx->tos; + /* + * Does not restore the LBR registers, if + * - No one else touched them, and + * - Did not enter C6 + */ + if ((task_ctx == cpuc->last_task_ctx) && + (task_ctx->log_id == cpuc->last_log_id) && + rdlbr_from(tos)) { + task_ctx->lbr_stack_state = LBR_NONE; + return; + } + + mask = x86_pmu.lbr_nr - 1; for (i = 0; i < task_ctx->valid_lbrs; i++) { lbr_idx = 
(tos - i) & mask; wrlbr_from(lbr_idx, task_ctx->lbr_from[i]); @@ -369,6 +387,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx) static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx) { + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); unsigned lbr_idx, mask; u64 tos, from; int i; @@ -393,6 +412,9 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx) task_ctx->valid_lbrs = i; task_ctx->tos = tos; task_ctx->lbr_stack_state = LBR_VALID; + + cpuc->last_task_ctx = task_ctx; + cpuc->last_log_id = ++task_ctx->log_id; } void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in) diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 6b72a92069fd..2430398befd8 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -163,6 +163,7 @@ struct intel_excl_cntrs { unsigned core_id; /* per-core: core id */ }; +struct x86_perf_task_context; #define MAX_LBR_ENTRIES 32 enum { @@ -214,6 +215,8 @@ struct cpu_hw_events { struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES]; struct er_account *lbr_sel; u64 br_sel; + struct x86_perf_task_context *last_task_ctx; + int last_log_id; /* * Intel host/guest exclude bits @@ -651,6 +654,7 @@ struct x86_perf_task_context { int valid_lbrs; int lbr_callstack_users; int lbr_stack_state; + int log_id; }; #define x86_add_quirk(func_) \ -- cgit From 9e3ed2d7597ccbce2a30e036342fb0613d0759e5 Mon Sep 17 00:00:00 2001 From: Souptick Joarder Date: Mon, 21 May 2018 23:55:20 +0530 Subject: perf/core: Change perf_mmap_fault() return type to 'vm_fault_t' Use new return type 'vm_fault_t' for fault handlers. For now, this is just documenting that the function returns a VM_FAULT value rather than an errno. Once all instances are converted, vm_fault_t will become a distinct type. See the following commit: 1c8f422059ae ("mm: change return type to vm_fault_t") Signed-off-by: Souptick Joarder Reviewed-by: Matthew Wilcox Acked-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: acme@kernel.org Cc: akpm@linux-foundation.org Cc: alexander.shishkin@linux.intel.com Cc: jolsa@redhat.com Cc: namhyung@kernel.org Link: https://lkml.kernel.org/lkml/20180521182520.GA19677@jordon-HP-15-Notebook-PC Signed-off-by: Ingo Molnar --- kernel/events/core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/events/core.c b/kernel/events/core.c index 80cca2b30c4f..a4d5ac6d74af 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -5273,11 +5273,11 @@ unlock: } EXPORT_SYMBOL_GPL(perf_event_update_userpage); -static int perf_mmap_fault(struct vm_fault *vmf) +static vm_fault_t perf_mmap_fault(struct vm_fault *vmf) { struct perf_event *event = vmf->vma->vm_file->private_data; struct ring_buffer *rb; - int ret = VM_FAULT_SIGBUS; + vm_fault_t ret = VM_FAULT_SIGBUS; if (vmf->flags & FAULT_FLAG_MKWRITE) { if (vmf->pgoff == 0) -- cgit From f2a3ab36077222437b4826fc76111caa14562b7c Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 28 Apr 2018 21:35:01 +0900 Subject: kprobes: Make list and blacklist root user read only Since the blacklist and list files on debugfs indicates a sensitive address information to reader, it should be restricted to the root user. Suggested-by: Thomas Richter Suggested-by: Ingo Molnar Signed-off-by: Masami Hiramatsu Cc: Ananth N Mavinakayanahalli Cc: Anil S Keshavamurthy Cc: Arnd Bergmann Cc: David Howells Cc: David S . 
Miller Cc: Heiko Carstens Cc: Jon Medhurst Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Tobin C . Harding Cc: Will Deacon Cc: acme@kernel.org Cc: akpm@linux-foundation.org Cc: brueckner@linux.vnet.ibm.com Cc: linux-arch@vger.kernel.org Cc: rostedt@goodmis.org Cc: schwidefsky@de.ibm.com Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/lkml/152491890171.9916.5183693615601334087.stgit@devbox Signed-off-by: Ingo Molnar --- kernel/kprobes.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 536ab451e96d..898ee56d4f48 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -2509,7 +2509,7 @@ static int __init debugfs_kprobe_init(void) if (!dir) return -ENOMEM; - file = debugfs_create_file("list", 0444, dir, NULL, + file = debugfs_create_file("list", 0400, dir, NULL, &debugfs_kprobes_operations); if (!file) goto error; @@ -2519,7 +2519,7 @@ static int __init debugfs_kprobe_init(void) if (!file) goto error; - file = debugfs_create_file("blacklist", 0444, dir, NULL, + file = debugfs_create_file("blacklist", 0400, dir, NULL, &debugfs_kprobe_blacklist_ops); if (!file) goto error; -- cgit From ffb9bd68ebdb3b8d00ef5a79bbe8167a3281cace Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 28 Apr 2018 21:35:32 +0900 Subject: kprobes: Show blacklist addresses as same as kallsyms does Show kprobes blacklist addresses under same condition of showing kallsyms addresses. Since there are several name conflict for local symbols, kprobe blacklist needs to show each addresses so that user can identify where is on blacklist by comparing with kallsyms. Signed-off-by: Masami Hiramatsu Cc: Ananth N Mavinakayanahalli Cc: Anil S Keshavamurthy Cc: Arnd Bergmann Cc: David Howells Cc: David S . Miller Cc: Heiko Carstens Cc: Jon Medhurst Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Thomas Richter Cc: Tobin C . Harding Cc: Will Deacon Cc: acme@kernel.org Cc: akpm@linux-foundation.org Cc: brueckner@linux.vnet.ibm.com Cc: linux-arch@vger.kernel.org Cc: rostedt@goodmis.org Cc: schwidefsky@de.ibm.com Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/lkml/152491893217.9916.14760965896164273464.stgit@devbox Signed-off-by: Ingo Molnar --- kernel/kprobes.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 898ee56d4f48..ab1bfa3d1d9c 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -2326,8 +2326,16 @@ static int kprobe_blacklist_seq_show(struct seq_file *m, void *v) struct kprobe_blacklist_entry *ent = list_entry(v, struct kprobe_blacklist_entry, list); - seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr, - (void *)ent->end_addr, (void *)ent->start_addr); + /* + * If /proc/kallsyms is not showing kernel address, we won't + * show them here either. + */ + if (!kallsyms_show_value()) + seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL, + (void *)ent->start_addr); + else + seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr, + (void *)ent->end_addr, (void *)ent->start_addr); return 0; } -- cgit From 81365a947de453bcaba2558f8de5dadcffe05bc1 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 28 Apr 2018 21:36:02 +0900 Subject: kprobes: Show address of kprobes if kallsyms does Show probed address in debugfs kprobe list file as same as kallsyms does. This information is used for checking kprobes are placed in the expected address. So it should be able to compared with address in kallsyms. 
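The rule this patch and the previous one apply can be boiled down to a short sketch. This is an illustrative fragment, not the verbatim change: show_kprobe_addr() is a made-up helper name, and the real edits land in kprobe_blacklist_seq_show() (above) and report_probe() (below).

#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/seq_file.h>

/* Hypothetical helper, for illustration only. */
static void show_kprobe_addr(struct seq_file *m, struct kprobe *p)
{
	/* Expose the raw address only if /proc/kallsyms would expose it too. */
	void *addr = kallsyms_show_value() ? (void *)p->addr : NULL;

	/* %px prints the raw value; %pS resolves symbol+offset via kallsyms. */
	seq_printf(m, "%px %pS\n", addr, p->addr);
}

With this gating in place, the debugfs output can be lined up against /proc/kallsyms whenever kallsyms itself shows addresses, and degrades to NULL addresses plus symbol names otherwise.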
Signed-off-by: Masami Hiramatsu Cc: Ananth N Mavinakayanahalli Cc: Anil S Keshavamurthy Cc: Arnd Bergmann Cc: David Howells Cc: David S . Miller Cc: Heiko Carstens Cc: Jon Medhurst Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Thomas Richter Cc: Tobin C . Harding Cc: Will Deacon Cc: acme@kernel.org Cc: akpm@linux-foundation.org Cc: brueckner@linux.vnet.ibm.com Cc: linux-arch@vger.kernel.org Cc: rostedt@goodmis.org Cc: schwidefsky@de.ibm.com Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/lkml/152491896256.9916.1583733714492565296.stgit@devbox Signed-off-by: Ingo Molnar --- kernel/kprobes.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/kernel/kprobes.c b/kernel/kprobes.c index ab1bfa3d1d9c..1d6130d2937a 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -2226,19 +2226,23 @@ static void report_probe(struct seq_file *pi, struct kprobe *p, const char *sym, int offset, char *modname, struct kprobe *pp) { char *kprobe_type; + void *addr = p->addr; if (p->pre_handler == pre_handler_kretprobe) kprobe_type = "r"; else kprobe_type = "k"; + if (!kallsyms_show_value()) + addr = NULL; + if (sym) - seq_printf(pi, "%p %s %s+0x%x %s ", - p->addr, kprobe_type, sym, offset, + seq_printf(pi, "%px %s %s+0x%x %s ", + addr, kprobe_type, sym, offset, (modname ? modname : " ")); - else - seq_printf(pi, "%p %s %p ", - p->addr, kprobe_type, p->addr); + else /* try to use %pS */ + seq_printf(pi, "%px %s %pS ", + addr, kprobe_type, p->addr); if (!pp) pp = p; -- cgit From 4458515b2c52831ee622411d2fe3e774d1f5c49a Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 28 Apr 2018 21:36:33 +0900 Subject: kprobes: Replace %p with other pointer types Replace %p with %pS or just remove it if unneeded. And use WARN_ONCE() if it is a single bug. Signed-off-by: Masami Hiramatsu Cc: Ananth N Mavinakayanahalli Cc: Anil S Keshavamurthy Cc: Arnd Bergmann Cc: David Howells Cc: David S . Miller Cc: Heiko Carstens Cc: Jon Medhurst Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Thomas Richter Cc: Tobin C . Harding Cc: Will Deacon Cc: acme@kernel.org Cc: akpm@linux-foundation.org Cc: brueckner@linux.vnet.ibm.com Cc: linux-arch@vger.kernel.org Cc: rostedt@goodmis.org Cc: schwidefsky@de.ibm.com Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/lkml/152491899284.9916.5350534544808158621.stgit@devbox Signed-off-by: Ingo Molnar --- kernel/kprobes.c | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 1d6130d2937a..ab257be4d924 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -710,9 +710,7 @@ static void reuse_unused_kprobe(struct kprobe *ap) * there is still a relative jump) and disabled. 
*/ op = container_of(ap, struct optimized_kprobe, kp); - if (unlikely(list_empty(&op->list))) - printk(KERN_WARNING "Warning: found a stray unused " - "aggrprobe@%p\n", ap->addr); + WARN_ON_ONCE(list_empty(&op->list)); /* Enable the probe again */ ap->flags &= ~KPROBE_FLAG_DISABLED; /* Optimize it again (remove from op->list) */ @@ -985,7 +983,8 @@ static int arm_kprobe_ftrace(struct kprobe *p) ret = ftrace_set_filter_ip(&kprobe_ftrace_ops, (unsigned long)p->addr, 0, 0); if (ret) { - pr_debug("Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret); + pr_debug("Failed to arm kprobe-ftrace at %pS (%d)\n", + p->addr, ret); return ret; } @@ -1025,7 +1024,8 @@ static int disarm_kprobe_ftrace(struct kprobe *p) ret = ftrace_set_filter_ip(&kprobe_ftrace_ops, (unsigned long)p->addr, 1, 0); - WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret); + WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (%d)\n", + p->addr, ret); return ret; } #else /* !CONFIG_KPROBES_ON_FTRACE */ @@ -2069,11 +2069,12 @@ out: } EXPORT_SYMBOL_GPL(enable_kprobe); +/* Caller must NOT call this in usual path. This is only for critical case */ void dump_kprobe(struct kprobe *kp) { - printk(KERN_WARNING "Dumping kprobe:\n"); - printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n", - kp->symbol_name, kp->addr, kp->offset); + pr_err("Dumping kprobe:\n"); + pr_err("Name: %s\nOffset: %x\nAddress: %pS\n", + kp->symbol_name, kp->offset, kp->addr); } NOKPROBE_SYMBOL(dump_kprobe); @@ -2096,11 +2097,8 @@ static int __init populate_kprobe_blacklist(unsigned long *start, entry = arch_deref_entry_point((void *)*iter); if (!kernel_text_address(entry) || - !kallsyms_lookup_size_offset(entry, &size, &offset)) { - pr_err("Failed to find blacklist at %p\n", - (void *)entry); + !kallsyms_lookup_size_offset(entry, &size, &offset)) continue; - } ent = kmalloc(sizeof(*ent), GFP_KERNEL); if (!ent) -- cgit From 0ea063306eecf300fcf06d2f5917474b580f666f Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 28 Apr 2018 21:37:03 +0900 Subject: kprobes/x86: Fix %p uses in error messages Remove all %p uses in error messages in kprobes/x86. Signed-off-by: Masami Hiramatsu Cc: Ananth N Mavinakayanahalli Cc: Anil S Keshavamurthy Cc: Arnd Bergmann Cc: David Howells Cc: David S . Miller Cc: Heiko Carstens Cc: Jon Medhurst Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Thomas Richter Cc: Tobin C . 
Harding Cc: Will Deacon Cc: acme@kernel.org Cc: akpm@linux-foundation.org Cc: brueckner@linux.vnet.ibm.com Cc: linux-arch@vger.kernel.org Cc: rostedt@goodmis.org Cc: schwidefsky@de.ibm.com Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/lkml/152491902310.9916.13355297638917767319.stgit@devbox Signed-off-by: Ingo Molnar --- arch/x86/kernel/kprobes/core.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index f7104b256de7..b0d1e81c96bb 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -393,8 +393,6 @@ int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn) - (u8 *) real; if ((s64) (s32) newdisp != newdisp) { pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp); - pr_err("\tSrc: %p, Dest: %p, old disp: %x\n", - src, real, insn->displacement.value); return 0; } disp = (u8 *) dest + insn_offset_displacement(insn); @@ -637,8 +635,7 @@ static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs, * Raise a BUG or we'll continue in an endless reentering loop * and eventually a stack overflow. */ - printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n", - p->addr); + pr_err("Unrecoverable kprobe detected.\n"); dump_kprobe(p); BUG(); default: -- cgit From 75b2f5f5911fe7a2fc82969b2b24dde34e8f820d Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 28 Apr 2018 21:37:33 +0900 Subject: kprobes/arm: Fix %p uses in error messages Fix %p uses in error messages by removing it and using general dumper. Signed-off-by: Masami Hiramatsu Cc: Ananth N Mavinakayanahalli Cc: Anil S Keshavamurthy Cc: Arnd Bergmann Cc: David Howells Cc: David S . Miller Cc: Heiko Carstens Cc: Jon Medhurst Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Thomas Richter Cc: Tobin C . 
Harding Cc: Will Deacon Cc: acme@kernel.org Cc: akpm@linux-foundation.org Cc: brueckner@linux.vnet.ibm.com Cc: linux-arch@vger.kernel.org Cc: rostedt@goodmis.org Cc: schwidefsky@de.ibm.com Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/lkml/152491905361.9916.15300852365956231645.stgit@devbox Signed-off-by: Ingo Molnar --- arch/arm/probes/kprobes/core.c | 4 ++-- arch/arm/probes/kprobes/test-core.c | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c index 8d37601fdb20..f8bd523d64d1 100644 --- a/arch/arm/probes/kprobes/core.c +++ b/arch/arm/probes/kprobes/core.c @@ -286,8 +286,8 @@ void __kprobes kprobe_handler(struct pt_regs *regs) break; case KPROBE_REENTER: /* A nested probe was hit in FIQ, it is a BUG */ - pr_warn("Unrecoverable kprobe detected at %p.\n", - p->addr); + pr_warn("Unrecoverable kprobe detected.\n"); + dump_kprobe(p); /* fall through */ default: /* impossible cases */ diff --git a/arch/arm/probes/kprobes/test-core.c b/arch/arm/probes/kprobes/test-core.c index 14db14152909..cc237fa9b90f 100644 --- a/arch/arm/probes/kprobes/test-core.c +++ b/arch/arm/probes/kprobes/test-core.c @@ -1461,7 +1461,6 @@ fail: print_registers(&result_regs); if (mem) { - pr_err("current_stack=%p\n", current_stack); pr_err("expected_memory:\n"); print_memory(expected_memory, mem_size); pr_err("result_memory:\n"); -- cgit From 0722867dcbc28cc9b269b57acd847c7c1aa638d6 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 28 Apr 2018 21:38:04 +0900 Subject: kprobes/arm64: Fix %p uses in error messages Fix %p uses in error messages by removing it because those are redundant or meaningless. Signed-off-by: Masami Hiramatsu Acked-by: Will Deacon Cc: Ananth N Mavinakayanahalli Cc: Anil S Keshavamurthy Cc: Arnd Bergmann Cc: David Howells Cc: David S . Miller Cc: Heiko Carstens Cc: Jon Medhurst Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Thomas Richter Cc: Tobin C . Harding Cc: acme@kernel.org Cc: akpm@linux-foundation.org Cc: brueckner@linux.vnet.ibm.com Cc: linux-arch@vger.kernel.org Cc: rostedt@goodmis.org Cc: schwidefsky@de.ibm.com Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/lkml/152491908405.9916.12425053035317241111.stgit@devbox Signed-off-by: Ingo Molnar --- arch/arm64/kernel/probes/kprobes.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index 5daf3d721cb7..e78c3ef04d95 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -275,7 +275,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p, break; case KPROBE_HIT_SS: case KPROBE_REENTER: - pr_warn("Unrecoverable kprobe detected at %p.\n", p->addr); + pr_warn("Unrecoverable kprobe detected.\n"); dump_kprobe(p); BUG(); break; -- cgit From 01bdee64f9cf8e15f998bf52789ed9d0ebdfa621 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 22 Jun 2018 15:07:40 +0900 Subject: kprobes/Documentation: Fix various typos Fix typos and clean up the wording, with the help of Randy Dunlap. 
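Stepping back from the individual patches for a moment: the run of %p cleanups above pivots on which printk pointer specifier is used. A hedged, illustrative fragment (not taken from any of the diffs; demo_pointer_formats() is a made-up name) summarizing the difference:

#include <linux/kprobes.h>
#include <linux/printk.h>

/* Hypothetical helper, for illustration only. */
static void demo_pointer_formats(struct kprobe *p)
{
	/*
	 * %p  - hashed pointer, cannot be correlated with kallsyms
	 * %px - raw pointer value, only acceptable behind an explicit gate
	 * %pS - symbol+offset, resolved through kallsyms
	 */
	pr_info("kprobe at %pS (raw %px, hashed %p)\n",
		p->addr, p->addr, p->addr);
}

This is why the error paths in the patches above either switch to %pS or drop the address entirely and rely on dump_kprobe().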
Suggested-by: Randy Dunlap Signed-off-by: Masami Hiramatsu Cc: Ananth N Mavinakayanahalli Cc: Andrew Morton Cc: Jonathan Corbet Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Steven Rostedt Cc: Thomas Gleixner Cc: linux-arch@vger.kernel.org Cc: linux-doc@vger.kernel.org Link: http://lkml.kernel.org/r/20180622150740.bd26241032c972d86e23bf73@kernel.org Signed-off-by: Ingo Molnar --- Documentation/kprobes.txt | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt index 13d8efdb9718..10f4499e677c 100644 --- a/Documentation/kprobes.txt +++ b/Documentation/kprobes.txt @@ -83,22 +83,22 @@ Execution then continues with the instruction following the probepoint. Changing Execution Path ----------------------- -Since the kprobes can probe into a running kernel code, it can change -the register set, including instruction pointer. This operation -requires maximum attention, such as keeping the stack frame, recovering -execution path etc. Since it is operated on running kernel and need deep -knowladge of the archtecture and concurrent computing, you can easily -shot your foot. +Since kprobes can probe into a running kernel code, it can change the +register set, including instruction pointer. This operation requires +maximum care, such as keeping the stack frame, recovering the execution +path etc. Since it operates on a running kernel and needs deep knowledge +of computer architecture and concurrent computing, you can easily shoot +your foot. If you change the instruction pointer (and set up other related -registers) in pre_handler, you must return !0 so that the kprobes -stops single stepping and just returns to given address. +registers) in pre_handler, you must return !0 so that kprobes stops +single stepping and just returns to the given address. This also means post_handler should not be called anymore. -Note that this operation may be harder on some architectures which -use TOC (Table of Contents) for function call, since you have to -setup new TOC for your function in your module, and recover old -one after back from it. +Note that this operation may be harder on some architectures which use +TOC (Table of Contents) for function call, since you have to setup a new +TOC for your function in your module, and recover the old one after +returning from it. Return Probes ------------- -- cgit From 9a4903dde2c8633c5fcf887b98c4e047a6154a54 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 26 Jun 2018 04:58:48 +0200 Subject: perf/hw_breakpoint: Split attribute parse and commit arch_validate_hwbkpt_settings() mixes up attribute check and commit into a single code entity. Therefore the validation may return an error due to incorrect atributes while still leaving halfway modified architecture breakpoint data. This is harmless when we deal with a new breakpoint but it becomes a problem when we modify an existing breakpoint. Split attribute parse and commit to fix that. The architecture is passed a "struct arch_hw_breakpoint" to fill on top of the new attr and the core takes care about copying the backend data once it's fully validated. The architectures then need to implement the new API. 
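The flow after the split can be summarized in a short sketch. This is a simplified illustration, not the full patch: example_register() is a made-up name, and the slot reservation/release and kernelspace checks of the real register_perf_hw_breakpoint() are elided here.

#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>

/* Hypothetical, condensed version of the registration path shown below. */
static int example_register(struct perf_event *bp)
{
	struct arch_hw_breakpoint hw;	/* scratch copy, filled by the arch */
	int err;

	/* The arch hook validates the attr and fills 'hw', touching nothing else. */
	err = hw_breakpoint_arch_parse(bp, &bp->attr, &hw);
	if (err)
		return err;	/* bp->hw.info is still untouched */

	/* Commit only once validation has fully succeeded. */
	bp->hw.info = hw;
	return 0;
}

Keeping the architecture output in a stack-local struct is what makes the modification path safe as well: a failed validation can no longer leave bp->hw.info half-updated.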
Original-patch-by: Andy Lutomirski Reported-by: Linus Torvalds Signed-off-by: Frederic Weisbecker Cc: Alexander Shishkin Cc: Andy Lutomirski Cc: Arnaldo Carvalho de Melo Cc: Arnaldo Carvalho de Melo Cc: Benjamin Herrenschmidt Cc: Catalin Marinas Cc: Chris Zankel Cc: Jiri Olsa Cc: Joel Fernandes Cc: Mark Rutland Cc: Max Filippov Cc: Michael Ellerman Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rich Felker Cc: Thomas Gleixner Cc: Will Deacon Cc: Yoshinori Sato Link: http://lkml.kernel.org/r/1529981939-8231-2-git-send-email-frederic@kernel.org Signed-off-by: Ingo Molnar --- kernel/events/hw_breakpoint.c | 57 +++++++++++++++++++++++++++++++------------ 1 file changed, 41 insertions(+), 16 deletions(-) diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c index 6e28d2866be5..314e2a9040c7 100644 --- a/kernel/events/hw_breakpoint.c +++ b/kernel/events/hw_breakpoint.c @@ -400,16 +400,35 @@ int dbg_release_bp_slot(struct perf_event *bp) return 0; } -static int validate_hw_breakpoint(struct perf_event *bp) +#ifndef hw_breakpoint_arch_parse +int hw_breakpoint_arch_parse(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw) { - int ret; + int err; - ret = arch_validate_hwbkpt_settings(bp); - if (ret) - return ret; + err = arch_validate_hwbkpt_settings(bp); + if (err) + return err; + + *hw = bp->hw.info; + + return 0; +} +#endif + +static int hw_breakpoint_parse(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw) +{ + int err; + + err = hw_breakpoint_arch_parse(bp, attr, hw); + if (err) + return err; if (arch_check_bp_in_kernelspace(bp)) { - if (bp->attr.exclude_kernel) + if (attr->exclude_kernel) return -EINVAL; /* * Don't let unprivileged users set a breakpoint in the trap @@ -424,19 +443,22 @@ static int validate_hw_breakpoint(struct perf_event *bp) int register_perf_hw_breakpoint(struct perf_event *bp) { - int ret; - - ret = reserve_bp_slot(bp); - if (ret) - return ret; + struct arch_hw_breakpoint hw; + int err; - ret = validate_hw_breakpoint(bp); + err = reserve_bp_slot(bp); + if (err) + return err; - /* if arch_validate_hwbkpt_settings() fails then release bp slot */ - if (ret) + err = hw_breakpoint_parse(bp, &bp->attr, &hw); + if (err) { release_bp_slot(bp); + return err; + } - return ret; + bp->hw.info = hw; + + return 0; } /** @@ -464,6 +486,7 @@ modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *a u64 old_len = bp->attr.bp_len; int old_type = bp->attr.bp_type; bool modify = attr->bp_type != old_type; + struct arch_hw_breakpoint hw; int err = 0; bp->attr.bp_addr = attr->bp_addr; @@ -473,7 +496,7 @@ modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *a if (check && memcmp(&bp->attr, attr, sizeof(*attr))) return -EINVAL; - err = validate_hw_breakpoint(bp); + err = hw_breakpoint_parse(bp, attr, &hw); if (!err && modify) err = modify_bp_slot(bp, old_type); @@ -484,7 +507,9 @@ modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *a return err; } + bp->hw.info = hw; bp->attr.disabled = attr->disabled; + return 0; } -- cgit From 8e983ff9ac02a8fb454ed09c2462bdb3617006a8 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 26 Jun 2018 04:58:49 +0200 Subject: perf/hw_breakpoint: Pass arch breakpoint struct to arch_check_bp_in_kernelspace() We can't pass the breakpoint directly on arch_check_bp_in_kernelspace() anymore because its architecture internal datas (struct arch_hw_breakpoint) are not yet 
filled by the time we call the function, and most implementation need this backend to be up to date. So arrange the function to take the probing struct instead. Signed-off-by: Frederic Weisbecker Cc: Alexander Shishkin Cc: Andy Lutomirski Cc: Arnaldo Carvalho de Melo Cc: Arnaldo Carvalho de Melo Cc: Benjamin Herrenschmidt Cc: Catalin Marinas Cc: Chris Zankel Cc: Jiri Olsa Cc: Joel Fernandes Cc: Linus Torvalds Cc: Mark Rutland Cc: Max Filippov Cc: Michael Ellerman Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rich Felker Cc: Thomas Gleixner Cc: Will Deacon Cc: Yoshinori Sato Link: http://lkml.kernel.org/r/1529981939-8231-3-git-send-email-frederic@kernel.org Signed-off-by: Ingo Molnar --- arch/arm/include/asm/hw_breakpoint.h | 2 +- arch/arm/kernel/hw_breakpoint.c | 11 +++-- arch/arm64/include/asm/hw_breakpoint.h | 2 +- arch/arm64/kernel/hw_breakpoint.c | 9 ++-- arch/powerpc/include/asm/hw_breakpoint.h | 2 +- arch/powerpc/kernel/hw_breakpoint.c | 6 +-- arch/sh/include/asm/hw_breakpoint.h | 2 +- arch/sh/kernel/hw_breakpoint.c | 9 ++-- arch/x86/include/asm/hw_breakpoint.h | 2 +- arch/x86/kernel/hw_breakpoint.c | 71 +++++++++++++++++--------------- arch/xtensa/include/asm/hw_breakpoint.h | 2 +- arch/xtensa/kernel/hw_breakpoint.c | 7 ++-- kernel/events/hw_breakpoint.c | 2 +- 13 files changed, 63 insertions(+), 64 deletions(-) diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h index e46e4e7bdba3..d5a0f52c6755 100644 --- a/arch/arm/include/asm/hw_breakpoint.h +++ b/arch/arm/include/asm/hw_breakpoint.h @@ -117,7 +117,7 @@ struct pmu; extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl, int *gen_len, int *gen_type); -extern int arch_check_bp_in_kernelspace(struct perf_event *bp); +extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw); extern int arch_validate_hwbkpt_settings(struct perf_event *bp); extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, unsigned long val, void *data); diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 629e25152c0d..385dcf45d64b 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -456,14 +456,13 @@ static int get_hbp_len(u8 hbp_len) /* * Check whether bp virtual address is in kernel space. */ -int arch_check_bp_in_kernelspace(struct perf_event *bp) +int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw) { unsigned int len; unsigned long va; - struct arch_hw_breakpoint *info = counter_arch_bp(bp); - va = info->address; - len = get_hbp_len(info->ctrl.len); + va = hw->address; + len = get_hbp_len(hw->ctrl.len); return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); } @@ -576,7 +575,7 @@ static int arch_build_bp_info(struct perf_event *bp) /* Privilege */ info->ctrl.privilege = ARM_BREAKPOINT_USER; - if (arch_check_bp_in_kernelspace(bp)) + if (arch_check_bp_in_kernelspace(info)) info->ctrl.privilege |= ARM_BREAKPOINT_PRIV; /* Enabled? */ @@ -640,7 +639,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) return -EINVAL; /* We don't allow mismatch breakpoints in kernel space. 
*/ - if (arch_check_bp_in_kernelspace(bp)) + if (arch_check_bp_in_kernelspace(info)) return -EPERM; /* diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h index 41770766d964..9f4a3d48762e 100644 --- a/arch/arm64/include/asm/hw_breakpoint.h +++ b/arch/arm64/include/asm/hw_breakpoint.h @@ -124,7 +124,7 @@ struct pmu; extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl, int *gen_len, int *gen_type, int *offset); -extern int arch_check_bp_in_kernelspace(struct perf_event *bp); +extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw); extern int arch_validate_hwbkpt_settings(struct perf_event *bp); extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, unsigned long val, void *data); diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c index 413dbe530da8..6a90d12e4ff2 100644 --- a/arch/arm64/kernel/hw_breakpoint.c +++ b/arch/arm64/kernel/hw_breakpoint.c @@ -343,14 +343,13 @@ static int get_hbp_len(u8 hbp_len) /* * Check whether bp virtual address is in kernel space. */ -int arch_check_bp_in_kernelspace(struct perf_event *bp) +int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw) { unsigned int len; unsigned long va; - struct arch_hw_breakpoint *info = counter_arch_bp(bp); - va = info->address; - len = get_hbp_len(info->ctrl.len); + va = hw->address; + len = get_hbp_len(hw->ctrl.len); return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); } @@ -502,7 +501,7 @@ static int arch_build_bp_info(struct perf_event *bp) * Note that we disallow combined EL0/EL1 breakpoints because * that would complicate the stepping code. */ - if (arch_check_bp_in_kernelspace(bp)) + if (arch_check_bp_in_kernelspace(info)) info->ctrl.privilege = AARCH64_BREAKPOINT_EL1; else info->ctrl.privilege = AARCH64_BREAKPOINT_EL0; diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h index 8e7b09703ca4..4d0b1bfcdfd0 100644 --- a/arch/powerpc/include/asm/hw_breakpoint.h +++ b/arch/powerpc/include/asm/hw_breakpoint.h @@ -60,7 +60,7 @@ struct perf_sample_data; extern int hw_breakpoint_slots(int type); extern int arch_bp_generic_fields(int type, int *gen_bp_type); -extern int arch_check_bp_in_kernelspace(struct perf_event *bp); +extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw); extern int arch_validate_hwbkpt_settings(struct perf_event *bp); extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, unsigned long val, void *data); diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index 80547dad37da..899bcec3f3c8 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -119,11 +119,9 @@ void arch_unregister_hw_breakpoint(struct perf_event *bp) /* * Check for virtual address in kernel space. 
*/ -int arch_check_bp_in_kernelspace(struct perf_event *bp) +int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw) { - struct arch_hw_breakpoint *info = counter_arch_bp(bp); - - return is_kernel_addr(info->address); + return is_kernel_addr(hw->address); } int arch_bp_generic_fields(int type, int *gen_bp_type) diff --git a/arch/sh/include/asm/hw_breakpoint.h b/arch/sh/include/asm/hw_breakpoint.h index 7431c172c0cb..8a88ed0b3cd7 100644 --- a/arch/sh/include/asm/hw_breakpoint.h +++ b/arch/sh/include/asm/hw_breakpoint.h @@ -54,7 +54,7 @@ static inline int hw_breakpoint_slots(int type) } /* arch/sh/kernel/hw_breakpoint.c */ -extern int arch_check_bp_in_kernelspace(struct perf_event *bp); +extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw); extern int arch_validate_hwbkpt_settings(struct perf_event *bp); extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, unsigned long val, void *data); diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c index 8648ed05ccf0..38791fefdd0c 100644 --- a/arch/sh/kernel/hw_breakpoint.c +++ b/arch/sh/kernel/hw_breakpoint.c @@ -124,14 +124,13 @@ static int get_hbp_len(u16 hbp_len) /* * Check for virtual address in kernel space. */ -int arch_check_bp_in_kernelspace(struct perf_event *bp) +int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw) { unsigned int len; unsigned long va; - struct arch_hw_breakpoint *info = counter_arch_bp(bp); - va = info->address; - len = get_hbp_len(info->len); + va = hw->address; + len = get_hbp_len(hw->len); return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); } @@ -346,7 +345,7 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args) perf_bp_event(bp, args->regs); /* Deliver the signal to userspace */ - if (!arch_check_bp_in_kernelspace(bp)) { + if (!arch_check_bp_in_kernelspace(&bp->hw.info)) { force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __user *)NULL, current); } diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h index f59c39835a5a..78924596f51f 100644 --- a/arch/x86/include/asm/hw_breakpoint.h +++ b/arch/x86/include/asm/hw_breakpoint.h @@ -52,7 +52,7 @@ static inline int hw_breakpoint_slots(int type) struct perf_event; struct pmu; -extern int arch_check_bp_in_kernelspace(struct perf_event *bp); +extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw); extern int arch_validate_hwbkpt_settings(struct perf_event *bp); extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, unsigned long val, void *data); diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index 8771766d46b6..c43379188f4f 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c @@ -169,28 +169,29 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp) set_dr_addr_mask(0, i); } -/* - * Check for virtual address in kernel space. - */ -int arch_check_bp_in_kernelspace(struct perf_event *bp) +static int arch_bp_generic_len(int x86_len) { - unsigned int len; - unsigned long va; - struct arch_hw_breakpoint *info = counter_arch_bp(bp); - - va = info->address; - len = bp->attr.bp_len; - - /* - * We don't need to worry about va + len - 1 overflowing: - * we already require that va is aligned to a multiple of len. 
- */ - return (va >= TASK_SIZE_MAX) || ((va + len - 1) >= TASK_SIZE_MAX); + switch (x86_len) { + case X86_BREAKPOINT_LEN_1: + return HW_BREAKPOINT_LEN_1; + case X86_BREAKPOINT_LEN_2: + return HW_BREAKPOINT_LEN_2; + case X86_BREAKPOINT_LEN_4: + return HW_BREAKPOINT_LEN_4; +#ifdef CONFIG_X86_64 + case X86_BREAKPOINT_LEN_8: + return HW_BREAKPOINT_LEN_8; +#endif + default: + return -EINVAL; + } } int arch_bp_generic_fields(int x86_len, int x86_type, int *gen_len, int *gen_type) { + int len; + /* Type */ switch (x86_type) { case X86_BREAKPOINT_EXECUTE: @@ -211,28 +212,32 @@ int arch_bp_generic_fields(int x86_len, int x86_type, } /* Len */ - switch (x86_len) { - case X86_BREAKPOINT_LEN_1: - *gen_len = HW_BREAKPOINT_LEN_1; - break; - case X86_BREAKPOINT_LEN_2: - *gen_len = HW_BREAKPOINT_LEN_2; - break; - case X86_BREAKPOINT_LEN_4: - *gen_len = HW_BREAKPOINT_LEN_4; - break; -#ifdef CONFIG_X86_64 - case X86_BREAKPOINT_LEN_8: - *gen_len = HW_BREAKPOINT_LEN_8; - break; -#endif - default: + len = arch_bp_generic_len(x86_len); + if (len < 0) return -EINVAL; - } + *gen_len = len; return 0; } +/* + * Check for virtual address in kernel space. + */ +int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw) +{ + unsigned long va; + int len; + + va = hw->address; + len = arch_bp_generic_len(hw->len); + WARN_ON_ONCE(len < 0); + + /* + * We don't need to worry about va + len - 1 overflowing: + * we already require that va is aligned to a multiple of len. + */ + return (va >= TASK_SIZE_MAX) || ((va + len - 1) >= TASK_SIZE_MAX); +} static int arch_build_bp_info(struct perf_event *bp) { diff --git a/arch/xtensa/include/asm/hw_breakpoint.h b/arch/xtensa/include/asm/hw_breakpoint.h index dbe3053b284a..2525bf6816a6 100644 --- a/arch/xtensa/include/asm/hw_breakpoint.h +++ b/arch/xtensa/include/asm/hw_breakpoint.h @@ -35,7 +35,7 @@ struct pt_regs; struct task_struct; int hw_breakpoint_slots(int type); -int arch_check_bp_in_kernelspace(struct perf_event *bp); +int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw); int arch_validate_hwbkpt_settings(struct perf_event *bp); int hw_breakpoint_exceptions_notify(struct notifier_block *unused, unsigned long val, void *data); diff --git a/arch/xtensa/kernel/hw_breakpoint.c b/arch/xtensa/kernel/hw_breakpoint.c index b35656ab7dbd..6e34c3848885 100644 --- a/arch/xtensa/kernel/hw_breakpoint.c +++ b/arch/xtensa/kernel/hw_breakpoint.c @@ -33,14 +33,13 @@ int hw_breakpoint_slots(int type) } } -int arch_check_bp_in_kernelspace(struct perf_event *bp) +int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw) { unsigned int len; unsigned long va; - struct arch_hw_breakpoint *info = counter_arch_bp(bp); - va = info->address; - len = bp->attr.bp_len; + va = hw->address; + len = hw->len; return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); } diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c index 314e2a9040c7..89fe94c9214c 100644 --- a/kernel/events/hw_breakpoint.c +++ b/kernel/events/hw_breakpoint.c @@ -427,7 +427,7 @@ static int hw_breakpoint_parse(struct perf_event *bp, if (err) return err; - if (arch_check_bp_in_kernelspace(bp)) { + if (arch_check_bp_in_kernelspace(hw)) { if (attr->exclude_kernel) return -EINVAL; /* -- cgit From a0baf043c5cfa3a489a63ac50f5201c31a651e21 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 26 Jun 2018 04:58:50 +0200 Subject: perf/arch/x86: Implement hw_breakpoint_arch_parse() Migrate to the new API in order to remove arch_validate_hwbkpt_settings() that clumsily mixes up architecture 
validation and commit. Original-patch-by: Andy Lutomirski Signed-off-by: Frederic Weisbecker Cc: Alexander Shishkin Cc: Andy Lutomirski Cc: Arnaldo Carvalho de Melo Cc: Arnaldo Carvalho de Melo Cc: Benjamin Herrenschmidt Cc: Catalin Marinas Cc: Chris Zankel Cc: Jiri Olsa Cc: Joel Fernandes Cc: Linus Torvalds Cc: Mark Rutland Cc: Max Filippov Cc: Michael Ellerman Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rich Felker Cc: Thomas Gleixner Cc: Will Deacon Cc: Yoshinori Sato Link: http://lkml.kernel.org/r/1529981939-8231-4-git-send-email-frederic@kernel.org Signed-off-by: Ingo Molnar --- arch/x86/include/asm/hw_breakpoint.h | 6 +++- arch/x86/kernel/hw_breakpoint.c | 60 ++++++++++++++++++------------------ 2 files changed, 35 insertions(+), 31 deletions(-) diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h index 78924596f51f..6c88e8e22678 100644 --- a/arch/x86/include/asm/hw_breakpoint.h +++ b/arch/x86/include/asm/hw_breakpoint.h @@ -49,11 +49,15 @@ static inline int hw_breakpoint_slots(int type) return HBP_NUM; } +struct perf_event_attr; struct perf_event; struct pmu; extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw); -extern int arch_validate_hwbkpt_settings(struct perf_event *bp); +extern int hw_breakpoint_arch_parse(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw); +#define hw_breakpoint_arch_parse hw_breakpoint_arch_parse extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, unsigned long val, void *data); diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index c43379188f4f..34a5c1715148 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c @@ -239,19 +239,20 @@ int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw) return (va >= TASK_SIZE_MAX) || ((va + len - 1) >= TASK_SIZE_MAX); } -static int arch_build_bp_info(struct perf_event *bp) +static int arch_build_bp_info(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw) { - struct arch_hw_breakpoint *info = counter_arch_bp(bp); - - info->address = bp->attr.bp_addr; + hw->address = attr->bp_addr; + hw->mask = 0; /* Type */ - switch (bp->attr.bp_type) { + switch (attr->bp_type) { case HW_BREAKPOINT_W: - info->type = X86_BREAKPOINT_WRITE; + hw->type = X86_BREAKPOINT_WRITE; break; case HW_BREAKPOINT_W | HW_BREAKPOINT_R: - info->type = X86_BREAKPOINT_RW; + hw->type = X86_BREAKPOINT_RW; break; case HW_BREAKPOINT_X: /* @@ -259,23 +260,23 @@ static int arch_build_bp_info(struct perf_event *bp) * acceptable for kprobes. On non-kprobes kernels, we don't * allow kernel breakpoints at all. */ - if (bp->attr.bp_addr >= TASK_SIZE_MAX) { + if (attr->bp_addr >= TASK_SIZE_MAX) { #ifdef CONFIG_KPROBES - if (within_kprobe_blacklist(bp->attr.bp_addr)) + if (within_kprobe_blacklist(attr->bp_addr)) return -EINVAL; #else return -EINVAL; #endif } - info->type = X86_BREAKPOINT_EXECUTE; + hw->type = X86_BREAKPOINT_EXECUTE; /* * x86 inst breakpoints need to have a specific undefined len. * But we still need to check userspace is not trying to setup * an unsupported length, to get a range breakpoint for example. 
*/ - if (bp->attr.bp_len == sizeof(long)) { - info->len = X86_BREAKPOINT_LEN_X; + if (attr->bp_len == sizeof(long)) { + hw->len = X86_BREAKPOINT_LEN_X; return 0; } default: @@ -283,28 +284,26 @@ static int arch_build_bp_info(struct perf_event *bp) } /* Len */ - info->mask = 0; - - switch (bp->attr.bp_len) { + switch (attr->bp_len) { case HW_BREAKPOINT_LEN_1: - info->len = X86_BREAKPOINT_LEN_1; + hw->len = X86_BREAKPOINT_LEN_1; break; case HW_BREAKPOINT_LEN_2: - info->len = X86_BREAKPOINT_LEN_2; + hw->len = X86_BREAKPOINT_LEN_2; break; case HW_BREAKPOINT_LEN_4: - info->len = X86_BREAKPOINT_LEN_4; + hw->len = X86_BREAKPOINT_LEN_4; break; #ifdef CONFIG_X86_64 case HW_BREAKPOINT_LEN_8: - info->len = X86_BREAKPOINT_LEN_8; + hw->len = X86_BREAKPOINT_LEN_8; break; #endif default: /* AMD range breakpoint */ - if (!is_power_of_2(bp->attr.bp_len)) + if (!is_power_of_2(attr->bp_len)) return -EINVAL; - if (bp->attr.bp_addr & (bp->attr.bp_len - 1)) + if (attr->bp_addr & (attr->bp_len - 1)) return -EINVAL; if (!boot_cpu_has(X86_FEATURE_BPEXT)) @@ -317,8 +316,8 @@ static int arch_build_bp_info(struct perf_event *bp) * breakpoints, then we'll have to check for kprobe-blacklisted * addresses anywhere in the range. */ - info->mask = bp->attr.bp_len - 1; - info->len = X86_BREAKPOINT_LEN_1; + hw->mask = attr->bp_len - 1; + hw->len = X86_BREAKPOINT_LEN_1; } return 0; @@ -327,22 +326,23 @@ static int arch_build_bp_info(struct perf_event *bp) /* * Validate the arch-specific HW Breakpoint register settings */ -int arch_validate_hwbkpt_settings(struct perf_event *bp) +int hw_breakpoint_arch_parse(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw) { - struct arch_hw_breakpoint *info = counter_arch_bp(bp); unsigned int align; int ret; - ret = arch_build_bp_info(bp); + ret = arch_build_bp_info(bp, attr, hw); if (ret) return ret; - switch (info->len) { + switch (hw->len) { case X86_BREAKPOINT_LEN_1: align = 0; - if (info->mask) - align = info->mask; + if (hw->mask) + align = hw->mask; break; case X86_BREAKPOINT_LEN_2: align = 1; @@ -363,7 +363,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) * Check that the low-order bits of the address are appropriate * for the alignment implied by len. 
*/ - if (info->address & align) + if (hw->address & align) return -EINVAL; return 0; -- cgit From 5d5176baed7abf88bb465a128d414fa8748dcab7 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 26 Jun 2018 04:58:51 +0200 Subject: perf/arch/powerpc: Implement hw_breakpoint_arch_parse() Migrate to the new API in order to remove arch_validate_hwbkpt_settings() that clumsily mixes up architecture validation and commit Signed-off-by: Frederic Weisbecker Acked-by: Michael Ellerman Cc: Alexander Shishkin Cc: Andy Lutomirski Cc: Arnaldo Carvalho de Melo Cc: Arnaldo Carvalho de Melo Cc: Benjamin Herrenschmidt Cc: Catalin Marinas Cc: Chris Zankel Cc: Jiri Olsa Cc: Joel Fernandes Cc: Linus Torvalds Cc: Mark Rutland Cc: Max Filippov Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rich Felker Cc: Thomas Gleixner Cc: Will Deacon Cc: Yoshinori Sato Link: http://lkml.kernel.org/r/1529981939-8231-5-git-send-email-frederic@kernel.org Signed-off-by: Ingo Molnar --- arch/powerpc/include/asm/hw_breakpoint.h | 6 ++++- arch/powerpc/kernel/hw_breakpoint.c | 41 ++++++++++++++++---------------- 2 files changed, 26 insertions(+), 21 deletions(-) diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h index 4d0b1bfcdfd0..38ae180610bd 100644 --- a/arch/powerpc/include/asm/hw_breakpoint.h +++ b/arch/powerpc/include/asm/hw_breakpoint.h @@ -52,6 +52,7 @@ struct arch_hw_breakpoint { #include #include +struct perf_event_attr; struct perf_event; struct pmu; struct perf_sample_data; @@ -61,7 +62,10 @@ struct perf_sample_data; extern int hw_breakpoint_slots(int type); extern int arch_bp_generic_fields(int type, int *gen_bp_type); extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw); -extern int arch_validate_hwbkpt_settings(struct perf_event *bp); +extern int hw_breakpoint_arch_parse(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw); +#define hw_breakpoint_arch_parse hw_breakpoint_arch_parse extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, unsigned long val, void *data); int arch_install_hw_breakpoint(struct perf_event *bp); diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index 899bcec3f3c8..fec8a6773119 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -139,30 +139,31 @@ int arch_bp_generic_fields(int type, int *gen_bp_type) /* * Validate the arch-specific HW Breakpoint register settings */ -int arch_validate_hwbkpt_settings(struct perf_event *bp) +int hw_breakpoint_arch_parse(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw) { int ret = -EINVAL, length_max; - struct arch_hw_breakpoint *info = counter_arch_bp(bp); if (!bp) return ret; - info->type = HW_BRK_TYPE_TRANSLATE; - if (bp->attr.bp_type & HW_BREAKPOINT_R) - info->type |= HW_BRK_TYPE_READ; - if (bp->attr.bp_type & HW_BREAKPOINT_W) - info->type |= HW_BRK_TYPE_WRITE; - if (info->type == HW_BRK_TYPE_TRANSLATE) + hw->type = HW_BRK_TYPE_TRANSLATE; + if (attr->bp_type & HW_BREAKPOINT_R) + hw->type |= HW_BRK_TYPE_READ; + if (attr->bp_type & HW_BREAKPOINT_W) + hw->type |= HW_BRK_TYPE_WRITE; + if (hw->type == HW_BRK_TYPE_TRANSLATE) /* must set alteast read or write */ return ret; - if (!(bp->attr.exclude_user)) - info->type |= HW_BRK_TYPE_USER; - if (!(bp->attr.exclude_kernel)) - info->type |= HW_BRK_TYPE_KERNEL; - if (!(bp->attr.exclude_hv)) - info->type |= HW_BRK_TYPE_HYP; - info->address = 
bp->attr.bp_addr; - info->len = bp->attr.bp_len; + if (!attr->exclude_user) + hw->type |= HW_BRK_TYPE_USER; + if (!attr->exclude_kernel) + hw->type |= HW_BRK_TYPE_KERNEL; + if (!attr->exclude_hv) + hw->type |= HW_BRK_TYPE_HYP; + hw->address = attr->bp_addr; + hw->len = attr->bp_len; /* * Since breakpoint length can be a maximum of HW_BREAKPOINT_LEN(8) @@ -176,12 +177,12 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) if (cpu_has_feature(CPU_FTR_DAWR)) { length_max = 512 ; /* 64 doublewords */ /* DAWR region can't cross 512 boundary */ - if ((bp->attr.bp_addr >> 9) != - ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 9)) + if ((attr->bp_addr >> 9) != + ((attr->bp_addr + attr->bp_len - 1) >> 9)) return -EINVAL; } - if (info->len > - (length_max - (info->address & HW_BREAKPOINT_ALIGN))) + if (hw->len > + (length_max - (hw->address & HW_BREAKPOINT_ALIGN))) return -EINVAL; return 0; } -- cgit From 9d52718cd392a94414f0ce780d9bae3ed518ab34 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 26 Jun 2018 04:58:52 +0200 Subject: perf/arch/arm: Implement hw_breakpoint_arch_parse() Migrate to the new API in order to remove arch_validate_hwbkpt_settings() that clumsily mixes up architecture validation and commit. Signed-off-by: Frederic Weisbecker Acked-by: Mark Rutland Cc: Alexander Shishkin Cc: Andy Lutomirski Cc: Arnaldo Carvalho de Melo Cc: Arnaldo Carvalho de Melo Cc: Benjamin Herrenschmidt Cc: Catalin Marinas Cc: Chris Zankel Cc: Jiri Olsa Cc: Joel Fernandes Cc: Linus Torvalds Cc: Max Filippov Cc: Michael Ellerman Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rich Felker Cc: Thomas Gleixner Cc: Will Deacon Cc: Yoshinori Sato Link: http://lkml.kernel.org/r/1529981939-8231-6-git-send-email-frederic@kernel.org Signed-off-by: Ingo Molnar --- arch/arm/include/asm/hw_breakpoint.h | 6 ++- arch/arm/kernel/hw_breakpoint.c | 71 ++++++++++++++++++------------------ 2 files changed, 41 insertions(+), 36 deletions(-) diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h index d5a0f52c6755..1e02925c11f4 100644 --- a/arch/arm/include/asm/hw_breakpoint.h +++ b/arch/arm/include/asm/hw_breakpoint.h @@ -111,6 +111,7 @@ static inline void decode_ctrl_reg(u32 reg, asm volatile("mcr p14, 0, %0, " #N "," #M ", " #OP2 : : "r" (VAL));\ } while (0) +struct perf_event_attr; struct notifier_block; struct perf_event; struct pmu; @@ -118,7 +119,10 @@ struct pmu; extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl, int *gen_len, int *gen_type); extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw); -extern int arch_validate_hwbkpt_settings(struct perf_event *bp); +extern int hw_breakpoint_arch_parse(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw); +#define hw_breakpoint_arch_parse hw_breakpoint_arch_parse extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, unsigned long val, void *data); diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 385dcf45d64b..1d5fbf1d1c67 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -517,42 +517,42 @@ int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl, /* * Construct an arch_hw_breakpoint from a perf_event. 
*/ -static int arch_build_bp_info(struct perf_event *bp) +static int arch_build_bp_info(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw) { - struct arch_hw_breakpoint *info = counter_arch_bp(bp); - /* Type */ - switch (bp->attr.bp_type) { + switch (attr->bp_type) { case HW_BREAKPOINT_X: - info->ctrl.type = ARM_BREAKPOINT_EXECUTE; + hw->ctrl.type = ARM_BREAKPOINT_EXECUTE; break; case HW_BREAKPOINT_R: - info->ctrl.type = ARM_BREAKPOINT_LOAD; + hw->ctrl.type = ARM_BREAKPOINT_LOAD; break; case HW_BREAKPOINT_W: - info->ctrl.type = ARM_BREAKPOINT_STORE; + hw->ctrl.type = ARM_BREAKPOINT_STORE; break; case HW_BREAKPOINT_RW: - info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE; + hw->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE; break; default: return -EINVAL; } /* Len */ - switch (bp->attr.bp_len) { + switch (attr->bp_len) { case HW_BREAKPOINT_LEN_1: - info->ctrl.len = ARM_BREAKPOINT_LEN_1; + hw->ctrl.len = ARM_BREAKPOINT_LEN_1; break; case HW_BREAKPOINT_LEN_2: - info->ctrl.len = ARM_BREAKPOINT_LEN_2; + hw->ctrl.len = ARM_BREAKPOINT_LEN_2; break; case HW_BREAKPOINT_LEN_4: - info->ctrl.len = ARM_BREAKPOINT_LEN_4; + hw->ctrl.len = ARM_BREAKPOINT_LEN_4; break; case HW_BREAKPOINT_LEN_8: - info->ctrl.len = ARM_BREAKPOINT_LEN_8; - if ((info->ctrl.type != ARM_BREAKPOINT_EXECUTE) + hw->ctrl.len = ARM_BREAKPOINT_LEN_8; + if ((hw->ctrl.type != ARM_BREAKPOINT_EXECUTE) && max_watchpoint_len >= 8) break; default: @@ -565,24 +565,24 @@ static int arch_build_bp_info(struct perf_event *bp) * by the hardware and must be aligned to the appropriate number of * bytes. */ - if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE && - info->ctrl.len != ARM_BREAKPOINT_LEN_2 && - info->ctrl.len != ARM_BREAKPOINT_LEN_4) + if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE && + hw->ctrl.len != ARM_BREAKPOINT_LEN_2 && + hw->ctrl.len != ARM_BREAKPOINT_LEN_4) return -EINVAL; /* Address */ - info->address = bp->attr.bp_addr; + hw->address = attr->bp_addr; /* Privilege */ - info->ctrl.privilege = ARM_BREAKPOINT_USER; - if (arch_check_bp_in_kernelspace(info)) - info->ctrl.privilege |= ARM_BREAKPOINT_PRIV; + hw->ctrl.privilege = ARM_BREAKPOINT_USER; + if (arch_check_bp_in_kernelspace(hw)) + hw->ctrl.privilege |= ARM_BREAKPOINT_PRIV; /* Enabled? */ - info->ctrl.enabled = !bp->attr.disabled; + hw->ctrl.enabled = !attr->disabled; /* Mismatch */ - info->ctrl.mismatch = 0; + hw->ctrl.mismatch = 0; return 0; } @@ -590,9 +590,10 @@ static int arch_build_bp_info(struct perf_event *bp) /* * Validate the arch-specific HW Breakpoint register settings. */ -int arch_validate_hwbkpt_settings(struct perf_event *bp) +int hw_breakpoint_arch_parse(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw) { - struct arch_hw_breakpoint *info = counter_arch_bp(bp); int ret = 0; u32 offset, alignment_mask = 0x3; @@ -601,14 +602,14 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) return -ENODEV; /* Build the arch_hw_breakpoint. */ - ret = arch_build_bp_info(bp); + ret = arch_build_bp_info(bp, attr, hw); if (ret) goto out; /* Check address alignment. */ - if (info->ctrl.len == ARM_BREAKPOINT_LEN_8) + if (hw->ctrl.len == ARM_BREAKPOINT_LEN_8) alignment_mask = 0x7; - offset = info->address & alignment_mask; + offset = hw->address & alignment_mask; switch (offset) { case 0: /* Aligned */ @@ -616,19 +617,19 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) case 1: case 2: /* Allow halfword watchpoints and breakpoints. 
*/ - if (info->ctrl.len == ARM_BREAKPOINT_LEN_2) + if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2) break; case 3: /* Allow single byte watchpoint. */ - if (info->ctrl.len == ARM_BREAKPOINT_LEN_1) + if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1) break; default: ret = -EINVAL; goto out; } - info->address &= ~alignment_mask; - info->ctrl.len <<= offset; + hw->address &= ~alignment_mask; + hw->ctrl.len <<= offset; if (is_default_overflow_handler(bp)) { /* @@ -639,7 +640,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) return -EINVAL; /* We don't allow mismatch breakpoints in kernel space. */ - if (arch_check_bp_in_kernelspace(info)) + if (arch_check_bp_in_kernelspace(hw)) return -EPERM; /* @@ -654,8 +655,8 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) * reports them. */ if (!debug_exception_updates_fsr() && - (info->ctrl.type == ARM_BREAKPOINT_LOAD || - info->ctrl.type == ARM_BREAKPOINT_STORE)) + (hw->ctrl.type == ARM_BREAKPOINT_LOAD || + hw->ctrl.type == ARM_BREAKPOINT_STORE)) return -EINVAL; } -- cgit From 8c449753a681517f0e6f877aad8539ac4c71757d Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 26 Jun 2018 04:58:53 +0200 Subject: perf/arch/arm64: Implement hw_breakpoint_arch_parse() Migrate to the new API in order to remove arch_validate_hwbkpt_settings() that clumsily mixes up architecture validation and commit. Signed-off-by: Frederic Weisbecker Acked-by: Will Deacon Acked-by: Mark Rutland Cc: Alexander Shishkin Cc: Andy Lutomirski Cc: Arnaldo Carvalho de Melo Cc: Arnaldo Carvalho de Melo Cc: Benjamin Herrenschmidt Cc: Catalin Marinas Cc: Chris Zankel Cc: Jiri Olsa Cc: Joel Fernandes Cc: Linus Torvalds Cc: Max Filippov Cc: Michael Ellerman Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rich Felker Cc: Thomas Gleixner Cc: Yoshinori Sato Link: http://lkml.kernel.org/r/1529981939-8231-7-git-send-email-frederic@kernel.org Signed-off-by: Ingo Molnar --- arch/arm64/include/asm/hw_breakpoint.h | 6 ++- arch/arm64/kernel/hw_breakpoint.c | 79 +++++++++++++++++----------------- 2 files changed, 45 insertions(+), 40 deletions(-) diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h index 9f4a3d48762e..bf9c305daa62 100644 --- a/arch/arm64/include/asm/hw_breakpoint.h +++ b/arch/arm64/include/asm/hw_breakpoint.h @@ -119,13 +119,17 @@ static inline void decode_ctrl_reg(u32 reg, struct task_struct; struct notifier_block; +struct perf_event_attr; struct perf_event; struct pmu; extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl, int *gen_len, int *gen_type, int *offset); extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw); -extern int arch_validate_hwbkpt_settings(struct perf_event *bp); +extern int hw_breakpoint_arch_parse(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw); +#define hw_breakpoint_arch_parse hw_breakpoint_arch_parse extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, unsigned long val, void *data); diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c index 6a90d12e4ff2..8c9644376326 100644 --- a/arch/arm64/kernel/hw_breakpoint.c +++ b/arch/arm64/kernel/hw_breakpoint.c @@ -420,53 +420,53 @@ int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl, /* * Construct an arch_hw_breakpoint from a perf_event. 
*/ -static int arch_build_bp_info(struct perf_event *bp) +static int arch_build_bp_info(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw) { - struct arch_hw_breakpoint *info = counter_arch_bp(bp); - /* Type */ - switch (bp->attr.bp_type) { + switch (attr->bp_type) { case HW_BREAKPOINT_X: - info->ctrl.type = ARM_BREAKPOINT_EXECUTE; + hw->ctrl.type = ARM_BREAKPOINT_EXECUTE; break; case HW_BREAKPOINT_R: - info->ctrl.type = ARM_BREAKPOINT_LOAD; + hw->ctrl.type = ARM_BREAKPOINT_LOAD; break; case HW_BREAKPOINT_W: - info->ctrl.type = ARM_BREAKPOINT_STORE; + hw->ctrl.type = ARM_BREAKPOINT_STORE; break; case HW_BREAKPOINT_RW: - info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE; + hw->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE; break; default: return -EINVAL; } /* Len */ - switch (bp->attr.bp_len) { + switch (attr->bp_len) { case HW_BREAKPOINT_LEN_1: - info->ctrl.len = ARM_BREAKPOINT_LEN_1; + hw->ctrl.len = ARM_BREAKPOINT_LEN_1; break; case HW_BREAKPOINT_LEN_2: - info->ctrl.len = ARM_BREAKPOINT_LEN_2; + hw->ctrl.len = ARM_BREAKPOINT_LEN_2; break; case HW_BREAKPOINT_LEN_3: - info->ctrl.len = ARM_BREAKPOINT_LEN_3; + hw->ctrl.len = ARM_BREAKPOINT_LEN_3; break; case HW_BREAKPOINT_LEN_4: - info->ctrl.len = ARM_BREAKPOINT_LEN_4; + hw->ctrl.len = ARM_BREAKPOINT_LEN_4; break; case HW_BREAKPOINT_LEN_5: - info->ctrl.len = ARM_BREAKPOINT_LEN_5; + hw->ctrl.len = ARM_BREAKPOINT_LEN_5; break; case HW_BREAKPOINT_LEN_6: - info->ctrl.len = ARM_BREAKPOINT_LEN_6; + hw->ctrl.len = ARM_BREAKPOINT_LEN_6; break; case HW_BREAKPOINT_LEN_7: - info->ctrl.len = ARM_BREAKPOINT_LEN_7; + hw->ctrl.len = ARM_BREAKPOINT_LEN_7; break; case HW_BREAKPOINT_LEN_8: - info->ctrl.len = ARM_BREAKPOINT_LEN_8; + hw->ctrl.len = ARM_BREAKPOINT_LEN_8; break; default: return -EINVAL; @@ -477,37 +477,37 @@ static int arch_build_bp_info(struct perf_event *bp) * AArch32 also requires breakpoints of length 2 for Thumb. * Watchpoints can be of length 1, 2, 4 or 8 bytes. */ - if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) { + if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE) { if (is_compat_bp(bp)) { - if (info->ctrl.len != ARM_BREAKPOINT_LEN_2 && - info->ctrl.len != ARM_BREAKPOINT_LEN_4) + if (hw->ctrl.len != ARM_BREAKPOINT_LEN_2 && + hw->ctrl.len != ARM_BREAKPOINT_LEN_4) return -EINVAL; - } else if (info->ctrl.len != ARM_BREAKPOINT_LEN_4) { + } else if (hw->ctrl.len != ARM_BREAKPOINT_LEN_4) { /* * FIXME: Some tools (I'm looking at you perf) assume * that breakpoints should be sizeof(long). This * is nonsense. For now, we fix up the parameter * but we should probably return -EINVAL instead. */ - info->ctrl.len = ARM_BREAKPOINT_LEN_4; + hw->ctrl.len = ARM_BREAKPOINT_LEN_4; } } /* Address */ - info->address = bp->attr.bp_addr; + hw->address = attr->bp_addr; /* * Privilege * Note that we disallow combined EL0/EL1 breakpoints because * that would complicate the stepping code. */ - if (arch_check_bp_in_kernelspace(info)) - info->ctrl.privilege = AARCH64_BREAKPOINT_EL1; + if (arch_check_bp_in_kernelspace(hw)) + hw->ctrl.privilege = AARCH64_BREAKPOINT_EL1; else - info->ctrl.privilege = AARCH64_BREAKPOINT_EL0; + hw->ctrl.privilege = AARCH64_BREAKPOINT_EL0; /* Enabled? */ - info->ctrl.enabled = !bp->attr.disabled; + hw->ctrl.enabled = !attr->disabled; return 0; } @@ -515,14 +515,15 @@ static int arch_build_bp_info(struct perf_event *bp) /* * Validate the arch-specific HW Breakpoint register settings. 
*/ -int arch_validate_hwbkpt_settings(struct perf_event *bp) +int hw_breakpoint_arch_parse(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw) { - struct arch_hw_breakpoint *info = counter_arch_bp(bp); int ret; u64 alignment_mask, offset; /* Build the arch_hw_breakpoint. */ - ret = arch_build_bp_info(bp); + ret = arch_build_bp_info(bp, attr, hw); if (ret) return ret; @@ -536,42 +537,42 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) * that here. */ if (is_compat_bp(bp)) { - if (info->ctrl.len == ARM_BREAKPOINT_LEN_8) + if (hw->ctrl.len == ARM_BREAKPOINT_LEN_8) alignment_mask = 0x7; else alignment_mask = 0x3; - offset = info->address & alignment_mask; + offset = hw->address & alignment_mask; switch (offset) { case 0: /* Aligned */ break; case 1: /* Allow single byte watchpoint. */ - if (info->ctrl.len == ARM_BREAKPOINT_LEN_1) + if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1) break; case 2: /* Allow halfword watchpoints and breakpoints. */ - if (info->ctrl.len == ARM_BREAKPOINT_LEN_2) + if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2) break; default: return -EINVAL; } } else { - if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) + if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE) alignment_mask = 0x3; else alignment_mask = 0x7; - offset = info->address & alignment_mask; + offset = hw->address & alignment_mask; } - info->address &= ~alignment_mask; - info->ctrl.len <<= offset; + hw->address &= ~alignment_mask; + hw->ctrl.len <<= offset; /* * Disallow per-task kernel breakpoints since these would * complicate the stepping code. */ - if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target) + if (hw->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target) return -EINVAL; return 0; -- cgit From 923442895760df69eedfd3e25aa954af99a753e6 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 26 Jun 2018 04:58:54 +0200 Subject: perf/arch/sh: Remove "struct arch_hw_breakpoint::name" unused field This field seem to be unused, perhaps a leftover from old code... Signed-off-by: Frederic Weisbecker Cc: Alexander Shishkin Cc: Andy Lutomirski Cc: Arnaldo Carvalho de Melo Cc: Arnaldo Carvalho de Melo Cc: Benjamin Herrenschmidt Cc: Catalin Marinas Cc: Chris Zankel Cc: Jiri Olsa Cc: Joel Fernandes Cc: Linus Torvalds Cc: Mark Rutland Cc: Max Filippov Cc: Michael Ellerman Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rich Felker Cc: Thomas Gleixner Cc: Will Deacon Cc: Yoshinori Sato Link: http://lkml.kernel.org/r/1529981939-8231-8-git-send-email-frederic@kernel.org Signed-off-by: Ingo Molnar --- arch/sh/include/asm/hw_breakpoint.h | 1 - arch/sh/kernel/hw_breakpoint.c | 7 ------- 2 files changed, 8 deletions(-) diff --git a/arch/sh/include/asm/hw_breakpoint.h b/arch/sh/include/asm/hw_breakpoint.h index 8a88ed0b3cd7..dae622d9b10b 100644 --- a/arch/sh/include/asm/hw_breakpoint.h +++ b/arch/sh/include/asm/hw_breakpoint.h @@ -10,7 +10,6 @@ #include struct arch_hw_breakpoint { - char *name; /* Contains name of the symbol to set bkpt */ unsigned long address; u16 len; u16 type; diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c index 38791fefdd0c..c453a0cea3c2 100644 --- a/arch/sh/kernel/hw_breakpoint.c +++ b/arch/sh/kernel/hw_breakpoint.c @@ -247,13 +247,6 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) return ret; } - /* - * For kernel-addresses, either the address or symbol name can be - * specified. 
- */ - if (info->name) - info->address = (unsigned long)kallsyms_lookup_name(info->name); - /* * Check that the low-order bits of the address are appropriate * for the alignment implied by len. -- cgit From 551624d6fc6b282cdcc3f8ab395cb03da0a38fc7 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 26 Jun 2018 04:58:55 +0200 Subject: perf/arch/sh: Implement hw_breakpoint_arch_parse() Migrate to the new API in order to remove arch_validate_hwbkpt_settings() that clumsily mixes up architecture validation and commit Signed-off-by: Frederic Weisbecker Cc: Alexander Shishkin Cc: Andy Lutomirski Cc: Arnaldo Carvalho de Melo Cc: Arnaldo Carvalho de Melo Cc: Benjamin Herrenschmidt Cc: Catalin Marinas Cc: Chris Zankel Cc: Jiri Olsa Cc: Joel Fernandes Cc: Linus Torvalds Cc: Mark Rutland Cc: Max Filippov Cc: Michael Ellerman Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rich Felker Cc: Thomas Gleixner Cc: Will Deacon Cc: Yoshinori Sato Link: http://lkml.kernel.org/r/1529981939-8231-9-git-send-email-frederic@kernel.org Signed-off-by: Ingo Molnar --- arch/sh/include/asm/hw_breakpoint.h | 6 +++++- arch/sh/kernel/hw_breakpoint.c | 37 +++++++++++++++++++------------------ 2 files changed, 24 insertions(+), 19 deletions(-) diff --git a/arch/sh/include/asm/hw_breakpoint.h b/arch/sh/include/asm/hw_breakpoint.h index dae622d9b10b..867edcc60e8f 100644 --- a/arch/sh/include/asm/hw_breakpoint.h +++ b/arch/sh/include/asm/hw_breakpoint.h @@ -40,6 +40,7 @@ struct sh_ubc { struct clk *clk; /* optional interface clock / MSTP bit */ }; +struct perf_event_attr; struct perf_event; struct task_struct; struct pmu; @@ -54,7 +55,10 @@ static inline int hw_breakpoint_slots(int type) /* arch/sh/kernel/hw_breakpoint.c */ extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw); -extern int arch_validate_hwbkpt_settings(struct perf_event *bp); +extern int hw_breakpoint_arch_parse(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw); +#define hw_breakpoint_arch_parse hw_breakpoint_arch_parse extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, unsigned long val, void *data); diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c index c453a0cea3c2..d9ff3b42da7c 100644 --- a/arch/sh/kernel/hw_breakpoint.c +++ b/arch/sh/kernel/hw_breakpoint.c @@ -173,40 +173,40 @@ int arch_bp_generic_fields(int sh_len, int sh_type, return 0; } -static int arch_build_bp_info(struct perf_event *bp) +static int arch_build_bp_info(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw) { - struct arch_hw_breakpoint *info = counter_arch_bp(bp); - - info->address = bp->attr.bp_addr; + hw->address = attr->bp_addr; /* Len */ - switch (bp->attr.bp_len) { + switch (attr->bp_len) { case HW_BREAKPOINT_LEN_1: - info->len = SH_BREAKPOINT_LEN_1; + hw->len = SH_BREAKPOINT_LEN_1; break; case HW_BREAKPOINT_LEN_2: - info->len = SH_BREAKPOINT_LEN_2; + hw->len = SH_BREAKPOINT_LEN_2; break; case HW_BREAKPOINT_LEN_4: - info->len = SH_BREAKPOINT_LEN_4; + hw->len = SH_BREAKPOINT_LEN_4; break; case HW_BREAKPOINT_LEN_8: - info->len = SH_BREAKPOINT_LEN_8; + hw->len = SH_BREAKPOINT_LEN_8; break; default: return -EINVAL; } /* Type */ - switch (bp->attr.bp_type) { + switch (attr->bp_type) { case HW_BREAKPOINT_R: - info->type = SH_BREAKPOINT_READ; + hw->type = SH_BREAKPOINT_READ; break; case HW_BREAKPOINT_W: - info->type = SH_BREAKPOINT_WRITE; + hw->type = SH_BREAKPOINT_WRITE; break; case HW_BREAKPOINT_W | HW_BREAKPOINT_R: 
- info->type = SH_BREAKPOINT_RW; + hw->type = SH_BREAKPOINT_RW; break; default: return -EINVAL; @@ -218,19 +218,20 @@ static int arch_build_bp_info(struct perf_event *bp) /* * Validate the arch-specific HW Breakpoint register settings */ -int arch_validate_hwbkpt_settings(struct perf_event *bp) +int hw_breakpoint_arch_parse(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw) { - struct arch_hw_breakpoint *info = counter_arch_bp(bp); unsigned int align; int ret; - ret = arch_build_bp_info(bp); + ret = arch_build_bp_info(bp, attr, hw); if (ret) return ret; ret = -EINVAL; - switch (info->len) { + switch (hw->len) { case SH_BREAKPOINT_LEN_1: align = 0; break; @@ -251,7 +252,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) * Check that the low-order bits of the address are appropriate * for the alignment implied by len. */ - if (info->address & align) + if (hw->address & align) return -EINVAL; return 0; -- cgit From ac46c7fddef702dadae18c1fc932530cd31cf9cd Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 26 Jun 2018 04:58:56 +0200 Subject: perf/arch/xtensa: Implement hw_breakpoint_arch_parse() Migrate to the new API in order to remove arch_validate_hwbkpt_settings() that clumsily mixes up architecture validation and commit Signed-off-by: Frederic Weisbecker Cc: Alexander Shishkin Cc: Andy Lutomirski Cc: Arnaldo Carvalho de Melo Cc: Arnaldo Carvalho de Melo Cc: Benjamin Herrenschmidt Cc: Catalin Marinas Cc: Chris Zankel Cc: Jiri Olsa Cc: Joel Fernandes Cc: Linus Torvalds Cc: Mark Rutland Cc: Max Filippov Cc: Michael Ellerman Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rich Felker Cc: Thomas Gleixner Cc: Will Deacon Cc: Yoshinori Sato Link: http://lkml.kernel.org/r/1529981939-8231-10-git-send-email-frederic@kernel.org Signed-off-by: Ingo Molnar --- arch/xtensa/include/asm/hw_breakpoint.h | 6 +++++- arch/xtensa/kernel/hw_breakpoint.c | 33 ++++++++++++--------------------- 2 files changed, 17 insertions(+), 22 deletions(-) diff --git a/arch/xtensa/include/asm/hw_breakpoint.h b/arch/xtensa/include/asm/hw_breakpoint.h index 2525bf6816a6..f347c2132e6b 100644 --- a/arch/xtensa/include/asm/hw_breakpoint.h +++ b/arch/xtensa/include/asm/hw_breakpoint.h @@ -30,13 +30,17 @@ struct arch_hw_breakpoint { u16 type; }; +struct perf_event_attr; struct perf_event; struct pt_regs; struct task_struct; int hw_breakpoint_slots(int type); int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw); -int arch_validate_hwbkpt_settings(struct perf_event *bp); +int hw_breakpoint_arch_parse(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw); +#define hw_breakpoint_arch_parse hw_breakpoint_arch_parse int hw_breakpoint_exceptions_notify(struct notifier_block *unused, unsigned long val, void *data); diff --git a/arch/xtensa/kernel/hw_breakpoint.c b/arch/xtensa/kernel/hw_breakpoint.c index 6e34c3848885..c2e387c19cda 100644 --- a/arch/xtensa/kernel/hw_breakpoint.c +++ b/arch/xtensa/kernel/hw_breakpoint.c @@ -47,50 +47,41 @@ int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw) /* * Construct an arch_hw_breakpoint from a perf_event. 
*/ -static int arch_build_bp_info(struct perf_event *bp) +int hw_breakpoint_arch_parse(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw) { - struct arch_hw_breakpoint *info = counter_arch_bp(bp); - /* Type */ - switch (bp->attr.bp_type) { + switch (attr->bp_type) { case HW_BREAKPOINT_X: - info->type = XTENSA_BREAKPOINT_EXECUTE; + hw->type = XTENSA_BREAKPOINT_EXECUTE; break; case HW_BREAKPOINT_R: - info->type = XTENSA_BREAKPOINT_LOAD; + hw->type = XTENSA_BREAKPOINT_LOAD; break; case HW_BREAKPOINT_W: - info->type = XTENSA_BREAKPOINT_STORE; + hw->type = XTENSA_BREAKPOINT_STORE; break; case HW_BREAKPOINT_RW: - info->type = XTENSA_BREAKPOINT_LOAD | XTENSA_BREAKPOINT_STORE; + hw->type = XTENSA_BREAKPOINT_LOAD | XTENSA_BREAKPOINT_STORE; break; default: return -EINVAL; } /* Len */ - info->len = bp->attr.bp_len; - if (info->len < 1 || info->len > 64 || !is_power_of_2(info->len)) + hw->len = attr->bp_len; + if (hw->len < 1 || hw->len > 64 || !is_power_of_2(hw->len)) return -EINVAL; /* Address */ - info->address = bp->attr.bp_addr; - if (info->address & (info->len - 1)) + hw->address = attr->bp_addr; + if (hw->address & (hw->len - 1)) return -EINVAL; return 0; } -int arch_validate_hwbkpt_settings(struct perf_event *bp) -{ - int ret; - - /* Build the arch_hw_breakpoint. */ - ret = arch_build_bp_info(bp); - return ret; -} - int hw_breakpoint_exceptions_notify(struct notifier_block *unused, unsigned long val, void *data) { -- cgit From cffbb3bd444bc95186b6ab0ed3bf1d5bcf4aa620 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 26 Jun 2018 04:58:57 +0200 Subject: perf/hw_breakpoint: Remove default hw_breakpoint_arch_parse() All architectures have implemented it, we can now remove the poor weak version. Signed-off-by: Frederic Weisbecker Cc: Alexander Shishkin Cc: Andy Lutomirski Cc: Arnaldo Carvalho de Melo Cc: Arnaldo Carvalho de Melo Cc: Benjamin Herrenschmidt Cc: Catalin Marinas Cc: Chris Zankel Cc: Jiri Olsa Cc: Joel Fernandes Cc: Linus Torvalds Cc: Mark Rutland Cc: Max Filippov Cc: Michael Ellerman Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rich Felker Cc: Thomas Gleixner Cc: Will Deacon Cc: Yoshinori Sato Link: http://lkml.kernel.org/r/1529981939-8231-11-git-send-email-frederic@kernel.org Signed-off-by: Ingo Molnar --- arch/arm/include/asm/hw_breakpoint.h | 1 - arch/arm64/include/asm/hw_breakpoint.h | 1 - arch/powerpc/include/asm/hw_breakpoint.h | 1 - arch/sh/include/asm/hw_breakpoint.h | 1 - arch/x86/include/asm/hw_breakpoint.h | 1 - arch/xtensa/include/asm/hw_breakpoint.h | 1 - kernel/events/hw_breakpoint.c | 17 ----------------- 7 files changed, 23 deletions(-) diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h index 1e02925c11f4..ac54c06764e6 100644 --- a/arch/arm/include/asm/hw_breakpoint.h +++ b/arch/arm/include/asm/hw_breakpoint.h @@ -122,7 +122,6 @@ extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw); extern int hw_breakpoint_arch_parse(struct perf_event *bp, const struct perf_event_attr *attr, struct arch_hw_breakpoint *hw); -#define hw_breakpoint_arch_parse hw_breakpoint_arch_parse extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, unsigned long val, void *data); diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h index bf9c305daa62..6a53e59ced95 100644 --- a/arch/arm64/include/asm/hw_breakpoint.h +++ b/arch/arm64/include/asm/hw_breakpoint.h @@ -129,7 +129,6 @@ extern int 
arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw); extern int hw_breakpoint_arch_parse(struct perf_event *bp, const struct perf_event_attr *attr, struct arch_hw_breakpoint *hw); -#define hw_breakpoint_arch_parse hw_breakpoint_arch_parse extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, unsigned long val, void *data); diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h index 38ae180610bd..27d6e3c8fde9 100644 --- a/arch/powerpc/include/asm/hw_breakpoint.h +++ b/arch/powerpc/include/asm/hw_breakpoint.h @@ -65,7 +65,6 @@ extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw); extern int hw_breakpoint_arch_parse(struct perf_event *bp, const struct perf_event_attr *attr, struct arch_hw_breakpoint *hw); -#define hw_breakpoint_arch_parse hw_breakpoint_arch_parse extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, unsigned long val, void *data); int arch_install_hw_breakpoint(struct perf_event *bp); diff --git a/arch/sh/include/asm/hw_breakpoint.h b/arch/sh/include/asm/hw_breakpoint.h index 867edcc60e8f..199d17b765f2 100644 --- a/arch/sh/include/asm/hw_breakpoint.h +++ b/arch/sh/include/asm/hw_breakpoint.h @@ -58,7 +58,6 @@ extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw); extern int hw_breakpoint_arch_parse(struct perf_event *bp, const struct perf_event_attr *attr, struct arch_hw_breakpoint *hw); -#define hw_breakpoint_arch_parse hw_breakpoint_arch_parse extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, unsigned long val, void *data); diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h index 6c88e8e22678..a1f0e90d0818 100644 --- a/arch/x86/include/asm/hw_breakpoint.h +++ b/arch/x86/include/asm/hw_breakpoint.h @@ -57,7 +57,6 @@ extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw); extern int hw_breakpoint_arch_parse(struct perf_event *bp, const struct perf_event_attr *attr, struct arch_hw_breakpoint *hw); -#define hw_breakpoint_arch_parse hw_breakpoint_arch_parse extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, unsigned long val, void *data); diff --git a/arch/xtensa/include/asm/hw_breakpoint.h b/arch/xtensa/include/asm/hw_breakpoint.h index f347c2132e6b..9f119c1ca0b5 100644 --- a/arch/xtensa/include/asm/hw_breakpoint.h +++ b/arch/xtensa/include/asm/hw_breakpoint.h @@ -40,7 +40,6 @@ int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw); int hw_breakpoint_arch_parse(struct perf_event *bp, const struct perf_event_attr *attr, struct arch_hw_breakpoint *hw); -#define hw_breakpoint_arch_parse hw_breakpoint_arch_parse int hw_breakpoint_exceptions_notify(struct notifier_block *unused, unsigned long val, void *data); diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c index 89fe94c9214c..e7bc8d0a5972 100644 --- a/kernel/events/hw_breakpoint.c +++ b/kernel/events/hw_breakpoint.c @@ -400,23 +400,6 @@ int dbg_release_bp_slot(struct perf_event *bp) return 0; } -#ifndef hw_breakpoint_arch_parse -int hw_breakpoint_arch_parse(struct perf_event *bp, - const struct perf_event_attr *attr, - struct arch_hw_breakpoint *hw) -{ - int err; - - err = arch_validate_hwbkpt_settings(bp); - if (err) - return err; - - *hw = bp->hw.info; - - return 0; -} -#endif - static int hw_breakpoint_parse(struct perf_event *bp, const struct perf_event_attr *attr, struct arch_hw_breakpoint *hw) -- cgit From cb8b78815b68b048303f55c276d2aef147e8f2e7 Mon Sep 17 00:00:00 
2001 From: Frederic Weisbecker Date: Tue, 26 Jun 2018 04:58:58 +0200 Subject: perf/hw_breakpoint: Pass new breakpoint type to modify_breakpoint_slot() We soon won't be able to rely on bp->attr anymore to get the new type of the modifying breakpoint because the new attributes are going to be copied only once we successfully modified the breakpoint slot. This will fix the current misdesigned layout where the new attr are copied to the modifying breakpoint before we actually know if the modification will be validated. In order to prepare for that, allow modify_breakpoint_slot() to take the new breakpoint type. Signed-off-by: Frederic Weisbecker Cc: Alexander Shishkin Cc: Andy Lutomirski Cc: Arnaldo Carvalho de Melo Cc: Arnaldo Carvalho de Melo Cc: Benjamin Herrenschmidt Cc: Catalin Marinas Cc: Chris Zankel Cc: Jiri Olsa Cc: Joel Fernandes Cc: Linus Torvalds Cc: Mark Rutland Cc: Max Filippov Cc: Michael Ellerman Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rich Felker Cc: Thomas Gleixner Cc: Will Deacon Cc: Yoshinori Sato Link: http://lkml.kernel.org/r/1529981939-8231-12-git-send-email-frederic@kernel.org Signed-off-by: Ingo Molnar --- kernel/events/hw_breakpoint.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c index e7bc8d0a5972..71387703b5f1 100644 --- a/kernel/events/hw_breakpoint.c +++ b/kernel/events/hw_breakpoint.c @@ -345,13 +345,13 @@ void release_bp_slot(struct perf_event *bp) mutex_unlock(&nr_bp_mutex); } -static int __modify_bp_slot(struct perf_event *bp, u64 old_type) +static int __modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type) { int err; __release_bp_slot(bp, old_type); - err = __reserve_bp_slot(bp, bp->attr.bp_type); + err = __reserve_bp_slot(bp, new_type); if (err) { /* * Reserve the old_type slot back in case @@ -367,12 +367,12 @@ static int __modify_bp_slot(struct perf_event *bp, u64 old_type) return err; } -static int modify_bp_slot(struct perf_event *bp, u64 old_type) +static int modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type) { int ret; mutex_lock(&nr_bp_mutex); - ret = __modify_bp_slot(bp, old_type); + ret = __modify_bp_slot(bp, old_type, new_type); mutex_unlock(&nr_bp_mutex); return ret; } @@ -481,7 +481,7 @@ modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *a err = hw_breakpoint_parse(bp, attr, &hw); if (!err && modify) - err = modify_bp_slot(bp, old_type); + err = modify_bp_slot(bp, old_type, bp->attr.bp_type); if (err) { bp->attr.bp_addr = old_addr; -- cgit From 26c6ccdf5c06aa83bb78518c72cb287f043db3df Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 26 Jun 2018 04:58:59 +0200 Subject: perf/hw_breakpoint: Clean up and consolidate modify_user_hw_breakpoint_check() Remove the dance around old and new attributes. Just don't modify the previous breakpoint at all until we have verified everything. 
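In rough outline the result is a parse-then-commit flow like the following. This is only a condensed sketch of the code added further down in this patch: the ptrace 'check' argument and its attribute-comparison path, locking and most comments are trimmed, and hw_breakpoint_parse(), modify_bp_slot() and hw_breakpoint_copy_attr() are the helpers introduced earlier in this series.

	int modify_user_hw_breakpoint_check(struct perf_event *bp,
					    struct perf_event_attr *attr)
	{
		struct arch_hw_breakpoint hw;
		int err;

		/* Validate the new attributes into a scratch copy first. */
		err = hw_breakpoint_parse(bp, attr, &hw);
		if (err)
			return err;

		/* Re-reserve the slot only if the breakpoint type changes. */
		if (bp->attr.bp_type != attr->bp_type) {
			err = modify_bp_slot(bp, bp->attr.bp_type, attr->bp_type);
			if (err)
				return err;
		}

		/* Only now touch the live breakpoint. */
		hw_breakpoint_copy_attr(&bp->attr, attr);
		bp->hw.info = hw;
		return 0;
	}
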
Original-patch-by: Andy Lutomirski Reported-by: Linus Torvalds Signed-off-by: Frederic Weisbecker Cc: Alexander Shishkin Cc: Andy Lutomirski Cc: Arnaldo Carvalho de Melo Cc: Arnaldo Carvalho de Melo Cc: Benjamin Herrenschmidt Cc: Catalin Marinas Cc: Chris Zankel Cc: Jiri Olsa Cc: Joel Fernandes Cc: Mark Rutland Cc: Max Filippov Cc: Michael Ellerman Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rich Felker Cc: Thomas Gleixner Cc: Will Deacon Cc: Yoshinori Sato Link: http://lkml.kernel.org/r/1529981939-8231-13-git-send-email-frederic@kernel.org Signed-off-by: Ingo Molnar --- kernel/events/hw_breakpoint.c | 44 ++++++++++++++++++++++++------------------- 1 file changed, 25 insertions(+), 19 deletions(-) diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c index 71387703b5f1..b3814fce5ecb 100644 --- a/kernel/events/hw_breakpoint.c +++ b/kernel/events/hw_breakpoint.c @@ -461,37 +461,43 @@ register_user_hw_breakpoint(struct perf_event_attr *attr, } EXPORT_SYMBOL_GPL(register_user_hw_breakpoint); +static void hw_breakpoint_copy_attr(struct perf_event_attr *to, + struct perf_event_attr *from) +{ + to->bp_addr = from->bp_addr; + to->bp_type = from->bp_type; + to->bp_len = from->bp_len; + to->disabled = from->disabled; +} + int modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr, bool check) { - u64 old_addr = bp->attr.bp_addr; - u64 old_len = bp->attr.bp_len; - int old_type = bp->attr.bp_type; - bool modify = attr->bp_type != old_type; struct arch_hw_breakpoint hw; - int err = 0; + int err; - bp->attr.bp_addr = attr->bp_addr; - bp->attr.bp_type = attr->bp_type; - bp->attr.bp_len = attr->bp_len; + err = hw_breakpoint_parse(bp, attr, &hw); + if (err) + return err; - if (check && memcmp(&bp->attr, attr, sizeof(*attr))) - return -EINVAL; + if (check) { + struct perf_event_attr old_attr; - err = hw_breakpoint_parse(bp, attr, &hw); - if (!err && modify) - err = modify_bp_slot(bp, old_type, bp->attr.bp_type); + old_attr = bp->attr; + hw_breakpoint_copy_attr(&old_attr, attr); + if (memcmp(&old_attr, attr, sizeof(*attr))) + return -EINVAL; + } - if (err) { - bp->attr.bp_addr = old_addr; - bp->attr.bp_type = old_type; - bp->attr.bp_len = old_len; - return err; + if (bp->attr.bp_type != attr->bp_type) { + err = modify_bp_slot(bp, bp->attr.bp_type, attr->bp_type); + if (err) + return err; } + hw_breakpoint_copy_attr(&bp->attr, attr); bp->hw.info = hw; - bp->attr.disabled = attr->disabled; return 0; } -- cgit From 788faab70d5a882693286b8d5022779559c79904 Mon Sep 17 00:00:00 2001 From: Tobias Tefke Date: Mon, 9 Jul 2018 12:57:15 +0200 Subject: perf, tools: Use correct articles in comments Some of the comments in the perf events code use articles incorrectly, using 'a' for words beginning with a vowel sound, where 'an' should be used. Signed-off-by: Tobias Tefke Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: acme@kernel.org Cc: alexander.shishkin@linux.intel.com Cc: jolsa@redhat.com Cc: namhyung@kernel.org Link: http://lkml.kernel.org/r/20180709105715.22938-1-tobias.tefke@tutanota.com [ Fix a few more perf related 'a event' typo fixes from all around the kernel and tooling tree. 
] Signed-off-by: Ingo Molnar --- arch/powerpc/perf/core-book3s.c | 6 +++--- include/linux/perf_event.h | 2 +- kernel/events/core.c | 16 ++++++++-------- kernel/events/uprobes.c | 6 +++--- tools/perf/Documentation/perf-list.txt | 2 +- tools/perf/Documentation/perf-record.txt | 4 ++-- 6 files changed, 18 insertions(+), 18 deletions(-) diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 3f66fcf8ad99..19d8ab49d1bd 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -1469,7 +1469,7 @@ static int collect_events(struct perf_event *group, int max_count, } /* - * Add a event to the PMU. + * Add an event to the PMU. * If all events are not already frozen, then we disable and * re-enable the PMU in order to get hw_perf_enable to do the * actual work of reconfiguring the PMU. @@ -1548,7 +1548,7 @@ nocheck: } /* - * Remove a event from the PMU. + * Remove an event from the PMU. */ static void power_pmu_del(struct perf_event *event, int ef_flags) { @@ -1742,7 +1742,7 @@ static int power_pmu_commit_txn(struct pmu *pmu) /* * Return 1 if we might be able to put event on a limited PMC, * or 0 if not. - * A event can only go on a limited PMC if it counts something + * An event can only go on a limited PMC if it counts something * that a limited PMC can count, doesn't require interrupts, and * doesn't exclude any processor mode. */ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 1fa12887ec02..e6dd3a2f8ec4 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -490,7 +490,7 @@ struct perf_addr_filters_head { }; /** - * enum perf_event_state - the states of a event + * enum perf_event_state - the states of an event: */ enum perf_event_state { PERF_EVENT_STATE_DEAD = -4, diff --git a/kernel/events/core.c b/kernel/events/core.c index a4d5ac6d74af..86b31e5a5a9a 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -1656,7 +1656,7 @@ perf_event_groups_next(struct perf_event *event) typeof(*event), group_node)) /* - * Add a event from the lists for its context. + * Add an event from the lists for its context. * Must be called with ctx->mutex and ctx->lock held. */ static void @@ -1844,7 +1844,7 @@ static void perf_group_attach(struct perf_event *event) } /* - * Remove a event from the lists for its context. + * Remove an event from the lists for its context. * Must be called with ctx->mutex and ctx->lock held. */ static void @@ -2148,7 +2148,7 @@ static void __perf_event_disable(struct perf_event *event, } /* - * Disable a event. + * Disable an event. * * If event->ctx is a cloned context, callers must make sure that * every task struct that event->ctx->task could possibly point to @@ -2677,7 +2677,7 @@ static void __perf_event_enable(struct perf_event *event, } /* - * Enable a event. + * Enable an event. * * If event->ctx is a cloned context, callers must make sure that * every task struct that event->ctx->task could possibly point to @@ -2755,7 +2755,7 @@ static int __perf_event_stop(void *info) * events will refuse to restart because of rb::aux_mmap_count==0, * see comments in perf_aux_output_begin(). * - * Since this is happening on a event-local CPU, no trace is lost + * Since this is happening on an event-local CPU, no trace is lost * while restarting. 
*/ if (sd->restart) @@ -4827,7 +4827,7 @@ __perf_read(struct perf_event *event, char __user *buf, size_t count) int ret; /* - * Return end-of-file for a read on a event that is in + * Return end-of-file for a read on an event that is in * error state (i.e. because it was pinned but it couldn't be * scheduled on to the CPU at some point). */ @@ -9898,7 +9898,7 @@ enabled: } /* - * Allocate and initialize a event structure + * Allocate and initialize an event structure */ static struct perf_event * perf_event_alloc(struct perf_event_attr *attr, int cpu, @@ -11229,7 +11229,7 @@ const struct perf_event_attr *perf_event_attrs(struct perf_event *event) } /* - * Inherit a event from parent task to child task. + * Inherit an event from parent task to child task. * * Returns: * - valid pointer on success diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index ccc579a7d32e..aed1ba569954 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -918,7 +918,7 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer * EXPORT_SYMBOL_GPL(uprobe_register); /* - * uprobe_apply - unregister a already registered probe. + * uprobe_apply - unregister an already registered probe. * @inode: the file in which the probe has to be removed. * @offset: offset from the start of the file. * @uc: consumer which wants to add more or remove some breakpoints @@ -947,7 +947,7 @@ int uprobe_apply(struct inode *inode, loff_t offset, } /* - * uprobe_unregister - unregister a already registered probe. + * uprobe_unregister - unregister an already registered probe. * @inode: the file in which the probe has to be removed. * @offset: offset from the start of the file. * @uc: identify which probe if multiple probes are colocated. @@ -1403,7 +1403,7 @@ static struct return_instance *free_ret_instance(struct return_instance *ri) /* * Called with no locks held. - * Called in context of a exiting or a exec-ing thread. + * Called in context of an exiting or an exec-ing thread. */ void uprobe_free_utask(struct task_struct *t) { diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt index 11300dbe35c5..14e13512c05f 100644 --- a/tools/perf/Documentation/perf-list.txt +++ b/tools/perf/Documentation/perf-list.txt @@ -234,7 +234,7 @@ perf also supports group leader sampling using the :S specifier. perf record -e '{cycles,instructions}:S' ... perf report --group -Normally all events in a event group sample, but with :S only +Normally all events in an event group sample, but with :S only the first event (the leader) samples, and it only reads the values of the other events in the group. diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt index 04168da4268e..246dee081efd 100644 --- a/tools/perf/Documentation/perf-record.txt +++ b/tools/perf/Documentation/perf-record.txt @@ -94,7 +94,7 @@ OPTIONS "perf report" to view group events together. --filter=:: - Event filter. This option should follow a event selector (-e) which + Event filter. This option should follow an event selector (-e) which selects either tracepoint event(s) or a hardware trace PMU (e.g. Intel PT or CoreSight). @@ -153,7 +153,7 @@ OPTIONS --exclude-perf:: Don't record events issued by perf itself. This option should follow - a event selector (-e) which selects tracepoint event(s). It adds a + an event selector (-e) which selects tracepoint event(s). It adds a filter expression 'common_pid != $PERFPID' to filters. 
If other '--filter' exists, the new filter expression will be combined with them by '&&'. -- cgit From 06dc5bf21f3f90750fcc073fbc6ce2a0324df051 Mon Sep 17 00:00:00 2001 From: Alexey Budankov Date: Tue, 19 Jun 2018 11:45:40 +0300 Subject: perf tests: Check that complex event name is parsed correctly Extend regression testing to cover case of complex event names enabled by the cset f92da71280fb ("perf record: Enable arbitrary event names thru name= modifier"). Testing it: # perf test 1: vmlinux symtab matches kallsyms : Skip 2: Detect openat syscall event : Ok 3: Detect openat syscall event on all cpus : Ok 4: Read samples using the mmap interface : Ok 5: Test data source output : Ok 6: Parse event definition strings : Ok <===! 7: Simple expression parser : Ok ... Committer testing: # perf test "event definition" 6: Parse event definition strings : Ok # perf test -v 6 2> /tmp/before # perf test -v 6 2> /tmp/after # diff -u /tmp/before /tmp/after --- /tmp/before 2018-06-19 10:50:21.485572638 -0300 +++ /tmp/after 2018-06-19 10:50:40.886572896 -0300 @@ -1,6 +1,6 @@ 6: Parse event definition strings : --- start --- -test child forked, pid 24259 +test child forked, pid 24904 running test 0 'syscalls:sys_enter_openat'Using CPUID GenuineIntel-6-3D registering plugin: /root/.traceevent/plugins/plugin_kvm.so registering plugin: /root/.traceevent/plugins/plugin_hrtimer.so @@ -136,9 +136,11 @@ running test 50 '4:0x6530160/name=numpmu/' running test 51 'L1-dcache-misses/name=cachepmu/' running test 52 'intel_pt//u' +running test 53 'cycles/name='COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks'/Duk' running test 0 'cpu/config=10,config1,config2=3,period=1000/u' running test 1 'cpu/config=1,name=krava/u,cpu/config=2/u' running test 2 'cpu/config=1,call-graph=fp,time,period=100000/,cpu/config=2,call-graph=no,time=0,period=2000/' +running test 3 'cpu/name='COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks',period=0x1,event=0x2,umask=0x3/ukp' el-capacity -> cpu/event=0x54,umask=0x2/ el-conflict -> cpu/event=0x54,umask=0x1/ el-start -> cpu/event=0xc8,umask=0x1/ # Signed-off-by: Alexey Budankov Tested-by: Arnaldo Carvalho de Melo Cc: Alexander Shishkin Cc: Andi Kleen Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/ad30b774-219b-7b80-c610-4e9e298cf8a7@linux.intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/parse-events.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c index 61211918bfba..a36313daec4e 100644 --- a/tools/perf/tests/parse-events.c +++ b/tools/perf/tests/parse-events.c @@ -1322,6 +1322,14 @@ static int test__intel_pt(struct perf_evlist *evlist) return 0; } +static int test__checkevent_complex_name(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel = perf_evlist__first(evlist); + + TEST_ASSERT_VAL("wrong complex name parsing", strcmp(evsel->name, "COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks") == 0); + return 0; +} + static int count_tracepoints(void) { struct dirent *events_ent; @@ -1658,6 +1666,11 @@ static struct evlist_test test__events[] = { .check = test__intel_pt, .id = 52, }, + { + .name = "cycles/name='COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks'/Duk", + .check = test__checkevent_complex_name, + .id = 53 + } }; static struct evlist_test test__events_pmu[] = { @@ -1676,6 +1689,11 @@ static struct evlist_test test__events_pmu[] = { .check = test__checkevent_pmu_partial_time_callgraph, .id = 2, }, + { + .name = 
"cpu/name='COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks',period=0x1,event=0x2,umask=0x3/ukp", + .check = test__checkevent_complex_name, + .id = 3, + } }; struct terms_test { -- cgit From e9de7e2f7e22989fefc760cf0920062c58b2b2b1 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 20 Jun 2018 15:58:20 -0300 Subject: perf hists: Clarify callchain disabling when available We want to allow having mixed events with/without callchains, not using a global flag to show callchains, but allowing supressing callchains when they are present. So invert the logic of the last parameter to hists__fprint() to that effect. Cc: Adrian Hunter Cc: David Ahern Cc: Jiri Olsa Cc: Namhyung Kim Cc: Wang Nan Link: https://lkml.kernel.org/n/tip-ohqyisr6qge79qa95ojslptx@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-c2c.c | 4 ++-- tools/perf/builtin-diff.c | 2 +- tools/perf/builtin-report.c | 4 ++-- tools/perf/builtin-top.c | 2 +- tools/perf/ui/stdio/hist.c | 8 ++++---- tools/perf/util/hist.h | 2 +- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c index 6a8738f7ead3..f2ea85ee573f 100644 --- a/tools/perf/builtin-c2c.c +++ b/tools/perf/builtin-c2c.c @@ -2193,7 +2193,7 @@ static void print_cacheline(struct c2c_hists *c2c_hists, fprintf(out, "%s\n", bf); fprintf(out, " -------------------------------------------------------------\n"); - hists__fprintf(&c2c_hists->hists, false, 0, 0, 0, out, true); + hists__fprintf(&c2c_hists->hists, false, 0, 0, 0, out, false); } static void print_pareto(FILE *out) @@ -2268,7 +2268,7 @@ static void perf_c2c__hists_fprintf(FILE *out, struct perf_session *session) fprintf(out, "=================================================\n"); fprintf(out, "#\n"); - hists__fprintf(&c2c.hists.hists, true, 0, 0, 0, stdout, false); + hists__fprintf(&c2c.hists.hists, true, 0, 0, 0, stdout, true); fprintf(out, "\n"); fprintf(out, "=================================================\n"); diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c index d660cb7b222b..39db2ee32d48 100644 --- a/tools/perf/builtin-diff.c +++ b/tools/perf/builtin-diff.c @@ -696,7 +696,7 @@ static void hists__process(struct hists *hists) hists__output_resort(hists, NULL); hists__fprintf(hists, !quiet, 0, 0, 0, stdout, - symbol_conf.use_callchain); + !symbol_conf.use_callchain); } static void data__fprintf(void) diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index c04dc7b53797..02f7a3c27761 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -478,8 +478,8 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist, hists__fprintf_nr_sample_events(hists, rep, evname, stdout); hists__fprintf(hists, !quiet, 0, 0, rep->min_percent, stdout, - symbol_conf.use_callchain || - symbol_conf.show_branchflag_count); + !(symbol_conf.use_callchain || + symbol_conf.show_branchflag_count)); fprintf(stdout, "\n\n"); } diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index ffdc2769ff9f..d21d8751e749 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -307,7 +307,7 @@ static void perf_top__print_sym_table(struct perf_top *top) hists__output_recalc_col_len(hists, top->print_entries - printed); putchar('\n'); hists__fprintf(hists, false, top->print_entries - printed, win_width, - top->min_percent, stdout, symbol_conf.use_callchain); + top->min_percent, stdout, !symbol_conf.use_callchain); } static void prompt_integer(int *target, 
const char *msg) diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c index 69b7a28f7a1c..74c4ae1f0a05 100644 --- a/tools/perf/ui/stdio/hist.c +++ b/tools/perf/ui/stdio/hist.c @@ -529,7 +529,7 @@ out: static int hist_entry__fprintf(struct hist_entry *he, size_t size, char *bf, size_t bfsz, FILE *fp, - bool use_callchain) + bool ignore_callchains) { int ret; int callchain_ret = 0; @@ -550,7 +550,7 @@ static int hist_entry__fprintf(struct hist_entry *he, size_t size, ret = fprintf(fp, "%s\n", bf); - if (hist_entry__has_callchains(he) && use_callchain) + if (hist_entry__has_callchains(he) && !ignore_callchains) callchain_ret = hist_entry_callchain__fprintf(he, total_period, 0, fp); @@ -755,7 +755,7 @@ int hists__fprintf_headers(struct hists *hists, FILE *fp) size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows, int max_cols, float min_pcnt, FILE *fp, - bool use_callchain) + bool ignore_callchains) { struct rb_node *nd; size_t ret = 0; @@ -799,7 +799,7 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows, if (percent < min_pcnt) continue; - ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, use_callchain); + ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, ignore_callchains); if (max_rows && ++nr_rows >= max_rows) break; diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 73049f7f0f60..3badd7f1e1b8 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -181,7 +181,7 @@ size_t events_stats__fprintf(struct events_stats *stats, FILE *fp); size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows, int max_cols, float min_pcnt, FILE *fp, - bool use_callchain); + bool ignore_callchains); size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp); void hists__filter_by_dso(struct hists *hists); -- cgit From 6abf0f451087176caf0f9ad0cc2a79bcc5ebb443 Mon Sep 17 00:00:00 2001 From: Leo Yan Date: Mon, 18 Jun 2018 13:09:54 +0800 Subject: perf cs-etm: Introduce invalid address macro This patch introduces invalid address macro and uses it to replace dummy value '0xdeadbeefdeadbeefUL'. 
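The change itself is mechanical; condensed from the diff below purely as an illustration, it amounts to naming the sentinel once and reusing it wherever the packet buffer is reset:

	#define CS_ETM_INVAL_ADDR 0xdeadbeefdeadbeefUL

	decoder->packet_buffer[i].start_addr = CS_ETM_INVAL_ADDR;
	decoder->packet_buffer[i].end_addr   = CS_ETM_INVAL_ADDR;
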
Signed-off-by: Leo Yan Reviewed-by: Mathieu Poirier Cc: Alexander Shishkin Cc: Jiri Olsa Cc: Kim Phillips Cc: Mike Leach Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Robert Walker Cc: linux-arm-kernel@lists.infradead.org Link: http://lkml.kernel.org/r/1529298599-3876-2-git-send-email-leo.yan@linaro.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/cs-etm-decoder/cs-etm-decoder.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c index 4d5fc374e730..938def6d0bb9 100644 --- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c +++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c @@ -31,6 +31,8 @@ #endif #endif +#define CS_ETM_INVAL_ADDR 0xdeadbeefdeadbeefUL + struct cs_etm_decoder { void *data; void (*packet_printer)(const char *msg); @@ -261,8 +263,8 @@ static void cs_etm_decoder__clear_buffer(struct cs_etm_decoder *decoder) decoder->tail = 0; decoder->packet_count = 0; for (i = 0; i < MAX_BUFFER; i++) { - decoder->packet_buffer[i].start_addr = 0xdeadbeefdeadbeefUL; - decoder->packet_buffer[i].end_addr = 0xdeadbeefdeadbeefUL; + decoder->packet_buffer[i].start_addr = CS_ETM_INVAL_ADDR; + decoder->packet_buffer[i].end_addr = CS_ETM_INVAL_ADDR; decoder->packet_buffer[i].last_instr_taken_branch = false; decoder->packet_buffer[i].exc = false; decoder->packet_buffer[i].exc_ret = false; @@ -295,8 +297,8 @@ cs_etm_decoder__buffer_packet(struct cs_etm_decoder *decoder, decoder->packet_buffer[et].exc = false; decoder->packet_buffer[et].exc_ret = false; decoder->packet_buffer[et].cpu = *((int *)inode->priv); - decoder->packet_buffer[et].start_addr = 0xdeadbeefdeadbeefUL; - decoder->packet_buffer[et].end_addr = 0xdeadbeefdeadbeefUL; + decoder->packet_buffer[et].start_addr = CS_ETM_INVAL_ADDR; + decoder->packet_buffer[et].end_addr = CS_ETM_INVAL_ADDR; if (decoder->packet_count == MAX_BUFFER - 1) return OCSD_RESP_WAIT; -- cgit From 6cd4ac6a02c9bc6560faf567ac013902ab9cd039 Mon Sep 17 00:00:00 2001 From: Leo Yan Date: Mon, 18 Jun 2018 13:09:55 +0800 Subject: perf cs-etm: Bail out immediately for instruction sample failure If the instruction sample failure has happened, it isn't necessary to execute to the end of the function cs_etm__flush(). This commit is to bail out immediately and return the error code. Signed-off-by: Leo Yan Reviewed-by: Mathieu Poirier Cc: Alexander Shishkin Cc: Jiri Olsa Cc: Kim Phillips Cc: Mike Leach Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Robert Walker Cc: linux-arm-kernel@lists.infradead.org Link: http://lkml.kernel.org/r/1529298599-3876-3-git-send-email-leo.yan@linaro.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/cs-etm.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c index 822ba915d144..8b2c099e750a 100644 --- a/tools/perf/util/cs-etm.c +++ b/tools/perf/util/cs-etm.c @@ -939,6 +939,9 @@ static int cs_etm__flush(struct cs_etm_queue *etmq) err = cs_etm__synth_instruction_sample( etmq, addr, etmq->period_instructions); + if (err) + return err; + etmq->period_instructions = 0; /* -- cgit From b8b5ab52bc3a0423080783ae563801077f7ea400 Mon Sep 17 00:00:00 2001 From: Thomas Richter Date: Thu, 21 Jun 2018 10:04:49 +0200 Subject: Revert "perf list: Add s390 support for detailed/verbose PMU event description" This reverts commit 038586c34301578e538f6c5aa79ca82bce1b9152. 
Fix the support of detailed/verbose PMU event description by using the "Unit": keyword in the json files to address event names refering to the /sys/devices/cpum_[cs]f devices. Signed-off-by: Thomas Richter Reviewed-by: Hendrik Brueckner Cc: Heiko Carstens Cc: Hendrik Brueckner Cc: Martin Schwidefsky Link: http://lkml.kernel.org/r/20180621080452.61012-1-tmricht@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/pmu.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index 3ba6a1742f91..afd68524ffa9 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -652,12 +652,6 @@ static int is_arm_pmu_core(const char *name) if (stat(path, &st) == 0) return 1; - /* Look for cpu sysfs (specific to s390) */ - scnprintf(path, PATH_MAX, "%s/bus/event_source/devices/%s", - sysfs, name); - if (stat(path, &st) == 0 && !strncmp(name, "cpum_", 5)) - return 1; - return 0; } -- cgit From 9bacbced0e32204deb8b9d011279f9beddd8c2ef Mon Sep 17 00:00:00 2001 From: Thomas Richter Date: Thu, 21 Jun 2018 10:04:50 +0200 Subject: perf list: Add s390 support for detailed PMU event description Correct the support of detailed/verbose PMU event description by using the "Unit": keyword in the json files to address event names refering to the /sys/devices/cpum_[cs]f devices. Signed-off-by: Thomas Richter Reviewed-by: Hendrik Brueckner Cc: Heiko Carstens Cc: Martin Schwidefsky Link: http://lkml.kernel.org/r/20180621080452.61012-2-tmricht@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/pmu-events/arch/s390/cf_z10/basic.json | 12 +++++ tools/perf/pmu-events/arch/s390/cf_z10/crypto.json | 16 +++++++ .../perf/pmu-events/arch/s390/cf_z10/extended.json | 18 +++++++ tools/perf/pmu-events/arch/s390/cf_z13/basic.json | 12 +++++ tools/perf/pmu-events/arch/s390/cf_z13/crypto.json | 16 +++++++ .../perf/pmu-events/arch/s390/cf_z13/extended.json | 56 ++++++++++++++++++++++ tools/perf/pmu-events/arch/s390/cf_z14/basic.json | 8 ++++ tools/perf/pmu-events/arch/s390/cf_z14/crypto.json | 16 +++++++ .../perf/pmu-events/arch/s390/cf_z14/extended.json | 53 ++++++++++++++++++++ tools/perf/pmu-events/arch/s390/cf_z196/basic.json | 12 +++++ .../perf/pmu-events/arch/s390/cf_z196/crypto.json | 16 +++++++ .../pmu-events/arch/s390/cf_z196/extended.json | 24 ++++++++++ .../perf/pmu-events/arch/s390/cf_zec12/basic.json | 12 +++++ .../perf/pmu-events/arch/s390/cf_zec12/crypto.json | 16 +++++++ .../pmu-events/arch/s390/cf_zec12/extended.json | 35 ++++++++++++++ tools/perf/pmu-events/jevents.c | 2 + 16 files changed, 324 insertions(+) diff --git a/tools/perf/pmu-events/arch/s390/cf_z10/basic.json b/tools/perf/pmu-events/arch/s390/cf_z10/basic.json index 8bf16759ca53..2dd8dafff2ef 100644 --- a/tools/perf/pmu-events/arch/s390/cf_z10/basic.json +++ b/tools/perf/pmu-events/arch/s390/cf_z10/basic.json @@ -1,71 +1,83 @@ [ { + "Unit": "CPU-M-CF", "EventCode": "0", "EventName": "CPU_CYCLES", "BriefDescription": "CPU Cycles", "PublicDescription": "Cycle Count" }, { + "Unit": "CPU-M-CF", "EventCode": "1", "EventName": "INSTRUCTIONS", "BriefDescription": "Instructions", "PublicDescription": "Instruction Count" }, { + "Unit": "CPU-M-CF", "EventCode": "2", "EventName": "L1I_DIR_WRITES", "BriefDescription": "L1I Directory Writes", "PublicDescription": "Level-1 I-Cache Directory Write Count" }, { + "Unit": "CPU-M-CF", "EventCode": "3", "EventName": "L1I_PENALTY_CYCLES", "BriefDescription": "L1I Penalty Cycles", "PublicDescription": "Level-1 I-Cache Penalty Cycle Count" }, 
{ + "Unit": "CPU-M-CF", "EventCode": "4", "EventName": "L1D_DIR_WRITES", "BriefDescription": "L1D Directory Writes", "PublicDescription": "Level-1 D-Cache Directory Write Count" }, { + "Unit": "CPU-M-CF", "EventCode": "5", "EventName": "L1D_PENALTY_CYCLES", "BriefDescription": "L1D Penalty Cycles", "PublicDescription": "Level-1 D-Cache Penalty Cycle Count" }, { + "Unit": "CPU-M-CF", "EventCode": "32", "EventName": "PROBLEM_STATE_CPU_CYCLES", "BriefDescription": "Problem-State CPU Cycles", "PublicDescription": "Problem-State Cycle Count" }, { + "Unit": "CPU-M-CF", "EventCode": "33", "EventName": "PROBLEM_STATE_INSTRUCTIONS", "BriefDescription": "Problem-State Instructions", "PublicDescription": "Problem-State Instruction Count" }, { + "Unit": "CPU-M-CF", "EventCode": "34", "EventName": "PROBLEM_STATE_L1I_DIR_WRITES", "BriefDescription": "Problem-State L1I Directory Writes", "PublicDescription": "Problem-State Level-1 I-Cache Directory Write Count" }, { + "Unit": "CPU-M-CF", "EventCode": "35", "EventName": "PROBLEM_STATE_L1I_PENALTY_CYCLES", "BriefDescription": "Problem-State L1I Penalty Cycles", "PublicDescription": "Problem-State Level-1 I-Cache Penalty Cycle Count" }, { + "Unit": "CPU-M-CF", "EventCode": "36", "EventName": "PROBLEM_STATE_L1D_DIR_WRITES", "BriefDescription": "Problem-State L1D Directory Writes", "PublicDescription": "Problem-State Level-1 D-Cache Directory Write Count" }, { + "Unit": "CPU-M-CF", "EventCode": "37", "EventName": "PROBLEM_STATE_L1D_PENALTY_CYCLES", "BriefDescription": "Problem-State L1D Penalty Cycles", diff --git a/tools/perf/pmu-events/arch/s390/cf_z10/crypto.json b/tools/perf/pmu-events/arch/s390/cf_z10/crypto.json index 7e5b72492141..db286f19e7b6 100644 --- a/tools/perf/pmu-events/arch/s390/cf_z10/crypto.json +++ b/tools/perf/pmu-events/arch/s390/cf_z10/crypto.json @@ -1,95 +1,111 @@ [ { + "Unit": "CPU-M-CF", "EventCode": "64", "EventName": "PRNG_FUNCTIONS", "BriefDescription": "PRNG Functions", "PublicDescription": "Total number of the PRNG functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "65", "EventName": "PRNG_CYCLES", "BriefDescription": "PRNG Cycles", "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "66", "EventName": "PRNG_BLOCKED_FUNCTIONS", "BriefDescription": "PRNG Blocked Functions", "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "67", "EventName": "PRNG_BLOCKED_CYCLES", "BriefDescription": "PRNG Blocked Cycles", "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "68", "EventName": "SHA_FUNCTIONS", "BriefDescription": "SHA Functions", "PublicDescription": "Total number of SHA functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "69", "EventName": "SHA_CYCLES", "BriefDescription": "SHA Cycles", "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "70", "EventName": "SHA_BLOCKED_FUNCTIONS", "BriefDescription": "SHA Blocked Functions", "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked 
because the SHA coprocessor is busy performing a function issued by another CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "71", "EventName": "SHA_BLOCKED_CYCLES", "BriefDescription": "SHA Bloced Cycles", "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "72", "EventName": "DEA_FUNCTIONS", "BriefDescription": "DEA Functions", "PublicDescription": "Total number of the DEA functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "73", "EventName": "DEA_CYCLES", "BriefDescription": "DEA Cycles", "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "74", "EventName": "DEA_BLOCKED_FUNCTIONS", "BriefDescription": "DEA Blocked Functions", "PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "75", "EventName": "DEA_BLOCKED_CYCLES", "BriefDescription": "DEA Blocked Cycles", "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "76", "EventName": "AES_FUNCTIONS", "BriefDescription": "AES Functions", "PublicDescription": "Total number of AES functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "77", "EventName": "AES_CYCLES", "BriefDescription": "AES Cycles", "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "78", "EventName": "AES_BLOCKED_FUNCTIONS", "BriefDescription": "AES Blocked Functions", "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "79", "EventName": "AES_BLOCKED_CYCLES", "BriefDescription": "AES Blocked Cycles", diff --git a/tools/perf/pmu-events/arch/s390/cf_z10/extended.json b/tools/perf/pmu-events/arch/s390/cf_z10/extended.json index 0feedb40f30f..b6b7f29ca831 100644 --- a/tools/perf/pmu-events/arch/s390/cf_z10/extended.json +++ b/tools/perf/pmu-events/arch/s390/cf_z10/extended.json @@ -1,107 +1,125 @@ [ { + "Unit": "CPU-M-CF", "EventCode": "128", "EventName": "L1I_L2_SOURCED_WRITES", "BriefDescription": "L1I L2 Sourced Writes", "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from the Level-2 (L1.5) cache" }, { + "Unit": "CPU-M-CF", "EventCode": "129", "EventName": "L1D_L2_SOURCED_WRITES", "BriefDescription": "L1D L2 Sourced Writes", "PublicDescription": "A directory write to the Level-1 D-Cache directory where the installed cache line was sourced from the Level-2 (L1.5) cache" }, { + "Unit": "CPU-M-CF", "EventCode": "130", "EventName": "L1I_L3_LOCAL_WRITES", "BriefDescription": "L1I L3 Local Writes", "PublicDescription": "A directory write to the Level-1 I-Cache directory where the installed cache line was sourced from the Level-3 cache that is on the same book as the Instruction cache (Local L2 cache)" }, { + "Unit": "CPU-M-CF", "EventCode": "131", "EventName": "L1D_L3_LOCAL_WRITES", 
"BriefDescription": "L1D L3 Local Writes", "PublicDescription": "A directory write to the Level-1 D-Cache directory where the installtion cache line was source from the Level-3 cache that is on the same book as the Data cache (Local L2 cache)" }, { + "Unit": "CPU-M-CF", "EventCode": "132", "EventName": "L1I_L3_REMOTE_WRITES", "BriefDescription": "L1I L3 Remote Writes", "PublicDescription": "A directory write to the Level-1 I-Cache directory where the installed cache line was sourced from a Level-3 cache that is not on the same book as the Instruction cache (Remote L2 cache)" }, { + "Unit": "CPU-M-CF", "EventCode": "133", "EventName": "L1D_L3_REMOTE_WRITES", "BriefDescription": "L1D L3 Remote Writes", "PublicDescription": "A directory write to the Level-1 D-Cache directory where the installed cache line was sourced from a Level-3 cache that is not on the same book as the Data cache (Remote L2 cache)" }, { + "Unit": "CPU-M-CF", "EventCode": "134", "EventName": "L1D_LMEM_SOURCED_WRITES", "BriefDescription": "L1D Local Memory Sourced Writes", "PublicDescription": "A directory write to the Level-1 D-Cache directory where the installed cache line was sourced from memory that is attached to the same book as the Data cache (Local Memory)" }, { + "Unit": "CPU-M-CF", "EventCode": "135", "EventName": "L1I_LMEM_SOURCED_WRITES", "BriefDescription": "L1I Local Memory Sourced Writes", "PublicDescription": "A directory write to the Level-1 I-Cache where the installed cache line was sourced from memory that is attached to the s ame book as the Instruction cache (Local Memory)" }, { + "Unit": "CPU-M-CF", "EventCode": "136", "EventName": "L1D_RO_EXCL_WRITES", "BriefDescription": "L1D Read-only Exclusive Writes", "PublicDescription": "A directory write to the Level-1 D-Cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line" }, { + "Unit": "CPU-M-CF", "EventCode": "137", "EventName": "L1I_CACHELINE_INVALIDATES", "BriefDescription": "L1I Cacheline Invalidates", "PublicDescription": "A cache line in the Level-1 I-Cache has been invalidated by a store on the same CPU as the Level-1 I-Cache" }, { + "Unit": "CPU-M-CF", "EventCode": "138", "EventName": "ITLB1_WRITES", "BriefDescription": "ITLB1 Writes", "PublicDescription": "A translation entry has been written into the Level-1 Instruction Translation Lookaside Buffer" }, { + "Unit": "CPU-M-CF", "EventCode": "139", "EventName": "DTLB1_WRITES", "BriefDescription": "DTLB1 Writes", "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer" }, { + "Unit": "CPU-M-CF", "EventCode": "140", "EventName": "TLB2_PTE_WRITES", "BriefDescription": "TLB2 PTE Writes", "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays" }, { + "Unit": "CPU-M-CF", "EventCode": "141", "EventName": "TLB2_CRSTE_WRITES", "BriefDescription": "TLB2 CRSTE Writes", "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays" }, { + "Unit": "CPU-M-CF", "EventCode": "142", "EventName": "TLB2_CRSTE_HPAGE_WRITES", "BriefDescription": "TLB2 CRSTE One-Megabyte Page Writes", "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays for a one-megabyte large page translation" }, { + "Unit": "CPU-M-CF", "EventCode": "145", "EventName": "ITLB1_MISSES", "BriefDescription": "ITLB1 Misses", 
"PublicDescription": "Level-1 Instruction TLB miss in progress. Incremented by one for every cycle an ITLB1 miss is in progress" }, { + "Unit": "CPU-M-CF", "EventCode": "146", "EventName": "DTLB1_MISSES", "BriefDescription": "DTLB1 Misses", "PublicDescription": "Level-1 Data TLB miss in progress. Incremented by one for every cycle an DTLB1 miss is in progress" }, { + "Unit": "CPU-M-CF", "EventCode": "147", "EventName": "L2C_STORES_SENT", "BriefDescription": "L2C Stores Sent", diff --git a/tools/perf/pmu-events/arch/s390/cf_z13/basic.json b/tools/perf/pmu-events/arch/s390/cf_z13/basic.json index 8bf16759ca53..2dd8dafff2ef 100644 --- a/tools/perf/pmu-events/arch/s390/cf_z13/basic.json +++ b/tools/perf/pmu-events/arch/s390/cf_z13/basic.json @@ -1,71 +1,83 @@ [ { + "Unit": "CPU-M-CF", "EventCode": "0", "EventName": "CPU_CYCLES", "BriefDescription": "CPU Cycles", "PublicDescription": "Cycle Count" }, { + "Unit": "CPU-M-CF", "EventCode": "1", "EventName": "INSTRUCTIONS", "BriefDescription": "Instructions", "PublicDescription": "Instruction Count" }, { + "Unit": "CPU-M-CF", "EventCode": "2", "EventName": "L1I_DIR_WRITES", "BriefDescription": "L1I Directory Writes", "PublicDescription": "Level-1 I-Cache Directory Write Count" }, { + "Unit": "CPU-M-CF", "EventCode": "3", "EventName": "L1I_PENALTY_CYCLES", "BriefDescription": "L1I Penalty Cycles", "PublicDescription": "Level-1 I-Cache Penalty Cycle Count" }, { + "Unit": "CPU-M-CF", "EventCode": "4", "EventName": "L1D_DIR_WRITES", "BriefDescription": "L1D Directory Writes", "PublicDescription": "Level-1 D-Cache Directory Write Count" }, { + "Unit": "CPU-M-CF", "EventCode": "5", "EventName": "L1D_PENALTY_CYCLES", "BriefDescription": "L1D Penalty Cycles", "PublicDescription": "Level-1 D-Cache Penalty Cycle Count" }, { + "Unit": "CPU-M-CF", "EventCode": "32", "EventName": "PROBLEM_STATE_CPU_CYCLES", "BriefDescription": "Problem-State CPU Cycles", "PublicDescription": "Problem-State Cycle Count" }, { + "Unit": "CPU-M-CF", "EventCode": "33", "EventName": "PROBLEM_STATE_INSTRUCTIONS", "BriefDescription": "Problem-State Instructions", "PublicDescription": "Problem-State Instruction Count" }, { + "Unit": "CPU-M-CF", "EventCode": "34", "EventName": "PROBLEM_STATE_L1I_DIR_WRITES", "BriefDescription": "Problem-State L1I Directory Writes", "PublicDescription": "Problem-State Level-1 I-Cache Directory Write Count" }, { + "Unit": "CPU-M-CF", "EventCode": "35", "EventName": "PROBLEM_STATE_L1I_PENALTY_CYCLES", "BriefDescription": "Problem-State L1I Penalty Cycles", "PublicDescription": "Problem-State Level-1 I-Cache Penalty Cycle Count" }, { + "Unit": "CPU-M-CF", "EventCode": "36", "EventName": "PROBLEM_STATE_L1D_DIR_WRITES", "BriefDescription": "Problem-State L1D Directory Writes", "PublicDescription": "Problem-State Level-1 D-Cache Directory Write Count" }, { + "Unit": "CPU-M-CF", "EventCode": "37", "EventName": "PROBLEM_STATE_L1D_PENALTY_CYCLES", "BriefDescription": "Problem-State L1D Penalty Cycles", diff --git a/tools/perf/pmu-events/arch/s390/cf_z13/crypto.json b/tools/perf/pmu-events/arch/s390/cf_z13/crypto.json index 7e5b72492141..db286f19e7b6 100644 --- a/tools/perf/pmu-events/arch/s390/cf_z13/crypto.json +++ b/tools/perf/pmu-events/arch/s390/cf_z13/crypto.json @@ -1,95 +1,111 @@ [ { + "Unit": "CPU-M-CF", "EventCode": "64", "EventName": "PRNG_FUNCTIONS", "BriefDescription": "PRNG Functions", "PublicDescription": "Total number of the PRNG functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "65", "EventName": "PRNG_CYCLES", 
"BriefDescription": "PRNG Cycles", "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "66", "EventName": "PRNG_BLOCKED_FUNCTIONS", "BriefDescription": "PRNG Blocked Functions", "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "67", "EventName": "PRNG_BLOCKED_CYCLES", "BriefDescription": "PRNG Blocked Cycles", "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "68", "EventName": "SHA_FUNCTIONS", "BriefDescription": "SHA Functions", "PublicDescription": "Total number of SHA functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "69", "EventName": "SHA_CYCLES", "BriefDescription": "SHA Cycles", "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "70", "EventName": "SHA_BLOCKED_FUNCTIONS", "BriefDescription": "SHA Blocked Functions", "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "71", "EventName": "SHA_BLOCKED_CYCLES", "BriefDescription": "SHA Bloced Cycles", "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "72", "EventName": "DEA_FUNCTIONS", "BriefDescription": "DEA Functions", "PublicDescription": "Total number of the DEA functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "73", "EventName": "DEA_CYCLES", "BriefDescription": "DEA Cycles", "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "74", "EventName": "DEA_BLOCKED_FUNCTIONS", "BriefDescription": "DEA Blocked Functions", "PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "75", "EventName": "DEA_BLOCKED_CYCLES", "BriefDescription": "DEA Blocked Cycles", "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "76", "EventName": "AES_FUNCTIONS", "BriefDescription": "AES Functions", "PublicDescription": "Total number of AES functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "77", "EventName": "AES_CYCLES", "BriefDescription": "AES Cycles", "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "78", "EventName": "AES_BLOCKED_FUNCTIONS", "BriefDescription": "AES Blocked Functions", "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is 
busy performing a function issued by another CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "79", "EventName": "AES_BLOCKED_CYCLES", "BriefDescription": "AES Blocked Cycles", diff --git a/tools/perf/pmu-events/arch/s390/cf_z13/extended.json b/tools/perf/pmu-events/arch/s390/cf_z13/extended.json index 9a002b6967f1..436ce33f1182 100644 --- a/tools/perf/pmu-events/arch/s390/cf_z13/extended.json +++ b/tools/perf/pmu-events/arch/s390/cf_z13/extended.json @@ -1,335 +1,391 @@ [ { + "Unit": "CPU-M-CF", "EventCode": "128", "EventName": "L1D_RO_EXCL_WRITES", "BriefDescription": "L1D Read-only Exclusive Writes", "PublicDescription": "A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line." }, { + "Unit": "CPU-M-CF", "EventCode": "129", "EventName": "DTLB1_WRITES", "BriefDescription": "DTLB1 Writes", "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer" }, { + "Unit": "CPU-M-CF", "EventCode": "130", "EventName": "DTLB1_MISSES", "BriefDescription": "DTLB1 Misses", "PublicDescription": "Level-1 Data TLB miss in progress. Incremented by one for every cycle a DTLB1 miss is in progress." }, { + "Unit": "CPU-M-CF", "EventCode": "131", "EventName": "DTLB1_HPAGE_WRITES", "BriefDescription": "DTLB1 One-Megabyte Page Writes", "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a one-megabyte page" }, { + "Unit": "CPU-M-CF", "EventCode": "132", "EventName": "DTLB1_GPAGE_WRITES", "BriefDescription": "DTLB1 Two-Gigabyte Page Writes", "PublicDescription": "Counter:132 Name:DTLB1_GPAGE_WRITES A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a two-gigabyte page." }, { + "Unit": "CPU-M-CF", "EventCode": "133", "EventName": "L1D_L2D_SOURCED_WRITES", "BriefDescription": "L1D L2D Sourced Writes", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache" }, { + "Unit": "CPU-M-CF", "EventCode": "134", "EventName": "ITLB1_WRITES", "BriefDescription": "ITLB1 Writes", "PublicDescription": "A translation entry has been written to the Level-1 Instruction Translation Lookaside Buffer" }, { + "Unit": "CPU-M-CF", "EventCode": "135", "EventName": "ITLB1_MISSES", "BriefDescription": "ITLB1 Misses", "PublicDescription": "Level-1 Instruction TLB miss in progress. 
Incremented by one for every cycle an ITLB1 miss is in progress" }, { + "Unit": "CPU-M-CF", "EventCode": "136", "EventName": "L1I_L2I_SOURCED_WRITES", "BriefDescription": "L1I L2I Sourced Writes", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache" }, { + "Unit": "CPU-M-CF", "EventCode": "137", "EventName": "TLB2_PTE_WRITES", "BriefDescription": "TLB2 PTE Writes", "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays" }, { + "Unit": "CPU-M-CF", "EventCode": "138", "EventName": "TLB2_CRSTE_HPAGE_WRITES", "BriefDescription": "TLB2 CRSTE One-Megabyte Page Writes", "PublicDescription": "A translation entry has been written to the Level-2 TLB Combined Region Segment Table Entry arrays for a one-megabyte large page translation" }, { + "Unit": "CPU-M-CF", "EventCode": "139", "EventName": "TLB2_CRSTE_WRITES", "BriefDescription": "TLB2 CRSTE Writes", "PublicDescription": "A translation entry has been written to the Level-2 TLB Combined Region Segment Table Entry arrays" }, { + "Unit": "CPU-M-CF", "EventCode": "140", "EventName": "TX_C_TEND", "BriefDescription": "Completed TEND instructions in constrained TX mode", "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode" }, { + "Unit": "CPU-M-CF", "EventCode": "141", "EventName": "TX_NC_TEND", "BriefDescription": "Completed TEND instructions in non-constrained TX mode", "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode" }, { + "Unit": "CPU-M-CF", "EventCode": "143", "EventName": "L1C_TLB1_MISSES", "BriefDescription": "L1C TLB1 Misses", "PublicDescription": "Increments by one for any cycle where a Level-1 cache or Level-1 TLB miss is in progress." 
}, { + "Unit": "CPU-M-CF", "EventCode": "144", "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES", "BriefDescription": "L1D On-Chip L3 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "145", "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_IV", "BriefDescription": "L1D On-Chip L3 Sourced Writes with Intervention", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "146", "EventName": "L1D_ONNODE_L4_SOURCED_WRITES", "BriefDescription": "L1D On-Node L4 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Node Level-4 cache" }, { + "Unit": "CPU-M-CF", "EventCode": "147", "EventName": "L1D_ONNODE_L3_SOURCED_WRITES_IV", "BriefDescription": "L1D On-Node L3 Sourced Writes with Intervention", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Node Level-3 cache with intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "148", "EventName": "L1D_ONNODE_L3_SOURCED_WRITES", "BriefDescription": "L1D On-Node L3 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Node Level-3 cache without intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "149", "EventName": "L1D_ONDRAWER_L4_SOURCED_WRITES", "BriefDescription": "L1D On-Drawer L4 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-4 cache" }, { + "Unit": "CPU-M-CF", "EventCode": "150", "EventName": "L1D_ONDRAWER_L3_SOURCED_WRITES_IV", "BriefDescription": "L1D On-Drawer L3 Sourced Writes with Intervention", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache with intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "151", "EventName": "L1D_ONDRAWER_L3_SOURCED_WRITES", "BriefDescription": "L1D On-Drawer L3 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache without intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "152", "EventName": "L1D_OFFDRAWER_SCOL_L4_SOURCED_WRITES", "BriefDescription": "L1D Off-Drawer Same-Column L4 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-4 cache" }, { + "Unit": "CPU-M-CF", "EventCode": "153", "EventName": "L1D_OFFDRAWER_SCOL_L3_SOURCED_WRITES_IV", "BriefDescription": "L1D Off-Drawer Same-Column L3 Sourced Writes with Intervention", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache with intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "154", "EventName": "L1D_OFFDRAWER_SCOL_L3_SOURCED_WRITES", "BriefDescription": "L1D Off-Drawer Same-Column L3 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an 
Off-Drawer Same-Column Level-3 cache without intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "155", "EventName": "L1D_OFFDRAWER_FCOL_L4_SOURCED_WRITES", "BriefDescription": "L1D Off-Drawer Far-Column L4 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-4 cache" }, { + "Unit": "CPU-M-CF", "EventCode": "156", "EventName": "L1D_OFFDRAWER_FCOL_L3_SOURCED_WRITES_IV", "BriefDescription": "L1D Off-Drawer Far-Column L3 Sourced Writes with Intervention", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache with intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "157", "EventName": "L1D_OFFDRAWER_FCOL_L3_SOURCED_WRITES", "BriefDescription": "L1D Off-Drawer Far-Column L3 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache without intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "158", "EventName": "L1D_ONNODE_MEM_SOURCED_WRITES", "BriefDescription": "L1D On-Node Memory Sourced Writes", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Node memory" }, { + "Unit": "CPU-M-CF", "EventCode": "159", "EventName": "L1D_ONDRAWER_MEM_SOURCED_WRITES", "BriefDescription": "L1D On-Drawer Memory Sourced Writes", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer memory" }, { + "Unit": "CPU-M-CF", "EventCode": "160", "EventName": "L1D_OFFDRAWER_MEM_SOURCED_WRITES", "BriefDescription": "L1D Off-Drawer Memory Sourced Writes", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer memory" }, { + "Unit": "CPU-M-CF", "EventCode": "161", "EventName": "L1D_ONCHIP_MEM_SOURCED_WRITES", "BriefDescription": "L1D On-Chip Memory Sourced Writes", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory" }, { + "Unit": "CPU-M-CF", "EventCode": "162", "EventName": "L1I_ONCHIP_L3_SOURCED_WRITES", "BriefDescription": "L1I On-Chip L3 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "163", "EventName": "L1I_ONCHIP_L3_SOURCED_WRITES_IV", "BriefDescription": "L1I On-Chip L3 Sourced Writes with Intervention", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Chip Level-3 cache with intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "164", "EventName": "L1I_ONNODE_L4_SOURCED_WRITES", "BriefDescription": "L1I On-Node L4 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Node Level-4 cache" }, { + "Unit": "CPU-M-CF", "EventCode": "165", "EventName": "L1I_ONNODE_L3_SOURCED_WRITES_IV", "BriefDescription": "L1I On-Node L3 Sourced Writes with Intervention", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Node
Level-3 cache with intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "166", "EventName": "L1I_ONNODE_L3_SOURCED_WRITES", "BriefDescription": "L1I On-Node L3 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Node Level-3 cache without intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "167", "EventName": "L1I_ONDRAWER_L4_SOURCED_WRITES", "BriefDescription": "L1I On-Drawer L4 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-4 cache" }, { + "Unit": "CPU-M-CF", "EventCode": "168", "EventName": "L1I_ONDRAWER_L3_SOURCED_WRITES_IV", "BriefDescription": "L1I On-Drawer L3 Sourced Writes with Intervention", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache with intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "169", "EventName": "L1I_ONDRAWER_L3_SOURCED_WRITES", "BriefDescription": "L1I On-Drawer L3 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache without intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "170", "EventName": "L1I_OFFDRAWER_SCOL_L4_SOURCED_WRITES", "BriefDescription": "L1I Off-Drawer Same-Column L4 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-4 cache" }, { + "Unit": "CPU-M-CF", "EventCode": "171", "EventName": "L1I_OFFDRAWER_SCOL_L3_SOURCED_WRITES_IV", "BriefDescription": "L1I Off-Drawer Same-Column L3 Sourced Writes with Intervention", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache with intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "172", "EventName": "L1I_OFFDRAWER_SCOL_L3_SOURCED_WRITES", "BriefDescription": "L1I Off-Drawer Same-Column L3 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache without intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "173", "EventName": "L1I_OFFDRAWER_FCOL_L4_SOURCED_WRITES", "BriefDescription": "L1I Off-Drawer Far-Column L4 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-4 cache" }, { + "Unit": "CPU-M-CF", "EventCode": "174", "EventName": "L1I_OFFDRAWER_FCOL_L3_SOURCED_WRITES_IV", "BriefDescription": "L1I Off-Drawer Far-Column L3 Sourced Writes with Intervention", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache with intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "175", "EventName": "L1I_OFFDRAWER_FCOL_L3_SOURCED_WRITES", "BriefDescription": "L1I Off-Drawer Far-Column L3 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache without intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "176", "EventName": 
"L1I_ONNODE_MEM_SOURCED_WRITES", "BriefDescription": "L1I On-Node Memory Sourced Writes", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Node memory" }, { + "Unit": "CPU-M-CF", "EventCode": "177", "EventName": "L1I_ONDRAWER_MEM_SOURCED_WRITES", "BriefDescription": "L1I On-Drawer Memory Sourced Writes", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer memory" }, { + "Unit": "CPU-M-CF", "EventCode": "178", "EventName": "L1I_OFFDRAWER_MEM_SOURCED_WRITES", "BriefDescription": "L1I Off-Drawer Memory Sourced Writes", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer memory" }, { + "Unit": "CPU-M-CF", "EventCode": "179", "EventName": "L1I_ONCHIP_MEM_SOURCED_WRITES", "BriefDescription": "L1I On-Chip Memory Sourced Writes", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Chip memory" }, { + "Unit": "CPU-M-CF", "EventCode": "218", "EventName": "TX_NC_TABORT", "BriefDescription": "Aborted transactions in non-constrained TX mode", "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode" }, { + "Unit": "CPU-M-CF", "EventCode": "219", "EventName": "TX_C_TABORT_NO_SPECIAL", "BriefDescription": "Aborted transactions in constrained TX mode not using special completion logic", "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete" }, { + "Unit": "CPU-M-CF", "EventCode": "220", "EventName": "TX_C_TABORT_SPECIAL", "BriefDescription": "Aborted transactions in constrained TX mode using special completion logic", "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete" }, { + "Unit": "CPU-M-CF", "EventCode": "448", "EventName": "MT_DIAG_CYCLES_ONE_THR_ACTIVE", "BriefDescription": "Cycle count with one thread active", "PublicDescription": "Cycle count with one thread active" }, { + "Unit": "CPU-M-CF", "EventCode": "449", "EventName": "MT_DIAG_CYCLES_TWO_THR_ACTIVE", "BriefDescription": "Cycle count with two threads active", diff --git a/tools/perf/pmu-events/arch/s390/cf_z14/basic.json b/tools/perf/pmu-events/arch/s390/cf_z14/basic.json index 8f653c9d899d..17fb5241928b 100644 --- a/tools/perf/pmu-events/arch/s390/cf_z14/basic.json +++ b/tools/perf/pmu-events/arch/s390/cf_z14/basic.json @@ -1,47 +1,55 @@ [ { + "Unit": "CPU-M-CF", "EventCode": "0", "EventName": "CPU_CYCLES", "BriefDescription": "CPU Cycles", "PublicDescription": "Cycle Count" }, { + "Unit": "CPU-M-CF", "EventCode": "1", "EventName": "INSTRUCTIONS", "BriefDescription": "Instructions", "PublicDescription": "Instruction Count" }, { + "Unit": "CPU-M-CF", "EventCode": "2", "EventName": "L1I_DIR_WRITES", "BriefDescription": "L1I Directory Writes", "PublicDescription": "Level-1 I-Cache Directory Write Count" }, { + "Unit": "CPU-M-CF", "EventCode": "3", "EventName": "L1I_PENALTY_CYCLES", "BriefDescription": "L1I Penalty Cycles", "PublicDescription": "Level-1 I-Cache Penalty Cycle Count" }, { + "Unit": "CPU-M-CF", "EventCode": "4", "EventName": "L1D_DIR_WRITES", "BriefDescription": "L1D Directory Writes", 
"PublicDescription": "Level-1 D-Cache Directory Write Count" }, { + "Unit": "CPU-M-CF", "EventCode": "5", "EventName": "L1D_PENALTY_CYCLES", "BriefDescription": "L1D Penalty Cycles", "PublicDescription": "Level-1 D-Cache Penalty Cycle Count" }, { + "Unit": "CPU-M-CF", "EventCode": "32", "EventName": "PROBLEM_STATE_CPU_CYCLES", "BriefDescription": "Problem-State CPU Cycles", "PublicDescription": "Problem-State Cycle Count" }, { + "Unit": "CPU-M-CF", "EventCode": "33", "EventName": "PROBLEM_STATE_INSTRUCTIONS", "BriefDescription": "Problem-State Instructions", diff --git a/tools/perf/pmu-events/arch/s390/cf_z14/crypto.json b/tools/perf/pmu-events/arch/s390/cf_z14/crypto.json index 7e5b72492141..db286f19e7b6 100644 --- a/tools/perf/pmu-events/arch/s390/cf_z14/crypto.json +++ b/tools/perf/pmu-events/arch/s390/cf_z14/crypto.json @@ -1,95 +1,111 @@ [ { + "Unit": "CPU-M-CF", "EventCode": "64", "EventName": "PRNG_FUNCTIONS", "BriefDescription": "PRNG Functions", "PublicDescription": "Total number of the PRNG functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "65", "EventName": "PRNG_CYCLES", "BriefDescription": "PRNG Cycles", "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "66", "EventName": "PRNG_BLOCKED_FUNCTIONS", "BriefDescription": "PRNG Blocked Functions", "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "67", "EventName": "PRNG_BLOCKED_CYCLES", "BriefDescription": "PRNG Blocked Cycles", "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "68", "EventName": "SHA_FUNCTIONS", "BriefDescription": "SHA Functions", "PublicDescription": "Total number of SHA functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "69", "EventName": "SHA_CYCLES", "BriefDescription": "SHA Cycles", "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "70", "EventName": "SHA_BLOCKED_FUNCTIONS", "BriefDescription": "SHA Blocked Functions", "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "71", "EventName": "SHA_BLOCKED_CYCLES", "BriefDescription": "SHA Bloced Cycles", "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "72", "EventName": "DEA_FUNCTIONS", "BriefDescription": "DEA Functions", "PublicDescription": "Total number of the DEA functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "73", "EventName": "DEA_CYCLES", "BriefDescription": "DEA Cycles", "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "74", "EventName": "DEA_BLOCKED_FUNCTIONS", "BriefDescription": "DEA Blocked Functions", "PublicDescription": "Total number of the DEA functions 
that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "75", "EventName": "DEA_BLOCKED_CYCLES", "BriefDescription": "DEA Blocked Cycles", "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "76", "EventName": "AES_FUNCTIONS", "BriefDescription": "AES Functions", "PublicDescription": "Total number of AES functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "77", "EventName": "AES_CYCLES", "BriefDescription": "AES Cycles", "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "78", "EventName": "AES_BLOCKED_FUNCTIONS", "BriefDescription": "AES Blocked Functions", "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "79", "EventName": "AES_BLOCKED_CYCLES", "BriefDescription": "AES Blocked Cycles", diff --git a/tools/perf/pmu-events/arch/s390/cf_z14/extended.json b/tools/perf/pmu-events/arch/s390/cf_z14/extended.json index aa4dfb46b65b..e7a3524b748f 100644 --- a/tools/perf/pmu-events/arch/s390/cf_z14/extended.json +++ b/tools/perf/pmu-events/arch/s390/cf_z14/extended.json @@ -1,317 +1,370 @@ [ { + "Unit": "CPU-M-CF", "EventCode": "128", "EventName": "L1D_RO_EXCL_WRITES", "BriefDescription": "L1D Read-only Exclusive Writes", "PublicDescription": "Counter:128 Name:L1D_RO_EXCL_WRITES A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line" }, { + "Unit": "CPU-M-CF", "EventCode": "129", "EventName": "DTLB2_WRITES", "BriefDescription": "DTLB2 Writes", "PublicDescription": "A translation has been written into The Translation Lookaside Buffer 2 (TLB2) and the request was made by the data cache" }, { + "Unit": "CPU-M-CF", "EventCode": "130", "EventName": "DTLB2_MISSES", "BriefDescription": "DTLB2 Misses", "PublicDescription": "A TLB2 miss is in progress for a request made by the data cache. 
Incremented by one for every TLB2 miss in progress for the Level-1 Data cache on this cycle" }, { + "Unit": "CPU-M-CF", "EventCode": "131", "EventName": "DTLB2_HPAGE_WRITES", "BriefDescription": "DTLB2 One-Megabyte Page Writes", "PublicDescription": "A translation entry was written into the Combined Region and Segment Table Entry array in the Level-2 TLB for a one-megabyte page or a Last Host Translation was done" }, { + "Unit": "CPU-M-CF", "EventCode": "132", "EventName": "DTLB2_GPAGE_WRITES", "BriefDescription": "DTLB2 Two-Gigabyte Page Writes", "PublicDescription": "A translation entry for a two-gigabyte page was written into the Level-2 TLB" }, { + "Unit": "CPU-M-CF", "EventCode": "133", "EventName": "L1D_L2D_SOURCED_WRITES", "BriefDescription": "L1D L2D Sourced Writes", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache" }, { + "Unit": "CPU-M-CF", "EventCode": "134", "EventName": "ITLB2_WRITES", "BriefDescription": "ITLB2 Writes", "PublicDescription": "A translation entry has been written into the Translation Lookaside Buffer 2 (TLB2) and the request was made by the instruction cache" }, { + "Unit": "CPU-M-CF", "EventCode": "135", "EventName": "ITLB2_MISSES", "BriefDescription": "ITLB2 Misses", "PublicDescription": "A TLB2 miss is in progress for a request made by the instruction cache. Incremented by one for every TLB2 miss in progress for the Level-1 Instruction cache in a cycle" }, { + "Unit": "CPU-M-CF", "EventCode": "136", "EventName": "L1I_L2I_SOURCED_WRITES", "BriefDescription": "L1I L2I Sourced Writes", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache" }, { + "Unit": "CPU-M-CF", "EventCode": "137", "EventName": "TLB2_PTE_WRITES", "BriefDescription": "TLB2 PTE Writes", "PublicDescription": "A translation entry was written into the Page Table Entry array in the Level-2 TLB" }, { + "Unit": "CPU-M-CF", "EventCode": "138", "EventName": "TLB2_CRSTE_WRITES", "BriefDescription": "TLB2 CRSTE Writes", "PublicDescription": "Translation entries were written into the Combined Region and Segment Table Entry array and the Page Table Entry array in the Level-2 TLB" }, { + "Unit": "CPU-M-CF", "EventCode": "139", "EventName": "TLB2_ENGINES_BUSY", "BriefDescription": "TLB2 Engines Busy", "PublicDescription": "The number of Level-2 TLB translation engines busy in a cycle" }, { + "Unit": "CPU-M-CF", "EventCode": "140", "EventName": "TX_C_TEND", "BriefDescription": "Completed TEND instructions in constrained TX mode", "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode" }, { + "Unit": "CPU-M-CF", "EventCode": "141", "EventName": "TX_NC_TEND", "BriefDescription": "Completed TEND instructions in non-constrained TX mode", "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode" }, { + "Unit": "CPU-M-CF", "EventCode": "143", "EventName": "L1C_TLB2_MISSES", "BriefDescription": "L1C TLB2 Misses", "PublicDescription": "Increments by one for any cycle where a level-1 cache or level-2 TLB miss is in progress" }, { + "Unit": "CPU-M-CF", "EventCode": "144", "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES", "BriefDescription": "L1D On-Chip L3 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without 
intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "145", "EventName": "L1D_ONCHIP_MEMORY_SOURCED_WRITES", "BriefDescription": "L1D On-Chip Memory Sourced Writes", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory" }, { + "Unit": "CPU-M-CF", "EventCode": "146", "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_IV", "BriefDescription": "L1D On-Chip L3 Sourced Writes with Intervention", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "147", "EventName": "L1D_ONCLUSTER_L3_SOURCED_WRITES", "BriefDescription": "L1D On-Cluster L3 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Cluster Level-3 cache withountervention" }, { + "Unit": "CPU-M-CF", "EventCode": "148", "EventName": "L1D_ONCLUSTER_MEMORY_SOURCED_WRITES", "BriefDescription": "L1D On-Cluster Memory Sourced Writes", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster memory" }, { + "Unit": "CPU-M-CF", "EventCode": "149", "EventName": "L1D_ONCLUSTER_L3_SOURCED_WRITES_IV", "BriefDescription": "L1D On-Cluster L3 Sourced Writes with Intervention", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache with intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "150", "EventName": "L1D_OFFCLUSTER_L3_SOURCED_WRITES", "BriefDescription": "L1D Off-Cluster L3 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "151", "EventName": "L1D_OFFCLUSTER_MEMORY_SOURCED_WRITES", "BriefDescription": "L1D Off-Cluster Memory Sourced Writes", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Cluster memory" }, { + "Unit": "CPU-M-CF", "EventCode": "152", "EventName": "L1D_OFFCLUSTER_L3_SOURCED_WRITES_IV", "BriefDescription": "L1D Off-Cluster L3 Sourced Writes with Intervention", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "153", "EventName": "L1D_OFFDRAWER_L3_SOURCED_WRITES", "BriefDescription": "L1D Off-Drawer L3 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "154", "EventName": "L1D_OFFDRAWER_MEMORY_SOURCED_WRITES", "BriefDescription": "L1D Off-Drawer Memory Sourced Writes", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer memory" }, { + "Unit": "CPU-M-CF", "EventCode": "155", "EventName": "L1D_OFFDRAWER_L3_SOURCED_WRITES_IV", "BriefDescription": "L1D Off-Drawer L3 Sourced Writes with Intervention", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention" }, { + 
"Unit": "CPU-M-CF", "EventCode": "156", "EventName": "L1D_ONDRAWER_L4_SOURCED_WRITES", "BriefDescription": "L1D On-Drawer L4 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer Level-4 cache" }, { + "Unit": "CPU-M-CF", "EventCode": "157", "EventName": "L1D_OFFDRAWER_L4_SOURCED_WRITES", "BriefDescription": "L1D Off-Drawer L4 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache" }, { + "Unit": "CPU-M-CF", "EventCode": "158", "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_RO", "BriefDescription": "L1D On-Chip L3 Sourced Writes read-only", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip L3 but a read-only invalidate was done to remove other copies of the cache line" }, { + "Unit": "CPU-M-CF", "EventCode": "162", "EventName": "L1I_ONCHIP_L3_SOURCED_WRITES", "BriefDescription": "L1I On-Chip L3 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache without intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "163", "EventName": "L1I_ONCHIP_MEMORY_SOURCED_WRITES", "BriefDescription": "L1I On-Chip Memory Sourced Writes", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from On-Chip memory" }, { + "Unit": "CPU-M-CF", "EventCode": "164", "EventName": "L1I_ONCHIP_L3_SOURCED_WRITES_IV", "BriefDescription": "L1I On-Chip L3 Sourced Writes with Intervention", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache with intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "165", "EventName": "L1I_ONCLUSTER_L3_SOURCED_WRITES", "BriefDescription": "L1I On-Cluster L3 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache without intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "166", "EventName": "L1I_ONCLUSTER_MEMORY_SOURCED_WRITES", "BriefDescription": "L1I On-Cluster Memory Sourced Writes", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster memory" }, { + "Unit": "CPU-M-CF", "EventCode": "167", "EventName": "L1I_ONCLUSTER_L3_SOURCED_WRITES_IV", "BriefDescription": "L1I On-Cluster L3 Sourced Writes with Intervention", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Cluster Level-3 cache with intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "168", "EventName": "L1I_OFFCLUSTER_L3_SOURCED_WRITES", "BriefDescription": "L1I Off-Cluster L3 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "169", "EventName": "L1I_OFFCLUSTER_MEMORY_SOURCED_WRITES", "BriefDescription": "L1I Off-Cluster Memory Sourced Writes", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Cluster memory" }, 
{ + "Unit": "CPU-M-CF", "EventCode": "170", "EventName": "L1I_OFFCLUSTER_L3_SOURCED_WRITES_IV", "BriefDescription": "L1I Off-Cluster L3 Sourced Writes with Intervention", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "171", "EventName": "L1I_OFFDRAWER_L3_SOURCED_WRITES", "BriefDescription": "L1I Off-Drawer L3 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "172", "EventName": "L1I_OFFDRAWER_MEMORY_SOURCED_WRITES", "BriefDescription": "L1I Off-Drawer Memory Sourced Writes", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer memory" }, { + "Unit": "CPU-M-CF", "EventCode": "173", "EventName": "L1I_OFFDRAWER_L3_SOURCED_WRITES_IV", "BriefDescription": "L1I Off-Drawer L3 Sourced Writes with Intervention", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "174", "EventName": "L1I_ONDRAWER_L4_SOURCED_WRITES", "BriefDescription": "L1I On-Drawer L4 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer Level-4 cache" }, { + "Unit": "CPU-M-CF", "EventCode": "175", "EventName": "L1I_OFFDRAWER_L4_SOURCED_WRITES", "BriefDescription": "L1I Off-Drawer L4 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache" }, { + "Unit": "CPU-M-CF", "EventCode": "224", "EventName": "BCD_DFP_EXECUTION_SLOTS", "BriefDescription": "BCD DFP Execution Slots", "PublicDescription": "Count of floating point execution slots used for finished Binary Coded Decimal to Decimal Floating Point conversions. Instructions: CDZT, CXZT, CZDT, CZXT" }, { + "Unit": "CPU-M-CF", "EventCode": "225", "EventName": "VX_BCD_EXECUTION_SLOTS", "BriefDescription": "VX BCD Execution Slots", "PublicDescription": "Count of floating point execution slots used for finished vector arithmetic Binary Coded Decimal instructions. Instructions: VAP, VSP, VMPVMSP, VDP, VSDP, VRP, VLIP, VSRP, VPSOPVCP, VTP, VPKZ, VUPKZ, VCVB, VCVBG, VCVDVCVDG" }, { + "Unit": "CPU-M-CF", "EventCode": "226", "EventName": "DECIMAL_INSTRUCTIONS", "BriefDescription": "Decimal Instructions", "PublicDescription": "Decimal instructions dispatched. 
Instructions: CVB, CVD, AP, CP, DP, ED, EDMK, MP, SRP, SP, ZAP" }, { + "Unit": "CPU-M-CF", "EventCode": "232", "EventName": "LAST_HOST_TRANSLATIONS", "BriefDescription": "Last host translation done", "PublicDescription": "Last Host Translation done" }, { + "Unit": "CPU-M-CF", "EventCode": "243", "EventName": "TX_NC_TABORT", "BriefDescription": "Aborted transactions in non-constrained TX mode", "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode" }, { + "Unit": "CPU-M-CF", "EventCode": "244", "EventName": "TX_C_TABORT_NO_SPECIAL", "BriefDescription": "Aborted transactions in constrained TX mode not using special completion logic", "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete" }, { + "Unit": "CPU-M-CF", "EventCode": "245", "EventName": "TX_C_TABORT_SPECIAL", "BriefDescription": "Aborted transactions in constrained TX mode using special completion logic", "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete" }, { + "Unit": "CPU-M-CF", "EventCode": "448", "EventName": "MT_DIAG_CYCLES_ONE_THR_ACTIVE", "BriefDescription": "Cycle count with one thread active", "PublicDescription": "Cycle count with one thread active" }, { + "Unit": "CPU-M-CF", "EventCode": "449", "EventName": "MT_DIAG_CYCLES_TWO_THR_ACTIVE", "BriefDescription": "Cycle count with two threads active", diff --git a/tools/perf/pmu-events/arch/s390/cf_z196/basic.json b/tools/perf/pmu-events/arch/s390/cf_z196/basic.json index 8bf16759ca53..2dd8dafff2ef 100644 --- a/tools/perf/pmu-events/arch/s390/cf_z196/basic.json +++ b/tools/perf/pmu-events/arch/s390/cf_z196/basic.json @@ -1,71 +1,83 @@ [ { + "Unit": "CPU-M-CF", "EventCode": "0", "EventName": "CPU_CYCLES", "BriefDescription": "CPU Cycles", "PublicDescription": "Cycle Count" }, { + "Unit": "CPU-M-CF", "EventCode": "1", "EventName": "INSTRUCTIONS", "BriefDescription": "Instructions", "PublicDescription": "Instruction Count" }, { + "Unit": "CPU-M-CF", "EventCode": "2", "EventName": "L1I_DIR_WRITES", "BriefDescription": "L1I Directory Writes", "PublicDescription": "Level-1 I-Cache Directory Write Count" }, { + "Unit": "CPU-M-CF", "EventCode": "3", "EventName": "L1I_PENALTY_CYCLES", "BriefDescription": "L1I Penalty Cycles", "PublicDescription": "Level-1 I-Cache Penalty Cycle Count" }, { + "Unit": "CPU-M-CF", "EventCode": "4", "EventName": "L1D_DIR_WRITES", "BriefDescription": "L1D Directory Writes", "PublicDescription": "Level-1 D-Cache Directory Write Count" }, { + "Unit": "CPU-M-CF", "EventCode": "5", "EventName": "L1D_PENALTY_CYCLES", "BriefDescription": "L1D Penalty Cycles", "PublicDescription": "Level-1 D-Cache Penalty Cycle Count" }, { + "Unit": "CPU-M-CF", "EventCode": "32", "EventName": "PROBLEM_STATE_CPU_CYCLES", "BriefDescription": "Problem-State CPU Cycles", "PublicDescription": "Problem-State Cycle Count" }, { + "Unit": "CPU-M-CF", "EventCode": "33", "EventName": "PROBLEM_STATE_INSTRUCTIONS", "BriefDescription": "Problem-State Instructions", "PublicDescription": "Problem-State Instruction Count" }, { + "Unit": "CPU-M-CF", "EventCode": "34", "EventName": "PROBLEM_STATE_L1I_DIR_WRITES", "BriefDescription": "Problem-State L1I Directory Writes", "PublicDescription": "Problem-State Level-1 I-Cache Directory Write Count" }, { + "Unit": "CPU-M-CF", "EventCode": "35", "EventName": 
"PROBLEM_STATE_L1I_PENALTY_CYCLES", "BriefDescription": "Problem-State L1I Penalty Cycles", "PublicDescription": "Problem-State Level-1 I-Cache Penalty Cycle Count" }, { + "Unit": "CPU-M-CF", "EventCode": "36", "EventName": "PROBLEM_STATE_L1D_DIR_WRITES", "BriefDescription": "Problem-State L1D Directory Writes", "PublicDescription": "Problem-State Level-1 D-Cache Directory Write Count" }, { + "Unit": "CPU-M-CF", "EventCode": "37", "EventName": "PROBLEM_STATE_L1D_PENALTY_CYCLES", "BriefDescription": "Problem-State L1D Penalty Cycles", diff --git a/tools/perf/pmu-events/arch/s390/cf_z196/crypto.json b/tools/perf/pmu-events/arch/s390/cf_z196/crypto.json index 7e5b72492141..db286f19e7b6 100644 --- a/tools/perf/pmu-events/arch/s390/cf_z196/crypto.json +++ b/tools/perf/pmu-events/arch/s390/cf_z196/crypto.json @@ -1,95 +1,111 @@ [ { + "Unit": "CPU-M-CF", "EventCode": "64", "EventName": "PRNG_FUNCTIONS", "BriefDescription": "PRNG Functions", "PublicDescription": "Total number of the PRNG functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "65", "EventName": "PRNG_CYCLES", "BriefDescription": "PRNG Cycles", "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "66", "EventName": "PRNG_BLOCKED_FUNCTIONS", "BriefDescription": "PRNG Blocked Functions", "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "67", "EventName": "PRNG_BLOCKED_CYCLES", "BriefDescription": "PRNG Blocked Cycles", "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "68", "EventName": "SHA_FUNCTIONS", "BriefDescription": "SHA Functions", "PublicDescription": "Total number of SHA functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "69", "EventName": "SHA_CYCLES", "BriefDescription": "SHA Cycles", "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "70", "EventName": "SHA_BLOCKED_FUNCTIONS", "BriefDescription": "SHA Blocked Functions", "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "71", "EventName": "SHA_BLOCKED_CYCLES", "BriefDescription": "SHA Bloced Cycles", "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "72", "EventName": "DEA_FUNCTIONS", "BriefDescription": "DEA Functions", "PublicDescription": "Total number of the DEA functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "73", "EventName": "DEA_CYCLES", "BriefDescription": "DEA Cycles", "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "74", "EventName": "DEA_BLOCKED_FUNCTIONS", "BriefDescription": "DEA Blocked Functions", "PublicDescription": "Total number of the DEA functions that are issued by the 
CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "75", "EventName": "DEA_BLOCKED_CYCLES", "BriefDescription": "DEA Blocked Cycles", "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "76", "EventName": "AES_FUNCTIONS", "BriefDescription": "AES Functions", "PublicDescription": "Total number of AES functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "77", "EventName": "AES_CYCLES", "BriefDescription": "AES Cycles", "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "78", "EventName": "AES_BLOCKED_FUNCTIONS", "BriefDescription": "AES Blocked Functions", "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "79", "EventName": "AES_BLOCKED_CYCLES", "BriefDescription": "AES Blocked Cycles", diff --git a/tools/perf/pmu-events/arch/s390/cf_z196/extended.json b/tools/perf/pmu-events/arch/s390/cf_z196/extended.json index b6d7fec7c2e7..b7b42a870bb0 100644 --- a/tools/perf/pmu-events/arch/s390/cf_z196/extended.json +++ b/tools/perf/pmu-events/arch/s390/cf_z196/extended.json @@ -1,143 +1,167 @@ [ { + "Unit": "CPU-M-CF", "EventCode": "128", "EventName": "L1D_L2_SOURCED_WRITES", "BriefDescription": "L1D L2 Sourced Writes", "PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from the Level-2 cache" }, { + "Unit": "CPU-M-CF", "EventCode": "129", "EventName": "L1I_L2_SOURCED_WRITES", "BriefDescription": "L1I L2 Sourced Writes", "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from the Level-2 cache" }, { + "Unit": "CPU-M-CF", "EventCode": "130", "EventName": "DTLB1_MISSES", "BriefDescription": "DTLB1 Misses", "PublicDescription": "Level-1 Data TLB miss in progress. Incremented by one for every cycle a DTLB1 miss is in progress." }, { + "Unit": "CPU-M-CF", "EventCode": "131", "EventName": "ITLB1_MISSES", "BriefDescription": "ITLB1 Misses", "PublicDescription": "Level-1 Instruction TLB miss in progress. Incremented by one for every cycle a ITLB1 miss is in progress." 
}, { + "Unit": "CPU-M-CF", "EventCode": "133", "EventName": "L2C_STORES_SENT", "BriefDescription": "L2C Stores Sent", "PublicDescription": "Incremented by one for every store sent to Level-2 cache" }, { + "Unit": "CPU-M-CF", "EventCode": "134", "EventName": "L1D_OFFBOOK_L3_SOURCED_WRITES", "BriefDescription": "L1D Off-Book L3 Sourced Writes", "PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an Off Book Level-3 cache" }, { + "Unit": "CPU-M-CF", "EventCode": "135", "EventName": "L1D_ONBOOK_L4_SOURCED_WRITES", "BriefDescription": "L1D On-Book L4 Sourced Writes", "PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an On Book Level-4 cache" }, { + "Unit": "CPU-M-CF", "EventCode": "136", "EventName": "L1I_ONBOOK_L4_SOURCED_WRITES", "BriefDescription": "L1I On-Book L4 Sourced Writes", "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an On Book Level-4 cache" }, { + "Unit": "CPU-M-CF", "EventCode": "137", "EventName": "L1D_RO_EXCL_WRITES", "BriefDescription": "L1D Read-only Exclusive Writes", "PublicDescription": "A directory write to the Level-1 D-Cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line" }, { + "Unit": "CPU-M-CF", "EventCode": "138", "EventName": "L1D_OFFBOOK_L4_SOURCED_WRITES", "BriefDescription": "L1D Off-Book L4 Sourced Writes", "PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an Off Book Level-4 cache" }, { + "Unit": "CPU-M-CF", "EventCode": "139", "EventName": "L1I_OFFBOOK_L4_SOURCED_WRITES", "BriefDescription": "L1I Off-Book L4 Sourced Writes", "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an Off Book Level-4 cache" }, { + "Unit": "CPU-M-CF", "EventCode": "140", "EventName": "DTLB1_HPAGE_WRITES", "BriefDescription": "DTLB1 One-Megabyte Page Writes", "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a one-megabyte page" }, { + "Unit": "CPU-M-CF", "EventCode": "141", "EventName": "L1D_LMEM_SOURCED_WRITES", "BriefDescription": "L1D Local Memory Sourced Writes", "PublicDescription": "A directory write to the Level-1 D-Cache where the installed cache line was sourced from memory that is attached to the same book as the Data cache (Local Memory)" }, { + "Unit": "CPU-M-CF", "EventCode": "142", "EventName": "L1I_LMEM_SOURCED_WRITES", "BriefDescription": "L1I Local Memory Sourced Writes", "PublicDescription": "A directory write to the Level-1 I-Cache where the installed cache line was sourced from memory that is attached to the same book as the Instruction cache (Local Memory)" }, { + "Unit": "CPU-M-CF", "EventCode": "143", "EventName": "L1I_OFFBOOK_L3_SOURCED_WRITES", "BriefDescription": "L1I Off-Book L3 Sourced Writes", "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an Off Book Level-3 cache" }, { + "Unit": "CPU-M-CF", "EventCode": "144", "EventName": "DTLB1_WRITES", "BriefDescription": "DTLB1 Writes", "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer" }, { + "Unit": "CPU-M-CF", "EventCode": "145", "EventName": "ITLB1_WRITES", 
"BriefDescription": "ITLB1 Writes", "PublicDescription": "A translation entry has been written to the Level-1 Instruction Translation Lookaside Buffer" }, { + "Unit": "CPU-M-CF", "EventCode": "146", "EventName": "TLB2_PTE_WRITES", "BriefDescription": "TLB2 PTE Writes", "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays" }, { + "Unit": "CPU-M-CF", "EventCode": "147", "EventName": "TLB2_CRSTE_HPAGE_WRITES", "BriefDescription": "TLB2 CRSTE One-Megabyte Page Writes", "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays for a one-megabyte large page translation" }, { + "Unit": "CPU-M-CF", "EventCode": "148", "EventName": "TLB2_CRSTE_WRITES", "BriefDescription": "TLB2 CRSTE Writes", "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays" }, { + "Unit": "CPU-M-CF", "EventCode": "150", "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES", "BriefDescription": "L1D On-Chip L3 Sourced Writes", "PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an On Chip Level-3 cache" }, { + "Unit": "CPU-M-CF", "EventCode": "152", "EventName": "L1D_OFFCHIP_L3_SOURCED_WRITES", "BriefDescription": "L1D Off-Chip L3 Sourced Writes", "PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache" }, { + "Unit": "CPU-M-CF", "EventCode": "153", "EventName": "L1I_ONCHIP_L3_SOURCED_WRITES", "BriefDescription": "L1I On-Chip L3 Sourced Writes", "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an On Chip Level-3 cache" }, { + "Unit": "CPU-M-CF", "EventCode": "155", "EventName": "L1I_OFFCHIP_L3_SOURCED_WRITES", "BriefDescription": "L1I Off-Chip L3 Sourced Writes", diff --git a/tools/perf/pmu-events/arch/s390/cf_zec12/basic.json b/tools/perf/pmu-events/arch/s390/cf_zec12/basic.json index 8bf16759ca53..2dd8dafff2ef 100644 --- a/tools/perf/pmu-events/arch/s390/cf_zec12/basic.json +++ b/tools/perf/pmu-events/arch/s390/cf_zec12/basic.json @@ -1,71 +1,83 @@ [ { + "Unit": "CPU-M-CF", "EventCode": "0", "EventName": "CPU_CYCLES", "BriefDescription": "CPU Cycles", "PublicDescription": "Cycle Count" }, { + "Unit": "CPU-M-CF", "EventCode": "1", "EventName": "INSTRUCTIONS", "BriefDescription": "Instructions", "PublicDescription": "Instruction Count" }, { + "Unit": "CPU-M-CF", "EventCode": "2", "EventName": "L1I_DIR_WRITES", "BriefDescription": "L1I Directory Writes", "PublicDescription": "Level-1 I-Cache Directory Write Count" }, { + "Unit": "CPU-M-CF", "EventCode": "3", "EventName": "L1I_PENALTY_CYCLES", "BriefDescription": "L1I Penalty Cycles", "PublicDescription": "Level-1 I-Cache Penalty Cycle Count" }, { + "Unit": "CPU-M-CF", "EventCode": "4", "EventName": "L1D_DIR_WRITES", "BriefDescription": "L1D Directory Writes", "PublicDescription": "Level-1 D-Cache Directory Write Count" }, { + "Unit": "CPU-M-CF", "EventCode": "5", "EventName": "L1D_PENALTY_CYCLES", "BriefDescription": "L1D Penalty Cycles", "PublicDescription": "Level-1 D-Cache Penalty Cycle Count" }, { + "Unit": "CPU-M-CF", "EventCode": "32", "EventName": "PROBLEM_STATE_CPU_CYCLES", "BriefDescription": "Problem-State CPU Cycles", "PublicDescription": "Problem-State Cycle Count" }, { + "Unit": "CPU-M-CF", "EventCode": "33", "EventName": "PROBLEM_STATE_INSTRUCTIONS", "BriefDescription": 
"Problem-State Instructions", "PublicDescription": "Problem-State Instruction Count" }, { + "Unit": "CPU-M-CF", "EventCode": "34", "EventName": "PROBLEM_STATE_L1I_DIR_WRITES", "BriefDescription": "Problem-State L1I Directory Writes", "PublicDescription": "Problem-State Level-1 I-Cache Directory Write Count" }, { + "Unit": "CPU-M-CF", "EventCode": "35", "EventName": "PROBLEM_STATE_L1I_PENALTY_CYCLES", "BriefDescription": "Problem-State L1I Penalty Cycles", "PublicDescription": "Problem-State Level-1 I-Cache Penalty Cycle Count" }, { + "Unit": "CPU-M-CF", "EventCode": "36", "EventName": "PROBLEM_STATE_L1D_DIR_WRITES", "BriefDescription": "Problem-State L1D Directory Writes", "PublicDescription": "Problem-State Level-1 D-Cache Directory Write Count" }, { + "Unit": "CPU-M-CF", "EventCode": "37", "EventName": "PROBLEM_STATE_L1D_PENALTY_CYCLES", "BriefDescription": "Problem-State L1D Penalty Cycles", diff --git a/tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json b/tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json index 7e5b72492141..db286f19e7b6 100644 --- a/tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json +++ b/tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json @@ -1,95 +1,111 @@ [ { + "Unit": "CPU-M-CF", "EventCode": "64", "EventName": "PRNG_FUNCTIONS", "BriefDescription": "PRNG Functions", "PublicDescription": "Total number of the PRNG functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "65", "EventName": "PRNG_CYCLES", "BriefDescription": "PRNG Cycles", "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "66", "EventName": "PRNG_BLOCKED_FUNCTIONS", "BriefDescription": "PRNG Blocked Functions", "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "67", "EventName": "PRNG_BLOCKED_CYCLES", "BriefDescription": "PRNG Blocked Cycles", "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "68", "EventName": "SHA_FUNCTIONS", "BriefDescription": "SHA Functions", "PublicDescription": "Total number of SHA functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "69", "EventName": "SHA_CYCLES", "BriefDescription": "SHA Cycles", "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "70", "EventName": "SHA_BLOCKED_FUNCTIONS", "BriefDescription": "SHA Blocked Functions", "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "71", "EventName": "SHA_BLOCKED_CYCLES", "BriefDescription": "SHA Bloced Cycles", "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "72", "EventName": "DEA_FUNCTIONS", "BriefDescription": "DEA Functions", "PublicDescription": "Total number of the DEA functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "73", "EventName": "DEA_CYCLES", 
"BriefDescription": "DEA Cycles", "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "74", "EventName": "DEA_BLOCKED_FUNCTIONS", "BriefDescription": "DEA Blocked Functions", "PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "75", "EventName": "DEA_BLOCKED_CYCLES", "BriefDescription": "DEA Blocked Cycles", "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "76", "EventName": "AES_FUNCTIONS", "BriefDescription": "AES Functions", "PublicDescription": "Total number of AES functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "77", "EventName": "AES_CYCLES", "BriefDescription": "AES Cycles", "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "78", "EventName": "AES_BLOCKED_FUNCTIONS", "BriefDescription": "AES Blocked Functions", "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU" }, { + "Unit": "CPU-M-CF", "EventCode": "79", "EventName": "AES_BLOCKED_CYCLES", "BriefDescription": "AES Blocked Cycles", diff --git a/tools/perf/pmu-events/arch/s390/cf_zec12/extended.json b/tools/perf/pmu-events/arch/s390/cf_zec12/extended.json index 8682126aabb2..162251037219 100644 --- a/tools/perf/pmu-events/arch/s390/cf_zec12/extended.json +++ b/tools/perf/pmu-events/arch/s390/cf_zec12/extended.json @@ -1,209 +1,244 @@ [ { + "Unit": "CPU-M-CF", "EventCode": "128", "EventName": "DTLB1_MISSES", "BriefDescription": "DTLB1 Misses", "PublicDescription": "Level-1 Data TLB miss in progress. Incremented by one for every cycle a DTLB1 miss is in progress." }, { + "Unit": "CPU-M-CF", "EventCode": "129", "EventName": "ITLB1_MISSES", "BriefDescription": "ITLB1 Misses", "PublicDescription": "Level-1 Instruction TLB miss in progress. Incremented by one for every cycle a ITLB1 miss is in progress." 
}, { + "Unit": "CPU-M-CF", "EventCode": "130", "EventName": "L1D_L2I_SOURCED_WRITES", "BriefDescription": "L1D L2I Sourced Writes", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Instruction cache" }, { + "Unit": "CPU-M-CF", "EventCode": "131", "EventName": "L1I_L2I_SOURCED_WRITES", "BriefDescription": "L1I L2I Sourced Writes", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache" }, { + "Unit": "CPU-M-CF", "EventCode": "132", "EventName": "L1D_L2D_SOURCED_WRITES", "BriefDescription": "L1D L2D Sourced Writes", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache" }, { + "Unit": "CPU-M-CF", "EventCode": "133", "EventName": "DTLB1_WRITES", "BriefDescription": "DTLB1 Writes", "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer" }, { + "Unit": "CPU-M-CF", "EventCode": "135", "EventName": "L1D_LMEM_SOURCED_WRITES", "BriefDescription": "L1D Local Memory Sourced Writes", "PublicDescription": "A directory write to the Level-1 Data cache where the installed cache line was sourced from memory that is attached to the same book as the Data cache (Local Memory)" }, { + "Unit": "CPU-M-CF", "EventCode": "137", "EventName": "L1I_LMEM_SOURCED_WRITES", "BriefDescription": "L1I Local Memory Sourced Writes", "PublicDescription": "A directory write to the Level-1 Instruction cache where the installed cache line was sourced from memory that is attached to the same book as the Instruction cache (Local Memory)" }, { + "Unit": "CPU-M-CF", "EventCode": "138", "EventName": "L1D_RO_EXCL_WRITES", "BriefDescription": "L1D Read-only Exclusive Writes", "PublicDescription": "A directory write to the Level-1 D-Cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line" }, { + "Unit": "CPU-M-CF", "EventCode": "139", "EventName": "DTLB1_HPAGE_WRITES", "BriefDescription": "DTLB1 One-Megabyte Page Writes", "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a one-megabyte page" }, { + "Unit": "CPU-M-CF", "EventCode": "140", "EventName": "ITLB1_WRITES", "BriefDescription": "ITLB1 Writes", "PublicDescription": "A translation entry has been written to the Level-1 Instruction Translation Lookaside Buffer" }, { + "Unit": "CPU-M-CF", "EventCode": "141", "EventName": "TLB2_PTE_WRITES", "BriefDescription": "TLB2 PTE Writes", "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays" }, { + "Unit": "CPU-M-CF", "EventCode": "142", "EventName": "TLB2_CRSTE_HPAGE_WRITES", "BriefDescription": "TLB2 CRSTE One-Megabyte Page Writes", "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays for a one-megabyte large page translation" }, { + "Unit": "CPU-M-CF", "EventCode": "143", "EventName": "TLB2_CRSTE_WRITES", "BriefDescription": "TLB2 CRSTE Writes", "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays" }, { + "Unit": "CPU-M-CF", "EventCode": "144", "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES", "BriefDescription": "L1D On-Chip L3 Sourced Writes", "PublicDescription": "A directory 
write to the Level-1 Data cache directory where the returned cache line was sourced from an On Chip Level-3 cache without intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "145", "EventName": "L1D_OFFCHIP_L3_SOURCED_WRITES", "BriefDescription": "L1D Off-Chip L3 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache without intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "146", "EventName": "L1D_OFFBOOK_L3_SOURCED_WRITES", "BriefDescription": "L1D Off-Book L3 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Book Level-3 cache without intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "147", "EventName": "L1D_ONBOOK_L4_SOURCED_WRITES", "BriefDescription": "L1D On-Book L4 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On Book Level-4 cache" }, { + "Unit": "CPU-M-CF", "EventCode": "148", "EventName": "L1D_OFFBOOK_L4_SOURCED_WRITES", "BriefDescription": "L1D Off-Book L4 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Book Level-4 cache" }, { + "Unit": "CPU-M-CF", "EventCode": "149", "EventName": "TX_NC_TEND", "BriefDescription": "Completed TEND instructions in non-constrained TX mode", "PublicDescription": "A TEND instruction has completed in a nonconstrained transactional-execution mode" }, { + "Unit": "CPU-M-CF", "EventCode": "150", "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_IV", "BriefDescription": "L1D On-Chip L3 Sourced Writes with Intervention", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from a On Chip Level-3 cache with intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "151", "EventName": "L1D_OFFCHIP_L3_SOURCED_WRITES_IV", "BriefDescription": "L1D Off-Chip L3 Sourced Writes with Intervention", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache with intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "152", "EventName": "L1D_OFFBOOK_L3_SOURCED_WRITES_IV", "BriefDescription": "L1D Off-Book L3 Sourced Writes with Intervention", "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Book Level-3 cache with intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "153", "EventName": "L1I_ONCHIP_L3_SOURCED_WRITES", "BriefDescription": "L1I On-Chip L3 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Chip Level-3 cache without intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "154", "EventName": "L1I_OFFCHIP_L3_SOURCED_WRITES", "BriefDescription": "L1I Off-Chip L3 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache without intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "155", "EventName": "L1I_OFFBOOK_L3_SOURCED_WRITES", "BriefDescription": "L1I Off-Book L3 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line 
was sourced from an Off Book Level-3 cache without intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "156", "EventName": "L1I_ONBOOK_L4_SOURCED_WRITES", "BriefDescription": "L1I On-Book L4 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Book Level-4 cache" }, { + "Unit": "CPU-M-CF", "EventCode": "157", "EventName": "L1I_OFFBOOK_L4_SOURCED_WRITES", "BriefDescription": "L1I Off-Book L4 Sourced Writes", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Book Level-4 cache" }, { + "Unit": "CPU-M-CF", "EventCode": "158", "EventName": "TX_C_TEND", "BriefDescription": "Completed TEND instructions in constrained TX mode", "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode" }, { + "Unit": "CPU-M-CF", "EventCode": "159", "EventName": "L1I_ONCHIP_L3_SOURCED_WRITES_IV", "BriefDescription": "L1I On-Chip L3 Sourced Writes with Intervention", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Chip Level-3 cache with intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "160", "EventName": "L1I_OFFCHIP_L3_SOURCED_WRITES_IV", "BriefDescription": "L1I Off-Chip L3 Sourced Writes with Intervention", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache with intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "161", "EventName": "L1I_OFFBOOK_L3_SOURCED_WRITES_IV", "BriefDescription": "L1I Off-Book L3 Sourced Writes with Intervention", "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Book Level-3 cache with intervention" }, { + "Unit": "CPU-M-CF", "EventCode": "177", "EventName": "TX_NC_TABORT", "BriefDescription": "Aborted transactions in non-constrained TX mode", "PublicDescription": "A transaction abort has occurred in a nonconstrained transactional-execution mode" }, { + "Unit": "CPU-M-CF", "EventCode": "178", "EventName": "TX_C_TABORT_NO_SPECIAL", "BriefDescription": "Aborted transactions in constrained TX mode not using special completion logic", "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete" }, { + "Unit": "CPU-M-CF", "EventCode": "179", "EventName": "TX_C_TABORT_SPECIAL", "BriefDescription": "Aborted transactions in constrained TX mode using special completion logic", diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c index db3a594ee1e4..68c92bb599ee 100644 --- a/tools/perf/pmu-events/jevents.c +++ b/tools/perf/pmu-events/jevents.c @@ -233,6 +233,8 @@ static struct map { { "QPI LL", "uncore_qpi" }, { "SBO", "uncore_sbox" }, { "iMPH-U", "uncore_arb" }, + { "CPU-M-CF", "cpum_cf" }, + { "CPU-M-SF", "cpum_sf" }, {} }; -- cgit From 83eb383e942bba7aab465a334d2c365d3f47a0ae Mon Sep 17 00:00:00 2001 From: Thomas Richter Date: Thu, 21 Jun 2018 10:04:51 +0200 Subject: perf json: Add s390 transaction counter definition 'perf stat' displays transactional counters using flag -T on x86. On s390 use a JSON file defined metric named transaction to achieve the same result. 
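For context on the jevents.c hunk just above: the two new map entries are what let the build-time jevents tool translate the "Unit" string used throughout the s390 JSON files into the PMU name the kernel exposes, cpum_cf for the counter facility and cpum_sf for the sampling facility. The self-contained C sketch below only mirrors that table-lookup pattern for illustration; the identifiers unit_to_pmu, unit_map and pmu_name are invented here and are not the names used by jevents.

/*
 * Illustrative sketch only, not jevents code: translate a JSON "Unit"
 * string such as "CPU-M-CF" to a perf PMU name such as "cpum_cf"
 * using a NULL-terminated map table, following the pattern of the
 * map added in jevents.c above.
 */
#include <stdio.h>
#include <string.h>

struct unit_to_pmu {
	const char *json_unit;	/* "Unit" field from the JSON event file */
	const char *perf_pmu;	/* PMU name under /sys/bus/event_source/devices */
};

static const struct unit_to_pmu unit_map[] = {
	{ "CPU-M-CF", "cpum_cf" },	/* s390 CPU-measurement counter facility */
	{ "CPU-M-SF", "cpum_sf" },	/* s390 CPU-measurement sampling facility */
	{ NULL, NULL }
};

/* Return the PMU name for a JSON unit, or the unit itself if unmapped. */
static const char *pmu_name(const char *json_unit)
{
	const struct unit_to_pmu *m;

	for (m = unit_map; m->json_unit; m++) {
		if (!strcmp(m->json_unit, json_unit))
			return m->perf_pmu;
	}
	return json_unit;
}

int main(void)
{
	printf("CPU-M-CF -> %s\n", pmu_name("CPU-M-CF"));
	return 0;
}

The before/after output for the new transaction metric follows.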
Output before: none Output after: [root@s35lp76 perf]# ./perf stat -M transaction -- \ ~/mytesttx 1 >/tmp/111 Performance counter stats for '/root/mytesttx 1': 1 tx_c_tend # 13.0 transaction 1 tx_nc_tend 11 tx_nc_tabort 0 tx_c_tabort_special 0 tx_c_tabort_no_special 0.001061232 seconds time elapsed [root@s35lp76 perf]# Suggested-by: Andi Kleen Signed-off-by: Thomas Richter Reviewed-by: Hendrik Brueckner Cc: Heiko Carstens Cc: Martin Schwidefsky Link: http://lkml.kernel.org/r/20180621080452.61012-3-tmricht@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/pmu-events/arch/s390/cf_z13/transaction.json | 7 +++++++ tools/perf/pmu-events/arch/s390/cf_z14/transaction.json | 7 +++++++ tools/perf/pmu-events/arch/s390/cf_zec12/transaction.json | 7 +++++++ 3 files changed, 21 insertions(+) create mode 100644 tools/perf/pmu-events/arch/s390/cf_z13/transaction.json create mode 100644 tools/perf/pmu-events/arch/s390/cf_z14/transaction.json create mode 100644 tools/perf/pmu-events/arch/s390/cf_zec12/transaction.json diff --git a/tools/perf/pmu-events/arch/s390/cf_z13/transaction.json b/tools/perf/pmu-events/arch/s390/cf_z13/transaction.json new file mode 100644 index 000000000000..1a0034f79f73 --- /dev/null +++ b/tools/perf/pmu-events/arch/s390/cf_z13/transaction.json @@ -0,0 +1,7 @@ +[ + { + "BriefDescription": "Transaction count", + "MetricName": "transaction", + "MetricExpr": "TX_C_TEND + TX_NC_TEND + TX_NC_TABORT + TX_C_TABORT_SPECIAL + TX_C_TABORT_NO_SPECIAL" + } +] diff --git a/tools/perf/pmu-events/arch/s390/cf_z14/transaction.json b/tools/perf/pmu-events/arch/s390/cf_z14/transaction.json new file mode 100644 index 000000000000..1a0034f79f73 --- /dev/null +++ b/tools/perf/pmu-events/arch/s390/cf_z14/transaction.json @@ -0,0 +1,7 @@ +[ + { + "BriefDescription": "Transaction count", + "MetricName": "transaction", + "MetricExpr": "TX_C_TEND + TX_NC_TEND + TX_NC_TABORT + TX_C_TABORT_SPECIAL + TX_C_TABORT_NO_SPECIAL" + } +] diff --git a/tools/perf/pmu-events/arch/s390/cf_zec12/transaction.json b/tools/perf/pmu-events/arch/s390/cf_zec12/transaction.json new file mode 100644 index 000000000000..1a0034f79f73 --- /dev/null +++ b/tools/perf/pmu-events/arch/s390/cf_zec12/transaction.json @@ -0,0 +1,7 @@ +[ + { + "BriefDescription": "Transaction count", + "MetricName": "transaction", + "MetricExpr": "TX_C_TEND + TX_NC_TEND + TX_NC_TABORT + TX_C_TABORT_SPECIAL + TX_C_TABORT_NO_SPECIAL" + } +] -- cgit From 742d92ff219f3aa7a67c184a57acfa8d88936cd6 Mon Sep 17 00:00:00 2001 From: Thomas Richter Date: Tue, 26 Jun 2018 09:17:01 +0200 Subject: perf stat: Add transaction flag (-T) support for s390 The 'perf stat' command line flag -T to display transaction counters is currently supported for x86 only. Add support for s390. It is based on the metrics flag -M transaction using the architecture dependent JSON files. This requires a metric named "transaction" in the JSON files for the platform. Introduce a new function metricgroup__has_metric() to check for the existence of a metric_name transaction. As suggested by Andi Kleen, this is the new approach to support transactions counters. Other architectures will follow. 
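Conceptually, the new check only has to answer one question: does the generated event table for this architecture define a metric named "transaction"? The toy sketch below illustrates that idea under the assumption of a heavily trimmed table; struct metric, metrics[] and has_metric() are names invented for this sketch, and the real lookup (the metricgroup.c hunk further down) walks the pmu_events map that jevents generates from the JSON files.

/*
 * Toy version of the "does this architecture define the metric?" test.
 * The table below stands in for the generated s390 metric table and
 * contains just the transaction metric added in the previous patch.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct metric {
	const char *metric_name;
	const char *metric_expr;
};

static const struct metric metrics[] = {
	{ "transaction",
	  "TX_C_TEND + TX_NC_TEND + TX_NC_TABORT + TX_C_TABORT_SPECIAL + TX_C_TABORT_NO_SPECIAL" },
	{ NULL, NULL }
};

static bool has_metric(const char *name)
{
	const struct metric *m;

	for (m = metrics; m->metric_name; m++) {
		if (!strcmp(m->metric_name, name))
			return true;
	}
	return false;
}

int main(void)
{
	/* 'perf stat -T' takes the metric path only when this is true. */
	printf("transaction metric available: %s\n",
	       has_metric("transaction") ? "yes" : "no");
	return 0;
}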
Output before: [root@p23lp27 perf]# ./perf stat -T -- sleep 1 Cannot set up transaction events [root@p23lp27 perf]# Output after: [root@s35lp76 perf]# ./perf stat -T -- ~/mytesttx 1 >/tmp/111 Performance counter stats for '/root/mytesttx 1': 1 tx_c_tend # 13.0 transaction 1 tx_nc_tend 11 tx_nc_tabort 0 tx_c_tabort_special 0 tx_c_tabort_no_special 0.001070109 seconds time elapsed [root@s35lp76 perf]# Suggested-by: Andi Kleen Signed-off-by: Thomas Richter Reviewed-by: Hendrik Brueckner Acked-by: Andi Kleen Acked-by: Jiri Olsa Cc: Heiko Carstens Cc: Martin Schwidefsky Link: http://lkml.kernel.org/r/20180626071701.58190-1-tmricht@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-stat.c | 12 ++++++++++++ tools/perf/util/metricgroup.c | 22 ++++++++++++++++++++++ tools/perf/util/metricgroup.h | 1 + 3 files changed, 35 insertions(+) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 05be023c3f0e..dfd13d6e2931 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -2449,6 +2449,18 @@ static int add_default_attributes(void) return 0; if (transaction_run) { + /* Handle -T as -M transaction. Once platform specific metrics + * support has been added to the json files, all archictures + * will use this approach. To determine transaction support + * on an architecture test for such a metric name. + */ + if (metricgroup__has_metric("transaction")) { + struct option opt = { .value = &evsel_list }; + + return metricgroup__parse_groups(&opt, "transaction", + &metric_events); + } + if (pmu_have_event("cpu", "cycles-ct") && pmu_have_event("cpu", "el-start")) err = parse_events(evsel_list, transaction_attrs, diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c index 1ddc3d1d0147..96eab4ec34ff 100644 --- a/tools/perf/util/metricgroup.c +++ b/tools/perf/util/metricgroup.c @@ -490,3 +490,25 @@ out: metricgroup__free_egroups(&group_list); return ret; } + +bool metricgroup__has_metric(const char *metric) +{ + struct pmu_events_map *map = perf_pmu__find_map(NULL); + struct pmu_event *pe; + int i; + + if (!map) + return false; + + for (i = 0; ; i++) { + pe = &map->table[i]; + + if (!pe->name && !pe->metric_group && !pe->metric_name) + break; + if (!pe->metric_expr) + continue; + if (match_metric(pe->metric_name, metric)) + return true; + } + return false; +} diff --git a/tools/perf/util/metricgroup.h b/tools/perf/util/metricgroup.h index 06854e125ee7..8a155dba0581 100644 --- a/tools/perf/util/metricgroup.h +++ b/tools/perf/util/metricgroup.h @@ -28,4 +28,5 @@ int metricgroup__parse_groups(const struct option *opt, struct rblist *metric_events); void metricgroup__print(bool metrics, bool groups, char *filter, bool raw); +bool metricgroup__has_metric(const char *metric); #endif -- cgit From 8a95c8994509c55abf1e38c0cc037b1205725e21 Mon Sep 17 00:00:00 2001 From: Thomas Richter Date: Thu, 12 Jul 2018 09:09:36 +0200 Subject: perf kvm: Fix subcommands on s390 With commit eca0fa28cd0d ("perf record: Provide detailed information on s390 CPU") s390 platform provides detailed type/model/capacity information in the CPU identifier string instead of just "IBM/S390". This breaks 'perf kvm' support which uses hard coded string IBM/S390 to compare with the CPU identifier string. Fix this by changing the comparison. 
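A minimal sketch of the relaxed comparison follows. The two identifier strings are illustrative stand-ins only (the exact detailed s390 CPU identifier format is not quoted in this patch); the point is that matching the "IBM" manufacturer prefix covers both the old bare "IBM/S390" string and the newer detailed type/model/capacity string.

/*
 * Sketch of the cpuid check discussed above, with made-up example
 * identifier strings. Only the "IBM" prefix is required, so both the
 * old and the new identifier formats select the SIE exit reasons.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool is_s390_sie_cpuid(const char *cpuid)
{
	/* Matches "IBM/S390" as well as a detailed "IBM,..." identifier. */
	return strstr(cpuid, "IBM") != NULL;
}

int main(void)
{
	const char *old_style = "IBM/S390";                   /* pre-change identifier */
	const char *new_style = "IBM,2964,704,N96,3.5,002f";  /* hypothetical detailed identifier */

	printf("old: %d new: %d\n",
	       is_s390_sie_cpuid(old_style), is_s390_sie_cpuid(new_style));
	return 0;
}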
Reported-by: Stefan Raspl Signed-off-by: Thomas Richter Reviewed-by: Hendrik Brueckner Tested-by: Stefan Raspl Acked-by: Christian Borntraeger Cc: Heiko Carstens Cc: Martin Schwidefsky Cc: stable@vger.kernel.org Fixes: eca0fa28cd0d ("perf record: Provide detailed information on s390 CPU") Link: http://lkml.kernel.org/r/20180712070936.67547-1-tmricht@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/arch/s390/util/kvm-stat.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/arch/s390/util/kvm-stat.c b/tools/perf/arch/s390/util/kvm-stat.c index d233e2eb9592..aaabab5e2830 100644 --- a/tools/perf/arch/s390/util/kvm-stat.c +++ b/tools/perf/arch/s390/util/kvm-stat.c @@ -102,7 +102,7 @@ const char * const kvm_skip_events[] = { int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid) { - if (strstr(cpuid, "IBM/S390")) { + if (strstr(cpuid, "IBM")) { kvm->exit_reasons = sie_exit_reasons; kvm->exit_reasons_isa = "SIE"; } else -- cgit From 6feb3fec519ed112f7a77af4e51e51a35148d01c Mon Sep 17 00:00:00 2001 From: Sangwon Hong Date: Tue, 17 Jul 2018 20:07:38 +0900 Subject: perf list: Add missing documentation for --desc and --debug options Add missing documentation for --desc and --debug options to the 'perf list' man page. Signed-off-by: Sangwon Hong Cc: Andi Kleen Cc: Jiri Olsa Cc: Namhyung Kim Link: http://lkml.kernel.org/r/20180717110738.10779-1-qpakzk@gmail.com [ Clarify that --desc is by default active ] Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-list.txt | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt index 14e13512c05f..236b9b97dfdb 100644 --- a/tools/perf/Documentation/perf-list.txt +++ b/tools/perf/Documentation/perf-list.txt @@ -18,6 +18,10 @@ various perf commands with the -e option. OPTIONS ------- +-d:: +--desc:: +Print extra event descriptions. (default) + --no-desc:: Don't print descriptions. @@ -25,11 +29,13 @@ Don't print descriptions. --long-desc:: Print longer event descriptions. +--debug:: +Enable debugging output. + --details:: Print how named events are resolved internally into perf events, and also any extra expressions computed by perf stat. - [[EVENT_MODIFIERS]] EVENT MODIFIERS --------------- -- cgit From c715fcfda5a08edabaa15508742be926b7ee51db Mon Sep 17 00:00:00 2001 From: Sandipan Das Date: Tue, 10 Jul 2018 19:28:13 +0530 Subject: perf powerpc: Fix callchain ip filtering For powerpc64, redundant entries in the callchain are filtered out by determining the state of the return address and the stack frame using DWARF debug information. For making these filtering decisions we must analyze the debug information for the location corresponding to the program counter value, i.e. the first entry in the callchain, and not the LR value; otherwise, perf may filter out either the second or the third entry in the callchain incorrectly. This can be observed on a powerpc64le system running Fedora 27 as shown below. Case 1 - Attaching a probe at inet_pton+0x8 (binary offset 0x15af28). Return address is still in LR and a new stack frame is not yet allocated. The LR value, i.e. the second entry, should not be filtered out. # objdump -d /usr/lib64/libc-2.26.so | less ... 000000000010eb10 : ... 10fa48: 78 bb e4 7e mr r4,r23 10fa4c: 0a 00 60 38 li r3,10 10fa50: d9 b4 04 48 bl 15af28 10fa54: 00 00 00 60 nop 10fa58: ac f4 ff 4b b 10ef04 ... 0000000000110450 : ... 
1105a8: 54 00 ff 38 addi r7,r31,84 1105ac: 58 00 df 38 addi r6,r31,88 1105b0: 69 e5 ff 4b bl 10eb18 1105b4: 78 1b 71 7c mr r17,r3 1105b8: 50 01 7f e8 ld r3,336(r31) ... 000000000015af20 : 15af20: 0b 00 4c 3c addis r2,r12,11 15af24: e0 c1 42 38 addi r2,r2,-15904 15af28: a6 02 08 7c mflr r0 15af2c: f0 ff c1 fb std r30,-16(r1) 15af30: f8 ff e1 fb std r31,-8(r1) ... # perf probe -x /usr/lib64/libc-2.26.so -a inet_pton+0x8 # perf record -e probe_libc:inet_pton -g ping -6 -c 1 ::1 # perf script Before: ping 4507 [002] 514985.546540: probe_libc:inet_pton: (7fffa7dbaf28) 7fffa7dbaf28 __GI___inet_pton+0x8 (/usr/lib64/libc-2.26.so) 7fffa7d705b4 getaddrinfo+0x164 (/usr/lib64/libc-2.26.so) 13fb52d70 _init+0xbfc (/usr/bin/ping) 7fffa7c836a0 generic_start_main.isra.0+0x140 (/usr/lib64/libc-2.26.so) 7fffa7c83898 __libc_start_main+0xb8 (/usr/lib64/libc-2.26.so) 0 [unknown] ([unknown]) After: ping 4507 [002] 514985.546540: probe_libc:inet_pton: (7fffa7dbaf28) 7fffa7dbaf28 __GI___inet_pton+0x8 (/usr/lib64/libc-2.26.so) 7fffa7d6fa54 gaih_inet.constprop.7+0xf44 (/usr/lib64/libc-2.26.so) 7fffa7d705b4 getaddrinfo+0x164 (/usr/lib64/libc-2.26.so) 13fb52d70 _init+0xbfc (/usr/bin/ping) 7fffa7c836a0 generic_start_main.isra.0+0x140 (/usr/lib64/libc-2.26.so) 7fffa7c83898 __libc_start_main+0xb8 (/usr/lib64/libc-2.26.so) 0 [unknown] ([unknown]) Case 2 - Attaching a probe at _int_malloc+0x180 (binary offset 0x9cf10). Return address in still in LR and a new stack frame has already been allocated but not used. The caller's caller, i.e. the third entry, is invalid and should be filtered out and not the second one. # objdump -d /usr/lib64/libc-2.26.so | less ... 000000000009cd90 <_int_malloc>: 9cd90: 17 00 4c 3c addis r2,r12,23 9cd94: 70 a3 42 38 addi r2,r2,-23696 9cd98: 26 00 80 7d mfcr r12 9cd9c: f8 ff e1 fb std r31,-8(r1) 9cda0: 17 00 e4 3b addi r31,r4,23 9cda4: d8 ff 61 fb std r27,-40(r1) 9cda8: 78 23 9b 7c mr r27,r4 9cdac: 1f 00 bf 2b cmpldi cr7,r31,31 9cdb0: f0 ff c1 fb std r30,-16(r1) 9cdb4: b0 ff c1 fa std r22,-80(r1) 9cdb8: 78 1b 7e 7c mr r30,r3 9cdbc: 08 00 81 91 stw r12,8(r1) 9cdc0: 11 ff 21 f8 stdu r1,-240(r1) 9cdc4: 4c 01 9d 41 bgt cr7,9cf10 <_int_malloc+0x180> 9cdc8: 20 00 a4 2b cmpldi cr7,r4,32 ... 9cf08: 00 00 00 60 nop 9cf0c: 00 00 42 60 ori r2,r2,0 9cf10: e4 06 ff 7b rldicr r31,r31,0,59 9cf14: 40 f8 a4 7f cmpld cr7,r4,r31 9cf18: 68 05 9d 41 bgt cr7,9d480 <_int_malloc+0x6f0> ... 000000000009e3c0 : ... 9e420: 40 02 80 38 li r4,576 9e424: 78 fb e3 7f mr r3,r31 9e428: 71 e9 ff 4b bl 9cd98 <_int_malloc+0x8> 9e42c: 00 00 a3 2f cmpdi cr7,r3,0 9e430: 78 1b 7e 7c mr r30,r3 ... 000000000009f7a0 <__libc_malloc>: ... 9f8f8: 00 00 89 2f cmpwi cr7,r9,0 9f8fc: 1c ff 9e 40 bne cr7,9f818 <__libc_malloc+0x78> 9f900: c9 ea ff 4b bl 9e3c8 9f904: 00 00 00 60 nop 9f908: e8 90 22 e9 ld r9,-28440(r2) ... 
# perf probe -x /usr/lib64/libc-2.26.so -a _int_malloc+0x180 # perf record -e probe_libc:_int_malloc -g ./test-malloc # perf script Before: test-malloc 6554 [009] 515975.797403: probe_libc:_int_malloc: (7fffa6e6cf10) 7fffa6e6cf10 _int_malloc+0x180 (/usr/lib64/libc-2.26.so) 7fffa6dd0000 [unknown] (/usr/lib64/libc-2.26.so) 7fffa6e6f904 malloc+0x164 (/usr/lib64/libc-2.26.so) 7fffa6e6f9fc malloc+0x25c (/usr/lib64/libc-2.26.so) 100006b4 main+0x38 (/home/testuser/test-malloc) 7fffa6df36a0 generic_start_main.isra.0+0x140 (/usr/lib64/libc-2.26.so) 7fffa6df3898 __libc_start_main+0xb8 (/usr/lib64/libc-2.26.so) 0 [unknown] ([unknown]) After: test-malloc 6554 [009] 515975.797403: probe_libc:_int_malloc: (7fffa6e6cf10) 7fffa6e6cf10 _int_malloc+0x180 (/usr/lib64/libc-2.26.so) 7fffa6e6e42c tcache_init.part.4+0x6c (/usr/lib64/libc-2.26.so) 7fffa6e6f904 malloc+0x164 (/usr/lib64/libc-2.26.so) 7fffa6e6f9fc malloc+0x25c (/usr/lib64/libc-2.26.so) 100006b4 main+0x38 (/home/sandipan/test-malloc) 7fffa6df36a0 generic_start_main.isra.0+0x140 (/usr/lib64/libc-2.26.so) 7fffa6df3898 __libc_start_main+0xb8 (/usr/lib64/libc-2.26.so) 0 [unknown] ([unknown]) Signed-off-by: Sandipan Das Cc: Jiri Olsa Cc: Maynard Johnson Cc: Naveen N. Rao Cc: Ravi Bangoria Cc: Sukadev Bhattiprolu Fixes: a60335ba3298 ("perf tools powerpc: Adjust callchain based on DWARF debug info") Link: http://lkml.kernel.org/r/24bb726d91ed173aebc972ec3f41a2ef2249434e.1530724939.git.sandipan@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/arch/powerpc/util/skip-callchain-idx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/arch/powerpc/util/skip-callchain-idx.c b/tools/perf/arch/powerpc/util/skip-callchain-idx.c index ef5d59a5742e..264fc0158100 100644 --- a/tools/perf/arch/powerpc/util/skip-callchain-idx.c +++ b/tools/perf/arch/powerpc/util/skip-callchain-idx.c @@ -246,7 +246,7 @@ int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain) if (!chain || chain->nr < 3) return skip_slot; - ip = chain->ips[2]; + ip = chain->ips[1]; thread__find_symbol(thread, PERF_RECORD_MISC_USER, ip, &al); -- cgit From 9068533e4f470daf2b0f29c71d865990acd8826e Mon Sep 17 00:00:00 2001 From: Sandipan Das Date: Tue, 10 Jul 2018 19:28:14 +0530 Subject: perf powerpc: Fix callchain ip filtering when return address is in a register For powerpc64, perf will filter out the second entry in the callchain, i.e. the LR value, if the return address of the function corresponding to the probed location has already been saved on its caller's stack. The state of the return address is determined using debug information. At any point within a function, if the return address is already saved somewhere, a DWARF expression can tell us about its location. If the return address in still in LR only, no DWARF expression would exist. Typically, the instructions in a function's prologue first copy the LR value to R0 and then pushes R0 on to the stack. If LR has already been copied to R0 but R0 is yet to be pushed to the stack, we can still get a DWARF expression that says that the return address is in R0. This is indicating that getting a DWARF expression for the return address does not guarantee the fact that it has already been saved on the stack. This can be observed on a powerpc64le system running Fedora 27 as shown below. # objdump -d /usr/lib64/libc-2.26.so | less ... 
000000000015af20 : 15af20: 0b 00 4c 3c addis r2,r12,11 15af24: e0 c1 42 38 addi r2,r2,-15904 15af28: a6 02 08 7c mflr r0 15af2c: f0 ff c1 fb std r30,-16(r1) 15af30: f8 ff e1 fb std r31,-8(r1) 15af34: 78 1b 7f 7c mr r31,r3 15af38: 78 23 83 7c mr r3,r4 15af3c: 78 2b be 7c mr r30,r5 15af40: 10 00 01 f8 std r0,16(r1) 15af44: c1 ff 21 f8 stdu r1,-64(r1) 15af48: 28 00 81 f8 std r4,40(r1) ... # readelf --debug-dump=frames-interp /usr/lib64/libc-2.26.so | less ... 00027024 0000000000000024 00027028 FDE cie=00000000 pc=000000000015af20..000000000015af88 LOC CFA r30 r31 ra 000000000015af20 r1+0 u u u 000000000015af34 r1+0 c-16 c-8 r0 000000000015af48 r1+64 c-16 c-8 c+16 000000000015af5c r1+0 c-16 c-8 c+16 000000000015af78 r1+0 u u ... # perf probe -x /usr/lib64/libc-2.26.so -a inet_pton+0x18 # perf record -e probe_libc:inet_pton -g ping -6 -c 1 ::1 # perf script Before: ping 2829 [005] 512917.460174: probe_libc:inet_pton: (7fff7e2baf38) 7fff7e2baf38 __GI___inet_pton+0x18 (/usr/lib64/libc-2.26.so) 7fff7e2705b4 getaddrinfo+0x164 (/usr/lib64/libc-2.26.so) 12f152d70 _init+0xbfc (/usr/bin/ping) 7fff7e1836a0 generic_start_main.isra.0+0x140 (/usr/lib64/libc-2.26.so) 7fff7e183898 __libc_start_main+0xb8 (/usr/lib64/libc-2.26.so) 0 [unknown] ([unknown]) After: ping 2829 [005] 512917.460174: probe_libc:inet_pton: (7fff7e2baf38) 7fff7e2baf38 __GI___inet_pton+0x18 (/usr/lib64/libc-2.26.so) 7fff7e26fa54 gaih_inet.constprop.7+0xf44 (/usr/lib64/libc-2.26.so) 7fff7e2705b4 getaddrinfo+0x164 (/usr/lib64/libc-2.26.so) 12f152d70 _init+0xbfc (/usr/bin/ping) 7fff7e1836a0 generic_start_main.isra.0+0x140 (/usr/lib64/libc-2.26.so) 7fff7e183898 __libc_start_main+0xb8 (/usr/lib64/libc-2.26.so) 0 [unknown] ([unknown]) Reported-by: Ravi Bangoria Signed-off-by: Sandipan Das Cc: Jiri Olsa Cc: Maynard Johnson Cc: Naveen N. Rao Cc: Ravi Bangoria Cc: Sukadev Bhattiprolu Link: http://lkml.kernel.org/r/66e848a7bdf2d43b39210a705ff6d828a0865661.1530724939.git.sandipan@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/arch/powerpc/util/skip-callchain-idx.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tools/perf/arch/powerpc/util/skip-callchain-idx.c b/tools/perf/arch/powerpc/util/skip-callchain-idx.c index 264fc0158100..7c6eeb4633fe 100644 --- a/tools/perf/arch/powerpc/util/skip-callchain-idx.c +++ b/tools/perf/arch/powerpc/util/skip-callchain-idx.c @@ -58,9 +58,13 @@ static int check_return_reg(int ra_regno, Dwarf_Frame *frame) } /* - * Check if return address is on the stack. + * Check if return address is on the stack. If return address + * is in a register (typically R0), it is yet to be saved on + * the stack. */ - if (nops != 0 || ops != NULL) + if ((nops != 0 || ops != NULL) && + !(nops == 1 && ops[0].atom == DW_OP_regx && + ops[0].number2 == 0 && ops[0].offset == 0)) return 0; /* -- cgit From 3eae52f842329a95f8549124079518231c0daba8 Mon Sep 17 00:00:00 2001 From: Sandipan Das Date: Tue, 10 Jul 2018 19:28:15 +0530 Subject: perf tests: Fix record+probe_libc_inet_pton.sh for powerpc64 For powerpc64, this test currently fails due to a mismatch in the expected output. This can be observed on a powerpc64le system running Fedora 27 as shown below. 
# perf test -v "probe libc's inet_pton & backtrace it with ping" Before: 62: probe libc's inet_pton & backtrace it with ping : --- start --- test child forked, pid 23948 ping 23965 [003] 71136.075084: probe_libc:inet_pton: (7fff996aaf28) 7fff996aaf28 __GI___inet_pton+0x8 (/usr/lib64/libc-2.26.so) 7fff9965fa54 gaih_inet.constprop.7+0xf44 (/usr/lib64/libc-2.26.so) FAIL: expected backtrace entry 2 "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\(/usr/lib64/libc-2.26.so\)$" got "7fff9965fa54 gaih_inet.constprop.7+0xf44 (/usr/lib64/libc-2.26.so)" test child finished with -1 ---- end ---- probe libc's inet_pton & backtrace it with ping: FAILED! After: 62: probe libc's inet_pton & backtrace it with ping : --- start --- test child forked, pid 24638 ping 24655 [001] 71208.525396: probe_libc:inet_pton: (7fffa245af28) 7fffa245af28 __GI___inet_pton+0x8 (/usr/lib64/libc-2.26.so) 7fffa240fa54 gaih_inet.constprop.7+0xf44 (/usr/lib64/libc-2.26.so) 7fffa24105b4 getaddrinfo+0x164 (/usr/lib64/libc-2.26.so) 138d52d70 main+0x3e0 (/usr/bin/ping) test child finished with 0 ---- end ---- probe libc's inet_pton & backtrace it with ping: Ok Signed-off-by: Sandipan Das Cc: Jiri Olsa Cc: Kim Phillips Cc: Maynard Johnson Cc: Naveen N. Rao Cc: Ravi Bangoria Cc: Sukadev Bhattiprolu Fixes: e07d585e2454 ("perf tests: Switch trace+probe_libc_inet_pton to use record") Link: http://lkml.kernel.org/r/49621ec5f37109f0655e5a8c32287ad68d85a1e5.1530724939.git.sandipan@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/shell/record+probe_libc_inet_pton.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh index 94e513e62b34..1220e5e052bb 100755 --- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh +++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh @@ -26,6 +26,12 @@ trace_libc_inet_pton_backtrace() { echo "(__GI_)?getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected echo "main\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" >> $expected ;; + ppc64|ppc64le) + eventattr='max-stack=4' + echo "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected + echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected + echo ".*\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" >> $expected + ;; *) eventattr='max-stack=3' echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected -- cgit From 83e3b6d73e66a10088f362b08b99c36fec3a14e7 Mon Sep 17 00:00:00 2001 From: Sandipan Das Date: Tue, 10 Jul 2018 19:28:16 +0530 Subject: perf tests: Fix record+probe_libc_inet_pton.sh to ensure cleanups If there is a mismatch in the perf script output, this test fails and exits before the event and temporary files created during its execution are cleaned up. This can be observed on a powerpc64 system running Fedora 27 as shown below. 
# perf test -v "probe libc's inet_pton & backtrace it with ping" 62: probe libc's inet_pton & backtrace it with ping : --- start --- test child forked, pid 18655 ping 18674 [013] 24511.496995: probe_libc:inet_pton: (7fffa6b423b0) 7fffa6b423b0 __GI___inet_pton+0x0 (/usr/lib64/power8/libc-2.26.so) 7fffa6af90dc gaih_inet.constprop.7+0xf4c (/usr/lib64/power8/libc-2.26.so) FAIL: expected backtrace entry "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\(/usr/lib64/power8/libc-2.26.so\)$" got "7fffa6af90dc gaih_inet.constprop.7+0xf4c (/usr/lib64/power8/libc-2.26.so)" test child finished with -1 ---- end ---- probe libc's inet_pton & backtrace it with ping: FAILED! # ls /tmp/expected.* /tmp/perf.data.* /tmp/perf.script.* /tmp/expected.u31 /tmp/perf.data.Pki /tmp/perf.script.Bhs # perf probe --list probe_libc:inet_pton (on __inet_pton@resolv/inet_pton.c in /usr/lib64/power8/libc-2.26.so) Cleanup of the event and the temporary files are now ensured by allowing the cleanup code to be executed even if the lines from the backtrace do not match their expected patterns instead of simply exiting from the point of failure. Signed-off-by: Sandipan Das Cc: Jiri Olsa Cc: Kim Phillips Cc: Naveen N. Rao Cc: Ravi Bangoria Link: http://lkml.kernel.org/r/ce9fb091dd3028fba8749a1a267cfbcb264bbfb1.1530724939.git.sandipan@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/shell/record+probe_libc_inet_pton.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh index 1220e5e052bb..0502a9e04c79 100755 --- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh +++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh @@ -52,7 +52,7 @@ trace_libc_inet_pton_backtrace() { echo "$line" | egrep -q "$pattern" if [ $? -ne 0 ] ; then printf "FAIL: expected backtrace entry \"%s\" got \"%s\"\n" "$pattern" "$line" - exit 1 + return 1 fi done -- cgit From 60089e42d38438772e2f83334e3e5b7497009366 Mon Sep 17 00:00:00 2001 From: Sandipan Das Date: Tue, 10 Jul 2018 19:28:17 +0530 Subject: perf tests: Fix record+probe_libc_inet_pton.sh when event exists If the event 'probe_libc:inet_pton' already exists, this test fails and deletes the existing event before exiting. This will then pass for any subsequent executions. Instead of skipping to deleting the existing event because of failing to add a new event, a duplicate event is now created and the script continues with the usual checks. Only the new duplicate event that is created at the beginning of the test is deleted as a part of the cleanups in the end. All existing events remain as it is. This can be observed on a powerpc64 system running Fedora 27 as shown below. # perf probe -x /usr/lib64/power8/libc-2.26.so -a inet_pton Added new event: probe_libc:inet_pton (on inet_pton in /usr/lib64/power8/libc-2.26.so) Before: # perf test -v "probe libc's inet_pton & backtrace it with ping" 62: probe libc's inet_pton & backtrace it with ping : --- start --- test child forked, pid 21302 test child finished with -1 ---- end ---- probe libc's inet_pton & backtrace it with ping: FAILED! 
# perf probe --list After: # perf test -v "probe libc's inet_pton & backtrace it with ping" 62: probe libc's inet_pton & backtrace it with ping : --- start --- test child forked, pid 21490 ping 21513 [035] 39357.565561: probe_libc:inet_pton_1: (7fffa4c623b0) 7fffa4c623b0 __GI___inet_pton+0x0 (/usr/lib64/power8/libc-2.26.so) 7fffa4c190dc gaih_inet.constprop.7+0xf4c (/usr/lib64/power8/libc-2.26.so) 7fffa4c19c4c getaddrinfo+0x15c (/usr/lib64/power8/libc-2.26.so) 111d93c20 main+0x3e0 (/usr/bin/ping) test child finished with 0 ---- end ---- probe libc's inet_pton & backtrace it with ping: Ok # perf probe --list probe_libc:inet_pton (on __inet_pton@resolv/inet_pton.c in /usr/lib64/power8/libc-2.26.so) Signed-off-by: Sandipan Das Cc: Jiri Olsa Cc: Kim Phillips Cc: Naveen N. Rao Cc: Ravi Bangoria Link: http://lkml.kernel.org/r/e11fecff96e6cf4c65cdbd9012463513d7b8356c.1530724939.git.sandipan@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- .../tests/shell/record+probe_libc_inet_pton.sh | 28 ++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh index 0502a9e04c79..3013ac8f83d0 100755 --- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh +++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh @@ -13,11 +13,24 @@ libc=$(grep -w libc /proc/self/maps | head -1 | sed -r 's/.*[[:space:]](\/.*)/\1/g') nm -Dg $libc 2>/dev/null | fgrep -q inet_pton || exit 254 +event_pattern='probe_libc:inet_pton(\_[[:digit:]]+)?' + +add_libc_inet_pton_event() { + + event_name=$(perf probe -f -x $libc -a inet_pton 2>&1 | tail -n +2 | head -n -5 | \ + grep -P -o "$event_pattern(?=[[:space:]]\(on inet_pton in $libc\))") + + if [ $? -ne 0 -o -z "$event_name" ] ; then + printf "FAIL: could not add event\n" + return 1 + fi +} + trace_libc_inet_pton_backtrace() { expected=`mktemp -u /tmp/expected.XXX` - echo "ping[][0-9 \.:]+probe_libc:inet_pton: \([[:xdigit:]]+\)" > $expected + echo "ping[][0-9 \.:]+$event_name: \([[:xdigit:]]+\)" > $expected echo ".*inet_pton\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected case "$(uname -m)" in s390x) @@ -41,7 +54,7 @@ trace_libc_inet_pton_backtrace() { perf_data=`mktemp -u /tmp/perf.data.XXX` perf_script=`mktemp -u /tmp/perf.script.XXX` - perf record -e probe_libc:inet_pton/$eventattr/ -o $perf_data ping -6 -c 1 ::1 > /dev/null 2>&1 + perf record -e $event_name/$eventattr/ -o $perf_data ping -6 -c 1 ::1 > /dev/null 2>&1 perf script -i $perf_data > $perf_script exec 3<$perf_script @@ -62,13 +75,20 @@ trace_libc_inet_pton_backtrace() { # even if the perf script output does not match. } +delete_libc_inet_pton_event() { + + if [ -n "$event_name" ] ; then + perf probe -q -d $event_name + fi +} + # Check for IPv6 interface existence ip a sh lo | fgrep -q inet6 || exit 2 skip_if_no_perf_probe && \ -perf probe -q $libc inet_pton && \ +add_libc_inet_pton_event && \ trace_libc_inet_pton_backtrace err=$? rm -f ${perf_data} ${perf_script} ${expected} -perf probe -q -d probe_libc:inet_pton +delete_libc_inet_pton_event exit $err -- cgit From 34b009cfde2b8ce20a69c7bfd6bad4ce0e7cd970 Mon Sep 17 00:00:00 2001 From: Kim Phillips Date: Fri, 6 Jul 2018 16:34:34 -0500 Subject: tools include: Grab copies of arm64 dependent unistd.h files Will be used for generating the syscall id/string translation table. 
The arm64 unistd.h file simply #includes the asm-generic/unistd.h, so, since we will want to know whether either change, we grab both: arch/arm64/include/uapi/asm/unistd.h and include/uapi/asm-generic/unistd.h Signed-off-by: Kim Phillips Cc: Alexander Shishkin Cc: Hendrik Brueckner Cc: Jiri Olsa Cc: Michael Ellerman Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Thomas Richter Link: http://lkml.kernel.org/r/20180706163434.1b64ffbcc0284fb79982f53b@arm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/arch/arm64/include/uapi/asm/unistd.h | 20 + tools/include/uapi/asm-generic/unistd.h | 783 +++++++++++++++++++++++++++++ tools/perf/check-headers.sh | 2 + 3 files changed, 805 insertions(+) create mode 100644 tools/arch/arm64/include/uapi/asm/unistd.h create mode 100644 tools/include/uapi/asm-generic/unistd.h diff --git a/tools/arch/arm64/include/uapi/asm/unistd.h b/tools/arch/arm64/include/uapi/asm/unistd.h new file mode 100644 index 000000000000..5072cbd15c82 --- /dev/null +++ b/tools/arch/arm64/include/uapi/asm/unistd.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#define __ARCH_WANT_RENAMEAT + +#include diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h new file mode 100644 index 000000000000..42990676a55e --- /dev/null +++ b/tools/include/uapi/asm-generic/unistd.h @@ -0,0 +1,783 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#include + +/* + * This file contains the system call numbers, based on the + * layout of the x86-64 architecture, which embeds the + * pointer to the syscall in the table. + * + * As a basic principle, no duplication of functionality + * should be added, e.g. we don't use lseek when llseek + * is present. New architectures should use this file + * and implement the less feature-full calls in user space. 
+ */ + +#ifndef __SYSCALL +#define __SYSCALL(x, y) +#endif + +#if __BITS_PER_LONG == 32 || defined(__SYSCALL_COMPAT) +#define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _32) +#else +#define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _64) +#endif + +#ifdef __SYSCALL_COMPAT +#define __SC_COMP(_nr, _sys, _comp) __SYSCALL(_nr, _comp) +#define __SC_COMP_3264(_nr, _32, _64, _comp) __SYSCALL(_nr, _comp) +#else +#define __SC_COMP(_nr, _sys, _comp) __SYSCALL(_nr, _sys) +#define __SC_COMP_3264(_nr, _32, _64, _comp) __SC_3264(_nr, _32, _64) +#endif + +#define __NR_io_setup 0 +__SC_COMP(__NR_io_setup, sys_io_setup, compat_sys_io_setup) +#define __NR_io_destroy 1 +__SYSCALL(__NR_io_destroy, sys_io_destroy) +#define __NR_io_submit 2 +__SC_COMP(__NR_io_submit, sys_io_submit, compat_sys_io_submit) +#define __NR_io_cancel 3 +__SYSCALL(__NR_io_cancel, sys_io_cancel) +#define __NR_io_getevents 4 +__SC_COMP(__NR_io_getevents, sys_io_getevents, compat_sys_io_getevents) + +/* fs/xattr.c */ +#define __NR_setxattr 5 +__SYSCALL(__NR_setxattr, sys_setxattr) +#define __NR_lsetxattr 6 +__SYSCALL(__NR_lsetxattr, sys_lsetxattr) +#define __NR_fsetxattr 7 +__SYSCALL(__NR_fsetxattr, sys_fsetxattr) +#define __NR_getxattr 8 +__SYSCALL(__NR_getxattr, sys_getxattr) +#define __NR_lgetxattr 9 +__SYSCALL(__NR_lgetxattr, sys_lgetxattr) +#define __NR_fgetxattr 10 +__SYSCALL(__NR_fgetxattr, sys_fgetxattr) +#define __NR_listxattr 11 +__SYSCALL(__NR_listxattr, sys_listxattr) +#define __NR_llistxattr 12 +__SYSCALL(__NR_llistxattr, sys_llistxattr) +#define __NR_flistxattr 13 +__SYSCALL(__NR_flistxattr, sys_flistxattr) +#define __NR_removexattr 14 +__SYSCALL(__NR_removexattr, sys_removexattr) +#define __NR_lremovexattr 15 +__SYSCALL(__NR_lremovexattr, sys_lremovexattr) +#define __NR_fremovexattr 16 +__SYSCALL(__NR_fremovexattr, sys_fremovexattr) + +/* fs/dcache.c */ +#define __NR_getcwd 17 +__SYSCALL(__NR_getcwd, sys_getcwd) + +/* fs/cookies.c */ +#define __NR_lookup_dcookie 18 +__SC_COMP(__NR_lookup_dcookie, sys_lookup_dcookie, compat_sys_lookup_dcookie) + +/* fs/eventfd.c */ +#define __NR_eventfd2 19 +__SYSCALL(__NR_eventfd2, sys_eventfd2) + +/* fs/eventpoll.c */ +#define __NR_epoll_create1 20 +__SYSCALL(__NR_epoll_create1, sys_epoll_create1) +#define __NR_epoll_ctl 21 +__SYSCALL(__NR_epoll_ctl, sys_epoll_ctl) +#define __NR_epoll_pwait 22 +__SC_COMP(__NR_epoll_pwait, sys_epoll_pwait, compat_sys_epoll_pwait) + +/* fs/fcntl.c */ +#define __NR_dup 23 +__SYSCALL(__NR_dup, sys_dup) +#define __NR_dup3 24 +__SYSCALL(__NR_dup3, sys_dup3) +#define __NR3264_fcntl 25 +__SC_COMP_3264(__NR3264_fcntl, sys_fcntl64, sys_fcntl, compat_sys_fcntl64) + +/* fs/inotify_user.c */ +#define __NR_inotify_init1 26 +__SYSCALL(__NR_inotify_init1, sys_inotify_init1) +#define __NR_inotify_add_watch 27 +__SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch) +#define __NR_inotify_rm_watch 28 +__SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch) + +/* fs/ioctl.c */ +#define __NR_ioctl 29 +__SC_COMP(__NR_ioctl, sys_ioctl, compat_sys_ioctl) + +/* fs/ioprio.c */ +#define __NR_ioprio_set 30 +__SYSCALL(__NR_ioprio_set, sys_ioprio_set) +#define __NR_ioprio_get 31 +__SYSCALL(__NR_ioprio_get, sys_ioprio_get) + +/* fs/locks.c */ +#define __NR_flock 32 +__SYSCALL(__NR_flock, sys_flock) + +/* fs/namei.c */ +#define __NR_mknodat 33 +__SYSCALL(__NR_mknodat, sys_mknodat) +#define __NR_mkdirat 34 +__SYSCALL(__NR_mkdirat, sys_mkdirat) +#define __NR_unlinkat 35 +__SYSCALL(__NR_unlinkat, sys_unlinkat) +#define __NR_symlinkat 36 +__SYSCALL(__NR_symlinkat, sys_symlinkat) +#define 
__NR_linkat 37 +__SYSCALL(__NR_linkat, sys_linkat) +#ifdef __ARCH_WANT_RENAMEAT +/* renameat is superseded with flags by renameat2 */ +#define __NR_renameat 38 +__SYSCALL(__NR_renameat, sys_renameat) +#endif /* __ARCH_WANT_RENAMEAT */ + +/* fs/namespace.c */ +#define __NR_umount2 39 +__SYSCALL(__NR_umount2, sys_umount) +#define __NR_mount 40 +__SC_COMP(__NR_mount, sys_mount, compat_sys_mount) +#define __NR_pivot_root 41 +__SYSCALL(__NR_pivot_root, sys_pivot_root) + +/* fs/nfsctl.c */ +#define __NR_nfsservctl 42 +__SYSCALL(__NR_nfsservctl, sys_ni_syscall) + +/* fs/open.c */ +#define __NR3264_statfs 43 +__SC_COMP_3264(__NR3264_statfs, sys_statfs64, sys_statfs, \ + compat_sys_statfs64) +#define __NR3264_fstatfs 44 +__SC_COMP_3264(__NR3264_fstatfs, sys_fstatfs64, sys_fstatfs, \ + compat_sys_fstatfs64) +#define __NR3264_truncate 45 +__SC_COMP_3264(__NR3264_truncate, sys_truncate64, sys_truncate, \ + compat_sys_truncate64) +#define __NR3264_ftruncate 46 +__SC_COMP_3264(__NR3264_ftruncate, sys_ftruncate64, sys_ftruncate, \ + compat_sys_ftruncate64) + +#define __NR_fallocate 47 +__SC_COMP(__NR_fallocate, sys_fallocate, compat_sys_fallocate) +#define __NR_faccessat 48 +__SYSCALL(__NR_faccessat, sys_faccessat) +#define __NR_chdir 49 +__SYSCALL(__NR_chdir, sys_chdir) +#define __NR_fchdir 50 +__SYSCALL(__NR_fchdir, sys_fchdir) +#define __NR_chroot 51 +__SYSCALL(__NR_chroot, sys_chroot) +#define __NR_fchmod 52 +__SYSCALL(__NR_fchmod, sys_fchmod) +#define __NR_fchmodat 53 +__SYSCALL(__NR_fchmodat, sys_fchmodat) +#define __NR_fchownat 54 +__SYSCALL(__NR_fchownat, sys_fchownat) +#define __NR_fchown 55 +__SYSCALL(__NR_fchown, sys_fchown) +#define __NR_openat 56 +__SC_COMP(__NR_openat, sys_openat, compat_sys_openat) +#define __NR_close 57 +__SYSCALL(__NR_close, sys_close) +#define __NR_vhangup 58 +__SYSCALL(__NR_vhangup, sys_vhangup) + +/* fs/pipe.c */ +#define __NR_pipe2 59 +__SYSCALL(__NR_pipe2, sys_pipe2) + +/* fs/quota.c */ +#define __NR_quotactl 60 +__SYSCALL(__NR_quotactl, sys_quotactl) + +/* fs/readdir.c */ +#define __NR_getdents64 61 +__SYSCALL(__NR_getdents64, sys_getdents64) + +/* fs/read_write.c */ +#define __NR3264_lseek 62 +__SC_3264(__NR3264_lseek, sys_llseek, sys_lseek) +#define __NR_read 63 +__SYSCALL(__NR_read, sys_read) +#define __NR_write 64 +__SYSCALL(__NR_write, sys_write) +#define __NR_readv 65 +__SC_COMP(__NR_readv, sys_readv, compat_sys_readv) +#define __NR_writev 66 +__SC_COMP(__NR_writev, sys_writev, compat_sys_writev) +#define __NR_pread64 67 +__SC_COMP(__NR_pread64, sys_pread64, compat_sys_pread64) +#define __NR_pwrite64 68 +__SC_COMP(__NR_pwrite64, sys_pwrite64, compat_sys_pwrite64) +#define __NR_preadv 69 +__SC_COMP(__NR_preadv, sys_preadv, compat_sys_preadv) +#define __NR_pwritev 70 +__SC_COMP(__NR_pwritev, sys_pwritev, compat_sys_pwritev) + +/* fs/sendfile.c */ +#define __NR3264_sendfile 71 +__SYSCALL(__NR3264_sendfile, sys_sendfile64) + +/* fs/select.c */ +#define __NR_pselect6 72 +__SC_COMP(__NR_pselect6, sys_pselect6, compat_sys_pselect6) +#define __NR_ppoll 73 +__SC_COMP(__NR_ppoll, sys_ppoll, compat_sys_ppoll) + +/* fs/signalfd.c */ +#define __NR_signalfd4 74 +__SC_COMP(__NR_signalfd4, sys_signalfd4, compat_sys_signalfd4) + +/* fs/splice.c */ +#define __NR_vmsplice 75 +__SC_COMP(__NR_vmsplice, sys_vmsplice, compat_sys_vmsplice) +#define __NR_splice 76 +__SYSCALL(__NR_splice, sys_splice) +#define __NR_tee 77 +__SYSCALL(__NR_tee, sys_tee) + +/* fs/stat.c */ +#define __NR_readlinkat 78 +__SYSCALL(__NR_readlinkat, sys_readlinkat) +#define __NR3264_fstatat 79 
+__SC_3264(__NR3264_fstatat, sys_fstatat64, sys_newfstatat) +#define __NR3264_fstat 80 +__SC_3264(__NR3264_fstat, sys_fstat64, sys_newfstat) + +/* fs/sync.c */ +#define __NR_sync 81 +__SYSCALL(__NR_sync, sys_sync) +#define __NR_fsync 82 +__SYSCALL(__NR_fsync, sys_fsync) +#define __NR_fdatasync 83 +__SYSCALL(__NR_fdatasync, sys_fdatasync) +#ifdef __ARCH_WANT_SYNC_FILE_RANGE2 +#define __NR_sync_file_range2 84 +__SC_COMP(__NR_sync_file_range2, sys_sync_file_range2, \ + compat_sys_sync_file_range2) +#else +#define __NR_sync_file_range 84 +__SC_COMP(__NR_sync_file_range, sys_sync_file_range, \ + compat_sys_sync_file_range) +#endif + +/* fs/timerfd.c */ +#define __NR_timerfd_create 85 +__SYSCALL(__NR_timerfd_create, sys_timerfd_create) +#define __NR_timerfd_settime 86 +__SC_COMP(__NR_timerfd_settime, sys_timerfd_settime, \ + compat_sys_timerfd_settime) +#define __NR_timerfd_gettime 87 +__SC_COMP(__NR_timerfd_gettime, sys_timerfd_gettime, \ + compat_sys_timerfd_gettime) + +/* fs/utimes.c */ +#define __NR_utimensat 88 +__SC_COMP(__NR_utimensat, sys_utimensat, compat_sys_utimensat) + +/* kernel/acct.c */ +#define __NR_acct 89 +__SYSCALL(__NR_acct, sys_acct) + +/* kernel/capability.c */ +#define __NR_capget 90 +__SYSCALL(__NR_capget, sys_capget) +#define __NR_capset 91 +__SYSCALL(__NR_capset, sys_capset) + +/* kernel/exec_domain.c */ +#define __NR_personality 92 +__SYSCALL(__NR_personality, sys_personality) + +/* kernel/exit.c */ +#define __NR_exit 93 +__SYSCALL(__NR_exit, sys_exit) +#define __NR_exit_group 94 +__SYSCALL(__NR_exit_group, sys_exit_group) +#define __NR_waitid 95 +__SC_COMP(__NR_waitid, sys_waitid, compat_sys_waitid) + +/* kernel/fork.c */ +#define __NR_set_tid_address 96 +__SYSCALL(__NR_set_tid_address, sys_set_tid_address) +#define __NR_unshare 97 +__SYSCALL(__NR_unshare, sys_unshare) + +/* kernel/futex.c */ +#define __NR_futex 98 +__SC_COMP(__NR_futex, sys_futex, compat_sys_futex) +#define __NR_set_robust_list 99 +__SC_COMP(__NR_set_robust_list, sys_set_robust_list, \ + compat_sys_set_robust_list) +#define __NR_get_robust_list 100 +__SC_COMP(__NR_get_robust_list, sys_get_robust_list, \ + compat_sys_get_robust_list) + +/* kernel/hrtimer.c */ +#define __NR_nanosleep 101 +__SC_COMP(__NR_nanosleep, sys_nanosleep, compat_sys_nanosleep) + +/* kernel/itimer.c */ +#define __NR_getitimer 102 +__SC_COMP(__NR_getitimer, sys_getitimer, compat_sys_getitimer) +#define __NR_setitimer 103 +__SC_COMP(__NR_setitimer, sys_setitimer, compat_sys_setitimer) + +/* kernel/kexec.c */ +#define __NR_kexec_load 104 +__SC_COMP(__NR_kexec_load, sys_kexec_load, compat_sys_kexec_load) + +/* kernel/module.c */ +#define __NR_init_module 105 +__SYSCALL(__NR_init_module, sys_init_module) +#define __NR_delete_module 106 +__SYSCALL(__NR_delete_module, sys_delete_module) + +/* kernel/posix-timers.c */ +#define __NR_timer_create 107 +__SC_COMP(__NR_timer_create, sys_timer_create, compat_sys_timer_create) +#define __NR_timer_gettime 108 +__SC_COMP(__NR_timer_gettime, sys_timer_gettime, compat_sys_timer_gettime) +#define __NR_timer_getoverrun 109 +__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun) +#define __NR_timer_settime 110 +__SC_COMP(__NR_timer_settime, sys_timer_settime, compat_sys_timer_settime) +#define __NR_timer_delete 111 +__SYSCALL(__NR_timer_delete, sys_timer_delete) +#define __NR_clock_settime 112 +__SC_COMP(__NR_clock_settime, sys_clock_settime, compat_sys_clock_settime) +#define __NR_clock_gettime 113 +__SC_COMP(__NR_clock_gettime, sys_clock_gettime, compat_sys_clock_gettime) +#define __NR_clock_getres 
114 +__SC_COMP(__NR_clock_getres, sys_clock_getres, compat_sys_clock_getres) +#define __NR_clock_nanosleep 115 +__SC_COMP(__NR_clock_nanosleep, sys_clock_nanosleep, \ + compat_sys_clock_nanosleep) + +/* kernel/printk.c */ +#define __NR_syslog 116 +__SYSCALL(__NR_syslog, sys_syslog) + +/* kernel/ptrace.c */ +#define __NR_ptrace 117 +__SYSCALL(__NR_ptrace, sys_ptrace) + +/* kernel/sched/core.c */ +#define __NR_sched_setparam 118 +__SYSCALL(__NR_sched_setparam, sys_sched_setparam) +#define __NR_sched_setscheduler 119 +__SYSCALL(__NR_sched_setscheduler, sys_sched_setscheduler) +#define __NR_sched_getscheduler 120 +__SYSCALL(__NR_sched_getscheduler, sys_sched_getscheduler) +#define __NR_sched_getparam 121 +__SYSCALL(__NR_sched_getparam, sys_sched_getparam) +#define __NR_sched_setaffinity 122 +__SC_COMP(__NR_sched_setaffinity, sys_sched_setaffinity, \ + compat_sys_sched_setaffinity) +#define __NR_sched_getaffinity 123 +__SC_COMP(__NR_sched_getaffinity, sys_sched_getaffinity, \ + compat_sys_sched_getaffinity) +#define __NR_sched_yield 124 +__SYSCALL(__NR_sched_yield, sys_sched_yield) +#define __NR_sched_get_priority_max 125 +__SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max) +#define __NR_sched_get_priority_min 126 +__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min) +#define __NR_sched_rr_get_interval 127 +__SC_COMP(__NR_sched_rr_get_interval, sys_sched_rr_get_interval, \ + compat_sys_sched_rr_get_interval) + +/* kernel/signal.c */ +#define __NR_restart_syscall 128 +__SYSCALL(__NR_restart_syscall, sys_restart_syscall) +#define __NR_kill 129 +__SYSCALL(__NR_kill, sys_kill) +#define __NR_tkill 130 +__SYSCALL(__NR_tkill, sys_tkill) +#define __NR_tgkill 131 +__SYSCALL(__NR_tgkill, sys_tgkill) +#define __NR_sigaltstack 132 +__SC_COMP(__NR_sigaltstack, sys_sigaltstack, compat_sys_sigaltstack) +#define __NR_rt_sigsuspend 133 +__SC_COMP(__NR_rt_sigsuspend, sys_rt_sigsuspend, compat_sys_rt_sigsuspend) +#define __NR_rt_sigaction 134 +__SC_COMP(__NR_rt_sigaction, sys_rt_sigaction, compat_sys_rt_sigaction) +#define __NR_rt_sigprocmask 135 +__SC_COMP(__NR_rt_sigprocmask, sys_rt_sigprocmask, compat_sys_rt_sigprocmask) +#define __NR_rt_sigpending 136 +__SC_COMP(__NR_rt_sigpending, sys_rt_sigpending, compat_sys_rt_sigpending) +#define __NR_rt_sigtimedwait 137 +__SC_COMP(__NR_rt_sigtimedwait, sys_rt_sigtimedwait, \ + compat_sys_rt_sigtimedwait) +#define __NR_rt_sigqueueinfo 138 +__SC_COMP(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo, \ + compat_sys_rt_sigqueueinfo) +#define __NR_rt_sigreturn 139 +__SC_COMP(__NR_rt_sigreturn, sys_rt_sigreturn, compat_sys_rt_sigreturn) + +/* kernel/sys.c */ +#define __NR_setpriority 140 +__SYSCALL(__NR_setpriority, sys_setpriority) +#define __NR_getpriority 141 +__SYSCALL(__NR_getpriority, sys_getpriority) +#define __NR_reboot 142 +__SYSCALL(__NR_reboot, sys_reboot) +#define __NR_setregid 143 +__SYSCALL(__NR_setregid, sys_setregid) +#define __NR_setgid 144 +__SYSCALL(__NR_setgid, sys_setgid) +#define __NR_setreuid 145 +__SYSCALL(__NR_setreuid, sys_setreuid) +#define __NR_setuid 146 +__SYSCALL(__NR_setuid, sys_setuid) +#define __NR_setresuid 147 +__SYSCALL(__NR_setresuid, sys_setresuid) +#define __NR_getresuid 148 +__SYSCALL(__NR_getresuid, sys_getresuid) +#define __NR_setresgid 149 +__SYSCALL(__NR_setresgid, sys_setresgid) +#define __NR_getresgid 150 +__SYSCALL(__NR_getresgid, sys_getresgid) +#define __NR_setfsuid 151 +__SYSCALL(__NR_setfsuid, sys_setfsuid) +#define __NR_setfsgid 152 +__SYSCALL(__NR_setfsgid, sys_setfsgid) +#define __NR_times 153 
+__SC_COMP(__NR_times, sys_times, compat_sys_times) +#define __NR_setpgid 154 +__SYSCALL(__NR_setpgid, sys_setpgid) +#define __NR_getpgid 155 +__SYSCALL(__NR_getpgid, sys_getpgid) +#define __NR_getsid 156 +__SYSCALL(__NR_getsid, sys_getsid) +#define __NR_setsid 157 +__SYSCALL(__NR_setsid, sys_setsid) +#define __NR_getgroups 158 +__SYSCALL(__NR_getgroups, sys_getgroups) +#define __NR_setgroups 159 +__SYSCALL(__NR_setgroups, sys_setgroups) +#define __NR_uname 160 +__SYSCALL(__NR_uname, sys_newuname) +#define __NR_sethostname 161 +__SYSCALL(__NR_sethostname, sys_sethostname) +#define __NR_setdomainname 162 +__SYSCALL(__NR_setdomainname, sys_setdomainname) +#define __NR_getrlimit 163 +__SC_COMP(__NR_getrlimit, sys_getrlimit, compat_sys_getrlimit) +#define __NR_setrlimit 164 +__SC_COMP(__NR_setrlimit, sys_setrlimit, compat_sys_setrlimit) +#define __NR_getrusage 165 +__SC_COMP(__NR_getrusage, sys_getrusage, compat_sys_getrusage) +#define __NR_umask 166 +__SYSCALL(__NR_umask, sys_umask) +#define __NR_prctl 167 +__SYSCALL(__NR_prctl, sys_prctl) +#define __NR_getcpu 168 +__SYSCALL(__NR_getcpu, sys_getcpu) + +/* kernel/time.c */ +#define __NR_gettimeofday 169 +__SC_COMP(__NR_gettimeofday, sys_gettimeofday, compat_sys_gettimeofday) +#define __NR_settimeofday 170 +__SC_COMP(__NR_settimeofday, sys_settimeofday, compat_sys_settimeofday) +#define __NR_adjtimex 171 +__SC_COMP(__NR_adjtimex, sys_adjtimex, compat_sys_adjtimex) + +/* kernel/timer.c */ +#define __NR_getpid 172 +__SYSCALL(__NR_getpid, sys_getpid) +#define __NR_getppid 173 +__SYSCALL(__NR_getppid, sys_getppid) +#define __NR_getuid 174 +__SYSCALL(__NR_getuid, sys_getuid) +#define __NR_geteuid 175 +__SYSCALL(__NR_geteuid, sys_geteuid) +#define __NR_getgid 176 +__SYSCALL(__NR_getgid, sys_getgid) +#define __NR_getegid 177 +__SYSCALL(__NR_getegid, sys_getegid) +#define __NR_gettid 178 +__SYSCALL(__NR_gettid, sys_gettid) +#define __NR_sysinfo 179 +__SC_COMP(__NR_sysinfo, sys_sysinfo, compat_sys_sysinfo) + +/* ipc/mqueue.c */ +#define __NR_mq_open 180 +__SC_COMP(__NR_mq_open, sys_mq_open, compat_sys_mq_open) +#define __NR_mq_unlink 181 +__SYSCALL(__NR_mq_unlink, sys_mq_unlink) +#define __NR_mq_timedsend 182 +__SC_COMP(__NR_mq_timedsend, sys_mq_timedsend, compat_sys_mq_timedsend) +#define __NR_mq_timedreceive 183 +__SC_COMP(__NR_mq_timedreceive, sys_mq_timedreceive, \ + compat_sys_mq_timedreceive) +#define __NR_mq_notify 184 +__SC_COMP(__NR_mq_notify, sys_mq_notify, compat_sys_mq_notify) +#define __NR_mq_getsetattr 185 +__SC_COMP(__NR_mq_getsetattr, sys_mq_getsetattr, compat_sys_mq_getsetattr) + +/* ipc/msg.c */ +#define __NR_msgget 186 +__SYSCALL(__NR_msgget, sys_msgget) +#define __NR_msgctl 187 +__SC_COMP(__NR_msgctl, sys_msgctl, compat_sys_msgctl) +#define __NR_msgrcv 188 +__SC_COMP(__NR_msgrcv, sys_msgrcv, compat_sys_msgrcv) +#define __NR_msgsnd 189 +__SC_COMP(__NR_msgsnd, sys_msgsnd, compat_sys_msgsnd) + +/* ipc/sem.c */ +#define __NR_semget 190 +__SYSCALL(__NR_semget, sys_semget) +#define __NR_semctl 191 +__SC_COMP(__NR_semctl, sys_semctl, compat_sys_semctl) +#define __NR_semtimedop 192 +__SC_COMP(__NR_semtimedop, sys_semtimedop, compat_sys_semtimedop) +#define __NR_semop 193 +__SYSCALL(__NR_semop, sys_semop) + +/* ipc/shm.c */ +#define __NR_shmget 194 +__SYSCALL(__NR_shmget, sys_shmget) +#define __NR_shmctl 195 +__SC_COMP(__NR_shmctl, sys_shmctl, compat_sys_shmctl) +#define __NR_shmat 196 +__SC_COMP(__NR_shmat, sys_shmat, compat_sys_shmat) +#define __NR_shmdt 197 +__SYSCALL(__NR_shmdt, sys_shmdt) + +/* net/socket.c */ +#define __NR_socket 198 
+__SYSCALL(__NR_socket, sys_socket) +#define __NR_socketpair 199 +__SYSCALL(__NR_socketpair, sys_socketpair) +#define __NR_bind 200 +__SYSCALL(__NR_bind, sys_bind) +#define __NR_listen 201 +__SYSCALL(__NR_listen, sys_listen) +#define __NR_accept 202 +__SYSCALL(__NR_accept, sys_accept) +#define __NR_connect 203 +__SYSCALL(__NR_connect, sys_connect) +#define __NR_getsockname 204 +__SYSCALL(__NR_getsockname, sys_getsockname) +#define __NR_getpeername 205 +__SYSCALL(__NR_getpeername, sys_getpeername) +#define __NR_sendto 206 +__SYSCALL(__NR_sendto, sys_sendto) +#define __NR_recvfrom 207 +__SC_COMP(__NR_recvfrom, sys_recvfrom, compat_sys_recvfrom) +#define __NR_setsockopt 208 +__SC_COMP(__NR_setsockopt, sys_setsockopt, compat_sys_setsockopt) +#define __NR_getsockopt 209 +__SC_COMP(__NR_getsockopt, sys_getsockopt, compat_sys_getsockopt) +#define __NR_shutdown 210 +__SYSCALL(__NR_shutdown, sys_shutdown) +#define __NR_sendmsg 211 +__SC_COMP(__NR_sendmsg, sys_sendmsg, compat_sys_sendmsg) +#define __NR_recvmsg 212 +__SC_COMP(__NR_recvmsg, sys_recvmsg, compat_sys_recvmsg) + +/* mm/filemap.c */ +#define __NR_readahead 213 +__SC_COMP(__NR_readahead, sys_readahead, compat_sys_readahead) + +/* mm/nommu.c, also with MMU */ +#define __NR_brk 214 +__SYSCALL(__NR_brk, sys_brk) +#define __NR_munmap 215 +__SYSCALL(__NR_munmap, sys_munmap) +#define __NR_mremap 216 +__SYSCALL(__NR_mremap, sys_mremap) + +/* security/keys/keyctl.c */ +#define __NR_add_key 217 +__SYSCALL(__NR_add_key, sys_add_key) +#define __NR_request_key 218 +__SYSCALL(__NR_request_key, sys_request_key) +#define __NR_keyctl 219 +__SC_COMP(__NR_keyctl, sys_keyctl, compat_sys_keyctl) + +/* arch/example/kernel/sys_example.c */ +#define __NR_clone 220 +__SYSCALL(__NR_clone, sys_clone) +#define __NR_execve 221 +__SC_COMP(__NR_execve, sys_execve, compat_sys_execve) + +#define __NR3264_mmap 222 +__SC_3264(__NR3264_mmap, sys_mmap2, sys_mmap) +/* mm/fadvise.c */ +#define __NR3264_fadvise64 223 +__SC_COMP(__NR3264_fadvise64, sys_fadvise64_64, compat_sys_fadvise64_64) + +/* mm/, CONFIG_MMU only */ +#ifndef __ARCH_NOMMU +#define __NR_swapon 224 +__SYSCALL(__NR_swapon, sys_swapon) +#define __NR_swapoff 225 +__SYSCALL(__NR_swapoff, sys_swapoff) +#define __NR_mprotect 226 +__SYSCALL(__NR_mprotect, sys_mprotect) +#define __NR_msync 227 +__SYSCALL(__NR_msync, sys_msync) +#define __NR_mlock 228 +__SYSCALL(__NR_mlock, sys_mlock) +#define __NR_munlock 229 +__SYSCALL(__NR_munlock, sys_munlock) +#define __NR_mlockall 230 +__SYSCALL(__NR_mlockall, sys_mlockall) +#define __NR_munlockall 231 +__SYSCALL(__NR_munlockall, sys_munlockall) +#define __NR_mincore 232 +__SYSCALL(__NR_mincore, sys_mincore) +#define __NR_madvise 233 +__SYSCALL(__NR_madvise, sys_madvise) +#define __NR_remap_file_pages 234 +__SYSCALL(__NR_remap_file_pages, sys_remap_file_pages) +#define __NR_mbind 235 +__SC_COMP(__NR_mbind, sys_mbind, compat_sys_mbind) +#define __NR_get_mempolicy 236 +__SC_COMP(__NR_get_mempolicy, sys_get_mempolicy, compat_sys_get_mempolicy) +#define __NR_set_mempolicy 237 +__SC_COMP(__NR_set_mempolicy, sys_set_mempolicy, compat_sys_set_mempolicy) +#define __NR_migrate_pages 238 +__SC_COMP(__NR_migrate_pages, sys_migrate_pages, compat_sys_migrate_pages) +#define __NR_move_pages 239 +__SC_COMP(__NR_move_pages, sys_move_pages, compat_sys_move_pages) +#endif + +#define __NR_rt_tgsigqueueinfo 240 +__SC_COMP(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo, \ + compat_sys_rt_tgsigqueueinfo) +#define __NR_perf_event_open 241 +__SYSCALL(__NR_perf_event_open, sys_perf_event_open) +#define 
__NR_accept4 242 +__SYSCALL(__NR_accept4, sys_accept4) +#define __NR_recvmmsg 243 +__SC_COMP(__NR_recvmmsg, sys_recvmmsg, compat_sys_recvmmsg) + +/* + * Architectures may provide up to 16 syscalls of their own + * starting with this value. + */ +#define __NR_arch_specific_syscall 244 + +#define __NR_wait4 260 +__SC_COMP(__NR_wait4, sys_wait4, compat_sys_wait4) +#define __NR_prlimit64 261 +__SYSCALL(__NR_prlimit64, sys_prlimit64) +#define __NR_fanotify_init 262 +__SYSCALL(__NR_fanotify_init, sys_fanotify_init) +#define __NR_fanotify_mark 263 +__SYSCALL(__NR_fanotify_mark, sys_fanotify_mark) +#define __NR_name_to_handle_at 264 +__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at) +#define __NR_open_by_handle_at 265 +__SC_COMP(__NR_open_by_handle_at, sys_open_by_handle_at, \ + compat_sys_open_by_handle_at) +#define __NR_clock_adjtime 266 +__SC_COMP(__NR_clock_adjtime, sys_clock_adjtime, compat_sys_clock_adjtime) +#define __NR_syncfs 267 +__SYSCALL(__NR_syncfs, sys_syncfs) +#define __NR_setns 268 +__SYSCALL(__NR_setns, sys_setns) +#define __NR_sendmmsg 269 +__SC_COMP(__NR_sendmmsg, sys_sendmmsg, compat_sys_sendmmsg) +#define __NR_process_vm_readv 270 +__SC_COMP(__NR_process_vm_readv, sys_process_vm_readv, \ + compat_sys_process_vm_readv) +#define __NR_process_vm_writev 271 +__SC_COMP(__NR_process_vm_writev, sys_process_vm_writev, \ + compat_sys_process_vm_writev) +#define __NR_kcmp 272 +__SYSCALL(__NR_kcmp, sys_kcmp) +#define __NR_finit_module 273 +__SYSCALL(__NR_finit_module, sys_finit_module) +#define __NR_sched_setattr 274 +__SYSCALL(__NR_sched_setattr, sys_sched_setattr) +#define __NR_sched_getattr 275 +__SYSCALL(__NR_sched_getattr, sys_sched_getattr) +#define __NR_renameat2 276 +__SYSCALL(__NR_renameat2, sys_renameat2) +#define __NR_seccomp 277 +__SYSCALL(__NR_seccomp, sys_seccomp) +#define __NR_getrandom 278 +__SYSCALL(__NR_getrandom, sys_getrandom) +#define __NR_memfd_create 279 +__SYSCALL(__NR_memfd_create, sys_memfd_create) +#define __NR_bpf 280 +__SYSCALL(__NR_bpf, sys_bpf) +#define __NR_execveat 281 +__SC_COMP(__NR_execveat, sys_execveat, compat_sys_execveat) +#define __NR_userfaultfd 282 +__SYSCALL(__NR_userfaultfd, sys_userfaultfd) +#define __NR_membarrier 283 +__SYSCALL(__NR_membarrier, sys_membarrier) +#define __NR_mlock2 284 +__SYSCALL(__NR_mlock2, sys_mlock2) +#define __NR_copy_file_range 285 +__SYSCALL(__NR_copy_file_range, sys_copy_file_range) +#define __NR_preadv2 286 +__SC_COMP(__NR_preadv2, sys_preadv2, compat_sys_preadv2) +#define __NR_pwritev2 287 +__SC_COMP(__NR_pwritev2, sys_pwritev2, compat_sys_pwritev2) +#define __NR_pkey_mprotect 288 +__SYSCALL(__NR_pkey_mprotect, sys_pkey_mprotect) +#define __NR_pkey_alloc 289 +__SYSCALL(__NR_pkey_alloc, sys_pkey_alloc) +#define __NR_pkey_free 290 +__SYSCALL(__NR_pkey_free, sys_pkey_free) +#define __NR_statx 291 +__SYSCALL(__NR_statx, sys_statx) +#define __NR_io_pgetevents 292 +__SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents) + +#undef __NR_syscalls +#define __NR_syscalls 293 + +/* + * 32 bit systems traditionally used different + * syscalls for off_t and loff_t arguments, while + * 64 bit systems only need the off_t version. + * For new 32 bit platforms, there is no need to + * implement the old 32 bit off_t syscalls, so + * they take different names. + * Here we map the numbers so that both versions + * use the same syscall table layout. 
+ */ +#if __BITS_PER_LONG == 64 && !defined(__SYSCALL_COMPAT) +#define __NR_fcntl __NR3264_fcntl +#define __NR_statfs __NR3264_statfs +#define __NR_fstatfs __NR3264_fstatfs +#define __NR_truncate __NR3264_truncate +#define __NR_ftruncate __NR3264_ftruncate +#define __NR_lseek __NR3264_lseek +#define __NR_sendfile __NR3264_sendfile +#define __NR_newfstatat __NR3264_fstatat +#define __NR_fstat __NR3264_fstat +#define __NR_mmap __NR3264_mmap +#define __NR_fadvise64 __NR3264_fadvise64 +#ifdef __NR3264_stat +#define __NR_stat __NR3264_stat +#define __NR_lstat __NR3264_lstat +#endif +#else +#define __NR_fcntl64 __NR3264_fcntl +#define __NR_statfs64 __NR3264_statfs +#define __NR_fstatfs64 __NR3264_fstatfs +#define __NR_truncate64 __NR3264_truncate +#define __NR_ftruncate64 __NR3264_ftruncate +#define __NR_llseek __NR3264_lseek +#define __NR_sendfile64 __NR3264_sendfile +#define __NR_fstatat64 __NR3264_fstatat +#define __NR_fstat64 __NR3264_fstat +#define __NR_mmap2 __NR3264_mmap +#define __NR_fadvise64_64 __NR3264_fadvise64 +#ifdef __NR3264_stat +#define __NR_stat64 __NR3264_stat +#define __NR_lstat64 __NR3264_lstat +#endif +#endif diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh index 10f333e2e825..814aaf269949 100755 --- a/tools/perf/check-headers.sh +++ b/tools/perf/check-headers.sh @@ -35,6 +35,7 @@ arch/s390/include/uapi/asm/ptrace.h arch/s390/include/uapi/asm/sie.h arch/arm/include/uapi/asm/kvm.h arch/arm64/include/uapi/asm/kvm.h +arch/arm64/include/uapi/asm/unistd.h arch/alpha/include/uapi/asm/errno.h arch/mips/include/asm/errno.h arch/mips/include/uapi/asm/errno.h @@ -53,6 +54,7 @@ include/uapi/asm-generic/errno.h include/uapi/asm-generic/errno-base.h include/uapi/asm-generic/ioctls.h include/uapi/asm-generic/mman-common.h +include/uapi/asm-generic/unistd.h ' check_2 () { -- cgit From 2b5882435606c209ebc052230f03505ea477a252 Mon Sep 17 00:00:00 2001 From: Kim Phillips Date: Fri, 6 Jul 2018 16:34:43 -0500 Subject: perf arm64: Generate system call table from asm/unistd.h This should speed up accessing new system calls introduced with the kernel rather than waiting for libaudit updates to include them. Using the existing other arch scripts resulted in this error: tools/perf/arch/arm64/entry/syscalls//mksyscalltbl: 25: printf: __NR3264_ftruncate: expected numeric value because, unlike other arches, asm-generic's unistd.h does things like: #define __NR_ftruncate __NR3264_ftruncate Turning the scripts printf's %d into a %s resulted in this in the generated syscalls.c file: static const char *syscalltbl_arm64[] = { [__NR3264_ftruncate] = "ftruncate", So we use the host C compiler to fold the macros, and print them out from within a temporary C program, in order to get the correct output: static const char *syscalltbl_arm64[] = { [46] = "ftruncate", Committer notes: Testing this with a container with an old toolchain breaks because it ends up using the system's /usr/include/asm-generic/unistd.h, included from tools/arch/arm64/include/uapi/asm/unistd.h when what is desired is for it to include tools/include/uapi/asm-generic/unistd.h. Since all that tools/arch/arm64/include/uapi/asm/unistd.h is to set a define and then include asm-generic/unistd.h, do that directly and use tools/include/uapi/asm-generic/unistd.h as the file to get the syscall definitions to expand. 
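For reference, the temporary program that the script feeds to the host compiler looks roughly like the sketch below (only two example entries are shown, and the header path stands in for the $input argument mentioned above; this is an illustration, not the script's literal output):

	#include <stdio.h>
	#define __ARCH_WANT_RENAMEAT
	#include "tools/include/uapi/asm-generic/unistd.h"

	int main(int argc, char *argv[])
	{
		/* one printf per "name number" pair read from the preprocessor
		 * output; the compiler folds each __NR_* macro into a number */
		printf("\t[%d] = \"io_setup\",\n", __NR_io_setup);
		printf("\t[%d] = \"ftruncate\",\n", __NR_ftruncate);
		/* the last syscall seen provides the table size marker */
		printf("#define SYSCALLTBL_ARM64_MAX_ID %d\n", __NR_ftruncate);
		return 0;
	}

Compiled and run, this prints [0] = "io_setup", and [46] = "ftruncate", which the surrounding shell wraps in the syscalltbl_arm64[] array.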
Testing it: tools/perf/arch/arm64/entry/syscalls/mksyscalltbl /gcc-linaro-5.4.1-2017.05-x86_64_aarch64-linux-gnu/bin/aarch64-linux-gnu-gcc gcc tools/include/uapi/asm-generic/unistd.h Now works and generates in the syscall string table. Before it ended up as: $ tools/perf/arch/arm64/entry/syscalls/mksyscalltbl /gcc-linaro-5.4.1-2017.05-x86_64_aarch64-linux-gnu/bin/aarch64-linux-gnu-gcc gcc tools/arch/arm64/include/uapi/asm/unistd.h static const char *syscalltbl_arm64[] = { : In function 'main': :257:38: error: '__NR_getrandom' undeclared (first use in this function) :257:38: note: each undeclared identifier is reported only once for each function it appears in :258:41: error: '__NR_memfd_create' undeclared (first use in this function) :259:32: error: '__NR_bpf' undeclared (first use in this function) :260:37: error: '__NR_execveat' undeclared (first use in this function) tools/perf/arch/arm64/entry/syscalls/mksyscalltbl: 47: tools/perf/arch/arm64/entry/syscalls/mksyscalltbl: /tmp/create-table-60liya: Permission denied }; $ Signed-off-by: Kim Phillips Reviewed-by: Hendrik Brueckner Tested-by: Arnaldo Carvalho de Melo Cc: Alexander Shishkin Cc: Jiri Olsa Cc: Michael Ellerman Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Thomas Richter Link: http://lkml.kernel.org/r/20180706163443.22626f5e9e10e5bab5e5c662@arm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/arch/arm64/Makefile | 21 ++++++++ tools/perf/arch/arm64/entry/syscalls/mksyscalltbl | 62 +++++++++++++++++++++++ 2 files changed, 83 insertions(+) create mode 100755 tools/perf/arch/arm64/entry/syscalls/mksyscalltbl diff --git a/tools/perf/arch/arm64/Makefile b/tools/perf/arch/arm64/Makefile index 91de4860faad..f013b115dc86 100644 --- a/tools/perf/arch/arm64/Makefile +++ b/tools/perf/arch/arm64/Makefile @@ -4,3 +4,24 @@ PERF_HAVE_DWARF_REGS := 1 endif PERF_HAVE_JITDUMP := 1 PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1 + +# +# Syscall table generation for perf +# + +out := $(OUTPUT)arch/arm64/include/generated/asm +header := $(out)/syscalls.c +sysdef := $(srctree)/tools/include/uapi/asm-generic/unistd.h +sysprf := $(srctree)/tools/perf/arch/arm64/entry/syscalls/ +systbl := $(sysprf)/mksyscalltbl + +# Create output directory if not already present +_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') + +$(header): $(sysdef) $(systbl) + $(Q)$(SHELL) '$(systbl)' '$(CC)' '$(HOSTCC)' $(sysdef) > $@ + +clean:: + $(call QUIET_CLEAN, arm64) $(RM) $(header) + +archheaders: $(header) diff --git a/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl b/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl new file mode 100755 index 000000000000..52e197317d3e --- /dev/null +++ b/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl @@ -0,0 +1,62 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 +# +# Generate system call table for perf. Derived from +# powerpc script. +# +# Copyright IBM Corp. 2017 +# Author(s): Hendrik Brueckner +# Changed by: Ravi Bangoria +# Changed by: Kim Phillips + +gcc=$1 +hostcc=$2 +input=$3 + +if ! 
test -r $input; then + echo "Could not read input file" >&2 + exit 1 +fi + +create_table_from_c() +{ + local sc nr last_sc + + create_table_exe=`mktemp /tmp/create-table-XXXXXX` + + { + + cat <<-_EoHEADER + #include + #define __ARCH_WANT_RENAMEAT + #include "$input" + int main(int argc, char *argv[]) + { + _EoHEADER + + while read sc nr; do + printf "%s\n" " printf(\"\\t[%d] = \\\"$sc\\\",\\n\", __NR_$sc);" + last_sc=$sc + done + + printf "%s\n" " printf(\"#define SYSCALLTBL_ARM64_MAX_ID %d\\n\", __NR_$last_sc);" + printf "}\n" + + } | $hostcc -o $create_table_exe -x c - + + $create_table_exe + + rm -f $create_table_exe +} + +create_table() +{ + echo "static const char *syscalltbl_arm64[] = {" + create_table_from_c + echo "};" +} + +$gcc -E -dM -x c $input \ + |sed -ne 's/^#define __NR_//p' \ + |sort -t' ' -k2 -nu \ + |create_table -- cgit From a7f660d6576a5f4504c8ab4f4956bba03d48bf52 Mon Sep 17 00:00:00 2001 From: Kim Phillips Date: Fri, 6 Jul 2018 16:34:54 -0500 Subject: perf trace arm64: Use generated syscall table This should speed up accessing new system calls introduced with the kernel rather than waiting for libaudit updates to include them. It also enables users to specify wildcards, for example, perf trace -e 'open*', just like was already possible on x86, s390, and powerpc, which means arm64 can now pass the "Check open filename arg using perf trace + vfs_getname" test. Signed-off-by: Kim Phillips Reviewed-by: Hendrik Brueckner Cc: Alexander Shishkin Cc: Jiri Olsa Cc: Michael Ellerman Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Thomas Richter Link: http://lkml.kernel.org/r/20180706163454.f714b9ab49ecc8566a0b3565@arm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Makefile.config | 2 ++ tools/perf/util/syscalltbl.c | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index f5a3b402589e..d3318f99006c 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -54,6 +54,8 @@ endif ifeq ($(SRCARCH),arm64) NO_PERF_REGS := 0 + NO_SYSCALL_TABLE := 0 + CFLAGS += -I$(OUTPUT)arch/arm64/include/generated LIBUNWIND_LIBS = -lunwind -lunwind-aarch64 endif diff --git a/tools/perf/util/syscalltbl.c b/tools/perf/util/syscalltbl.c index 0ee7f568d60c..3393d7ee9401 100644 --- a/tools/perf/util/syscalltbl.c +++ b/tools/perf/util/syscalltbl.c @@ -38,6 +38,10 @@ static const char **syscalltbl_native = syscalltbl_powerpc_64; #include const int syscalltbl_native_max_id = SYSCALLTBL_POWERPC_32_MAX_ID; static const char **syscalltbl_native = syscalltbl_powerpc_32; +#elif defined(__aarch64__) +#include +const int syscalltbl_native_max_id = SYSCALLTBL_ARM64_MAX_ID; +static const char **syscalltbl_native = syscalltbl_arm64; #endif struct syscall { -- cgit From 2a9d5050dc84fa2060f08a52f632976923e0fa7e Mon Sep 17 00:00:00 2001 From: Sandipan Das Date: Tue, 3 Jul 2018 17:35:55 +0530 Subject: perf script: Show correct offsets for DWARF-based unwinding When perf/data is recorded with the dwarf call-graph option, the callchain shown by 'perf script' still shows the binary offsets of the userspace symbols instead of their virtual addresses. Since the symbol offset calculation is based on using virtual address as the ip, we see incorrect offsets as well. The use of virtual addresses affects the ability to find out the line number in the corresponding source file to which an address maps to as described in commit 67540759151a ("perf unwind: Use addr_location::addr instead of ip for entries"). 
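As a rough illustration of what that conversion amounts to, the offset used for the source-line lookup is essentially the virtual address minus the start of the mapping (the mapping base 0x7fffb3750000 below is inferred from the sample output further down, not taken from an actual trace):

	/* illustration only; map__map_ip() is the helper used in the fix below */
	u64 addr = map__map_ip(map, ip);
	/* 0x7fffb38aaf28 (virtual ip) - 0x7fffb3750000 (map start) = 0x15af28 (offset) */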
This has also been addressed by temporarily converting the virtual address to the correponding binary offset so that it can be mapped to the source line number correctly. This is a follow-up for commit 19610184693c ("perf script: Show virtual addresses instead of offsets"). This can be verified on a powerpc64le system running Fedora 27 as shown below: # perf probe -x /usr/lib64/libc-2.26.so -a inet_pton # perf record -e probe_libc:inet_pton --call-graph=dwarf ping -6 -c 1 ::1 Before: # perf report --stdio --no-children -s sym,srcline -g address # Samples: 1 of event 'probe_libc:inet_pton' # Event count (approx.): 1 # # Overhead Symbol Source:Line # ........ .................... ........... # 100.00% [.] __GI___inet_pton inet_pton.c | ---gaih_inet getaddrinfo.c:537 (inlined) __GI_getaddrinfo getaddrinfo.c:2304 (inlined) main ping.c:519 generic_start_main libc-start.c:308 (inlined) __libc_start_main libc-start.c:102 ... # perf script -F comm,ip,sym,symoff,srcline,dso ping 15af28 __GI___inet_pton+0xffff000099160008 (/usr/lib64/libc-2.26.so) libc-2.26.so[ffff80004ca0af28] 10fa53 gaih_inet+0xffff000099160f43 libc-2.26.so[ffff80004c9bfa53] (inlined) 1105b3 __GI_getaddrinfo+0xffff000099160163 libc-2.26.so[ffff80004c9c05b3] (inlined) 2d6f main+0xfffffffd9f1003df (/usr/bin/ping) ping[fffffffecf882d6f] 2369f generic_start_main+0xffff00009916013f libc-2.26.so[ffff80004c8d369f] (inlined) 23897 __libc_start_main+0xffff0000991600b7 (/usr/lib64/libc-2.26.so) libc-2.26.so[ffff80004c8d3897] After: # perf report --stdio --no-children -s sym,srcline -g address # Samples: 1 of event 'probe_libc:inet_pton' # Event count (approx.): 1 # # Overhead Symbol Source:Line # ........ .................... ........... # 100.00% [.] __GI___inet_pton inet_pton.c | ---gaih_inet.constprop.7 getaddrinfo.c:537 getaddrinfo getaddrinfo.c:2304 main ping.c:519 generic_start_main.isra.0 libc-start.c:308 __libc_start_main libc-start.c:102 ... # perf script -F comm,ip,sym,symoff,srcline,dso ping 7fffb38aaf28 __GI___inet_pton+0x8 (/usr/lib64/libc-2.26.so) inet_pton.c:68 7fffb385fa53 gaih_inet.constprop.7+0xf43 (/usr/lib64/libc-2.26.so) getaddrinfo.c:537 7fffb38605b3 getaddrinfo+0x163 (/usr/lib64/libc-2.26.so) getaddrinfo.c:2304 130782d6f main+0x3df (/usr/bin/ping) ping.c:519 7fffb377369f generic_start_main.isra.0+0x13f (/usr/lib64/libc-2.26.so) libc-start.c:308 7fffb3773897 __libc_start_main+0xb7 (/usr/lib64/libc-2.26.so) libc-start.c:102 Signed-off-by: Sandipan Das Acked-by: Jiri Olsa Cc: Milian Wolff Cc: Namhyung Kim Cc: Naveen N. 
Rao Cc: Ravi Bangoria Fixes: 67540759151a ("perf unwind: Use addr_location::addr instead of ip for entries") Link: http://lkml.kernel.org/r/20180703120555.32971-1-sandipan@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/machine.c | 9 ++++++++- tools/perf/util/unwind-libdw.c | 2 +- tools/perf/util/unwind-libunwind-local.c | 2 +- 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index e7b4a8b513f2..22dbb6612b41 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -2272,6 +2272,7 @@ static int unwind_entry(struct unwind_entry *entry, void *arg) { struct callchain_cursor *cursor = arg; const char *srcline = NULL; + u64 addr; if (symbol_conf.hide_unresolved && entry->sym == NULL) return 0; @@ -2279,7 +2280,13 @@ static int unwind_entry(struct unwind_entry *entry, void *arg) if (append_inlines(cursor, entry->map, entry->sym, entry->ip) == 0) return 0; - srcline = callchain_srcline(entry->map, entry->sym, entry->ip); + /* + * Convert entry->ip from a virtual address to an offset in + * its corresponding binary. + */ + addr = map__map_ip(entry->map, entry->ip); + + srcline = callchain_srcline(entry->map, entry->sym, addr); return callchain_cursor_append(cursor, entry->ip, entry->map, entry->sym, false, NULL, 0, 0, 0, srcline); diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c index 538db4e5d1e6..6f318b15950e 100644 --- a/tools/perf/util/unwind-libdw.c +++ b/tools/perf/util/unwind-libdw.c @@ -77,7 +77,7 @@ static int entry(u64 ip, struct unwind_info *ui) if (__report_module(&al, ip, ui)) return -1; - e->ip = al.addr; + e->ip = ip; e->map = al.map; e->sym = al.sym; diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c index 6a11bc7e6b27..79f521a552cf 100644 --- a/tools/perf/util/unwind-libunwind-local.c +++ b/tools/perf/util/unwind-libunwind-local.c @@ -575,7 +575,7 @@ static int entry(u64 ip, struct thread *thread, struct addr_location al; e.sym = thread__find_symbol(thread, PERF_RECORD_MISC_USER, ip, &al); - e.ip = al.addr; + e.ip = ip; e.map = al.map; pr_debug("unwind: %s:ip = 0x%" PRIx64 " (0x%" PRIx64 ")\n", -- cgit From e8fedff1cc729fd227924305152ccc6f580e8c83 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Thu, 12 Jul 2018 15:52:02 +0200 Subject: perf tools: Synthesize GROUP_DESC feature in pipe mode Stephan reported, that pipe mode does not carry the group information and thus the piped report won't display the grouped output for following command: # perf record -e '{cycles,instructions,branches}' -a sleep 4 | perf report It has no idea about the group setup, so it will display events separately: # Overhead Command Shared Object ... # ........ ............... ....................... # 6.71% swapper [kernel.kallsyms] 2.28% offlineimap libpython2.7.so.1.0 0.78% perf [kernel.kallsyms] ... Fix GROUP_DESC feature record to be synthesized in pipe mode, so the report output is grouped if there are groups defined in record: # Overhead Command Shared ... # ........................ ............... ....... # 7.57% 0.16% 0.30% swapper [kernel 1.87% 3.15% 2.46% offlineimap libpyth 1.33% 0.00% 0.00% perf [kernel ... 
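The fix itself is a one-flag change: moving GROUP_DESC from FEAT_OPN to FEAT_OPR marks the feature as one that perf_event__synthesize_features() emits as a synthesized record in pipe mode (presumably by setting .synthesize = true in the feature table -- an assumption, not something visible in the diff below), so the 'perf report' side of the pipe can reconstruct the group setup.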
Reported-by: Stephane Eranian Signed-off-by: Jiri Olsa Tested-by: Arnaldo Carvalho de Melo Tested-by: Stephane Eranian Cc: Alexander Shishkin Cc: David Ahern Cc: David Carrillo-Cisneros Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20180712135202.14774-1-jolsa@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/header.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 653ff65aa2c3..5af58aac91ad 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -2587,7 +2587,7 @@ static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = { FEAT_OPR(NUMA_TOPOLOGY, numa_topology, true), FEAT_OPN(BRANCH_STACK, branch_stack, false), FEAT_OPR(PMU_MAPPINGS, pmu_mappings, false), - FEAT_OPN(GROUP_DESC, group_desc, false), + FEAT_OPR(GROUP_DESC, group_desc, false), FEAT_OPN(AUXTRACE, auxtrace, false), FEAT_OPN(STAT, stat, false), FEAT_OPN(CACHE, cache, true), -- cgit From f8b2ebb532e0553b60ae5ad1b84d1d4f0c285752 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Thu, 19 Jul 2018 16:33:42 +0200 Subject: perf machine: Add threads__get_last_match function Separating threads::last_match cache read/check into separate threads__get_last_match function. This will be useful in following patch. Signed-off-by: Jiri Olsa Cc: Alexander Shishkin Cc: Andi Kleen Cc: David Ahern Cc: Kan Liang Cc: Lukasz Odzioba Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Wang Nan Link: http://lkml.kernel.org/r/20180719143345.12963-2-jolsa@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/machine.c | 39 ++++++++++++++++++++++++++------------- 1 file changed, 26 insertions(+), 13 deletions(-) diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index 22dbb6612b41..df41aa1a4cf9 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -408,23 +408,16 @@ out_err: } /* - * Caller must eventually drop thread->refcnt returned with a successful - * lookup/new thread inserted. + * Front-end cache - TID lookups come in blocks, + * so most of the time we dont have to look up + * the full rbtree: */ -static struct thread *____machine__findnew_thread(struct machine *machine, - struct threads *threads, - pid_t pid, pid_t tid, - bool create) +static struct thread* +threads__get_last_match(struct threads *threads, struct machine *machine, + int pid, int tid) { - struct rb_node **p = &threads->entries.rb_node; - struct rb_node *parent = NULL; struct thread *th; - /* - * Front-end cache - TID lookups come in blocks, - * so most of the time we dont have to look up - * the full rbtree: - */ th = threads->last_match; if (th != NULL) { if (th->tid == tid) { @@ -435,6 +428,26 @@ static struct thread *____machine__findnew_thread(struct machine *machine, threads->last_match = NULL; } + return NULL; +} + +/* + * Caller must eventually drop thread->refcnt returned with a successful + * lookup/new thread inserted. 
+ */ +static struct thread *____machine__findnew_thread(struct machine *machine, + struct threads *threads, + pid_t pid, pid_t tid, + bool create) +{ + struct rb_node **p = &threads->entries.rb_node; + struct rb_node *parent = NULL; + struct thread *th; + + th = threads__get_last_match(threads, machine, pid, tid); + if (th) + return th; + while (*p != NULL) { parent = *p; th = rb_entry(parent, struct thread, rb_node); -- cgit From 67fda0f32cd9428cb9a3166796162097d7fcbcbf Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Thu, 19 Jul 2018 16:33:43 +0200 Subject: perf machine: Add threads__set_last_match function Separating threads::last_match cache set into separate threads__set_last_match function. This will be useful in following patch. Signed-off-by: Jiri Olsa Cc: Alexander Shishkin Cc: Andi Kleen Cc: David Ahern Cc: Kan Liang Cc: Lukasz Odzioba Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Wang Nan Link: http://lkml.kernel.org/r/20180719143345.12963-3-jolsa@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/machine.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index df41aa1a4cf9..8992fcf42257 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -431,6 +431,12 @@ threads__get_last_match(struct threads *threads, struct machine *machine, return NULL; } +static void +threads__set_last_match(struct threads *threads, struct thread *th) +{ + threads->last_match = th; +} + /* * Caller must eventually drop thread->refcnt returned with a successful * lookup/new thread inserted. @@ -453,7 +459,7 @@ static struct thread *____machine__findnew_thread(struct machine *machine, th = rb_entry(parent, struct thread, rb_node); if (th->tid == tid) { - threads->last_match = th; + threads__set_last_match(threads, th); machine__update_thread_pid(machine, th, pid); return thread__get(th); } @@ -490,7 +496,7 @@ static struct thread *____machine__findnew_thread(struct machine *machine, * It is now in the rbtree, get a ref */ thread__get(th); - threads->last_match = th; + threads__set_last_match(threads, th); ++threads->nr; } @@ -1648,7 +1654,7 @@ static void __machine__remove_thread(struct machine *machine, struct thread *th, struct threads *threads = machine__threads(machine, th->tid); if (threads->last_match == th) - threads->last_match = NULL; + threads__set_last_match(threads, NULL); BUG_ON(refcount_read(&th->refcnt) == 0); if (lock) -- cgit From b57334b9453949bf81281321d14d86d60aee6fde Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Thu, 19 Jul 2018 16:33:44 +0200 Subject: perf machine: Use last_match threads cache only in single thread mode There's an issue with using threads::last_match in multithread mode which is enabled during the perf top synthesize. It might crash with following assertion: perf: ...include/linux/refcount.h:109: refcount_inc: Assertion `!(!refcount_inc_not_zero(r))' failed. 
The gdb backtrace looks like this: 0x00007ffff50839fb in raise () from /lib64/libc.so.6 (gdb) #0 0x00007ffff50839fb in raise () from /lib64/libc.so.6 #1 0x00007ffff5085800 in abort () from /lib64/libc.so.6 #2 0x00007ffff507c0da in __assert_fail_base () from /lib64/libc.so.6 #3 0x00007ffff507c152 in __assert_fail () from /lib64/libc.so.6 #4 0x0000000000535ff9 in refcount_inc (r=0x7fffe8009a70) at ...include/linux/refcount.h:109 #5 0x0000000000536771 in thread__get (thread=0x7fffe8009a40) at util/thread.c:115 #6 0x0000000000523cd0 in ____machine__findnew_thread (machine=0xbfde38, threads=0xbfdf28, pid=2, tid=2, create=true) at util/machine.c:432 #7 0x0000000000523eb4 in __machine__findnew_thread (machine=0xbfde38, pid=2, tid=2) at util/machine.c:489 #8 0x0000000000523f24 in machine__findnew_thread (machine=0xbfde38, pid=2, tid=2) at util/machine.c:499 #9 0x0000000000526fbe in machine__process_fork_event (machine=0xbfde38, ... The failing assertion is this one: REFCOUNT_WARN(!refcount_inc_not_zero(r), ... the problem is that we don't serialize access to threads::last_match. We serialize the access to the threads tree, but we don't care how's threads::last_match being accessed. Both locked/unlocked paths use that data and can set it. In multithreaded mode we can end up with invalid object in thread__get call, like in following paths race: thread 1 ... machine__findnew_thread down_write(&threads->lock); __machine__findnew_thread ____machine__findnew_thread th = threads->last_match; if (th->tid == tid) { thread__get thread 2 ... machine__find_thread down_read(&threads->lock); __machine__findnew_thread ____machine__findnew_thread th = threads->last_match; if (th->tid == tid) { thread__get thread 3 ... machine__process_fork_event machine__remove_thread __machine__remove_thread threads->last_match = NULL thread__put thread__put Thread 1 and 2 might got stale last_match, before thread 3 clears it. Thread 1 and 2 then race with thread 3's thread__put and they might trigger the refcnt == 0 assertion above. The patch is disabling the last_match cache for multiple thread mode. It was originally meant for single thread scenarios, where it's common to have multiple sequential searches of the same thread. In multithread mode this does not make sense, because top's threads processes different /proc entries and so the 'struct threads' object is queried for various threads. Moreover we'd need to add more locks to make it work. 
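In code terms, the window being avoided looks roughly like this (a simplified sketch, not the exact perf source):

	struct thread *th = threads->last_match;    /* may already be a stale pointer */
	if (th != NULL && th->tid == tid)
		/* if the final thread__put() already ran elsewhere, the refcount
		 * is zero here and refcount_inc() inside thread__get() fires the
		 * assertion quoted above */
		return thread__get(th);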
Signed-off-by: Jiri Olsa Cc: Alexander Shishkin Cc: Andi Kleen Cc: David Ahern Cc: Kan Liang Cc: Lukasz Odzioba Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Wang Nan Link: http://lkml.kernel.org/r/20180719143345.12963-4-jolsa@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/machine.c | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index 8992fcf42257..b300a3973448 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -413,8 +413,8 @@ out_err: * the full rbtree: */ static struct thread* -threads__get_last_match(struct threads *threads, struct machine *machine, - int pid, int tid) +__threads__get_last_match(struct threads *threads, struct machine *machine, + int pid, int tid) { struct thread *th; @@ -431,12 +431,31 @@ threads__get_last_match(struct threads *threads, struct machine *machine, return NULL; } +static struct thread* +threads__get_last_match(struct threads *threads, struct machine *machine, + int pid, int tid) +{ + struct thread *th = NULL; + + if (perf_singlethreaded) + th = __threads__get_last_match(threads, machine, pid, tid); + + return th; +} + static void -threads__set_last_match(struct threads *threads, struct thread *th) +__threads__set_last_match(struct threads *threads, struct thread *th) { threads->last_match = th; } +static void +threads__set_last_match(struct threads *threads, struct thread *th) +{ + if (perf_singlethreaded) + __threads__set_last_match(threads, th); +} + /* * Caller must eventually drop thread->refcnt returned with a successful * lookup/new thread inserted. -- cgit From 46b3722cc7765582354488da633aafffcb138458 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Fri, 20 Jul 2018 12:17:40 +0200 Subject: perf tools: Fix struct comm_str removal crash We occasionaly hit following assert failure in 'perf top', when processing the /proc info in multiple threads. perf: ...include/linux/refcount.h:109: refcount_inc: Assertion `!(!refcount_inc_not_zero(r))' failed. The gdb backtrace looks like this: [Switching to Thread 0x7ffff11ba700 (LWP 13749)] 0x00007ffff50839fb in raise () from /lib64/libc.so.6 (gdb) #0 0x00007ffff50839fb in raise () from /lib64/libc.so.6 #1 0x00007ffff5085800 in abort () from /lib64/libc.so.6 #2 0x00007ffff507c0da in __assert_fail_base () from /lib64/libc.so.6 #3 0x00007ffff507c152 in __assert_fail () from /lib64/libc.so.6 #4 0x0000000000535373 in refcount_inc (r=0x7fffdc009be0) at ...include/linux/refcount.h:109 #5 0x00000000005354f1 in comm_str__get (cs=0x7fffdc009bc0) at util/comm.c:24 #6 0x00000000005356bd in __comm_str__findnew (str=0x7fffd000b260 ":2", root=0xbed5c0 ) at util/comm.c:72 #7 0x000000000053579e in comm_str__findnew (str=0x7fffd000b260 ":2", root=0xbed5c0 ) at util/comm.c:95 #8 0x000000000053582e in comm__new (str=0x7fffd000b260 ":2", timestamp=0, exec=false) at util/comm.c:111 #9 0x00000000005363bc in thread__new (pid=2, tid=2) at util/thread.c:57 #10 0x0000000000523da0 in ____machine__findnew_thread (machine=0xbfde38, threads=0xbfdf28, pid=2, tid=2, create=true) at util/machine.c:457 #11 0x0000000000523eb4 in __machine__findnew_thread (machine=0xbfde38, ... The failing assertion is this one: REFCOUNT_WARN(!refcount_inc_not_zero(r), ... The problem is that we keep global comm_str_root list, which is accessed by multiple threads during the 'perf top' startup and following 2 paths can race: thread 1: ... 
thread__new comm__new comm_str__findnew down_write(&comm_str_lock); __comm_str__findnew comm_str__get thread 2: ... comm__override or comm__free comm_str__put refcount_dec_and_test down_write(&comm_str_lock); rb_erase(&cs->rb_node, &comm_str_root); Because thread 2 first decrements the refcnt and only after then it removes the struct comm_str from the list, the thread 1 can find this object on the list with refcnt equls to 0 and hit the assert. This patch fixes the thread 1 __comm_str__findnew path, by ignoring objects that already dropped the refcnt to 0. For the rest of the objects we take the refcnt before comparing its name and release it afterwards with comm_str__put, which can also release the object completely. Signed-off-by: Jiri Olsa Acked-by: Namhyung Kim Cc: Alexander Shishkin Cc: Andi Kleen Cc: David Ahern Cc: Kan Liang Cc: Lukasz Odzioba Cc: Peter Zijlstra Cc: Wang Nan Cc: kernel-team@lge.com Link: http://lkml.kernel.org/r/20180720101740.GA27176@krava Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/comm.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/tools/perf/util/comm.c b/tools/perf/util/comm.c index 7798a2cc8a86..31279a7bd919 100644 --- a/tools/perf/util/comm.c +++ b/tools/perf/util/comm.c @@ -20,9 +20,10 @@ static struct rw_semaphore comm_str_lock = {.lock = PTHREAD_RWLOCK_INITIALIZER,} static struct comm_str *comm_str__get(struct comm_str *cs) { - if (cs) - refcount_inc(&cs->refcnt); - return cs; + if (cs && refcount_inc_not_zero(&cs->refcnt)) + return cs; + + return NULL; } static void comm_str__put(struct comm_str *cs) @@ -67,9 +68,14 @@ struct comm_str *__comm_str__findnew(const char *str, struct rb_root *root) parent = *p; iter = rb_entry(parent, struct comm_str, rb_node); + /* + * If we race with comm_str__put, iter->refcnt is 0 + * and it will be removed within comm_str__put call + * shortly, ignore it in this search. + */ cmp = strcmp(str, iter->str); - if (!cmp) - return comm_str__get(iter); + if (!cmp && comm_str__get(iter)) + return iter; if (cmp < 0) p = &(*p)->rb_left; -- cgit From 2d6cae13f10d5d8b370038dbfdf60317c22b9f52 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Fri, 20 Jul 2018 13:00:33 +0200 Subject: perf tools: Use perf_evsel__match instead of open coded equivalent Use perf_evsel__match() helper in perf_evsel__is_bpf_output(). Signed-off-by: Jiri Olsa Cc: Alexander Shishkin Cc: David Ahern Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20180720110036.32251-1-jolsa@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/evsel.h | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index d277930b19a1..890babf9ce86 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -402,10 +402,7 @@ bool perf_evsel__is_function_event(struct perf_evsel *evsel); static inline bool perf_evsel__is_bpf_output(struct perf_evsel *evsel) { - struct perf_event_attr *attr = &evsel->attr; - - return (attr->config == PERF_COUNT_SW_BPF_OUTPUT) && - (attr->type == PERF_TYPE_SOFTWARE); + return perf_evsel__match(evsel, SOFTWARE, SW_BPF_OUTPUT); } struct perf_attr_details { -- cgit From 0aa802a79469a86ebe143019144cd4df8ae852e4 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Fri, 20 Jul 2018 13:00:34 +0200 Subject: perf stat: Get rid of extra clock display function There's no reason to have separate function to display clock events. It's only purpose was to convert the nanosecond value into microseconds. 
We do that now in generic code, if the unit and scale values are properly set, which this patch do for clock events. The output differs in the unit field being displayed in its columns rather than having it added as a suffix of the event name. Plus the value is rounded into 2 decimal numbers as for any other event. Before: # perf stat -e cpu-clock,task-clock -C 0 sleep 3 Performance counter stats for 'CPU(s) 0': 3001.123137 cpu-clock (msec) # 1.000 CPUs utilized 3001.133250 task-clock (msec) # 1.000 CPUs utilized 3.001159813 seconds time elapsed Now: # perf stat -e cpu-clock,task-clock -C 0 sleep 3 Performance counter stats for 'CPU(s) 0': 3,001.05 msec cpu-clock # 1.000 CPUs utilized 3,001.05 msec task-clock # 1.000 CPUs utilized 3.001077794 seconds time elapsed There's a small difference in csv output, as we now output the unit field, which was empty before. It's in the proper spot, so there's no compatibility issue. Before: # perf stat -e cpu-clock,task-clock -C 0 -x, sleep 3 3001.065177,,cpu-clock,3001064187,100.00,1.000,CPUs utilized 3001.077085,,task-clock,3001077085,100.00,1.000,CPUs utilized # perf stat -e cpu-clock,task-clock -C 0 -x, sleep 3 3000.80,msec,cpu-clock,3000799026,100.00,1.000,CPUs utilized 3000.80,msec,task-clock,3000799550,100.00,1.000,CPUs utilized Add perf_evsel__is_clock to replace nsec_counter. Signed-off-by: Jiri Olsa Tested-by: Arnaldo Carvalho de Melo Cc: Alexander Shishkin Cc: Andi Kleen Cc: David Ahern Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20180720110036.32251-2-jolsa@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-stat.c | 48 ++----------------------------------------- tools/perf/util/evsel.c | 11 ++++++++++ tools/perf/util/evsel.h | 6 ++++++ tools/perf/util/stat-shadow.c | 5 ++--- 4 files changed, 21 insertions(+), 49 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index dfd13d6e2931..d097b5b47eb8 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -296,18 +296,6 @@ static int create_perf_stat_counter(struct perf_evsel *evsel) return perf_evsel__open_per_thread(evsel, evsel_list->threads); } -/* - * Does the counter have nsecs as a unit? - */ -static inline int nsec_counter(struct perf_evsel *evsel) -{ - if (perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) || - perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK)) - return 1; - - return 0; -} - static int process_synthesized_event(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample __maybe_unused, @@ -1058,34 +1046,6 @@ static void print_metric_header(void *ctx, const char *color __maybe_unused, fprintf(os->fh, "%*s ", metric_only_len, unit); } -static void nsec_printout(int id, int nr, struct perf_evsel *evsel, double avg) -{ - FILE *output = stat_config.output; - double msecs = avg / NSEC_PER_MSEC; - const char *fmt_v, *fmt_n; - char name[25]; - - fmt_v = csv_output ? "%.6f%s" : "%18.6f%s"; - fmt_n = csv_output ? "%s" : "%-25s"; - - aggr_printout(evsel, id, nr); - - scnprintf(name, sizeof(name), "%s%s", - perf_evsel__name(evsel), csv_output ? 
"" : " (msec)"); - - fprintf(output, fmt_v, msecs, csv_sep); - - if (csv_output) - fprintf(output, "%s%s", evsel->unit, csv_sep); - else - fprintf(output, "%-*s%s", unit_width, evsel->unit, csv_sep); - - fprintf(output, fmt_n, name); - - if (evsel->cgrp) - fprintf(output, "%s%s", csv_sep, evsel->cgrp->name); -} - static int first_shadow_cpu(struct perf_evsel *evsel, int id) { int i; @@ -1241,11 +1201,7 @@ static void printout(int id, int nr, struct perf_evsel *counter, double uval, return; } - if (metric_only) - /* nothing */; - else if (nsec_counter(counter)) - nsec_printout(id, nr, counter, uval); - else + if (!metric_only) abs_printout(id, nr, counter, uval); out.print_metric = pm; @@ -1331,7 +1287,7 @@ static void collect_all_aliases(struct perf_evsel *counter, alias->scale != counter->scale || alias->cgrp != counter->cgrp || strcmp(alias->unit, counter->unit) || - nsec_counter(alias) != nsec_counter(counter)) + perf_evsel__is_clock(alias) != perf_evsel__is_clock(counter)) break; alias->merged_stat = true; cb(alias, data, false); diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 94fce4f537e9..5285da0417c5 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -260,6 +260,17 @@ struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx) evsel->attr.sample_period = 1; } + if (perf_evsel__is_clock(evsel)) { + /* + * The evsel->unit points to static alias->unit + * so it's ok to use static string in here. + */ + static const char *unit = "msec"; + + evsel->unit = unit; + evsel->scale = 1e-6; + } + return evsel; } diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index 890babf9ce86..973c03167947 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -405,6 +405,12 @@ static inline bool perf_evsel__is_bpf_output(struct perf_evsel *evsel) return perf_evsel__match(evsel, SOFTWARE, SW_BPF_OUTPUT); } +static inline bool perf_evsel__is_clock(struct perf_evsel *evsel) +{ + return perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) || + perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK); +} + struct perf_attr_details { bool freq; bool verbose; diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c index 594d14a02b67..99990f5f2512 100644 --- a/tools/perf/util/stat-shadow.c +++ b/tools/perf/util/stat-shadow.c @@ -913,11 +913,10 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel, ratio = total / avg; print_metric(ctxp, NULL, "%8.0f", "cycles / elision", ratio); - } else if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK) || - perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK)) { + } else if (perf_evsel__is_clock(evsel)) { if ((ratio = avg_stats(&walltime_nsecs_stats)) != 0) print_metric(ctxp, NULL, "%8.3f", "CPUs utilized", - avg / ratio); + avg / (ratio * evsel->scale)); else print_metric(ctxp, NULL, NULL, "CPUs utilized", 0); } else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_BUBBLES)) { -- cgit From 9ef0112442bdddef5fb55adf20b3a5464b33de75 Mon Sep 17 00:00:00 2001 From: Thomas Richter Date: Tue, 24 Jul 2018 15:48:58 +0200 Subject: perf test: Fix subtest number when showing results Perf test 40 for example has several subtests numbered 1-4 when displaying the start of the subtest. When the subtest results are displayed the subtests are numbered 0-3. Use this command to generate trace output: [root@s35lp76 perf]# ./perf test -Fv 40 2>/tmp/bpf1 Fix this by adjusting the subtest number when show the subtest result. 
Output before: [root@s35lp76 perf]# egrep '(^40\.[0-4]| subtest [0-4]:)' /tmp/bpf1 40.1: Basic BPF filtering : BPF filter subtest 0: Ok 40.2: BPF pinning : BPF filter subtest 1: Ok 40.3: BPF prologue generation : BPF filter subtest 2: Ok 40.4: BPF relocation checker : BPF filter subtest 3: Ok [root@s35lp76 perf]# Output after: root@s35lp76 ~]# egrep '(^40\.[0-4]| subtest [0-4]:)' /tmp/bpf1 40.1: Basic BPF filtering : BPF filter subtest 1: Ok 40.2: BPF pinning : BPF filter subtest 2: Ok 40.3: BPF prologue generation : BPF filter subtest 3: Ok 40.4: BPF relocation checker : BPF filter subtest 4: Ok [root@s35lp76 ~]# Signed-off-by: Thomas Richter Reviewed-by: Hendrik Brueckner Cc: Heiko Carstens Cc: Martin Schwidefsky Link: http://lkml.kernel.org/r/20180724134858.100644-1-tmricht@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/builtin-test.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c index dd850a26d579..4f5de8245b32 100644 --- a/tools/perf/tests/builtin-test.c +++ b/tools/perf/tests/builtin-test.c @@ -599,7 +599,7 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist) for (subi = 0; subi < subn; subi++) { pr_info("%2d.%1d: %-*s:", i, subi + 1, subw, t->subtest.get_desc(subi)); - err = test_and_print(t, skip, subi); + err = test_and_print(t, skip, subi + 1); if (err != TEST_OK && t->subtest.skip_if_fail) skip = true; } -- cgit From 3196234039155a33c80e52d7aa41a29dce9a5c51 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Thu, 8 Mar 2018 18:15:39 -0800 Subject: perf/x86/intel: Introduce PMU flag for Extended PEBS The Extended PEBS feature, introduced in the Goldmont Plus microarchitecture, supports all events as "Extended PEBS". Introduce flag PMU_FL_PEBS_ALL to indicate the platforms which support extended PEBS. To support all events, it needs to support all constraints for PEBS. To avoid duplicating all the constraints in the PEBS table, making the PEBS code search the normal constraints too. Based-on-code-from: Andi Kleen Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: acme@kernel.org Link: http://lkml.kernel.org/r/20180309021542.11374-1-kan.liang@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/events/intel/ds.c | 7 +++++++ arch/x86/events/perf_event.h | 1 + 2 files changed, 8 insertions(+) diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 8dbba77e0518..9fd9cb1d2cc8 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -871,6 +871,13 @@ struct event_constraint *intel_pebs_constraints(struct perf_event *event) } } + /* + * Extended PEBS support + * Makes the PEBS code search the normal constraints. 
+ */ + if (x86_pmu.flags & PMU_FL_PEBS_ALL) + return NULL; + return &emptyconstraint; } diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 2430398befd8..156286335351 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -673,6 +673,7 @@ do { \ #define PMU_FL_HAS_RSP_1 0x2 /* has 2 equivalent offcore_rsp regs */ #define PMU_FL_EXCL_CNTRS 0x4 /* has exclusive counter requirements */ #define PMU_FL_EXCL_ENABLED 0x8 /* exclusive counter active */ +#define PMU_FL_PEBS_ALL 0x10 /* all events are valid PEBS events */ #define EVENT_VAR(_id) event_attr_##_id #define EVENT_PTR(_id) &event_attr_##_id.attr.attr -- cgit From 4f08b6255adb1e379b4fcc8d304ec1263d465677 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Thu, 8 Mar 2018 18:15:40 -0800 Subject: perf/x86/intel: Support PEBS on fixed counters The Extended PEBS feature supports PEBS on fixed-function performance counters as well as all four general purpose counters. It has to change the order of PEBS and fixed counter enabling to make sure PEBS is enabled for the fixed counters. The change of the order doesn't impact the behavior of current code on other platforms which don't support extended PEBS. Because there is no dependency among those enable/disable functions. Don't enable IRQ generation (0x8) for MSR_ARCH_PERFMON_FIXED_CTR_CTRL. The PEBS ucode will handle the interrupt generation. Based-on-code-from: Andi Kleen Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: acme@kernel.org Link: http://lkml.kernel.org/r/20180309021542.11374-2-kan.liang@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/events/intel/core.c | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 86f0c15dcc2d..d5a3124605f5 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2041,15 +2041,15 @@ static void intel_pmu_disable_event(struct perf_event *event) cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx); cpuc->intel_cp_status &= ~(1ull << hwc->idx); + if (unlikely(event->attr.precise_ip)) + intel_pmu_pebs_disable(event); + if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { intel_pmu_disable_fixed(hwc); return; } x86_pmu_disable_event(event); - - if (unlikely(event->attr.precise_ip)) - intel_pmu_pebs_disable(event); } static void intel_pmu_del_event(struct perf_event *event) @@ -2068,17 +2068,19 @@ static void intel_pmu_read_event(struct perf_event *event) x86_perf_event_update(event); } -static void intel_pmu_enable_fixed(struct hw_perf_event *hwc) +static void intel_pmu_enable_fixed(struct perf_event *event) { + struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx - INTEL_PMC_IDX_FIXED; - u64 ctrl_val, bits, mask; + u64 ctrl_val, mask, bits = 0; /* - * Enable IRQ generation (0x8), + * Enable IRQ generation (0x8), if not PEBS, * and enable ring-3 counting (0x2) and ring-0 counting (0x1) * if requested: */ - bits = 0x8ULL; + if (!event->attr.precise_ip) + bits |= 0x8; if (hwc->config & ARCH_PERFMON_EVENTSEL_USR) bits |= 0x2; if (hwc->config & ARCH_PERFMON_EVENTSEL_OS) @@ -2120,14 +2122,14 @@ static void intel_pmu_enable_event(struct perf_event *event) if (unlikely(event_is_checkpointed(event))) cpuc->intel_cp_status |= (1ull << hwc->idx); + if (unlikely(event->attr.precise_ip)) + 
intel_pmu_pebs_enable(event); + if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { - intel_pmu_enable_fixed(hwc); + intel_pmu_enable_fixed(event); return; } - if (unlikely(event->attr.precise_ip)) - intel_pmu_pebs_enable(event); - __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE); } -- cgit From ec71a398c1bf6d8188cb24ebab6f5202523d95e1 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Thu, 8 Mar 2018 18:15:41 -0800 Subject: perf/x86/intel/ds: Handle PEBS overflow for fixed counters The pebs_drain() need to support fixed counters. The DS Save Area now include "counter reset value" fields for each fixed counters. Extend the related variables (e.g. mask, counters, error) to support fixed counters. There is no extended PEBS in PEBS v2 and earlier PEBS format. Only need to change the code for PEBS v3 and later PEBS format. Extend the pebs_event_reset[] logic to support new "counter reset value" fields. Increase the reserve space for fixed counters. Based-on-code-from: Andi Kleen Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: acme@kernel.org Link: http://lkml.kernel.org/r/20180309021542.11374-3-kan.liang@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/events/intel/core.c | 5 ++++- arch/x86/events/intel/ds.c | 36 +++++++++++++++++++++++++++--------- arch/x86/include/asm/intel_ds.h | 3 ++- 3 files changed, 33 insertions(+), 11 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index d5a3124605f5..b1a49a108a59 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2282,7 +2282,10 @@ again: * counters from the GLOBAL_STATUS mask and we always process PEBS * events via drain_pebs(). */ - status &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK); + if (x86_pmu.flags & PMU_FL_PEBS_ALL) + status &= ~cpuc->pebs_enabled; + else + status &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK); /* * PEBS overflow sets bit 62 in the global status register diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 9fd9cb1d2cc8..595b96ae8a00 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -903,10 +903,16 @@ static inline void pebs_update_threshold(struct cpu_hw_events *cpuc) { struct debug_store *ds = cpuc->ds; u64 threshold; + int reserved; + + if (x86_pmu.flags & PMU_FL_PEBS_ALL) + reserved = x86_pmu.max_pebs_events + x86_pmu.num_counters_fixed; + else + reserved = x86_pmu.max_pebs_events; if (cpuc->n_pebs == cpuc->n_large_pebs) { threshold = ds->pebs_absolute_maximum - - x86_pmu.max_pebs_events * x86_pmu.pebs_record_size; + reserved * x86_pmu.pebs_record_size; } else { threshold = ds->pebs_buffer_base + x86_pmu.pebs_record_size; } @@ -970,7 +976,11 @@ void intel_pmu_pebs_enable(struct perf_event *event) * This must be done in pmu::start(), because PERF_EVENT_IOC_PERIOD. 
*/ if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) { - ds->pebs_event_reset[hwc->idx] = + unsigned int idx = hwc->idx; + + if (idx >= INTEL_PMC_IDX_FIXED) + idx = MAX_PEBS_EVENTS + (idx - INTEL_PMC_IDX_FIXED); + ds->pebs_event_reset[idx] = (u64)(-hwc->sample_period) & x86_pmu.cntval_mask; } else { ds->pebs_event_reset[hwc->idx] = 0; @@ -1488,9 +1498,10 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) struct debug_store *ds = cpuc->ds; struct perf_event *event; void *base, *at, *top; - short counts[MAX_PEBS_EVENTS] = {}; - short error[MAX_PEBS_EVENTS] = {}; - int bit, i; + short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {}; + short error[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {}; + int bit, i, size; + u64 mask; if (!x86_pmu.pebs_active) return; @@ -1500,6 +1511,13 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) ds->pebs_index = ds->pebs_buffer_base; + mask = (1ULL << x86_pmu.max_pebs_events) - 1; + size = x86_pmu.max_pebs_events; + if (x86_pmu.flags & PMU_FL_PEBS_ALL) { + mask |= ((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED; + size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed; + } + if (unlikely(base >= top)) { /* * The drain_pebs() could be called twice in a short period @@ -1509,7 +1527,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) * update the event->count for this case. */ for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled, - x86_pmu.max_pebs_events) { + size) { event = cpuc->events[bit]; if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) intel_pmu_save_and_restart_reload(event, 0); @@ -1522,12 +1540,12 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) u64 pebs_status; pebs_status = p->status & cpuc->pebs_enabled; - pebs_status &= (1ULL << x86_pmu.max_pebs_events) - 1; + pebs_status &= mask; /* PEBS v3 has more accurate status bits */ if (x86_pmu.intel_cap.pebs_format >= 3) { for_each_set_bit(bit, (unsigned long *)&pebs_status, - x86_pmu.max_pebs_events) + size) counts[bit]++; continue; @@ -1575,7 +1593,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) counts[bit]++; } - for (bit = 0; bit < x86_pmu.max_pebs_events; bit++) { + for (bit = 0; bit < size; bit++) { if ((counts[bit] == 0) && (error[bit] == 0)) continue; diff --git a/arch/x86/include/asm/intel_ds.h b/arch/x86/include/asm/intel_ds.h index 62a9f4966b42..ae26df1c2789 100644 --- a/arch/x86/include/asm/intel_ds.h +++ b/arch/x86/include/asm/intel_ds.h @@ -8,6 +8,7 @@ /* The maximal number of PEBS events: */ #define MAX_PEBS_EVENTS 8 +#define MAX_FIXED_PEBS_EVENTS 3 /* * A debug store configuration. @@ -23,7 +24,7 @@ struct debug_store { u64 pebs_index; u64 pebs_absolute_maximum; u64 pebs_interrupt_threshold; - u64 pebs_event_reset[MAX_PEBS_EVENTS]; + u64 pebs_event_reset[MAX_PEBS_EVENTS + MAX_FIXED_PEBS_EVENTS]; } __aligned(PAGE_SIZE); DECLARE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store); -- cgit From a38b0ba1b7d2e7a6d19877540240e8a4352fc93c Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Thu, 8 Mar 2018 18:15:42 -0800 Subject: perf/x86/intel: Support Extended PEBS for Goldmont Plus Enable the extended PEBS for Goldmont Plus. There is no specific PEBS constrains for Goldmont Plus. Removing the pebs_constraints for Goldmont Plus. 
Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: acme@kernel.org Link: http://lkml.kernel.org/r/20180309021542.11374-4-kan.liang@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/events/intel/core.c | 2 +- arch/x86/events/intel/ds.c | 6 ------ 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index b1a49a108a59..035c37481f57 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -4077,7 +4077,6 @@ __init int intel_pmu_init(void) intel_pmu_lbr_init_skl(); x86_pmu.event_constraints = intel_slm_event_constraints; - x86_pmu.pebs_constraints = intel_glp_pebs_event_constraints; x86_pmu.extra_regs = intel_glm_extra_regs; /* * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS @@ -4087,6 +4086,7 @@ __init int intel_pmu_init(void) x86_pmu.pebs_prec_dist = true; x86_pmu.lbr_pt_coexist = true; x86_pmu.flags |= PMU_FL_HAS_RSP_1; + x86_pmu.flags |= PMU_FL_PEBS_ALL; x86_pmu.get_event_constraints = glp_get_event_constraints; x86_pmu.cpu_events = glm_events_attrs; /* Goldmont Plus has 4-wide pipeline */ diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 595b96ae8a00..b7b01d762d32 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -713,12 +713,6 @@ struct event_constraint intel_glm_pebs_event_constraints[] = { EVENT_CONSTRAINT_END }; -struct event_constraint intel_glp_pebs_event_constraints[] = { - /* Allow all events as PEBS with no flags */ - INTEL_ALL_EVENT_CONSTRAINT(0, 0xf), - EVENT_CONSTRAINT_END -}; - struct event_constraint intel_nehalem_pebs_event_constraints[] = { INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */ INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ -- cgit From 61b229ce2c152e7700ca603c7b86cdd99619db01 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 25 Jul 2018 14:24:02 -0300 Subject: perf trace beauty: Default header_dir to cwd to work without parms Useful when checking the effects of header synchs for the files it uses as a input to generate string tables, in retrospect this is how it should've been done from day 1, not requiring the header_dir to be set on the Makefile, will change everything later, so that the only parm, common to all generators will be $(srctree) and $(beauty_outdir). 
So, to see what it generates, just call it without any parameters: $ tools/perf/trace/beauty/vhost_virtio_ioctl.sh static const char *vhost_virtio_ioctl_cmds[] = { [0x00] = "SET_FEATURES", [0x01] = "SET_OWNER", [0x02] = "RESET_OWNER", [0x03] = "SET_MEM_TABLE", [0x04] = "SET_LOG_BASE", [0x07] = "SET_LOG_FD", [0x10] = "SET_VRING_NUM", [0x11] = "SET_VRING_ADDR", [0x12] = "SET_VRING_BASE", [0x13] = "SET_VRING_ENDIAN", [0x14] = "GET_VRING_ENDIAN", [0x20] = "SET_VRING_KICK", [0x21] = "SET_VRING_CALL", [0x22] = "SET_VRING_ERR", [0x23] = "SET_VRING_BUSYLOOP_TIMEOUT", [0x24] = "GET_VRING_BUSYLOOP_TIMEOUT", [0x30] = "NET_SET_BACKEND", [0x40] = "SCSI_SET_ENDPOINT", [0x41] = "SCSI_CLEAR_ENDPOINT", [0x42] = "SCSI_GET_ABI_VERSION", [0x43] = "SCSI_SET_EVENTS_MISSED", [0x44] = "SCSI_GET_EVENTS_MISSED", [0x60] = "VSOCK_SET_GUEST_CID", [0x61] = "VSOCK_SET_RUNNING", }; static const char *vhost_virtio_ioctl_read_cmds[] = { [0x00] = "GET_FEATURES", [0x12] = "GET_VRING_BASE", }; $ Or: $ tools/perf/trace/beauty/sndrv_pcm_ioctl.sh static const char *sndrv_pcm_ioctl_cmds[] = { [0x00] = "PVERSION", [0x01] = "INFO", [0x02] = "TSTAMP", [0x03] = "TTSTAMP", [0x04] = "USER_PVERSION", [0x10] = "HW_REFINE", [0x11] = "HW_PARAMS", [0x12] = "HW_FREE", [0x13] = "SW_PARAMS", [0x20] = "STATUS", [0x21] = "DELAY", [0x22] = "HWSYNC", [0x23] = "SYNC_PTR", [0x24] = "STATUS_EXT", [0x32] = "CHANNEL_INFO", [0x40] = "PREPARE", [0x41] = "RESET", [0x42] = "START", [0x43] = "DROP", [0x44] = "DRAIN", [0x45] = "PAUSE", [0x46] = "REWIND", [0x47] = "RESUME", [0x48] = "XRUN", [0x49] = "FORWARD", [0x50] = "WRITEI_FRAMES", [0x51] = "READI_FRAMES", [0x52] = "WRITEN_FRAMES", [0x53] = "READN_FRAMES", [0x60] = "LINK", [0x61] = "UNLINK", }; $ Etc. Cc: Adrian Hunter Cc: David Ahern Cc: Jiri Olsa Cc: Namhyung Kim Cc: Wang Nan Link: https://lkml.kernel.org/n/tip-90am4vm8hh1osms894dp2otr@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/trace/beauty/drm_ioctl.sh | 9 +++++---- tools/perf/trace/beauty/kcmp_type.sh | 2 +- tools/perf/trace/beauty/kvm_ioctl.sh | 4 ++-- tools/perf/trace/beauty/madvise_behavior.sh | 2 +- tools/perf/trace/beauty/perf_ioctl.sh | 2 +- tools/perf/trace/beauty/pkey_alloc_access_rights.sh | 2 +- tools/perf/trace/beauty/sndrv_ctl_ioctl.sh | 4 ++-- tools/perf/trace/beauty/sndrv_pcm_ioctl.sh | 4 ++-- tools/perf/trace/beauty/vhost_virtio_ioctl.sh | 6 +++--- 9 files changed, 18 insertions(+), 17 deletions(-) diff --git a/tools/perf/trace/beauty/drm_ioctl.sh b/tools/perf/trace/beauty/drm_ioctl.sh index 2149d3a98e42..9d3816815e60 100755 --- a/tools/perf/trace/beauty/drm_ioctl.sh +++ b/tools/perf/trace/beauty/drm_ioctl.sh @@ -1,13 +1,14 @@ #!/bin/sh -drm_header_dir=$1 +[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/drm/ + printf "#ifndef DRM_COMMAND_BASE\n" -grep "#define DRM_COMMAND_BASE" $drm_header_dir/drm.h +grep "#define DRM_COMMAND_BASE" $header_dir/drm.h printf "#endif\n" printf "static const char *drm_ioctl_cmds[] = {\n" -grep "^#define DRM_IOCTL.*DRM_IO" $drm_header_dir/drm.h | \ +grep "^#define DRM_IOCTL.*DRM_IO" $header_dir/drm.h | \ sed -r 's/^#define +DRM_IOCTL_([A-Z0-9_]+)[ ]+DRM_IO[A-Z]* *\( *(0x[[:xdigit:]]+),*.*/ [\2] = "\1",/g' -grep "^#define DRM_I915_[A-Z_0-9]\+[ ]\+0x" $drm_header_dir/i915_drm.h | \ +grep "^#define DRM_I915_[A-Z_0-9]\+[ ]\+0x" $header_dir/i915_drm.h | \ sed -r 's/^#define +DRM_I915_([A-Z0-9_]+)[ ]+(0x[[:xdigit:]]+)/\t[DRM_COMMAND_BASE + \2] = "I915_\1",/g' printf "};\n" diff --git a/tools/perf/trace/beauty/kcmp_type.sh b/tools/perf/trace/beauty/kcmp_type.sh index 
40d063b8c082..a3c304caa336 100755 --- a/tools/perf/trace/beauty/kcmp_type.sh +++ b/tools/perf/trace/beauty/kcmp_type.sh @@ -1,6 +1,6 @@ #!/bin/sh -header_dir=$1 +[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/ printf "static const char *kcmp_types[] = {\n" regex='^[[:space:]]+(KCMP_(\w+)),' diff --git a/tools/perf/trace/beauty/kvm_ioctl.sh b/tools/perf/trace/beauty/kvm_ioctl.sh index bd28817afced..c4699fd46bb6 100755 --- a/tools/perf/trace/beauty/kvm_ioctl.sh +++ b/tools/perf/trace/beauty/kvm_ioctl.sh @@ -1,10 +1,10 @@ #!/bin/sh -kvm_header_dir=$1 +[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/ printf "static const char *kvm_ioctl_cmds[] = {\n" regex='^#[[:space:]]*define[[:space:]]+KVM_(\w+)[[:space:]]+_IO[RW]*\([[:space:]]*KVMIO[[:space:]]*,[[:space:]]*(0x[[:xdigit:]]+).*' -egrep $regex ${kvm_header_dir}/kvm.h | \ +egrep $regex ${header_dir}/kvm.h | \ sed -r "s/$regex/\2 \1/g" | \ egrep -v " ((ARM|PPC|S390)_|[GS]ET_(DEBUGREGS|PIT2|XSAVE|TSC_KHZ)|CREATE_SPAPR_TCE_64)" | \ sort | xargs printf "\t[%s] = \"%s\",\n" diff --git a/tools/perf/trace/beauty/madvise_behavior.sh b/tools/perf/trace/beauty/madvise_behavior.sh index 60ef8640ee70..431639eb4d29 100755 --- a/tools/perf/trace/beauty/madvise_behavior.sh +++ b/tools/perf/trace/beauty/madvise_behavior.sh @@ -1,6 +1,6 @@ #!/bin/sh -header_dir=$1 +[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/asm-generic/ printf "static const char *madvise_advices[] = {\n" regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MADV_([[:alnum:]_]+)[[:space:]]+([[:digit:]]+)[[:space:]]*.*' diff --git a/tools/perf/trace/beauty/perf_ioctl.sh b/tools/perf/trace/beauty/perf_ioctl.sh index faea4237c793..6492c74df928 100755 --- a/tools/perf/trace/beauty/perf_ioctl.sh +++ b/tools/perf/trace/beauty/perf_ioctl.sh @@ -1,6 +1,6 @@ #!/bin/sh -header_dir=$1 +[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/ printf "static const char *perf_ioctl_cmds[] = {\n" regex='^#[[:space:]]*define[[:space:]]+PERF_EVENT_IOC_(\w+)[[:space:]]+_IO[RW]*[[:space:]]*\([[:space:]]*.\$.[[:space:]]*,[[:space:]]*([[:digit:]]+).*' diff --git a/tools/perf/trace/beauty/pkey_alloc_access_rights.sh b/tools/perf/trace/beauty/pkey_alloc_access_rights.sh index 62e51a02b839..e0a51aeb20b2 100755 --- a/tools/perf/trace/beauty/pkey_alloc_access_rights.sh +++ b/tools/perf/trace/beauty/pkey_alloc_access_rights.sh @@ -1,6 +1,6 @@ #!/bin/sh -header_dir=$1 +[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/asm-generic/ printf "static const char *pkey_alloc_access_rights[] = {\n" regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+PKEY_([[:alnum:]_]+)[[:space:]]+(0x[[:xdigit:]]+)[[:space:]]*' diff --git a/tools/perf/trace/beauty/sndrv_ctl_ioctl.sh b/tools/perf/trace/beauty/sndrv_ctl_ioctl.sh index aad5ab130539..eb511bb5fbd3 100755 --- a/tools/perf/trace/beauty/sndrv_ctl_ioctl.sh +++ b/tools/perf/trace/beauty/sndrv_ctl_ioctl.sh @@ -1,8 +1,8 @@ #!/bin/sh -sound_header_dir=$1 +[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/sound/ printf "static const char *sndrv_ctl_ioctl_cmds[] = {\n" -grep "^#define[\t ]\+SNDRV_CTL_IOCTL_" $sound_header_dir/asound.h | \ +grep "^#define[\t ]\+SNDRV_CTL_IOCTL_" $header_dir/asound.h | \ sed -r 's/^#define +SNDRV_CTL_IOCTL_([A-Z0-9_]+)[\t ]+_IO[RW]*\( *.U., *(0x[[:xdigit:]]+),?.*/\t[\2] = \"\1\",/g' printf "};\n" diff --git a/tools/perf/trace/beauty/sndrv_pcm_ioctl.sh b/tools/perf/trace/beauty/sndrv_pcm_ioctl.sh index b7e9ef6b2f55..6818392968b2 100755 --- 
a/tools/perf/trace/beauty/sndrv_pcm_ioctl.sh +++ b/tools/perf/trace/beauty/sndrv_pcm_ioctl.sh @@ -1,8 +1,8 @@ #!/bin/sh -sound_header_dir=$1 +[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/sound/ printf "static const char *sndrv_pcm_ioctl_cmds[] = {\n" -grep "^#define[\t ]\+SNDRV_PCM_IOCTL_" $sound_header_dir/asound.h | \ +grep "^#define[\t ]\+SNDRV_PCM_IOCTL_" $header_dir/asound.h | \ sed -r 's/^#define +SNDRV_PCM_IOCTL_([A-Z0-9_]+)[\t ]+_IO[RW]*\( *.A., *(0x[[:xdigit:]]+),?.*/\t[\2] = \"\1\",/g' printf "};\n" diff --git a/tools/perf/trace/beauty/vhost_virtio_ioctl.sh b/tools/perf/trace/beauty/vhost_virtio_ioctl.sh index 76f1de697787..0f6a5197d0be 100755 --- a/tools/perf/trace/beauty/vhost_virtio_ioctl.sh +++ b/tools/perf/trace/beauty/vhost_virtio_ioctl.sh @@ -1,17 +1,17 @@ #!/bin/sh -vhost_virtio_header_dir=$1 +[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/ printf "static const char *vhost_virtio_ioctl_cmds[] = {\n" regex='^#[[:space:]]*define[[:space:]]+VHOST_(\w+)[[:space:]]+_IOW?\([[:space:]]*VHOST_VIRTIO[[:space:]]*,[[:space:]]*(0x[[:xdigit:]]+).*' -egrep $regex ${vhost_virtio_header_dir}/vhost.h | \ +egrep $regex ${header_dir}/vhost.h | \ sed -r "s/$regex/\2 \1/g" | \ sort | xargs printf "\t[%s] = \"%s\",\n" printf "};\n" printf "static const char *vhost_virtio_ioctl_read_cmds[] = {\n" regex='^#[[:space:]]*define[[:space:]]+VHOST_(\w+)[[:space:]]+_IOW?R\([[:space:]]*VHOST_VIRTIO[[:space:]]*,[[:space:]]*(0x[[:xdigit:]]+).*' -egrep $regex ${vhost_virtio_header_dir}/vhost.h | \ +egrep $regex ${header_dir}/vhost.h | \ sed -r "s/$regex/\2 \1/g" | \ sort | xargs printf "\t[%s] = \"%s\",\n" printf "};\n" -- cgit From 95035c5e167ae6e740b1ddd30210ae0eaf39a5db Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Mon, 9 Jul 2018 07:15:22 -0700 Subject: perf evlist: Fix error out while applying initial delay and LBR 'perf record' will error out if both --delay and LBR are applied. For example: # perf record -D 1000 -a -e cycles -j any -- sleep 2 Error: dummy:HG: PMU Hardware doesn't support sampling/overflow-interrupts. Try 'perf stat' # A dummy event is added implicitly for initial delay, which has the same configurations as real sampling events. The dummy event is a software event. If LBR is configured, perf must error out. The dummy event will only be used to track PERF_RECORD_MMAP while perf waits for the initial delay to enable the real events. The BRANCH_STACK bit can be safely cleared for the dummy event. 
After applying the patch: # perf record -D 1000 -a -e cycles -j any -- sleep 2 [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 1.054 MB perf.data (828 samples) ] # Reported-by: Sunil K Pandey Signed-off-by: Kan Liang Acked-by: Jiri Olsa Tested-by: Arnaldo Carvalho de Melo Cc: Andi Kleen Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1531145722-16404-1-git-send-email-kan.liang@linux.intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/evsel.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 5285da0417c5..ddf84b941abf 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -859,6 +859,12 @@ static void apply_config_terms(struct perf_evsel *evsel, } } +static bool is_dummy_event(struct perf_evsel *evsel) +{ + return (evsel->attr.type == PERF_TYPE_SOFTWARE) && + (evsel->attr.config == PERF_COUNT_SW_DUMMY); +} + /* * The enable_on_exec/disabled value strategy: * @@ -1097,6 +1103,14 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts, else perf_evsel__reset_sample_bit(evsel, PERIOD); } + + /* + * For initial_delay, a dummy event is added implicitly. + * The software event will trigger -EOPNOTSUPP error out, + * if BRANCH_STACK bit is set. + */ + if (opts->initial_delay && is_dummy_event(evsel)) + perf_evsel__reset_sample_bit(evsel, BRANCH_STACK); } static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) -- cgit From a6f39cecf71537d105f4bf87b8d4760851960838 Mon Sep 17 00:00:00 2001 From: Sandipan Das Date: Thu, 26 Jul 2018 16:25:02 +0530 Subject: perf tests: Fix complex event name parsing The 'umask' event parameter is unsupported on some architectures like powerpc64. This can be observed on a powerpc64le system running Fedora 27 as shown below. # perf test "Parse event definition strings" -v 6: Parse event definition strings : --- start --- test child forked, pid 45915 ... running test 3 'cpu/name='COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks',period=0x1,event=0x2,umask=0x3/ukp'Invalid event/parameter 'umask' Invalid event/parameter 'umask' failed to parse event 'cpu/name='COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks',period=0x1,event=0x2,umask=0x3/ukp', err 1, str 'unknown term' event syntax error: '..,event=0x2,umask=0x3/ukp' \___ unknown term valid terms: event,mark,pmc,cache_sel,pmcxsel,unit,thresh_stop,thresh_start,combine,thresh_sel,thresh_cmp,sample_mode,config,config1,config2,name,period,freq,branch_type,time,call-graph,stack-size,no-inherit,inherit,max-stack,no-overwrite,overwrite,driver-config mem_access -> cpu/event=0x10401e0/ running test 0 'config=10,config1,config2=3,umask=1' test child finished with 1 ---- end ---- Parse event definition strings: FAILED! 
Committer testing: After applying the patch these test passes and in verbose mode we get: # perf test -v "event definition" 6: Parse event definition strings: --- start --- test child forked, pid 11061 running test 0 'syscalls:sys_enter_openat'Using CPUID GenuineIntel-6-9E running test 53 'cycles/name='COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks'/Duk' running test 0 'cpu/config=10,config1,config2=3,period=1000/u' running test 1 'cpu/config=1,name=krava/u,cpu/config=2/u' running test 2 'cpu/config=1,call-graph=fp,time,period=100000/,cpu/config=2,call-graph=no,time=0,period=2000/' running test 3 'cpu/name='COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks',period=0x1,event=0x2/ukp' test child finished with 0 ---- end ---- Parse event definition strings: Ok # Suggested-by: Ravi Bangoria Signed-off-by: Sandipan Das Tested-by: Arnaldo Carvalho de Melo Cc: Alexey Budankov Cc: Jiri Olsa Cc: Naveen N. Rao Fixes: 06dc5bf21f3f ("perf tests: Check that complex event name is parsed correctly") Link: http://lkml.kernel.org/r/20180726105502.31670-1-sandipan@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/parse-events.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c index a36313daec4e..3b97ac018d5a 100644 --- a/tools/perf/tests/parse-events.c +++ b/tools/perf/tests/parse-events.c @@ -1690,7 +1690,7 @@ static struct evlist_test test__events_pmu[] = { .id = 2, }, { - .name = "cpu/name='COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks',period=0x1,event=0x2,umask=0x3/ukp", + .name = "cpu/name='COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks',period=0x1,event=0x2/ukp", .check = test__checkevent_complex_name, .id = 3, } -- cgit From a4b2061242ecc024b1ee975e066b6e38db1c6b9e Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 26 Jul 2018 09:26:13 -0300 Subject: tools include uapi: Grab a copy of linux/in.h We'll use it to create tables for the 'protocol' argument to the socket syscall when the 'family' arg is one of AF_INET or AF_INET6. Add it to check_headers.sh so that when a new protocol gets added we get a notification during the build process. Cc: Adrian Hunter Cc: David Ahern Cc: Jiri Olsa Cc: Namhyung Kim Cc: Wang Nan Link: https://lkml.kernel.org/n/tip-2amnveu1ns4emjn70xuavpje@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/include/uapi/linux/in.h | 301 ++++++++++++++++++++++++++++++++++++++++++ tools/perf/check-headers.sh | 1 + 2 files changed, 302 insertions(+) create mode 100644 tools/include/uapi/linux/in.h diff --git a/tools/include/uapi/linux/in.h b/tools/include/uapi/linux/in.h new file mode 100644 index 000000000000..48e8a225b985 --- /dev/null +++ b/tools/include/uapi/linux/in.h @@ -0,0 +1,301 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Definitions of the Internet Protocol. + * + * Version: @(#)in.h 1.0.1 04/21/93 + * + * Authors: Original taken from the GNU Project file. + * Fred N. van Kempen, + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ +#ifndef _UAPI_LINUX_IN_H +#define _UAPI_LINUX_IN_H + +#include +#include +#include + +#if __UAPI_DEF_IN_IPPROTO +/* Standard well-defined IP protocols. */ +enum { + IPPROTO_IP = 0, /* Dummy protocol for TCP */ +#define IPPROTO_IP IPPROTO_IP + IPPROTO_ICMP = 1, /* Internet Control Message Protocol */ +#define IPPROTO_ICMP IPPROTO_ICMP + IPPROTO_IGMP = 2, /* Internet Group Management Protocol */ +#define IPPROTO_IGMP IPPROTO_IGMP + IPPROTO_IPIP = 4, /* IPIP tunnels (older KA9Q tunnels use 94) */ +#define IPPROTO_IPIP IPPROTO_IPIP + IPPROTO_TCP = 6, /* Transmission Control Protocol */ +#define IPPROTO_TCP IPPROTO_TCP + IPPROTO_EGP = 8, /* Exterior Gateway Protocol */ +#define IPPROTO_EGP IPPROTO_EGP + IPPROTO_PUP = 12, /* PUP protocol */ +#define IPPROTO_PUP IPPROTO_PUP + IPPROTO_UDP = 17, /* User Datagram Protocol */ +#define IPPROTO_UDP IPPROTO_UDP + IPPROTO_IDP = 22, /* XNS IDP protocol */ +#define IPPROTO_IDP IPPROTO_IDP + IPPROTO_TP = 29, /* SO Transport Protocol Class 4 */ +#define IPPROTO_TP IPPROTO_TP + IPPROTO_DCCP = 33, /* Datagram Congestion Control Protocol */ +#define IPPROTO_DCCP IPPROTO_DCCP + IPPROTO_IPV6 = 41, /* IPv6-in-IPv4 tunnelling */ +#define IPPROTO_IPV6 IPPROTO_IPV6 + IPPROTO_RSVP = 46, /* RSVP Protocol */ +#define IPPROTO_RSVP IPPROTO_RSVP + IPPROTO_GRE = 47, /* Cisco GRE tunnels (rfc 1701,1702) */ +#define IPPROTO_GRE IPPROTO_GRE + IPPROTO_ESP = 50, /* Encapsulation Security Payload protocol */ +#define IPPROTO_ESP IPPROTO_ESP + IPPROTO_AH = 51, /* Authentication Header protocol */ +#define IPPROTO_AH IPPROTO_AH + IPPROTO_MTP = 92, /* Multicast Transport Protocol */ +#define IPPROTO_MTP IPPROTO_MTP + IPPROTO_BEETPH = 94, /* IP option pseudo header for BEET */ +#define IPPROTO_BEETPH IPPROTO_BEETPH + IPPROTO_ENCAP = 98, /* Encapsulation Header */ +#define IPPROTO_ENCAP IPPROTO_ENCAP + IPPROTO_PIM = 103, /* Protocol Independent Multicast */ +#define IPPROTO_PIM IPPROTO_PIM + IPPROTO_COMP = 108, /* Compression Header Protocol */ +#define IPPROTO_COMP IPPROTO_COMP + IPPROTO_SCTP = 132, /* Stream Control Transport Protocol */ +#define IPPROTO_SCTP IPPROTO_SCTP + IPPROTO_UDPLITE = 136, /* UDP-Lite (RFC 3828) */ +#define IPPROTO_UDPLITE IPPROTO_UDPLITE + IPPROTO_MPLS = 137, /* MPLS in IP (RFC 4023) */ +#define IPPROTO_MPLS IPPROTO_MPLS + IPPROTO_RAW = 255, /* Raw IP packets */ +#define IPPROTO_RAW IPPROTO_RAW + IPPROTO_MAX +}; +#endif + +#if __UAPI_DEF_IN_ADDR +/* Internet address. */ +struct in_addr { + __be32 s_addr; +}; +#endif + +#define IP_TOS 1 +#define IP_TTL 2 +#define IP_HDRINCL 3 +#define IP_OPTIONS 4 +#define IP_ROUTER_ALERT 5 +#define IP_RECVOPTS 6 +#define IP_RETOPTS 7 +#define IP_PKTINFO 8 +#define IP_PKTOPTIONS 9 +#define IP_MTU_DISCOVER 10 +#define IP_RECVERR 11 +#define IP_RECVTTL 12 +#define IP_RECVTOS 13 +#define IP_MTU 14 +#define IP_FREEBIND 15 +#define IP_IPSEC_POLICY 16 +#define IP_XFRM_POLICY 17 +#define IP_PASSSEC 18 +#define IP_TRANSPARENT 19 + +/* BSD compatibility */ +#define IP_RECVRETOPTS IP_RETOPTS + +/* TProxy original addresses */ +#define IP_ORIGDSTADDR 20 +#define IP_RECVORIGDSTADDR IP_ORIGDSTADDR + +#define IP_MINTTL 21 +#define IP_NODEFRAG 22 +#define IP_CHECKSUM 23 +#define IP_BIND_ADDRESS_NO_PORT 24 +#define IP_RECVFRAGSIZE 25 + +/* IP_MTU_DISCOVER values */ +#define IP_PMTUDISC_DONT 0 /* Never send DF frames */ +#define IP_PMTUDISC_WANT 1 /* Use per route hints */ +#define IP_PMTUDISC_DO 2 /* Always DF */ +#define IP_PMTUDISC_PROBE 3 /* Ignore dst pmtu */ +/* Always use interface mtu (ignores dst pmtu) but don't set DF flag. 
+ * Also incoming ICMP frag_needed notifications will be ignored on + * this socket to prevent accepting spoofed ones. + */ +#define IP_PMTUDISC_INTERFACE 4 +/* weaker version of IP_PMTUDISC_INTERFACE, which allos packets to get + * fragmented if they exeed the interface mtu + */ +#define IP_PMTUDISC_OMIT 5 + +#define IP_MULTICAST_IF 32 +#define IP_MULTICAST_TTL 33 +#define IP_MULTICAST_LOOP 34 +#define IP_ADD_MEMBERSHIP 35 +#define IP_DROP_MEMBERSHIP 36 +#define IP_UNBLOCK_SOURCE 37 +#define IP_BLOCK_SOURCE 38 +#define IP_ADD_SOURCE_MEMBERSHIP 39 +#define IP_DROP_SOURCE_MEMBERSHIP 40 +#define IP_MSFILTER 41 +#define MCAST_JOIN_GROUP 42 +#define MCAST_BLOCK_SOURCE 43 +#define MCAST_UNBLOCK_SOURCE 44 +#define MCAST_LEAVE_GROUP 45 +#define MCAST_JOIN_SOURCE_GROUP 46 +#define MCAST_LEAVE_SOURCE_GROUP 47 +#define MCAST_MSFILTER 48 +#define IP_MULTICAST_ALL 49 +#define IP_UNICAST_IF 50 + +#define MCAST_EXCLUDE 0 +#define MCAST_INCLUDE 1 + +/* These need to appear somewhere around here */ +#define IP_DEFAULT_MULTICAST_TTL 1 +#define IP_DEFAULT_MULTICAST_LOOP 1 + +/* Request struct for multicast socket ops */ + +#if __UAPI_DEF_IP_MREQ +struct ip_mreq { + struct in_addr imr_multiaddr; /* IP multicast address of group */ + struct in_addr imr_interface; /* local IP address of interface */ +}; + +struct ip_mreqn { + struct in_addr imr_multiaddr; /* IP multicast address of group */ + struct in_addr imr_address; /* local IP address of interface */ + int imr_ifindex; /* Interface index */ +}; + +struct ip_mreq_source { + __be32 imr_multiaddr; + __be32 imr_interface; + __be32 imr_sourceaddr; +}; + +struct ip_msfilter { + __be32 imsf_multiaddr; + __be32 imsf_interface; + __u32 imsf_fmode; + __u32 imsf_numsrc; + __be32 imsf_slist[1]; +}; + +#define IP_MSFILTER_SIZE(numsrc) \ + (sizeof(struct ip_msfilter) - sizeof(__u32) \ + + (numsrc) * sizeof(__u32)) + +struct group_req { + __u32 gr_interface; /* interface index */ + struct __kernel_sockaddr_storage gr_group; /* group address */ +}; + +struct group_source_req { + __u32 gsr_interface; /* interface index */ + struct __kernel_sockaddr_storage gsr_group; /* group address */ + struct __kernel_sockaddr_storage gsr_source; /* source address */ +}; + +struct group_filter { + __u32 gf_interface; /* interface index */ + struct __kernel_sockaddr_storage gf_group; /* multicast address */ + __u32 gf_fmode; /* filter mode */ + __u32 gf_numsrc; /* number of sources */ + struct __kernel_sockaddr_storage gf_slist[1]; /* interface index */ +}; + +#define GROUP_FILTER_SIZE(numsrc) \ + (sizeof(struct group_filter) - sizeof(struct __kernel_sockaddr_storage) \ + + (numsrc) * sizeof(struct __kernel_sockaddr_storage)) +#endif + +#if __UAPI_DEF_IN_PKTINFO +struct in_pktinfo { + int ipi_ifindex; + struct in_addr ipi_spec_dst; + struct in_addr ipi_addr; +}; +#endif + +/* Structure describing an Internet (IP) socket address. */ +#if __UAPI_DEF_SOCKADDR_IN +#define __SOCK_SIZE__ 16 /* sizeof(struct sockaddr) */ +struct sockaddr_in { + __kernel_sa_family_t sin_family; /* Address family */ + __be16 sin_port; /* Port number */ + struct in_addr sin_addr; /* Internet address */ + + /* Pad to size of `struct sockaddr'. */ + unsigned char __pad[__SOCK_SIZE__ - sizeof(short int) - + sizeof(unsigned short int) - sizeof(struct in_addr)]; +}; +#define sin_zero __pad /* for BSD UNIX comp. -FvK */ +#endif + +#if __UAPI_DEF_IN_CLASS +/* + * Definitions of the bits in an Internet address integer. + * On subnets, host and network parts are found according + * to the subnet mask, not these masks. 
+ */ +#define IN_CLASSA(a) ((((long int) (a)) & 0x80000000) == 0) +#define IN_CLASSA_NET 0xff000000 +#define IN_CLASSA_NSHIFT 24 +#define IN_CLASSA_HOST (0xffffffff & ~IN_CLASSA_NET) +#define IN_CLASSA_MAX 128 + +#define IN_CLASSB(a) ((((long int) (a)) & 0xc0000000) == 0x80000000) +#define IN_CLASSB_NET 0xffff0000 +#define IN_CLASSB_NSHIFT 16 +#define IN_CLASSB_HOST (0xffffffff & ~IN_CLASSB_NET) +#define IN_CLASSB_MAX 65536 + +#define IN_CLASSC(a) ((((long int) (a)) & 0xe0000000) == 0xc0000000) +#define IN_CLASSC_NET 0xffffff00 +#define IN_CLASSC_NSHIFT 8 +#define IN_CLASSC_HOST (0xffffffff & ~IN_CLASSC_NET) + +#define IN_CLASSD(a) ((((long int) (a)) & 0xf0000000) == 0xe0000000) +#define IN_MULTICAST(a) IN_CLASSD(a) +#define IN_MULTICAST_NET 0xF0000000 + +#define IN_EXPERIMENTAL(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000) +#define IN_BADCLASS(a) IN_EXPERIMENTAL((a)) + +/* Address to accept any incoming messages. */ +#define INADDR_ANY ((unsigned long int) 0x00000000) + +/* Address to send to all hosts. */ +#define INADDR_BROADCAST ((unsigned long int) 0xffffffff) + +/* Address indicating an error return. */ +#define INADDR_NONE ((unsigned long int) 0xffffffff) + +/* Network number for local host loopback. */ +#define IN_LOOPBACKNET 127 + +/* Address to loopback in software to local host. */ +#define INADDR_LOOPBACK 0x7f000001 /* 127.0.0.1 */ +#define IN_LOOPBACK(a) ((((long int) (a)) & 0xff000000) == 0x7f000000) + +/* Defines for Multicast INADDR */ +#define INADDR_UNSPEC_GROUP 0xe0000000U /* 224.0.0.0 */ +#define INADDR_ALLHOSTS_GROUP 0xe0000001U /* 224.0.0.1 */ +#define INADDR_ALLRTRS_GROUP 0xe0000002U /* 224.0.0.2 */ +#define INADDR_MAX_LOCAL_GROUP 0xe00000ffU /* 224.0.0.255 */ +#endif + +/* contains the htonl type stuff.. */ +#include + + +#endif /* _UAPI_LINUX_IN_H */ diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh index 814aaf269949..de28466c0186 100755 --- a/tools/perf/check-headers.sh +++ b/tools/perf/check-headers.sh @@ -7,6 +7,7 @@ include/uapi/drm/i915_drm.h include/uapi/linux/fcntl.h include/uapi/linux/kcmp.h include/uapi/linux/kvm.h +include/uapi/linux/in.h include/uapi/linux/perf_event.h include/uapi/linux/prctl.h include/uapi/linux/sched.h -- cgit From 9849eec3a44c6f47948117c14d7afb8cf53bf0fb Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 26 Jul 2018 09:26:13 -0300 Subject: perf beauty: Add a generator for IPPROTO_ socket's protocol constants It'll use tools/include copy of linux/in.h to generate a table to be used by tools, initially by the 'socket' and 'socketpair' beautifiers in 'perf trace', but that could also be used to translate from a string constant to the integer value to be used in a eBPF or tracefs tracepoint filter. 
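Note that the generated array shown below is sparse: its indices are the raw IPPROTO_ numbers, so slots that have no well-known protocol stay NULL (a later commit in this series makes strarray__scnprintf() fall back to the numeric value for exactly that case). As a rough, hypothetical sketch of how such a table is meant to be consumed -- the helper name below is made up for illustration and is not part of this patch:

#include <stdio.h>

/*
 * Illustration only, not from the patch: print the generated protocol
 * name when the slot is populated, otherwise fall back to the number.
 */
static void print_ipproto(const char **tbl, int nr_entries, int protocol)
{
        if (protocol >= 0 && protocol < nr_entries && tbl[protocol] != NULL)
                printf("protocol: %s\n", tbl[protocol]);
        else
                printf("protocol: %d\n", protocol);
}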
When used without any args it produces: $ tools/perf/trace/beauty/socket_ipproto.sh static const char *socket_ipproto[] = { [0] = "IP", [103] = "PIM", [108] = "COMP", [12] = "PUP", [132] = "SCTP", [136] = "UDPLITE", [137] = "MPLS", [17] = "UDP", [1] = "ICMP", [22] = "IDP", [255] = "RAW", [29] = "TP", [2] = "IGMP", [33] = "DCCP", [41] = "IPV6", [46] = "RSVP", [47] = "GRE", [4] = "IPIP", [50] = "ESP", [51] = "AH", [6] = "TCP", [8] = "EGP", [92] = "MTP", [94] = "BEETPH", [98] = "ENCAP", }; $ Cc: Adrian Hunter Cc: David Ahern Cc: Jiri Olsa Cc: Namhyung Kim Cc: Wang Nan Link: https://lkml.kernel.org/n/tip-v9rafqh3qn6b9kp9vfvj9f8s@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/trace/beauty/socket_ipproto.sh | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100755 tools/perf/trace/beauty/socket_ipproto.sh diff --git a/tools/perf/trace/beauty/socket_ipproto.sh b/tools/perf/trace/beauty/socket_ipproto.sh new file mode 100755 index 000000000000..a3cc24633bec --- /dev/null +++ b/tools/perf/trace/beauty/socket_ipproto.sh @@ -0,0 +1,11 @@ +#!/bin/sh + +[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/ + +printf "static const char *socket_ipproto[] = {\n" +regex='^[[:space:]]+IPPROTO_(\w+)[[:space:]]+=[[:space:]]+([[:digit:]]+),.*' + +egrep $regex ${header_dir}/in.h | \ + sed -r "s/$regex/\2 \1/g" | \ + sort | xargs printf "\t[%s] = \"%s\",\n" +printf "};\n" -- cgit From bc972ada4f85450ebf20c4981ee84a1a4b060161 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 26 Jul 2018 15:30:33 -0300 Subject: perf trace beauty: Do not print NULL strarray entries We may have string tables where not all slots have values, in those cases its better to print the numeric value, for instance: In the table below we would show "protocol: (null)" for socket_ipproto[3] Where it would be better to show "protocol: 3". $ tools/perf/trace/beauty/socket_ipproto.sh static const char *socket_ipproto[] = { [0] = "IP", [103] = "PIM", [108] = "COMP", [12] = "PUP", [132] = "SCTP", [136] = "UDPLITE", [137] = "MPLS", [17] = "UDP", [1] = "ICMP", [22] = "IDP", [255] = "RAW", [29] = "TP", [2] = "IGMP", [33] = "DCCP", [41] = "IPV6", [46] = "RSVP", [47] = "GRE", [4] = "IPIP", [50] = "ESP", [51] = "AH", [6] = "TCP", [8] = "EGP", [92] = "MTP", [94] = "BEETPH", [98] = "ENCAP", }; $ Cc: Adrian Hunter Cc: David Ahern Cc: Jiri Olsa Cc: Namhyung Kim Cc: Wang Nan Link: https://lkml.kernel.org/n/tip-7djfak94eb3b9ltr79cpn3ti@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-trace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index 6a748eca2edb..632ef5f8f060 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -291,7 +291,7 @@ size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const cha { int idx = val - sa->offset; - if (idx < 0 || idx >= sa->nr_entries) + if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) return scnprintf(bf, size, intfmt, val); return scnprintf(bf, size, "%s", sa->entries[idx]); -- cgit From 03aeb6c818b608a2b344e21bb978030c41158045 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 26 Jul 2018 09:26:13 -0300 Subject: perf trace beauty: Add beautifiers for 'socket''s 'protocol' arg It'll be wired to 'perf trace' in the next cset. 
Cc: Adrian Hunter Cc: David Ahern Cc: Jiri Olsa Cc: Namhyung Kim Cc: Wang Nan Link: https://lkml.kernel.org/n/tip-2i9vkvm1ik8yu4hgjmxhsyjv@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Makefile.perf | 10 ++++++++++ tools/perf/trace/beauty/Build | 1 + tools/perf/trace/beauty/beauty.h | 3 +++ tools/perf/trace/beauty/socket.c | 28 ++++++++++++++++++++++++++++ 4 files changed, 42 insertions(+) create mode 100644 tools/perf/trace/beauty/socket.c diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index ecc9fc952655..b3d1b12a5081 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -384,6 +384,8 @@ export INSTALL SHELL_PATH SHELL = $(SHELL_PATH) +linux_uapi_dir := $(srctree)/tools/include/uapi/linux + beauty_outdir := $(OUTPUT)trace/beauty/generated beauty_ioctl_outdir := $(beauty_outdir)/ioctl drm_ioctl_array := $(beauty_ioctl_outdir)/drm_ioctl_array.c @@ -431,6 +433,12 @@ kvm_ioctl_tbl := $(srctree)/tools/perf/trace/beauty/kvm_ioctl.sh $(kvm_ioctl_array): $(kvm_hdr_dir)/kvm.h $(kvm_ioctl_tbl) $(Q)$(SHELL) '$(kvm_ioctl_tbl)' $(kvm_hdr_dir) > $@ +socket_ipproto_array := $(beauty_outdir)/socket_ipproto_array.c +socket_ipproto_tbl := $(srctree)/tools/perf/trace/beauty/socket_ipproto.sh + +$(socket_ipproto_array): $(linux_uapi_dir)/in.h $(socket_ipproto_tbl) + $(Q)$(SHELL) '$(socket_ipproto_tbl)' $(linux_uapi_dir) > $@ + vhost_virtio_ioctl_array := $(beauty_ioctl_outdir)/vhost_virtio_ioctl_array.c vhost_virtio_hdr_dir := $(srctree)/tools/include/uapi/linux vhost_virtio_ioctl_tbl := $(srctree)/tools/perf/trace/beauty/vhost_virtio_ioctl.sh @@ -566,6 +574,7 @@ prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders $(drm_ioc $(sndrv_ctl_ioctl_array) \ $(kcmp_type_array) \ $(kvm_ioctl_array) \ + $(socket_ipproto_array) \ $(vhost_virtio_ioctl_array) \ $(madvise_behavior_array) \ $(perf_ioctl_array) \ @@ -860,6 +869,7 @@ clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clea $(OUTPUT)$(sndrv_pcm_ioctl_array) \ $(OUTPUT)$(kvm_ioctl_array) \ $(OUTPUT)$(kcmp_type_array) \ + $(OUTPUT)$(socket_ipproto_array) \ $(OUTPUT)$(vhost_virtio_ioctl_array) \ $(OUTPUT)$(perf_ioctl_array) \ $(OUTPUT)$(prctl_option_array) \ diff --git a/tools/perf/trace/beauty/Build b/tools/perf/trace/beauty/Build index 66330d4b739b..f528ba35e140 100644 --- a/tools/perf/trace/beauty/Build +++ b/tools/perf/trace/beauty/Build @@ -7,4 +7,5 @@ endif libperf-y += kcmp.o libperf-y += pkey_alloc.o libperf-y += prctl.o +libperf-y += socket.o libperf-y += statx.o diff --git a/tools/perf/trace/beauty/beauty.h b/tools/perf/trace/beauty/beauty.h index 984a504d335c..9615af5d412b 100644 --- a/tools/perf/trace/beauty/beauty.h +++ b/tools/perf/trace/beauty/beauty.h @@ -106,6 +106,9 @@ size_t syscall_arg__scnprintf_prctl_arg2(char *bf, size_t size, struct syscall_a size_t syscall_arg__scnprintf_prctl_arg3(char *bf, size_t size, struct syscall_arg *arg); #define SCA_PRCTL_ARG3 syscall_arg__scnprintf_prctl_arg3 +size_t syscall_arg__scnprintf_socket_protocol(char *bf, size_t size, struct syscall_arg *arg); +#define SCA_SK_PROTO syscall_arg__scnprintf_socket_protocol + size_t syscall_arg__scnprintf_statx_flags(char *bf, size_t size, struct syscall_arg *arg); #define SCA_STATX_FLAGS syscall_arg__scnprintf_statx_flags diff --git a/tools/perf/trace/beauty/socket.c b/tools/perf/trace/beauty/socket.c new file mode 100644 index 000000000000..65227269384b --- /dev/null +++ b/tools/perf/trace/beauty/socket.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * 
trace/beauty/socket.c + * + * Copyright (C) 2018, Red Hat Inc, Arnaldo Carvalho de Melo + */ + +#include "trace/beauty/beauty.h" +#include +#include + +static size_t socket__scnprintf_ipproto(int protocol, char *bf, size_t size) +{ +#include "trace/beauty/generated/socket_ipproto_array.c" + static DEFINE_STRARRAY(socket_ipproto); + + return strarray__scnprintf(&strarray__socket_ipproto, bf, size, "%d", protocol); +} + +size_t syscall_arg__scnprintf_socket_protocol(char *bf, size_t size, struct syscall_arg *arg) +{ + int domain = syscall_arg__val(arg, 0); + + if (domain == AF_INET || domain == AF_INET6) + return socket__scnprintf_ipproto(arg->val, bf, size); + + return syscall_arg__scnprintf_int(bf, size, arg); +} -- cgit From 162d3edbe591a97939516b546162b9ba05ec62cb Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 26 Jul 2018 09:26:13 -0300 Subject: perf trace: Beautify the AF_INET & AF_INET6 'socket' syscall 'protocol' args For instance: $ trace -e socket* ssh sandy 0.000 ( 0.031 ms): ssh/19919 socket(family: LOCAL, type: STREAM|CLOEXEC|NONBLOCK ) = 3 0.052 ( 0.015 ms): ssh/19919 socket(family: LOCAL, type: STREAM|CLOEXEC|NONBLOCK ) = 3 1.568 ( 0.020 ms): ssh/19919 socket(family: LOCAL, type: STREAM|CLOEXEC|NONBLOCK ) = 3 1.603 ( 0.012 ms): ssh/19919 socket(family: LOCAL, type: STREAM|CLOEXEC|NONBLOCK ) = 3 1.699 ( 0.014 ms): ssh/19919 socket(family: LOCAL, type: STREAM|CLOEXEC|NONBLOCK ) = 3 1.724 ( 0.012 ms): ssh/19919 socket(family: LOCAL, type: STREAM|CLOEXEC|NONBLOCK ) = 3 1.804 ( 0.020 ms): ssh/19919 socket(family: INET, type: STREAM, protocol: TCP ) = 3 17.549 ( 0.098 ms): ssh/19919 socket(family: LOCAL, type: STREAM ) = 4 acme@sandy's password: Just like with other syscall args, the common bits are supressed so that the output is more compact, i.e. we use "TCP" instead of "IPPROTO_TCP", but we can make this show the original constant names if we like it by using some command line knob or ~/.perfconfig "[trace]" section variable. Also needed is to make perf's event parser accept things like: $ perf trace -e socket*/protocol=TCP/ By using both the tracefs event 'format' files and these tables built from the kernel sources. 
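That string-to-value direction is not implemented by this patch; as a minimal sketch, assuming a generated and possibly sparse table such as socket_ipproto[] from the previous commits, the reverse mapping could look roughly like the following (the function name is hypothetical, chosen only for illustration):

#include <string.h>

/*
 * Illustration only, not part of the patch: resolve a name such as
 * "TCP" back to its numeric value; returns -1 when the name is not
 * present in the (possibly sparse) generated table.
 */
static int ipproto_by_name(const char **tbl, int nr_entries, const char *name)
{
        int i;

        for (i = 0; i < nr_entries; i++) {
                if (tbl[i] != NULL && strcmp(tbl[i], name) == 0)
                        return i;
        }
        return -1;
}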
Cc: Adrian Hunter Cc: David Ahern Cc: Jiri Olsa Cc: Namhyung Kim Cc: Wang Nan Link: https://lkml.kernel.org/n/tip-l39jz1vnyda0b6jsufuc8bz7@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-trace.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index 632ef5f8f060..9aca65e6b9aa 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -761,10 +761,12 @@ static struct syscall_fmt { .arg = { [0] = STRARRAY(resource, rlimit_resources), }, }, { .name = "socket", .arg = { [0] = STRARRAY(family, socket_families), - [1] = { .scnprintf = SCA_SK_TYPE, /* type */ }, }, }, + [1] = { .scnprintf = SCA_SK_TYPE, /* type */ }, + [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, }, { .name = "socketpair", .arg = { [0] = STRARRAY(family, socket_families), - [1] = { .scnprintf = SCA_SK_TYPE, /* type */ }, }, }, + [1] = { .scnprintf = SCA_SK_TYPE, /* type */ }, + [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, }, { .name = "stat", .alias = "newstat", }, { .name = "statx", .arg = { [0] = { .scnprintf = SCA_FDAT, /* fdat */ }, -- cgit From aa90f9f9554616d5738f7bedb4a8f0e5e14d1bc6 Mon Sep 17 00:00:00 2001 From: Sandipan Das Date: Thu, 26 Jul 2018 22:47:33 +0530 Subject: perf tests: Fix indexing when invoking subtests Recently, the subtest numbering was changed to start from 1. While it is fine for displaying results, this should not be the case when the subtests are actually invoked. Typically, the subtests are stored in zero-indexed arrays and invoked based on the index passed to the main test function. Since the index now starts from 1, the second subtest in the array (index 1) gets invoked instead of the first (index 0). This applies to all of the following subtests but for the last one, the subtest always fails because it does not meet the boundary condition of the subtest index being lesser than the number of subtests. This can be observed on powerpc64 and x86_64 systems running Fedora 28 as shown below. Before: # perf test "builtin clang support" 55: builtin clang support : 55.1: builtin clang compile C source to IR : Ok 55.2: builtin clang compile C source to ELF object : FAILED! # perf test "LLVM search and compile" 38: LLVM search and compile : 38.1: Basic BPF llvm compile : Ok 38.2: kbuild searching : Ok 38.3: Compile source for BPF prologue generation : Ok 38.4: Compile source for BPF relocation : FAILED! # perf test "BPF filter" 40: BPF filter : 40.1: Basic BPF filtering : Ok 40.2: BPF pinning : Ok 40.3: BPF prologue generation : Ok 40.4: BPF relocation checker : FAILED! After: # perf test "builtin clang support" 55: builtin clang support : 55.1: builtin clang compile C source to IR : Ok 55.2: builtin clang compile C source to ELF object : Ok # perf test "LLVM search and compile" 38: LLVM search and compile : 38.1: Basic BPF llvm compile : Ok 38.2: kbuild searching : Ok 38.3: Compile source for BPF prologue generation : Ok 38.4: Compile source for BPF relocation : Ok # perf test "BPF filter" 40: BPF filter : 40.1: Basic BPF filtering : Ok 40.2: BPF pinning : Ok 40.3: BPF prologue generation : Ok 40.4: BPF relocation checker : Ok Signed-off-by: Sandipan Das Reported-by: Arnaldo Carvalho de Melo Tested-by: Arnaldo Carvalho de Melo Cc: Heiko Carstens Cc: Hendrik Brueckner Cc: Jiri Olsa Cc: Martin Schwidefsky Cc: Naveen N. 
Rao Cc: Ravi Bangoria Cc: Thomas Richter Fixes: 9ef0112442bd ("perf test: Fix subtest number when showing results") Link: http://lkml.kernel.org/r/20180726171733.33208-1-sandipan@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/builtin-test.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c index 4f5de8245b32..d7a5e1b9aa6f 100644 --- a/tools/perf/tests/builtin-test.c +++ b/tools/perf/tests/builtin-test.c @@ -385,7 +385,7 @@ static int test_and_print(struct test *t, bool force_skip, int subtest) if (!t->subtest.get_nr) pr_debug("%s:", t->desc); else - pr_debug("%s subtest %d:", t->desc, subtest); + pr_debug("%s subtest %d:", t->desc, subtest + 1); switch (err) { case TEST_OK: @@ -599,7 +599,7 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist) for (subi = 0; subi < subn; subi++) { pr_info("%2d.%1d: %-*s:", i, subi + 1, subw, t->subtest.get_desc(subi)); - err = test_and_print(t, skip, subi + 1); + err = test_and_print(t, skip, subi); if (err != TEST_OK && t->subtest.skip_if_fail) skip = true; } -- cgit From 73978332572ccf5e364c31e9a70ba953f8202b46 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Tue, 24 Jul 2018 08:20:08 +0200 Subject: perf c2c report: Fix crash for empty browser 'perf c2c' scans read/write accesses and tries to find false sharing cases, so when the events it wants were not asked for or ended up not taking place, we get no histograms. So do not try to display entry details if there's not any. Currently this ends up in crash: $ perf c2c report # then press 'd' perf: Segmentation fault $ Committer testing: Before: Record a perf.data file without events of interest to 'perf c2c report', then call it and press 'd': # perf record sleep 1 [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 0.001 MB perf.data (6 samples) ] # perf c2c report perf: Segmentation fault -------- backtrace -------- perf[0x5b1d2a] /lib64/libc.so.6(+0x346df)[0x7fcb566e36df] perf[0x46fcae] perf[0x4a9f1e] perf[0x4aa220] perf(main+0x301)[0x42c561] /lib64/libc.so.6(__libc_start_main+0xe9)[0x7fcb566cff29] perf(_start+0x29)[0x42c999] # After the patch the segfault doesn't take place, a follow up patch to tell the user why nothing changes when 'd' is pressed would be good. Reported-by: rodia@autistici.org Signed-off-by: Jiri Olsa Tested-by: Arnaldo Carvalho de Melo Cc: Alexander Shishkin Cc: David Ahern Cc: Don Zickus Cc: Joe Mario Cc: Namhyung Kim Cc: Peter Zijlstra Fixes: f1c5fd4d0bb9 ("perf c2c report: Add TUI cacheline browser") Link: http://lkml.kernel.org/r/20180724062008.26126-1-jolsa@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-c2c.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c index f2ea85ee573f..f3aa9d02a5ab 100644 --- a/tools/perf/builtin-c2c.c +++ b/tools/perf/builtin-c2c.c @@ -2349,6 +2349,9 @@ static int perf_c2c__browse_cacheline(struct hist_entry *he) " s Toggle full length of symbol and source line columns \n" " q Return back to cacheline list \n"; + if (!he) + return 0; + /* Display compact version first. */ c2c.symbol_full = false; -- cgit From 83868bf71d2eb7700b37f1ea188007f0125e4ee4 Mon Sep 17 00:00:00 2001 From: Thomas Richter Date: Tue, 31 Jul 2018 09:32:54 +0200 Subject: perf build: Fix installation directory for eBPF The perf tool build and install is controlled via a Makefile. The 'install' rule creates directories and copies files. 
Among them are header files installed in /usr/lib/include/perf/bpf/. However all listed examples are installing its header files in /usr/lib//...[/include]/header.h and not in /usr/lib/include//.../header.h. Background information: Building the Fedora 28 glibc RPM on s390x and s390 fails on s390 (gcc -m31) as gcc is not able to find header-files like stdbool.h. In the glibc.spec file, you can see that glibc is configured with "--with-headers". In this case, first -nostdinc is added to the CFLAGS and then further include paths are added via -isystem. One of those paths should contain header files like stdbool.h. In order to get this path, gcc is invoked with: - on Fedora 28 (with 4.18 kernel): $ gcc -print-file-name=include /usr/lib/gcc/s390x-redhat-linux/8/include $ gcc -m31 -print-file-name=include /usr/lib/gcc/s390x-redhat-linux/8/../../../../lib/include => If perf is installed, this is: /usr/lib/include On my machine this directory is only containing the directory "perf". If perf is not installed gcc returns: /usr/lib/gcc/s390x-redhat-linux/8/include - on Ubuntu 18.04 (with 4.15 kernel): $ gcc -print-file-name=include /usr/lib/gcc/s390x-linux-gnu/7/include $ gcc -m31 -print-file-name=include /usr/lib/gcc/s390x-linux-gnu/7/include => gcc returns the correct path even if perf is installed. In each case, the introduction of the subdirectory /usr/lib/include leads to the regression that one can not build the glibc RPM for s390 anymore as gcc can not find headers like stdbool.h. To remedy this install bpf.h to /usr/lib/perf/include/bpf/bpf.h Output before using the command 'perf test -Fv 40': echo '...[bpf-program-source]...' | /usr/bin/clang ... \ -I/root/lib/include/perf/bpf ... ^^^^^^^^^^^^ ... [root@p23lp27 perf]# perf test -F 40 40: BPF filter : 40.1: Basic BPF filtering : Ok 40.2: BPF pinning : Ok 40.3: BPF prologue generation : Ok 40.4: BPF relocation checker : Ok [root@p23lp27 perf]# Output after using command 'perf test -Fv 40': echo '...[bpf-program-source]...' | /usr/bin/clang ... \ -I/root/lib/perf/include/bpf ... ^^^^^^^^^^^^ ... [root@p23lp27 perf]# perf test -F 40 40: BPF filter : 40.1: Basic BPF filtering : Ok 40.2: BPF pinning : Ok 40.3: BPF prologue generation : Ok 40.4: BPF relocation checker : Ok [root@p23lp27 perf]# Committer testing: While the above 'perf test -F 40' (or 'perf test bpf') will allow us to see that the correct path is now added via -I, to actually test this we better try to use a bpf script that includes files in the changed directory. We have the files that now reside in /root/lib/perf/examples/bpf/ to do just that: # tail -8 /root/lib/perf/examples/bpf/5sec.c #include int probe(hrtimer_nanosleep, rqtp->tv_sec)(void *ctx, int err, long sec) { return sec == 5; } license(GPL); # perf trace -e *sleep -e /root/lib/perf/examples/bpf/5sec.c sleep 4 0.333 (4000.086 ms): sleep/9248 nanosleep(rqtp: 0x7ffc155f3300) = 0 # perf trace -e *sleep -e /root/lib/perf/examples/bpf/5sec.c sleep 5 0.287 ( ): sleep/9659 nanosleep(rqtp: 0x7ffeafe38200) ... 0.290 ( ): perf_bpf_probe:hrtimer_nanosleep:(ffffffff9911efe0) tv_sec=5 0.287 (5000.059 ms): sleep/9659 ... [continued]: nanosleep()) = 0 # perf trace -e *sleep -e /root/lib/perf/examples/bpf/5sec.c sleep 6 0.247 (5999.951 ms): sleep/10068 nanosleep(rqtp: 0x7fff2086d900) = 0 # perf trace -e *sleep -e /root/lib/perf/examples/bpf/5sec.c sleep 5.987 0.293 ( ): sleep/10489 nanosleep(rqtp: 0x7ffdd4fc10e0) ... 0.296 ( ): perf_bpf_probe:hrtimer_nanosleep:(ffffffff9911efe0) tv_sec=5 0.293 (5986.912 ms): sleep/10489 ... 
[continued]: nanosleep()) = 0 # Suggested-by: Stefan Liebler Suggested-by: Arnaldo Carvalho de Melo Signed-off-by: Thomas Richter Reviewed-by: Hendrik Brueckner Tested-by: Arnaldo Carvalho de Melo Cc: Heiko Carstens Cc: Martin Schwidefsky Fixes: 1b16fffa389d ("perf llvm-utils: Add bpf include path to clang command line") Link: http://lkml.kernel.org/r/20180731073254.91090-1-tmricht@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Makefile.config | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index d3318f99006c..f6d1a03c7523 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -907,8 +907,8 @@ bindir = $(abspath $(prefix)/$(bindir_relative)) mandir = share/man infodir = share/info perfexecdir = libexec/perf-core -perf_include_dir = lib/include/perf -perf_examples_dir = lib/examples/perf +perf_include_dir = lib/perf/include +perf_examples_dir = lib/perf/examples sharedir = $(prefix)/share template_dir = share/perf-core/templates STRACE_GROUPS_DIR = share/perf-core/strace/groups -- cgit From 3eb3e07bcfee0670d7142f839c76f33f3d265793 Mon Sep 17 00:00:00 2001 From: Leo Yan Date: Wed, 11 Jul 2018 15:45:42 +0800 Subject: perf cs-etm: Fix start tracing packet handling Usually the start tracing packet is a CS_ETM_TRACE_ON packet, which is passed to cs_etm__flush(); cs_etm__flush() checks the condition 'prev_packet->sample_type == CS_ETM_RANGE', but 'prev_packet' is allocated by zalloc(), so 'prev_packet->sample_type' is zero after initialization and the condition is false. cs_etm__flush() therefore bails out without handling the start tracing packet. This patch introduces a new sample type, CS_ETM_EMPTY, which indicates that a packet is empty. cs_etm__flush() swaps packets when it finds that the previous packet is empty, so the start tracing packet is recorded into 'etmq->prev_packet'.
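The fix works because the new enumerator is explicitly assigned the value 0, so a freshly zalloc()'d packet reads as CS_ETM_EMPTY while the real packet types remain non-zero bit flags. A minimal sketch of that idea (the enum values are taken from the patch below; the helper is only illustrative, not part of the patch):

    enum cs_etm_sample_type {
            CS_ETM_EMPTY    = 0,        /* what a zalloc()'d packet reports */
            CS_ETM_RANGE    = 1 << 0,
            CS_ETM_TRACE_ON = 1 << 1,
    };

    /* illustrative helper, not from the patch */
    static inline bool cs_etm__packet_is_empty(const struct cs_etm_packet *packet)
    {
            return packet->sample_type == CS_ETM_EMPTY;
    }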
Signed-off-by: Leo Yan Reviewed-by: Mathieu Poirier Cc: Alexander Shishkin Cc: Jiri Olsa Cc: Kim Phillips Cc: Mike Leach Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Robert Walker Cc: linux-arm-kernel@lists.infradead.org Link: http://lkml.kernel.org/r/1531295145-596-4-git-send-email-leo.yan@linaro.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/cs-etm-decoder/cs-etm-decoder.h | 1 + tools/perf/util/cs-etm.c | 19 ++++++++++++++++++- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h index 743f5f444304..612b5755f742 100644 --- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h +++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h @@ -23,6 +23,7 @@ struct cs_etm_buffer { }; enum cs_etm_sample_type { + CS_ETM_EMPTY = 0, CS_ETM_RANGE = 1 << 0, CS_ETM_TRACE_ON = 1 << 1, }; diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c index 8b2c099e750a..ae7c9c880cb2 100644 --- a/tools/perf/util/cs-etm.c +++ b/tools/perf/util/cs-etm.c @@ -924,8 +924,14 @@ static int cs_etm__flush(struct cs_etm_queue *etmq) int err = 0; struct cs_etm_packet *tmp; + if (!etmq->prev_packet) + return 0; + + /* Handle start tracing packet */ + if (etmq->prev_packet->sample_type == CS_ETM_EMPTY) + goto swap_packet; + if (etmq->etm->synth_opts.last_branch && - etmq->prev_packet && etmq->prev_packet->sample_type == CS_ETM_RANGE) { /* * Generate a last branch event for the branches left in the @@ -944,6 +950,10 @@ static int cs_etm__flush(struct cs_etm_queue *etmq) etmq->period_instructions = 0; + } + +swap_packet: + if (etmq->etm->synth_opts.last_branch) { /* * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for * the next incoming packet. @@ -1023,6 +1033,13 @@ static int cs_etm__run_decoder(struct cs_etm_queue *etmq) */ cs_etm__flush(etmq); break; + case CS_ETM_EMPTY: + /* + * Should not receive empty packet, + * report error. + */ + pr_err("CS ETM Trace: empty packet\n"); + return -EINVAL; default: break; } -- cgit From 6035b6804bdfd662c8ee6226dc90f3764060ec73 Mon Sep 17 00:00:00 2001 From: Leo Yan Date: Wed, 11 Jul 2018 15:45:43 +0800 Subject: perf cs-etm: Support dummy address value for CS_ETM_TRACE_ON packet For a CS_ETM_TRACE_ON packet, the fields 'packet->start_addr' and 'packet->end_addr' equal 0xdeadbeefdeadbeefUL, a dummy value emitted by the decoder layer; the dummy value is pointless for branch samples when using the 'perf script' command to check program flow. This patch is a preparation for supporting the CS_ETM_TRACE_ON packet in branch samples; it converts the dummy address value to zero, which is more readable. This is accomplished by cs_etm__last_executed_instr() and cs_etm__first_executed_instr(); the latter is a new function introduced by this patch.
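Rendered without the diff markers, the address handling boils down to small accessors that return 0 instead of the decoder's 0xdeadbeefdeadbeefUL sentinel when they see a CS_ETM_TRACE_ON packet; a condensed sketch of the new accessor (condensed from the patch that follows):

    static inline u64 cs_etm__first_executed_instr(struct cs_etm_packet *packet)
    {
            /* A CS_ETM_TRACE_ON packet carries no real address, report 0 */
            if (packet->sample_type == CS_ETM_TRACE_ON)
                    return 0;

            return packet->start_addr;
    }

cs_etm__last_executed_instr() gets the same early return, and the callers that fill in branch sample addresses switch to these accessors.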
Signed-off-by: Leo Yan Reviewed-by: Mathieu Poirier Cc: Alexander Shishkin Cc: Jiri Olsa Cc: Kim Phillips Cc: Mike Leach Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Robert Walker Cc: linux-arm-kernel@lists.infradead.org Link: http://lkml.kernel.org/r/1531295145-596-5-git-send-email-leo.yan@linaro.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/cs-etm.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c index ae7c9c880cb2..976db8483478 100644 --- a/tools/perf/util/cs-etm.c +++ b/tools/perf/util/cs-etm.c @@ -494,6 +494,10 @@ static inline void cs_etm__reset_last_branch_rb(struct cs_etm_queue *etmq) static inline u64 cs_etm__last_executed_instr(struct cs_etm_packet *packet) { + /* Returns 0 for the CS_ETM_TRACE_ON packet */ + if (packet->sample_type == CS_ETM_TRACE_ON) + return 0; + /* * The packet records the execution range with an exclusive end address * @@ -505,6 +509,15 @@ static inline u64 cs_etm__last_executed_instr(struct cs_etm_packet *packet) return packet->end_addr - A64_INSTR_SIZE; } +static inline u64 cs_etm__first_executed_instr(struct cs_etm_packet *packet) +{ + /* Returns 0 for the CS_ETM_TRACE_ON packet */ + if (packet->sample_type == CS_ETM_TRACE_ON) + return 0; + + return packet->start_addr; +} + static inline u64 cs_etm__instr_count(const struct cs_etm_packet *packet) { /* @@ -546,7 +559,7 @@ static void cs_etm__update_last_branch_rb(struct cs_etm_queue *etmq) be = &bs->entries[etmq->last_branch_pos]; be->from = cs_etm__last_executed_instr(etmq->prev_packet); - be->to = etmq->packet->start_addr; + be->to = cs_etm__first_executed_instr(etmq->packet); /* No support for mispredict */ be->flags.mispred = 0; be->flags.predicted = 1; @@ -701,7 +714,7 @@ static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq) sample.ip = cs_etm__last_executed_instr(etmq->prev_packet); sample.pid = etmq->pid; sample.tid = etmq->tid; - sample.addr = etmq->packet->start_addr; + sample.addr = cs_etm__first_executed_instr(etmq->packet); sample.id = etmq->etm->branches_id; sample.stream_id = etmq->etm->branches_id; sample.period = 1; -- cgit From d603b4e9f9c3181f2dacb989bba60081b99befe4 Mon Sep 17 00:00:00 2001 From: Leo Yan Date: Wed, 11 Jul 2018 15:45:44 +0800 Subject: perf cs-etm: Generate branch sample when receiving a CS_ETM_TRACE_ON packet If a CS_ETM_TRACE_ON packet is inserted, we fail to generate a branch sample for the previous CS_ETM_RANGE packet. This patch generates a branch sample when a CS_ETM_TRACE_ON packet is received, so that complete info is saved for the CS_ETM_RANGE packet that immediately precedes the CS_ETM_TRACE_ON packet.
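In other words, the flush path is taught to do what cs_etm__sample() already does for ordinary packets: when branch samples were requested and the packet being retired is a CS_ETM_RANGE packet, synthesize one before the packets are swapped. A rough sketch of that flow (simplified from the patch that follows):

    /* inside cs_etm__flush(), before the swap_packet label */
    if (etm->sample_branches &&
        etmq->prev_packet->sample_type == CS_ETM_RANGE) {
            err = cs_etm__synth_branch_sample(etmq);
            if (err)
                    return err;
    }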
Signed-off-by: Leo Yan Reviewed-by: Mathieu Poirier Cc: Alexander Shishkin Cc: Jiri Olsa Cc: Kim Phillips Cc: Mike Leach Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Robert Walker Cc: linux-arm-kernel@lists.infradead.org Link: http://lkml.kernel.org/r/1531295145-596-6-git-send-email-leo.yan@linaro.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/cs-etm.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c index 976db8483478..d3b794286bca 100644 --- a/tools/perf/util/cs-etm.c +++ b/tools/perf/util/cs-etm.c @@ -935,6 +935,7 @@ static int cs_etm__sample(struct cs_etm_queue *etmq) static int cs_etm__flush(struct cs_etm_queue *etmq) { int err = 0; + struct cs_etm_auxtrace *etm = etmq->etm; struct cs_etm_packet *tmp; if (!etmq->prev_packet) @@ -965,6 +966,13 @@ static int cs_etm__flush(struct cs_etm_queue *etmq) } + if (etm->sample_branches && + etmq->prev_packet->sample_type == CS_ETM_RANGE) { + err = cs_etm__synth_branch_sample(etmq); + if (err) + return err; + } + swap_packet: if (etmq->etm->synth_opts.last_branch) { /* -- cgit From 14a85b1eca348e00a9f209b522f94ec3ead94acb Mon Sep 17 00:00:00 2001 From: Leo Yan Date: Wed, 11 Jul 2018 15:45:45 +0800 Subject: perf cs-etm: Generate branch sample for CS_ETM_TRACE_ON packet The CS_ETM_TRACE_ON packet itself gives the info that there is a discontinuity in the trace; this patch adds a branch sample for a CS_ETM_TRACE_ON packet when it is inserted in the middle of CS_ETM_RANGE packets. As a result we get a hint of the trace discontinuity. Signed-off-by: Leo Yan Reviewed-by: Mathieu Poirier Cc: Alexander Shishkin Cc: Jiri Olsa Cc: Kim Phillips Cc: Mike Leach Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Robert Walker Cc: linux-arm-kernel@lists.infradead.org Link: http://lkml.kernel.org/r/1531295145-596-7-git-send-email-leo.yan@linaro.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/cs-etm.c | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c index d3b794286bca..2ae640257fdb 100644 --- a/tools/perf/util/cs-etm.c +++ b/tools/perf/util/cs-etm.c @@ -910,13 +910,23 @@ static int cs_etm__sample(struct cs_etm_queue *etmq) etmq->period_instructions = instrs_over; } - if (etm->sample_branches && - etmq->prev_packet && - etmq->prev_packet->sample_type == CS_ETM_RANGE && - etmq->prev_packet->last_instr_taken_branch) { - ret = cs_etm__synth_branch_sample(etmq); - if (ret) - return ret; + if (etm->sample_branches && etmq->prev_packet) { + bool generate_sample = false; + + /* Generate sample for tracing on packet */ + if (etmq->prev_packet->sample_type == CS_ETM_TRACE_ON) + generate_sample = true; + + /* Generate sample for branch taken packet */ + if (etmq->prev_packet->sample_type == CS_ETM_RANGE && + etmq->prev_packet->last_instr_taken_branch) + generate_sample = true; + + if (generate_sample) { + ret = cs_etm__synth_branch_sample(etmq); + if (ret) + return ret; + } } if (etm->sample_branches || etm->synth_opts.last_branch) { -- cgit From b9b77222d4ff6b5bb8f5d87fca20de0910618bb9 Mon Sep 17 00:00:00 2001 From: Ganapatrao Kulkarni Date: Tue, 31 Jul 2018 15:32:51 +0530 Subject: perf vendor events arm64: Update ThunderX2 implementation defined pmu core events Signed-off-by: Ganapatrao Kulkarni Cc: Alexander Shishkin Cc: Ganapatrao Kulkarni Cc: Jan Glauber Cc: Jayachandran C Cc: Jiri Olsa Cc: linux-arm-kernel@lists.infradead.org Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Robert Richter
Cc: Vadim Lomovtsev Cc: Will Deacon Link: http://lkml.kernel.org/r/20180731100251.23575-1-ganapatrao.kulkarni@cavium.com Signed-off-by: Arnaldo Carvalho de Melo --- .../arch/arm64/cavium/thunderx2/core-imp-def.json | 87 +++++++++++++++++++++- 1 file changed, 84 insertions(+), 3 deletions(-) diff --git a/tools/perf/pmu-events/arch/arm64/cavium/thunderx2/core-imp-def.json b/tools/perf/pmu-events/arch/arm64/cavium/thunderx2/core-imp-def.json index bc03c06c3918..752e47eb6977 100644 --- a/tools/perf/pmu-events/arch/arm64/cavium/thunderx2/core-imp-def.json +++ b/tools/perf/pmu-events/arch/arm64/cavium/thunderx2/core-imp-def.json @@ -11,6 +11,21 @@ { "ArchStdEvent": "L1D_CACHE_REFILL_WR", }, + { + "ArchStdEvent": "L1D_CACHE_REFILL_INNER", + }, + { + "ArchStdEvent": "L1D_CACHE_REFILL_OUTER", + }, + { + "ArchStdEvent": "L1D_CACHE_WB_VICTIM", + }, + { + "ArchStdEvent": "L1D_CACHE_WB_CLEAN", + }, + { + "ArchStdEvent": "L1D_CACHE_INVAL", + }, { "ArchStdEvent": "L1D_TLB_REFILL_RD", }, @@ -23,10 +38,76 @@ { "ArchStdEvent": "L1D_TLB_WR", }, + { + "ArchStdEvent": "L2D_TLB_REFILL_RD", + }, + { + "ArchStdEvent": "L2D_TLB_REFILL_WR", + }, + { + "ArchStdEvent": "L2D_TLB_RD", + }, + { + "ArchStdEvent": "L2D_TLB_WR", + }, { "ArchStdEvent": "BUS_ACCESS_RD", - }, - { + }, + { "ArchStdEvent": "BUS_ACCESS_WR", - } + }, + { + "ArchStdEvent": "MEM_ACCESS_RD", + }, + { + "ArchStdEvent": "MEM_ACCESS_WR", + }, + { + "ArchStdEvent": "UNALIGNED_LD_SPEC", + }, + { + "ArchStdEvent": "UNALIGNED_ST_SPEC", + }, + { + "ArchStdEvent": "UNALIGNED_LDST_SPEC", + }, + { + "ArchStdEvent": "EXC_UNDEF", + }, + { + "ArchStdEvent": "EXC_SVC", + }, + { + "ArchStdEvent": "EXC_PABORT", + }, + { + "ArchStdEvent": "EXC_DABORT", + }, + { + "ArchStdEvent": "EXC_IRQ", + }, + { + "ArchStdEvent": "EXC_FIQ", + }, + { + "ArchStdEvent": "EXC_SMC", + }, + { + "ArchStdEvent": "EXC_HVC", + }, + { + "ArchStdEvent": "EXC_TRAP_PABORT", + }, + { + "ArchStdEvent": "EXC_TRAP_DABORT", + }, + { + "ArchStdEvent": "EXC_TRAP_OTHER", + }, + { + "ArchStdEvent": "EXC_TRAP_IRQ", + }, + { + "ArchStdEvent": "EXC_TRAP_FIQ", + } ] -- cgit From 95f04328e42dc7d1eb08f3cb38150c1f2ec09f57 Mon Sep 17 00:00:00 2001 From: Michael Petlan Date: Mon, 30 Jul 2018 17:35:04 -0400 Subject: perf list: Unify metric group description format with PMU event description PMU event descriptions use 7 spaces + '[' or 8 spaces as indentation. Metric groups used a tab + '['. This patch unifies it to the way PMU event descriptions are indented. BEFORE: $ perf list [...] Metric Groups: DSB: DSB_Coverage [Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)] [...] AFTER: $ perf list [...] Metric Groups: DSB: DSB_Coverage [Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)] [...] 
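Since the indentation is exactly what this patch changes and it does not survive in the flattened BEFORE/AFTER listings above, the difference is easier to see in the format strings: the old code put a tab before the '[', the new code right-aligns '[' in an 8-character field, i.e. 7 spaces followed by '[', matching the PMU event descriptions. A standalone illustration of the two printf formats (not the actual metricgroup.c code):

    #include <stdio.h>

    int main(void)
    {
            const char *name = "DSB_Coverage";
            const char *desc = "Fraction of Uops delivered by the DSB";

            printf("%s\n\t[%s]\n", name, desc);           /* old: tab + '[' */
            printf("%s\n%*s%s]\n", name, 8, "[", desc);   /* new: 7 spaces + '[' */
            return 0;
    }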
Signed-off-by: Michael Petlan Acked-by: Jiri Olsa Cc: Kim Phillips LPU-Reference: 771439042.22924766.1532986504631.JavaMail.zimbra@redhat.com Link: https://lkml.kernel.org/n/tip-mlo850517m6u1rbjndvd1bwr@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/metricgroup.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c index 96eab4ec34ff..a28f9b5cc4ff 100644 --- a/tools/perf/util/metricgroup.c +++ b/tools/perf/util/metricgroup.c @@ -326,8 +326,8 @@ void metricgroup__print(bool metrics, bool metricgroups, char *filter, if (raw) s = (char *)pe->metric_name; else { - if (asprintf(&s, "%s\n\t[%s]", - pe->metric_name, pe->desc) < 0) + if (asprintf(&s, "%s\n%*s%s]", + pe->metric_name, 8, "[", pe->desc) < 0) return; } -- cgit From 739e2edc848af30fefa66d6c422c00ca9a701c06 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 31 Jul 2018 11:58:57 -0300 Subject: perf bpf: Show better message when failing to load an object Before: libbpf: license of tools/perf/examples/bpf/etcsnoop.c is GPL libbpf: section(6) version, size 4, link 0, flags 3, type=1 libbpf: kernel version of tools/perf/examples/bpf/etcsnoop.c is 41200 libbpf: section(7) .symtab, size 120, link 1, flags 0, type=2 bpf: config program 'syscalls:sys_enter_openat' libbpf: load bpf program failed: Operation not permitted libbpf: failed to load program 'syscalls:sys_enter_openat' libbpf: failed to load object 'tools/perf/examples/bpf/etcsnoop.c' bpf: load objects failed After: (just the last line changes) bpf: load objects failed: err=-4009: (Incorrect kernel version) Cc: Adrian Hunter Cc: David Ahern Cc: Jiri Olsa Cc: Namhyung Kim Cc: Wang Nan Link: https://lkml.kernel.org/n/tip-wi44iid0yjfht3lcvplc75fm@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/bpf-loader.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c index cee658733e2c..3d02ae38ec56 100644 --- a/tools/perf/util/bpf-loader.c +++ b/tools/perf/util/bpf-loader.c @@ -747,7 +747,9 @@ int bpf__load(struct bpf_object *obj) err = bpf_object__load(obj); if (err) { - pr_debug("bpf: load objects failed\n"); + char bf[128]; + libbpf_strerror(err, bf, sizeof(bf)); + pr_debug("bpf: load objects failed: err=%d: (%s)\n", err, bf); return err; } return 0; -- cgit From 21b8732eb4479b579bda9ee38e62b2c312c2a0e5 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Fri, 22 Sep 2017 13:20:43 +0200 Subject: perf tools: Allow overriding MAX_NR_CPUS at compile time After update of kernel, the perf tool doesn't run anymore on my 32MB RAM powerpc board, but still runs on a 128MB RAM board: ~# strace perf execve("/usr/sbin/perf", ["perf"], [/* 12 vars */]) = -1 ENOMEM (Cannot allocate memory) --- SIGSEGV {si_signo=SIGSEGV, si_code=SI_KERNEL, si_addr=0} --- +++ killed by SIGSEGV +++ Segmentation fault objdump -x shows that .bss section has a huge size of 24Mbytes: 27 .bss 016baca8 101cebb8 101cebb8 001cd988 2**3 With especially the following objects having quite big size: 10205f80 l O .bss 00140000 runtime_cycles_stats 10345f80 l O .bss 00140000 runtime_stalled_cycles_front_stats 10485f80 l O .bss 00140000 runtime_stalled_cycles_back_stats 105c5f80 l O .bss 00140000 runtime_branches_stats 10705f80 l O .bss 00140000 runtime_cacherefs_stats 10845f80 l O .bss 00140000 runtime_l1_dcache_stats 10985f80 l O .bss 00140000 runtime_l1_icache_stats 10ac5f80 l O .bss 00140000 runtime_ll_cache_stats 
10c05f80 l O .bss 00140000 runtime_itlb_cache_stats 10d45f80 l O .bss 00140000 runtime_dtlb_cache_stats 10e85f80 l O .bss 00140000 runtime_cycles_in_tx_stats 10fc5f80 l O .bss 00140000 runtime_transaction_stats 11105f80 l O .bss 00140000 runtime_elision_stats 11245f80 l O .bss 00140000 runtime_topdown_total_slots 11385f80 l O .bss 00140000 runtime_topdown_slots_retired 114c5f80 l O .bss 00140000 runtime_topdown_slots_issued 11605f80 l O .bss 00140000 runtime_topdown_fetch_bubbles 11745f80 l O .bss 00140000 runtime_topdown_recovery_bubbles This is due to commit 4d255766d28b1 ("perf: Bump max number of cpus to 1024"), because many tables are sized with MAX_NR_CPUS. This patch gives the opportunity to redefine MAX_NR_CPUS via: $ make EXTRA_CFLAGS=-DMAX_NR_CPUS=1 Signed-off-by: Christophe Leroy Cc: Alexander Shishkin Cc: Peter Zijlstra Cc: linuxppc-dev@lists.ozlabs.org Link: http://lkml.kernel.org/r/20170922112043.8349468C57@po15668-vm-win7.idsi0.si.c-s.fr Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/perf.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/perf/perf.h b/tools/perf/perf.h index d215714f48df..21bf7f5a3cf5 100644 --- a/tools/perf/perf.h +++ b/tools/perf/perf.h @@ -25,7 +25,9 @@ static inline unsigned long long rdclock(void) return ts.tv_sec * 1000000000ULL + ts.tv_nsec; } +#ifndef MAX_NR_CPUS #define MAX_NR_CPUS 1024 +#endif extern const char *input_name; extern bool perf_host, perf_guest; -- cgit From 822c2621daadcf9e8fa8f11f7334e3c349434154 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 1 Aug 2018 10:50:51 -0300 Subject: perf bpf: Include uapi/linux/bpf.h from the 'perf trace' script's bpf.h The next example scripts need the definition for the BPF functions, i.e. things like BPF_FUNC_probe_read, and in time will require lots of other definitions found in uapi/linux/bpf.h, so include it from the bpf.h file included from the eBPF scripts built with clang via '-e bpf_script.c' like in this example: $ tail -8 tools/perf/examples/bpf/5sec.c #include <bpf.h> int probe(hrtimer_nanosleep, rqtp->tv_sec)(void *ctx, int err, long sec) { return sec == 5; } license(GPL); $ That 'bpf.h' include in the 5sec.c eBPF example will come from a set of header files crafted for building eBPF objects, that in an end-user system will come from: /usr/lib/perf/include/bpf/bpf.h And it will include uapi/linux/bpf.h either from the place where the kernel was built, or from a kernel-devel rpm package, like: -working-directory /lib/modules/4.17.9-100.fc27.x86_64/build That is set up by tools/perf/util/llvm-utils.c, and can be overridden by setting the 'kbuild-dir' variable in the "llvm" ~/.perfconfig file, like: # cat ~/.perfconfig [llvm] kbuild-dir = /home/foo/git/build/linux This usually doesn't need any change, just documenting here my findings while working with this code. In the future we may want to instead just use what is in /usr/include/linux/bpf.h, which comes from the UAPI provided by the kernel sources; for now, to avoid getting the kernel's non-UAPI "linux/bpf.h" file, which will cause clang to fail and is not what we want anyway (no BPF function definitions, etc), do it explicitly by asking for "uapi/linux/bpf.h".
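The header change itself is a one-liner; the point of spelling out the 'uapi/' prefix is to keep clang, which is pointed at a kernel build tree, away from the kernel's internal linux/bpf.h. Condensed from the patch below, the helper header ends up looking roughly like:

    // SPDX-License-Identifier: GPL-2.0
    #ifndef _PERF_BPF_H
    #define _PERF_BPF_H

    #include <uapi/linux/bpf.h>   /* BPF_FUNC_* ids and other UAPI definitions */

    #define SEC(NAME) __attribute__((section(NAME), used))
    /* ... probe() and license() helpers unchanged ... */
    #endif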
Cc: Adrian Hunter Cc: David Ahern Cc: Jiri Olsa Cc: Namhyung Kim Cc: Wang Nan Link: https://lkml.kernel.org/n/tip-zd8zeyhr2sappevojdem9xxt@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/include/bpf/bpf.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/perf/include/bpf/bpf.h b/tools/perf/include/bpf/bpf.h index dd764ad5efdf..a63aa6241b7f 100644 --- a/tools/perf/include/bpf/bpf.h +++ b/tools/perf/include/bpf/bpf.h @@ -1,6 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 #ifndef _PERF_BPF_H #define _PERF_BPF_H + +#include + #define SEC(NAME) __attribute__((section(NAME), used)) #define probe(function, vars) \ -- cgit From b912885ab75c7c8aa841c615108afd755d0b97f8 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 1 Aug 2018 16:20:28 -0300 Subject: perf trace: Do not require --no-syscalls to suppress strace like output So far the --syscalls option was the default, requiring explicit --no-syscalls when wanting to process just some other event, invert that and assume it only when no other event was specified, allowing its explicit enablement when wanting to see all syscalls together with some other event: E.g: The existing default is maintained for a single workload: # perf trace sleep 1 0.264 ( 0.003 ms): sleep/12762 mmap(len: 113045344, prot: READ, flags: PRIVATE, fd: 3) = 0x7f62cbf04000 0.271 ( 0.001 ms): sleep/12762 close(fd: 3) = 0 0.295 (1000.130 ms): sleep/12762 nanosleep(rqtp: 0x7ffd15194fd0) = 0 1000.469 ( 0.006 ms): sleep/12762 close(fd: 1) = 0 1000.480 ( 0.004 ms): sleep/12762 close(fd: 2) = 0 1000.502 ( ): sleep/12762 exit_group() # For a pid: # pidof ssh 7826 3961 3226 2628 2493 # perf trace -p 3961 ? ( ): ... [continued]: select()) = 1 0.023 ( 0.005 ms): clock_gettime(which_clock: BOOTTIME, tp: 0x7ffcc8fce870 ) = 0 0.036 ( 0.009 ms): read(fd: 5, buf: 0x7ffcc8fca7b0, count: 16384 ) = 3 0.060 ( 0.004 ms): getpid( ) = 3961 (ssh) 0.079 ( 0.004 ms): clock_gettime(which_clock: BOOTTIME, tp: 0x7ffcc8fce8e0 ) = 0 0.088 ( 0.003 ms): clock_gettime(which_clock: BOOTTIME, tp: 0x7ffcc8fce7c0 ) = 0 For system wide, threads, cgroups, user, etc when no event is specified, the existing behaviour is maintained, i.e. --syscalls is selected. When some event is specified, then --no-syscalls doesn't need to be specified: # perf trace -e tcp:tcp_probe ssh localhost 0.000 tcp:tcp_probe:src=[::1]:22 dest=[::1]:39074 mark=0 length=53 snd_nxt=0xb67ce8f7 snd_una=0xb67ce8f7 snd_cwnd=10 ssthresh=2147483647 snd_wnd=43776 srtt=18 rcv_wnd=43690 0.010 tcp:tcp_probe:src=[::1]:39074 dest=[::1]:22 mark=0 length=32 snd_nxt=0xa8f9ef38 snd_una=0xa8f9ef23 snd_cwnd=10 ssthresh=2147483647 snd_wnd=43690 srtt=31 rcv_wnd=43776 4.525 tcp:tcp_probe:src=[::1]:22 dest=[::1]:39074 mark=0 length=1240 snd_nxt=0xb67ce90c snd_una=0xb67ce90c snd_cwnd=10 ssthresh=2147483647 snd_wnd=43776 srtt=18 rcv_wnd=43776 7.242 tcp:tcp_probe:src=[::1]:22 dest=[::1]:39074 mark=0 length=80 snd_nxt=0xb67ced44 snd_una=0xb67ce90c snd_cwnd=10 ssthresh=2147483647 snd_wnd=43776 srtt=18 rcv_wnd=174720 The authenticity of host 'localhost (::1)' can't be established. ECDSA key fingerprint is SHA256:TKZS58923458203490asekfjaklskljmkjfgPMBfHzY. ECDSA key fingerprint is MD5:d8:29:54:40:71:fa:b8:44:89:52:64:8a:35:42:d0:e8. Are you sure you want to continue connecting (yes/no)? 
^C # To get the previous behaviour just use --syscalls and get all syscalls formatted strace like + the specified extra events: # trace -e sched:*switch --syscalls sleep 1 0.160 ( 0.003 ms): sleep/12877 mprotect(start: 0x7fdfe2361000, len: 4096, prot: READ) = 0 0.164 ( 0.009 ms): sleep/12877 munmap(addr: 0x7fdfe2345000, len: 113155) = 0 0.211 ( 0.001 ms): sleep/12877 brk() = 0x55d3ce68e000 0.212 ( 0.002 ms): sleep/12877 brk(brk: 0x55d3ce6af000) = 0x55d3ce6af000 0.215 ( 0.001 ms): sleep/12877 brk() = 0x55d3ce6af000 0.219 ( 0.004 ms): sleep/12877 open(filename: 0xe1f07c00, flags: CLOEXEC) = 3 0.225 ( 0.001 ms): sleep/12877 fstat(fd: 3, statbuf: 0x7fdfe2138aa0) = 0 0.227 ( 0.003 ms): sleep/12877 mmap(len: 113045344, prot: READ, flags: PRIVATE, fd: 3) = 0x7fdfdb1b8000 0.234 ( 0.001 ms): sleep/12877 close(fd: 3) = 0 0.257 ( ): sleep/12877 nanosleep(rqtp: 0x7fffb36b6020) ... 0.260 ( ): sched:sched_switch:prev_comm=sleep prev_pid=12877 prev_prio=120 prev_state=D ==> next_comm=swapper/3 next_pid=0 next_prio=120 0.257 (1000.134 ms): sleep/12877 ... [continued]: nanosleep()) = 0 1000.428 ( 0.006 ms): sleep/12877 close(fd: 1) = 0 1000.440 ( 0.004 ms): sleep/12877 close(fd: 2) = 0 1000.461 ( ): sleep/12877 exit_group() # When specifiying just some syscalls, the behaviour doesn't change, i.e.: # trace -e nanosleep -e sched:*switch sleep 1 0.000 ( ): sleep/14974 nanosleep(rqtp: 0x7ffc344ba9c0 ) ... 0.007 ( ): sched:sched_switch:prev_comm=sleep prev_pid=14974 prev_prio=120 prev_state=D ==> next_comm=swapper/2 next_pid=0 next_prio=120 0.000 (1000.139 ms): sleep/14974 ... [continued]: nanosleep()) = 0 # Cc: Adrian Hunter Cc: David Ahern Cc: Jiri Olsa Cc: Namhyung Kim Cc: Wang Nan Link: https://lkml.kernel.org/n/tip-om2fulll97ytnxv40ler8jkf@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-trace.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index 9aca65e6b9aa..88561eed7950 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -2992,6 +2992,7 @@ static int trace__parse_events_option(const struct option *opt, const char *str, if (trace__validate_ev_qualifier(trace)) goto out; + trace->trace_syscalls = true; } err = 0; @@ -3047,7 +3048,7 @@ int cmd_trace(int argc, const char **argv) }, .output = stderr, .show_comm = true, - .trace_syscalls = true, + .trace_syscalls = false, .kernel_syscallchains = false, .max_stack = UINT_MAX, }; @@ -3193,13 +3194,7 @@ int cmd_trace(int argc, const char **argv) if (!trace.trace_syscalls && !trace.trace_pgfaults && trace.evlist->nr_entries == 0 /* Was --events used? */) { - pr_err("Please specify something to trace.\n"); - return -1; - } - - if (!trace.trace_syscalls && trace.ev_qualifier) { - pr_err("The -e option can't be used with --no-syscalls.\n"); - goto out; + trace.trace_syscalls = true; } if (output_name != NULL) { -- cgit
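Taken together, the default now hinges on whether any event was named at all. A condensed sketch of the resulting logic (simplified from the patch above, not the complete option handling):

    /* trace__parse_events_option(): an -e syscall qualifier re-enables syscall tracing */
    trace->trace_syscalls = true;

    /* cmd_trace(): with no events and no page fault tracing requested, fall back
     * to the strace-like default instead of erroring out */
    if (!trace.trace_syscalls && !trace.trace_pgfaults &&
        trace.evlist->nr_entries == 0 /* Was --events used? */)
            trace.trace_syscalls = true;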