From ef54c1a476aef7eef26fe13ea10dc090952c00f8 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 8 Apr 2021 12:35:56 +0200 Subject: perf: Rework perf_event_exit_event() Make perf_event_exit_event() more robust, such that we can use it from other contexts. Specifically the upcoming remove_on_exec. For this to work we need to address a few issues. Remove_on_exec will not destroy the entire context, so we cannot rely on TASK_TOMBSTONE to disable event_function_call() and we thus have to use perf_remove_from_context(). When using perf_remove_from_context(), there are two races to consider. The first is against close(), where we can have concurrent tear-down of the event. The second is against child_list iteration, which should not find a half-baked event. To address this, teach perf_remove_from_context() to special-case !ctx->is_active and to know about DETACH_CHILD. [ elver@google.com: fix racing parent/child exit in sync_child_event(). ] Signed-off-by: Marco Elver Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20210408103605.1676875-2-elver@google.com
--- include/linux/perf_event.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include')
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 3f7f89ea5e51..3d478abf411c 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -607,6 +607,7 @@ struct swevent_hlist { #define PERF_ATTACH_TASK_DATA 0x08 #define PERF_ATTACH_ITRACE 0x10 #define PERF_ATTACH_SCHED_CB 0x20 +#define PERF_ATTACH_CHILD 0x40 struct perf_cgroup; struct perf_buffer; -- cgit
From 2b26f0aa004995f49f7b6f4100dd0e4c39a9ed5f Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Thu, 8 Apr 2021 12:35:58 +0200 Subject: perf: Support only inheriting events if cloned with CLONE_THREAD Adds the bit perf_event_attr::inherit_thread, to restrict inheriting events to children cloned with CLONE_THREAD. This option supports the case where an event is supposed to be process-wide only (including subthreads), but should not propagate beyond the current process's shared environment. 
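As a usage sketch (illustrative only, not part of this patch; everything except the new inherit_thread bit is standard perf_event_open(2) self-monitoring usage), a process-wide counter that should follow new threads but not forked children could be opened roughly like this:

  #include <linux/perf_event.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  static int open_threadwide_counter(void)
  {
          struct perf_event_attr attr = {
                  .type           = PERF_TYPE_HARDWARE,
                  .config         = PERF_COUNT_HW_INSTRUCTIONS,
                  .size           = sizeof(attr),
                  .disabled       = 1,
                  .inherit        = 1,    /* inherit into children... */
                  .inherit_thread = 1,    /* ...but only those cloned with CLONE_THREAD */
          };

          /* pid == 0: measure the calling task; cpu == -1: on any CPU */
          return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
  }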
Suggested-by: Peter Zijlstra Signed-off-by: Marco Elver Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/lkml/YBvj6eJR%2FDY2TsEB@hirez.programming.kicks-ass.net/ --- include/linux/perf_event.h | 5 +++-- include/uapi/linux/perf_event.h | 3 ++- 2 files changed, 5 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 3d478abf411c..1660039199b2 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -958,7 +958,7 @@ extern void __perf_event_task_sched_in(struct task_struct *prev, struct task_struct *task); extern void __perf_event_task_sched_out(struct task_struct *prev, struct task_struct *next); -extern int perf_event_init_task(struct task_struct *child); +extern int perf_event_init_task(struct task_struct *child, u64 clone_flags); extern void perf_event_exit_task(struct task_struct *child); extern void perf_event_free_task(struct task_struct *task); extern void perf_event_delayed_put(struct task_struct *task); @@ -1449,7 +1449,8 @@ perf_event_task_sched_in(struct task_struct *prev, static inline void perf_event_task_sched_out(struct task_struct *prev, struct task_struct *next) { } -static inline int perf_event_init_task(struct task_struct *child) { return 0; } +static inline int perf_event_init_task(struct task_struct *child, + u64 clone_flags) { return 0; } static inline void perf_event_exit_task(struct task_struct *child) { } static inline void perf_event_free_task(struct task_struct *task) { } static inline void perf_event_delayed_put(struct task_struct *task) { } diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index ad15e40d7f5d..813efb65fea8 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -389,7 +389,8 @@ struct perf_event_attr { cgroup : 1, /* include cgroup events */ text_poke : 1, /* include text poke events */ build_id : 1, /* use build id in mmap2 events */ - __reserved_1 : 29; + inherit_thread : 1, /* children only inherit if cloned with CLONE_THREAD */ + __reserved_1 : 28; union { __u32 wakeup_events; /* wakeup every n events */ -- cgit From 2e498d0a74e5b88a6689ae1b811f247f91ff188e Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Thu, 8 Apr 2021 12:35:59 +0200 Subject: perf: Add support for event removal on exec Adds bit perf_event_attr::remove_on_exec, to support removing an event from a task on exec. This option supports the case where an event is supposed to be process-wide only, and should not propagate beyond exec, to limit monitoring to the original process image only. 
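A similarly hedged sketch (not part of this patch), reusing the perf_event_open() pattern from the previous example: a self-monitoring tool that must not keep measuring after the process replaces its image would additionally set the new bit:

  struct perf_event_attr attr = {
          .type           = PERF_TYPE_HARDWARE,
          .config         = PERF_COUNT_HW_CPU_CYCLES,
          .size           = sizeof(attr),
          .remove_on_exec = 1,    /* detach the event from the task on exec */
  };
  /* opened on the calling task as before */
  int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);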
Suggested-by: Peter Zijlstra Signed-off-by: Marco Elver Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20210408103605.1676875-5-elver@google.com --- include/uapi/linux/perf_event.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 813efb65fea8..8c5b9f5ad63f 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -390,7 +390,8 @@ struct perf_event_attr { text_poke : 1, /* include text poke events */ build_id : 1, /* use build id in mmap2 events */ inherit_thread : 1, /* children only inherit if cloned with CLONE_THREAD */ - __reserved_1 : 28; + remove_on_exec : 1, /* event is removed from task on exec */ + __reserved_1 : 27; union { __u32 wakeup_events; /* wakeup every n events */ -- cgit From fb6cc127e0b6e629252cdd0f77d5a1f49db95b92 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Thu, 8 Apr 2021 12:36:00 +0200 Subject: signal: Introduce TRAP_PERF si_code and si_perf to siginfo Introduces the TRAP_PERF si_code, and associated siginfo_t field si_perf. These will be used by the perf event subsystem to send signals (if requested) to the task where an event occurred. Signed-off-by: Marco Elver Signed-off-by: Peter Zijlstra (Intel) Acked-by: Geert Uytterhoeven # m68k Acked-by: Arnd Bergmann # asm-generic Link: https://lkml.kernel.org/r/20210408103605.1676875-6-elver@google.com --- include/linux/compat.h | 2 ++ include/linux/signal.h | 1 + include/uapi/asm-generic/siginfo.h | 6 +++++- include/uapi/linux/signalfd.h | 4 +++- 4 files changed, 11 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/compat.h b/include/linux/compat.h index 6e65be753603..c8821d966812 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -236,6 +236,8 @@ typedef struct compat_siginfo { char _dummy_pkey[__COMPAT_ADDR_BND_PKEY_PAD]; u32 _pkey; } _addr_pkey; + /* used when si_code=TRAP_PERF */ + compat_u64 _perf; }; } _sigfault; diff --git a/include/linux/signal.h b/include/linux/signal.h index 205526c4003a..1e98548d7cf6 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -43,6 +43,7 @@ enum siginfo_layout { SIL_FAULT_MCEERR, SIL_FAULT_BNDERR, SIL_FAULT_PKUERR, + SIL_PERF_EVENT, SIL_CHLD, SIL_RT, SIL_SYS, diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h index d2597000407a..d0bb9125c853 100644 --- a/include/uapi/asm-generic/siginfo.h +++ b/include/uapi/asm-generic/siginfo.h @@ -91,6 +91,8 @@ union __sifields { char _dummy_pkey[__ADDR_BND_PKEY_PAD]; __u32 _pkey; } _addr_pkey; + /* used when si_code=TRAP_PERF */ + __u64 _perf; }; } _sigfault; @@ -155,6 +157,7 @@ typedef struct siginfo { #define si_lower _sifields._sigfault._addr_bnd._lower #define si_upper _sifields._sigfault._addr_bnd._upper #define si_pkey _sifields._sigfault._addr_pkey._pkey +#define si_perf _sifields._sigfault._perf #define si_band _sifields._sigpoll._band #define si_fd _sifields._sigpoll._fd #define si_call_addr _sifields._sigsys._call_addr @@ -253,7 +256,8 @@ typedef struct siginfo { #define TRAP_BRANCH 3 /* process taken branch trap */ #define TRAP_HWBKPT 4 /* hardware breakpoint/watchpoint */ #define TRAP_UNK 5 /* undiagnosed trap */ -#define NSIGTRAP 5 +#define TRAP_PERF 6 /* perf event with sigtrap=1 */ +#define NSIGTRAP 6 /* * There is an additional set of SIGTRAP si_codes used by ptrace diff --git a/include/uapi/linux/signalfd.h b/include/uapi/linux/signalfd.h index 83429a05b698..7e333042c7e3 
100644 --- a/include/uapi/linux/signalfd.h +++ b/include/uapi/linux/signalfd.h @@ -39,6 +39,8 @@ struct signalfd_siginfo { __s32 ssi_syscall; __u64 ssi_call_addr; __u32 ssi_arch; + __u32 __pad3; + __u64 ssi_perf; /* * Pad strcture to 128 bytes. Remember to update the @@ -49,7 +51,7 @@ struct signalfd_siginfo { * comes out of a read(2) and we really don't want to have * a compat on read(2). */ - __u8 __pad[28]; + __u8 __pad[16]; }; -- cgit From 97ba62b278674293762c3d91f724f1bb922f04e0 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Thu, 8 Apr 2021 12:36:01 +0200 Subject: perf: Add support for SIGTRAP on perf events Adds bit perf_event_attr::sigtrap, which can be set to cause events to send SIGTRAP (with si_code TRAP_PERF) to the task where the event occurred. The primary motivation is to support synchronous signals on perf events in the task where an event (such as breakpoints) triggered. To distinguish perf events based on the event type, the type is set in si_errno. For events that are associated with an address, si_addr is copied from perf_sample_data. The new field perf_event_attr::sig_data is copied to si_perf, which allows user space to disambiguate which event (of the same type) triggered the signal. For example, user space could encode the relevant information it cares about in sig_data. We note that the choice of an opaque u64 provides the simplest and most flexible option. Alternatives where a reference to some user space data is passed back suffer from the problem that modification of referenced data (be it the event fd, or the perf_event_attr) can race with the signal being delivered (of course, the same caveat applies if user space decides to store a pointer in sig_data, but the ABI explicitly avoids prescribing such a design). Suggested-by: Peter Zijlstra Signed-off-by: Marco Elver Signed-off-by: Peter Zijlstra (Intel) Acked-by: Dmitry Vyukov Link: https://lore.kernel.org/lkml/YBv3rAT566k+6zjg@hirez.programming.kicks-ass.net/ --- include/linux/perf_event.h | 1 + include/uapi/linux/perf_event.h | 10 +++++++++- 2 files changed, 10 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 1660039199b2..7d7280aa4e22 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -735,6 +735,7 @@ struct perf_event { int pending_wakeup; int pending_kill; int pending_disable; + unsigned long pending_addr; /* SIGTRAP */ struct irq_work pending; atomic_t event_limit; diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 8c5b9f5ad63f..31b00e3b69c9 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -311,6 +311,7 @@ enum perf_event_read_format { #define PERF_ATTR_SIZE_VER4 104 /* add: sample_regs_intr */ #define PERF_ATTR_SIZE_VER5 112 /* add: aux_watermark */ #define PERF_ATTR_SIZE_VER6 120 /* add: aux_sample_size */ +#define PERF_ATTR_SIZE_VER7 128 /* add: sig_data */ /* * Hardware event_id to monitor via a performance monitoring event: @@ -391,7 +392,8 @@ struct perf_event_attr { build_id : 1, /* use build id in mmap2 events */ inherit_thread : 1, /* children only inherit if cloned with CLONE_THREAD */ remove_on_exec : 1, /* event is removed from task on exec */ - __reserved_1 : 27; + sigtrap : 1, /* send synchronous SIGTRAP on event */ + __reserved_1 : 26; union { __u32 wakeup_events; /* wakeup every n events */ @@ -443,6 +445,12 @@ struct perf_event_attr { __u16 __reserved_2; __u32 aux_sample_size; __u32 __reserved_3; + + /* + * User 
provided data if sigtrap=1, passed back to user via + * siginfo_t::si_perf, e.g. to permit user to identify the event. + */ + __u64 sig_data; }; /* -- cgit From 7c8056bb366b1b2dc8e4a3cc0b876e15a8ebca2c Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 10 Feb 2021 17:33:25 +0900 Subject: perf core: Factor out __perf_sw_event_sched In some cases, we need to check more than whether the software event is enabled. So split the condition check and the actual event handling. This is a preparation for the next change. Suggested-by: Peter Zijlstra Signed-off-by: Namhyung Kim Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20210210083327.22726-1-namhyung@kernel.org --- include/linux/perf_event.h | 33 ++++++++++++--------------------- 1 file changed, 12 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 7d7280aa4e22..92d51a7beaea 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1178,30 +1178,24 @@ DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]); * which is guaranteed by us not actually scheduling inside other swevents * because those disable preemption. */ -static __always_inline void -perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) +static __always_inline void __perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) { - if (static_key_false(&perf_swevent_enabled[event_id])) { - struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]); + struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]); - perf_fetch_caller_regs(regs); - ___perf_sw_event(event_id, nr, regs, addr); - } + perf_fetch_caller_regs(regs); + ___perf_sw_event(event_id, nr, regs, addr); } extern struct static_key_false perf_sched_events; -static __always_inline bool -perf_sw_migrate_enabled(void) +static __always_inline bool __perf_sw_enabled(int swevt) { - if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS])) - return true; - return false; + return static_key_false(&perf_swevent_enabled[swevt]); } static inline void perf_event_task_migrate(struct task_struct *task) { - if (perf_sw_migrate_enabled()) + if (__perf_sw_enabled(PERF_COUNT_SW_CPU_MIGRATIONS)) task->sched_migrated = 1; } @@ -1211,11 +1205,9 @@ static inline void perf_event_task_sched_in(struct task_struct *prev, if (static_branch_unlikely(&perf_sched_events)) __perf_event_task_sched_in(prev, task); - if (perf_sw_migrate_enabled() && task->sched_migrated) { - struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]); - - perf_fetch_caller_regs(regs); - ___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0); + if (__perf_sw_enabled(PERF_COUNT_SW_CPU_MIGRATIONS) && + task->sched_migrated) { + __perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0); task->sched_migrated = 0; } } @@ -1223,7 +1215,8 @@ static inline void perf_event_task_sched_in(struct task_struct *prev, static inline void perf_event_task_sched_out(struct task_struct *prev, struct task_struct *next) { - perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0); + if (__perf_sw_enabled(PERF_COUNT_SW_CONTEXT_SWITCHES)) + __perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0); if (static_branch_unlikely(&perf_sched_events)) __perf_event_task_sched_out(prev, next); @@ -1480,8 +1473,6 @@ static inline int perf_event_refresh(struct perf_event *event, int refresh) static inline void perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { } static inline void -perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) { } -static inline void perf_bp_event(struct 
perf_event *event, void *data) { } static inline int perf_register_guest_info_callbacks -- cgit
From d0d1dd628527c77db2391ce0293c1ed344b2365f Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 10 Feb 2021 17:33:26 +0900 Subject: perf core: Add PERF_COUNT_SW_CGROUP_SWITCHES event Add a new software event to count context switches involving cgroup switches, i.e. it is counted only if the cgroups of the previous and next tasks differ. Note that it only checks the cgroups in the perf_event subsystem; for cgroup v2 it shouldn't matter anyway. One could argue that this can be done with the existing sched_switch event and eBPF, but some systems may not have eBPF available, so add this as a simple alternative. Signed-off-by: Namhyung Kim Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20210210083327.22726-2-namhyung@kernel.org
--- include/linux/perf_event.h | 7 +++++++ include/uapi/linux/perf_event.h | 1 + 2 files changed, 8 insertions(+) (limited to 'include')
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 92d51a7beaea..8989b2b7268d 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1218,6 +1218,13 @@ static inline void perf_event_task_sched_out(struct task_struct *prev, if (__perf_sw_enabled(PERF_COUNT_SW_CONTEXT_SWITCHES)) __perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0); +#ifdef CONFIG_CGROUP_PERF + if (__perf_sw_enabled(PERF_COUNT_SW_CGROUP_SWITCHES) && + perf_cgroup_from_task(prev, NULL) != + perf_cgroup_from_task(next, NULL)) + __perf_sw_event_sched(PERF_COUNT_SW_CGROUP_SWITCHES, 1, 0); +#endif + if (static_branch_unlikely(&perf_sched_events)) __perf_event_task_sched_out(prev, next); }
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 31b00e3b69c9..0b58970bab6b 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -112,6 +112,7 @@ enum perf_sw_ids { PERF_COUNT_SW_EMULATION_FAULTS = 8, PERF_COUNT_SW_DUMMY = 9, PERF_COUNT_SW_BPF_OUTPUT = 10, + PERF_COUNT_SW_CGROUP_SWITCHES = 11, PERF_COUNT_SW_MAX, /* non-ABI */ }; -- cgit
From a9c81ccdf52dd73a20178c40bca34cf52991fdea Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Mon, 12 Apr 2021 07:30:57 -0700 Subject: perf/x86: Add structures for the attributes of Hybrid PMUs Hybrid PMUs have different events and formats. In theory, Hybrid PMU specific attributes should be maintained in the dedicated struct x86_hybrid_pmu, but that wastes space because the events and formats are similar among Hybrid PMUs. To reduce duplication, all hybrid PMUs will share a group of attributes in the following patch. To distinguish attributes belonging to different Hybrid PMUs, a PMU-aware attribute structure is introduced. The attribute structure carries a PMU type, which is for internal use only and is not visible in the sysfs API. Hybrid PMUs may support the same event name but with different event encodings, e.g., the mem-loads event on an Atom PMU has a different encoding than on a Core PMU. This causes problems if two attributes are created for them: sysfs_update_group() currently finds an attribute by searching for the attr name (aka the event name), so if two attributes have the same event name, the first attribute will be replaced. To address the issue, only one attribute is created for the event. The event_str is extended and stores the event encodings from all Hybrid PMUs, separated by ";". The order of the event encodings must follow the order of the hybrid PMU index. 
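For illustration only (the encodings below are made up, not taken from this patch), a single attribute shared by a two-PMU hybrid system could carry an event_str such as:

  /* hypothetical encodings; PMU index 0 first, PMU index 1 second */
  .event_str = "event=0xd0,umask=0x1;event=0xcd,umask=0x2",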
The event_str is for internal use as well. When a user wants to show the attribute of a Hybrid PMU, only the corresponding part of the string is displayed. Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Andi Kleen Link: https://lkml.kernel.org/r/1618237865-33448-18-git-send-email-kan.liang@linux.intel.com
--- include/linux/perf_event.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'include')
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 8989b2b7268d..61b385154c4a 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1549,6 +1549,18 @@ struct perf_pmu_events_ht_attr { const char *event_str_noht; }; +struct perf_pmu_events_hybrid_attr { + struct device_attribute attr; + u64 id; + const char *event_str; + u64 pmu_type; +}; + +struct perf_pmu_format_hybrid_attr { + struct device_attribute attr; + u64 pmu_type; +}; + ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr, char *page); -- cgit
From 55bcf6ef314ae8ba81bcd74aa760247b635ed47b Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Mon, 12 Apr 2021 07:31:01 -0700 Subject: perf: Extend PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE Hardware events and hardware cache events currently have special perf types, PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE. The two types don't pass the PMU type in the user interface. For a hybrid system, the perf subsystem doesn't know which PMU the events belong to. The first capable PMU will always be assigned to the events, and the events never get a chance to run on the other capable PMUs. Extend the two types to become PMU-aware types. The PMU type ID is stored at attr.config[63:32]. Add a new PMU capability, PERF_PMU_CAP_EXTENDED_HW_TYPE, to indicate a PMU which supports the extended PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE. The PMU type is only required when searching for a specific PMU. The PMU-specific code is only interested in the 'real' config value, which is stored in the low 32 bits of event->attr.config. Update event->attr.config in the generic code, so the PMU-specific code doesn't need to calculate it separately. If a user specifies a PMU type but the PMU doesn't support the extended type, error out. If an event cannot be initialized in a PMU specified by a user, error out immediately; perf should not try to open it on other PMUs. The new PMU capability is only set for the x86 hybrid PMUs for now. Other architectures, e.g., ARM, may need it as well; support on ARM may be implemented separately later. 
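A hedged user-space sketch of the extended encoding (the PMU name cpu_core and the helper below are assumptions for illustration; the per-PMU type ID is exposed via each PMU's sysfs 'type' attribute, and PERF_PMU_TYPE_SHIFT is the macro added by this patch):

  #include <linux/perf_event.h>
  #include <stdio.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  static int open_cycles_on_pmu(const char *type_path)
  {
          unsigned long long pmu_type = 0;
          FILE *f = fopen(type_path, "r");
          struct perf_event_attr attr = {
                  .type = PERF_TYPE_HARDWARE,
                  .size = sizeof(attr),
          };

          if (f) {
                  fscanf(f, "%llu", &pmu_type);
                  fclose(f);
          }
          /* PMU type ID in config[63:32], generic hardware event ID in config[31:0] */
          attr.config = (pmu_type << PERF_PMU_TYPE_SHIFT) | PERF_COUNT_HW_CPU_CYCLES;

          return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
  }

  /* e.g. open_cycles_on_pmu("/sys/bus/event_source/devices/cpu_core/type") */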
Suggested-by: Peter Zijlstra (Intel) Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/1618237865-33448-22-git-send-email-kan.liang@linux.intel.com --- include/linux/perf_event.h | 19 ++++++++++--------- include/uapi/linux/perf_event.h | 15 +++++++++++++++ 2 files changed, 25 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 61b385154c4a..a763928a0e41 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -260,15 +260,16 @@ struct perf_event; /** * pmu::capabilities flags */ -#define PERF_PMU_CAP_NO_INTERRUPT 0x01 -#define PERF_PMU_CAP_NO_NMI 0x02 -#define PERF_PMU_CAP_AUX_NO_SG 0x04 -#define PERF_PMU_CAP_EXTENDED_REGS 0x08 -#define PERF_PMU_CAP_EXCLUSIVE 0x10 -#define PERF_PMU_CAP_ITRACE 0x20 -#define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x40 -#define PERF_PMU_CAP_NO_EXCLUDE 0x80 -#define PERF_PMU_CAP_AUX_OUTPUT 0x100 +#define PERF_PMU_CAP_NO_INTERRUPT 0x0001 +#define PERF_PMU_CAP_NO_NMI 0x0002 +#define PERF_PMU_CAP_AUX_NO_SG 0x0004 +#define PERF_PMU_CAP_EXTENDED_REGS 0x0008 +#define PERF_PMU_CAP_EXCLUSIVE 0x0010 +#define PERF_PMU_CAP_ITRACE 0x0020 +#define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x0040 +#define PERF_PMU_CAP_NO_EXCLUDE 0x0080 +#define PERF_PMU_CAP_AUX_OUTPUT 0x0100 +#define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0200 struct perf_output_handle; diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 0b58970bab6b..e54e639248c8 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -37,6 +37,21 @@ enum perf_type_id { PERF_TYPE_MAX, /* non-ABI */ }; +/* + * attr.config layout for type PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE + * PERF_TYPE_HARDWARE: 0xEEEEEEEE000000AA + * AA: hardware event ID + * EEEEEEEE: PMU type ID + * PERF_TYPE_HW_CACHE: 0xEEEEEEEE00DDCCBB + * BB: hardware cache ID + * CC: hardware cache op ID + * DD: hardware cache op result ID + * EEEEEEEE: PMU type ID + * If the PMU type ID is 0, the PERF_TYPE_RAW will be applied. + */ +#define PERF_PMU_TYPE_SHIFT 32 +#define PERF_HW_EVENT_MASK 0xffffffff + /* * Generalized performance event event_id types, used by the * attr.event_id parameter of the sys_perf_event_open() -- cgit From 3ddb3fd8cdb0a6c11b7c8d91ba42d84c4ea3cc43 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Thu, 22 Apr 2021 21:18:22 +0200 Subject: signal, perf: Fix siginfo_t by avoiding u64 on 32-bit architectures The alignment of a structure is that of its largest member. On architectures like 32-bit Arm (but not e.g. 32-bit x86) 64-bit integers will require 64-bit alignment and not its natural word size. This means that there is no portable way to add 64-bit integers to siginfo_t on 32-bit architectures without breaking the ABI, because siginfo_t does not yet (and therefore likely never will) contain 64-bit fields on 32-bit architectures. Adding a 64-bit integer could change the alignment of the union after the 3 initial int si_signo, si_errno, si_code, thus introducing 4 bytes of padding shifting the entire union, which would break the ABI. One alternative would be to use the __packed attribute, however, it is non-standard C. Given siginfo_t has definitions outside the Linux kernel in various standard libraries that can be compiled with any number of different compilers (not just those we rely on), using non-standard attributes on siginfo_t should be avoided to ensure portability. 
In the case of the si_perf field, word size is sufficient since there is no exact requirement on size, given the data it contains is user-defined via perf_event_attr::sig_data. On 32-bit architectures, any excess bits of perf_event_attr::sig_data will therefore be truncated when copying into si_perf. Since si_perf is intended to disambiguate events (e.g. encoding relevant information if there are more events of the same type), 32 bits should provide enough entropy to do so on 32-bit architectures. For 64-bit architectures, no change is intended. Fixes: fb6cc127e0b6 ("signal: Introduce TRAP_PERF si_code and si_perf to siginfo") Reported-by: Marek Szyprowski Reported-by: Jon Hunter Signed-off-by: Marco Elver Signed-off-by: Peter Zijlstra (Intel) Tested-by: Marek Szyprowski Tested-by: Jon Hunter Link: https://lkml.kernel.org/r/20210422191823.79012-1-elver@google.com --- include/linux/compat.h | 2 +- include/uapi/asm-generic/siginfo.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/compat.h b/include/linux/compat.h index c8821d966812..f0d2dd35d408 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -237,7 +237,7 @@ typedef struct compat_siginfo { u32 _pkey; } _addr_pkey; /* used when si_code=TRAP_PERF */ - compat_u64 _perf; + compat_ulong_t _perf; }; } _sigfault; diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h index d0bb9125c853..03d6f6d2c1fe 100644 --- a/include/uapi/asm-generic/siginfo.h +++ b/include/uapi/asm-generic/siginfo.h @@ -92,7 +92,7 @@ union __sifields { __u32 _pkey; } _addr_pkey; /* used when si_code=TRAP_PERF */ - __u64 _perf; + unsigned long _perf; }; } _sigfault; -- cgit
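Taken together, the sigtrap, sig_data and si_perf pieces above can be exercised from user space roughly as follows. This is a hedged sketch, not part of any patch: the handler, the choice of a sampling event, and the cookie value are illustrative, and it assumes libc headers that already expose TRAP_PERF and si_perf from the updated uapi definitions.

  #include <linux/perf_event.h>
  #include <signal.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  static volatile unsigned long last_sig_data;

  static void perf_sigtrap_handler(int sig, siginfo_t *info, void *uctx)
  {
          /* si_perf carries attr.sig_data; si_errno carries the event type */
          if (info->si_code == TRAP_PERF)
                  last_sig_data = info->si_perf;
  }

  int main(void)
  {
          struct sigaction sa = { .sa_sigaction = perf_sigtrap_handler,
                                  .sa_flags = SA_SIGINFO };
          struct perf_event_attr attr = {
                  .type          = PERF_TYPE_HARDWARE,
                  .config        = PERF_COUNT_HW_INSTRUCTIONS,
                  .size          = sizeof(attr),
                  .sample_period = 1000000,       /* SIGTRAP is sent on event overflow */
                  .sigtrap       = 1,
                  .sig_data      = 0x1234,        /* arbitrary cookie, returned in si_perf */
          };

          sigaction(SIGTRAP, &sa, NULL);
          syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
          /* ... run the workload; the handler observes si_perf == 0x1234 ... */
          return 0;
  }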