Diffstat (limited to 'include/linux/perf_event.h')
-rw-r--r--  include/linux/perf_event.h  262
1 file changed, 186 insertions(+), 76 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index d2a15c0c6f8a..5a9bf15d4461 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -168,6 +168,15 @@ struct hw_perf_event {
struct hw_perf_event_extra extra_reg;
struct hw_perf_event_extra branch_reg;
};
+ struct { /* aux / Intel-PT */
+ u64 aux_config;
+ /*
+ * For AUX area events, aux_paused cannot be a state
+ * flag because it can be updated asynchronously with
+ * respect to state.
+ */
+ unsigned int aux_paused;
+ };
struct { /* software */
struct hrtimer hrtimer;
};
@@ -291,6 +300,20 @@ struct perf_event_pmu_context;
#define PERF_PMU_CAP_NO_EXCLUDE 0x0040
#define PERF_PMU_CAP_AUX_OUTPUT 0x0080
#define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0100
+#define PERF_PMU_CAP_AUX_PAUSE 0x0200
+
+/**
+ * pmu::scope - the CPU topology scope at which the PMU counts events
+ */
+enum perf_pmu_scope {
+ PERF_PMU_SCOPE_NONE = 0,
+ PERF_PMU_SCOPE_CORE,
+ PERF_PMU_SCOPE_DIE,
+ PERF_PMU_SCOPE_CLUSTER,
+ PERF_PMU_SCOPE_PKG,
+ PERF_PMU_SCOPE_SYS_WIDE,
+ PERF_PMU_MAX_SCOPE,
+};
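
As a rough illustration of how a driver might use the new scope field (a minimal sketch; my_uncore_pmu and its callbacks are hypothetical, not part of this patch): with a scope set, the core can migrate the PMU's events to another online CPU in the same scope on hotplug, instead of each driver implementing hotplug handling itself.

	static struct pmu my_uncore_pmu = {
		.task_ctx_nr	= perf_invalid_context,
		.scope		= PERF_PMU_SCOPE_PKG,	/* package-wide counters */
		.event_init	= my_uncore_event_init,	/* hypothetical */
		.add		= my_uncore_add,	/* hypothetical */
		.del		= my_uncore_del,	/* hypothetical */
		.start		= my_uncore_start,	/* hypothetical */
		.stop		= my_uncore_stop,	/* hypothetical */
		.read		= my_uncore_read,	/* hypothetical */
	};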
struct perf_output_handle;
@@ -315,8 +338,12 @@ struct pmu {
*/
int capabilities;
- int __percpu *pmu_disable_count;
- struct perf_cpu_pmu_context __percpu *cpu_pmu_context;
+ /*
+ * PMU scope
+ */
+ unsigned int scope;
+
+ struct perf_cpu_pmu_context * __percpu *cpu_pmu_context;
atomic_t exclusive_cnt; /* < 0: cpu; > 0: tsk */
int task_ctx_nr;
int hrtimer_interval_ms;
@@ -363,6 +390,8 @@ struct pmu {
#define PERF_EF_START 0x01 /* start the counter when adding */
#define PERF_EF_RELOAD 0x02 /* reload the counter when starting */
#define PERF_EF_UPDATE 0x04 /* update the counter when stopping */
+#define PERF_EF_PAUSE 0x08 /* AUX area event, pause tracing */
+#define PERF_EF_RESUME 0x10 /* AUX area event, resume tracing */
/*
* Adds/Removes a counter to/from the PMU, can be done inside a
@@ -402,6 +431,18 @@ struct pmu {
*
* ->start() with PERF_EF_RELOAD will reprogram the counter
* value, must be preceded by a ->stop() with PERF_EF_UPDATE.
+ *
+ * ->stop() with PERF_EF_PAUSE will stop as simply as possible. Will not
+ * overlap another ->stop() with PERF_EF_PAUSE nor ->start() with
+ * PERF_EF_RESUME.
+ *
+ * ->start() with PERF_EF_RESUME will start as simply as possible but
+ * only if the counter is not otherwise stopped. Will not overlap
+ * another ->start() with PERF_EF_RESUME nor ->stop() with
+ * PERF_EF_PAUSE.
+ *
+ * Notably, PERF_EF_PAUSE/PERF_EF_RESUME *can* be concurrent with other
+ * ->stop()/->start() invocations, just not with each other.
*/
void (*start) (struct perf_event *event, int flags);
void (*stop) (struct perf_event *event, int flags);
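
A minimal sketch of how an AUX-area PMU driver might honour the new flags in these callbacks, following the rules in the comment above (the driver and helper names are hypothetical, not taken from this patch):

	static void my_pt_stop(struct perf_event *event, int flags)
	{
		if (flags & PERF_EF_PAUSE) {
			/*
			 * Pause: stop tracing as simply as possible. The core
			 * guarantees this does not overlap another PERF_EF_PAUSE
			 * ->stop() or a PERF_EF_RESUME ->start().
			 */
			my_pt_pause_trace(event);		/* hypothetical */
			return;
		}
		/* regular ->stop(), honouring PERF_EF_UPDATE as before */
		my_pt_full_stop(event, flags);			/* hypothetical */
	}

	static void my_pt_start(struct perf_event *event, int flags)
	{
		if (flags & PERF_EF_RESUME) {
			/* Resume only if the counter is not otherwise stopped. */
			if (!(event->hw.state & PERF_HES_STOPPED))
				my_pt_resume_trace(event);	/* hypothetical */
			return;
		}
		/* regular ->start(), honouring PERF_EF_RELOAD as before */
		my_pt_full_start(event, flags);			/* hypothetical */
	}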
@@ -453,7 +494,7 @@ struct pmu {
* context-switches callback
*/
void (*sched_task) (struct perf_event_pmu_context *pmu_ctx,
- bool sched_in);
+ struct task_struct *task, bool sched_in);
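
Drivers implementing ->sched_task() would update their signatures accordingly; a hedged sketch (function and helper names hypothetical):

	static void my_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
				      struct task_struct *task, bool sched_in)
	{
		/*
		 * The task being switched in (or out) is now passed in
		 * explicitly instead of being derived from the context.
		 */
		if (sched_in)
			my_pmu_restore_task_state(task);	/* hypothetical */
		else
			my_pmu_save_task_state(task);		/* hypothetical */
	}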
/*
* Kmem cache of PMU specific data
@@ -461,16 +502,6 @@ struct pmu {
struct kmem_cache *task_ctx_cache;
/*
- * PMU specific parts of task perf event context (i.e. ctx->task_ctx_data)
- * can be synchronized using this function. See Intel LBR callstack support
- * implementation and Perf core context switch handling callbacks for usage
- * examples.
- */
- void (*swap_task_ctx) (struct perf_event_pmu_context *prev_epc,
- struct perf_event_pmu_context *next_epc);
- /* optional */
-
- /*
* Set up pmu-private data structures for an AUX area
*/
void *(*setup_aux) (struct perf_event *event, void **pages,
@@ -615,10 +646,13 @@ typedef void (*perf_overflow_handler_t)(struct perf_event *,
* PERF_EV_CAP_SIBLING: An event with this flag must be a group sibling and
* cannot be a group leader. If an event with this flag is detached from the
* group it is scheduled out and moved into an unrecoverable ERROR state.
+ * PERF_EV_CAP_READ_SCOPE: A CPU event that can be read from any CPU of the
+ * PMU scope where it is active.
*/
#define PERF_EV_CAP_SOFTWARE BIT(0)
#define PERF_EV_CAP_READ_ACTIVE_PKG BIT(1)
#define PERF_EV_CAP_SIBLING BIT(2)
+#define PERF_EV_CAP_READ_SCOPE BIT(3)
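
A driver opting in would be expected to set the capability at event initialization, along these lines (a sketch; the init function and its surrounding validation are hypothetical):

	static int my_uncore_event_init(struct perf_event *event)
	{
		/* ... validate event->attr ... */

		/*
		 * Allow reads from any CPU in this PMU's scope, not just
		 * the CPU the event is active on.
		 */
		event->event_caps |= PERF_EV_CAP_READ_SCOPE;
		return 0;
	}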
#define SWEVENT_HLIST_BITS 8
#define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS)
@@ -628,13 +662,16 @@ struct swevent_hlist {
struct rcu_head rcu_head;
};
-#define PERF_ATTACH_CONTEXT 0x01
-#define PERF_ATTACH_GROUP 0x02
-#define PERF_ATTACH_TASK 0x04
-#define PERF_ATTACH_TASK_DATA 0x08
-#define PERF_ATTACH_ITRACE 0x10
-#define PERF_ATTACH_SCHED_CB 0x20
-#define PERF_ATTACH_CHILD 0x40
+#define PERF_ATTACH_CONTEXT 0x0001
+#define PERF_ATTACH_GROUP 0x0002
+#define PERF_ATTACH_TASK 0x0004
+#define PERF_ATTACH_TASK_DATA 0x0008
+#define PERF_ATTACH_GLOBAL_DATA 0x0010
+#define PERF_ATTACH_SCHED_CB 0x0020
+#define PERF_ATTACH_CHILD 0x0040
+#define PERF_ATTACH_EXCLUSIVE 0x0080
+#define PERF_ATTACH_CALLCHAIN 0x0100
+#define PERF_ATTACH_ITRACE 0x0200
struct bpf_prog;
struct perf_cgroup;
@@ -781,11 +818,12 @@ struct perf_event {
unsigned int pending_wakeup;
unsigned int pending_kill;
unsigned int pending_disable;
- unsigned int pending_sigtrap;
unsigned long pending_addr; /* SIGTRAP */
struct irq_work pending_irq;
+ struct irq_work pending_disable_irq;
struct callback_head pending_task;
unsigned int pending_work;
+ struct rcuwait pending_work_wait;
atomic_t event_limit;
@@ -809,11 +847,8 @@ struct perf_event {
u64 (*clock)(void);
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
-#ifdef CONFIG_BPF_SYSCALL
- perf_overflow_handler_t orig_overflow_handler;
struct bpf_prog *prog;
u64 bpf_cookie;
-#endif
#ifdef CONFIG_EVENT_TRACING
struct trace_event_call *tp_event;
@@ -878,16 +913,16 @@ struct perf_event_pmu_context {
struct list_head pinned_active;
struct list_head flexible_active;
- /* Used to avoid freeing per-cpu perf_event_pmu_context */
+ /* Used to identify the per-cpu perf_event_pmu_context */
unsigned int embedded : 1;
unsigned int nr_events;
unsigned int nr_cgroups;
+ unsigned int nr_freq;
atomic_t refcount; /* event <-> epc */
struct rcu_head rcu_head;
- void *task_ctx_data; /* pmu specific data */
/*
* Set when one or more (plausibly active) event can't be scheduled
* due to pmu overcommit or pmu constraints, except tolerant to
@@ -897,6 +932,11 @@ struct perf_event_pmu_context {
int rotate_necessary;
};
+static inline bool perf_pmu_ctx_is_active(struct perf_event_pmu_context *epc)
+{
+ return !list_empty(&epc->flexible_active) || !list_empty(&epc->pinned_active);
+}
+
struct perf_event_groups {
struct rb_root tree;
u64 index;
@@ -930,7 +970,6 @@ struct perf_event_context {
int nr_user;
int is_active;
- int nr_task_data;
int nr_stat;
int nr_freq;
int rotate_disable;
@@ -959,19 +998,52 @@ struct perf_event_context {
struct rcu_head rcu_head;
/*
- * Sum (event->pending_sigtrap + event->pending_work)
+ * The count of events for which using the switch-out fast path
+ * should be avoided.
+ *
+ * Sum (event->pending_work + events with
+ * (attr->inherit && (attr->sample_type & PERF_SAMPLE_READ)))
*
 * The SIGTRAP is targeted at ctx->task; as such, ctx->task must not be
 * changed until the signal is delivered.
*/
- local_t nr_pending;
+ local_t nr_no_switch_fast;
};
-/*
- * Number of contexts where an event can trigger:
- * task, softirq, hardirq, nmi.
+/**
+ * struct perf_ctx_data - PMU specific data for a task
+ * @rcu_head: To avoid races when freeing the PMU specific data
+ * @refcount: To track users
+ * @global: To track system-wide users
+ * @ctx_cache: Kmem cache of PMU specific data
+ * @data: PMU specific data
+ *
+ * Currently, the struct is only used in Intel LBR call stack mode to
+ * save/restore the call stack of a task on context switches.
+ *
+ * The rcu_head is used to prevent a use-after-free race when the data
+ * is freed.
+ * The data is only allocated when Intel LBR call stack mode is enabled,
+ * and it is freed when the mode is disabled.
+ * The data content is only accessed on context switch, and those
+ * accesses must be protected by rcu_read_lock().
+ *
+ * Because of the alignment requirements of Intel Arch LBR, a kmem
+ * cache is used to allocate the PMU specific data. The ctx_cache
+ * field tracks that kmem cache.
+ *
+ * Careful: struct perf_ctx_data is referenced by a pointer in
+ * struct task_struct. When system-wide Intel LBR call stack mode is
+ * enabled, a fixed-size buffer is allocated for every task, and
+ * system memory consumption grows further if struct perf_ctx_data
+ * is enlarged.
*/
-#define PERF_NR_CONTEXTS 4
+struct perf_ctx_data {
+ struct rcu_head rcu_head;
+ refcount_t refcount;
+ int global;
+ struct kmem_cache *ctx_cache;
+ void *data;
+};
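
Per the kernel-doc above, readers of the data are expected to hold rcu_read_lock(); a minimal sketch of such an access, assuming the task_struct::perf_ctx_data pointer added elsewhere in this series (helper names hypothetical):

	static void my_lbr_restore_from_task(struct task_struct *task)
	{
		struct perf_ctx_data *cd;

		rcu_read_lock();
		cd = rcu_dereference(task->perf_ctx_data);
		if (cd)
			my_lbr_restore(cd->data);	/* hypothetical */
		rcu_read_unlock();
	}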
struct perf_cpu_pmu_context {
struct perf_event_pmu_context epc;
@@ -982,6 +1054,7 @@ struct perf_cpu_pmu_context {
int active_oncpu;
int exclusive;
+ int pmu_disable_count;
raw_spinlock_t hrtimer_lock;
struct hrtimer hrtimer;
@@ -1015,7 +1088,13 @@ struct perf_output_handle {
struct perf_buffer *rb;
unsigned long wakeup;
unsigned long size;
- u64 aux_flags;
+ union {
+ u64 flags; /* perf_output*() */
+ u64 aux_flags; /* perf_aux_output*() */
+ struct {
+ u64 skip_read : 1;
+ };
+ };
union {
void *addr;
unsigned long head;
@@ -1232,6 +1311,11 @@ static inline void perf_sample_save_callchain(struct perf_sample_data *data,
{
int size = 1;
+ if (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN))
+ return;
+ if (WARN_ON_ONCE(data->sample_flags & PERF_SAMPLE_CALLCHAIN))
+ return;
+
data->callchain = perf_callchain(event, regs);
size += data->callchain->nr;
@@ -1240,12 +1324,18 @@ static inline void perf_sample_save_callchain(struct perf_sample_data *data,
}
static inline void perf_sample_save_raw_data(struct perf_sample_data *data,
+ struct perf_event *event,
struct perf_raw_record *raw)
{
struct perf_raw_frag *frag = &raw->frag;
u32 sum = 0;
int size;
+ if (!(event->attr.sample_type & PERF_SAMPLE_RAW))
+ return;
+ if (WARN_ON_ONCE(data->sample_flags & PERF_SAMPLE_RAW))
+ return;
+
do {
sum += frag->size;
if (perf_raw_frag_last(frag))
@@ -1262,6 +1352,11 @@ static inline void perf_sample_save_raw_data(struct perf_sample_data *data,
data->sample_flags |= PERF_SAMPLE_RAW;
}
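
Call sites gain the extra event argument and, per the new early returns, no longer need to test PERF_SAMPLE_RAW themselves; a hedged usage sketch (my_payload is a hypothetical driver-local record):

	u64 my_payload = 0;			/* hypothetical raw payload */
	struct perf_sample_data data;
	struct perf_raw_record raw = {
		.frag = {
			.size	= sizeof(my_payload),
			.data	= &my_payload,
		},
	};

	perf_sample_data_init(&data, 0, 0);
	/*
	 * No PERF_SAMPLE_RAW check is needed here any more; the helper
	 * returns early unless the event asked for raw data.
	 */
	perf_sample_save_raw_data(&data, event, &raw);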
+static inline bool has_branch_stack(struct perf_event *event)
+{
+ return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
+}
+
static inline void perf_sample_save_brstack(struct perf_sample_data *data,
struct perf_event *event,
struct perf_branch_stack *brs,
@@ -1269,8 +1364,16 @@ static inline void perf_sample_save_brstack(struct perf_sample_data *data,
{
int size = sizeof(u64); /* nr */
+ if (!has_branch_stack(event))
+ return;
+ if (WARN_ON_ONCE(data->sample_flags & PERF_SAMPLE_BRANCH_STACK))
+ return;
+
if (branch_sample_hw_index(event))
size += sizeof(u64);
+
+ brs->nr = min_t(u16, event->attr.sample_max_stack, brs->nr);
+
size += brs->nr * sizeof(struct perf_branch_entry);
/*
@@ -1342,8 +1445,10 @@ extern int perf_event_output(struct perf_event *event,
struct pt_regs *regs);
static inline bool
-__is_default_overflow_handler(perf_overflow_handler_t overflow_handler)
+is_default_overflow_handler(struct perf_event *event)
{
+ perf_overflow_handler_t overflow_handler = event->overflow_handler;
+
if (likely(overflow_handler == perf_event_output_forward))
return true;
if (unlikely(overflow_handler == perf_event_output_backward))
@@ -1351,22 +1456,6 @@ __is_default_overflow_handler(perf_overflow_handler_t overflow_handler)
return false;
}
-#define is_default_overflow_handler(event) \
- __is_default_overflow_handler((event)->overflow_handler)
-
-#ifdef CONFIG_BPF_SYSCALL
-static inline bool uses_default_overflow_handler(struct perf_event *event)
-{
- if (likely(is_default_overflow_handler(event)))
- return true;
-
- return __is_default_overflow_handler(event->orig_overflow_handler);
-}
-#else
-#define uses_default_overflow_handler(event) \
- is_default_overflow_handler(event)
-#endif
-
extern void
perf_event_header__init_id(struct perf_event_header *header,
struct perf_sample_data *data,
@@ -1592,19 +1681,10 @@ static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64
}
extern int sysctl_perf_event_paranoid;
-extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;
-extern int sysctl_perf_cpu_time_max_percent;
extern void perf_sample_event_took(u64 sample_len_ns);
-int perf_event_max_sample_rate_handler(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
-int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
-int perf_event_max_stack_handler(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
-
/* Access to perf_event_open(2) syscall. */
#define PERF_SECURITY_OPEN 0
@@ -1618,30 +1698,26 @@ static inline int perf_is_paranoid(void)
return sysctl_perf_event_paranoid > -1;
}
-static inline int perf_allow_kernel(struct perf_event_attr *attr)
-{
- if (sysctl_perf_event_paranoid > 1 && !perfmon_capable())
- return -EACCES;
-
- return security_perf_event_open(attr, PERF_SECURITY_KERNEL);
-}
+int perf_allow_kernel(void);
-static inline int perf_allow_cpu(struct perf_event_attr *attr)
+static inline int perf_allow_cpu(void)
{
if (sysctl_perf_event_paranoid > 0 && !perfmon_capable())
return -EACCES;
- return security_perf_event_open(attr, PERF_SECURITY_CPU);
+ return security_perf_event_open(PERF_SECURITY_CPU);
}
-static inline int perf_allow_tracepoint(struct perf_event_attr *attr)
+static inline int perf_allow_tracepoint(void)
{
if (sysctl_perf_event_paranoid > -1 && !perfmon_capable())
return -EPERM;
- return security_perf_event_open(attr, PERF_SECURITY_TRACEPOINT);
+ return security_perf_event_open(PERF_SECURITY_TRACEPOINT);
}
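
With the attr argument gone, call sites simplify to a plain capability check; a minimal sketch of a caller (the surrounding context is hypothetical):

	/* e.g. in the event_init path of a tracepoint-backed event */
	int err = perf_allow_tracepoint();

	if (err)
		return err;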
+extern int perf_exclude_event(struct perf_event *event, struct pt_regs *regs);
+
extern void perf_event_init(void);
extern void perf_tp_event(u16 event_type, u64 count, void *record,
int entry_size, struct pt_regs *regs,
@@ -1649,19 +1725,34 @@ extern void perf_tp_event(u16 event_type, u64 count, void *record,
struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);
-#ifndef perf_misc_flags
-# define perf_misc_flags(regs) \
+extern unsigned long perf_misc_flags(struct perf_event *event, struct pt_regs *regs);
+extern unsigned long perf_instruction_pointer(struct perf_event *event,
+ struct pt_regs *regs);
+
+#ifndef perf_arch_misc_flags
+# define perf_arch_misc_flags(regs) \
(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
-# define perf_instruction_pointer(regs) instruction_pointer(regs)
+# define perf_arch_instruction_pointer(regs) instruction_pointer(regs)
#endif
#ifndef perf_arch_bpf_user_pt_regs
# define perf_arch_bpf_user_pt_regs(regs) regs
#endif
-static inline bool has_branch_stack(struct perf_event *event)
+#ifndef perf_arch_guest_misc_flags
+static inline unsigned long perf_arch_guest_misc_flags(struct pt_regs *regs)
{
- return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
+ unsigned long guest_state = perf_guest_state();
+
+ if (!(guest_state & PERF_GUEST_ACTIVE))
+ return 0;
+
+ if (guest_state & PERF_GUEST_USER)
+ return PERF_RECORD_MISC_GUEST_USER;
+ else
+ return PERF_RECORD_MISC_GUEST_KERNEL;
}
+# define perf_arch_guest_misc_flags(regs) perf_arch_guest_misc_flags(regs)
+#endif
static inline bool needs_branch_stack(struct perf_event *event)
{
@@ -1673,6 +1764,13 @@ static inline bool has_aux(struct perf_event *event)
return event->pmu->setup_aux;
}
+static inline bool has_aux_action(struct perf_event *event)
+{
+ return event->attr.aux_sample_size ||
+ event->attr.aux_pause ||
+ event->attr.aux_resume;
+}
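
For context, the aux_pause/aux_resume actions this helper tests are requested from userspace via new perf_event_attr bits added in the same series; a hedged sketch of opening a sibling event that pauses the group's AUX trace on overflow (the attr bit names follow the accompanying uapi change; error handling omitted):

	struct perf_event_attr attr = {
		.size		= sizeof(attr),
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_INSTRUCTIONS,
		.sample_period	= 100000,
		.aux_pause	= 1,	/* pause the leader's AUX trace on overflow */
	};
	/* group_fd is the AUX-area (e.g. Intel PT) group leader's fd */
	int fd = syscall(SYS_perf_event_open, &attr, 0, -1, group_fd, 0);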
+
static inline bool is_write_backward(struct perf_event *event)
{
return !!event->attr.write_backward;
@@ -1697,6 +1795,14 @@ perf_event_addr_filters(struct perf_event *event)
return ifh;
}
+static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
+{
+ /* Only the parent has fasync state */
+ if (event->parent)
+ event = event->parent;
+ return &event->fasync;
+}
+
extern void perf_event_addr_filters_sync(struct perf_event *event);
extern void perf_report_aux_output_id(struct perf_event *event, u64 hw_id);
@@ -1817,6 +1923,10 @@ static inline u64 perf_event_pause(struct perf_event *event, bool reset)
{
return 0;
}
+static inline int perf_exclude_event(struct perf_event *event, struct pt_regs *regs)
+{
+ return 0;
+}
#endif
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)