Diffstat (limited to 'kernel/events/internal.h')
-rw-r--r--   kernel/events/internal.h   169
1 file changed, 118 insertions(+), 51 deletions(-)
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index ca6599723be5..d9cc57083091 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -1,15 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _KERNEL_EVENTS_INTERNAL_H
 #define _KERNEL_EVENTS_INTERNAL_H
 
 #include <linux/hardirq.h>
 #include <linux/uaccess.h>
+#include <linux/refcount.h>
 
 /* Buffer handling */
 
 #define RING_BUFFER_WRITABLE		0x01
 
-struct ring_buffer {
-	atomic_t			refcount;
+struct perf_buffer {
+	refcount_t			refcount;
 	struct rcu_head			rcu_head;
 #ifdef CONFIG_PERF_USE_VMALLOC
 	struct work_struct		work;
@@ -17,44 +19,84 @@ struct ring_buffer {
 #endif
 	int				nr_pages;	/* nr of data pages  */
 	int				overwrite;	/* can overwrite itself */
+	int				paused;		/* can write into ring buffer */
 
 	atomic_t			poll;		/* POLL_ for wakeups */
 
 	local_t				head;		/* write position    */
-	local_t				nest;		/* nested writers    */
+	unsigned int			nest;		/* nested writers    */
 	local_t				events;		/* event limit       */
 	local_t				wakeup;		/* wakeup stamp      */
 	local_t				lost;		/* nr records lost   */
 
 	long				watermark;	/* wakeup watermark  */
+	long				aux_watermark;
 	/* poll crap */
 	spinlock_t			event_lock;
 	struct list_head		event_list;
 
-	atomic_t			mmap_count;
+	refcount_t			mmap_count;
 	unsigned long			mmap_locked;
 	struct user_struct		*mmap_user;
 
+	/* AUX area */
+	struct mutex			aux_mutex;
+	long				aux_head;
+	unsigned int			aux_nest;
+	long				aux_wakeup;	/* last aux_watermark boundary crossed by aux_head */
+	unsigned long			aux_pgoff;
+	int				aux_nr_pages;
+	int				aux_overwrite;
+	refcount_t			aux_mmap_count;
+	unsigned long			aux_mmap_locked;
+	void				(*free_aux)(void *);
+	refcount_t			aux_refcount;
+	int				aux_in_sampling;
+	int				aux_in_pause_resume;
+	void				**aux_pages;
+	void				*aux_priv;
+
 	struct perf_event_mmap_page	*user_page;
-	void				*data_pages[0];
+	void				*data_pages[];
 };
 
-extern void rb_free(struct ring_buffer *rb);
-extern struct ring_buffer *
+extern void rb_free(struct perf_buffer *rb);
+
+static inline void rb_free_rcu(struct rcu_head *rcu_head)
+{
+	struct perf_buffer *rb;
+
+	rb = container_of(rcu_head, struct perf_buffer, rcu_head);
+	rb_free(rb);
+}
+
+static inline void rb_toggle_paused(struct perf_buffer *rb, bool pause)
+{
+	if (!pause && rb->nr_pages)
+		rb->paused = 0;
+	else
+		rb->paused = 1;
+}
+
+extern struct perf_buffer *
 rb_alloc(int nr_pages, long watermark, int cpu, int flags);
 extern void perf_event_wakeup(struct perf_event *event);
+extern int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
+			pgoff_t pgoff, int nr_pages, long watermark, int flags);
+extern void rb_free_aux(struct perf_buffer *rb);
+extern struct perf_buffer *ring_buffer_get(struct perf_event *event);
+extern void ring_buffer_put(struct perf_buffer *rb);
 
-extern void
-perf_event_header__init_id(struct perf_event_header *header,
-			   struct perf_sample_data *data,
-			   struct perf_event *event);
-extern void
-perf_event__output_id_sample(struct perf_event *event,
-			     struct perf_output_handle *handle,
-			     struct perf_sample_data *sample);
+static inline bool rb_has_aux(struct perf_buffer *rb)
+{
+	return !!rb->aux_nr_pages;
+}
+
+void perf_event_aux_event(struct perf_event *event, unsigned long head,
+			  unsigned long size, u64 flags);
 
 extern struct page *
-perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);
+perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff);
 
 #ifdef CONFIG_PERF_USE_VMALLOC
 /*
@@ -63,42 +105,50 @@ perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);
  * Required for architectures that have d-cache aliasing issues.
  */
-static inline int page_order(struct ring_buffer *rb)
+static inline int page_order(struct perf_buffer *rb)
 {
 	return rb->page_order;
 }
 #else
-static inline int page_order(struct ring_buffer *rb)
+static inline int page_order(struct perf_buffer *rb)
 {
 	return 0;
 }
 #endif
 
-static inline unsigned long perf_data_size(struct ring_buffer *rb)
+static inline int data_page_nr(struct perf_buffer *rb)
+{
+	return rb->nr_pages << page_order(rb);
+}
+
+static inline unsigned long perf_data_size(struct perf_buffer *rb)
 {
 	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
 }
 
-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
-static inline unsigned int						\
-func_name(struct perf_output_handle *handle,				\
-	  const void *buf, unsigned int len)				\
+static inline unsigned long perf_aux_size(struct perf_buffer *rb)
+{
+	return (unsigned long)rb->aux_nr_pages << PAGE_SHIFT;
+}
+
+#define __DEFINE_OUTPUT_COPY_BODY(advance_buf, memcpy_func, ...)	\
 {									\
 	unsigned long size, written;					\
 									\
 	do {								\
-		size = min_t(unsigned long, handle->size, len);		\
-									\
-		written = memcpy_func(handle->addr, buf, size);		\
+		size = min(handle->size, len);				\
+		written = memcpy_func(__VA_ARGS__);			\
+		written = size - written;				\
 									\
 		len -= written;						\
 		handle->addr += written;				\
-		buf += written;						\
+		if (advance_buf)					\
+			buf += written;					\
 		handle->size -= written;				\
 		if (!handle->size) {					\
-			struct ring_buffer *rb = handle->rb;		\
+			struct perf_buffer *rb = handle->rb;		\
 									\
 			handle->page++;					\
 			handle->page &= rb->nr_pages - 1;		\
 			handle->addr = rb->data_pages[handle->page];	\
 			handle->size = PAGE_SIZE << page_order(rb);	\
 		}							\
@@ -110,42 +160,59 @@ func_name(struct perf_output_handle *handle,		\
 	return len;							\
 }
 
-static inline int memcpy_common(void *dst, const void *src, size_t n)
+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
+static inline unsigned long						\
+func_name(struct perf_output_handle *handle,				\
+	  const void *buf, unsigned long len)				\
+__DEFINE_OUTPUT_COPY_BODY(true, memcpy_func, handle->addr, buf, size)
+
+static inline unsigned long
+__output_custom(struct perf_output_handle *handle, perf_copy_f copy_func,
+		const void *buf, unsigned long len)
+{
+	unsigned long orig_len = len;
+	__DEFINE_OUTPUT_COPY_BODY(false, copy_func, handle->addr, buf,
+				  orig_len - len, size)
+}
+
+static inline unsigned long
+memcpy_common(void *dst, const void *src, unsigned long n)
 {
 	memcpy(dst, src, n);
-	return n;
+	return 0;
 }
 
 DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
 
-#define MEMCPY_SKIP(dst, src, n) (n)
+static inline unsigned long
+memcpy_skip(void *dst, const void *src, unsigned long n)
+{
+	return 0;
+}
 
-DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP)
+DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
 
 #ifndef arch_perf_out_copy_user
-#define arch_perf_out_copy_user __copy_from_user_inatomic
+#define arch_perf_out_copy_user arch_perf_out_copy_user
+
+static inline unsigned long
+arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
+{
+	unsigned long ret;
+
+	pagefault_disable();
+	ret = __copy_from_user_inatomic(dst, src, n);
+	pagefault_enable();
+
+	return ret;
+}
 #endif
 
 DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
 
-/* Callchain handling */
-extern struct perf_callchain_entry *
-perf_callchain(struct perf_event *event, struct pt_regs *regs);
-extern int get_callchain_buffers(void);
-extern void put_callchain_buffers(void);
-
-static inline int get_recursion_context(int *recursion)
+static inline int get_recursion_context(u8 *recursion)
 {
-	int rctx;
-
-	if (in_nmi())
-		rctx = 3;
-	else if (in_irq())
-		rctx = 2;
-	else if (in_softirq())
-		rctx = 1;
-	else
-		rctx = 0;
+	unsigned char rctx = interrupt_context_level();
 
 	if (recursion[rctx])
 		return -1;
@@ -156,7 +223,7 @@ static inline int get_recursion_context(int *recursion)
 	return rctx;
 }
 
-static inline void put_recursion_context(int *recursion, int rctx)
+static inline void put_recursion_context(u8 *recursion, unsigned char rctx)
 {
 	barrier();
 	recursion[rctx]--;
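Note on the copy convention in the rewritten output macros: __DEFINE_OUTPUT_COPY_BODY expects every memcpy_func-style helper to return the number of bytes it failed to copy (0 on full success), matching __copy_from_user_inatomic(), and converts that back into the count of bytes actually stored with "written = size - written". That is also why memcpy_common() and memcpy_skip() now return 0 instead of n. The fragment below is a minimal standalone userspace sketch of that loop, not kernel code; the fake_handle struct, the copy_all() helper, output_copy(), and the four 8-byte "pages" are invented here purely for illustration.

/*
 * Userspace sketch of the __DEFINE_OUTPUT_COPY_BODY loop: the copy helper
 * returns bytes NOT copied, the loop turns that into bytes written and
 * wraps to the next "page" when the current one fills up.
 */
#include <stdio.h>
#include <string.h>

struct fake_handle {
	char		buf[4][8];	/* four 8-byte "pages" (illustrative) */
	unsigned int	page;		/* current page index */
	char		*addr;		/* write cursor inside current page */
	unsigned long	size;		/* bytes left in current page */
};

/* copy helper in the new convention: returns bytes *not* copied */
static unsigned long copy_all(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

static unsigned long output_copy(struct fake_handle *h, const void *buf,
				 unsigned long len)
{
	unsigned long size, written;

	do {
		size = len < h->size ? len : h->size;
		written = copy_all(h->addr, buf, size);
		written = size - written;	/* bytes actually stored */

		len -= written;
		h->addr += written;
		buf = (const char *)buf + written;
		h->size -= written;
		if (!h->size) {			/* page full: advance and wrap */
			h->page = (h->page + 1) % 4;
			h->addr = h->buf[h->page];
			h->size = sizeof(h->buf[0]);
		}
	} while (len && written == size);

	return len;				/* bytes that did not fit */
}

int main(void)
{
	struct fake_handle h = { .page = 0 };

	h.addr = h.buf[0];
	h.size = sizeof(h.buf[0]);
	printf("left over: %lu\n", output_copy(&h, "hello, ring buffer!", 20));
	return 0;
}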

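The get_recursion_context()/put_recursion_context() pair guards the output path against re-entering itself from the same execution context; interrupt_context_level() collapses the old in_nmi()/in_irq()/in_softirq() chain into a single index (0 = task, 1 = softirq, 2 = hardirq, 3 = NMI). Below is a minimal userspace sketch of that guard pattern under stated assumptions: current_context() is a made-up stand-in for interrupt_context_level(), the counter array is a plain global rather than per-CPU state, and the kernel's barrier() calls are omitted.

/*
 * Userspace sketch of the per-context recursion guard: entering the same
 * context twice is refused, entering again after put_recursion_context()
 * succeeds.
 */
#include <stdio.h>

enum { CTX_TASK, CTX_SOFTIRQ, CTX_HARDIRQ, CTX_NMI, CTX_MAX };

static unsigned char recursion[CTX_MAX];	/* per-CPU in the real kernel */

/* stand-in for interrupt_context_level(); always "task" context here */
static int current_context(void)
{
	return CTX_TASK;
}

static int get_recursion_context(unsigned char *rec)
{
	unsigned char rctx = current_context();

	if (rec[rctx])
		return -1;		/* already inside: refuse to recurse */

	rec[rctx]++;
	return rctx;
}

static void put_recursion_context(unsigned char *rec, unsigned char rctx)
{
	rec[rctx]--;
}

int main(void)
{
	int rctx = get_recursion_context(recursion);

	printf("first enter:  %d\n", rctx);				/* 0  */
	printf("nested enter: %d\n", get_recursion_context(recursion));/* -1 */
	if (rctx >= 0)
		put_recursion_context(recursion, (unsigned char)rctx);
	printf("re-enter:     %d\n", get_recursion_context(recursion));/* 0  */
	return 0;
}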