path: root/tools/perf/util/auxtrace.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * auxtrace.h: AUX area trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#ifndef __PERF_AUXTRACE_H
#define __PERF_AUXTRACE_H

#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h> // FILE
#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include <internal/cpumap.h>
#include <asm/bitsperlong.h>
#include <asm/barrier.h>

union perf_event;
struct perf_session;
struct evlist;
struct evsel;
struct perf_tool;
struct mmap;
struct perf_sample;
struct option;
struct record_opts;
struct perf_record_auxtrace_error;
struct perf_record_auxtrace_info;
struct events_stats;
struct perf_pmu;

enum auxtrace_error_type {
	PERF_AUXTRACE_ERROR_ITRACE  = 1,
	PERF_AUXTRACE_ERROR_MAX
};

/* Auxtrace records must have the same alignment as perf event records */
#define PERF_AUXTRACE_RECORD_ALIGNMENT 8

enum auxtrace_type {
	PERF_AUXTRACE_UNKNOWN,
	PERF_AUXTRACE_INTEL_PT,
	PERF_AUXTRACE_INTEL_BTS,
	PERF_AUXTRACE_CS_ETM,
	PERF_AUXTRACE_ARM_SPE,
	PERF_AUXTRACE_S390_CPUMSF,
};

enum itrace_period_type {
	PERF_ITRACE_PERIOD_INSTRUCTIONS,
	PERF_ITRACE_PERIOD_TICKS,
	PERF_ITRACE_PERIOD_NANOSECS,
};

#define AUXTRACE_ERR_FLG_OVERFLOW	(1 << ('o' - 'a'))
#define AUXTRACE_ERR_FLG_DATA_LOST	(1 << ('l' - 'a'))

#define AUXTRACE_LOG_FLG_ALL_PERF_EVTS	(1 << ('a' - 'a'))
#define AUXTRACE_LOG_FLG_USE_STDOUT	(1 << ('o' - 'a'))

/**
 * struct itrace_synth_opts - AUX area tracing synthesis options.
 * @set: indicates whether or not options have been set
 * @default_no_sample: Default to no sampling.
 * @inject: indicates the event (not just the sample) must be fully synthesized
 *          because 'perf inject' will write it out
 * @instructions: whether to synthesize 'instructions' events
 * @branches: whether to synthesize 'branches' events
 *            (branch misses only for Arm SPE)
 * @transactions: whether to synthesize events for transactions
 * @ptwrites: whether to synthesize events for ptwrites
 * @pwr_events: whether to synthesize power events
 * @other_events: whether to synthesize other events recorded due to the use of
 *                aux_output
 * @errors: whether to synthesize decoder error events
 * @dont_decode: whether to skip decoding entirely
 * @log: write a decoding log
 * @calls: limit branch samples to calls (can be combined with @returns)
 * @returns: limit branch samples to returns (can be combined with @calls)
 * @callchain: add callchain to 'instructions' events
 * @add_callchain: add callchain to existing event records
 * @thread_stack: feed branches to the thread_stack
 * @last_branch: add branch context to 'instructions' events
 * @add_last_branch: add branch context to existing event records
 * @approx_ipc: approximate IPC
 * @flc: whether to synthesize first level cache events
 * @llc: whether to synthesize last level cache events
 * @tlb: whether to synthesize TLB events
 * @remote_access: whether to synthesize remote access events
 * @mem: whether to synthesize memory events
 * @timeless_decoding: prefer "timeless" decoding i.e. ignore timestamps
 * @vm_time_correlation: perform VM Time Correlation
 * @vm_tm_corr_dry_run: VM Time Correlation dry-run
 * @vm_tm_corr_args:  VM Time Correlation implementation-specific arguments
 * @callchain_sz: maximum callchain size
 * @last_branch_sz: branch context size
 * @period: 'instructions' events period
 * @period_type: 'instructions' events period type
 * @initial_skip: skip N events at the beginning.
 * @cpu_bitmap: CPUs for which to synthesize events, or NULL for all
 * @ptime_range: time intervals to trace or NULL
 * @range_num: number of time intervals to trace
 * @error_plus_flags: flags to affect what errors are reported
 * @error_minus_flags: flags to affect what errors are reported
 * @log_plus_flags: flags to affect what is logged
 * @log_minus_flags: flags to affect what is logged
 * @quick: quicker (less detailed) decoding
 */
struct itrace_synth_opts {
	bool			set;
	bool			default_no_sample;
	bool			inject;
	bool			instructions;
	bool			branches;
	bool			transactions;
	bool			ptwrites;
	bool			pwr_events;
	bool			other_events;
	bool			errors;
	bool			dont_decode;
	bool			log;
	bool			calls;
	bool			returns;
	bool			callchain;
	bool			add_callchain;
	bool			thread_stack;
	bool			last_branch;
	bool			add_last_branch;
	bool			approx_ipc;
	bool			flc;
	bool			llc;
	bool			tlb;
	bool			remote_access;
	bool			mem;
	bool			timeless_decoding;
	bool			vm_time_correlation;
	bool			vm_tm_corr_dry_run;
	char			*vm_tm_corr_args;
	unsigned int		callchain_sz;
	unsigned int		last_branch_sz;
	unsigned long long	period;
	enum itrace_period_type	period_type;
	unsigned long		initial_skip;
	unsigned long		*cpu_bitmap;
	struct perf_time_interval *ptime_range;
	int			range_num;
	unsigned int		error_plus_flags;
	unsigned int		error_minus_flags;
	unsigned int		log_plus_flags;
	unsigned int		log_minus_flags;
	unsigned int		quick;
};
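
/*
 * Usage sketch (editorial illustration, not part of the original header):
 * a tool that decodes AUX area data either parses a user-supplied --itrace
 * string or falls back to the defaults. 'itrace_str' and 'err' below are
 * hypothetical locals:
 *
 *	struct itrace_synth_opts opts = { .set = false };
 *
 *	if (itrace_str)
 *		err = itrace_do_parse_synth_opts(&opts, itrace_str, 0);
 *	else
 *		itrace_synth_opts__set_default(&opts, false);
 */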

/**
 * struct auxtrace_index_entry - indexes an AUX area tracing event within a
 *                               perf.data file.
 * @file_offset: offset within the perf.data file
 * @sz: size of the event
 */
struct auxtrace_index_entry {
	u64			file_offset;
	u64			sz;
};

#define PERF_AUXTRACE_INDEX_ENTRY_COUNT 256

/**
 * struct auxtrace_index - index of AUX area tracing events within a perf.data
 *                         file.
 * @list: linking a number of arrays of entries
 * @nr: number of entries
 * @entries: array of entries
 */
struct auxtrace_index {
	struct list_head	list;
	size_t			nr;
	struct auxtrace_index_entry entries[PERF_AUXTRACE_INDEX_ENTRY_COUNT];
};

/**
 * struct auxtrace - session callbacks to allow AUX area data decoding.
 * @process_event: lets the decoder see all session events
 * @process_auxtrace_event: process a PERF_RECORD_AUXTRACE event
 * @queue_data: queue an AUX sample or PERF_RECORD_AUXTRACE event for later
 *              processing
 * @dump_auxtrace_sample: dump AUX area sample data
 * @flush_events: process any remaining data
 * @free_events: free resources associated with event processing
 * @free: free resources associated with the session
 * @evsel_is_auxtrace: return whether the given evsel carries AUX area tracing
 *                     data
 */
struct auxtrace {
	int (*process_event)(struct perf_session *session,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct perf_tool *tool);
	int (*process_auxtrace_event)(struct perf_session *session,
				      union perf_event *event,
				      struct perf_tool *tool);
	int (*queue_data)(struct perf_session *session,
			  struct perf_sample *sample, union perf_event *event,
			  u64 data_offset);
	void (*dump_auxtrace_sample)(struct perf_session *session,
				     struct perf_sample *sample);
	int (*flush_events)(struct perf_session *session,
			    struct perf_tool *tool);
	void (*free_events)(struct perf_session *session);
	void (*free)(struct perf_session *session);
	bool (*evsel_is_auxtrace)(struct perf_session *session,
				  struct evsel *evsel);
};
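
/*
 * Usage sketch (editorial illustration): a decoder typically embeds this
 * structure in its own state, fills in the callbacks, and attaches it to
 * the session. 'decoder' and the my_*() callbacks are hypothetical names:
 *
 *	decoder->auxtrace.process_event = my_process_event;
 *	decoder->auxtrace.process_auxtrace_event = my_process_auxtrace_event;
 *	decoder->auxtrace.flush_events = my_flush_events;
 *	decoder->auxtrace.free_events = my_free_events;
 *	decoder->auxtrace.free = my_free;
 *	session->auxtrace = &decoder->auxtrace;
 */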

/**
 * struct auxtrace_buffer - a buffer containing AUX area tracing data.
 * @list: buffers are queued in a list held by struct auxtrace_queue
 * @size: size of the buffer in bytes
 * @pid: in per-thread mode, the pid this buffer is associated with
 * @tid: in per-thread mode, the tid this buffer is associated with
 * @cpu: in per-cpu mode, the cpu this buffer is associated with
 * @data: actual buffer data (can be null if the data has not been loaded)
 * @data_offset: file offset at which the buffer can be read
 * @mmap_addr: mmap address at which the buffer can be read
 * @mmap_size: size of the mmap at @mmap_addr
 * @data_needs_freeing: @data was malloc'd so free it when it is no longer
 *                      needed
 * @consecutive: the original data was split up and this buffer is consecutive
 *               to the previous buffer
 * @offset: offset as determined by aux_head / aux_tail members of struct
 *          perf_event_mmap_page
 * @reference: an implementation-specific reference determined when the data is
 *             recorded
 * @buffer_nr: used to number each buffer
 * @use_size: implementation actually only uses this number of bytes
 * @use_data: implementation actually only uses data starting at this address
 */
struct auxtrace_buffer {
	struct list_head	list;
	size_t			size;
	pid_t			pid;
	pid_t			tid;
	struct perf_cpu		cpu;
	void			*data;
	off_t			data_offset;
	void			*mmap_addr;
	size_t			mmap_size;
	bool			data_needs_freeing;
	bool			consecutive;
	u64			offset;
	u64			reference;
	u64			buffer_nr;
	size_t			use_size;
	void			*use_data;
};

/**
 * struct auxtrace_queue - a queue of AUX area tracing data buffers.
 * @head: head of buffer list
 * @tid: in per-thread mode, the tid this queue is associated with
 * @cpu: in per-cpu mode, the cpu this queue is associated with
 * @set: %true once this queue has been dedicated to a specific thread or cpu
 * @priv: implementation-specific data
 */
struct auxtrace_queue {
	struct list_head	head;
	pid_t			tid;
	int			cpu;
	bool			set;
	void			*priv;
};

/**
 * struct auxtrace_queues - an array of AUX area tracing queues.
 * @queue_array: array of queues
 * @nr_queues: number of queues
 * @new_data: set whenever new data is queued
 * @populated: queues have been fully populated using the auxtrace_index
 * @next_buffer_nr: used to number each buffer
 */
struct auxtrace_queues {
	struct auxtrace_queue	*queue_array;
	unsigned int		nr_queues;
	bool			new_data;
	bool			populated;
	u64			next_buffer_nr;
};

/**
 * struct auxtrace_heap_item - element of struct auxtrace_heap.
 * @queue_nr: queue number
 * @ordinal: value used for sorting (lowest ordinal is top of the heap) expected
 *           to be a timestamp
 */
struct auxtrace_heap_item {
	unsigned int		queue_nr;
	u64			ordinal;
};

/**
 * struct auxtrace_heap - a heap suitable for sorting AUX area tracing queues.
 * @heap_array: the heap
 * @heap_cnt: the number of elements in the heap
 * @heap_sz: maximum number of elements (grows as needed)
 */
struct auxtrace_heap {
	struct auxtrace_heap_item	*heap_array;
	unsigned int		heap_cnt;
	unsigned int		heap_sz;
};

/**
 * struct auxtrace_mmap - records an mmap of the auxtrace buffer.
 * @base: address of mapped area
 * @userpg: pointer to buffer's perf_event_mmap_page
 * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
 * @len: size of mapped area
 * @prev: previous aux_head
 * @idx: index of this mmap
 * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
 *       mmap) otherwise %0
 * @cpu: cpu number for a per-cpu mmap otherwise %-1
 */
struct auxtrace_mmap {
	void		*base;
	void		*userpg;
	size_t		mask;
	size_t		len;
	u64		prev;
	int		idx;
	pid_t		tid;
	int		cpu;
};

/**
 * struct auxtrace_mmap_params - parameters to set up struct auxtrace_mmap.
 * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
 * @offset: file offset of mapped area
 * @len: size of mapped area
 * @prot: mmap memory protection
 * @idx: index of this mmap
 * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
 *       mmap) otherwise %0
 * @cpu: cpu number for a per-cpu mmap otherwise %-1
 */
struct auxtrace_mmap_params {
	size_t		mask;
	off_t		offset;
	size_t		len;
	int		prot;
	int		idx;
	pid_t		tid;
	struct perf_cpu	cpu;
};

/**
 * struct auxtrace_record - callbacks for recording AUX area data.
 * @recording_options: validate and process recording options
 * @info_priv_size: return the size of the private data in auxtrace_info_event
 * @info_fill: fill-in the private data in auxtrace_info_event
 * @free: free this auxtrace record structure
 * @snapshot_start: starting a snapshot
 * @snapshot_finish: finishing a snapshot
 * @find_snapshot: find data to snapshot within auxtrace mmap
 * @parse_snapshot_options: parse snapshot options
 * @reference: provide a 64-bit reference number for auxtrace_event
 * @read_finish: called after reading from an auxtrace mmap
 * @alignment: alignment (if any) for AUX area data
 * @default_aux_sample_size: default sample size for --aux-sample option
 * @pmu: associated pmu
 * @evlist: selected events list
 */
struct auxtrace_record {
	int (*recording_options)(struct auxtrace_record *itr,
				 struct evlist *evlist,
				 struct record_opts *opts);
	size_t (*info_priv_size)(struct auxtrace_record *itr,
				 struct evlist *evlist);
	int (*info_fill)(struct auxtrace_record *itr,
			 struct perf_session *session,
			 struct perf_record_auxtrace_info *auxtrace_info,
			 size_t priv_size);
	void (*free)(struct auxtrace_record *itr);
	int (*snapshot_start)(struct auxtrace_record *itr);
	int (*snapshot_finish)(struct auxtrace_record *itr);
	int (*find_snapshot)(struct auxtrace_record *itr, int idx,
			     struct auxtrace_mmap *mm, unsigned char *data,
			     u64 *head, u64 *old);
	int (*parse_snapshot_options)(struct auxtrace_record *itr,
				      struct record_opts *opts,
				      const char *str);
	u64 (*reference)(struct auxtrace_record *itr);
	int (*read_finish)(struct auxtrace_record *itr, int idx);
	unsigned int alignment;
	unsigned int default_aux_sample_size;
	struct perf_pmu *pmu;
	struct evlist *evlist;
};
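
/*
 * Usage sketch (editorial illustration): each AUX area PMU's recording code
 * embeds struct auxtrace_record in its own state, fills in the callbacks and
 * returns it from its constructor; auxtrace_record__init() (declared below)
 * picks the implementation based on the events in the evlist. 'rec', 'struct
 * my_recording' and the my_*() callbacks are hypothetical:
 *
 *	struct my_recording {
 *		struct auxtrace_record	itr;
 *		struct perf_pmu		*pmu;
 *	};
 *
 *	rec->itr.recording_options = my_recording_options;
 *	rec->itr.info_priv_size = my_info_priv_size;
 *	rec->itr.info_fill = my_info_fill;
 *	rec->itr.free = my_recording_free;
 *	return &rec->itr;
 */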

/**
 * struct addr_filter - address filter.
 * @list: list node
 * @range: true if it is a range filter
 * @start: true if action is 'filter' or 'start'
 * @action: 'filter', 'start' or 'stop' ('tracestop' is accepted but converted
 *          to 'stop')
 * @sym_from: symbol name for the filter address
 * @sym_to: symbol name that determines the filter size
 * @sym_from_idx: selects n'th from symbols with the same name (0 means global
 *                and less than 0 means symbol must be unique)
 * @sym_to_idx: same as @sym_from_idx but for @sym_to
 * @addr: filter address
 * @size: filter region size (for range filters)
 * @filename: DSO file name or NULL for the kernel
 * @str: allocated string that contains the other string members
 */
struct addr_filter {
	struct list_head	list;
	bool			range;
	bool			start;
	const char		*action;
	const char		*sym_from;
	const char		*sym_to;
	int			sym_from_idx;
	int			sym_to_idx;
	u64			addr;
	u64			size;
	const char		*filename;
	char			*str;
};
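
/*
 * Example (editorial note): the members above mirror the perf address filter
 * syntax. A bare filter string such as
 *
 *	filter main @ /usr/bin/ls
 *
 * parsed by addr_filters__parse_bare_filter() (declared below) would yield
 * action = "filter", sym_from = "main" and filename = "/usr/bin/ls", with
 * @addr and @size filled in later once the symbol is resolved.
 */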

/**
 * struct addr_filters - list of address filters.
 * @head: list of address filters
 * @cnt: number of address filters
 */
struct addr_filters {
	struct list_head	head;
	int			cnt;
};

struct auxtrace_cache;

#ifdef HAVE_AUXTRACE_SUPPORT

u64 compat_auxtrace_mmap__read_head(struct auxtrace_mmap *mm);
int compat_auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail);

static inline u64 auxtrace_mmap__read_head(struct auxtrace_mmap *mm,
					   int kernel_is_64_bit __maybe_unused)
{
	struct perf_event_mmap_page *pc = mm->userpg;
	u64 head;

#if BITS_PER_LONG == 32
	if (kernel_is_64_bit)
		return compat_auxtrace_mmap__read_head(mm);
#endif
	head = READ_ONCE(pc->aux_head);

	/* Ensure all reads are done after we read the head */
	smp_rmb();
	return head;
}

static inline int auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail,
					    int kernel_is_64_bit __maybe_unused)
{
	struct perf_event_mmap_page *pc = mm->userpg;

#if BITS_PER_LONG == 32
	if (kernel_is_64_bit)
		return compat_auxtrace_mmap__write_tail(mm, tail);
#endif
	/* Ensure all reads are done before we write the tail out */
	smp_mb();
	WRITE_ONCE(pc->aux_tail, tail);
	return 0;
}
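
/*
 * Usage sketch (editorial illustration): the two helpers above implement the
 * usual ring-buffer handshake with the kernel - read the head, copy out the
 * data that arrived since the previous read, then publish the new tail so
 * the kernel can reuse the space. Variables other than 'mm' are hypothetical:
 *
 *	u64 head = auxtrace_mmap__read_head(mm, kernel_is_64_bit);
 *	u64 old = mm->prev;
 *
 *	if (head != old) {
 *		... copy [old, head) out of mm->base, wrapping at mm->len ...
 *		mm->prev = head;
 *		auxtrace_mmap__write_tail(mm, head, kernel_is_64_bit);
 *	}
 */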

int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd);
void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite);
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct evlist *evlist, int idx,
				   bool per_cpu);
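
/*
 * Usage sketch (editorial illustration): the set-up helpers above are
 * normally called in this order when an event ring buffer is mapped; the
 * variable names below are hypothetical:
 *
 *	auxtrace_mmap_params__init(&mp, auxtrace_offset, auxtrace_pages,
 *				   auxtrace_overwrite);
 *	auxtrace_mmap_params__set_idx(&mp, evlist, idx, per_cpu);
 *	err = auxtrace_mmap__mmap(&mm, &mp, userpg, fd);
 *	...
 *	auxtrace_mmap__munmap(&mm);
 */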

typedef int (*process_auxtrace_t)(struct perf_tool *tool,
				  struct mmap *map,
				  union perf_event *event, void *data1,
				  size_t len1, void *data2, size_t len2);

int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
			struct perf_tool *tool, process_auxtrace_t fn);

int auxtrace_mmap__read_snapshot(struct mmap *map,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 size_t snapshot_size);

int auxtrace_queues__init(struct auxtrace_queues *queues);
int auxtrace_queues__add_event(struct auxtrace_queues *queues,
			       struct perf_session *session,
			       union perf_event *event, off_t data_offset,
			       struct auxtrace_buffer **buffer_ptr);
struct auxtrace_queue *
auxtrace_queues__sample_queue(struct auxtrace_queues *queues,
			      struct perf_sample *sample,
			      struct perf_session *session);
int auxtrace_queues__add_sample(struct auxtrace_queues *queues,
				struct perf_session *session,
				struct perf_sample *sample, u64 data_offset,
				u64 reference);
void auxtrace_queues__free(struct auxtrace_queues *queues);
int auxtrace_queues__process_index(struct auxtrace_queues *queues,
				   struct perf_session *session);
int auxtrace_queue_data(struct perf_session *session, bool samples,
			bool events);
struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
					      struct auxtrace_buffer *buffer);
void *auxtrace_buffer__get_data_rw(struct auxtrace_buffer *buffer, int fd, bool rw);
static inline void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd)
{
	return auxtrace_buffer__get_data_rw(buffer, fd, false);
}
void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer);
void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer);
void auxtrace_buffer__free(struct auxtrace_buffer *buffer);
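
/*
 * Usage sketch (editorial illustration): decoders walk a queue's buffers
 * with auxtrace_buffer__next(), mapping and releasing the data around each
 * step. 'queue', 'fd' and process_buffer() are hypothetical:
 *
 *	struct auxtrace_buffer *buffer = NULL;
 *
 *	while ((buffer = auxtrace_buffer__next(queue, buffer))) {
 *		void *data = auxtrace_buffer__get_data(buffer, fd);
 *
 *		if (!data)
 *			break;
 *		process_buffer(data, buffer->size);
 *		auxtrace_buffer__put_data(buffer);
 *	}
 */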

int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
		       u64 ordinal);
void auxtrace_heap__pop(struct auxtrace_heap *heap);
void auxtrace_heap__free(struct auxtrace_heap *heap);
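
/*
 * Usage sketch (editorial illustration): the heap keeps queues ordered by
 * the timestamp of their next event so multi-queue data can be decoded in
 * time order; the lowest ordinal is always heap_array[0]. 'heap', 'queue_nr'
 * and 'timestamp' are hypothetical:
 *
 *	err = auxtrace_heap__add(&heap, queue_nr, timestamp);
 *	...
 *	while (heap.heap_cnt) {
 *		unsigned int queue_nr = heap.heap_array[0].queue_nr;
 *
 *		auxtrace_heap__pop(&heap);
 *		... decode that queue, then auxtrace_heap__add() it back
 *		    with the timestamp of its next event ...
 *	}
 */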

struct auxtrace_cache_entry {
	struct hlist_node hash;
	u32 key;
};

struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
					   unsigned int limit_percent);
void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache);
void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c);
void auxtrace_cache__free_entry(struct auxtrace_cache *c, void *entry);
int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
			struct auxtrace_cache_entry *entry);
void auxtrace_cache__remove(struct auxtrace_cache *c, u32 key);
void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key);
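
/*
 * Usage sketch (editorial illustration): cached objects embed struct
 * auxtrace_cache_entry as their first member so the same pointer can be
 * handed to the add/lookup helpers. 'struct my_entry', 'c', 'key' and
 * 'insn_cnt' are hypothetical:
 *
 *	struct my_entry {
 *		struct auxtrace_cache_entry	entry;
 *		u64				insn_cnt;
 *	};
 *
 *	struct my_entry *e = auxtrace_cache__alloc_entry(c);
 *
 *	if (e) {
 *		e->insn_cnt = insn_cnt;
 *		err = auxtrace_cache__add(c, key, &e->entry);
 *	}
 *	...
 *	e = auxtrace_cache__lookup(c, key);
 */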

struct auxtrace_record *auxtrace_record__init(struct evlist *evlist,
					      int *err);

int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
				    struct record_opts *opts,
				    const char *str);
int auxtrace_parse_sample_options(struct auxtrace_record *itr,
				  struct evlist *evlist,
				  struct record_opts *opts, const char *str);
void auxtrace_regroup_aux_output(struct evlist *evlist);
int auxtrace_record__options(struct auxtrace_record *itr,
			     struct evlist *evlist,
			     struct record_opts *opts);
size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
				       struct evlist *evlist);
int auxtrace_record__info_fill(struct auxtrace_record *itr,
			       struct perf_session *session,
			       struct perf_record_auxtrace_info *auxtrace_info,
			       size_t priv_size);
void auxtrace_record__free(struct auxtrace_record *itr);
int auxtrace_record__snapshot_start(struct auxtrace_record *itr);
int auxtrace_record__snapshot_finish(struct auxtrace_record *itr, bool on_exit);
int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
				   struct auxtrace_mmap *mm,
				   unsigned char *data, u64 *head, u64 *old);
u64 auxtrace_record__reference(struct auxtrace_record *itr);
int auxtrace_record__read_finish(struct auxtrace_record *itr, int idx);

int auxtrace_index__auxtrace_event(struct list_head *head, union perf_event *event,
				   off_t file_offset);
int auxtrace_index__write(int fd, struct list_head *head);
int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
			    bool needs_swap);
void auxtrace_index__free(struct list_head *head);

void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
			  int code, int cpu, pid_t pid, pid_t tid, u64 ip,
			  const char *msg, u64 timestamp);

int perf_event__process_auxtrace_info(struct perf_session *session,
				      union perf_event *event);
s64 perf_event__process_auxtrace(struct perf_session *session,
				 union perf_event *event);
int perf_event__process_auxtrace_error(struct perf_session *session,
				       union perf_event *event);
int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
			       const char *str, int unset);
int itrace_parse_synth_opts(const struct option *opt, const char *str,
			    int unset);
void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
				    bool no_sample);

size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp);
void perf_session__auxtrace_error_inc(struct perf_session *session,
				      union perf_event *event);
void events_stats__auxtrace_error_warn(const struct events_stats *stats);

void addr_filters__init(struct addr_filters *filts);
void addr_filters__exit(struct addr_filters *filts);
int addr_filters__parse_bare_filter(struct addr_filters *filts,
				    const char *filter);
int auxtrace_parse_filters(struct evlist *evlist);

int auxtrace__process_event(struct perf_session *session, union perf_event *event,
			    struct perf_sample *sample, struct perf_tool *tool);
void auxtrace__dump_auxtrace_sample(struct perf_session *session,
				    struct perf_sample *sample);
int auxtrace__flush_events(struct perf_session *session, struct perf_tool *tool);
void auxtrace__free_events(struct perf_session *session);
void auxtrace__free(struct perf_session *session);
bool auxtrace__evsel_is_auxtrace(struct perf_session *session,
				 struct evsel *evsel);

#define ITRACE_HELP \
"				i[period]:    		synthesize instructions events\n" \
"				b:	    		synthesize branches events (branch misses for Arm SPE)\n" \
"				c:	    		synthesize branches events (calls only)\n"	\
"				r:	    		synthesize branches events (returns only)\n" \
"				x:	    		synthesize transactions events\n"		\
"				w:	    		synthesize ptwrite events\n"		\
"				p:	    		synthesize power events\n"			\
"				o:			synthesize other events recorded due to the use\n" \
"							of aux-output (refer to perf record)\n"	\
"				e[flags]:		synthesize error events\n" \
"							each flag must be preceded by + or -\n" \
"							error flags are: o (overflow)\n" \
"									 l (data lost)\n" \
"				d[flags]:		create a debug log\n" \
"							each flag must be preceded by + or -\n" \
"							log flags are: a (all perf events)\n" \
"							               o (output to stdout)\n" \
"				f:	    		synthesize first level cache events\n" \
"				m:	    		synthesize last level cache events\n" \
"				t:	    		synthesize TLB events\n" \
"				a:	    		synthesize remote access events\n" \
"				g[len]:     		synthesize a call chain (use with i or x)\n" \
"				G[len]:			synthesize a call chain on existing event records\n" \
"				l[len]:     		synthesize last branch entries (use with i or x)\n" \
"				L[len]:			synthesize last branch entries on existing event records\n" \
"				sNUMBER:    		skip initial number of events\n"		\
"				q:			quicker (less detailed) decoding\n" \
"				A:			approximate IPC\n" \
"				Z:			prefer to ignore timestamps (so-called \"timeless\" decoding)\n" \
"				PERIOD[ns|us|ms|i|t]:   specify period to sample stream\n" \
"				concatenate multiple options. Default is ibxwpe or cewp\n"

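/*
 * Example (editorial note): the letters above are concatenated into a single
 * --itrace string, e.g. "--itrace=i100us" to synthesize instructions events
 * every 100 microseconds, or "--itrace=bep" to synthesize branches, error
 * and power events. Omitting the string entirely selects the default set
 * noted in the help text.
 */
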
static inline
void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts,
				       struct perf_time_interval *ptime_range,
				       int range_num)
{
	opts->ptime_range = ptime_range;
	opts->range_num = range_num;
}

static inline
void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts)
{
	opts->ptime_range = NULL;
	opts->range_num = 0;
}

#else
#include "debug.h"

static inline struct auxtrace_record *
auxtrace_record__init(struct evlist *evlist __maybe_unused,
		      int *err)
{
	*err = 0;
	return NULL;
}

static inline
void auxtrace_record__free(struct auxtrace_record *itr __maybe_unused)
{
}

static inline
int auxtrace_record__options(struct auxtrace_record *itr __maybe_unused,
			     struct evlist *evlist __maybe_unused,
			     struct record_opts *opts __maybe_unused)
{
	return 0;
}

static inline
int perf_event__process_auxtrace_info(struct perf_session *session __maybe_unused,
				      union perf_event *event __maybe_unused)
{
	return 0;
}

static inline
s64 perf_event__process_auxtrace(struct perf_session *session __maybe_unused,
				 union perf_event *event __maybe_unused)
{
	return 0;
}

static inline
int perf_event__process_auxtrace_error(struct perf_session *session __maybe_unused,
				       union perf_event *event __maybe_unused)
{
	return 0;
}

static inline
void perf_session__auxtrace_error_inc(struct perf_session *session
				      __maybe_unused,
				      union perf_event *event
				      __maybe_unused)
{
}

static inline
void events_stats__auxtrace_error_warn(const struct events_stats *stats
				       __maybe_unused)
{
}

static inline
int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts __maybe_unused,
			       const char *str __maybe_unused, int unset __maybe_unused)
{
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
int itrace_parse_synth_opts(const struct option *opt __maybe_unused,
			    const char *str __maybe_unused,
			    int unset __maybe_unused)
{
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
int auxtrace_parse_snapshot_options(struct auxtrace_record *itr __maybe_unused,
				    struct record_opts *opts __maybe_unused,
				    const char *str)
{
	if (!str)
		return 0;
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
int auxtrace_parse_sample_options(struct auxtrace_record *itr __maybe_unused,
				  struct evlist *evlist __maybe_unused,
				  struct record_opts *opts __maybe_unused,
				  const char *str)
{
	if (!str)
		return 0;
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
void auxtrace_regroup_aux_output(struct evlist *evlist __maybe_unused)
{
}

static inline
int auxtrace__process_event(struct perf_session *session __maybe_unused,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample __maybe_unused,
			    struct perf_tool *tool __maybe_unused)
{
	return 0;
}

static inline
void auxtrace__dump_auxtrace_sample(struct perf_session *session __maybe_unused,
				    struct perf_sample *sample __maybe_unused)
{
}

static inline
int auxtrace__flush_events(struct perf_session *session __maybe_unused,
			   struct perf_tool *tool __maybe_unused)
{
	return 0;
}

static inline
void auxtrace__free_events(struct perf_session *session __maybe_unused)
{
}

static inline
void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache __maybe_unused)
{
}

static inline
void auxtrace__free(struct perf_session *session __maybe_unused)
{
}

static inline
int auxtrace_index__write(int fd __maybe_unused,
			  struct list_head *head __maybe_unused)
{
	return -EINVAL;
}

static inline
int auxtrace_index__process(int fd __maybe_unused,
			    u64 size __maybe_unused,
			    struct perf_session *session __maybe_unused,
			    bool needs_swap __maybe_unused)
{
	return -EINVAL;
}

static inline
void auxtrace_index__free(struct list_head *head __maybe_unused)
{
}

static inline
bool auxtrace__evsel_is_auxtrace(struct perf_session *session __maybe_unused,
				 struct evsel *evsel __maybe_unused)
{
	return false;
}

static inline
int auxtrace_parse_filters(struct evlist *evlist __maybe_unused)
{
	return 0;
}

int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd);
void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite);
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct evlist *evlist, int idx,
				   bool per_cpu);

#define ITRACE_HELP ""

static inline
void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts
				       __maybe_unused,
				       struct perf_time_interval *ptime_range
				       __maybe_unused,
				       int range_num __maybe_unused)
{
}

static inline
void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts
					 __maybe_unused)
{
}

#endif

#endif