path: root/kernel/trace
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/Kconfig | 346
-rw-r--r--  kernel/trace/Makefile | 21
-rw-r--r--  kernel/trace/blktrace.c | 527
-rw-r--r--  kernel/trace/bpf_trace.c | 2288
-rw-r--r--  kernel/trace/bpf_trace.h | 34
-rw-r--r--  kernel/trace/error_report-traces.c | 11
-rw-r--r--  kernel/trace/fgraph.c | 83
-rw-r--r--  kernel/trace/fprobe.c | 386
-rw-r--r--  kernel/trace/ftrace.c | 2072
-rw-r--r--  kernel/trace/ftrace_internal.h | 5
-rw-r--r--  kernel/trace/kprobe_event_gen_test.c | 119
-rw-r--r--  kernel/trace/pid_list.c | 495
-rw-r--r--  kernel/trace/pid_list.h | 88
-rw-r--r--  kernel/trace/preemptirq_delay_test.c | 14
-rw-r--r--  kernel/trace/rethook.c | 335
-rw-r--r--  kernel/trace/ring_buffer.c | 2164
-rw-r--r--  kernel/trace/ring_buffer_benchmark.c | 60
-rw-r--r--  kernel/trace/rv/Kconfig | 78
-rw-r--r--  kernel/trace/rv/Makefile | 8
-rw-r--r--  kernel/trace/rv/monitors/wip/wip.c | 88
-rw-r--r--  kernel/trace/rv/monitors/wip/wip.h | 46
-rw-r--r--  kernel/trace/rv/monitors/wwnr/wwnr.c | 87
-rw-r--r--  kernel/trace/rv/monitors/wwnr/wwnr.h | 46
-rw-r--r--  kernel/trace/rv/reactor_panic.c | 42
-rw-r--r--  kernel/trace/rv/reactor_printk.c | 41
-rw-r--r--  kernel/trace/rv/rv.c | 797
-rw-r--r--  kernel/trace/rv/rv.h | 68
-rw-r--r--  kernel/trace/rv/rv_reactors.c | 510
-rw-r--r--  kernel/trace/synth_event_gen_test.c | 51
-rw-r--r--  kernel/trace/trace.c | 3246
-rw-r--r--  kernel/trace/trace.h | 632
-rw-r--r--  kernel/trace/trace_benchmark.c | 8
-rw-r--r--  kernel/trace/trace_benchmark.h | 8
-rw-r--r--  kernel/trace/trace_boot.c | 371
-rw-r--r--  kernel/trace/trace_branch.c | 6
-rw-r--r--  kernel/trace/trace_btf.c | 122
-rw-r--r--  kernel/trace/trace_btf.h | 11
-rw-r--r--  kernel/trace/trace_clock.c | 44
-rw-r--r--  kernel/trace/trace_dynevent.c | 100
-rw-r--r--  kernel/trace/trace_dynevent.h | 14
-rw-r--r--  kernel/trace/trace_entries.h | 95
-rw-r--r--  kernel/trace/trace_eprobe.c | 984
-rw-r--r--  kernel/trace/trace_event_perf.c | 54
-rw-r--r--  kernel/trace/trace_events.c | 1170
-rw-r--r--  kernel/trace/trace_events_filter.c | 814
-rw-r--r--  kernel/trace/trace_events_hist.c | 1752
-rw-r--r--  kernel/trace/trace_events_inject.c | 24
-rw-r--r--  kernel/trace/trace_events_synth.c | 841
-rw-r--r--  kernel/trace/trace_events_trigger.c | 821
-rw-r--r--  kernel/trace/trace_events_user.c | 2784
-rw-r--r--  kernel/trace/trace_export.c | 16
-rw-r--r--  kernel/trace/trace_fprobe.c | 1231
-rw-r--r--  kernel/trace/trace_functions.c | 294
-rw-r--r--  kernel/trace/trace_functions_graph.c | 139
-rw-r--r--  kernel/trace/trace_hwlat.c | 568
-rw-r--r--  kernel/trace/trace_irqsoff.c | 95
-rw-r--r--  kernel/trace/trace_kdb.c | 12
-rw-r--r--  kernel/trace/trace_kprobe.c | 505
-rw-r--r--  kernel/trace/trace_kprobe_selftest.c | 3
-rw-r--r--  kernel/trace/trace_mmiotrace.c | 16
-rw-r--r--  kernel/trace/trace_osnoise.c | 3133
-rw-r--r--  kernel/trace/trace_output.c | 534
-rw-r--r--  kernel/trace/trace_output.h | 3
-rw-r--r--  kernel/trace/trace_preemptirq.c | 63
-rw-r--r--  kernel/trace/trace_printk.c | 23
-rw-r--r--  kernel/trace/trace_probe.c | 1146
-rw-r--r--  kernel/trace/trace_probe.h | 127
-rw-r--r--  kernel/trace/trace_probe_kernel.h | 119
-rw-r--r--  kernel/trace/trace_probe_tmpl.h | 111
-rw-r--r--  kernel/trace/trace_recursion_record.c | 233
-rw-r--r--  kernel/trace/trace_sched_switch.c | 5
-rw-r--r--  kernel/trace/trace_sched_wakeup.c | 102
-rw-r--r--  kernel/trace/trace_selftest.c | 147
-rw-r--r--  kernel/trace/trace_seq.c | 45
-rw-r--r--  kernel/trace/trace_stack.c | 15
-rw-r--r--  kernel/trace/trace_stat.c | 14
-rw-r--r--  kernel/trace/trace_synth.h | 9
-rw-r--r--  kernel/trace/trace_syscalls.c | 59
-rw-r--r--  kernel/trace/trace_uprobe.c | 171
-rw-r--r--  kernel/trace/tracing_map.c | 66
-rw-r--r--  kernel/trace/tracing_map.h | 6
81 files changed, 27970 insertions, 5851 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index a4020c0b4508..61c541c36596 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -10,6 +10,17 @@ config USER_STACKTRACE_SUPPORT
config NOP_TRACER
bool
+config HAVE_RETHOOK
+ bool
+
+config RETHOOK
+ bool
+ depends on HAVE_RETHOOK
+ help
+ Enable generic return hooking feature. This is an internal
+ API, which will be used by other function-entry hooking
+ features like fprobe and kprobes.
+
config HAVE_FUNCTION_TRACER
bool
help
@@ -20,6 +31,9 @@ config HAVE_FUNCTION_GRAPH_TRACER
help
See Documentation/trace/ftrace-design.rst
+config HAVE_FUNCTION_GRAPH_RETVAL
+ bool
+
config HAVE_DYNAMIC_FTRACE
bool
help
@@ -31,6 +45,24 @@ config HAVE_DYNAMIC_FTRACE_WITH_REGS
config HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
bool
+config HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS
+ bool
+
+config HAVE_DYNAMIC_FTRACE_WITH_ARGS
+ bool
+ help
+ If this is set, then arguments and stack can be found from
+ the ftrace_regs passed into the function callback regs parameter
+ by default, even without setting the REGS flag in the ftrace_ops.
+ This allows for use of ftrace_regs_get_argument() and
+ ftrace_regs_get_stack_pointer().
+
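For illustration only (not part of this patch): on an architecture that selects HAVE_DYNAMIC_FTRACE_WITH_ARGS, a callback can read arguments through ftrace_regs without requesting full registers, roughly like the following sketch (function and variable names are made up):

    #include <linux/ftrace.h>
    #include <linux/module.h>

    static void notrace my_callback(unsigned long ip, unsigned long parent_ip,
                                    struct ftrace_ops *op,
                                    struct ftrace_regs *fregs)
    {
            /* First argument and stack pointer of the traced function. */
            unsigned long arg0 = ftrace_regs_get_argument(fregs, 0);
            unsigned long sp = ftrace_regs_get_stack_pointer(fregs);

            trace_printk("%pS: arg0=%lx sp=%lx\n", (void *)ip, arg0, sp);
    }

    static struct ftrace_ops my_ops = {
            .func = my_callback,    /* no FTRACE_OPS_FL_SAVE_REGS needed */
    };

    /* e.g. from module init:
     *   ftrace_set_filter(&my_ops, "vfs_read", strlen("vfs_read"), 0);
     *   register_ftrace_function(&my_ops);
     */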
+config HAVE_DYNAMIC_FTRACE_NO_PATCHABLE
+ bool
+ help
+ If the architecture generates __patchable_function_entries sections
+ but does not want them included in the ftrace locations.
+
config HAVE_FTRACE_MCOUNT_RECORD
bool
help
@@ -51,11 +83,36 @@ config HAVE_NOP_MCOUNT
help
Arch supports the gcc options -pg with -mrecord-mcount and -nop-mcount
+config HAVE_OBJTOOL_MCOUNT
+ bool
+ help
+ Arch supports objtool --mcount
+
+config HAVE_OBJTOOL_NOP_MCOUNT
+ bool
+ help
+ Arch supports the objtool options --mcount with --mnop.
+ An architecture can select this if it wants to enable nop'ing
+ of ftrace locations.
+
config HAVE_C_RECORDMCOUNT
bool
help
C version of recordmcount available?
+config HAVE_BUILDTIME_MCOUNT_SORT
+ bool
+ help
+ An architecture selects this if it sorts the mcount_loc section
+ at build time.
+
+config BUILDTIME_MCOUNT_SORT
+ bool
+ default y
+ depends on HAVE_BUILDTIME_MCOUNT_SORT && DYNAMIC_FTRACE
+ help
+ Sort the mcount_loc section at build time.
+
config TRACER_MAX_TRACE
bool
@@ -106,6 +163,7 @@ config TRACING
select BINARY_PRINTF
select EVENT_TRACING
select TRACE_CLOCK
+ select TASKS_RCU if PREEMPTION
config GENERIC_TRACER
bool
@@ -121,10 +179,9 @@ config TRACING_SUPPORT
depends on STACKTRACE_SUPPORT
default y
-if TRACING_SUPPORT
-
menuconfig FTRACE
bool "Tracers"
+ depends on TRACING_SUPPORT
default y if DEBUG_KERNEL
help
Enable the kernel tracing infrastructure.
@@ -156,7 +213,8 @@ config FUNCTION_TRACER
sequence is then dynamically patched into a tracer call when
tracing is enabled by the administrator. If it's runtime disabled
(the bootup default), then the overhead of the instructions is very
- small and not measurable even in micro-benchmarks.
+ small and not measurable even in micro-benchmarks (at least on
+ x86, but may have impact on other architectures).
config FUNCTION_GRAPH_TRACER
bool "Kernel Function Graph Tracer"
@@ -172,6 +230,18 @@ config FUNCTION_GRAPH_TRACER
the return value. This is done by setting the current return
address on the current task structure into a stack of calls.
+config FUNCTION_GRAPH_RETVAL
+ bool "Kernel Function Graph Return Value"
+ depends on HAVE_FUNCTION_GRAPH_RETVAL
+ depends on FUNCTION_GRAPH_TRACER
+ default n
+ help
+ Support recording and printing the function return value when
+ using function graph tracer. It can be helpful to locate functions
+ that return errors. This feature is off by default, and you can
+ enable it via the trace option funcgraph-retval.
+ See Documentation/trace/ftrace.rst
+
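As a usage sketch (not taken from this patch, and assuming tracefs is mounted at /sys/kernel/tracing), the option can be turned on from user space like this:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Write a short control string to a tracefs file. */
    static void write_str(const char *path, const char *val)
    {
            int fd = open(path, O_WRONLY);

            if (fd < 0 || write(fd, val, strlen(val)) < 0)
                    perror(path);
            if (fd >= 0)
                    close(fd);
    }

    int main(void)
    {
            write_str("/sys/kernel/tracing/current_tracer", "function_graph");
            write_str("/sys/kernel/tracing/options/funcgraph-retval", "1");
            return 0;
    }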
config DYNAMIC_FTRACE
bool "enable/disable function tracing dynamically"
depends on FUNCTION_TRACER
@@ -187,7 +257,7 @@ config DYNAMIC_FTRACE
enabled, and the functions not enabled will not affect
performance of the system.
- See the files in /sys/kernel/debug/tracing:
+ See the files in /sys/kernel/tracing:
available_filter_functions
set_ftrace_filter
set_ftrace_notrace
@@ -202,9 +272,33 @@ config DYNAMIC_FTRACE_WITH_REGS
config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
def_bool y
- depends on DYNAMIC_FTRACE
+ depends on DYNAMIC_FTRACE_WITH_REGS || DYNAMIC_FTRACE_WITH_ARGS
depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+config DYNAMIC_FTRACE_WITH_CALL_OPS
+ def_bool y
+ depends on HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS
+
+config DYNAMIC_FTRACE_WITH_ARGS
+ def_bool y
+ depends on DYNAMIC_FTRACE
+ depends on HAVE_DYNAMIC_FTRACE_WITH_ARGS
+
+config FPROBE
+ bool "Kernel Function Probe (fprobe)"
+ depends on FUNCTION_TRACER
+ depends on DYNAMIC_FTRACE_WITH_REGS
+ depends on HAVE_RETHOOK
+ select RETHOOK
+ default n
+ help
+ This option enables the kernel function probe (fprobe) based on ftrace.
+ The fprobe is similar to kprobes, but probes only kernel function
+ entries and exits. A single fprobe can also probe multiple functions.
+
+ If unsure, say N.
+
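A hedged sketch of an fprobe client follows (not part of this patch; the entry handler prototype has changed across kernel versions, and this assumes the early form that receives pt_regs):

    #include <linux/fprobe.h>
    #include <linux/module.h>

    static void my_entry(struct fprobe *fp, unsigned long entry_ip,
                         struct pt_regs *regs)
    {
            pr_info("hit %pS\n", (void *)entry_ip);
    }

    static struct fprobe my_fprobe = {
            .entry_handler = my_entry,
    };

    static int __init fprobe_demo_init(void)
    {
            /* A wildcard filter lets one fprobe cover many functions. */
            return register_fprobe(&my_fprobe, "vfs_*", NULL);
    }

    static void __exit fprobe_demo_exit(void)
    {
            unregister_fprobe(&my_fprobe);
    }

    module_init(fprobe_demo_init);
    module_exit(fprobe_demo_exit);
    MODULE_LICENSE("GPL");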
config FUNCTION_PROFILER
bool "Kernel function profiler"
depends on FUNCTION_TRACER
@@ -227,7 +321,7 @@ config STACK_TRACER
select KALLSYMS
help
This special tracer records the maximum stack footprint of the
- kernel and displays it in /sys/kernel/debug/tracing/stack_trace.
+ kernel and displays it in /sys/kernel/tracing/stack_trace.
This tracer works by hooking into every function call that the
kernel executes, and keeping a maximum stack depth value and
@@ -253,7 +347,6 @@ config IRQSOFF_TRACER
bool "Interrupts-off Latency Tracer"
default n
depends on TRACE_IRQFLAGS_SUPPORT
- depends on !ARCH_USES_GETTIMEOFFSET
select TRACE_IRQFLAGS
select GENERIC_TRACER
select TRACER_MAX_TRACE
@@ -268,7 +361,7 @@ config IRQSOFF_TRACER
disabled by default and can be runtime (re-)started
via:
- echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
+ echo 0 > /sys/kernel/tracing/tracing_max_latency
(Note that kernel size and overhead increase with this option
enabled. This option and the preempt-off timing option can be
@@ -277,7 +370,6 @@ config IRQSOFF_TRACER
config PREEMPT_TRACER
bool "Preemption-off Latency Tracer"
default n
- depends on !ARCH_USES_GETTIMEOFFSET
depends on PREEMPTION
select GENERIC_TRACER
select TRACER_MAX_TRACE
@@ -293,7 +385,7 @@ config PREEMPT_TRACER
disabled by default and can be runtime (re-)started
via:
- echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
+ echo 0 > /sys/kernel/tracing/tracing_max_latency
(Note that kernel size and overhead increase with this option
enabled. This option and the irqs-off timing option can be
@@ -312,6 +404,7 @@ config SCHED_TRACER
config HWLAT_TRACER
bool "Tracer to detect hardware latencies (like SMIs)"
select GENERIC_TRACER
+ select TRACER_MAX_TRACE
help
This tracer, when enabled will create one or more kernel threads,
depending on what the cpumask file is set to, which each thread
@@ -344,6 +437,69 @@ config HWLAT_TRACER
file. Every time a latency is greater than tracing_thresh, it will
be recorded into the ring buffer.
+config OSNOISE_TRACER
+ bool "OS Noise tracer"
+ select GENERIC_TRACER
+ select TRACER_MAX_TRACE
+ help
+ In the context of high-performance computing (HPC), the Operating
+ System Noise (osnoise) refers to the interference experienced by an
+ application due to activities inside the operating system. In the
+ context of Linux, NMIs, IRQs, SoftIRQs, and any other system thread
+ can cause noise to the system. Moreover, hardware-related jobs can
+ also cause noise, for example, via SMIs.
+
+ The osnoise tracer leverages the hwlat_detector by running a similar
+ loop with preemption, SoftIRQs and IRQs enabled, thus allowing all
+ the sources of osnoise during its execution. The osnoise tracer takes
+ note of the entry and exit point of any source of interferences,
+ increasing a per-cpu interference counter. It saves an interference
+ counter for each source of interference. The interference counter for
+ NMI, IRQs, SoftIRQs, and threads is increased anytime the tool
+ observes these interferences' entry events. When a noise happens
+ without any interference from the operating system level, the
+ hardware noise counter increases, pointing to a hardware-related
+ noise. In this way, osnoise can account for any source of
+ interference. At the end of the period, the osnoise tracer prints
+ the sum of all noise, the max single noise, the percentage of CPU
+ available for the thread, and the counters for the noise sources.
+
+ In addition to the tracer, a set of tracepoints were added to
+ facilitate the identification of the osnoise source.
+
+ The output will appear in the trace and trace_pipe files.
+
+ To enable this tracer, echo in "osnoise" into the current_tracer
+ file.
+
+config TIMERLAT_TRACER
+ bool "Timerlat tracer"
+ select OSNOISE_TRACER
+ select GENERIC_TRACER
+ help
+ The timerlat tracer aims to help the preemptive kernel developers
+ to find sources of wakeup latencies of real-time threads.
+
+ The tracer creates a per-cpu kernel thread with real-time priority.
+ The tracer thread sets a periodic timer to wake itself up, and goes
+ to sleep waiting for the timer to fire. At the wakeup, the thread
+ then computes a wakeup latency value as the difference between
+ the current time and the absolute time that the timer was set
+ to expire.
+
+ The tracer prints two lines at every activation. The first is the
+ timer latency observed at the hardirq context before the
+ activation of the thread. The second is the timer latency observed
+ by the thread, which is the same level that cyclictest reports. The
+ ACTIVATION ID field serves to relate the irq execution to its
+ respective thread execution.
+
+ The tracer is built on top of the osnoise tracer, and the osnoise:
+ events can be used to trace the source of interference from NMI,
+ IRQs and other threads. It also enables the capture of the
+ stacktrace at the IRQ context, which helps to identify the code
+ path that can cause thread delay.
+
config MMIOTRACE
bool "Memory mapped IO tracing"
depends on HAVE_MMIOTRACE_SUPPORT && PCI
@@ -381,7 +537,7 @@ config TRACER_SNAPSHOT
Allow tracing users to take snapshot of the current buffer using the
ftrace interface, e.g.:
- echo 1 > /sys/kernel/debug/tracing/snapshot
+ echo 1 > /sys/kernel/tracing/snapshot
cat snapshot
config TRACER_SNAPSHOT_PER_CPU_SWAP
@@ -393,7 +549,7 @@ config TRACER_SNAPSHOT_PER_CPU_SWAP
full swap (all buffers). If this is set, then the following is
allowed:
- echo 1 > /sys/kernel/debug/tracing/per_cpu/cpu2/snapshot
+ echo 1 > /sys/kernel/tracing/per_cpu/cpu2/snapshot
After which, only the tracing buffer for CPU 2 was swapped with
the main tracing buffer, and the other CPU buffers remain the same.
@@ -440,7 +596,7 @@ config PROFILE_ANNOTATED_BRANCHES
This tracer profiles all likely and unlikely macros
in the kernel. It will display the results in:
- /sys/kernel/debug/tracing/trace_stat/branch_annotated
+ /sys/kernel/tracing/trace_stat/branch_annotated
Note: this will add a significant overhead; only turn this
on if you need to profile the system's use of these macros.
@@ -453,7 +609,7 @@ config PROFILE_ALL_BRANCHES
taken in the kernel is recorded whether it hit or miss.
The results will be displayed in:
- /sys/kernel/debug/tracing/trace_stat/branch_all
+ /sys/kernel/tracing/trace_stat/branch_all
This option also enables the likely/unlikely profiler.
@@ -504,11 +660,37 @@ config BLK_DEV_IO_TRACE
Tracing also is possible using the ftrace interface, e.g.:
echo 1 > /sys/block/sda/sda1/trace/enable
- echo blk > /sys/kernel/debug/tracing/current_tracer
- cat /sys/kernel/debug/tracing/trace_pipe
+ echo blk > /sys/kernel/tracing/current_tracer
+ cat /sys/kernel/tracing/trace_pipe
If unsure, say N.
+config FPROBE_EVENTS
+ depends on FPROBE
+ depends on HAVE_REGS_AND_STACK_ACCESS_API
+ bool "Enable fprobe-based dynamic events"
+ select TRACING
+ select PROBE_EVENTS
+ select DYNAMIC_EVENTS
+ default y
+ help
+ This allows the user to add tracing events on function entry and
+ exit via the ftrace interface. The syntax is the same as for kprobe
+ events, and kprobe events on function entry and exit will be
+ transparently converted to fprobe events.
+
+config PROBE_EVENTS_BTF_ARGS
+ depends on HAVE_FUNCTION_ARG_ACCESS_API
+ depends on FPROBE_EVENTS || KPROBE_EVENTS
+ depends on DEBUG_INFO_BTF && BPF_SYSCALL
+ bool "Support BTF function arguments for probe events"
+ default y
+ help
+ The user can specify the arguments of the probe event using the names
+ of the arguments of the probed function, when the probe location is a
+ kernel function entry or a tracepoint.
+ This is available only if BTF (BPF Type Format) support is enabled.
+
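Illustrative usage (not from this patch; the event name and fetch argument are examples): with FPROBE_EVENTS and PROBE_EVENTS_BTF_ARGS enabled, a probe definition can fetch arguments by the probed function's parameter names, resolved through BTF:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            /* "f" defines an fprobe event; "count" names vfs_read()'s third
             * parameter. Path assumes tracefs at /sys/kernel/tracing. */
            const char *def = "f:mygrp/myread vfs_read count\n";
            int fd = open("/sys/kernel/tracing/dynamic_events",
                          O_WRONLY | O_APPEND);

            if (fd < 0 || write(fd, def, strlen(def)) < 0)
                    perror("dynamic_events");
            if (fd >= 0)
                    close(fd);
            return 0;
    }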
config KPROBE_EVENTS
depends on KPROBES
depends on HAVE_REGS_AND_STACK_ACCESS_API
@@ -531,14 +713,14 @@ config KPROBE_EVENTS
config KPROBE_EVENTS_ON_NOTRACE
bool "Do NOT protect notrace function from kprobe events"
depends on KPROBE_EVENTS
- depends on KPROBES_ON_FTRACE
+ depends on DYNAMIC_FTRACE
default n
help
This is only for the developers who want to debug ftrace itself
using kprobe events.
If kprobes can use ftrace instead of breakpoint, ftrace related
- functions are protected from kprobe-events to prevent an infinit
+ functions are protected from kprobe-events to prevent an infinite
recursion or any unexpected execution path which leads to a kernel
crash.
@@ -595,6 +777,31 @@ config FTRACE_MCOUNT_RECORD
depends on DYNAMIC_FTRACE
depends on HAVE_FTRACE_MCOUNT_RECORD
+config FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
+ bool
+ depends on FTRACE_MCOUNT_RECORD
+
+config FTRACE_MCOUNT_USE_CC
+ def_bool y
+ depends on $(cc-option,-mrecord-mcount)
+ depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
+ depends on FTRACE_MCOUNT_RECORD
+
+config FTRACE_MCOUNT_USE_OBJTOOL
+ def_bool y
+ depends on HAVE_OBJTOOL_MCOUNT
+ depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
+ depends on !FTRACE_MCOUNT_USE_CC
+ depends on FTRACE_MCOUNT_RECORD
+ select OBJTOOL
+
+config FTRACE_MCOUNT_USE_RECORDMCOUNT
+ def_bool y
+ depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
+ depends on !FTRACE_MCOUNT_USE_CC
+ depends on !FTRACE_MCOUNT_USE_OBJTOOL
+ depends on FTRACE_MCOUNT_RECORD
+
config TRACING_MAP
bool
depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
@@ -622,6 +829,21 @@ config SYNTH_EVENTS
If in doubt, say N.
+config USER_EVENTS
+ bool "User trace events"
+ select TRACING
+ select DYNAMIC_EVENTS
+ help
+ User trace events are user-defined trace events that
+ can be used like an existing kernel trace event. User trace
+ events are generated by writing to a tracefs file. User
+ processes can determine if their tracing events should be
+ generated by registering a value and bit with the kernel
+ that reflects when it is enabled or not.
+
+ See Documentation/trace/user_events.rst.
+ If in doubt, say N.
+
config HIST_TRIGGERS
bool "Histogram triggers"
depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
@@ -658,7 +880,7 @@ config TRACEPOINT_BENCHMARK
help
This option creates the tracepoint "benchmark:benchmark_event".
When the tracepoint is enabled, it kicks off a kernel thread that
- goes into an infinite loop (calling cond_sched() to let other tasks
+ goes into an infinite loop (calling cond_resched() to let other tasks
run), and calls the tracepoint. Each iteration will record the time
it took to write to the tracepoint and the next iteration that
data will be passed to the tracepoint itself. That is, the tracepoint
@@ -727,6 +949,45 @@ config TRACE_EVAL_MAP_FILE
If unsure, say N.
+config FTRACE_RECORD_RECURSION
+ bool "Record functions that recurse in function tracing"
+ depends on FUNCTION_TRACER
+ help
+ All callbacks that attach to the function tracing have some sort
+ of protection against recursion. Even though the protection exists,
+ it adds overhead. This option will create a file in the tracefs
+ file system called "recursed_functions" that will list the functions
+ that triggered a recursion.
+
+ This will add more overhead to cases that have recursion.
+
+ If unsure, say N
+
+config FTRACE_RECORD_RECURSION_SIZE
+ int "Max number of recursed functions to record"
+ default 128
+ depends on FTRACE_RECORD_RECURSION
+ help
+ This defines the limit on the number of functions that can be
+ listed in the "recursed_functions" file, which lists all
+ the functions that caused a recursion to happen.
+ This file can be reset, but the limit cannot be changed
+ at runtime.
+
+config RING_BUFFER_RECORD_RECURSION
+ bool "Record functions that recurse in the ring buffer"
+ depends on FTRACE_RECORD_RECURSION
+ # default y, because it is coupled with FTRACE_RECORD_RECURSION
+ default y
+ help
+ The ring buffer has its own internal recursion. Although recursion
+ won't cause harm because of the protection, it does cause unwanted
+ overhead. Enabling this option will record the places where recursion
+ was detected in the ftrace "recursed_functions" file.
+
+ This will add more overhead to cases that have recursion.
+
config GCOV_PROFILE_FTRACE
bool "Enable GCOV profiling on ftrace subsystem"
depends on GCOV_KERNEL
@@ -774,6 +1035,20 @@ config EVENT_TRACE_TEST_SYSCALLS
TBD - enable a way to actually call the syscalls as we test their
events
+config FTRACE_SORT_STARTUP_TEST
+ bool "Verify compile time sorting of ftrace functions"
+ depends on DYNAMIC_FTRACE
+ depends on BUILDTIME_MCOUNT_SORT
+ help
+ Sorting of the mcount_loc section, which ftrace uses to find
+ where to patch functions for tracing and other callbacks, is
+ done at compile time. But if the sort is not done correctly,
+ it will cause non-deterministic failures. When this is set,
+ the sorted sections will be verified to be indeed sorted and
+ a warning will be issued if they are not.
+
+ If unsure, say N
+
config RING_BUFFER_STARTUP_TEST
bool "Ring buffer startup self test"
depends on RING_BUFFER
@@ -790,13 +1065,33 @@ config RING_BUFFER_STARTUP_TEST
The test runs for 10 seconds. This will slow your boot time
by at least 10 more seconds.
- At the end of the test, statics and more checks are done.
- It will output the stats of each per cpu buffer. What
+ At the end of the test, statistics and more checks are done.
+ It will output the stats of each per cpu buffer: What
was written, the sizes, what was read, what was lost, and
other similar details.
If unsure, say N
+config RING_BUFFER_VALIDATE_TIME_DELTAS
+ bool "Verify ring buffer time stamp deltas"
+ depends on RING_BUFFER
+ help
+ This will audit the time stamps on the ring buffer sub
+ buffer to make sure that all the time deltas for the
+ events on a sub buffer match the current time stamp.
+ This audit is performed for every event that is not
+ interrupted, or interrupting another event. A check
+ is also made when traversing sub buffers to make sure
+ that all the deltas on the previous sub buffer do not
+ add up to be greater than the current time stamp.
+
+ NOTE: This adds significant overhead to recording of events,
+ and should only be used to test the logic of the ring buffer.
+ Do not use it on production systems.
+
+ Only say Y if you understand what this does, and you
+ still want it enabled. Otherwise say N
+
config MMIOTRACE_TEST
tristate "Test module for mmiotrace"
depends on MMIOTRACE && m
@@ -820,6 +1115,10 @@ config PREEMPTIRQ_DELAY_TEST
irq-disabled critical sections for 500us:
modprobe preemptirq_delay_test test_mode=irq delay=500 burst_size=3
+ In addition, if you want to run the test on the CPU on which the
+ latency tracer is running, specify cpu_affinity=cpu_num at the end
+ of the command.
+
If unsure, say N
config SYNTH_EVENT_GEN_TEST
@@ -870,7 +1169,6 @@ config HIST_TRIGGERS_DEBUG
If unsure, say N.
-endif # FTRACE
-
-endif # TRACING_SUPPORT
+source "kernel/trace/rv/Kconfig"
+endif # FTRACE
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 6575bb0a0434..057cd975d014 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -2,9 +2,9 @@
# Do not instrument the tracer itself:
+ccflags-remove-$(CONFIG_FUNCTION_TRACER) += $(CC_FLAGS_FTRACE)
+
ifdef CONFIG_FUNCTION_TRACER
-ORIG_CFLAGS := $(KBUILD_CFLAGS)
-KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS))
# Avoid recursion due to instrumentation.
KCSAN_SANITIZE := n
@@ -31,6 +31,12 @@ ifdef CONFIG_GCOV_PROFILE_FTRACE
GCOV_PROFILE := y
endif
+# Functions in this file could be invoked from early interrupt
+# code and produce random code coverage.
+KCOV_INSTRUMENT_trace_preemptirq.o := n
+
+CFLAGS_bpf_trace.o := -I$(src)
+
CFLAGS_trace_benchmark.o := -I$(src)
CFLAGS_trace_events_filter.o := -I$(src)
@@ -45,6 +51,7 @@ obj-$(CONFIG_TRACING) += trace_output.o
obj-$(CONFIG_TRACING) += trace_seq.o
obj-$(CONFIG_TRACING) += trace_stat.o
obj-$(CONFIG_TRACING) += trace_printk.o
+obj-$(CONFIG_TRACING) += pid_list.o
obj-$(CONFIG_TRACING_MAP) += tracing_map.o
obj-$(CONFIG_PREEMPTIRQ_DELAY_TEST) += preemptirq_delay_test.o
obj-$(CONFIG_SYNTH_EVENT_GEN_TEST) += synth_event_gen_test.o
@@ -56,6 +63,7 @@ obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
obj-$(CONFIG_HWLAT_TRACER) += trace_hwlat.o
+obj-$(CONFIG_OSNOISE_TRACER) += trace_osnoise.o
obj-$(CONFIG_NOP_TRACER) += trace_nop.o
obj-$(CONFIG_STACK_TRACER) += trace_stack.o
obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
@@ -74,11 +82,14 @@ obj-$(CONFIG_EVENT_TRACING) += trace_event_perf.o
endif
obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
obj-$(CONFIG_EVENT_TRACING) += trace_events_trigger.o
+obj-$(CONFIG_PROBE_EVENTS) += trace_eprobe.o
obj-$(CONFIG_TRACE_EVENT_INJECT) += trace_events_inject.o
obj-$(CONFIG_SYNTH_EVENTS) += trace_events_synth.o
obj-$(CONFIG_HIST_TRIGGERS) += trace_events_hist.o
+obj-$(CONFIG_USER_EVENTS) += trace_events_user.o
obj-$(CONFIG_BPF_EVENTS) += bpf_trace.o
obj-$(CONFIG_KPROBE_EVENTS) += trace_kprobe.o
+obj-$(CONFIG_TRACEPOINTS) += error_report-traces.o
obj-$(CONFIG_TRACEPOINTS) += power-traces.o
ifeq ($(CONFIG_PM),y)
obj-$(CONFIG_TRACEPOINTS) += rpm-traces.o
@@ -88,9 +99,15 @@ obj-$(CONFIG_KGDB_KDB) += trace_kdb.o
endif
obj-$(CONFIG_DYNAMIC_EVENTS) += trace_dynevent.o
obj-$(CONFIG_PROBE_EVENTS) += trace_probe.o
+obj-$(CONFIG_PROBE_EVENTS_BTF_ARGS) += trace_btf.o
obj-$(CONFIG_UPROBE_EVENTS) += trace_uprobe.o
obj-$(CONFIG_BOOTTIME_TRACING) += trace_boot.o
+obj-$(CONFIG_FTRACE_RECORD_RECURSION) += trace_recursion_record.o
+obj-$(CONFIG_FPROBE) += fprobe.o
+obj-$(CONFIG_RETHOOK) += rethook.o
+obj-$(CONFIG_FPROBE_EVENTS) += trace_fprobe.o
obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o
+obj-$(CONFIG_RV) += rv/
libftrace-y := ftrace.o
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 5ef0484513ec..d5d94510afd3 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -34,7 +34,7 @@ static struct trace_array *blk_tr;
static bool blk_tracer_enabled __read_mostly;
static LIST_HEAD(running_trace_list);
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock);
+static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(running_trace_lock);
/* Select an alternative, minimalistic output than the original one */
#define TRACE_BLK_OPT_CLASSIC 0x1
@@ -72,17 +72,17 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action,
struct blk_io_trace *t;
struct ring_buffer_event *event = NULL;
struct trace_buffer *buffer = NULL;
- int pc = 0;
+ unsigned int trace_ctx = 0;
int cpu = smp_processor_id();
bool blk_tracer = blk_tracer_enabled;
ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
if (blk_tracer) {
buffer = blk_tr->array_buffer.buffer;
- pc = preempt_count();
+ trace_ctx = tracing_gen_ctx_flags(0);
event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
sizeof(*t) + len + cgid_len,
- 0, pc);
+ trace_ctx);
if (!event)
return;
t = ring_buffer_event_data(event);
@@ -107,7 +107,7 @@ record_it:
memcpy((void *) t + sizeof(*t) + cgid_len, data, len);
if (blk_tracer)
- trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
+ trace_buffer_unlock_commit(blk_tr, buffer, event, trace_ctx);
}
}
@@ -121,12 +121,12 @@ static void trace_note_tsk(struct task_struct *tsk)
struct blk_trace *bt;
tsk->btrace_seq = blktrace_seq;
- spin_lock_irqsave(&running_trace_lock, flags);
+ raw_spin_lock_irqsave(&running_trace_lock, flags);
list_for_each_entry(bt, &running_trace_list, running_list) {
trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
sizeof(tsk->comm), 0);
}
- spin_unlock_irqrestore(&running_trace_lock, flags);
+ raw_spin_unlock_irqrestore(&running_trace_lock, flags);
}
static void trace_note_time(struct blk_trace *bt)
@@ -145,13 +145,14 @@ static void trace_note_time(struct blk_trace *bt)
local_irq_restore(flags);
}
-void __trace_note_message(struct blk_trace *bt, struct blkcg *blkcg,
- const char *fmt, ...)
+void __blk_trace_note_message(struct blk_trace *bt,
+ struct cgroup_subsys_state *css, const char *fmt, ...)
{
int n;
va_list args;
unsigned long flags;
char *buf;
+ u64 cgid = 0;
if (unlikely(bt->trace_state != Blktrace_running &&
!blk_tracer_enabled))
@@ -170,17 +171,16 @@ void __trace_note_message(struct blk_trace *bt, struct blkcg *blkcg,
n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
va_end(args);
- if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
- blkcg = NULL;
#ifdef CONFIG_BLK_CGROUP
- trace_note(bt, current->pid, BLK_TN_MESSAGE, buf, n,
- blkcg ? cgroup_id(blkcg->css.cgroup) : 1);
-#else
- trace_note(bt, current->pid, BLK_TN_MESSAGE, buf, n, 0);
+ if (css && (blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
+ cgid = cgroup_id(css->cgroup);
+ else
+ cgid = 1;
#endif
+ trace_note(bt, current->pid, BLK_TN_MESSAGE, buf, n, cgid);
local_irq_restore(flags);
}
-EXPORT_SYMBOL_GPL(__trace_note_message);
+EXPORT_SYMBOL_GPL(__blk_trace_note_message);
static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
pid_t pid)
@@ -205,7 +205,7 @@ static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
#define BLK_TC_PREFLUSH BLK_TC_FLUSH
/* The ilog2() calls fall out because they're constant */
-#define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \
+#define MASK_TC_BIT(rw, __name) ((__force u32)(rw & REQ_ ## __name) << \
(ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
/*
@@ -213,8 +213,8 @@ static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
* blk_io_trace structure and places it in a per-cpu subbuffer.
*/
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
- int op, int op_flags, u32 what, int error, int pdu_len,
- void *pdu_data, u64 cgid)
+ const blk_opf_t opf, u32 what, int error,
+ int pdu_len, void *pdu_data, u64 cgid)
{
struct task_struct *tsk = current;
struct ring_buffer_event *event = NULL;
@@ -222,20 +222,22 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
struct blk_io_trace *t;
unsigned long flags = 0;
unsigned long *sequence;
+ unsigned int trace_ctx = 0;
pid_t pid;
- int cpu, pc = 0;
+ int cpu;
bool blk_tracer = blk_tracer_enabled;
ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
+ const enum req_op op = opf & REQ_OP_MASK;
if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
return;
what |= ddir_act[op_is_write(op) ? WRITE : READ];
- what |= MASK_TC_BIT(op_flags, SYNC);
- what |= MASK_TC_BIT(op_flags, RAHEAD);
- what |= MASK_TC_BIT(op_flags, META);
- what |= MASK_TC_BIT(op_flags, PREFLUSH);
- what |= MASK_TC_BIT(op_flags, FUA);
+ what |= MASK_TC_BIT(opf, SYNC);
+ what |= MASK_TC_BIT(opf, RAHEAD);
+ what |= MASK_TC_BIT(opf, META);
+ what |= MASK_TC_BIT(opf, PREFLUSH);
+ what |= MASK_TC_BIT(opf, FUA);
if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)
what |= BLK_TC_ACT(BLK_TC_DISCARD);
if (op == REQ_OP_FLUSH)
@@ -252,10 +254,10 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
tracing_record_cmdline(current);
buffer = blk_tr->array_buffer.buffer;
- pc = preempt_count();
+ trace_ctx = tracing_gen_ctx_flags(0);
event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
sizeof(*t) + pdu_len + cgid_len,
- 0, pc);
+ trace_ctx);
if (!event)
return;
t = ring_buffer_event_data(event);
@@ -301,7 +303,7 @@ record_it:
memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len);
if (blk_tracer) {
- trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
+ trace_buffer_unlock_commit(blk_tr, buffer, event, trace_ctx);
return;
}
}
@@ -309,12 +311,20 @@ record_it:
local_irq_restore(flags);
}
-static void blk_trace_free(struct blk_trace *bt)
+static void blk_trace_free(struct request_queue *q, struct blk_trace *bt)
{
- debugfs_remove(bt->msg_file);
- debugfs_remove(bt->dropped_file);
relay_close(bt->rchan);
- debugfs_remove(bt->dir);
+
+ /*
+ * If 'bt->dir' is not set, then both 'dropped' and 'msg' are created
+ * under 'q->debugfs_dir', thus lookup and remove them.
+ */
+ if (!bt->dir) {
+ debugfs_lookup_and_remove("dropped", q->debugfs_dir);
+ debugfs_lookup_and_remove("msg", q->debugfs_dir);
+ } else {
+ debugfs_remove(bt->dir);
+ }
free_percpu(bt->sequence);
free_percpu(bt->msg_data);
kfree(bt);
@@ -336,10 +346,42 @@ static void put_probe_ref(void)
mutex_unlock(&blk_probe_mutex);
}
-static void blk_trace_cleanup(struct blk_trace *bt)
+static int blk_trace_start(struct blk_trace *bt)
+{
+ if (bt->trace_state != Blktrace_setup &&
+ bt->trace_state != Blktrace_stopped)
+ return -EINVAL;
+
+ blktrace_seq++;
+ smp_mb();
+ bt->trace_state = Blktrace_running;
+ raw_spin_lock_irq(&running_trace_lock);
+ list_add(&bt->running_list, &running_trace_list);
+ raw_spin_unlock_irq(&running_trace_lock);
+ trace_note_time(bt);
+
+ return 0;
+}
+
+static int blk_trace_stop(struct blk_trace *bt)
{
+ if (bt->trace_state != Blktrace_running)
+ return -EINVAL;
+
+ bt->trace_state = Blktrace_stopped;
+ raw_spin_lock_irq(&running_trace_lock);
+ list_del_init(&bt->running_list);
+ raw_spin_unlock_irq(&running_trace_lock);
+ relay_flush(bt->rchan);
+
+ return 0;
+}
+
+static void blk_trace_cleanup(struct request_queue *q, struct blk_trace *bt)
+{
+ blk_trace_stop(bt);
synchronize_rcu();
- blk_trace_free(bt);
+ blk_trace_free(q, bt);
put_probe_ref();
}
@@ -348,12 +390,11 @@ static int __blk_trace_remove(struct request_queue *q)
struct blk_trace *bt;
bt = rcu_replace_pointer(q->blk_trace, NULL,
- lockdep_is_held(&q->blk_trace_mutex));
+ lockdep_is_held(&q->debugfs_mutex));
if (!bt)
return -EINVAL;
- if (bt->trace_state != Blktrace_running)
- blk_trace_cleanup(bt);
+ blk_trace_cleanup(q, bt);
return 0;
}
@@ -362,9 +403,9 @@ int blk_trace_remove(struct request_queue *q)
{
int ret;
- mutex_lock(&q->blk_trace_mutex);
+ mutex_lock(&q->debugfs_mutex);
ret = __blk_trace_remove(q);
- mutex_unlock(&q->blk_trace_mutex);
+ mutex_unlock(&q->debugfs_mutex);
return ret;
}
@@ -402,7 +443,7 @@ static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
return PTR_ERR(msg);
bt = filp->private_data;
- __trace_note_message(bt, NULL, "%s", msg);
+ __blk_trace_note_message(bt, NULL, "%s", msg);
kfree(msg);
return count;
@@ -449,7 +490,7 @@ static struct dentry *blk_create_buf_file_callback(const char *filename,
&relay_file_operations);
}
-static struct rchan_callbacks blk_relay_callbacks = {
+static const struct rchan_callbacks blk_relay_callbacks = {
.subbuf_start = blk_subbuf_start_callback,
.create_buf_file = blk_create_buf_file_callback,
.remove_buf_file = blk_remove_buf_file_callback,
@@ -458,14 +499,9 @@ static struct rchan_callbacks blk_relay_callbacks = {
static void blk_trace_setup_lba(struct blk_trace *bt,
struct block_device *bdev)
{
- struct hd_struct *part = NULL;
-
- if (bdev)
- part = bdev->bd_part;
-
- if (part) {
- bt->start_lba = part->start_sect;
- bt->end_lba = part->start_sect + part->nr_sects;
+ if (bdev) {
+ bt->start_lba = bdev->bd_start_sect;
+ bt->end_lba = bdev->bd_start_sect + bdev_nr_sectors(bdev);
} else {
bt->start_lba = 0;
bt->end_lba = -1ULL;
@@ -483,12 +519,11 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
struct dentry *dir = NULL;
int ret;
+ lockdep_assert_held(&q->debugfs_mutex);
+
if (!buts->buf_size || !buts->buf_nr)
return -EINVAL;
- if (!blk_debugfs_root)
- return -ENOENT;
-
strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';
@@ -503,7 +538,7 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
* we can be.
*/
if (rcu_dereference_protected(q->blk_trace,
- lockdep_is_held(&q->blk_trace_mutex))) {
+ lockdep_is_held(&q->debugfs_mutex))) {
pr_warn("Concurrent blktraces are not allowed on %s\n",
buts->name);
return -EBUSY;
@@ -522,21 +557,36 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
if (!bt->msg_data)
goto err;
- ret = -ENOENT;
-
- dir = debugfs_lookup(buts->name, blk_debugfs_root);
- if (!dir)
+ /*
+ * When tracing the whole disk reuse the existing debugfs directory
+ * created by the block layer on init. For partitions block devices,
+ * and scsi-generic block devices we create a temporary new debugfs
+ * directory that will be removed once the trace ends.
+ */
+ if (bdev && !bdev_is_partition(bdev))
+ dir = q->debugfs_dir;
+ else
bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);
+ /*
+ * As blktrace relies on debugfs for its interface the debugfs directory
+ * is required, contrary to the usual mantra of not checking for debugfs
+ * files or directories.
+ */
+ if (IS_ERR_OR_NULL(dir)) {
+ pr_warn("debugfs_dir not present for %s so skipping\n",
+ buts->name);
+ ret = -ENOENT;
+ goto err;
+ }
+
bt->dev = dev;
atomic_set(&bt->dropped, 0);
INIT_LIST_HEAD(&bt->running_list);
ret = -EIO;
- bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
- &blk_dropped_fops);
-
- bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
+ debugfs_create_file("dropped", 0444, dir, bt, &blk_dropped_fops);
+ debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
bt->rchan = relay_open("trace", dir, buts->buf_size,
buts->buf_nr, &blk_relay_callbacks, bt);
@@ -563,10 +613,8 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
ret = 0;
err:
- if (dir && !bt->dir)
- dput(dir);
if (ret)
- blk_trace_free(bt);
+ blk_trace_free(q, bt);
return ret;
}
@@ -597,9 +645,9 @@ int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
{
int ret;
- mutex_lock(&q->blk_trace_mutex);
+ mutex_lock(&q->debugfs_mutex);
ret = __blk_trace_setup(q, name, dev, bdev, arg);
- mutex_unlock(&q->blk_trace_mutex);
+ mutex_unlock(&q->debugfs_mutex);
return ret;
}
@@ -641,53 +689,26 @@ static int compat_blk_trace_setup(struct request_queue *q, char *name,
static int __blk_trace_startstop(struct request_queue *q, int start)
{
- int ret;
struct blk_trace *bt;
bt = rcu_dereference_protected(q->blk_trace,
- lockdep_is_held(&q->blk_trace_mutex));
+ lockdep_is_held(&q->debugfs_mutex));
if (bt == NULL)
return -EINVAL;
- /*
- * For starting a trace, we can transition from a setup or stopped
- * trace. For stopping a trace, the state must be running
- */
- ret = -EINVAL;
- if (start) {
- if (bt->trace_state == Blktrace_setup ||
- bt->trace_state == Blktrace_stopped) {
- blktrace_seq++;
- smp_mb();
- bt->trace_state = Blktrace_running;
- spin_lock_irq(&running_trace_lock);
- list_add(&bt->running_list, &running_trace_list);
- spin_unlock_irq(&running_trace_lock);
-
- trace_note_time(bt);
- ret = 0;
- }
- } else {
- if (bt->trace_state == Blktrace_running) {
- bt->trace_state = Blktrace_stopped;
- spin_lock_irq(&running_trace_lock);
- list_del_init(&bt->running_list);
- spin_unlock_irq(&running_trace_lock);
- relay_flush(bt->rchan);
- ret = 0;
- }
- }
-
- return ret;
+ if (start)
+ return blk_trace_start(bt);
+ else
+ return blk_trace_stop(bt);
}
int blk_trace_startstop(struct request_queue *q, int start)
{
int ret;
- mutex_lock(&q->blk_trace_mutex);
+ mutex_lock(&q->debugfs_mutex);
ret = __blk_trace_startstop(q, start);
- mutex_unlock(&q->blk_trace_mutex);
+ mutex_unlock(&q->debugfs_mutex);
return ret;
}
@@ -700,7 +721,7 @@ EXPORT_SYMBOL_GPL(blk_trace_startstop);
*/
/**
- * blk_trace_ioctl: - handle the ioctls associated with tracing
+ * blk_trace_ioctl - handle the ioctls associated with tracing
* @bdev: the block device
* @cmd: the ioctl cmd
* @arg: the argument data, if any
@@ -708,30 +729,26 @@ EXPORT_SYMBOL_GPL(blk_trace_startstop);
**/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
- struct request_queue *q;
+ struct request_queue *q = bdev_get_queue(bdev);
int ret, start = 0;
char b[BDEVNAME_SIZE];
- q = bdev_get_queue(bdev);
- if (!q)
- return -ENXIO;
-
- mutex_lock(&q->blk_trace_mutex);
+ mutex_lock(&q->debugfs_mutex);
switch (cmd) {
case BLKTRACESETUP:
- bdevname(bdev, b);
+ snprintf(b, sizeof(b), "%pg", bdev);
ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
break;
#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
case BLKTRACESETUP32:
- bdevname(bdev, b);
+ snprintf(b, sizeof(b), "%pg", bdev);
ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
break;
#endif
case BLKTRACESTART:
start = 1;
- /* fall through */
+ fallthrough;
case BLKTRACESTOP:
ret = __blk_trace_startstop(q, start);
break;
@@ -743,30 +760,26 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
break;
}
- mutex_unlock(&q->blk_trace_mutex);
+ mutex_unlock(&q->debugfs_mutex);
return ret;
}
/**
- * blk_trace_shutdown: - stop and cleanup trace structures
+ * blk_trace_shutdown - stop and cleanup trace structures
* @q: the request queue associated with the device
*
**/
void blk_trace_shutdown(struct request_queue *q)
{
- mutex_lock(&q->blk_trace_mutex);
if (rcu_dereference_protected(q->blk_trace,
- lockdep_is_held(&q->blk_trace_mutex))) {
- __blk_trace_startstop(q, 0);
+ lockdep_is_held(&q->debugfs_mutex)))
__blk_trace_remove(q);
- }
-
- mutex_unlock(&q->blk_trace_mutex);
}
#ifdef CONFIG_BLK_CGROUP
static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
+ struct cgroup_subsys_state *blkcg_css;
struct blk_trace *bt;
/* We don't use the 'bt' value here except as an optimization... */
@@ -774,24 +787,25 @@ static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
return 0;
- if (!bio->bi_blkg)
+ blkcg_css = bio_blkcg_css(bio);
+ if (!blkcg_css)
return 0;
- return cgroup_id(bio_blkcg(bio)->css.cgroup);
+ return cgroup_id(blkcg_css->cgroup);
}
#else
-u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
+static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
return 0;
}
#endif
static u64
-blk_trace_request_get_cgid(struct request_queue *q, struct request *rq)
+blk_trace_request_get_cgid(struct request *rq)
{
if (!rq->bio)
return 0;
/* Use the first bio */
- return blk_trace_bio_get_cgid(q, rq->bio);
+ return blk_trace_bio_get_cgid(rq->q, rq->bio);
}
/*
@@ -810,7 +824,7 @@ blk_trace_request_get_cgid(struct request_queue *q, struct request *rq)
* Records an action against a request. Will log the bio offset + size.
*
**/
-static void blk_add_trace_rq(struct request *rq, int error,
+static void blk_add_trace_rq(struct request *rq, blk_status_t error,
unsigned int nr_bytes, u32 what, u64 cgid)
{
struct blk_trace *bt;
@@ -827,38 +841,40 @@ static void blk_add_trace_rq(struct request *rq, int error,
else
what |= BLK_TC_ACT(BLK_TC_FS);
- __blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq),
- rq->cmd_flags, what, error, 0, NULL, cgid);
+ __blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, rq->cmd_flags,
+ what, blk_status_to_errno(error), 0, NULL, cgid);
rcu_read_unlock();
}
-static void blk_add_trace_rq_insert(void *ignore,
- struct request_queue *q, struct request *rq)
+static void blk_add_trace_rq_insert(void *ignore, struct request *rq)
{
blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT,
- blk_trace_request_get_cgid(q, rq));
+ blk_trace_request_get_cgid(rq));
}
-static void blk_add_trace_rq_issue(void *ignore,
- struct request_queue *q, struct request *rq)
+static void blk_add_trace_rq_issue(void *ignore, struct request *rq)
{
blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE,
- blk_trace_request_get_cgid(q, rq));
+ blk_trace_request_get_cgid(rq));
+}
+
+static void blk_add_trace_rq_merge(void *ignore, struct request *rq)
+{
+ blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_BACKMERGE,
+ blk_trace_request_get_cgid(rq));
}
-static void blk_add_trace_rq_requeue(void *ignore,
- struct request_queue *q,
- struct request *rq)
+static void blk_add_trace_rq_requeue(void *ignore, struct request *rq)
{
blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE,
- blk_trace_request_get_cgid(q, rq));
+ blk_trace_request_get_cgid(rq));
}
static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
- int error, unsigned int nr_bytes)
+ blk_status_t error, unsigned int nr_bytes)
{
blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE,
- blk_trace_request_get_cgid(rq->q, rq));
+ blk_trace_request_get_cgid(rq));
}
/**
@@ -885,15 +901,14 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
}
__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
- bio_op(bio), bio->bi_opf, what, error, 0, NULL,
+ bio->bi_opf, what, error, 0, NULL,
blk_trace_bio_get_cgid(q, bio));
rcu_read_unlock();
}
-static void blk_add_trace_bio_bounce(void *ignore,
- struct request_queue *q, struct bio *bio)
+static void blk_add_trace_bio_bounce(void *ignore, struct bio *bio)
{
- blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
+ blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BOUNCE, 0);
}
static void blk_add_trace_bio_complete(void *ignore,
@@ -903,63 +918,26 @@ static void blk_add_trace_bio_complete(void *ignore,
blk_status_to_errno(bio->bi_status));
}
-static void blk_add_trace_bio_backmerge(void *ignore,
- struct request_queue *q,
- struct request *rq,
- struct bio *bio)
+static void blk_add_trace_bio_backmerge(void *ignore, struct bio *bio)
{
- blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
+ blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BACKMERGE,
+ 0);
}
-static void blk_add_trace_bio_frontmerge(void *ignore,
- struct request_queue *q,
- struct request *rq,
- struct bio *bio)
+static void blk_add_trace_bio_frontmerge(void *ignore, struct bio *bio)
{
- blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
+ blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_FRONTMERGE,
+ 0);
}
-static void blk_add_trace_bio_queue(void *ignore,
- struct request_queue *q, struct bio *bio)
+static void blk_add_trace_bio_queue(void *ignore, struct bio *bio)
{
- blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0);
-}
-
-static void blk_add_trace_getrq(void *ignore,
- struct request_queue *q,
- struct bio *bio, int rw)
-{
- if (bio)
- blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
- else {
- struct blk_trace *bt;
-
- rcu_read_lock();
- bt = rcu_dereference(q->blk_trace);
- if (bt)
- __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0,
- NULL, 0);
- rcu_read_unlock();
- }
+ blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_QUEUE, 0);
}
-
-static void blk_add_trace_sleeprq(void *ignore,
- struct request_queue *q,
- struct bio *bio, int rw)
+static void blk_add_trace_getrq(void *ignore, struct bio *bio)
{
- if (bio)
- blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
- else {
- struct blk_trace *bt;
-
- rcu_read_lock();
- bt = rcu_dereference(q->blk_trace);
- if (bt)
- __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ,
- 0, 0, NULL, 0);
- rcu_read_unlock();
- }
+ blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_GETRQ, 0);
}
static void blk_add_trace_plug(void *ignore, struct request_queue *q)
@@ -969,7 +947,7 @@ static void blk_add_trace_plug(void *ignore, struct request_queue *q)
rcu_read_lock();
bt = rcu_dereference(q->blk_trace);
if (bt)
- __blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, 0);
+ __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, 0);
rcu_read_unlock();
}
@@ -989,15 +967,14 @@ static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
else
what = BLK_TA_UNPLUG_TIMER;
- __blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, 0);
+ __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, 0);
}
rcu_read_unlock();
}
-static void blk_add_trace_split(void *ignore,
- struct request_queue *q, struct bio *bio,
- unsigned int pdu)
+static void blk_add_trace_split(void *ignore, struct bio *bio, unsigned int pdu)
{
+ struct request_queue *q = bio->bi_bdev->bd_disk->queue;
struct blk_trace *bt;
rcu_read_lock();
@@ -1006,8 +983,7 @@ static void blk_add_trace_split(void *ignore,
__be64 rpdu = cpu_to_be64(pdu);
__blk_add_trace(bt, bio->bi_iter.bi_sector,
- bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf,
- BLK_TA_SPLIT,
+ bio->bi_iter.bi_size, bio->bi_opf, BLK_TA_SPLIT,
blk_status_to_errno(bio->bi_status),
sizeof(rpdu), &rpdu,
blk_trace_bio_get_cgid(q, bio));
@@ -1018,20 +994,16 @@ static void blk_add_trace_split(void *ignore,
/**
* blk_add_trace_bio_remap - Add a trace for a bio-remap operation
* @ignore: trace callback data parameter (not used)
- * @q: queue the io is for
* @bio: the source bio
- * @dev: target device
+ * @dev: source device
* @from: source sector
*
- * Description:
- * Device mapper or raid target sometimes need to split a bio because
- * it spans a stripe (or similar). Add a trace for that action.
- *
+ * Called after a bio is remapped to a different device and/or sector.
**/
-static void blk_add_trace_bio_remap(void *ignore,
- struct request_queue *q, struct bio *bio,
- dev_t dev, sector_t from)
+static void blk_add_trace_bio_remap(void *ignore, struct bio *bio, dev_t dev,
+ sector_t from)
{
+ struct request_queue *q = bio->bi_bdev->bd_disk->queue;
struct blk_trace *bt;
struct blk_io_trace_remap r;
@@ -1047,7 +1019,7 @@ static void blk_add_trace_bio_remap(void *ignore,
r.sector_from = cpu_to_be64(from);
__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
- bio_op(bio), bio->bi_opf, BLK_TA_REMAP,
+ bio->bi_opf, BLK_TA_REMAP,
blk_status_to_errno(bio->bi_status),
sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
rcu_read_unlock();
@@ -1056,7 +1028,6 @@ static void blk_add_trace_bio_remap(void *ignore,
/**
* blk_add_trace_rq_remap - Add a trace for a request-remap operation
* @ignore: trace callback data parameter (not used)
- * @q: queue the io is for
* @rq: the source request
* @dev: target device
* @from: source sector
@@ -1066,34 +1037,31 @@ static void blk_add_trace_bio_remap(void *ignore,
* Add a trace for that action.
*
**/
-static void blk_add_trace_rq_remap(void *ignore,
- struct request_queue *q,
- struct request *rq, dev_t dev,
+static void blk_add_trace_rq_remap(void *ignore, struct request *rq, dev_t dev,
sector_t from)
{
struct blk_trace *bt;
struct blk_io_trace_remap r;
rcu_read_lock();
- bt = rcu_dereference(q->blk_trace);
+ bt = rcu_dereference(rq->q->blk_trace);
if (likely(!bt)) {
rcu_read_unlock();
return;
}
r.device_from = cpu_to_be32(dev);
- r.device_to = cpu_to_be32(disk_devt(rq->rq_disk));
+ r.device_to = cpu_to_be32(disk_devt(rq->q->disk));
r.sector_from = cpu_to_be64(from);
__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
- rq_data_dir(rq), 0, BLK_TA_REMAP, 0,
- sizeof(r), &r, blk_trace_request_get_cgid(q, rq));
+ rq->cmd_flags, BLK_TA_REMAP, 0,
+ sizeof(r), &r, blk_trace_request_get_cgid(rq));
rcu_read_unlock();
}
/**
* blk_add_driver_data - Add binary message with driver-specific data
- * @q: queue the io is for
* @rq: io request
* @data: driver-specific data
* @len: length of driver-specific data
@@ -1102,22 +1070,20 @@ static void blk_add_trace_rq_remap(void *ignore,
* Some drivers might want to write driver-specific data per request.
*
**/
-void blk_add_driver_data(struct request_queue *q,
- struct request *rq,
- void *data, size_t len)
+void blk_add_driver_data(struct request *rq, void *data, size_t len)
{
struct blk_trace *bt;
rcu_read_lock();
- bt = rcu_dereference(q->blk_trace);
+ bt = rcu_dereference(rq->q->blk_trace);
if (likely(!bt)) {
rcu_read_unlock();
return;
}
- __blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0,
+ __blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0,
BLK_TA_DRV_DATA, 0, len, data,
- blk_trace_request_get_cgid(q, rq));
+ blk_trace_request_get_cgid(rq));
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);
@@ -1130,6 +1096,8 @@ static void blk_register_tracepoints(void)
WARN_ON(ret);
ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
WARN_ON(ret);
+ ret = register_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
+ WARN_ON(ret);
ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
WARN_ON(ret);
ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
@@ -1146,8 +1114,6 @@ static void blk_register_tracepoints(void)
WARN_ON(ret);
ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
WARN_ON(ret);
- ret = register_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
- WARN_ON(ret);
ret = register_trace_block_plug(blk_add_trace_plug, NULL);
WARN_ON(ret);
ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
@@ -1167,7 +1133,6 @@ static void blk_unregister_tracepoints(void)
unregister_trace_block_split(blk_add_trace_split, NULL);
unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
unregister_trace_block_plug(blk_add_trace_plug, NULL);
- unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
@@ -1176,6 +1141,7 @@ static void blk_unregister_tracepoints(void)
unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
+ unregister_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
@@ -1319,7 +1285,7 @@ static void blk_log_action(struct trace_iterator *iter, const char *act,
* ones now use the 64bit ino as the whole ID and
* no longer use generation.
*
- * Regarldess of the content, always output
+ * Regardless of the content, always output
* "LOW32,HIGH32" so that FILEID_INO32_GEN fid can
* be mapped back to @id on both 64 and 32bit ino
* setups. See __kernfs_fh_to_dentry().
@@ -1361,7 +1327,7 @@ static void blk_log_dump_pdu(struct trace_seq *s,
i == 0 ? "" : " ", pdu_buf[i]);
/*
- * stop when the rest is just zeroes and indicate so
+ * stop when the rest is just zeros and indicate so
* with a ".." appended
*/
if (i == end && end != pdu_len - 1) {
@@ -1578,7 +1544,8 @@ blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
- if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
+ if ((iter->ent->type != TRACE_BLK) ||
+ !(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
return TRACE_TYPE_UNHANDLED;
return print_one_line(iter, true);
@@ -1642,13 +1609,15 @@ static int blk_trace_remove_queue(struct request_queue *q)
struct blk_trace *bt;
bt = rcu_replace_pointer(q->blk_trace, NULL,
- lockdep_is_held(&q->blk_trace_mutex));
+ lockdep_is_held(&q->debugfs_mutex));
if (bt == NULL)
return -EINVAL;
+ blk_trace_stop(bt);
+
put_probe_ref();
synchronize_rcu();
- blk_trace_free(bt);
+ blk_trace_free(q, bt);
return 0;
}
@@ -1679,7 +1648,7 @@ static int blk_trace_setup_queue(struct request_queue *q,
return 0;
free_bt:
- blk_trace_free(bt);
+ blk_trace_free(q, bt);
return ret;
}
@@ -1791,36 +1760,19 @@ static ssize_t blk_trace_mask2str(char *buf, int mask)
return p - buf;
}
-static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
-{
- if (bdev->bd_disk == NULL)
- return NULL;
-
- return bdev_get_queue(bdev);
-}
-
static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct hd_struct *p = dev_to_part(dev);
- struct request_queue *q;
- struct block_device *bdev;
+ struct block_device *bdev = dev_to_bdev(dev);
+ struct request_queue *q = bdev_get_queue(bdev);
struct blk_trace *bt;
ssize_t ret = -ENXIO;
- bdev = bdget(part_devt(p));
- if (bdev == NULL)
- goto out;
-
- q = blk_trace_get_queue(bdev);
- if (q == NULL)
- goto out_bdput;
-
- mutex_lock(&q->blk_trace_mutex);
+ mutex_lock(&q->debugfs_mutex);
bt = rcu_dereference_protected(q->blk_trace,
- lockdep_is_held(&q->blk_trace_mutex));
+ lockdep_is_held(&q->debugfs_mutex));
if (attr == &dev_attr_enable) {
ret = sprintf(buf, "%u\n", !!bt);
goto out_unlock_bdev;
@@ -1838,10 +1790,7 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
ret = sprintf(buf, "%llu\n", bt->end_lba);
out_unlock_bdev:
- mutex_unlock(&q->blk_trace_mutex);
-out_bdput:
- bdput(bdev);
-out:
+ mutex_unlock(&q->debugfs_mutex);
return ret;
}
@@ -1849,9 +1798,8 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct block_device *bdev;
- struct request_queue *q;
- struct hd_struct *p;
+ struct block_device *bdev = dev_to_bdev(dev);
+ struct request_queue *q = bdev_get_queue(bdev);
struct blk_trace *bt;
u64 value;
ssize_t ret = -EINVAL;
@@ -1867,24 +1815,15 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
goto out;
value = ret;
}
- } else if (kstrtoull(buf, 0, &value))
- goto out;
-
- ret = -ENXIO;
-
- p = dev_to_part(dev);
- bdev = bdget(part_devt(p));
- if (bdev == NULL)
- goto out;
-
- q = blk_trace_get_queue(bdev);
- if (q == NULL)
- goto out_bdput;
+ } else {
+ if (kstrtoull(buf, 0, &value))
+ goto out;
+ }
- mutex_lock(&q->blk_trace_mutex);
+ mutex_lock(&q->debugfs_mutex);
bt = rcu_dereference_protected(q->blk_trace,
- lockdep_is_held(&q->blk_trace_mutex));
+ lockdep_is_held(&q->debugfs_mutex));
if (attr == &dev_attr_enable) {
if (!!value == !!bt) {
ret = 0;
@@ -1901,7 +1840,7 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
if (bt == NULL) {
ret = blk_trace_setup_queue(q, bdev);
bt = rcu_dereference_protected(q->blk_trace,
- lockdep_is_held(&q->blk_trace_mutex));
+ lockdep_is_held(&q->debugfs_mutex));
}
if (ret == 0) {
@@ -1916,37 +1855,33 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
}
out_unlock_bdev:
- mutex_unlock(&q->blk_trace_mutex);
-out_bdput:
- bdput(bdev);
+ mutex_unlock(&q->debugfs_mutex);
out:
return ret ? ret : count;
}
-
-int blk_trace_init_sysfs(struct device *dev)
-{
- return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
-}
-
-void blk_trace_remove_sysfs(struct device *dev)
-{
- sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
-}
-
#endif /* CONFIG_BLK_DEV_IO_TRACE */
#ifdef CONFIG_EVENT_TRACING
-void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes)
+/**
+ * blk_fill_rwbs - Fill the buffer rwbs by mapping op to character string.
+ * @rwbs: buffer to be filled
+ * @opf: request operation type (REQ_OP_XXX) and flags for the tracepoint
+ *
+ * Description:
+ * Maps each request operation and flag to a single character and fills the
+ * buffer provided by the caller with the resulting string.
+ *
+ */
+void blk_fill_rwbs(char *rwbs, blk_opf_t opf)
{
int i = 0;
- if (op & REQ_PREFLUSH)
+ if (opf & REQ_PREFLUSH)
rwbs[i++] = 'F';
- switch (op & REQ_OP_MASK) {
+ switch (opf & REQ_OP_MASK) {
case REQ_OP_WRITE:
- case REQ_OP_WRITE_SAME:
rwbs[i++] = 'W';
break;
case REQ_OP_DISCARD:
@@ -1966,13 +1901,13 @@ void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes)
rwbs[i++] = 'N';
}
- if (op & REQ_FUA)
+ if (opf & REQ_FUA)
rwbs[i++] = 'F';
- if (op & REQ_RAHEAD)
+ if (opf & REQ_RAHEAD)
rwbs[i++] = 'A';
- if (op & REQ_SYNC)
+ if (opf & REQ_SYNC)
rwbs[i++] = 'S';
- if (op & REQ_META)
+ if (opf & REQ_META)
rwbs[i++] = 'M';
rwbs[i] = '\0';
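
/*
 * A minimal usage sketch (not part of this patch) for the reworked helper:
 * a kernel-side caller decodes an opf word into the short "rwbs" string
 * printed by the block tracepoints.  The 8-byte buffer size and the flag
 * combination are assumptions chosen for illustration.
 */
static inline void example_fill_rwbs(void)
{
	char rwbs[8];	/* sized like the tracepoint RWBS_LEN buffers */

	blk_fill_rwbs(rwbs, REQ_OP_WRITE | REQ_SYNC | REQ_FUA);
	/* rwbs now holds "WFS": Write, FUA, Sync */
}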
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 7bc3d6175868..7ac6c52b25eb 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -6,22 +6,45 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
+#include <linux/bpf_verifier.h>
#include <linux/bpf_perf_event.h>
+#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
+#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>
+#include <linux/btf_ids.h>
+#include <linux/bpf_lsm.h>
+#include <linux/fprobe.h>
+#include <linux/bsearch.h>
+#include <linux/sort.h>
+#include <linux/key.h>
+#include <linux/verification.h>
+#include <linux/namei.h>
+#include <linux/fileattr.h>
+
+#include <net/bpf_sk_storage.h>
+
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/btf.h>
#include <asm/tlb.h>
#include "trace_probe.h"
#include "trace.h"
+#define CREATE_TRACE_POINTS
+#include "bpf_trace.h"
+
#define bpf_event_rcu_dereference(p) \
rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
+#define MAX_UPROBE_MULTI_CNT (1U << 20)
+#define MAX_KPROBE_MULTI_CNT (1U << 20)
+
#ifdef CONFIG_MODULES
struct bpf_trace_module {
struct module *module;
@@ -62,6 +85,15 @@ static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
+ u64 flags, const struct btf **btf,
+ s32 *btf_id);
+static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx);
+static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx);
+
+static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx);
+static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx);
+
/**
* trace_call_bpf - invoke BPF program
* @call: tracepoint event
@@ -80,9 +112,6 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
unsigned int ret;
- if (in_nmi()) /* not supported yet */
- return 1;
-
cant_sleep();
if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
@@ -92,6 +121,9 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
* and don't send kprobe event into ring-buffer,
* so return zero here
*/
+ rcu_read_lock();
+ bpf_prog_inc_misses_counters(rcu_dereference(call->prog_array));
+ rcu_read_unlock();
ret = 0;
goto out;
}
@@ -100,7 +132,7 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
* Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
* to all call sites, we did a bpf_prog_array_valid() there to check
* whether call->prog_array is empty or not, which is
- * a heurisitc to speed up execution.
+ * a heuristic to speed up execution.
*
* If bpf_prog_array_valid() fetched prog_array was
* non-NULL, we go into trace_call_bpf() and do the actual
@@ -111,7 +143,10 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
* out of events when it was updated in between this and the
* rcu_dereference() which is accepted risk.
*/
- ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);
+ rcu_read_lock();
+ ret = bpf_prog_run_array(rcu_dereference(call->prog_array),
+ ctx, bpf_prog_run);
+ rcu_read_unlock();
out:
__this_cpu_dec(bpf_prog_active);
@@ -168,6 +203,16 @@ bpf_probe_read_user_str_common(void *dst, u32 size,
{
int ret;
+ /*
+ * NB: We rely on strncpy_from_user() not copying junk past the NUL
+ * terminator into `dst`.
+ *
+ * strncpy_from_user() does long-sized strides in the fast path. If the
+ * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
+ * then there could be junk after the NUL in `dst`. If the user takes `dst`
+ * and keys a hash map with it, then semantically identical strings can
+ * occupy multiple entries in the map.
+ */
ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
if (unlikely(ret < 0))
memset(dst, 0, size);
@@ -189,22 +234,6 @@ const struct bpf_func_proto bpf_probe_read_user_str_proto = {
.arg3_type = ARG_ANYTHING,
};
-static __always_inline int
-bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
-{
- int ret = security_locked_down(LOCKDOWN_BPF_READ);
-
- if (unlikely(ret < 0))
- goto fail;
- ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
- if (unlikely(ret < 0))
- goto fail;
- return ret;
-fail:
- memset(dst, 0, size);
- return ret;
-}
-
BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
const void *, unsafe_ptr)
{
@@ -223,10 +252,7 @@ const struct bpf_func_proto bpf_probe_read_kernel_proto = {
static __always_inline int
bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
{
- int ret = security_locked_down(LOCKDOWN_BPF_READ);
-
- if (unlikely(ret < 0))
- goto fail;
+ int ret;
/*
* The strncpy_from_kernel_nofault() call will likely not fill the
@@ -239,11 +265,7 @@ bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
*/
ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
if (unlikely(ret < 0))
- goto fail;
-
- return ret;
-fail:
- memset(dst, 0, size);
+ memset(dst, 0, size);
return ret;
}
@@ -321,8 +343,6 @@ BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
if (unlikely(in_interrupt() ||
current->flags & (PF_KTHREAD | PF_EXITING)))
return -EPERM;
- if (unlikely(uaccess_kernel()))
- return -EPERM;
if (unlikely(!nmi_uaccess_okay()))
return -EPERM;
@@ -334,7 +354,7 @@ static const struct bpf_func_proto bpf_probe_write_user_proto = {
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_ANYTHING,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
};
@@ -349,371 +369,140 @@ static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
return &bpf_probe_write_user_proto;
}
-static void bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
- size_t bufsz)
-{
- void __user *user_ptr = (__force void __user *)unsafe_ptr;
-
- buf[0] = 0;
-
- switch (fmt_ptype) {
- case 's':
-#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
- if ((unsigned long)unsafe_ptr < TASK_SIZE) {
- strncpy_from_user_nofault(buf, user_ptr, bufsz);
- break;
- }
- fallthrough;
-#endif
- case 'k':
- strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
- break;
- case 'u':
- strncpy_from_user_nofault(buf, user_ptr, bufsz);
- break;
- }
-}
+#define MAX_TRACE_PRINTK_VARARGS 3
+#define BPF_TRACE_PRINTK_SIZE 1024
-/*
- * Only limited trace_printk() conversion specifiers allowed:
- * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pks %pus %s
- */
BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
u64, arg2, u64, arg3)
{
- int i, mod[3] = {}, fmt_cnt = 0;
- char buf[64], fmt_ptype;
- void *unsafe_ptr = NULL;
- bool str_seen = false;
-
- /*
- * bpf_check()->check_func_arg()->check_stack_boundary()
- * guarantees that fmt points to bpf program stack,
- * fmt_size bytes of it were initialized and fmt_size > 0
- */
- if (fmt[--fmt_size] != 0)
- return -EINVAL;
-
- /* check format string for allowed specifiers */
- for (i = 0; i < fmt_size; i++) {
- if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
- return -EINVAL;
-
- if (fmt[i] != '%')
- continue;
-
- if (fmt_cnt >= 3)
- return -EINVAL;
-
- /* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
- i++;
- if (fmt[i] == 'l') {
- mod[fmt_cnt]++;
- i++;
- } else if (fmt[i] == 'p') {
- mod[fmt_cnt]++;
- if ((fmt[i + 1] == 'k' ||
- fmt[i + 1] == 'u') &&
- fmt[i + 2] == 's') {
- fmt_ptype = fmt[i + 1];
- i += 2;
- goto fmt_str;
- }
+ u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
+ struct bpf_bprintf_data data = {
+ .get_bin_args = true,
+ .get_buf = true,
+ };
+ int ret;
- /* disallow any further format extensions */
- if (fmt[i + 1] != 0 &&
- !isspace(fmt[i + 1]) &&
- !ispunct(fmt[i + 1]))
- return -EINVAL;
-
- goto fmt_next;
- } else if (fmt[i] == 's') {
- mod[fmt_cnt]++;
- fmt_ptype = fmt[i];
-fmt_str:
- if (str_seen)
- /* allow only one '%s' per fmt string */
- return -EINVAL;
- str_seen = true;
-
- if (fmt[i + 1] != 0 &&
- !isspace(fmt[i + 1]) &&
- !ispunct(fmt[i + 1]))
- return -EINVAL;
-
- switch (fmt_cnt) {
- case 0:
- unsafe_ptr = (void *)(long)arg1;
- arg1 = (long)buf;
- break;
- case 1:
- unsafe_ptr = (void *)(long)arg2;
- arg2 = (long)buf;
- break;
- case 2:
- unsafe_ptr = (void *)(long)arg3;
- arg3 = (long)buf;
- break;
- }
+ ret = bpf_bprintf_prepare(fmt, fmt_size, args,
+ MAX_TRACE_PRINTK_VARARGS, &data);
+ if (ret < 0)
+ return ret;
- bpf_trace_copy_string(buf, unsafe_ptr, fmt_ptype,
- sizeof(buf));
- goto fmt_next;
- }
+ ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);
- if (fmt[i] == 'l') {
- mod[fmt_cnt]++;
- i++;
- }
+ trace_bpf_trace_printk(data.buf);
- if (fmt[i] != 'i' && fmt[i] != 'd' &&
- fmt[i] != 'u' && fmt[i] != 'x')
- return -EINVAL;
-fmt_next:
- fmt_cnt++;
- }
+ bpf_bprintf_cleanup(&data);
-/* Horrid workaround for getting va_list handling working with different
- * argument type combinations generically for 32 and 64 bit archs.
- */
-#define __BPF_TP_EMIT() __BPF_ARG3_TP()
-#define __BPF_TP(...) \
- __trace_printk(0 /* Fake ip */, \
- fmt, ##__VA_ARGS__)
-
-#define __BPF_ARG1_TP(...) \
- ((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64)) \
- ? __BPF_TP(arg1, ##__VA_ARGS__) \
- : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32)) \
- ? __BPF_TP((long)arg1, ##__VA_ARGS__) \
- : __BPF_TP((u32)arg1, ##__VA_ARGS__)))
-
-#define __BPF_ARG2_TP(...) \
- ((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64)) \
- ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__) \
- : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32)) \
- ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__) \
- : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))
-
-#define __BPF_ARG3_TP(...) \
- ((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64)) \
- ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__) \
- : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32)) \
- ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__) \
- : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))
-
- return __BPF_TP_EMIT();
+ return ret;
}
static const struct bpf_func_proto bpf_trace_printk_proto = {
.func = bpf_trace_printk,
.gpl_only = true,
.ret_type = RET_INTEGER,
- .arg1_type = ARG_PTR_TO_MEM,
+ .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg2_type = ARG_CONST_SIZE,
};
-const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
+static void __set_printk_clr_event(void)
{
/*
- * this program might be calling bpf_trace_printk,
- * so allocate per-cpu printk buffers
+ * This program might be calling bpf_trace_printk,
+ * so enable the associated bpf_trace/bpf_trace_printk event.
+ * Repeat this each time as it is possible a user has
+ * disabled bpf_trace_printk events. By loading a program
+ * calling bpf_trace_printk(), however, the user has expressed
+ * the intent to see such events.
*/
- trace_printk_init_buffers();
+ if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
+ pr_warn_ratelimited("could not enable bpf_trace_printk events");
+}
+const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
+{
+ __set_printk_clr_event();
return &bpf_trace_printk_proto;
}
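
/*
 * A hedged sketch of the consumer side (assumed, not part of this patch): a
 * BPF program usually reaches bpf_trace_printk() through libbpf's
 * bpf_printk() wrapper, and the output is read from the
 * bpf_trace/bpf_trace_printk event via trace_pipe.  The attach point and
 * message are illustrative assumptions.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("kprobe/do_sys_openat2")
int trace_open(void *ctx)
{
	/* bpf_printk() expands to bpf_trace_printk() for up to three args */
	bpf_printk("openat2 on cpu %d\n", bpf_get_smp_processor_id());
	return 0;
}

char LICENSE[] SEC("license") = "GPL";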
-#define MAX_SEQ_PRINTF_VARARGS 12
-#define MAX_SEQ_PRINTF_MAX_MEMCPY 6
-#define MAX_SEQ_PRINTF_STR_LEN 128
-
-struct bpf_seq_printf_buf {
- char buf[MAX_SEQ_PRINTF_MAX_MEMCPY][MAX_SEQ_PRINTF_STR_LEN];
-};
-static DEFINE_PER_CPU(struct bpf_seq_printf_buf, bpf_seq_printf_buf);
-static DEFINE_PER_CPU(int, bpf_seq_printf_buf_used);
-
-BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
- const void *, data, u32, data_len)
+BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, args,
+ u32, data_len)
{
- int err = -EINVAL, fmt_cnt = 0, memcpy_cnt = 0;
- int i, buf_used, copy_size, num_args;
- u64 params[MAX_SEQ_PRINTF_VARARGS];
- struct bpf_seq_printf_buf *bufs;
- const u64 *args = data;
-
- buf_used = this_cpu_inc_return(bpf_seq_printf_buf_used);
- if (WARN_ON_ONCE(buf_used > 1)) {
- err = -EBUSY;
- goto out;
- }
-
- bufs = this_cpu_ptr(&bpf_seq_printf_buf);
-
- /*
- * bpf_check()->check_func_arg()->check_stack_boundary()
- * guarantees that fmt points to bpf program stack,
- * fmt_size bytes of it were initialized and fmt_size > 0
- */
- if (fmt[--fmt_size] != 0)
- goto out;
-
- if (data_len & 7)
- goto out;
-
- for (i = 0; i < fmt_size; i++) {
- if (fmt[i] == '%') {
- if (fmt[i + 1] == '%')
- i++;
- else if (!data || !data_len)
- goto out;
- }
- }
+ struct bpf_bprintf_data data = {
+ .get_bin_args = true,
+ .get_buf = true,
+ };
+ int ret, num_args;
+ if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
+ (data_len && !args))
+ return -EINVAL;
num_args = data_len / 8;
- /* check format string for allowed specifiers */
- for (i = 0; i < fmt_size; i++) {
- /* only printable ascii for now. */
- if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
- err = -EINVAL;
- goto out;
- }
-
- if (fmt[i] != '%')
- continue;
-
- if (fmt[i + 1] == '%') {
- i++;
- continue;
- }
-
- if (fmt_cnt >= MAX_SEQ_PRINTF_VARARGS) {
- err = -E2BIG;
- goto out;
- }
-
- if (fmt_cnt >= num_args) {
- err = -EINVAL;
- goto out;
- }
-
- /* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
- i++;
-
- /* skip optional "[0 +-][num]" width formating field */
- while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' ||
- fmt[i] == ' ')
- i++;
- if (fmt[i] >= '1' && fmt[i] <= '9') {
- i++;
- while (fmt[i] >= '0' && fmt[i] <= '9')
- i++;
- }
-
- if (fmt[i] == 's') {
- void *unsafe_ptr;
-
- /* try our best to copy */
- if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) {
- err = -E2BIG;
- goto out;
- }
-
- unsafe_ptr = (void *)(long)args[fmt_cnt];
- err = strncpy_from_kernel_nofault(bufs->buf[memcpy_cnt],
- unsafe_ptr, MAX_SEQ_PRINTF_STR_LEN);
- if (err < 0)
- bufs->buf[memcpy_cnt][0] = '\0';
- params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt];
+ ret = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
+ if (ret < 0)
+ return ret;
- fmt_cnt++;
- memcpy_cnt++;
- continue;
- }
+ ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);
- if (fmt[i] == 'p') {
- if (fmt[i + 1] == 0 ||
- fmt[i + 1] == 'K' ||
- fmt[i + 1] == 'x') {
- /* just kernel pointers */
- params[fmt_cnt] = args[fmt_cnt];
- fmt_cnt++;
- continue;
- }
+ trace_bpf_trace_printk(data.buf);
- /* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
- if (fmt[i + 1] != 'i' && fmt[i + 1] != 'I') {
- err = -EINVAL;
- goto out;
- }
- if (fmt[i + 2] != '4' && fmt[i + 2] != '6') {
- err = -EINVAL;
- goto out;
- }
-
- if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) {
- err = -E2BIG;
- goto out;
- }
+ bpf_bprintf_cleanup(&data);
+ return ret;
+}
- copy_size = (fmt[i + 2] == '4') ? 4 : 16;
+static const struct bpf_func_proto bpf_trace_vprintk_proto = {
+ .func = bpf_trace_vprintk,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
+ .arg2_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
+ .arg4_type = ARG_CONST_SIZE_OR_ZERO,
+};
- err = copy_from_kernel_nofault(bufs->buf[memcpy_cnt],
- (void *) (long) args[fmt_cnt],
- copy_size);
- if (err < 0)
- memset(bufs->buf[memcpy_cnt], 0, copy_size);
- params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt];
+const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
+{
+ __set_printk_clr_event();
+ return &bpf_trace_vprintk_proto;
+}
- i += 2;
- fmt_cnt++;
- memcpy_cnt++;
- continue;
- }
+BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
+ const void *, args, u32, data_len)
+{
+ struct bpf_bprintf_data data = {
+ .get_bin_args = true,
+ };
+ int err, num_args;
- if (fmt[i] == 'l') {
- i++;
- if (fmt[i] == 'l')
- i++;
- }
+ if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
+ (data_len && !args))
+ return -EINVAL;
+ num_args = data_len / 8;
- if (fmt[i] != 'i' && fmt[i] != 'd' &&
- fmt[i] != 'u' && fmt[i] != 'x') {
- err = -EINVAL;
- goto out;
- }
+ err = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
+ if (err < 0)
+ return err;
- params[fmt_cnt] = args[fmt_cnt];
- fmt_cnt++;
- }
+ seq_bprintf(m, fmt, data.bin_args);
- /* Maximumly we can have MAX_SEQ_PRINTF_VARARGS parameter, just give
- * all of them to seq_printf().
- */
- seq_printf(m, fmt, params[0], params[1], params[2], params[3],
- params[4], params[5], params[6], params[7], params[8],
- params[9], params[10], params[11]);
+ bpf_bprintf_cleanup(&data);
- err = seq_has_overflowed(m) ? -EOVERFLOW : 0;
-out:
- this_cpu_dec(bpf_seq_printf_buf_used);
- return err;
+ return seq_has_overflowed(m) ? -EOVERFLOW : 0;
}
-static int bpf_seq_printf_btf_ids[5];
+BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)
+
static const struct bpf_func_proto bpf_seq_printf_proto = {
.func = bpf_seq_printf,
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg1_btf_id = &btf_seq_file_ids[0],
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
- .arg4_type = ARG_PTR_TO_MEM_OR_NULL,
+ .arg4_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
- .btf_id = bpf_seq_printf_btf_ids,
};
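
/*
 * A sketch of the intended caller (assumed, not part of this patch): a BPF
 * task iterator program writing one line per task through the seq_file.  It
 * relies on libbpf's BPF_SEQ_PRINTF() convenience macro, which packs the
 * varargs into the args/data_len layout this helper now hands to
 * bpf_bprintf_prepare().
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("iter/task")
int dump_task(struct bpf_iter__task *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	struct task_struct *task = ctx->task;

	if (!task)
		return 0;

	BPF_SEQ_PRINTF(seq, "%8d %8d\n", task->tgid, task->pid);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";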
BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
@@ -721,15 +510,39 @@ BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
return seq_write(m, data, len) ? -EOVERFLOW : 0;
}
-static int bpf_seq_write_btf_ids[5];
static const struct bpf_func_proto bpf_seq_write_proto = {
.func = bpf_seq_write,
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg1_btf_id = &btf_seq_file_ids[0],
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
+};
+
+BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
+ u32, btf_ptr_size, u64, flags)
+{
+ const struct btf *btf;
+ s32 btf_id;
+ int ret;
+
+ ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
+ if (ret)
+ return ret;
+
+ return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
+}
+
+static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
+ .func = bpf_seq_printf_btf,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_BTF_ID,
+ .arg1_btf_id = &btf_seq_file_ids[0],
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE_OR_ZERO,
- .btf_id = bpf_seq_write_btf_ids,
+ .arg4_type = ARG_ANYTHING,
};
static __always_inline int
@@ -848,8 +661,7 @@ static DEFINE_PER_CPU(int, bpf_trace_nest_level);
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
u64, flags, void *, data, u64, size)
{
- struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
- int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
+ struct bpf_trace_sample_data *sds;
struct perf_raw_record raw = {
.frag = {
.size = size,
@@ -857,7 +669,11 @@ BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
},
};
struct perf_sample_data *sd;
- int err;
+ int nest_level, err;
+
+ preempt_disable();
+ sds = this_cpu_ptr(&bpf_trace_sds);
+ nest_level = this_cpu_inc_return(bpf_trace_nest_level);
if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
err = -EBUSY;
@@ -872,12 +688,12 @@ BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
}
perf_sample_data_init(sd, 0, 0);
- sd->raw = &raw;
+ perf_sample_save_raw_data(sd, &raw);
err = __bpf_perf_event_output(regs, map, flags, sd);
-
out:
this_cpu_dec(bpf_trace_nest_level);
+ preempt_enable();
return err;
}
@@ -888,7 +704,7 @@ static const struct bpf_func_proto bpf_perf_event_output_proto = {
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
- .arg4_type = ARG_PTR_TO_MEM,
+ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
@@ -902,7 +718,6 @@ static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
- int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
struct perf_raw_frag frag = {
.copy = ctx_copy,
.size = ctx_size,
@@ -919,8 +734,12 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
};
struct perf_sample_data *sd;
struct pt_regs *regs;
+ int nest_level;
u64 ret;
+ preempt_disable();
+ nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
+
if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
ret = -EBUSY;
goto out;
@@ -930,11 +749,12 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
perf_fetch_caller_regs(regs);
perf_sample_data_init(sd, 0, 0);
- sd->raw = &raw;
+ perf_sample_save_raw_data(sd, &raw);
ret = __bpf_perf_event_output(regs, map, flags, sd);
out:
this_cpu_dec(bpf_event_output_nest_level);
+ preempt_enable();
return ret;
}
@@ -949,6 +769,35 @@ const struct bpf_func_proto bpf_get_current_task_proto = {
.ret_type = RET_INTEGER,
};
+BPF_CALL_0(bpf_get_current_task_btf)
+{
+ return (unsigned long) current;
+}
+
+const struct bpf_func_proto bpf_get_current_task_btf_proto = {
+ .func = bpf_get_current_task_btf,
+ .gpl_only = true,
+ .ret_type = RET_PTR_TO_BTF_ID_TRUSTED,
+ .ret_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
+};
+
+BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task)
+{
+ return (unsigned long) task_pt_regs(task);
+}
+
+BTF_ID_LIST(bpf_task_pt_regs_ids)
+BTF_ID(struct, pt_regs)
+
+const struct bpf_func_proto bpf_task_pt_regs_proto = {
+ .func = bpf_task_pt_regs,
+ .gpl_only = true,
+ .arg1_type = ARG_PTR_TO_BTF_ID,
+ .arg1_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
+ .ret_type = RET_PTR_TO_BTF_ID,
+ .ret_btf_id = &bpf_task_pt_regs_ids[0],
+};
+
BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
struct bpf_array *array = container_of(map, struct bpf_array, map);
@@ -987,6 +836,7 @@ static void do_bpf_send_signal(struct irq_work *entry)
work = container_of(entry, struct send_signal_irq_work, irq_work);
group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
+ put_task_struct(work->task);
}
static int bpf_send_signal_common(u32 sig, enum pid_type type)
@@ -1000,10 +850,11 @@ static int bpf_send_signal_common(u32 sig, enum pid_type type)
*/
if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
return -EPERM;
- if (unlikely(uaccess_kernel()))
- return -EPERM;
if (unlikely(!nmi_uaccess_okay()))
return -EPERM;
+ /* The target task must not be global init (pid 1), to avoid a kernel panic. */
+ if (unlikely(is_global_init(current)))
+ return -EPERM;
if (irqs_disabled()) {
/* Do an early check on signal validity. Otherwise,
@@ -1013,14 +864,14 @@ static int bpf_send_signal_common(u32 sig, enum pid_type type)
return -EINVAL;
work = this_cpu_ptr(&send_signal_work);
- if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY)
+ if (irq_work_is_busy(&work->irq_work))
return -EBUSY;
/* Add the current task, which is the target of the signal being sent,
* to the irq_work. The current task may change when queued
* irq works get executed.
*/
- work->task = current;
+ work->task = get_task_struct(current);
work->sig = sig;
work->type = type;
irq_work_queue(&work->irq_work);
@@ -1054,7 +905,603 @@ static const struct bpf_func_proto bpf_send_signal_thread_proto = {
.arg1_type = ARG_ANYTHING,
};
-const struct bpf_func_proto *
+BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
+{
+ struct path copy;
+ long len;
+ char *p;
+
+ if (!sz)
+ return 0;
+
+ /*
+ * The path pointer is verified as trusted and safe to use,
+ * but let's double-check it's valid anyway to work around
+ * a potentially broken verifier.
+ */
+ len = copy_from_kernel_nofault(&copy, path, sizeof(*path));
+ if (len < 0)
+ return len;
+
+ p = d_path(&copy, buf, sz);
+ if (IS_ERR(p)) {
+ len = PTR_ERR(p);
+ } else {
+ len = buf + sz - p;
+ memmove(buf, p, len);
+ }
+
+ return len;
+}
+
+BTF_SET_START(btf_allowlist_d_path)
+#ifdef CONFIG_SECURITY
+BTF_ID(func, security_file_permission)
+BTF_ID(func, security_inode_getattr)
+BTF_ID(func, security_file_open)
+#endif
+#ifdef CONFIG_SECURITY_PATH
+BTF_ID(func, security_path_truncate)
+#endif
+BTF_ID(func, vfs_truncate)
+BTF_ID(func, vfs_fallocate)
+BTF_ID(func, dentry_open)
+BTF_ID(func, vfs_getattr)
+BTF_ID(func, filp_close)
+BTF_SET_END(btf_allowlist_d_path)
+
+static bool bpf_d_path_allowed(const struct bpf_prog *prog)
+{
+ if (prog->type == BPF_PROG_TYPE_TRACING &&
+ prog->expected_attach_type == BPF_TRACE_ITER)
+ return true;
+
+ if (prog->type == BPF_PROG_TYPE_LSM)
+ return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);
+
+ return btf_id_set_contains(&btf_allowlist_d_path,
+ prog->aux->attach_btf_id);
+}
+
+BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)
+
+static const struct bpf_func_proto bpf_d_path_proto = {
+ .func = bpf_d_path,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_BTF_ID,
+ .arg1_btf_id = &bpf_d_path_btf_ids[0],
+ .arg2_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
+ .allowed = bpf_d_path_allowed,
+};
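
/*
 * A usage sketch under stated assumptions (not part of this patch): a
 * tracing program attached to one of the allowlisted functions above can
 * resolve a file's path into a buffer.  The attach target and buffer size
 * are illustrative.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("fentry/filp_close")
int BPF_PROG(trace_close, struct file *file)
{
	char buf[256];
	long len;

	len = bpf_d_path(&file->f_path, buf, sizeof(buf));
	if (len > 0)
		bpf_printk("closing %s\n", buf);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";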
+
+#define BTF_F_ALL (BTF_F_COMPACT | BTF_F_NONAME | \
+ BTF_F_PTR_RAW | BTF_F_ZERO)
+
+static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
+ u64 flags, const struct btf **btf,
+ s32 *btf_id)
+{
+ const struct btf_type *t;
+
+ if (unlikely(flags & ~(BTF_F_ALL)))
+ return -EINVAL;
+
+ if (btf_ptr_size != sizeof(struct btf_ptr))
+ return -EINVAL;
+
+ *btf = bpf_get_btf_vmlinux();
+
+ if (IS_ERR_OR_NULL(*btf))
+ return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;
+
+ if (ptr->type_id > 0)
+ *btf_id = ptr->type_id;
+ else
+ return -EINVAL;
+
+ if (*btf_id > 0)
+ t = btf_type_by_id(*btf, *btf_id);
+ if (*btf_id <= 0 || !t)
+ return -ENOENT;
+
+ return 0;
+}
+
+BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
+ u32, btf_ptr_size, u64, flags)
+{
+ const struct btf *btf;
+ s32 btf_id;
+ int ret;
+
+ ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
+ if (ret)
+ return ret;
+
+ return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
+ flags);
+}
+
+const struct bpf_func_proto bpf_snprintf_btf_proto = {
+ .func = bpf_snprintf_btf,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
+ .arg4_type = ARG_CONST_SIZE,
+ .arg5_type = ARG_ANYTHING,
+};
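
/*
 * A hedged sketch (not from this patch) of the typical call pattern: point
 * a struct btf_ptr at a kernel object, give it the object's BTF type id,
 * and let the helper render it as text.  bpf_core_type_id_kernel() and the
 * raw_tp attach point are assumptions borrowed from common libbpf usage.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

static char out[2048];	/* global buffer, keeps the data off the BPF stack */

SEC("raw_tp/sched_switch")
int dump_current(void *ctx)
{
	struct btf_ptr ptr = {};

	ptr.ptr = (void *)bpf_get_current_task();
	ptr.type_id = bpf_core_type_id_kernel(struct task_struct);

	/* pretty-print *current* according to its BTF description */
	bpf_snprintf_btf(out, sizeof(out), &ptr, sizeof(ptr), 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";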
+
+BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
+{
+ /* This helper call is inlined by verifier. */
+ return ((u64 *)ctx)[-2];
+}
+
+static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
+ .func = bpf_get_func_ip_tracing,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+};
+
+#ifdef CONFIG_X86_KERNEL_IBT
+static unsigned long get_entry_ip(unsigned long fentry_ip)
+{
+ u32 instr;
+
+ /* Be extra safe here in case the entry ip is on a page edge. */
+ if (get_kernel_nofault(instr, (u32 *) fentry_ip - 1))
+ return fentry_ip;
+ if (is_endbr(instr))
+ fentry_ip -= ENDBR_INSN_SIZE;
+ return fentry_ip;
+}
+#else
+#define get_entry_ip(fentry_ip) fentry_ip
+#endif
+
+BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
+{
+ struct bpf_trace_run_ctx *run_ctx __maybe_unused;
+ struct kprobe *kp;
+
+#ifdef CONFIG_UPROBES
+ run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
+ if (run_ctx->is_uprobe)
+ return ((struct uprobe_dispatch_data *)current->utask->vaddr)->bp_addr;
+#endif
+
+ kp = kprobe_running();
+
+ if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY))
+ return 0;
+
+ return get_entry_ip((uintptr_t)kp->addr);
+}
+
+static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
+ .func = bpf_get_func_ip_kprobe,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+};
+
+BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs)
+{
+ return bpf_kprobe_multi_entry_ip(current->bpf_ctx);
+}
+
+static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = {
+ .func = bpf_get_func_ip_kprobe_multi,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+};
+
+BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi, struct pt_regs *, regs)
+{
+ return bpf_kprobe_multi_cookie(current->bpf_ctx);
+}
+
+static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = {
+ .func = bpf_get_attach_cookie_kprobe_multi,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+};
+
+BPF_CALL_1(bpf_get_func_ip_uprobe_multi, struct pt_regs *, regs)
+{
+ return bpf_uprobe_multi_entry_ip(current->bpf_ctx);
+}
+
+static const struct bpf_func_proto bpf_get_func_ip_proto_uprobe_multi = {
+ .func = bpf_get_func_ip_uprobe_multi,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+};
+
+BPF_CALL_1(bpf_get_attach_cookie_uprobe_multi, struct pt_regs *, regs)
+{
+ return bpf_uprobe_multi_cookie(current->bpf_ctx);
+}
+
+static const struct bpf_func_proto bpf_get_attach_cookie_proto_umulti = {
+ .func = bpf_get_attach_cookie_uprobe_multi,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+};
+
+BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
+{
+ struct bpf_trace_run_ctx *run_ctx;
+
+ run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
+ return run_ctx->bpf_cookie;
+}
+
+static const struct bpf_func_proto bpf_get_attach_cookie_proto_trace = {
+ .func = bpf_get_attach_cookie_trace,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+};
+
+BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx)
+{
+ return ctx->event->bpf_cookie;
+}
+
+static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = {
+ .func = bpf_get_attach_cookie_pe,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+};
+
+BPF_CALL_1(bpf_get_attach_cookie_tracing, void *, ctx)
+{
+ struct bpf_trace_run_ctx *run_ctx;
+
+ run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
+ return run_ctx->bpf_cookie;
+}
+
+static const struct bpf_func_proto bpf_get_attach_cookie_proto_tracing = {
+ .func = bpf_get_attach_cookie_tracing,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+};
+
+BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
+{
+#ifndef CONFIG_X86
+ return -ENOENT;
+#else
+ static const u32 br_entry_size = sizeof(struct perf_branch_entry);
+ u32 entry_cnt = size / br_entry_size;
+
+ entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);
+
+ if (unlikely(flags))
+ return -EINVAL;
+
+ if (!entry_cnt)
+ return -ENOENT;
+
+ return entry_cnt * br_entry_size;
+#endif
+}
+
+static const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
+ .func = bpf_get_branch_snapshot,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_UNINIT_MEM,
+ .arg2_type = ARG_CONST_SIZE_OR_ZERO,
+};
+
+BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value)
+{
+ /* This helper call is inlined by verifier. */
+ u64 nr_args = ((u64 *)ctx)[-1];
+
+ if ((u64) n >= nr_args)
+ return -EINVAL;
+ *value = ((u64 *)ctx)[n];
+ return 0;
+}
+
+static const struct bpf_func_proto bpf_get_func_arg_proto = {
+ .func = get_func_arg,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
+ .arg3_type = ARG_PTR_TO_LONG,
+};
+
+BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value)
+{
+ /* This helper call is inlined by verifier. */
+ u64 nr_args = ((u64 *)ctx)[-1];
+
+ *value = ((u64 *)ctx)[nr_args];
+ return 0;
+}
+
+static const struct bpf_func_proto bpf_get_func_ret_proto = {
+ .func = get_func_ret,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_PTR_TO_LONG,
+};
+
+BPF_CALL_1(get_func_arg_cnt, void *, ctx)
+{
+ /* This helper call is inlined by verifier. */
+ return ((u64 *)ctx)[-1];
+}
+
+static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = {
+ .func = get_func_arg_cnt,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+};
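
/*
 * These three helpers read the argument array the trampoline stores just
 * below the program context.  A hedged fexit sketch follows; the attach
 * target and names are assumptions.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("fexit/do_unlinkat")
int BPF_PROG(after_unlinkat)
{
	__u64 nr_args = 0, dfd = 0, ret = 0;

	nr_args = bpf_get_func_arg_cnt(ctx);	/* argument count of do_unlinkat() */
	bpf_get_func_arg(ctx, 0, &dfd);		/* first argument */
	bpf_get_func_ret(ctx, &ret);		/* return value (fexit only) */

	bpf_printk("do_unlinkat: %lu args, ret=%ld\n", nr_args, (long)ret);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";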
+
+#ifdef CONFIG_KEYS
+__bpf_kfunc_start_defs();
+
+/**
+ * bpf_lookup_user_key - lookup a key by its serial
+ * @serial: key handle serial number
+ * @flags: lookup-specific flags
+ *
+ * Search a key with a given *serial* and the provided *flags*.
+ * If found, increment the reference count of the key by one, and
+ * return it in the bpf_key structure.
+ *
+ * The bpf_key structure must be passed to bpf_key_put() when done
+ * with it, so that the key reference count is decremented and the
+ * bpf_key structure is freed.
+ *
+ * Permission checks are deferred to the time the key is used by
+ * one of the available key-specific kfuncs.
+ *
+ * Set *flags* with KEY_LOOKUP_CREATE, to attempt creating a requested
+ * special keyring (e.g. session keyring), if it doesn't yet exist.
+ * Set *flags* with KEY_LOOKUP_PARTIAL, to lookup a key without waiting
+ * for the key construction, and to retrieve uninstantiated keys (keys
+ * without data attached to them).
+ *
+ * Return: a bpf_key pointer with a valid key pointer if the key is found, a
+ * NULL pointer otherwise.
+ */
+__bpf_kfunc struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
+{
+ key_ref_t key_ref;
+ struct bpf_key *bkey;
+
+ if (flags & ~KEY_LOOKUP_ALL)
+ return NULL;
+
+ /*
+ * Permission check is deferred until the key is used, as the
+ * intent of the caller is unknown here.
+ */
+ key_ref = lookup_user_key(serial, flags, KEY_DEFER_PERM_CHECK);
+ if (IS_ERR(key_ref))
+ return NULL;
+
+ bkey = kmalloc(sizeof(*bkey), GFP_KERNEL);
+ if (!bkey) {
+ key_put(key_ref_to_ptr(key_ref));
+ return NULL;
+ }
+
+ bkey->key = key_ref_to_ptr(key_ref);
+ bkey->has_ref = true;
+
+ return bkey;
+}
+
+/**
+ * bpf_lookup_system_key - lookup a key by a system-defined ID
+ * @id: key ID
+ *
+ * Obtain a bpf_key structure with a key pointer set to the passed key ID.
+ * The key pointer is marked as invalid, to prevent bpf_key_put() from
+ * attempting to decrement the key reference count on that pointer. The key
+ * pointer set in such way is currently understood only by
+ * verify_pkcs7_signature().
+ *
+ * Set *id* to one of the values defined in include/linux/verification.h:
+ * 0 for the primary keyring (immutable keyring of system keys);
+ * VERIFY_USE_SECONDARY_KEYRING for both the primary and secondary keyring
+ * (where keys can be added only if they are vouched for by existing keys
+ * in those keyrings); VERIFY_USE_PLATFORM_KEYRING for the platform
+ * keyring (primarily used by the integrity subsystem to verify a kexec'ed
+ * kernel image and, possibly, the initramfs signature).
+ *
+ * Return: a bpf_key pointer with an invalid key pointer set from the
+ * pre-determined ID on success, a NULL pointer otherwise
+ */
+__bpf_kfunc struct bpf_key *bpf_lookup_system_key(u64 id)
+{
+ struct bpf_key *bkey;
+
+ if (system_keyring_id_check(id) < 0)
+ return NULL;
+
+ bkey = kmalloc(sizeof(*bkey), GFP_ATOMIC);
+ if (!bkey)
+ return NULL;
+
+ bkey->key = (struct key *)(unsigned long)id;
+ bkey->has_ref = false;
+
+ return bkey;
+}
+
+/**
+ * bpf_key_put - decrement key reference count if key is valid and free bpf_key
+ * @bkey: bpf_key structure
+ *
+ * Decrement the reference count of the key inside *bkey*, if the pointer
+ * is valid, and free *bkey*.
+ */
+__bpf_kfunc void bpf_key_put(struct bpf_key *bkey)
+{
+ if (bkey->has_ref)
+ key_put(bkey->key);
+
+ kfree(bkey);
+}
+
+#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
+/**
+ * bpf_verify_pkcs7_signature - verify a PKCS#7 signature
+ * @data_ptr: data to verify
+ * @sig_ptr: signature of the data
+ * @trusted_keyring: keyring with keys trusted for signature verification
+ *
+ * Verify the PKCS#7 signature *sig_ptr* against the supplied *data_ptr*
+ * with keys in a keyring referenced by *trusted_keyring*.
+ *
+ * Return: 0 on success, a negative value on error.
+ */
+__bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
+ struct bpf_dynptr_kern *sig_ptr,
+ struct bpf_key *trusted_keyring)
+{
+ const void *data, *sig;
+ u32 data_len, sig_len;
+ int ret;
+
+ if (trusted_keyring->has_ref) {
+ /*
+ * Do the permission check deferred in bpf_lookup_user_key().
+ * See bpf_lookup_user_key() for more details.
+ *
+ * A call to key_task_permission() here would be redundant, as
+ * it is already done by keyring_search() called by
+ * find_asymmetric_key().
+ */
+ ret = key_validate(trusted_keyring->key);
+ if (ret < 0)
+ return ret;
+ }
+
+ data_len = __bpf_dynptr_size(data_ptr);
+ data = __bpf_dynptr_data(data_ptr, data_len);
+ sig_len = __bpf_dynptr_size(sig_ptr);
+ sig = __bpf_dynptr_data(sig_ptr, sig_len);
+
+ return verify_pkcs7_signature(data, data_len, sig, sig_len,
+ trusted_keyring->key,
+ VERIFYING_UNSPECIFIED_SIGNATURE, NULL,
+ NULL);
+}
+#endif /* CONFIG_SYSTEM_DATA_VERIFICATION */
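
/*
 * A sketch of how the key kfuncs above fit together (assumed usage, not
 * part of this patch): acquire a keyring by serial from a sleepable LSM
 * program, hand it to bpf_verify_pkcs7_signature(), then release it.  The
 * hook, serial number and program name are illustrative.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

extern struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym;
extern void bpf_key_put(struct bpf_key *key) __ksym;

SEC("lsm.s/bpf")	/* sleepable, as bpf_lookup_user_key() is KF_SLEEPABLE */
int BPF_PROG(check_sig)
{
	struct bpf_key *trusted;

	trusted = bpf_lookup_user_key(0x1234 /* example serial */, 0);
	if (!trusted)
		return 0;

	/* ... build data/sig dynptrs and call bpf_verify_pkcs7_signature() ... */

	bpf_key_put(trusted);	/* a KF_ACQUIRE result must always be released */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";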
+
+__bpf_kfunc_end_defs();
+
+BTF_SET8_START(key_sig_kfunc_set)
+BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
+BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE)
+#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
+BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE)
+#endif
+BTF_SET8_END(key_sig_kfunc_set)
+
+static const struct btf_kfunc_id_set bpf_key_sig_kfunc_set = {
+ .owner = THIS_MODULE,
+ .set = &key_sig_kfunc_set,
+};
+
+static int __init bpf_key_sig_kfuncs_init(void)
+{
+ return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
+ &bpf_key_sig_kfunc_set);
+}
+
+late_initcall(bpf_key_sig_kfuncs_init);
+#endif /* CONFIG_KEYS */
+
+/* filesystem kfuncs */
+__bpf_kfunc_start_defs();
+
+/**
+ * bpf_get_file_xattr - get xattr of a file
+ * @file: file to get xattr from
+ * @name__str: name of the xattr
+ * @value_ptr: output buffer of the xattr value
+ *
+ * Get xattr *name__str* of *file* and store the output in *value_ptr*.
+ *
+ * For security reasons, only *name__str* with prefix "user." is allowed.
+ *
+ * Return: 0 on success, a negative value on error.
+ */
+__bpf_kfunc int bpf_get_file_xattr(struct file *file, const char *name__str,
+ struct bpf_dynptr_kern *value_ptr)
+{
+ struct dentry *dentry;
+ u32 value_len;
+ void *value;
+ int ret;
+
+ if (strncmp(name__str, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
+ return -EPERM;
+
+ value_len = __bpf_dynptr_size(value_ptr);
+ value = __bpf_dynptr_data_rw(value_ptr, value_len);
+ if (!value)
+ return -EINVAL;
+
+ dentry = file_dentry(file);
+ ret = inode_permission(&nop_mnt_idmap, dentry->d_inode, MAY_READ);
+ if (ret)
+ return ret;
+ return __vfs_getxattr(dentry, dentry->d_inode, name__str, value, value_len);
+}
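
/*
 * A hedged caller sketch (assumed, not part of this patch).  Per the filter
 * added further down it must be an LSM program; the attach hook, attribute
 * name and buffer size are illustrative.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

extern int bpf_get_file_xattr(struct file *file, const char *name__str,
			      struct bpf_dynptr *value_ptr) __ksym;

char value[64];		/* global buffer backing the output dynptr */

SEC("lsm.s/file_open")
int BPF_PROG(read_xattr, struct file *file)
{
	struct bpf_dynptr value_ptr;

	bpf_dynptr_from_mem(value, sizeof(value), 0, &value_ptr);
	/* only "user."-prefixed names are accepted by the kfunc */
	bpf_get_file_xattr(file, "user.example", &value_ptr);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";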
+
+__bpf_kfunc_end_defs();
+
+BTF_SET8_START(fs_kfunc_set_ids)
+BTF_ID_FLAGS(func, bpf_get_file_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
+BTF_SET8_END(fs_kfunc_set_ids)
+
+static int bpf_get_file_xattr_filter(const struct bpf_prog *prog, u32 kfunc_id)
+{
+ if (!btf_id_set8_contains(&fs_kfunc_set_ids, kfunc_id))
+ return 0;
+
+ /* Only allow attaching from LSM hooks, to avoid recursion */
+ return prog->type != BPF_PROG_TYPE_LSM ? -EACCES : 0;
+}
+
+static const struct btf_kfunc_id_set bpf_fs_kfunc_set = {
+ .owner = THIS_MODULE,
+ .set = &fs_kfunc_set_ids,
+ .filter = bpf_get_file_xattr_filter,
+};
+
+static int __init bpf_fs_kfuncs_init(void)
+{
+ return register_btf_kfunc_id_set(BPF_PROG_TYPE_LSM, &bpf_fs_kfunc_set);
+}
+
+late_initcall(bpf_fs_kfuncs_init);
+
+static const struct bpf_func_proto *
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
switch (func_id) {
@@ -1070,6 +1517,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_map_pop_elem_proto;
case BPF_FUNC_map_peek_elem:
return &bpf_map_peek_elem_proto;
+ case BPF_FUNC_map_lookup_percpu_elem:
+ return &bpf_map_lookup_percpu_elem_proto;
case BPF_FUNC_ktime_get_ns:
return &bpf_ktime_get_ns_proto;
case BPF_FUNC_ktime_get_boot_ns:
@@ -1080,6 +1529,10 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_get_current_pid_tgid_proto;
case BPF_FUNC_get_current_task:
return &bpf_get_current_task_proto;
+ case BPF_FUNC_get_current_task_btf:
+ return &bpf_get_current_task_btf_proto;
+ case BPF_FUNC_task_pt_regs:
+ return &bpf_task_pt_regs_proto;
case BPF_FUNC_get_current_uid_gid:
return &bpf_get_current_uid_gid_proto;
case BPF_FUNC_get_current_comm:
@@ -1092,29 +1545,36 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_get_numa_node_id_proto;
case BPF_FUNC_perf_event_read:
return &bpf_perf_event_read_proto;
- case BPF_FUNC_probe_write_user:
- return bpf_get_probe_write_proto();
case BPF_FUNC_current_task_under_cgroup:
return &bpf_current_task_under_cgroup_proto;
case BPF_FUNC_get_prandom_u32:
return &bpf_get_prandom_u32_proto;
+ case BPF_FUNC_probe_write_user:
+ return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
+ NULL : bpf_get_probe_write_proto();
case BPF_FUNC_probe_read_user:
return &bpf_probe_read_user_proto;
case BPF_FUNC_probe_read_kernel:
- return &bpf_probe_read_kernel_proto;
+ return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
+ NULL : &bpf_probe_read_kernel_proto;
case BPF_FUNC_probe_read_user_str:
return &bpf_probe_read_user_str_proto;
case BPF_FUNC_probe_read_kernel_str:
- return &bpf_probe_read_kernel_str_proto;
+ return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
+ NULL : &bpf_probe_read_kernel_str_proto;
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
case BPF_FUNC_probe_read:
- return &bpf_probe_read_compat_proto;
+ return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
+ NULL : &bpf_probe_read_compat_proto;
case BPF_FUNC_probe_read_str:
- return &bpf_probe_read_compat_str_proto;
+ return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
+ NULL : &bpf_probe_read_compat_str_proto;
#endif
#ifdef CONFIG_CGROUPS
- case BPF_FUNC_get_current_cgroup_id:
- return &bpf_get_current_cgroup_id_proto;
+ case BPF_FUNC_cgrp_storage_get:
+ return &bpf_cgrp_storage_get_proto;
+ case BPF_FUNC_cgrp_storage_delete:
+ return &bpf_cgrp_storage_delete_proto;
#endif
case BPF_FUNC_send_signal:
return &bpf_send_signal_proto;
@@ -1134,8 +1594,42 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_ringbuf_discard_proto;
case BPF_FUNC_ringbuf_query:
return &bpf_ringbuf_query_proto;
+ case BPF_FUNC_jiffies64:
+ return &bpf_jiffies64_proto;
+ case BPF_FUNC_get_task_stack:
+ return &bpf_get_task_stack_proto;
+ case BPF_FUNC_copy_from_user:
+ return &bpf_copy_from_user_proto;
+ case BPF_FUNC_copy_from_user_task:
+ return &bpf_copy_from_user_task_proto;
+ case BPF_FUNC_snprintf_btf:
+ return &bpf_snprintf_btf_proto;
+ case BPF_FUNC_per_cpu_ptr:
+ return &bpf_per_cpu_ptr_proto;
+ case BPF_FUNC_this_cpu_ptr:
+ return &bpf_this_cpu_ptr_proto;
+ case BPF_FUNC_task_storage_get:
+ if (bpf_prog_check_recur(prog))
+ return &bpf_task_storage_get_recur_proto;
+ return &bpf_task_storage_get_proto;
+ case BPF_FUNC_task_storage_delete:
+ if (bpf_prog_check_recur(prog))
+ return &bpf_task_storage_delete_recur_proto;
+ return &bpf_task_storage_delete_proto;
+ case BPF_FUNC_for_each_map_elem:
+ return &bpf_for_each_map_elem_proto;
+ case BPF_FUNC_snprintf:
+ return &bpf_snprintf_proto;
+ case BPF_FUNC_get_func_ip:
+ return &bpf_get_func_ip_proto_tracing;
+ case BPF_FUNC_get_branch_snapshot:
+ return &bpf_get_branch_snapshot_proto;
+ case BPF_FUNC_find_vma:
+ return &bpf_find_vma_proto;
+ case BPF_FUNC_trace_vprintk:
+ return bpf_get_trace_vprintk_proto();
default:
- return NULL;
+ return bpf_base_func_proto(func_id);
}
}
@@ -1153,6 +1647,18 @@ kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_override_return:
return &bpf_override_return_proto;
#endif
+ case BPF_FUNC_get_func_ip:
+ if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI)
+ return &bpf_get_func_ip_proto_kprobe_multi;
+ if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI)
+ return &bpf_get_func_ip_proto_uprobe_multi;
+ return &bpf_get_func_ip_proto_kprobe;
+ case BPF_FUNC_get_attach_cookie:
+ if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI)
+ return &bpf_get_attach_cookie_proto_kmulti;
+ if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI)
+ return &bpf_get_attach_cookie_proto_umulti;
+ return &bpf_get_attach_cookie_proto_trace;
default:
return bpf_tracing_func_proto(func_id, prog);
}
@@ -1207,7 +1713,7 @@ static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
- .arg4_type = ARG_PTR_TO_MEM,
+ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
@@ -1263,6 +1769,8 @@ tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_get_stackid_proto_tp;
case BPF_FUNC_get_stack:
return &bpf_get_stack_proto_tp;
+ case BPF_FUNC_get_attach_cookie:
+ return &bpf_get_attach_cookie_proto_trace;
default:
return bpf_tracing_func_proto(func_id, prog);
}
@@ -1320,9 +1828,6 @@ static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
void *, buf, u32, size, u64, flags)
{
-#ifndef CONFIG_X86
- return -ENOENT;
-#else
static const u32 br_entry_size = sizeof(struct perf_branch_entry);
struct perf_branch_stack *br_stack = ctx->data->br_stack;
u32 to_copy;
@@ -1330,8 +1835,11 @@ BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
return -EINVAL;
+ if (unlikely(!(ctx->data->sample_flags & PERF_SAMPLE_BRANCH_STACK)))
+ return -ENOENT;
+
if (unlikely(!br_stack))
- return -EINVAL;
+ return -ENOENT;
if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
return br_stack->nr * br_entry_size;
@@ -1343,7 +1851,6 @@ BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
memcpy(buf, br_stack->entries, to_copy);
return to_copy;
-#endif
}
static const struct bpf_func_proto bpf_read_branch_records_proto = {
@@ -1363,13 +1870,15 @@ pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_perf_event_output:
return &bpf_perf_event_output_proto_tp;
case BPF_FUNC_get_stackid:
- return &bpf_get_stackid_proto_tp;
+ return &bpf_get_stackid_proto_pe;
case BPF_FUNC_get_stack:
- return &bpf_get_stack_proto_tp;
+ return &bpf_get_stack_proto_pe;
case BPF_FUNC_perf_prog_read_value:
return &bpf_perf_prog_read_value_proto;
case BPF_FUNC_read_branch_records:
return &bpf_read_branch_records_proto;
+ case BPF_FUNC_get_attach_cookie:
+ return &bpf_get_attach_cookie_proto_pe;
default:
return bpf_tracing_func_proto(func_id, prog);
}
@@ -1429,12 +1938,13 @@ static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
- .arg4_type = ARG_PTR_TO_MEM,
+ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
extern const struct bpf_func_proto bpf_skb_output_proto;
extern const struct bpf_func_proto bpf_xdp_output_proto;
+extern const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto;
BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
struct bpf_map *, map, u64, flags)
@@ -1483,7 +1993,7 @@ static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
};
@@ -1506,12 +2016,38 @@ raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
const struct bpf_func_proto *
tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
+ const struct bpf_func_proto *fn;
+
switch (func_id) {
#ifdef CONFIG_NET
case BPF_FUNC_skb_output:
return &bpf_skb_output_proto;
case BPF_FUNC_xdp_output:
return &bpf_xdp_output_proto;
+ case BPF_FUNC_skc_to_tcp6_sock:
+ return &bpf_skc_to_tcp6_sock_proto;
+ case BPF_FUNC_skc_to_tcp_sock:
+ return &bpf_skc_to_tcp_sock_proto;
+ case BPF_FUNC_skc_to_tcp_timewait_sock:
+ return &bpf_skc_to_tcp_timewait_sock_proto;
+ case BPF_FUNC_skc_to_tcp_request_sock:
+ return &bpf_skc_to_tcp_request_sock_proto;
+ case BPF_FUNC_skc_to_udp6_sock:
+ return &bpf_skc_to_udp6_sock_proto;
+ case BPF_FUNC_skc_to_unix_sock:
+ return &bpf_skc_to_unix_sock_proto;
+ case BPF_FUNC_skc_to_mptcp_sock:
+ return &bpf_skc_to_mptcp_sock_proto;
+ case BPF_FUNC_sk_storage_get:
+ return &bpf_sk_storage_get_tracing_proto;
+ case BPF_FUNC_sk_storage_delete:
+ return &bpf_sk_storage_delete_tracing_proto;
+ case BPF_FUNC_sock_from_file:
+ return &bpf_sock_from_file_proto;
+ case BPF_FUNC_get_socket_cookie:
+ return &bpf_get_socket_ptr_cookie_proto;
+ case BPF_FUNC_xdp_get_buff_len:
+ return &bpf_xdp_get_buff_len_trace_proto;
#endif
case BPF_FUNC_seq_printf:
return prog->expected_attach_type == BPF_TRACE_ITER ?
@@ -1521,8 +2057,25 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return prog->expected_attach_type == BPF_TRACE_ITER ?
&bpf_seq_write_proto :
NULL;
+ case BPF_FUNC_seq_printf_btf:
+ return prog->expected_attach_type == BPF_TRACE_ITER ?
+ &bpf_seq_printf_btf_proto :
+ NULL;
+ case BPF_FUNC_d_path:
+ return &bpf_d_path_proto;
+ case BPF_FUNC_get_func_arg:
+ return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_proto : NULL;
+ case BPF_FUNC_get_func_ret:
+ return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL;
+ case BPF_FUNC_get_func_arg_cnt:
+ return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL;
+ case BPF_FUNC_get_attach_cookie:
+ return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto_tracing : NULL;
default:
- return raw_tp_prog_func_proto(func_id, prog);
+ fn = raw_tp_prog_func_proto(func_id, prog);
+ if (!fn && prog->expected_attach_type == BPF_TRACE_ITER)
+ fn = bpf_iter_get_func_proto(func_id, prog);
+ return fn;
}
}
@@ -1531,13 +2084,7 @@ static bool raw_tp_prog_is_valid_access(int off, int size,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{
- if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
- return false;
- if (type != BPF_READ)
- return false;
- if (off % size != 0)
- return false;
- return true;
+ return bpf_tracing_ctx_access(off, size, type);
}
static bool tracing_prog_is_valid_access(int off, int size,
@@ -1545,13 +2092,7 @@ static bool tracing_prog_is_valid_access(int off, int size,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{
- if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
- return false;
- if (type != BPF_READ)
- return false;
- if (off % size != 0)
- return false;
- return btf_ctx_access(off, size, type, prog, info);
+ return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}
int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
@@ -1567,6 +2108,9 @@ const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
};
const struct bpf_prog_ops raw_tracepoint_prog_ops = {
+#ifdef CONFIG_NET
+ .test_run = bpf_prog_test_run_raw_tp,
+#endif
};
const struct bpf_verifier_ops tracing_verifier_ops = {
@@ -1687,7 +2231,8 @@ static DEFINE_MUTEX(bpf_event_mutex);
#define BPF_TRACE_MAX_PROGS 64
int perf_event_attach_bpf_prog(struct perf_event *event,
- struct bpf_prog *prog)
+ struct bpf_prog *prog,
+ u64 bpf_cookie)
{
struct bpf_prog_array *old_array;
struct bpf_prog_array *new_array;
@@ -1714,14 +2259,15 @@ int perf_event_attach_bpf_prog(struct perf_event *event,
goto unlock;
}
- ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
+ ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array);
if (ret < 0)
goto unlock;
/* set the new array to event->tp_event and set event->prog */
event->prog = prog;
+ event->bpf_cookie = bpf_cookie;
rcu_assign_pointer(event->tp_event->prog_array, new_array);
- bpf_prog_array_free(old_array);
+ bpf_prog_array_free_sleepable(old_array);
unlock:
mutex_unlock(&bpf_event_mutex);
@@ -1740,14 +2286,14 @@ void perf_event_detach_bpf_prog(struct perf_event *event)
goto unlock;
old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
- ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
+ ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array);
if (ret == -ENOENT)
goto unlock;
if (ret < 0) {
bpf_prog_array_delete_safe(old_array, event->prog);
} else {
rcu_assign_pointer(event->tp_event->prog_array, new_array);
- bpf_prog_array_free(old_array);
+ bpf_prog_array_free_sleepable(old_array);
}
bpf_prog_put(event->prog);
@@ -1815,19 +2361,27 @@ struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
- struct module *mod = __module_address((unsigned long)btp);
+ struct module *mod;
- if (mod)
- module_put(mod);
+ preempt_disable();
+ mod = __module_address((unsigned long)btp);
+ module_put(mod);
+ preempt_enable();
}
static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
cant_sleep();
+ if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
+ bpf_prog_inc_misses_counter(prog);
+ goto out;
+ }
rcu_read_lock();
- (void) BPF_PROG_RUN(prog, args);
+ (void) bpf_prog_run(prog, args);
rcu_read_unlock();
+out:
+ this_cpu_dec(*(prog->active));
}
#define UNPACK(...) __VA_ARGS__
@@ -1889,7 +2443,8 @@ static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *
if (prog->aux->max_tp_access > btp->writable_size)
return -EINVAL;
- return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
+ return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func,
+ prog);
}
int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
@@ -1904,7 +2459,8 @@ int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
u32 *fd_type, const char **buf,
- u64 *probe_offset, u64 *probe_addr)
+ u64 *probe_offset, u64 *probe_addr,
+ unsigned long *missed)
{
bool is_tracepoint, is_syscall_tp;
struct bpf_prog *prog;
@@ -1926,22 +2482,26 @@ int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
if (is_tracepoint || is_syscall_tp) {
*buf = is_tracepoint ? event->tp_event->tp->name
: event->tp_event->name;
- *fd_type = BPF_FD_TYPE_TRACEPOINT;
- *probe_offset = 0x0;
- *probe_addr = 0x0;
+ /* We allow NULL pointer for tracepoint */
+ if (fd_type)
+ *fd_type = BPF_FD_TYPE_TRACEPOINT;
+ if (probe_offset)
+ *probe_offset = 0x0;
+ if (probe_addr)
+ *probe_addr = 0x0;
} else {
/* kprobe/uprobe */
err = -EOPNOTSUPP;
#ifdef CONFIG_KPROBE_EVENTS
if (flags & TRACE_EVENT_FL_KPROBE)
err = bpf_get_kprobe_info(event, fd_type, buf,
- probe_offset, probe_addr,
+ probe_offset, probe_addr, missed,
event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
#ifdef CONFIG_UPROBE_EVENTS
if (flags & TRACE_EVENT_FL_UPROBE)
err = bpf_get_uprobe_info(event, fd_type, buf,
- probe_offset,
+ probe_offset, probe_addr,
event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
}
@@ -1969,10 +2529,11 @@ static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
{
struct bpf_trace_module *btm, *tmp;
struct module *mod = module;
+ int ret = 0;
if (mod->num_bpf_raw_events == 0 ||
(op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
- return 0;
+ goto out;
mutex_lock(&bpf_module_mutex);
@@ -1982,6 +2543,8 @@ static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
if (btm) {
btm->module = module;
list_add(&btm->list, &bpf_trace_modules);
+ } else {
+ ret = -ENOMEM;
}
break;
case MODULE_STATE_GOING:
@@ -1997,7 +2560,8 @@ static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
mutex_unlock(&bpf_module_mutex);
- return 0;
+out:
+ return notifier_from_errno(ret);
}
static struct notifier_block bpf_module_nb = {
@@ -2012,3 +2576,895 @@ static int __init bpf_event_init(void)
fs_initcall(bpf_event_init);
#endif /* CONFIG_MODULES */
+
+#ifdef CONFIG_FPROBE
+struct bpf_kprobe_multi_link {
+ struct bpf_link link;
+ struct fprobe fp;
+ unsigned long *addrs;
+ u64 *cookies;
+ u32 cnt;
+ u32 mods_cnt;
+ struct module **mods;
+ u32 flags;
+};
+
+struct bpf_kprobe_multi_run_ctx {
+ struct bpf_run_ctx run_ctx;
+ struct bpf_kprobe_multi_link *link;
+ unsigned long entry_ip;
+};
+
+struct user_syms {
+ const char **syms;
+ char *buf;
+};
+
+static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt)
+{
+ unsigned long __user usymbol;
+ const char **syms = NULL;
+ char *buf = NULL, *p;
+ int err = -ENOMEM;
+ unsigned int i;
+
+ syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL);
+ if (!syms)
+ goto error;
+
+ buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL);
+ if (!buf)
+ goto error;
+
+ for (p = buf, i = 0; i < cnt; i++) {
+ if (__get_user(usymbol, usyms + i)) {
+ err = -EFAULT;
+ goto error;
+ }
+ err = strncpy_from_user(p, (const char __user *) usymbol, KSYM_NAME_LEN);
+ if (err == KSYM_NAME_LEN)
+ err = -E2BIG;
+ if (err < 0)
+ goto error;
+ syms[i] = p;
+ p += err + 1;
+ }
+
+ us->syms = syms;
+ us->buf = buf;
+ return 0;
+
+error:
+ if (err) {
+ kvfree(syms);
+ kvfree(buf);
+ }
+ return err;
+}
+
+static void kprobe_multi_put_modules(struct module **mods, u32 cnt)
+{
+ u32 i;
+
+ for (i = 0; i < cnt; i++)
+ module_put(mods[i]);
+}
+
+static void free_user_syms(struct user_syms *us)
+{
+ kvfree(us->syms);
+ kvfree(us->buf);
+}
+
+static void bpf_kprobe_multi_link_release(struct bpf_link *link)
+{
+ struct bpf_kprobe_multi_link *kmulti_link;
+
+ kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
+ unregister_fprobe(&kmulti_link->fp);
+ kprobe_multi_put_modules(kmulti_link->mods, kmulti_link->mods_cnt);
+}
+
+static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link)
+{
+ struct bpf_kprobe_multi_link *kmulti_link;
+
+ kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
+ kvfree(kmulti_link->addrs);
+ kvfree(kmulti_link->cookies);
+ kfree(kmulti_link->mods);
+ kfree(kmulti_link);
+}
+
+static int bpf_kprobe_multi_link_fill_link_info(const struct bpf_link *link,
+ struct bpf_link_info *info)
+{
+ u64 __user *uaddrs = u64_to_user_ptr(info->kprobe_multi.addrs);
+ struct bpf_kprobe_multi_link *kmulti_link;
+ u32 ucount = info->kprobe_multi.count;
+ int err = 0, i;
+
+ if (!uaddrs ^ !ucount)
+ return -EINVAL;
+
+ kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
+ info->kprobe_multi.count = kmulti_link->cnt;
+ info->kprobe_multi.flags = kmulti_link->flags;
+ info->kprobe_multi.missed = kmulti_link->fp.nmissed;
+
+ if (!uaddrs)
+ return 0;
+ if (ucount < kmulti_link->cnt)
+ err = -ENOSPC;
+ else
+ ucount = kmulti_link->cnt;
+
+ if (kallsyms_show_value(current_cred())) {
+ if (copy_to_user(uaddrs, kmulti_link->addrs, ucount * sizeof(u64)))
+ return -EFAULT;
+ } else {
+ for (i = 0; i < ucount; i++) {
+ if (put_user(0, uaddrs + i))
+ return -EFAULT;
+ }
+ }
+ return err;
+}
+
+static const struct bpf_link_ops bpf_kprobe_multi_link_lops = {
+ .release = bpf_kprobe_multi_link_release,
+ .dealloc = bpf_kprobe_multi_link_dealloc,
+ .fill_link_info = bpf_kprobe_multi_link_fill_link_info,
+};
+
+static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void *priv)
+{
+ const struct bpf_kprobe_multi_link *link = priv;
+ unsigned long *addr_a = a, *addr_b = b;
+ u64 *cookie_a, *cookie_b;
+
+ cookie_a = link->cookies + (addr_a - link->addrs);
+ cookie_b = link->cookies + (addr_b - link->addrs);
+
+ /* swap addr_a/addr_b and cookie_a/cookie_b values */
+ swap(*addr_a, *addr_b);
+ swap(*cookie_a, *cookie_b);
+}
+
+static int bpf_kprobe_multi_addrs_cmp(const void *a, const void *b)
+{
+ const unsigned long *addr_a = a, *addr_b = b;
+
+ if (*addr_a == *addr_b)
+ return 0;
+ return *addr_a < *addr_b ? -1 : 1;
+}
+
+static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b, const void *priv)
+{
+ return bpf_kprobe_multi_addrs_cmp(a, b);
+}
+
+static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
+{
+ struct bpf_kprobe_multi_run_ctx *run_ctx;
+ struct bpf_kprobe_multi_link *link;
+ u64 *cookie, entry_ip;
+ unsigned long *addr;
+
+ if (WARN_ON_ONCE(!ctx))
+ return 0;
+ run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx);
+ link = run_ctx->link;
+ if (!link->cookies)
+ return 0;
+ entry_ip = run_ctx->entry_ip;
+ addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip),
+ bpf_kprobe_multi_addrs_cmp);
+ if (!addr)
+ return 0;
+ cookie = link->cookies + (addr - link->addrs);
+ return *cookie;
+}
+
+static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
+{
+ struct bpf_kprobe_multi_run_ctx *run_ctx;
+
+ run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx);
+ return run_ctx->entry_ip;
+}
+
+static int
+kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
+ unsigned long entry_ip, struct pt_regs *regs)
+{
+ struct bpf_kprobe_multi_run_ctx run_ctx = {
+ .link = link,
+ .entry_ip = entry_ip,
+ };
+ struct bpf_run_ctx *old_run_ctx;
+ int err;
+
+ if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
+ bpf_prog_inc_misses_counter(link->link.prog);
+ err = 0;
+ goto out;
+ }
+
+ migrate_disable();
+ rcu_read_lock();
+ old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
+ err = bpf_prog_run(link->link.prog, regs);
+ bpf_reset_run_ctx(old_run_ctx);
+ rcu_read_unlock();
+ migrate_enable();
+
+ out:
+ __this_cpu_dec(bpf_prog_active);
+ return err;
+}
+
+static int
+kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip,
+ unsigned long ret_ip, struct pt_regs *regs,
+ void *data)
+{
+ struct bpf_kprobe_multi_link *link;
+
+ link = container_of(fp, struct bpf_kprobe_multi_link, fp);
+ kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs);
+ return 0;
+}
+
+static void
+kprobe_multi_link_exit_handler(struct fprobe *fp, unsigned long fentry_ip,
+ unsigned long ret_ip, struct pt_regs *regs,
+ void *data)
+{
+ struct bpf_kprobe_multi_link *link;
+
+ link = container_of(fp, struct bpf_kprobe_multi_link, fp);
+ kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs);
+}
+
+static int symbols_cmp_r(const void *a, const void *b, const void *priv)
+{
+ const char **str_a = (const char **) a;
+ const char **str_b = (const char **) b;
+
+ return strcmp(*str_a, *str_b);
+}
+
+struct multi_symbols_sort {
+ const char **funcs;
+ u64 *cookies;
+};
+
+static void symbols_swap_r(void *a, void *b, int size, const void *priv)
+{
+ const struct multi_symbols_sort *data = priv;
+ const char **name_a = a, **name_b = b;
+
+ swap(*name_a, *name_b);
+
+ /* If defined, also swap the related cookies. */
+ if (data->cookies) {
+ u64 *cookie_a, *cookie_b;
+
+ cookie_a = data->cookies + (name_a - data->funcs);
+ cookie_b = data->cookies + (name_b - data->funcs);
+ swap(*cookie_a, *cookie_b);
+ }
+}
+
+struct modules_array {
+ struct module **mods;
+ int mods_cnt;
+ int mods_cap;
+};
+
+static int add_module(struct modules_array *arr, struct module *mod)
+{
+ struct module **mods;
+
+ if (arr->mods_cnt == arr->mods_cap) {
+ arr->mods_cap = max(16, arr->mods_cap * 3 / 2);
+ mods = krealloc_array(arr->mods, arr->mods_cap, sizeof(*mods), GFP_KERNEL);
+ if (!mods)
+ return -ENOMEM;
+ arr->mods = mods;
+ }
+
+ arr->mods[arr->mods_cnt] = mod;
+ arr->mods_cnt++;
+ return 0;
+}
+
+static bool has_module(struct modules_array *arr, struct module *mod)
+{
+ int i;
+
+ for (i = arr->mods_cnt - 1; i >= 0; i--) {
+ if (arr->mods[i] == mod)
+ return true;
+ }
+ return false;
+}
+
+static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u32 addrs_cnt)
+{
+ struct modules_array arr = {};
+ u32 i, err = 0;
+
+ for (i = 0; i < addrs_cnt; i++) {
+ struct module *mod;
+
+ preempt_disable();
+ mod = __module_address(addrs[i]);
+ /* Either no module or it's already stored */
+ if (!mod || has_module(&arr, mod)) {
+ preempt_enable();
+ continue;
+ }
+ if (!try_module_get(mod))
+ err = -EINVAL;
+ preempt_enable();
+ if (err)
+ break;
+ err = add_module(&arr, mod);
+ if (err) {
+ module_put(mod);
+ break;
+ }
+ }
+
+ /* We return either err < 0 in case of error, ... */
+ if (err) {
+ kprobe_multi_put_modules(arr.mods, arr.mods_cnt);
+ kfree(arr.mods);
+ return err;
+ }
+
+ /* or number of modules found if everything is ok. */
+ *mods = arr.mods;
+ return arr.mods_cnt;
+}
+
+static int addrs_check_error_injection_list(unsigned long *addrs, u32 cnt)
+{
+ u32 i;
+
+ for (i = 0; i < cnt; i++) {
+ if (!within_error_injection_list(addrs[i]))
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
+{
+ struct bpf_kprobe_multi_link *link = NULL;
+ struct bpf_link_primer link_primer;
+ void __user *ucookies;
+ unsigned long *addrs;
+ u32 flags, cnt, size;
+ void __user *uaddrs;
+ u64 *cookies = NULL;
+ void __user *usyms;
+ int err;
+
+ /* no support for 32bit archs yet */
+ if (sizeof(u64) != sizeof(void *))
+ return -EOPNOTSUPP;
+
+ if (prog->expected_attach_type != BPF_TRACE_KPROBE_MULTI)
+ return -EINVAL;
+
+ flags = attr->link_create.kprobe_multi.flags;
+ if (flags & ~BPF_F_KPROBE_MULTI_RETURN)
+ return -EINVAL;
+
+ uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs);
+ usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms);
+ if (!!uaddrs == !!usyms)
+ return -EINVAL;
+
+ cnt = attr->link_create.kprobe_multi.cnt;
+ if (!cnt)
+ return -EINVAL;
+ if (cnt > MAX_KPROBE_MULTI_CNT)
+ return -E2BIG;
+
+ size = cnt * sizeof(*addrs);
+ addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
+ if (!addrs)
+ return -ENOMEM;
+
+ ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies);
+ if (ucookies) {
+ cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
+ if (!cookies) {
+ err = -ENOMEM;
+ goto error;
+ }
+ if (copy_from_user(cookies, ucookies, size)) {
+ err = -EFAULT;
+ goto error;
+ }
+ }
+
+ if (uaddrs) {
+ if (copy_from_user(addrs, uaddrs, size)) {
+ err = -EFAULT;
+ goto error;
+ }
+ } else {
+ struct multi_symbols_sort data = {
+ .cookies = cookies,
+ };
+ struct user_syms us;
+
+ err = copy_user_syms(&us, usyms, cnt);
+ if (err)
+ goto error;
+
+ if (cookies)
+ data.funcs = us.syms;
+
+ sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r,
+ symbols_swap_r, &data);
+
+ err = ftrace_lookup_symbols(us.syms, cnt, addrs);
+ free_user_syms(&us);
+ if (err)
+ goto error;
+ }
+
+ if (prog->kprobe_override && addrs_check_error_injection_list(addrs, cnt)) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI,
+ &bpf_kprobe_multi_link_lops, prog);
+
+ err = bpf_link_prime(&link->link, &link_primer);
+ if (err)
+ goto error;
+
+ if (flags & BPF_F_KPROBE_MULTI_RETURN)
+ link->fp.exit_handler = kprobe_multi_link_exit_handler;
+ else
+ link->fp.entry_handler = kprobe_multi_link_handler;
+
+ link->addrs = addrs;
+ link->cookies = cookies;
+ link->cnt = cnt;
+ link->flags = flags;
+
+ if (cookies) {
+ /*
+ * Sorting addresses will trigger sorting cookies as well
+ * (check bpf_kprobe_multi_cookie_swap). This way we can
+ * find cookie based on the address in bpf_get_attach_cookie
+ * helper.
+ */
+ sort_r(addrs, cnt, sizeof(*addrs),
+ bpf_kprobe_multi_cookie_cmp,
+ bpf_kprobe_multi_cookie_swap,
+ link);
+ }
+
+ err = get_modules_for_addrs(&link->mods, addrs, cnt);
+ if (err < 0) {
+ bpf_link_cleanup(&link_primer);
+ return err;
+ }
+ link->mods_cnt = err;
+
+ err = register_fprobe_ips(&link->fp, addrs, cnt);
+ if (err) {
+ kprobe_multi_put_modules(link->mods, link->mods_cnt);
+ bpf_link_cleanup(&link_primer);
+ return err;
+ }
+
+ return bpf_link_settle(&link_primer);
+
+error:
+ kfree(link);
+ kvfree(addrs);
+ kvfree(cookies);
+ return err;
+}
+#else /* !CONFIG_FPROBE */
+int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
+{
+ return -EOPNOTSUPP;
+}
+static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
+{
+ return 0;
+}
+static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_UPROBES
+struct bpf_uprobe_multi_link;
+
+struct bpf_uprobe {
+ struct bpf_uprobe_multi_link *link;
+ loff_t offset;
+ unsigned long ref_ctr_offset;
+ u64 cookie;
+ struct uprobe_consumer consumer;
+};
+
+struct bpf_uprobe_multi_link {
+ struct path path;
+ struct bpf_link link;
+ u32 cnt;
+ u32 flags;
+ struct bpf_uprobe *uprobes;
+ struct task_struct *task;
+};
+
+struct bpf_uprobe_multi_run_ctx {
+ struct bpf_run_ctx run_ctx;
+ unsigned long entry_ip;
+ struct bpf_uprobe *uprobe;
+};
+
+static void bpf_uprobe_unregister(struct path *path, struct bpf_uprobe *uprobes,
+ u32 cnt)
+{
+ u32 i;
+
+ for (i = 0; i < cnt; i++) {
+ uprobe_unregister(d_real_inode(path->dentry), uprobes[i].offset,
+ &uprobes[i].consumer);
+ }
+}
+
+static void bpf_uprobe_multi_link_release(struct bpf_link *link)
+{
+ struct bpf_uprobe_multi_link *umulti_link;
+
+ umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
+ bpf_uprobe_unregister(&umulti_link->path, umulti_link->uprobes, umulti_link->cnt);
+}
+
+static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link)
+{
+ struct bpf_uprobe_multi_link *umulti_link;
+
+ umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
+ if (umulti_link->task)
+ put_task_struct(umulti_link->task);
+ path_put(&umulti_link->path);
+ kvfree(umulti_link->uprobes);
+ kfree(umulti_link);
+}
+
+static int bpf_uprobe_multi_link_fill_link_info(const struct bpf_link *link,
+ struct bpf_link_info *info)
+{
+ u64 __user *uref_ctr_offsets = u64_to_user_ptr(info->uprobe_multi.ref_ctr_offsets);
+ u64 __user *ucookies = u64_to_user_ptr(info->uprobe_multi.cookies);
+ u64 __user *uoffsets = u64_to_user_ptr(info->uprobe_multi.offsets);
+ u64 __user *upath = u64_to_user_ptr(info->uprobe_multi.path);
+ u32 upath_size = info->uprobe_multi.path_size;
+ struct bpf_uprobe_multi_link *umulti_link;
+ u32 ucount = info->uprobe_multi.count;
+ int err = 0, i;
+ long left;
+
+ if (!upath ^ !upath_size)
+ return -EINVAL;
+
+ if ((uoffsets || uref_ctr_offsets || ucookies) && !ucount)
+ return -EINVAL;
+
+ umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
+ info->uprobe_multi.count = umulti_link->cnt;
+ info->uprobe_multi.flags = umulti_link->flags;
+ info->uprobe_multi.pid = umulti_link->task ?
+ task_pid_nr_ns(umulti_link->task, task_active_pid_ns(current)) : 0;
+
+ if (upath) {
+ char *p, *buf;
+
+ upath_size = min_t(u32, upath_size, PATH_MAX);
+
+ buf = kmalloc(upath_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ p = d_path(&umulti_link->path, buf, upath_size);
+ if (IS_ERR(p)) {
+ kfree(buf);
+ return PTR_ERR(p);
+ }
+ upath_size = buf + upath_size - p;
+ left = copy_to_user(upath, p, upath_size);
+ kfree(buf);
+ if (left)
+ return -EFAULT;
+ info->uprobe_multi.path_size = upath_size;
+ }
+
+ if (!uoffsets && !ucookies && !uref_ctr_offsets)
+ return 0;
+
+ if (ucount < umulti_link->cnt)
+ err = -ENOSPC;
+ else
+ ucount = umulti_link->cnt;
+
+ for (i = 0; i < ucount; i++) {
+ if (uoffsets &&
+ put_user(umulti_link->uprobes[i].offset, uoffsets + i))
+ return -EFAULT;
+ if (uref_ctr_offsets &&
+ put_user(umulti_link->uprobes[i].ref_ctr_offset, uref_ctr_offsets + i))
+ return -EFAULT;
+ if (ucookies &&
+ put_user(umulti_link->uprobes[i].cookie, ucookies + i))
+ return -EFAULT;
+ }
+
+ return err;
+}
+
+static const struct bpf_link_ops bpf_uprobe_multi_link_lops = {
+ .release = bpf_uprobe_multi_link_release,
+ .dealloc = bpf_uprobe_multi_link_dealloc,
+ .fill_link_info = bpf_uprobe_multi_link_fill_link_info,
+};
+
+static int uprobe_prog_run(struct bpf_uprobe *uprobe,
+ unsigned long entry_ip,
+ struct pt_regs *regs)
+{
+ struct bpf_uprobe_multi_link *link = uprobe->link;
+ struct bpf_uprobe_multi_run_ctx run_ctx = {
+ .entry_ip = entry_ip,
+ .uprobe = uprobe,
+ };
+ struct bpf_prog *prog = link->link.prog;
+ bool sleepable = prog->aux->sleepable;
+ struct bpf_run_ctx *old_run_ctx;
+ int err = 0;
+
+ if (link->task && current != link->task)
+ return 0;
+
+ if (sleepable)
+ rcu_read_lock_trace();
+ else
+ rcu_read_lock();
+
+ migrate_disable();
+
+ old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
+ err = bpf_prog_run(link->link.prog, regs);
+ bpf_reset_run_ctx(old_run_ctx);
+
+ migrate_enable();
+
+ if (sleepable)
+ rcu_read_unlock_trace();
+ else
+ rcu_read_unlock();
+ return err;
+}
+
+static bool
+uprobe_multi_link_filter(struct uprobe_consumer *con, enum uprobe_filter_ctx ctx,
+ struct mm_struct *mm)
+{
+ struct bpf_uprobe *uprobe;
+
+ uprobe = container_of(con, struct bpf_uprobe, consumer);
+ return uprobe->link->task->mm == mm;
+}
+
+static int
+uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs)
+{
+ struct bpf_uprobe *uprobe;
+
+ uprobe = container_of(con, struct bpf_uprobe, consumer);
+ return uprobe_prog_run(uprobe, instruction_pointer(regs), regs);
+}
+
+static int
+uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs)
+{
+ struct bpf_uprobe *uprobe;
+
+ uprobe = container_of(con, struct bpf_uprobe, consumer);
+ return uprobe_prog_run(uprobe, func, regs);
+}
+
+static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
+{
+ struct bpf_uprobe_multi_run_ctx *run_ctx;
+
+ run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx);
+ return run_ctx->entry_ip;
+}
+
+static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
+{
+ struct bpf_uprobe_multi_run_ctx *run_ctx;
+
+ run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx);
+ return run_ctx->uprobe->cookie;
+}
+
+int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
+{
+ struct bpf_uprobe_multi_link *link = NULL;
+ unsigned long __user *uref_ctr_offsets;
+ struct bpf_link_primer link_primer;
+ struct bpf_uprobe *uprobes = NULL;
+ struct task_struct *task = NULL;
+ unsigned long __user *uoffsets;
+ u64 __user *ucookies;
+ void __user *upath;
+ u32 flags, cnt, i;
+ struct path path;
+ char *name;
+ pid_t pid;
+ int err;
+
+ /* no support for 32bit archs yet */
+ if (sizeof(u64) != sizeof(void *))
+ return -EOPNOTSUPP;
+
+ if (prog->expected_attach_type != BPF_TRACE_UPROBE_MULTI)
+ return -EINVAL;
+
+ flags = attr->link_create.uprobe_multi.flags;
+ if (flags & ~BPF_F_UPROBE_MULTI_RETURN)
+ return -EINVAL;
+
+ /*
+ * path, offsets and cnt are mandatory,
+ * ref_ctr_offsets and cookies are optional
+ */
+ upath = u64_to_user_ptr(attr->link_create.uprobe_multi.path);
+ uoffsets = u64_to_user_ptr(attr->link_create.uprobe_multi.offsets);
+ cnt = attr->link_create.uprobe_multi.cnt;
+
+ if (!upath || !uoffsets || !cnt)
+ return -EINVAL;
+ if (cnt > MAX_UPROBE_MULTI_CNT)
+ return -E2BIG;
+
+ uref_ctr_offsets = u64_to_user_ptr(attr->link_create.uprobe_multi.ref_ctr_offsets);
+ ucookies = u64_to_user_ptr(attr->link_create.uprobe_multi.cookies);
+
+ name = strndup_user(upath, PATH_MAX);
+ if (IS_ERR(name)) {
+ err = PTR_ERR(name);
+ return err;
+ }
+
+ err = kern_path(name, LOOKUP_FOLLOW, &path);
+ kfree(name);
+ if (err)
+ return err;
+
+ if (!d_is_reg(path.dentry)) {
+ err = -EBADF;
+ goto error_path_put;
+ }
+
+ pid = attr->link_create.uprobe_multi.pid;
+ if (pid) {
+ rcu_read_lock();
+ task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
+ rcu_read_unlock();
+ if (!task) {
+ err = -ESRCH;
+ goto error_path_put;
+ }
+ }
+
+ err = -ENOMEM;
+
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ uprobes = kvcalloc(cnt, sizeof(*uprobes), GFP_KERNEL);
+
+ if (!uprobes || !link)
+ goto error_free;
+
+ for (i = 0; i < cnt; i++) {
+ if (__get_user(uprobes[i].offset, uoffsets + i)) {
+ err = -EFAULT;
+ goto error_free;
+ }
+ if (uprobes[i].offset < 0) {
+ err = -EINVAL;
+ goto error_free;
+ }
+ if (uref_ctr_offsets && __get_user(uprobes[i].ref_ctr_offset, uref_ctr_offsets + i)) {
+ err = -EFAULT;
+ goto error_free;
+ }
+ if (ucookies && __get_user(uprobes[i].cookie, ucookies + i)) {
+ err = -EFAULT;
+ goto error_free;
+ }
+
+ uprobes[i].link = link;
+
+ if (flags & BPF_F_UPROBE_MULTI_RETURN)
+ uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler;
+ else
+ uprobes[i].consumer.handler = uprobe_multi_link_handler;
+
+ if (pid)
+ uprobes[i].consumer.filter = uprobe_multi_link_filter;
+ }
+
+ link->cnt = cnt;
+ link->uprobes = uprobes;
+ link->path = path;
+ link->task = task;
+ link->flags = flags;
+
+ bpf_link_init(&link->link, BPF_LINK_TYPE_UPROBE_MULTI,
+ &bpf_uprobe_multi_link_lops, prog);
+
+ for (i = 0; i < cnt; i++) {
+ err = uprobe_register_refctr(d_real_inode(link->path.dentry),
+ uprobes[i].offset,
+ uprobes[i].ref_ctr_offset,
+ &uprobes[i].consumer);
+ if (err) {
+ bpf_uprobe_unregister(&path, uprobes, i);
+ goto error_free;
+ }
+ }
+
+ err = bpf_link_prime(&link->link, &link_primer);
+ if (err)
+ goto error_free;
+
+ return bpf_link_settle(&link_primer);
+
+error_free:
+ kvfree(uprobes);
+ kfree(link);
+ if (task)
+ put_task_struct(task);
+error_path_put:
+ path_put(&path);
+ return err;
+}
+#else /* !CONFIG_UPROBES */
+int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
+{
+ return -EOPNOTSUPP;
+}
+static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
+{
+ return 0;
+}
+static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
+{
+ return 0;
+}
+#endif /* CONFIG_UPROBES */
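The comment in bpf_kprobe_multi_link_attach() explains why addresses and cookies are sorted together. As a hedged illustration of the user-facing side (not part of this patch), a minimal libbpf snippet that exercises that path could look like the sketch below; the program handle, symbol names, and cookie values are placeholders.

/*
 * Hedged userspace sketch (not part of this patch): attaching a BPF
 * program as a kprobe-multi link with per-symbol cookies via libbpf.
 * The "prog" handle and the symbol list are illustrative placeholders.
 */
#include <bpf/libbpf.h>

static struct bpf_link *attach_kprobe_multi(struct bpf_program *prog)
{
	const char *syms[] = { "vfs_read", "vfs_write" };
	__u64 cookies[]    = { 0xdead, 0xbeef };
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
		.syms    = syms,
		.cookies = cookies,
		.cnt     = 2,
		/* .retprobe = true would select the exit handler path */
	);

	/*
	 * The kernel sorts addrs and cookies together (see
	 * bpf_kprobe_multi_cookie_swap() above), so bpf_get_attach_cookie()
	 * can bsearch the cookie belonging to the address that fired.
	 */
	return bpf_program__attach_kprobe_multi_opts(prog, NULL, &opts);
}

The same link_create path handles BPF_F_KPROBE_MULTI_RETURN for exit probes, and the CONFIG_UPROBES block above provides the analogous uprobe-multi attach.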
diff --git a/kernel/trace/bpf_trace.h b/kernel/trace/bpf_trace.h
new file mode 100644
index 000000000000..9acbc11ac7bb
--- /dev/null
+++ b/kernel/trace/bpf_trace.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM bpf_trace
+
+#if !defined(_TRACE_BPF_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+
+#define _TRACE_BPF_TRACE_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(bpf_trace_printk,
+
+ TP_PROTO(const char *bpf_string),
+
+ TP_ARGS(bpf_string),
+
+ TP_STRUCT__entry(
+ __string(bpf_string, bpf_string)
+ ),
+
+ TP_fast_assign(
+ __assign_str(bpf_string, bpf_string);
+ ),
+
+ TP_printk("%s", __get_str(bpf_string))
+);
+
+#endif /* _TRACE_BPF_TRACE_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE bpf_trace
+
+#include <trace/define_trace.h>
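For context, a hedged sketch of how the generated tracepoint is expected to be consumed; the wrapper below is illustrative and not taken from this patch.

/*
 * Hedged sketch (not part of this header): TRACE_EVENT() above generates
 * trace_bpf_trace_printk() and a matching trace_bpf_trace_printk_enabled()
 * check, which the bpf_trace_printk() helper can use roughly like this:
 */
#include "bpf_trace.h"

static void emit_trace_printk(const char *buf)
{
	if (trace_bpf_trace_printk_enabled())
		trace_bpf_trace_printk(buf);
}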
diff --git a/kernel/trace/error_report-traces.c b/kernel/trace/error_report-traces.c
new file mode 100644
index 000000000000..f89792c25b11
--- /dev/null
+++ b/kernel/trace/error_report-traces.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Error reporting trace points.
+ *
+ * Copyright (C) 2021, Google LLC.
+ */
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/error_report.h>
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(error_report_end);
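For context, a hedged sketch of a caller (not part of this patch); the detector enum value and address are illustrative.

/*
 * Hedged sketch: a detector such as KASAN is expected to emit the
 * exported tracepoint at the end of a report.
 */
#include <trace/events/error_report.h>

static void finish_report(unsigned long access_addr)
{
	trace_error_report_end(ERROR_DETECTOR_KASAN, access_addr);
}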
diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index 1af321dec0f1..c83c005e654e 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -7,6 +7,7 @@
*
* Highly modified by Steven Rostedt (VMware).
*/
+#include <linux/jump_label.h>
#include <linux/suspend.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
@@ -14,6 +15,7 @@
#include <trace/events/sched.h>
#include "ftrace_internal.h"
+#include "trace.h"
#ifdef CONFIG_DYNAMIC_FTRACE
#define ASSIGN_OPS_HASH(opsname, val) \
@@ -23,26 +25,34 @@
#define ASSIGN_OPS_HASH(opsname, val)
#endif
-static bool kill_ftrace_graph;
+DEFINE_STATIC_KEY_FALSE(kill_ftrace_graph);
int ftrace_graph_active;
/* Both enabled by default (can be cleared by function_graph tracer flags) */
static bool fgraph_sleep_time = true;
-/**
- * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
- *
- * ftrace_graph_stop() is called when a severe error is detected in
- * the function graph tracing. This function is called by the critical
- * paths of function graph to keep those paths from doing any more harm.
+#ifdef CONFIG_DYNAMIC_FTRACE
+/*
+ * archs can override this function if they must do something
+ * to enable hook for graph tracer.
+ */
+int __weak ftrace_enable_ftrace_graph_caller(void)
+{
+ return 0;
+}
+
+/*
+ * archs can override this function if they must do something
+ * to disable hook for graph tracer.
*/
-bool ftrace_graph_is_dead(void)
+int __weak ftrace_disable_ftrace_graph_caller(void)
{
- return kill_ftrace_graph;
+ return 0;
}
+#endif
/**
- * ftrace_graph_stop - set to permanently disable function graph tracincg
+ * ftrace_graph_stop - set to permanently disable function graph tracing
*
* In case of an error in function graph tracing, this is called
* to try to keep function graph tracing from causing any more harm.
@@ -51,7 +61,7 @@ bool ftrace_graph_is_dead(void)
*/
void ftrace_graph_stop(void)
{
- kill_ftrace_graph = true;
+ static_branch_enable(&kill_ftrace_graph);
}
/* Add a function return address to the trace stack on thread info.*/
@@ -115,15 +125,17 @@ int function_graph_enter(unsigned long ret, unsigned long func,
{
struct ftrace_graph_ent trace;
+#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
/*
* Skip graph tracing if the return location is served by direct trampoline,
- * since call sequence and return addresses is unpredicatable anymore.
+ * since call sequence and return addresses are unpredictable anyway.
* Ex: BPF trampoline may call original function and may skip frame
* depending on type of BPF programs attached.
*/
if (ftrace_direct_func_count &&
ftrace_find_rec_direct(ret - MCOUNT_INSN_SIZE))
return -EBUSY;
+#endif
trace.func = func;
trace.depth = ++current->curr_ret_depth;
@@ -225,16 +237,23 @@ static struct notifier_block ftrace_suspend_notifier = {
.notifier_call = ftrace_suspend_notifier_call,
};
+/* fgraph_ret_regs is not defined without CONFIG_FUNCTION_GRAPH_RETVAL */
+struct fgraph_ret_regs;
+
/*
* Send the trace to the ring-buffer.
* @return the original return address.
*/
-unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
+static unsigned long __ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs,
+ unsigned long frame_pointer)
{
struct ftrace_graph_ret trace;
unsigned long ret;
ftrace_pop_return_trace(&trace, &ret, frame_pointer);
+#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
+ trace.retval = fgraph_ret_regs_return_value(ret_regs);
+#endif
trace.rettime = trace_clock_local();
ftrace_graph_return(&trace);
/*
@@ -255,6 +274,23 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
return ret;
}
+/*
+ * After all architectures have selected HAVE_FUNCTION_GRAPH_RETVAL, we can
+ * leave only ftrace_return_to_handler(ret_regs).
+ */
+#ifdef CONFIG_HAVE_FUNCTION_GRAPH_RETVAL
+unsigned long ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs)
+{
+ return __ftrace_return_to_handler(ret_regs,
+ fgraph_ret_regs_frame_pointer(ret_regs));
+}
+#else
+unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
+{
+ return __ftrace_return_to_handler(NULL, frame_pointer);
+}
+#endif
+
/**
* ftrace_graph_get_ret_stack - return the entry of the shadow stack
* @task: The task to read the shadow stack from
@@ -333,11 +369,10 @@ unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
#endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
static struct ftrace_ops graph_ops = {
- .func = ftrace_stub,
- .flags = FTRACE_OPS_FL_RECURSION_SAFE |
- FTRACE_OPS_FL_INITIALIZED |
+ .func = ftrace_graph_func,
+ .flags = FTRACE_OPS_FL_INITIALIZED |
FTRACE_OPS_FL_PID |
- FTRACE_OPS_FL_STUB,
+ FTRACE_OPS_GRAPH_STUB,
#ifdef FTRACE_GRAPH_TRAMP_ADDR
.trampoline = FTRACE_GRAPH_TRAMP_ADDR,
/* trampoline_size is only needed for dynamically allocated tramps */
@@ -387,15 +422,14 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
}
}
- read_lock(&tasklist_lock);
- do_each_thread(g, t) {
+ rcu_read_lock();
+ for_each_process_thread(g, t) {
if (start == end) {
ret = -EAGAIN;
goto unlock;
}
if (t->ret_stack == NULL) {
- atomic_set(&t->tracing_graph_pause, 0);
atomic_set(&t->trace_overrun, 0);
t->curr_ret_stack = -1;
t->curr_ret_depth = -1;
@@ -403,10 +437,10 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
smp_wmb();
t->ret_stack = ret_stack_list[start++];
}
- } while_each_thread(g, t);
+ }
unlock:
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
free:
for (i = start; i < end; i++)
kfree(ret_stack_list[i]);
@@ -415,7 +449,9 @@ free:
static void
ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
- struct task_struct *prev, struct task_struct *next)
+ struct task_struct *prev,
+ struct task_struct *next,
+ unsigned int prev_state)
{
unsigned long long timestamp;
int index;
@@ -490,7 +526,6 @@ static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
static void
graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
{
- atomic_set(&t->tracing_graph_pause, 0);
atomic_set(&t->trace_overrun, 0);
t->ftrace_timestamp = 0;
/* make curr_ret_stack visible before we add the ret_stack */
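The hunk above converts kill_ftrace_graph from a bool into a static key and drops the out-of-line ftrace_graph_is_dead(). A hedged sketch of the corresponding read side, which is not shown in this hunk, follows.

/*
 * Hedged sketch (not shown in this hunk): with kill_ftrace_graph now a
 * static key, the check done on graph-tracing hot paths can become a
 * jump label test, assuming the key is declared with
 * DECLARE_STATIC_KEY_FALSE(kill_ftrace_graph) in a shared header:
 */
static inline bool ftrace_graph_is_dead(void)
{
	return static_branch_unlikely(&kill_ftrace_graph);
}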
diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c
new file mode 100644
index 000000000000..9ff018245840
--- /dev/null
+++ b/kernel/trace/fprobe.c
@@ -0,0 +1,386 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * fprobe - Simple ftrace probe wrapper for function entry.
+ */
+#define pr_fmt(fmt) "fprobe: " fmt
+
+#include <linux/err.h>
+#include <linux/fprobe.h>
+#include <linux/kallsyms.h>
+#include <linux/kprobes.h>
+#include <linux/rethook.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+
+#include "trace.h"
+
+struct fprobe_rethook_node {
+ struct rethook_node node;
+ unsigned long entry_ip;
+ unsigned long entry_parent_ip;
+ char data[];
+};
+
+static inline void __fprobe_handler(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *ops, struct ftrace_regs *fregs)
+{
+ struct fprobe_rethook_node *fpr;
+ struct rethook_node *rh = NULL;
+ struct fprobe *fp;
+ void *entry_data = NULL;
+ int ret = 0;
+
+ fp = container_of(ops, struct fprobe, ops);
+
+ if (fp->exit_handler) {
+ rh = rethook_try_get(fp->rethook);
+ if (!rh) {
+ fp->nmissed++;
+ return;
+ }
+ fpr = container_of(rh, struct fprobe_rethook_node, node);
+ fpr->entry_ip = ip;
+ fpr->entry_parent_ip = parent_ip;
+ if (fp->entry_data_size)
+ entry_data = fpr->data;
+ }
+
+ if (fp->entry_handler)
+ ret = fp->entry_handler(fp, ip, parent_ip, ftrace_get_regs(fregs), entry_data);
+
+ /* If entry_handler returns !0, nmissed is not counted. */
+ if (rh) {
+ if (ret)
+ rethook_recycle(rh);
+ else
+ rethook_hook(rh, ftrace_get_regs(fregs), true);
+ }
+}
+
+static void fprobe_handler(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *ops, struct ftrace_regs *fregs)
+{
+ struct fprobe *fp;
+ int bit;
+
+ fp = container_of(ops, struct fprobe, ops);
+ if (fprobe_disabled(fp))
+ return;
+
+ /* recursion detection has to go before any traceable function and
+ * all functions before this point should be marked as notrace
+ */
+ bit = ftrace_test_recursion_trylock(ip, parent_ip);
+ if (bit < 0) {
+ fp->nmissed++;
+ return;
+ }
+ __fprobe_handler(ip, parent_ip, ops, fregs);
+ ftrace_test_recursion_unlock(bit);
+
+}
+NOKPROBE_SYMBOL(fprobe_handler);
+
+static void fprobe_kprobe_handler(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *ops, struct ftrace_regs *fregs)
+{
+ struct fprobe *fp;
+ int bit;
+
+ fp = container_of(ops, struct fprobe, ops);
+ if (fprobe_disabled(fp))
+ return;
+
+ /* recursion detection has to go before any traceable function and
+ * all functions called before this point should be marked as notrace
+ */
+ bit = ftrace_test_recursion_trylock(ip, parent_ip);
+ if (bit < 0) {
+ fp->nmissed++;
+ return;
+ }
+
+ /*
+ * This user handler is shared with other kprobes and is not expected to be
+ * called recursively. So if any other kprobe handler is running, this will
+ * exit as kprobe does. See the section 'Share the callbacks with kprobes'
+ * in Documentation/trace/fprobe.rst for more information.
+ */
+ if (unlikely(kprobe_running())) {
+ fp->nmissed++;
+ goto recursion_unlock;
+ }
+
+ kprobe_busy_begin();
+ __fprobe_handler(ip, parent_ip, ops, fregs);
+ kprobe_busy_end();
+
+recursion_unlock:
+ ftrace_test_recursion_unlock(bit);
+}
+
+static void fprobe_exit_handler(struct rethook_node *rh, void *data,
+ unsigned long ret_ip, struct pt_regs *regs)
+{
+ struct fprobe *fp = (struct fprobe *)data;
+ struct fprobe_rethook_node *fpr;
+ int bit;
+
+ if (!fp || fprobe_disabled(fp))
+ return;
+
+ fpr = container_of(rh, struct fprobe_rethook_node, node);
+
+ /*
+ * we need to ensure there are no calls to traceable functions between the
+ * end of fprobe_handler and the beginning of fprobe_exit_handler.
+ */
+ bit = ftrace_test_recursion_trylock(fpr->entry_ip, fpr->entry_parent_ip);
+ if (bit < 0) {
+ fp->nmissed++;
+ return;
+ }
+
+ fp->exit_handler(fp, fpr->entry_ip, ret_ip, regs,
+ fp->entry_data_size ? (void *)fpr->data : NULL);
+ ftrace_test_recursion_unlock(bit);
+}
+NOKPROBE_SYMBOL(fprobe_exit_handler);
+
+static int symbols_cmp(const void *a, const void *b)
+{
+ const char **str_a = (const char **) a;
+ const char **str_b = (const char **) b;
+
+ return strcmp(*str_a, *str_b);
+}
+
+/* Convert ftrace location address from symbols */
+static unsigned long *get_ftrace_locations(const char **syms, int num)
+{
+ unsigned long *addrs;
+
+ /* Convert symbols to symbol address */
+ addrs = kcalloc(num, sizeof(*addrs), GFP_KERNEL);
+ if (!addrs)
+ return ERR_PTR(-ENOMEM);
+
+ /* ftrace_lookup_symbols expects sorted symbols */
+ sort(syms, num, sizeof(*syms), symbols_cmp, NULL);
+
+ if (!ftrace_lookup_symbols(syms, num, addrs))
+ return addrs;
+
+ kfree(addrs);
+ return ERR_PTR(-ENOENT);
+}
+
+static void fprobe_init(struct fprobe *fp)
+{
+ fp->nmissed = 0;
+ if (fprobe_shared_with_kprobes(fp))
+ fp->ops.func = fprobe_kprobe_handler;
+ else
+ fp->ops.func = fprobe_handler;
+ fp->ops.flags |= FTRACE_OPS_FL_SAVE_REGS;
+}
+
+static int fprobe_init_rethook(struct fprobe *fp, int num)
+{
+ int size;
+
+ if (!fp->exit_handler) {
+ fp->rethook = NULL;
+ return 0;
+ }
+
+ /* Initialize rethook if needed */
+ if (fp->nr_maxactive)
+ num = fp->nr_maxactive;
+ else
+ num *= num_possible_cpus() * 2;
+ if (num <= 0)
+ return -EINVAL;
+
+ size = sizeof(struct fprobe_rethook_node) + fp->entry_data_size;
+
+ /* Initialize rethook */
+ fp->rethook = rethook_alloc((void *)fp, fprobe_exit_handler, size, num);
+ if (IS_ERR(fp->rethook))
+ return PTR_ERR(fp->rethook);
+
+ return 0;
+}
+
+static void fprobe_fail_cleanup(struct fprobe *fp)
+{
+ if (!IS_ERR_OR_NULL(fp->rethook)) {
+ /* Don't need to cleanup rethook->handler because this is not used. */
+ rethook_free(fp->rethook);
+ fp->rethook = NULL;
+ }
+ ftrace_free_filter(&fp->ops);
+}
+
+/**
+ * register_fprobe() - Register fprobe to ftrace by pattern.
+ * @fp: A fprobe data structure to be registered.
+ * @filter: A wildcard pattern of probed symbols.
+ * @notfilter: A wildcard pattern of NOT probed symbols.
+ *
+ * Register @fp to ftrace for enabling the probe on the symbols matching @filter.
+ * If @notfilter is not NULL, the symbols matching @notfilter are not probed.
+ *
+ * Return 0 if @fp is registered successfully, -errno if not.
+ */
+int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter)
+{
+ struct ftrace_hash *hash;
+ unsigned char *str;
+ int ret, len;
+
+ if (!fp || !filter)
+ return -EINVAL;
+
+ fprobe_init(fp);
+
+ len = strlen(filter);
+ str = kstrdup(filter, GFP_KERNEL);
+ ret = ftrace_set_filter(&fp->ops, str, len, 0);
+ kfree(str);
+ if (ret)
+ return ret;
+
+ if (notfilter) {
+ len = strlen(notfilter);
+ str = kstrdup(notfilter, GFP_KERNEL);
+ ret = ftrace_set_notrace(&fp->ops, str, len, 0);
+ kfree(str);
+ if (ret)
+ goto out;
+ }
+
+ /* TODO:
+ * correctly calculate the total number of filtered symbols
+ * from both filter and notfilter.
+ */
+ hash = rcu_access_pointer(fp->ops.local_hash.filter_hash);
+ if (WARN_ON_ONCE(!hash))
+ goto out;
+
+ ret = fprobe_init_rethook(fp, (int)hash->count);
+ if (!ret)
+ ret = register_ftrace_function(&fp->ops);
+
+out:
+ if (ret)
+ fprobe_fail_cleanup(fp);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(register_fprobe);
+
+/**
+ * register_fprobe_ips() - Register fprobe to ftrace by address.
+ * @fp: A fprobe data structure to be registered.
+ * @addrs: An array of target ftrace location addresses.
+ * @num: The number of entries of @addrs.
+ *
+ * Register @fp to ftrace for enabling the probe on the addresses given by @addrs.
+ * The @addrs must be ftrace location addresses, which may be the symbol
+ * address + an arch-dependent offset.
+ * If you are unsure what this means, please use the other registration functions.
+ *
+ * Return 0 if @fp is registered successfully, -errno if not.
+ */
+int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num)
+{
+ int ret;
+
+ if (!fp || !addrs || num <= 0)
+ return -EINVAL;
+
+ fprobe_init(fp);
+
+ ret = ftrace_set_filter_ips(&fp->ops, addrs, num, 0, 0);
+ if (ret)
+ return ret;
+
+ ret = fprobe_init_rethook(fp, num);
+ if (!ret)
+ ret = register_ftrace_function(&fp->ops);
+
+ if (ret)
+ fprobe_fail_cleanup(fp);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(register_fprobe_ips);
+
+/**
+ * register_fprobe_syms() - Register fprobe to ftrace by symbols.
+ * @fp: A fprobe data structure to be registered.
+ * @syms: An array of target symbols.
+ * @num: The number of entries of @syms.
+ *
+ * Register @fp to the symbols given by @syms array. This will be useful if
+ * you are sure the symbols exist in the kernel.
+ *
+ * Return 0 if @fp is registered successfully, -errno if not.
+ */
+int register_fprobe_syms(struct fprobe *fp, const char **syms, int num)
+{
+ unsigned long *addrs;
+ int ret;
+
+ if (!fp || !syms || num <= 0)
+ return -EINVAL;
+
+ addrs = get_ftrace_locations(syms, num);
+ if (IS_ERR(addrs))
+ return PTR_ERR(addrs);
+
+ ret = register_fprobe_ips(fp, addrs, num);
+
+ kfree(addrs);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(register_fprobe_syms);
+
+bool fprobe_is_registered(struct fprobe *fp)
+{
+ if (!fp || (fp->ops.saved_func != fprobe_handler &&
+ fp->ops.saved_func != fprobe_kprobe_handler))
+ return false;
+ return true;
+}
+
+/**
+ * unregister_fprobe() - Unregister fprobe from ftrace
+ * @fp: A fprobe data structure to be unregistered.
+ *
+ * Unregister fprobe (and remove ftrace hooks from the function entries).
+ *
+ * Return 0 if @fp is unregistered successfully, -errno if not.
+ */
+int unregister_fprobe(struct fprobe *fp)
+{
+ int ret;
+
+ if (!fprobe_is_registered(fp))
+ return -EINVAL;
+
+ if (!IS_ERR_OR_NULL(fp->rethook))
+ rethook_stop(fp->rethook);
+
+ ret = unregister_ftrace_function(&fp->ops);
+ if (ret < 0)
+ return ret;
+
+ if (!IS_ERR_OR_NULL(fp->rethook))
+ rethook_free(fp->rethook);
+
+ ftrace_free_filter(&fp->ops);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(unregister_fprobe);
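The kernel-doc above describes register_fprobe()/unregister_fprobe(), but the diff contains no caller. A minimal, hedged module sketch follows; the probed patterns and printouts are illustrative, and the handler signatures mirror the calls made from __fprobe_handler() and fprobe_exit_handler() in this file (they differ in other kernel versions).

/*
 * Hedged sketch (not part of this patch): a minimal fprobe user.
 */
#include <linux/fprobe.h>
#include <linux/module.h>

static int my_entry(struct fprobe *fp, unsigned long ip,
		    unsigned long parent_ip, struct pt_regs *regs, void *data)
{
	pr_info("entry: %pS\n", (void *)ip);
	return 0;		/* 0: keep the rethook and run the exit handler */
}

static void my_exit(struct fprobe *fp, unsigned long ip,
		    unsigned long ret_ip, struct pt_regs *regs, void *data)
{
	pr_info("exit:  %pS returned to %pS\n", (void *)ip, (void *)ret_ip);
}

static struct fprobe my_fprobe = {
	.entry_handler = my_entry,
	.exit_handler  = my_exit,
};

static int __init my_init(void)
{
	/* Probe every vfs_* symbol except those matching the notfilter. */
	return register_fprobe(&my_fprobe, "vfs_*", "vfs_caches_init*");
}

static void __exit my_cleanup(void)
{
	unregister_fprobe(&my_fprobe);
}

module_init(my_init);
module_exit(my_cleanup);
MODULE_LICENSE("GPL");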
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 1903b80db6eb..83ba342aef31 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -45,6 +45,12 @@
#include "trace_output.h"
#include "trace_stat.h"
+/* Flags that do not get reset */
+#define FTRACE_NOCLEAR_FLAGS (FTRACE_FL_DISABLED | FTRACE_FL_TOUCHED | \
+ FTRACE_FL_MODIFIED)
+
+#define FTRACE_INVALID_FUNCTION "__ftrace_invalid_address__"
+
#define FTRACE_WARN_ON(cond) \
({ \
int ___r = cond; \
@@ -80,13 +86,13 @@ enum {
struct ftrace_ops ftrace_list_end __read_mostly = {
.func = ftrace_stub,
- .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
+ .flags = FTRACE_OPS_FL_STUB,
INIT_OPS_HASH(ftrace_list_end)
};
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
-static int last_ftrace_enabled;
+static int __maybe_unused last_ftrace_enabled;
/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
@@ -119,13 +125,35 @@ struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
struct ftrace_ops global_ops;
-#if ARCH_SUPPORTS_FTRACE_OPS
-static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *op, struct pt_regs *regs);
-#else
-/* See comment below, where ftrace_ops_list_func is defined */
-static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
-#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
+/* Defined by vmlinux.lds.h see the comment above arch_ftrace_ops_list_func for details */
+void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct ftrace_regs *fregs);
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
+/*
+ * Stub used to invoke the list ops without requiring a separate trampoline.
+ */
+const struct ftrace_ops ftrace_list_ops = {
+ .func = ftrace_ops_list_func,
+ .flags = FTRACE_OPS_FL_STUB,
+};
+
+static void ftrace_ops_nop_func(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op,
+ struct ftrace_regs *fregs)
+{
+ /* do nothing */
+}
+
+/*
+ * Stub used when a call site is disabled. May be called transiently by threads
+ * which have made it into ftrace_caller but haven't yet recovered the ops at
+ * the point the call site is disabled.
+ */
+const struct ftrace_ops ftrace_nop_ops = {
+ .func = ftrace_ops_nop_func,
+ .flags = FTRACE_OPS_FL_STUB,
+};
#endif
static inline void ftrace_ops_init(struct ftrace_ops *ops)
@@ -139,11 +167,8 @@ static inline void ftrace_ops_init(struct ftrace_ops *ops)
#endif
}
-#define FTRACE_PID_IGNORE -1
-#define FTRACE_PID_TRACE -2
-
static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *op, struct pt_regs *regs)
+ struct ftrace_ops *op, struct ftrace_regs *fregs)
{
struct trace_array *tr = op->private;
int pid;
@@ -157,7 +182,7 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
return;
}
- op->saved_func(ip, parent_ip, op, regs);
+ op->saved_func(ip, parent_ip, op, fregs);
}
static void ftrace_sync_ipi(void *data)
@@ -169,7 +194,7 @@ static void ftrace_sync_ipi(void *data)
static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
{
/*
- * If this is a dynamic, RCU, or per CPU ops, or we force list func,
+ * If this is a dynamic or RCU ops, or we force list func,
* then it needs to call the list anyway.
*/
if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
@@ -233,7 +258,7 @@ static void update_ftrace_function(void)
/*
* For static tracing, we need to be a bit more careful.
* The function change takes affect immediately. Thus,
- * we need to coorditate the setting of the function_trace_ops
+ * we need to coordinate the setting of the function_trace_ops
* with the setting of the ftrace_trace_function.
*
* Set the function to the list ops, which will call the
@@ -326,7 +351,7 @@ int __register_ftrace_function(struct ftrace_ops *ops)
if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT))
return -EBUSY;
- if (!core_kernel_data((unsigned long)ops))
+ if (!is_kernel_core_data((unsigned long)ops))
ops->flags |= FTRACE_OPS_FL_DYNAMIC;
add_ftrace_ops(&ftrace_ops_list, ops);
@@ -584,7 +609,7 @@ static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}
-int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
+static int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
struct ftrace_profile_page *pg;
int functions;
@@ -757,7 +782,7 @@ ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *ops, struct pt_regs *regs)
+ struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
struct ftrace_profile_stat *stat;
struct ftrace_profile *rec;
@@ -869,7 +894,7 @@ static void unregister_ftrace_profiler(void)
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
.func = function_profile_call,
- .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+ .flags = FTRACE_OPS_FL_INITIALIZED,
INIT_OPS_HASH(ftrace_profile_ops)
};
@@ -960,7 +985,6 @@ static struct tracer_stat function_stats __initdata = {
static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
struct ftrace_profile_stat *stat;
- struct dentry *entry;
char *name;
int ret;
int cpu;
@@ -991,10 +1015,9 @@ static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
}
}
- entry = tracefs_create_file("function_profile_enabled", 0644,
- d_tracer, NULL, &ftrace_profile_fops);
- if (!entry)
- pr_warn("Could not create tracefs 'function_profile_enabled' entry\n");
+ trace_create_file("function_profile_enabled",
+ TRACE_MODE_WRITE, d_tracer, NULL,
+ &ftrace_profile_fops);
}
#else /* CONFIG_FUNCTION_PROFILER */
@@ -1043,13 +1066,12 @@ struct ftrace_ops global_ops = {
.local_hash.notrace_hash = EMPTY_HASH,
.local_hash.filter_hash = EMPTY_HASH,
INIT_OPS_HASH(global_ops)
- .flags = FTRACE_OPS_FL_RECURSION_SAFE |
- FTRACE_OPS_FL_INITIALIZED |
+ .flags = FTRACE_OPS_FL_INITIALIZED |
FTRACE_OPS_FL_PID,
};
/*
- * Used by the stack undwinder to know about dynamic ftrace trampolines.
+ * Used by the stack unwinder to know about dynamic ftrace trampolines.
*/
struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
{
@@ -1094,7 +1116,7 @@ struct ftrace_page {
struct ftrace_page *next;
struct dyn_ftrace *records;
int index;
- int size;
+ int order;
};
#define ENTRY_SIZE sizeof(struct dyn_ftrace)
@@ -1161,18 +1183,19 @@ static void __add_hash_entry(struct ftrace_hash *hash,
hash->count++;
}
-static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
+static struct ftrace_func_entry *
+add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
struct ftrace_func_entry *entry;
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
- return -ENOMEM;
+ return NULL;
entry->ip = ip;
__add_hash_entry(hash, entry);
- return 0;
+ return entry;
}
static void
@@ -1257,12 +1280,17 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
}
+/**
+ * ftrace_free_filter - remove all filters for an ftrace_ops
+ * @ops - the ops to remove the filters from
+ */
void ftrace_free_filter(struct ftrace_ops *ops)
{
ftrace_ops_init(ops);
free_ftrace_hash(ops->func_hash->filter_hash);
free_ftrace_hash(ops->func_hash->notrace_hash);
}
+EXPORT_SYMBOL_GPL(ftrace_free_filter);
static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
@@ -1298,6 +1326,7 @@ static int ftrace_add_mod(struct trace_array *tr,
if (!ftrace_mod)
return -ENOMEM;
+ INIT_LIST_HEAD(&ftrace_mod->list);
ftrace_mod->func = kstrdup(func, GFP_KERNEL);
ftrace_mod->module = kstrdup(module, GFP_KERNEL);
ftrace_mod->enable = enable;
@@ -1321,7 +1350,6 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
struct ftrace_func_entry *entry;
struct ftrace_hash *new_hash;
int size;
- int ret;
int i;
new_hash = alloc_ftrace_hash(size_bits);
@@ -1338,8 +1366,7 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
size = 1 << hash->size_bits;
for (i = 0; i < size; i++) {
hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
- ret = add_hash_entry(new_hash, entry->ip);
- if (ret < 0)
+ if (add_hash_entry(new_hash, entry->ip) == NULL)
goto free_hash;
}
}
@@ -1371,10 +1398,10 @@ static struct ftrace_hash *dup_hash(struct ftrace_hash *src, int size)
int i;
/*
- * Make the hash size about 1/2 the # found
+ * Use around half the size (max bit of it), but
+ * a minimum of 2 is fine (as size of 0 or 1 both give 1 for bits).
*/
- for (size /= 2; size; size >>= 1)
- bits++;
+ bits = fls(size / 2);
/* Don't allocate too much */
if (bits > FTRACE_HASH_MAX_BITS)
@@ -1454,7 +1481,7 @@ static bool hash_contains_ip(unsigned long ip,
{
/*
* The function record is a match if it exists in the filter
- * hash and not in the notrace hash. Note, an emty hash is
+ * hash and not in the notrace hash. Note, an empty hash is
* considered a match for the filter hash, but an empty
* notrace hash is considered not in the notrace hash.
*/
@@ -1540,7 +1567,8 @@ static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
key.flags = end; /* overload flags, as it is unsigned long */
for (pg = ftrace_pages_start; pg; pg = pg->next) {
- if (end < pg->records[0].ip ||
+ if (pg->index == 0 ||
+ end < pg->records[0].ip ||
start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
continue;
rec = bsearch(&key, pg->records, pg->index,
@@ -1576,17 +1604,34 @@ unsigned long ftrace_location_range(unsigned long start, unsigned long end)
}
/**
- * ftrace_location - return true if the ip giving is a traced location
+ * ftrace_location - return the ftrace location
* @ip: the instruction pointer to check
*
- * Returns rec->ip if @ip given is a pointer to a ftrace location.
- * That is, the instruction that is either a NOP or call to
- * the function tracer. It checks the ftrace internal tables to
- * determine if the address belongs or not.
+ * If @ip matches the ftrace location, return @ip.
+ * If @ip matches sym+0, return sym's ftrace location.
+ * Otherwise, return 0.
*/
unsigned long ftrace_location(unsigned long ip)
{
- return ftrace_location_range(ip, ip);
+ struct dyn_ftrace *rec;
+ unsigned long offset;
+ unsigned long size;
+
+ rec = lookup_rec(ip, ip);
+ if (!rec) {
+ if (!kallsyms_lookup_size_offset(ip, &size, &offset))
+ goto out;
+
+ /* map sym+0 to __fentry__ */
+ if (!offset)
+ rec = lookup_rec(ip, ip + size - 1);
+ }
+
+ if (rec)
+ return rec->ip;
+
+out:
+ return 0;
}
/**
@@ -1632,8 +1677,22 @@ static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
static struct ftrace_ops *
+ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude);
+static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
+static bool skip_record(struct dyn_ftrace *rec)
+{
+ /*
+ * At boot up, weak functions are set to disable. Function tracing
+ * can be enabled before they are, and they still need to be disabled now.
+ * If the record is disabled, still continue if it is marked as already
+ * enabled (this is needed to keep the accounting working).
+ */
+ return rec->flags & FTRACE_FL_DISABLED &&
+ !(rec->flags & FTRACE_FL_ENABLED);
+}
+
static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
int filter_hash,
bool inc)
@@ -1683,7 +1742,7 @@ static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
int in_hash = 0;
int match = 0;
- if (rec->flags & FTRACE_FL_DISABLED)
+ if (skip_record(rec))
continue;
if (all) {
@@ -1781,7 +1840,7 @@ static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
* to it.
*/
if (ftrace_rec_count(rec) == 1 &&
- ftrace_find_tramp_ops_any(rec))
+ ftrace_find_tramp_ops_any_other(rec, ops))
rec->flags |= FTRACE_FL_TRAMP;
else
rec->flags &= ~FTRACE_FL_TRAMP;
@@ -1791,6 +1850,18 @@ static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
* if rec count is zero.
*/
}
+
+ /*
+ * If the rec has a single associated ops, and ops->func can be
+ * called directly, allow the call site to call via the ops.
+ */
+ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS) &&
+ ftrace_rec_count(rec) == 1 &&
+ ftrace_ops_get_func(ops) == ops->func)
+ rec->flags |= FTRACE_FL_CALL_OPS;
+ else
+ rec->flags &= ~FTRACE_FL_CALL_OPS;
+
count++;
/* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
@@ -1859,6 +1930,13 @@ static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
* - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
* - If the hash is EMPTY_HASH, it hits nothing
* - Anything else hits the recs which match the hash entries.
+ *
+ * DIRECT ops does not have IPMODIFY flag, but we still need to check it
+ * against functions with FTRACE_FL_IPMODIFY. If there is any overlap, call
+ * ops_func(SHARE_IPMODIFY_SELF) to make sure current ops can share with
+ * IPMODIFY. If ops_func(SHARE_IPMODIFY_SELF) returns non-zero, propagate
+ * the return value to the caller and eventually to the owner of the DIRECT
+ * ops.
*/
static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
struct ftrace_hash *old_hash,
@@ -1867,17 +1945,26 @@ static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
struct ftrace_page *pg;
struct dyn_ftrace *rec, *end = NULL;
int in_old, in_new;
+ bool is_ipmodify, is_direct;
/* Only update if the ops has been registered */
if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
return 0;
- if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
+ is_ipmodify = ops->flags & FTRACE_OPS_FL_IPMODIFY;
+ is_direct = ops->flags & FTRACE_OPS_FL_DIRECT;
+
+ /* neither IPMODIFY nor DIRECT, skip */
+ if (!is_ipmodify && !is_direct)
+ return 0;
+
+ if (WARN_ON_ONCE(is_ipmodify && is_direct))
return 0;
/*
- * Since the IPMODIFY is a very address sensitive action, we do not
- * allow ftrace_ops to set all functions to new hash.
+ * Since the IPMODIFY and DIRECT are very address sensitive
+ * actions, we do not allow ftrace_ops to set all functions to new
+ * hash.
*/
if (!new_hash || !old_hash)
return -EINVAL;
@@ -1895,12 +1982,32 @@ static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
continue;
if (in_new) {
- /* New entries must ensure no others are using it */
- if (rec->flags & FTRACE_FL_IPMODIFY)
- goto rollback;
- rec->flags |= FTRACE_FL_IPMODIFY;
- } else /* Removed entry */
+ if (rec->flags & FTRACE_FL_IPMODIFY) {
+ int ret;
+
+ /* Cannot have two ipmodify on same rec */
+ if (is_ipmodify)
+ goto rollback;
+
+ FTRACE_WARN_ON(rec->flags & FTRACE_FL_DIRECT);
+
+ /*
+ * Another ops with IPMODIFY is already
+ * attached. We are now attaching a direct
+ * ops. Run SHARE_IPMODIFY_SELF, to check
+ * whether sharing is supported.
+ */
+ if (!ops->ops_func)
+ return -EBUSY;
+ ret = ops->ops_func(ops, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF);
+ if (ret)
+ return ret;
+ } else if (is_ipmodify) {
+ rec->flags |= FTRACE_FL_IPMODIFY;
+ }
+ } else if (is_ipmodify) {
rec->flags &= ~FTRACE_FL_IPMODIFY;
+ }
} while_for_each_ftrace_rec();
return 0;
@@ -1969,12 +2076,15 @@ static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
static void print_ip_ins(const char *fmt, const unsigned char *p)
{
- int i;
+ char ins[MCOUNT_INSN_SIZE];
- printk(KERN_CONT "%s", fmt);
+ if (copy_from_kernel_nofault(ins, p, MCOUNT_INSN_SIZE)) {
+ printk(KERN_CONT "%s[FAULT] %px\n", fmt, p);
+ return;
+ }
- for (i = 0; i < MCOUNT_INSN_SIZE; i++)
- printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
+ printk(KERN_CONT "%s", fmt);
+ pr_cont("%*phC", MCOUNT_INSN_SIZE, ins);
}
enum ftrace_bug_type ftrace_bug_type;
@@ -2046,8 +2156,9 @@ void ftrace_bug(int failed, struct dyn_ftrace *rec)
struct ftrace_ops *ops = NULL;
pr_info("ftrace record flags: %lx\n", rec->flags);
- pr_cont(" (%ld)%s", ftrace_rec_count(rec),
- rec->flags & FTRACE_FL_REGS ? " R" : " ");
+ pr_cont(" (%ld)%s%s", ftrace_rec_count(rec),
+ rec->flags & FTRACE_FL_REGS ? " R" : " ",
+ rec->flags & FTRACE_FL_CALL_OPS ? " O" : " ");
if (rec->flags & FTRACE_FL_TRAMP_EN) {
ops = ftrace_find_tramp_ops_any(rec);
if (ops) {
@@ -2074,7 +2185,7 @@ static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
ftrace_bug_type = FTRACE_BUG_UNKNOWN;
- if (rec->flags & FTRACE_FL_DISABLED)
+ if (skip_record(rec))
return FTRACE_UPDATE_IGNORE;
/*
@@ -2115,6 +2226,7 @@ static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
* want the direct enabled (it will be done via the
* direct helper). But if DIRECT_EN is set, and
* the count is not one, we need to clear it.
+ *
*/
if (ftrace_rec_count(rec) == 1) {
if (!(rec->flags & FTRACE_FL_DIRECT) !=
@@ -2123,6 +2235,19 @@ static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
} else if (rec->flags & FTRACE_FL_DIRECT_EN) {
flag |= FTRACE_FL_DIRECT;
}
+
+ /*
+ * Ops calls are special, as count matters.
+ * As with direct calls, they must only be enabled when count
+ * is one, otherwise they'll be handled via the list ops.
+ */
+ if (ftrace_rec_count(rec) == 1) {
+ if (!(rec->flags & FTRACE_FL_CALL_OPS) !=
+ !(rec->flags & FTRACE_FL_CALL_OPS_EN))
+ flag |= FTRACE_FL_CALL_OPS;
+ } else if (rec->flags & FTRACE_FL_CALL_OPS_EN) {
+ flag |= FTRACE_FL_CALL_OPS;
+ }
}
/* If the state of this record hasn't changed, then do nothing */
@@ -2134,7 +2259,7 @@ static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
flag ^= rec->flags & FTRACE_FL_ENABLED;
if (update) {
- rec->flags |= FTRACE_FL_ENABLED;
+ rec->flags |= FTRACE_FL_ENABLED | FTRACE_FL_TOUCHED;
if (flag & FTRACE_FL_REGS) {
if (rec->flags & FTRACE_FL_REGS)
rec->flags |= FTRACE_FL_REGS_EN;
@@ -2147,6 +2272,11 @@ static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
else
rec->flags &= ~FTRACE_FL_TRAMP_EN;
}
+
+ /* Keep track of anything that modifies the function */
+ if (rec->flags & (FTRACE_FL_DIRECT | FTRACE_FL_IPMODIFY))
+ rec->flags |= FTRACE_FL_MODIFIED;
+
if (flag & FTRACE_FL_DIRECT) {
/*
* If there's only one user (direct_ops helper)
@@ -2166,6 +2296,21 @@ static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
rec->flags &= ~FTRACE_FL_DIRECT_EN;
}
}
+
+ if (flag & FTRACE_FL_CALL_OPS) {
+ if (ftrace_rec_count(rec) == 1) {
+ if (rec->flags & FTRACE_FL_CALL_OPS)
+ rec->flags |= FTRACE_FL_CALL_OPS_EN;
+ else
+ rec->flags &= ~FTRACE_FL_CALL_OPS_EN;
+ } else {
+ /*
+ * Can only call directly if there's
+ * only one set of associated ops.
+ */
+ rec->flags &= ~FTRACE_FL_CALL_OPS_EN;
+ }
+ }
}
/*
@@ -2188,14 +2333,15 @@ static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
if (update) {
/* If there's no more users, clear all flags */
if (!ftrace_rec_count(rec))
- rec->flags = 0;
+ rec->flags &= FTRACE_NOCLEAR_FLAGS;
else
/*
* Just disable the record, but keep the ops TRAMP
* and REGS states. The _EN flags must be disabled though.
*/
rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
- FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN);
+ FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN |
+ FTRACE_FL_CALL_OPS_EN);
}
ftrace_bug_type = FTRACE_BUG_NOP;
@@ -2203,7 +2349,7 @@ static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
}
/**
- * ftrace_update_record, set a record that now is tracing or not
+ * ftrace_update_record - set a record that now is tracing or not
* @rec: the record to update
* @enable: set to true if the record is tracing, false to force disable
*
@@ -2216,7 +2362,7 @@ int ftrace_update_record(struct dyn_ftrace *rec, bool enable)
}
/**
- * ftrace_test_record, check if the record has been enabled or not
+ * ftrace_test_record - check if the record has been enabled or not
* @rec: the record to test
* @enable: set to true to check if enabled, false if it is disabled
*
@@ -2248,6 +2394,24 @@ ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
}
static struct ftrace_ops *
+ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude)
+{
+ struct ftrace_ops *op;
+ unsigned long ip = rec->ip;
+
+ do_for_each_ftrace_op(op, ftrace_ops_list) {
+
+ if (op == op_exclude || !op->trampoline)
+ continue;
+
+ if (hash_contains_ip(ip, op->func_hash))
+ return op;
+ } while_for_each_ftrace_op(op);
+
+ return NULL;
+}
+
+static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
struct ftrace_ops *op)
{
@@ -2350,9 +2514,28 @@ ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
return NULL;
}
+struct ftrace_ops *
+ftrace_find_unique_ops(struct dyn_ftrace *rec)
+{
+ struct ftrace_ops *op, *found = NULL;
+ unsigned long ip = rec->ip;
+
+ do_for_each_ftrace_op(op, ftrace_ops_list) {
+
+ if (hash_contains_ip(ip, op->func_hash)) {
+ if (found)
+ return NULL;
+ found = op;
+ }
+
+ } while_for_each_ftrace_op(op);
+
+ return found;
+}
+
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
/* Protected by rcu_tasks for reading, and direct_mutex for writing */
-static struct ftrace_hash *direct_functions = EMPTY_HASH;
+static struct ftrace_hash __rcu *direct_functions = EMPTY_HASH;
static DEFINE_MUTEX(direct_mutex);
int ftrace_direct_func_count;
@@ -2372,23 +2555,15 @@ unsigned long ftrace_find_rec_direct(unsigned long ip)
}
static void call_direct_funcs(unsigned long ip, unsigned long pip,
- struct ftrace_ops *ops, struct pt_regs *regs)
+ struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
- unsigned long addr;
+ unsigned long addr = READ_ONCE(ops->direct_call);
- addr = ftrace_find_rec_direct(ip);
if (!addr)
return;
- arch_ftrace_set_direct_caller(regs, addr);
+ arch_ftrace_set_direct_caller(fregs, addr);
}
-
-struct ftrace_ops direct_ops = {
- .func = call_direct_funcs,
- .flags = FTRACE_OPS_FL_IPMODIFY | FTRACE_OPS_FL_RECURSION_SAFE
- | FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS
- | FTRACE_OPS_FL_PERMANENT,
-};
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
/**
@@ -2397,7 +2572,7 @@ struct ftrace_ops direct_ops = {
*
* If the record has the FTRACE_FL_REGS set, that means that it
* wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
- * is not not set, then it wants to convert to the normal callback.
+ * is not set, then it wants to convert to the normal callback.
*
* Returns the address of the trampoline to set to
*/
@@ -2522,7 +2697,7 @@ void __weak ftrace_replace_code(int mod_flags)
do_for_each_ftrace_rec(pg, rec) {
- if (rec->flags & FTRACE_FL_DISABLED)
+ if (skip_record(rec))
continue;
failed = __ftrace_replace_code(rec, enable);
@@ -2542,7 +2717,7 @@ struct ftrace_rec_iter {
};
/**
- * ftrace_rec_iter_start, start up iterating over traced functions
+ * ftrace_rec_iter_start - start up iterating over traced functions
*
* Returns an iterator handle that is used to iterate over all
* the records that represent address locations where functions
@@ -2573,7 +2748,7 @@ struct ftrace_rec_iter *ftrace_rec_iter_start(void)
}
/**
- * ftrace_rec_iter_next, get the next record to process.
+ * ftrace_rec_iter_next - get the next record to process.
* @iter: The handle to the iterator.
*
* Returns the next iterator after the given iterator @iter.
@@ -2598,7 +2773,7 @@ struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
}
/**
- * ftrace_rec_iter_record, get the record at the iterator location
+ * ftrace_rec_iter_record - get the record at the iterator location
* @iter: The current iterator location
*
* Returns the record that the current @iter is at.
@@ -2629,18 +2804,29 @@ ftrace_nop_initialize(struct module *mod, struct dyn_ftrace *rec)
* archs can override this function if they must do something
* before the modifying code is performed.
*/
-int __weak ftrace_arch_code_modify_prepare(void)
+void __weak ftrace_arch_code_modify_prepare(void)
{
- return 0;
}
/*
* archs can override this function if they must do something
* after the modifying code is performed.
*/
-int __weak ftrace_arch_code_modify_post_process(void)
+void __weak ftrace_arch_code_modify_post_process(void)
{
- return 0;
+}
+
+static int update_ftrace_func(ftrace_func_t func)
+{
+ static ftrace_func_t save_func;
+
+ /* Avoid updating if it hasn't changed */
+ if (func == save_func)
+ return 0;
+
+ save_func = func;
+
+ return ftrace_update_ftrace_func(func);
}
void ftrace_modify_all_code(int command)
@@ -2663,7 +2849,7 @@ void ftrace_modify_all_code(int command)
* traced.
*/
if (update) {
- err = ftrace_update_ftrace_func(ftrace_ops_list_func);
+ err = update_ftrace_func(ftrace_ops_list_func);
if (FTRACE_WARN_ON(err))
return;
}
@@ -2679,7 +2865,7 @@ void ftrace_modify_all_code(int command)
/* If irqs are disabled, we are in stop machine */
if (!irqs_disabled())
smp_call_function(ftrace_sync_ipi, NULL, 1);
- err = ftrace_update_ftrace_func(ftrace_trace_function);
+ err = update_ftrace_func(ftrace_trace_function);
if (FTRACE_WARN_ON(err))
return;
}
@@ -2701,7 +2887,7 @@ static int __ftrace_modify_code(void *data)
}
/**
- * ftrace_run_stop_machine, go back to the stop machine method
+ * ftrace_run_stop_machine - go back to the stop machine method
* @command: The command to tell ftrace what to do
*
* If an arch needs to fall back to the stop machine method, the
@@ -2713,7 +2899,7 @@ void ftrace_run_stop_machine(int command)
}
/**
- * arch_ftrace_update_code, modify the code to trace or not trace
+ * arch_ftrace_update_code - modify the code to trace or not trace
* @command: The command that needs to be done
*
* Archs can override this function if it does not need to
@@ -2726,12 +2912,7 @@ void __weak arch_ftrace_update_code(int command)
static void ftrace_run_update_code(int command)
{
- int ret;
-
- ret = ftrace_arch_code_modify_prepare();
- FTRACE_WARN_ON(ret);
- if (ret)
- return;
+ ftrace_arch_code_modify_prepare();
/*
* By default we use stop_machine() to modify the code.
@@ -2741,8 +2922,7 @@ static void ftrace_run_update_code(int command)
*/
arch_ftrace_update_code(command);
- ret = ftrace_arch_code_modify_post_process();
- FTRACE_WARN_ON(ret);
+ ftrace_arch_code_modify_post_process();
}
static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
@@ -2764,6 +2944,51 @@ void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
}
+/* List of trace_ops that have allocated trampolines */
+static LIST_HEAD(ftrace_ops_trampoline_list);
+
+static void ftrace_add_trampoline_to_kallsyms(struct ftrace_ops *ops)
+{
+ lockdep_assert_held(&ftrace_lock);
+ list_add_rcu(&ops->list, &ftrace_ops_trampoline_list);
+}
+
+static void ftrace_remove_trampoline_from_kallsyms(struct ftrace_ops *ops)
+{
+ lockdep_assert_held(&ftrace_lock);
+ list_del_rcu(&ops->list);
+ synchronize_rcu();
+}
+
+/*
+ * "__builtin__ftrace" is used as a module name in /proc/kallsyms for symbols
+ * for pages allocated for ftrace purposes, even though "__builtin__ftrace" is
+ * not a module.
+ */
+#define FTRACE_TRAMPOLINE_MOD "__builtin__ftrace"
+#define FTRACE_TRAMPOLINE_SYM "ftrace_trampoline"
+
+static void ftrace_trampoline_free(struct ftrace_ops *ops)
+{
+ if (ops && (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP) &&
+ ops->trampoline) {
+ /*
+ * Record the text poke event before the ksymbol unregister
+ * event.
+ */
+ perf_event_text_poke((void *)ops->trampoline,
+ (void *)ops->trampoline,
+ ops->trampoline_size, NULL, 0);
+ perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
+ ops->trampoline, ops->trampoline_size,
+ true, FTRACE_TRAMPOLINE_SYM);
+ /* Remove from kallsyms after the perf events */
+ ftrace_remove_trampoline_from_kallsyms(ops);
+ }
+
+ arch_ftrace_trampoline_free(ops);
+}
+
static void ftrace_startup_enable(int command)
{
if (saved_ftrace_func != ftrace_trace_function) {
@@ -2813,6 +3038,8 @@ int ftrace_startup(struct ftrace_ops *ops, int command)
__unregister_ftrace_function(ops);
ftrace_start_up--;
ops->flags &= ~FTRACE_OPS_FL_ENABLED;
+ if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
+ ftrace_trampoline_free(ops);
return ret;
}
@@ -2821,6 +3048,16 @@ int ftrace_startup(struct ftrace_ops *ops, int command)
ftrace_startup_enable(command);
+ /*
+ * If ftrace is in an undefined state, we just remove ops from the list
+ * to prevent a NULL pointer dereference, instead of totally rolling it
+ * back and freeing the trampoline, because those actions could cause
+ * further damage.
+ */
+ if (unlikely(ftrace_disabled)) {
+ __unregister_ftrace_function(ops);
+ return -ENODEV;
+ }
+
ops->flags &= ~FTRACE_OPS_FL_ADDING;
return 0;
@@ -2858,18 +3095,8 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command)
command |= FTRACE_UPDATE_TRACE_FUNC;
}
- if (!command || !ftrace_enabled) {
- /*
- * If these are dynamic or per_cpu ops, they still
- * need their data freed. Since, function tracing is
- * not currently active, we can just free them
- * without synchronizing all CPUs.
- */
- if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
- goto free_ops;
-
- return 0;
- }
+ if (!command || !ftrace_enabled)
+ goto out;
/*
* If the ops uses a trampoline, then it needs to be
@@ -2894,7 +3121,7 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command)
struct dyn_ftrace *rec;
do_for_each_ftrace_rec(pg, rec) {
- if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED))
+ if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_NOCLEAR_FLAGS))
pr_warn(" %pS flags:%lx\n",
(void *)rec->ip, rec->flags);
} while_for_each_ftrace_rec();
@@ -2906,11 +3133,10 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command)
removed_ops = NULL;
ops->flags &= ~FTRACE_OPS_FL_REMOVING;
+out:
/*
* Dynamic ops may be freed, we must make sure that all
* callers are done before leaving this function.
- * The same goes for freeing the per_cpu data of the per_cpu
- * ops.
*/
if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
/*
@@ -2924,56 +3150,21 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command)
synchronize_rcu_tasks_rude();
/*
- * When the kernel is preeptive, tasks can be preempted
+ * When the kernel is preemptive, tasks can be preempted
* while on a ftrace trampoline. Just scheduling a task on
* a CPU is not good enough to flush them. Calling
- * synchornize_rcu_tasks() will wait for those tasks to
+ * synchronize_rcu_tasks() will wait for those tasks to
* execute and either schedule voluntarily or enter user space.
*/
if (IS_ENABLED(CONFIG_PREEMPTION))
synchronize_rcu_tasks();
- free_ops:
- arch_ftrace_trampoline_free(ops);
+ ftrace_trampoline_free(ops);
}
return 0;
}
-static void ftrace_startup_sysctl(void)
-{
- int command;
-
- if (unlikely(ftrace_disabled))
- return;
-
- /* Force update next time */
- saved_ftrace_func = NULL;
- /* ftrace_start_up is true if we want ftrace running */
- if (ftrace_start_up) {
- command = FTRACE_UPDATE_CALLS;
- if (ftrace_graph_active)
- command |= FTRACE_START_FUNC_RET;
- ftrace_startup_enable(command);
- }
-}
-
-static void ftrace_shutdown_sysctl(void)
-{
- int command;
-
- if (unlikely(ftrace_disabled))
- return;
-
- /* ftrace_start_up is true if ftrace is running */
- if (ftrace_start_up) {
- command = FTRACE_DISABLE_CALLS;
- if (ftrace_graph_active)
- command |= FTRACE_STOP_FUNC_RET;
- ftrace_run_update_code(command);
- }
-}
-
static u64 ftrace_update_time;
unsigned long ftrace_update_tot_cnt;
unsigned long ftrace_number_of_pages;
@@ -2989,38 +3180,9 @@ static inline int ops_traces_mod(struct ftrace_ops *ops)
ftrace_hash_empty(ops->func_hash->notrace_hash);
}
-/*
- * Check if the current ops references the record.
- *
- * If the ops traces all functions, then it was already accounted for.
- * If the ops does not trace the current record function, skip it.
- * If the ops ignores the function via notrace filter, skip it.
- */
-static inline bool
-ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
-{
- /* If ops isn't enabled, ignore it */
- if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
- return false;
-
- /* If ops traces all then it includes this function */
- if (ops_traces_mod(ops))
- return true;
-
- /* The function must be in the filter */
- if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
- !__ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
- return false;
-
- /* If in notrace hash, we ignore it too */
- if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
- return false;
-
- return true;
-}
-
static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
{
+ bool init_nop = ftrace_need_init_nop();
struct ftrace_page *pg;
struct dyn_ftrace *p;
u64 start, stop;
@@ -3059,8 +3221,7 @@ static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
* Do the initial record conversion from mcount jump
* to the NOP instructions.
*/
- if (!__is_defined(CC_USING_NOP_MCOUNT) &&
- !ftrace_nop_initialize(mod, p))
+ if (init_nop && !ftrace_nop_initialize(mod, p))
break;
update_cnt++;
@@ -3077,19 +3238,15 @@ static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
static int ftrace_allocate_records(struct ftrace_page *pg, int count)
{
int order;
+ int pages;
int cnt;
if (WARN_ON(!count))
return -EINVAL;
- order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
-
- /*
- * We want to fill as much as possible. No more than a page
- * may be empty.
- */
- while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
- order--;
+ /* We want to fill as much as possible, with no empty pages */
+ pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE);
+ order = fls(pages) - 1;
again:
pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
@@ -3098,7 +3255,7 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count)
/* if we can't allocate this size, try something smaller */
if (!order)
return -ENOMEM;
- order >>= 1;
+ order--;
goto again;
}
@@ -3106,7 +3263,7 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count)
ftrace_number_of_groups++;
cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
- pg->size = cnt;
+ pg->order = order;
if (cnt > count)
cnt = count;
@@ -3114,12 +3271,27 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count)
return cnt;
}
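
A worked example of the new order computation in ftrace_allocate_records(), assuming illustrative sizes of a 4 KiB page and a 16-byte struct dyn_ftrace (so ENTRIES_PER_PAGE is 256):

/*
 *	count = 1200
 *	pages = DIV_ROUND_UP(1200, 256) = 5
 *	order = fls(5) - 1 = 2		-> 4 contiguous pages, 1024 record slots
 *
 * The order is rounded down to the largest power-of-two page count that will
 * be completely filled; the remaining 176 records land in the next
 * ftrace_page allocated by ftrace_allocate_pages(), so no page sits mostly
 * empty.
 */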
+static void ftrace_free_pages(struct ftrace_page *pages)
+{
+ struct ftrace_page *pg = pages;
+
+ while (pg) {
+ if (pg->records) {
+ free_pages((unsigned long)pg->records, pg->order);
+ ftrace_number_of_pages -= 1 << pg->order;
+ }
+ pages = pg->next;
+ kfree(pg);
+ pg = pages;
+ ftrace_number_of_groups--;
+ }
+}
+
static struct ftrace_page *
ftrace_allocate_pages(unsigned long num_to_init)
{
struct ftrace_page *start_pg;
struct ftrace_page *pg;
- int order;
int cnt;
if (!num_to_init)
@@ -3153,16 +3325,7 @@ ftrace_allocate_pages(unsigned long num_to_init)
return start_pg;
free_pages:
- pg = start_pg;
- while (pg) {
- order = get_count_order(pg->size / ENTRIES_PER_PAGE);
- free_pages((unsigned long)pg->records, order);
- start_pg = pg->next;
- kfree(pg);
- pg = start_pg;
- ftrace_number_of_pages -= 1 << order;
- ftrace_number_of_groups--;
- }
+ ftrace_free_pages(start_pg);
pr_info("ftrace: FAILED to allocate memory for functions\n");
return NULL;
}
@@ -3415,7 +3578,10 @@ t_func_next(struct seq_file *m, loff_t *pos)
!ftrace_lookup_ip(iter->hash, rec->ip)) ||
((iter->flags & FTRACE_ITER_ENABLED) &&
- !(rec->flags & FTRACE_FL_ENABLED))) {
+ !(rec->flags & FTRACE_FL_ENABLED)) ||
+
+ ((iter->flags & FTRACE_ITER_TOUCHED) &&
+ !(rec->flags & FTRACE_FL_TOUCHED))) {
rec = NULL;
goto retry;
@@ -3544,6 +3710,105 @@ static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
seq_printf(m, " ->%pS", ptr);
}
+#ifdef FTRACE_MCOUNT_MAX_OFFSET
+/*
+ * Weak functions can still have an mcount/fentry that is saved in
+ * the __mcount_loc section. These can be detected by having a
+ * symbol offset greater than FTRACE_MCOUNT_MAX_OFFSET, as the
+ * symbol found by kallsyms is not the function that the mcount/fentry
+ * is part of. The offset is much greater in these cases.
+ *
+ * Test the record to make sure that the ip points to a valid kallsyms
+ * symbol and, if not, mark it disabled.
+ */
+static int test_for_valid_rec(struct dyn_ftrace *rec)
+{
+ char str[KSYM_SYMBOL_LEN];
+ unsigned long offset;
+ const char *ret;
+
+ ret = kallsyms_lookup(rec->ip, NULL, &offset, NULL, str);
+
+ /* Weak functions can cause invalid addresses */
+ if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) {
+ rec->flags |= FTRACE_FL_DISABLED;
+ return 0;
+ }
+ return 1;
+}
+
+static struct workqueue_struct *ftrace_check_wq __initdata;
+static struct work_struct ftrace_check_work __initdata;
+
+/*
+ * Scan all the mcount/fentry entries to make sure they are valid.
+ */
+static __init void ftrace_check_work_func(struct work_struct *work)
+{
+ struct ftrace_page *pg;
+ struct dyn_ftrace *rec;
+
+ mutex_lock(&ftrace_lock);
+ do_for_each_ftrace_rec(pg, rec) {
+ test_for_valid_rec(rec);
+ } while_for_each_ftrace_rec();
+ mutex_unlock(&ftrace_lock);
+}
+
+static int __init ftrace_check_for_weak_functions(void)
+{
+ INIT_WORK(&ftrace_check_work, ftrace_check_work_func);
+
+ ftrace_check_wq = alloc_workqueue("ftrace_check_wq", WQ_UNBOUND, 0);
+
+ queue_work(ftrace_check_wq, &ftrace_check_work);
+ return 0;
+}
+
+static int __init ftrace_check_sync(void)
+{
+ /* Make sure the ftrace_check updates are finished */
+ if (ftrace_check_wq)
+ destroy_workqueue(ftrace_check_wq);
+ return 0;
+}
+
+late_initcall_sync(ftrace_check_sync);
+subsys_initcall(ftrace_check_for_weak_functions);
+
+static int print_rec(struct seq_file *m, unsigned long ip)
+{
+ unsigned long offset;
+ char str[KSYM_SYMBOL_LEN];
+ char *modname;
+ const char *ret;
+
+ ret = kallsyms_lookup(ip, NULL, &offset, &modname, str);
+ /* Weak functions can cause invalid addresses */
+ if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) {
+ snprintf(str, KSYM_SYMBOL_LEN, "%s_%ld",
+ FTRACE_INVALID_FUNCTION, offset);
+ ret = NULL;
+ }
+
+ seq_puts(m, str);
+ if (modname)
+ seq_printf(m, " [%s]", modname);
+ return ret == NULL ? -1 : 0;
+}
+#else
+static inline int test_for_valid_rec(struct dyn_ftrace *rec)
+{
+ return 1;
+}
+
+static inline int print_rec(struct seq_file *m, unsigned long ip)
+{
+ seq_printf(m, "%ps", (void *)ip);
+ return 0;
+}
+#endif
+
static int t_show(struct seq_file *m, void *v)
{
struct ftrace_iterator *iter = m->private;
@@ -3568,15 +3833,26 @@ static int t_show(struct seq_file *m, void *v)
if (!rec)
return 0;
- seq_printf(m, "%ps", (void *)rec->ip);
- if (iter->flags & FTRACE_ITER_ENABLED) {
+ if (iter->flags & FTRACE_ITER_ADDRS)
+ seq_printf(m, "%lx ", rec->ip);
+
+ if (print_rec(m, rec->ip)) {
+ /* This should only happen when a rec is disabled */
+ WARN_ON_ONCE(!(rec->flags & FTRACE_FL_DISABLED));
+ seq_putc(m, '\n');
+ return 0;
+ }
+
+ if (iter->flags & (FTRACE_ITER_ENABLED | FTRACE_ITER_TOUCHED)) {
struct ftrace_ops *ops;
- seq_printf(m, " (%ld)%s%s%s",
+ seq_printf(m, " (%ld)%s%s%s%s%s",
ftrace_rec_count(rec),
rec->flags & FTRACE_FL_REGS ? " R" : " ",
rec->flags & FTRACE_FL_IPMODIFY ? " I" : " ",
- rec->flags & FTRACE_FL_DIRECT ? " D" : " ");
+ rec->flags & FTRACE_FL_DIRECT ? " D" : " ",
+ rec->flags & FTRACE_FL_CALL_OPS ? " O" : " ",
+ rec->flags & FTRACE_FL_MODIFIED ? " M " : " ");
if (rec->flags & FTRACE_FL_TRAMP_EN) {
ops = ftrace_find_tramp_ops_any(rec);
if (ops) {
@@ -3592,6 +3868,15 @@ static int t_show(struct seq_file *m, void *v)
} else {
add_trampoline_func(m, NULL, rec);
}
+ if (rec->flags & FTRACE_FL_CALL_OPS_EN) {
+ ops = ftrace_find_unique_ops(rec);
+ if (ops) {
+ seq_printf(m, "\tops: %pS (%pS)",
+ ops, ops->func);
+ } else {
+ seq_puts(m, "\tops: ERROR!");
+ }
+ }
if (rec->flags & FTRACE_FL_DIRECT) {
unsigned long direct;
@@ -3661,6 +3946,55 @@ ftrace_enabled_open(struct inode *inode, struct file *file)
return 0;
}
+static int
+ftrace_touched_open(struct inode *inode, struct file *file)
+{
+ struct ftrace_iterator *iter;
+
+ /*
+ * This shows us what functions have ever been enabled
+ * (traced, direct, patched, etc). Not sure if we want lockdown
+ * to hide such critical information for an admin.
+ * Although, perhaps it can show information we don't
+ * want people to see, but if something had traced
+ * something, we probably want to know about it.
+ */
+
+ iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
+ if (!iter)
+ return -ENOMEM;
+
+ iter->pg = ftrace_pages_start;
+ iter->flags = FTRACE_ITER_TOUCHED;
+ iter->ops = &global_ops;
+
+ return 0;
+}
+
+static int
+ftrace_avail_addrs_open(struct inode *inode, struct file *file)
+{
+ struct ftrace_iterator *iter;
+ int ret;
+
+ ret = security_locked_down(LOCKDOWN_TRACEFS);
+ if (ret)
+ return ret;
+
+ if (unlikely(ftrace_disabled))
+ return -ENODEV;
+
+ iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
+ if (!iter)
+ return -ENOMEM;
+
+ iter->pg = ftrace_pages_start;
+ iter->flags = FTRACE_ITER_ADDRS;
+ iter->ops = &global_ops;
+
+ return 0;
+}
+
/**
* ftrace_regex_open - initialize function tracer filter files
* @ops: The ftrace_ops that hold the hash filters
@@ -3855,8 +4189,8 @@ enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
/* Do nothing if it exists */
if (entry)
return 0;
-
- ret = add_hash_entry(hash, rec->ip);
+ if (add_hash_entry(hash, rec->ip) == NULL)
+ ret = -ENOMEM;
}
return ret;
}
@@ -3886,6 +4220,24 @@ add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g,
return 0;
}
+#ifdef FTRACE_MCOUNT_MAX_OFFSET
+static int lookup_ip(unsigned long ip, char **modname, char *str)
+{
+ unsigned long offset;
+
+ kallsyms_lookup(ip, NULL, &offset, modname, str);
+ if (offset > FTRACE_MCOUNT_MAX_OFFSET)
+ return -1;
+ return 0;
+}
+#else
+static int lookup_ip(unsigned long ip, char **modname, char *str)
+{
+ kallsyms_lookup(ip, NULL, NULL, modname, str);
+ return 0;
+}
+#endif
+
static int
ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
struct ftrace_glob *mod_g, int exclude_mod)
@@ -3893,7 +4245,12 @@ ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
char str[KSYM_SYMBOL_LEN];
char *modname;
- kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
+ if (lookup_ip(rec->ip, &modname, str)) {
+ /* This should only happen when a rec is disabled */
+ WARN_ON_ONCE(system_state == SYSTEM_RUNNING &&
+ !(rec->flags & FTRACE_FL_DISABLED));
+ return 0;
+ }
if (mod_g) {
int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;
@@ -3972,6 +4329,7 @@ match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
}
found = 1;
}
+ cond_resched();
} while_for_each_ftrace_rec();
out_unlock:
mutex_unlock(&ftrace_lock);
@@ -4109,7 +4467,6 @@ static void process_mod_list(struct list_head *head, struct ftrace_ops *ops,
struct ftrace_hash **orig_hash, *new_hash;
LIST_HEAD(process_mods);
char *func;
- int ret;
mutex_lock(&ops->func_hash->regex_lock);
@@ -4138,8 +4495,7 @@ static void process_mod_list(struct list_head *head, struct ftrace_ops *ops,
if (!func) /* warn? */
continue;
- list_del(&ftrace_mod->list);
- list_add(&ftrace_mod->list, &process_mods);
+ list_move(&ftrace_mod->list, &process_mods);
/* Use the newly allocated func, as it may be "*" */
kfree(ftrace_mod->func);
@@ -4162,7 +4518,7 @@ static void process_mod_list(struct list_head *head, struct ftrace_ops *ops,
mutex_lock(&ftrace_lock);
- ret = ftrace_hash_move_and_update_ops(ops, orig_hash,
+ ftrace_hash_move_and_update_ops(ops, orig_hash,
new_hash, enable);
mutex_unlock(&ftrace_lock);
@@ -4240,7 +4596,7 @@ static int __init ftrace_mod_cmd_init(void)
core_initcall(ftrace_mod_cmd_init);
static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *op, struct pt_regs *pt_regs)
+ struct ftrace_ops *op, struct ftrace_regs *fregs)
{
struct ftrace_probe_ops *probe_ops;
struct ftrace_func_probe *probe;
@@ -4316,7 +4672,7 @@ void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
* @ip: The instruction pointer address to map @data to
* @data: The data to map to @ip
*
- * Returns 0 on succes otherwise an error.
+ * Returns 0 on success otherwise an error.
*/
int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
unsigned long ip, void *data)
@@ -4346,7 +4702,7 @@ int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
* @ip: The instruction pointer address to remove the data from
*
* Returns the data if it is found, otherwise NULL.
- * Note, if the data pointer is used as the data itself, (see
+ * Note, if the data pointer is used as the data itself, (see
* ftrace_func_mapper_find_ip(), then the return value may be meaningless,
* if the data pointer was set to zero.
*/
@@ -4441,8 +4797,8 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr,
struct ftrace_probe_ops *probe_ops,
void *data)
{
+ struct ftrace_func_probe *probe = NULL, *iter;
struct ftrace_func_entry *entry;
- struct ftrace_func_probe *probe;
struct ftrace_hash **orig_hash;
struct ftrace_hash *old_hash;
struct ftrace_hash *hash;
@@ -4461,11 +4817,13 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr,
mutex_lock(&ftrace_lock);
/* Check if the probe_ops is already registered */
- list_for_each_entry(probe, &tr->func_probes, list) {
- if (probe->probe_ops == probe_ops)
+ list_for_each_entry(iter, &tr->func_probes, list) {
+ if (iter->probe_ops == probe_ops) {
+ probe = iter;
break;
+ }
}
- if (&probe->list == &tr->func_probes) {
+ if (!probe) {
probe = kzalloc(sizeof(*probe), GFP_KERNEL);
if (!probe) {
mutex_unlock(&ftrace_lock);
@@ -4484,7 +4842,7 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr,
/*
* Note, there's a small window here that the func_hash->filter_hash
- * may be NULL or empty. Need to be carefule when reading the loop.
+ * may be NULL or empty. Need to be careful when reading the loop.
*/
mutex_lock(&probe->ops.func_hash->regex_lock);
@@ -4583,9 +4941,9 @@ int
unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
struct ftrace_probe_ops *probe_ops)
{
+ struct ftrace_func_probe *probe = NULL, *iter;
struct ftrace_ops_hash old_hash_ops;
struct ftrace_func_entry *entry;
- struct ftrace_func_probe *probe;
struct ftrace_glob func_g;
struct ftrace_hash **orig_hash;
struct ftrace_hash *old_hash;
@@ -4613,11 +4971,13 @@ unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
mutex_lock(&ftrace_lock);
/* Check if the probe_ops is already registered */
- list_for_each_entry(probe, &tr->func_probes, list) {
- if (probe->probe_ops == probe_ops)
+ list_for_each_entry(iter, &tr->func_probes, list) {
+ if (iter->probe_ops == probe_ops) {
+ probe = iter;
break;
+ }
}
- if (&probe->list == &tr->func_probes)
+ if (!probe)
goto err_unlock_ftrace;
ret = -EINVAL;
@@ -4856,11 +5216,12 @@ ftrace_notrace_write(struct file *file, const char __user *ubuf,
}
static int
-ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
+__ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
{
struct ftrace_func_entry *entry;
- if (!ftrace_location(ip))
+ ip = ftrace_location(ip);
+ if (!ip)
return -EINVAL;
if (remove) {
@@ -4871,12 +5232,34 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
return 0;
}
- return add_hash_entry(hash, ip);
+ entry = add_hash_entry(hash, ip);
+ return entry ? 0 : -ENOMEM;
+}
+
+static int
+ftrace_match_addr(struct ftrace_hash *hash, unsigned long *ips,
+ unsigned int cnt, int remove)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < cnt; i++) {
+ err = __ftrace_match_addr(hash, ips[i], remove);
+ if (err) {
+ /*
+ * This expects @hash to be a temporary hash; if this
+ * fails, the caller must free @hash.
+ */
+ return err;
+ }
+ }
+ return 0;
}
static int
ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
- unsigned long ip, int remove, int reset, int enable)
+ unsigned long *ips, unsigned int cnt,
+ int remove, int reset, int enable)
{
struct ftrace_hash **orig_hash;
struct ftrace_hash *hash;
@@ -4906,8 +5289,8 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
ret = -EINVAL;
goto out_regex_unlock;
}
- if (ip) {
- ret = ftrace_match_addr(hash, ip, remove);
+ if (ips) {
+ ret = ftrace_match_addr(hash, ips, cnt, remove);
if (ret < 0)
goto out_regex_unlock;
}
@@ -4924,10 +5307,10 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
}
static int
-ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
- int reset, int enable)
+ftrace_set_addr(struct ftrace_ops *ops, unsigned long *ips, unsigned int cnt,
+ int remove, int reset, int enable)
{
- return ftrace_set_hash(ops, NULL, 0, ip, remove, reset, enable);
+ return ftrace_set_hash(ops, NULL, 0, ips, cnt, remove, reset, enable);
}
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
@@ -4940,353 +5323,302 @@ struct ftrace_direct_func {
static LIST_HEAD(ftrace_direct_funcs);
-/**
- * ftrace_find_direct_func - test an address if it is a registered direct caller
- * @addr: The address of a registered direct caller
- *
- * This searches to see if a ftrace direct caller has been registered
- * at a specific address, and if so, it returns a descriptor for it.
- *
- * This can be used by architecture code to see if an address is
- * a direct caller (trampoline) attached to a fentry/mcount location.
- * This is useful for the function_graph tracer, as it may need to
- * do adjustments if it traced a location that also has a direct
- * trampoline attached to it.
+static int register_ftrace_function_nolock(struct ftrace_ops *ops);
+
+/*
+ * If there are multiple ftrace_ops, use SAVE_REGS by default, so that the
+ * direct call will be jumped to from ftrace_regs_caller. Only if the
+ * architecture does not support ftrace_regs_caller but does support direct
+ * calls, use SAVE_ARGS so that the jump comes from ftrace_caller instead.
*/
-struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
+#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS
+#define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_ARGS)
+#else
+#define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS)
+#endif
+
+static int check_direct_multi(struct ftrace_ops *ops)
{
- struct ftrace_direct_func *entry;
- bool found = false;
+ if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED))
+ return -EINVAL;
+ if ((ops->flags & MULTI_FLAGS) != MULTI_FLAGS)
+ return -EINVAL;
+ return 0;
+}
- /* May be called by fgraph trampoline (protected by rcu tasks) */
- list_for_each_entry_rcu(entry, &ftrace_direct_funcs, next) {
- if (entry->addr == addr) {
- found = true;
- break;
+static void remove_direct_functions_hash(struct ftrace_hash *hash, unsigned long addr)
+{
+ struct ftrace_func_entry *entry, *del;
+ int size, i;
+
+ size = 1 << hash->size_bits;
+ for (i = 0; i < size; i++) {
+ hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
+ del = __ftrace_lookup_ip(direct_functions, entry->ip);
+ if (del && del->direct == addr) {
+ remove_hash_entry(direct_functions, del);
+ kfree(del);
+ }
}
}
- if (found)
- return entry;
-
- return NULL;
}
/**
* register_ftrace_direct - Call a custom trampoline directly
- * @ip: The address of the nop at the beginning of a function
- * @addr: The address of the trampoline to call at @ip
+ * for multiple functions registered in @ops
+ * @ops: The address of the struct ftrace_ops object
+ * @addr: The address of the trampoline to call at @ops functions
+ *
+ * This is used to connect direct calls to @addr from the nop locations
+ * of the functions registered in @ops (e.g. via the ftrace_set_filter_ip()
+ * function).
*
- * This is used to connect a direct call from the nop location (@ip)
- * at the start of ftrace traced functions. The location that it calls
- * (@addr) must be able to handle a direct call, and save the parameters
- * of the function being traced, and restore them (or inject new ones
- * if needed), before returning.
+ * The location that it calls (@addr) must be able to handle a direct call,
+ * and save the parameters of the function being traced, and restore them
+ * (or inject new ones if needed), before returning.
*
* Returns:
* 0 on success
- * -EBUSY - Another direct function is already attached (there can be only one)
- * -ENODEV - @ip does not point to a ftrace nop location (or not supported)
- * -ENOMEM - There was an allocation failure.
+ * -EINVAL - The @ops object was already registered with this call, or
+ * there are no functions in the @ops object.
+ * -EBUSY - Another direct function is already attached (there can be only one)
+ * -ENODEV - @ip does not point to a ftrace nop location (or not supported)
+ * -ENOMEM - There was an allocation failure.
*/
-int register_ftrace_direct(unsigned long ip, unsigned long addr)
+int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
{
- struct ftrace_direct_func *direct;
- struct ftrace_func_entry *entry;
- struct ftrace_hash *free_hash = NULL;
- struct dyn_ftrace *rec;
- int ret = -EBUSY;
-
- mutex_lock(&direct_mutex);
+ struct ftrace_hash *hash, *new_hash = NULL, *free_hash = NULL;
+ struct ftrace_func_entry *entry, *new;
+ int err = -EBUSY, size, i;
- /* See if there's a direct function at @ip already */
- if (ftrace_find_rec_direct(ip))
- goto out_unlock;
+ if (ops->func || ops->trampoline)
+ return -EINVAL;
+ if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED))
+ return -EINVAL;
+ if (ops->flags & FTRACE_OPS_FL_ENABLED)
+ return -EINVAL;
- ret = -ENODEV;
- rec = lookup_rec(ip, ip);
- if (!rec)
- goto out_unlock;
+ hash = ops->func_hash->filter_hash;
+ if (ftrace_hash_empty(hash))
+ return -EINVAL;
- /*
- * Check if the rec says it has a direct call but we didn't
- * find one earlier?
- */
- if (WARN_ON(rec->flags & FTRACE_FL_DIRECT))
- goto out_unlock;
+ mutex_lock(&direct_mutex);
- /* Make sure the ip points to the exact record */
- if (ip != rec->ip) {
- ip = rec->ip;
- /* Need to check this ip for a direct. */
- if (ftrace_find_rec_direct(ip))
- goto out_unlock;
+ /* Make sure requested entries are not already registered.. */
+ size = 1 << hash->size_bits;
+ for (i = 0; i < size; i++) {
+ hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
+ if (ftrace_find_rec_direct(entry->ip))
+ goto out_unlock;
+ }
}
- ret = -ENOMEM;
- if (ftrace_hash_empty(direct_functions) ||
- direct_functions->count > 2 * (1 << direct_functions->size_bits)) {
- struct ftrace_hash *new_hash;
- int size = ftrace_hash_empty(direct_functions) ? 0 :
- direct_functions->count + 1;
-
- if (size < 32)
- size = 32;
+ err = -ENOMEM;
- new_hash = dup_hash(direct_functions, size);
- if (!new_hash)
- goto out_unlock;
+ /* Make a copy hash to place the new and the old entries in */
+ size = hash->count + direct_functions->count;
+ if (size > 32)
+ size = 32;
+ new_hash = alloc_ftrace_hash(fls(size));
+ if (!new_hash)
+ goto out_unlock;
- free_hash = direct_functions;
- direct_functions = new_hash;
+ /* Now copy over the existing direct entries */
+ size = 1 << direct_functions->size_bits;
+ for (i = 0; i < size; i++) {
+ hlist_for_each_entry(entry, &direct_functions->buckets[i], hlist) {
+ new = add_hash_entry(new_hash, entry->ip);
+ if (!new)
+ goto out_unlock;
+ new->direct = entry->direct;
+ }
}
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- goto out_unlock;
-
- direct = ftrace_find_direct_func(addr);
- if (!direct) {
- direct = kmalloc(sizeof(*direct), GFP_KERNEL);
- if (!direct) {
- kfree(entry);
- goto out_unlock;
+ /* ... and add the new entries */
+ size = 1 << hash->size_bits;
+ for (i = 0; i < size; i++) {
+ hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
+ new = add_hash_entry(new_hash, entry->ip);
+ if (!new)
+ goto out_unlock;
+ /* Update both the copy and the hash entry */
+ new->direct = addr;
+ entry->direct = addr;
}
- direct->addr = addr;
- direct->count = 0;
- list_add_rcu(&direct->next, &ftrace_direct_funcs);
- ftrace_direct_func_count++;
}
- entry->ip = ip;
- entry->direct = addr;
- __add_hash_entry(direct_functions, entry);
+ free_hash = direct_functions;
+ rcu_assign_pointer(direct_functions, new_hash);
+ new_hash = NULL;
- ret = ftrace_set_filter_ip(&direct_ops, ip, 0, 0);
- if (ret)
- remove_hash_entry(direct_functions, entry);
+ ops->func = call_direct_funcs;
+ ops->flags = MULTI_FLAGS;
+ ops->trampoline = FTRACE_REGS_ADDR;
+ ops->direct_call = addr;
- if (!ret && !(direct_ops.flags & FTRACE_OPS_FL_ENABLED)) {
- ret = register_ftrace_function(&direct_ops);
- if (ret)
- ftrace_set_filter_ip(&direct_ops, ip, 1, 0);
- }
+ err = register_ftrace_function_nolock(ops);
- if (ret) {
- kfree(entry);
- if (!direct->count) {
- list_del_rcu(&direct->next);
- synchronize_rcu_tasks();
- kfree(direct);
- if (free_hash)
- free_ftrace_hash(free_hash);
- free_hash = NULL;
- ftrace_direct_func_count--;
- }
- } else {
- direct->count++;
- }
out_unlock:
mutex_unlock(&direct_mutex);
- if (free_hash) {
+ if (free_hash && free_hash != EMPTY_HASH) {
synchronize_rcu_tasks();
free_ftrace_hash(free_hash);
}
- return ret;
+ if (new_hash)
+ free_ftrace_hash(new_hash);
+
+ return err;
}
EXPORT_SYMBOL_GPL(register_ftrace_direct);
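
A hedged usage sketch of the ops-based API above, loosely following the pattern in samples/ftrace/: my_tramp() is assumed to be an architecture-specific assembly trampoline that saves and restores the traced function's arguments, and the choice of wake_up_process() as the attach point is purely illustrative.

#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sched.h>

extern void my_tramp(void);		/* assumed asm trampoline */

static struct ftrace_ops my_direct_ops;	/* func/trampoline intentionally unset */

static int __init direct_example_init(void)
{
	int ret;

	/* Populate the ops filter hash with the target function(s) first */
	ret = ftrace_set_filter_ip(&my_direct_ops,
				   (unsigned long)wake_up_process, 0, 0);
	if (ret)
		return ret;

	/* Attach the trampoline to every function in the filter hash */
	return register_ftrace_direct(&my_direct_ops, (unsigned long)my_tramp);
}

static void __exit direct_example_exit(void)
{
	/* free_filters=true also releases the hash ftrace_set_filter_ip() allocated */
	unregister_ftrace_direct(&my_direct_ops, (unsigned long)my_tramp, true);
}

module_init(direct_example_init);
module_exit(direct_example_exit);
MODULE_LICENSE("GPL");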
-static struct ftrace_func_entry *find_direct_entry(unsigned long *ip,
- struct dyn_ftrace **recp)
+/**
+ * unregister_ftrace_direct - Remove calls to custom trampoline
+ * previously registered by register_ftrace_direct for @ops object.
+ * @ops: The address of the struct ftrace_ops object
+ *
+ * This is used to remove the direct calls to @addr from the nop locations
+ * of the functions registered in @ops (e.g. via the ftrace_set_filter_ip()
+ * function).
+ *
+ * Returns:
+ * 0 on success
+ * -EINVAL - The @ops object was not properly registered.
+ */
+int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
+ bool free_filters)
{
- struct ftrace_func_entry *entry;
- struct dyn_ftrace *rec;
-
- rec = lookup_rec(*ip, *ip);
- if (!rec)
- return NULL;
-
- entry = __ftrace_lookup_ip(direct_functions, rec->ip);
- if (!entry) {
- WARN_ON(rec->flags & FTRACE_FL_DIRECT);
- return NULL;
- }
+ struct ftrace_hash *hash = ops->func_hash->filter_hash;
+ int err;
- WARN_ON(!(rec->flags & FTRACE_FL_DIRECT));
+ if (check_direct_multi(ops))
+ return -EINVAL;
+ if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
+ return -EINVAL;
- /* Passed in ip just needs to be on the call site */
- *ip = rec->ip;
+ mutex_lock(&direct_mutex);
+ err = unregister_ftrace_function(ops);
+ remove_direct_functions_hash(hash, addr);
+ mutex_unlock(&direct_mutex);
- if (recp)
- *recp = rec;
+ /* cleanup for a possible later register call */
+ ops->func = NULL;
+ ops->trampoline = 0;
- return entry;
+ if (free_filters)
+ ftrace_free_filter(ops);
+ return err;
}
+EXPORT_SYMBOL_GPL(unregister_ftrace_direct);
-int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
+static int
+__modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
{
- struct ftrace_direct_func *direct;
- struct ftrace_func_entry *entry;
- int ret = -ENODEV;
-
- mutex_lock(&direct_mutex);
-
- entry = find_direct_entry(&ip, NULL);
- if (!entry)
- goto out_unlock;
+ struct ftrace_hash *hash;
+ struct ftrace_func_entry *entry, *iter;
+ static struct ftrace_ops tmp_ops = {
+ .func = ftrace_stub,
+ .flags = FTRACE_OPS_FL_STUB,
+ };
+ int i, size;
+ int err;
- if (direct_functions->count == 1)
- unregister_ftrace_function(&direct_ops);
+ lockdep_assert_held_once(&direct_mutex);
- ret = ftrace_set_filter_ip(&direct_ops, ip, 1, 0);
+ /* Enable the tmp_ops to have the same functions as the direct ops */
+ ftrace_ops_init(&tmp_ops);
+ tmp_ops.func_hash = ops->func_hash;
+ tmp_ops.direct_call = addr;
- WARN_ON(ret);
+ err = register_ftrace_function_nolock(&tmp_ops);
+ if (err)
+ return err;
- remove_hash_entry(direct_functions, entry);
+ /*
+ * Now the ftrace_ops_list_func() is called to do the direct callers.
+ * We can safely change the direct functions attached to each entry.
+ */
+ mutex_lock(&ftrace_lock);
- direct = ftrace_find_direct_func(addr);
- if (!WARN_ON(!direct)) {
- /* This is the good path (see the ! before WARN) */
- direct->count--;
- WARN_ON(direct->count < 0);
- if (!direct->count) {
- list_del_rcu(&direct->next);
- synchronize_rcu_tasks();
- kfree(direct);
- kfree(entry);
- ftrace_direct_func_count--;
+ hash = ops->func_hash->filter_hash;
+ size = 1 << hash->size_bits;
+ for (i = 0; i < size; i++) {
+ hlist_for_each_entry(iter, &hash->buckets[i], hlist) {
+ entry = __ftrace_lookup_ip(direct_functions, iter->ip);
+ if (!entry)
+ continue;
+ entry->direct = addr;
}
}
- out_unlock:
- mutex_unlock(&direct_mutex);
+ /* Prevent store tearing if a trampoline concurrently accesses the value */
+ WRITE_ONCE(ops->direct_call, addr);
- return ret;
-}
-EXPORT_SYMBOL_GPL(unregister_ftrace_direct);
+ mutex_unlock(&ftrace_lock);
-static struct ftrace_ops stub_ops = {
- .func = ftrace_stub,
-};
+ /* Removing the tmp_ops will add the updated direct callers to the functions */
+ unregister_ftrace_function(&tmp_ops);
+
+ return err;
+}
/**
- * ftrace_modify_direct_caller - modify ftrace nop directly
- * @entry: The ftrace hash entry of the direct helper for @rec
- * @rec: The record representing the function site to patch
- * @old_addr: The location that the site at @rec->ip currently calls
- * @new_addr: The location that the site at @rec->ip should call
+ * modify_ftrace_direct_nolock - Modify an existing direct 'multi' call
+ * to call something else
+ * @ops: The address of the struct ftrace_ops object
+ * @addr: The address of the new trampoline to call at @ops functions
*
- * An architecture may overwrite this function to optimize the
- * changing of the direct callback on an ftrace nop location.
- * This is called with the ftrace_lock mutex held, and no other
- * ftrace callbacks are on the associated record (@rec). Thus,
- * it is safe to modify the ftrace record, where it should be
- * currently calling @old_addr directly, to call @new_addr.
+ * This is used to unregister the currently registered direct caller and
+ * register a new one (@addr) on the functions registered in the @ops object.
*
- * Safety checks should be made to make sure that the code at
- * @rec->ip is currently calling @old_addr. And this must
- * also update entry->direct to @new_addr.
+ * Note there's a window between the ftrace_shutdown and ftrace_startup calls
+ * where no callbacks will be called.
+ *
+ * Caller should already have direct_mutex locked, so we don't lock
+ * direct_mutex here.
+ *
+ * Returns: zero on success. Non zero on error, which includes:
+ * -EINVAL - The @ops object was not properly registered.
*/
-int __weak ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
- struct dyn_ftrace *rec,
- unsigned long old_addr,
- unsigned long new_addr)
+int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr)
{
- unsigned long ip = rec->ip;
- int ret;
-
- /*
- * The ftrace_lock was used to determine if the record
- * had more than one registered user to it. If it did,
- * we needed to prevent that from changing to do the quick
- * switch. But if it did not (only a direct caller was attached)
- * then this function is called. But this function can deal
- * with attached callers to the rec that we care about, and
- * since this function uses standard ftrace calls that take
- * the ftrace_lock mutex, we need to release it.
- */
- mutex_unlock(&ftrace_lock);
-
- /*
- * By setting a stub function at the same address, we force
- * the code to call the iterator and the direct_ops helper.
- * This means that @ip does not call the direct call, and
- * we can simply modify it.
- */
- ret = ftrace_set_filter_ip(&stub_ops, ip, 0, 0);
- if (ret)
- goto out_lock;
-
- ret = register_ftrace_function(&stub_ops);
- if (ret) {
- ftrace_set_filter_ip(&stub_ops, ip, 1, 0);
- goto out_lock;
- }
-
- entry->direct = new_addr;
-
- /*
- * By removing the stub, we put back the direct call, calling
- * the @new_addr.
- */
- unregister_ftrace_function(&stub_ops);
- ftrace_set_filter_ip(&stub_ops, ip, 1, 0);
-
- out_lock:
- mutex_lock(&ftrace_lock);
+ if (check_direct_multi(ops))
+ return -EINVAL;
+ if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
+ return -EINVAL;
- return ret;
+ return __modify_ftrace_direct(ops, addr);
}
+EXPORT_SYMBOL_GPL(modify_ftrace_direct_nolock);
/**
- * modify_ftrace_direct - Modify an existing direct call to call something else
- * @ip: The instruction pointer to modify
- * @old_addr: The address that the current @ip calls directly
- * @new_addr: The address that the @ip should call
+ * modify_ftrace_direct - Modify an existing direct 'multi' call
+ * to call something else
+ * @ops: The address of the struct ftrace_ops object
+ * @addr: The address of the new trampoline to call at @ops functions
*
- * This modifies a ftrace direct caller at an instruction pointer without
- * having to disable it first. The direct call will switch over to the
- * @new_addr without missing anything.
+ * This is used to unregister the currently registered direct caller and
+ * register a new one (@addr) on the functions registered in the @ops object.
+ *
+ * Note there's a window between the ftrace_shutdown and ftrace_startup calls
+ * where no callbacks will be called.
*
* Returns: zero on success. Non zero on error, which includes:
- * -ENODEV : the @ip given has no direct caller attached
- * -EINVAL : the @old_addr does not match the current direct caller
+ * -EINVAL - The @ops object was not properly registered.
*/
-int modify_ftrace_direct(unsigned long ip,
- unsigned long old_addr, unsigned long new_addr)
+int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
{
- struct ftrace_func_entry *entry;
- struct dyn_ftrace *rec;
- int ret = -ENODEV;
-
- mutex_lock(&direct_mutex);
-
- mutex_lock(&ftrace_lock);
- entry = find_direct_entry(&ip, &rec);
- if (!entry)
- goto out_unlock;
-
- ret = -EINVAL;
- if (entry->direct != old_addr)
- goto out_unlock;
+ int err;
- /*
- * If there's no other ftrace callback on the rec->ip location,
- * then it can be changed directly by the architecture.
- * If there is another caller, then we just need to change the
- * direct caller helper to point to @new_addr.
- */
- if (ftrace_rec_count(rec) == 1) {
- ret = ftrace_modify_direct_caller(entry, rec, old_addr, new_addr);
- } else {
- entry->direct = new_addr;
- ret = 0;
- }
+ if (check_direct_multi(ops))
+ return -EINVAL;
+ if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
+ return -EINVAL;
- out_unlock:
- mutex_unlock(&ftrace_lock);
+ mutex_lock(&direct_mutex);
+ err = __modify_ftrace_direct(ops, addr);
mutex_unlock(&direct_mutex);
- return ret;
+ return err;
}
EXPORT_SYMBOL_GPL(modify_ftrace_direct);
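
A short sketch of switching an already-registered direct ops to a different trampoline with the interface above; my_other_tramp() is an assumed trampoline of the same kind as in the registration example, and my_direct_ops is assumed to have been registered with register_ftrace_direct() earlier.

extern void my_other_tramp(void);	/* assumed asm trampoline */

static int switch_trampoline(void)
{
	/*
	 * Takes direct_mutex internally; a caller that already holds it
	 * (e.g. from an ftrace_ops_func_t callback) should use
	 * modify_ftrace_direct_nolock() instead.
	 */
	return modify_ftrace_direct(&my_direct_ops, (unsigned long)my_other_tramp);
}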
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
@@ -5299,17 +5631,44 @@ EXPORT_SYMBOL_GPL(modify_ftrace_direct);
* @reset - non zero to reset all filters before applying this filter.
*
* Filters denote which functions should be enabled when tracing is enabled
- * If @ip is NULL, it failes to update filter.
+ * If @ip is NULL, it fails to update filter.
+ *
+ * This can allocate memory which must be freed before @ops can be freed,
+ * either by removing each filtered addr or by using
+ * ftrace_free_filter(@ops).
*/
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
int remove, int reset)
{
ftrace_ops_init(ops);
- return ftrace_set_addr(ops, ip, remove, reset, 1);
+ return ftrace_set_addr(ops, &ip, 1, remove, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
/**
+ * ftrace_set_filter_ips - set functions to filter on in ftrace by addresses
+ * @ops - the ops to set the filter with
+ * @ips - the array of addresses to add to or remove from the filter.
+ * @cnt - the number of addresses in @ips
+ * @remove - non zero to remove ips from the filter
+ * @reset - non zero to reset all filters before applying this filter.
+ *
+ * Filters denote which functions should be enabled when tracing is enabled
+ * If the @ips array or any ip specified within is NULL, it fails to update the filter.
+ *
+ * This can allocate memory which must be freed before @ops can be freed,
+ * either by removing each filtered addr or by using
+ * ftrace_free_filter(@ops).
+ */
+int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
+ unsigned int cnt, int remove, int reset)
+{
+ ftrace_ops_init(ops);
+ return ftrace_set_addr(ops, ips, cnt, remove, reset, 1);
+}
+EXPORT_SYMBOL_GPL(ftrace_set_filter_ips);
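
A brief sketch of batching filter updates with the new helper; the two target functions are placeholders, and in practice the addresses typically come from the caller's own bookkeeping (e.g. already-resolved symbol addresses).

#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/sched.h>

static int filter_batch(struct ftrace_ops *ops)
{
	unsigned long ips[] = {
		(unsigned long)wake_up_process,
		(unsigned long)schedule_timeout,
	};

	/* remove=0: add the addresses; reset=1: clear any previous filter */
	return ftrace_set_filter_ips(ops, ips, ARRAY_SIZE(ips), 0, 1);
}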
+
+/**
* ftrace_ops_set_global_filter - setup ops to use global filters
* @ops - the ops which will use the global filters
*
@@ -5330,7 +5689,7 @@ static int
ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
int reset, int enable)
{
- return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
+ return ftrace_set_hash(ops, buf, len, NULL, 0, 0, reset, enable);
}
/**
@@ -5342,6 +5701,10 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
*
* Filters denote which functions should be enabled when tracing is enabled.
* If @buf is NULL and reset is set, all functions will be enabled for tracing.
+ *
+ * This can allocate memory which must be freed before @ops can be freed,
+ * either by removing each filtered addr or by using
+ * ftrace_free_filter(@ops).
*/
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
int len, int reset)
@@ -5361,6 +5724,10 @@ EXPORT_SYMBOL_GPL(ftrace_set_filter);
* Notrace Filters denote which functions should not be enabled when tracing
* is enabled. If @buf is NULL and reset is set, all functions will be enabled
* for tracing.
+ *
+ * This can allocate memory which must be freed before @ops can be freed,
+ * either by removing each filtered addr or by using
+ * ftrace_free_filter(@ops).
*/
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
int len, int reset)
@@ -5413,7 +5780,7 @@ bool ftrace_filter_param __initdata;
static int __init set_ftrace_notrace(char *str)
{
ftrace_filter_param = true;
- strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
+ strscpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
return 1;
}
__setup("ftrace_notrace=", set_ftrace_notrace);
@@ -5421,7 +5788,7 @@ __setup("ftrace_notrace=", set_ftrace_notrace);
static int __init set_ftrace_filter(char *str)
{
ftrace_filter_param = true;
- strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
+ strscpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
return 1;
}
__setup("ftrace_filter=", set_ftrace_filter);
@@ -5433,14 +5800,14 @@ static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);
static int __init set_graph_function(char *str)
{
- strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
+ strscpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
return 1;
}
__setup("ftrace_graph_filter=", set_graph_function);
static int __init set_graph_notrace_function(char *str)
{
- strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
+ strscpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
return 1;
}
__setup("ftrace_graph_notrace=", set_graph_notrace_function);
@@ -5514,7 +5881,6 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
struct ftrace_hash **orig_hash;
struct trace_parser *parser;
int filter_hash;
- int ret;
if (file->f_mode & FMODE_READ) {
iter = m->private;
@@ -5524,7 +5890,10 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
parser = &iter->parser;
if (trace_parser_loaded(parser)) {
- ftrace_match_records(iter->hash, parser->buffer, parser->idx);
+ int enable = !(iter->flags & FTRACE_ITER_NOTRACE);
+
+ ftrace_process_regex(iter, parser->buffer,
+ parser->idx, enable);
}
trace_parser_put(parser);
@@ -5536,13 +5905,17 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
if (filter_hash) {
orig_hash = &iter->ops->func_hash->filter_hash;
- if (iter->tr && !list_empty(&iter->tr->mod_trace))
- iter->hash->flags |= FTRACE_HASH_FL_MOD;
+ if (iter->tr) {
+ if (list_empty(&iter->tr->mod_trace))
+ iter->hash->flags &= ~FTRACE_HASH_FL_MOD;
+ else
+ iter->hash->flags |= FTRACE_HASH_FL_MOD;
+ }
} else
orig_hash = &iter->ops->func_hash->notrace_hash;
mutex_lock(&ftrace_lock);
- ret = ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
+ ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
iter->hash, filter_hash);
mutex_unlock(&ftrace_lock);
} else {
@@ -5573,6 +5946,20 @@ static const struct file_operations ftrace_enabled_fops = {
.release = seq_release_private,
};
+static const struct file_operations ftrace_touched_fops = {
+ .open = ftrace_touched_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private,
+};
+
+static const struct file_operations ftrace_avail_addrs_fops = {
+ .open = ftrace_avail_addrs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private,
+};
+
static const struct file_operations ftrace_filter_fops = {
.open = ftrace_filter_open,
.read = seq_read,
@@ -5877,7 +6264,8 @@ ftrace_graph_release(struct inode *inode, struct file *file)
* infrastructure to do the synchronization, thus we must do it
* ourselves.
*/
- synchronize_rcu_tasks_rude();
+ if (old_hash != EMPTY_HASH)
+ synchronize_rcu_tasks_rude();
free_ftrace_hash(old_hash);
}
@@ -5925,7 +6313,7 @@ ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer)
if (entry)
continue;
- if (add_hash_entry(hash, rec->ip) < 0)
+ if (add_hash_entry(hash, rec->ip) == NULL)
goto out;
} else {
if (entry) {
@@ -6000,10 +6388,10 @@ void ftrace_create_filter_files(struct ftrace_ops *ops,
struct dentry *parent)
{
- trace_create_file("set_ftrace_filter", 0644, parent,
+ trace_create_file("set_ftrace_filter", TRACE_MODE_WRITE, parent,
ops, &ftrace_filter_fops);
- trace_create_file("set_ftrace_notrace", 0644, parent,
+ trace_create_file("set_ftrace_notrace", TRACE_MODE_WRITE, parent,
ops, &ftrace_notrace_fops);
}
@@ -6030,19 +6418,25 @@ void ftrace_destroy_filter_files(struct ftrace_ops *ops)
static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
{
- trace_create_file("available_filter_functions", 0444,
+ trace_create_file("available_filter_functions", TRACE_MODE_READ,
d_tracer, NULL, &ftrace_avail_fops);
- trace_create_file("enabled_functions", 0444,
+ trace_create_file("available_filter_functions_addrs", TRACE_MODE_READ,
+ d_tracer, NULL, &ftrace_avail_addrs_fops);
+
+ trace_create_file("enabled_functions", TRACE_MODE_READ,
d_tracer, NULL, &ftrace_enabled_fops);
+ trace_create_file("touched_functions", TRACE_MODE_READ,
+ d_tracer, NULL, &ftrace_touched_fops);
+
ftrace_create_filter_files(&global_ops, d_tracer);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- trace_create_file("set_graph_function", 0644, d_tracer,
+ trace_create_file("set_graph_function", TRACE_MODE_WRITE, d_tracer,
NULL,
&ftrace_graph_fops);
- trace_create_file("set_graph_notrace", 0644, d_tracer,
+ trace_create_file("set_graph_notrace", TRACE_MODE_WRITE, d_tracer,
NULL,
&ftrace_graph_notrace_fops);
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
@@ -6062,13 +6456,36 @@ static int ftrace_cmp_ips(const void *a, const void *b)
return 0;
}
+#ifdef CONFIG_FTRACE_SORT_STARTUP_TEST
+static void test_is_sorted(unsigned long *start, unsigned long count)
+{
+ int i;
+
+ for (i = 1; i < count; i++) {
+ if (WARN(start[i - 1] > start[i],
+ "[%d] %pS at %lx is not sorted with %pS at %lx\n", i,
+ (void *)start[i - 1], start[i - 1],
+ (void *)start[i], start[i]))
+ break;
+ }
+ if (i == count)
+ pr_info("ftrace section at %px sorted properly\n", start);
+}
+#else
+static void test_is_sorted(unsigned long *start, unsigned long count)
+{
+}
+#endif
+
static int ftrace_process_locs(struct module *mod,
unsigned long *start,
unsigned long *end)
{
+ struct ftrace_page *pg_unuse = NULL;
struct ftrace_page *start_pg;
struct ftrace_page *pg;
struct dyn_ftrace *rec;
+ unsigned long skipped = 0;
unsigned long count;
unsigned long *p;
unsigned long addr;
@@ -6080,8 +6497,17 @@ static int ftrace_process_locs(struct module *mod,
if (!count)
return 0;
- sort(start, count, sizeof(*start),
- ftrace_cmp_ips, NULL);
+ /*
+ * Sorting mcount in vmlinux at build time depends on
+ * CONFIG_BUILDTIME_MCOUNT_SORT, while the mcount_loc entries in
+ * modules cannot be sorted at build time.
+ */
+ if (!IS_ENABLED(CONFIG_BUILDTIME_MCOUNT_SORT) || mod) {
+ sort(start, count, sizeof(*start),
+ ftrace_cmp_ips, NULL);
+ } else {
+ test_is_sorted(start, count);
+ }
start_pg = ftrace_allocate_pages(count);
if (!start_pg)
@@ -6114,6 +6540,7 @@ static int ftrace_process_locs(struct module *mod,
p = start;
pg = start_pg;
while (p < end) {
+ unsigned long end_offset;
addr = ftrace_call_adjust(*p++);
/*
* Some architecture linkers will pad between
@@ -6121,10 +6548,13 @@ static int ftrace_process_locs(struct module *mod,
* object files to satisfy alignments.
* Skip any NULL pointers.
*/
- if (!addr)
+ if (!addr) {
+ skipped++;
continue;
+ }
- if (pg->index == pg->size) {
+ end_offset = (pg->index+1) * sizeof(pg->records[0]);
+ if (end_offset > PAGE_SIZE << pg->order) {
/* We should have allocated enough */
if (WARN_ON(!pg->next))
break;
@@ -6135,8 +6565,10 @@ static int ftrace_process_locs(struct module *mod,
rec->ip = addr;
}
- /* We should have used all pages */
- WARN_ON(pg->next);
+ if (pg->next) {
+ pg_unuse = pg->next;
+ pg->next = NULL;
+ }
/* Assign the last page to ftrace_pages */
ftrace_pages = pg;
@@ -6158,6 +6590,11 @@ static int ftrace_process_locs(struct module *mod,
out:
mutex_unlock(&ftrace_lock);
+ /* We should have used all pages unless we skipped some */
+ if (pg_unuse) {
+ WARN_ON(!skipped);
+ ftrace_free_pages(pg_unuse);
+ }
return ret;
}
@@ -6178,6 +6615,59 @@ struct ftrace_mod_map {
unsigned int num_funcs;
};
+static int ftrace_get_trampoline_kallsym(unsigned int symnum,
+ unsigned long *value, char *type,
+ char *name, char *module_name,
+ int *exported)
+{
+ struct ftrace_ops *op;
+
+ list_for_each_entry_rcu(op, &ftrace_ops_trampoline_list, list) {
+ if (!op->trampoline || symnum--)
+ continue;
+ *value = op->trampoline;
+ *type = 't';
+ strscpy(name, FTRACE_TRAMPOLINE_SYM, KSYM_NAME_LEN);
+ strscpy(module_name, FTRACE_TRAMPOLINE_MOD, MODULE_NAME_LEN);
+ *exported = 0;
+ return 0;
+ }
+
+ return -ERANGE;
+}
+
+#if defined(CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS) || defined(CONFIG_MODULES)
+/*
+ * Check if the current ops references the given ip.
+ *
+ * If the ops traces all functions, then it was already accounted for.
+ * If the ops does not trace the current record function, skip it.
+ * If the ops ignores the function via notrace filter, skip it.
+ */
+static bool
+ops_references_ip(struct ftrace_ops *ops, unsigned long ip)
+{
+ /* If ops isn't enabled, ignore it */
+ if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
+ return false;
+
+ /* If ops traces all then it includes this function */
+ if (ops_traces_mod(ops))
+ return true;
+
+ /* The function must be in the filter */
+ if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
+ !__ftrace_lookup_ip(ops->func_hash->filter_hash, ip))
+ return false;
+
+ /* If in notrace hash, we ignore it too */
+ if (ftrace_lookup_ip(ops->func_hash->notrace_hash, ip))
+ return false;
+
+ return true;
+}
+#endif
+
#ifdef CONFIG_MODULES
#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
@@ -6190,8 +6680,19 @@ static int referenced_filters(struct dyn_ftrace *rec)
int cnt = 0;
for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
- if (ops_references_rec(ops, rec))
- cnt++;
+ if (ops_references_ip(ops, rec->ip)) {
+ if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_DIRECT))
+ continue;
+ if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_IPMODIFY))
+ continue;
+ cnt++;
+ if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
+ rec->flags |= FTRACE_FL_REGS;
+ if (cnt == 1 && ops->trampoline)
+ rec->flags |= FTRACE_FL_TRAMP;
+ else
+ rec->flags &= ~FTRACE_FL_TRAMP;
+ }
}
return cnt;
@@ -6220,7 +6721,7 @@ clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash)
}
}
-/* Clear any records from hashs */
+/* Clear any records from hashes */
static void clear_mod_from_hashes(struct ftrace_page *pg)
{
struct trace_array *tr;
@@ -6261,7 +6762,6 @@ void ftrace_release_mod(struct module *mod)
struct ftrace_page **last_pg;
struct ftrace_page *tmp_page = NULL;
struct ftrace_page *pg;
- int order;
mutex_lock(&ftrace_lock);
@@ -6283,8 +6783,7 @@ void ftrace_release_mod(struct module *mod)
last_pg = &ftrace_pages_start;
for (pg = ftrace_pages_start; pg; pg = *last_pg) {
rec = &pg->records[0];
- if (within_module_core(rec->ip, mod) ||
- within_module_init(rec->ip, mod)) {
+ if (within_module(rec->ip, mod)) {
/*
* As core pages are first, the first
* page should never be a module page.
@@ -6312,11 +6811,12 @@ void ftrace_release_mod(struct module *mod)
/* Needs to be called outside of ftrace_lock */
clear_mod_from_hashes(pg);
- order = get_count_order(pg->size / ENTRIES_PER_PAGE);
- free_pages((unsigned long)pg->records, order);
+ if (pg->records) {
+ free_pages((unsigned long)pg->records, pg->order);
+ ftrace_number_of_pages -= 1 << pg->order;
+ }
tmp_page = pg->next;
kfree(pg);
- ftrace_number_of_pages -= 1 << order;
ftrace_number_of_groups--;
}
}
@@ -6355,10 +6855,16 @@ void ftrace_module_enable(struct module *mod)
* not part of this module, then skip this pg,
* which the "break" will do.
*/
- if (!within_module_core(rec->ip, mod) &&
- !within_module_init(rec->ip, mod))
+ if (!within_module(rec->ip, mod))
break;
+ /* Weak functions should still be ignored */
+ if (!test_for_valid_rec(rec)) {
+ /* Clear all other flags. Should not be enabled anyway */
+ rec->flags = FTRACE_FL_DISABLED;
+ continue;
+ }
+
cnt = 0;
/*
@@ -6370,8 +6876,8 @@ void ftrace_module_enable(struct module *mod)
if (ftrace_start_up)
cnt += referenced_filters(rec);
- /* This clears FTRACE_FL_DISABLED */
- rec->flags = cnt;
+ rec->flags &= ~FTRACE_FL_DISABLED;
+ rec->flags += cnt;
if (ftrace_start_up && cnt) {
int failed = __ftrace_replace_code(rec, 1);
@@ -6395,11 +6901,16 @@ void ftrace_module_enable(struct module *mod)
void ftrace_module_init(struct module *mod)
{
+ int ret;
+
if (ftrace_disabled || !mod->num_ftrace_callsites)
return;
- ftrace_process_locs(mod, mod->ftrace_callsites,
- mod->ftrace_callsites + mod->num_ftrace_callsites);
+ ret = ftrace_process_locs(mod, mod->ftrace_callsites,
+ mod->ftrace_callsites + mod->num_ftrace_callsites);
+ if (ret)
+ pr_warn("ftrace: failed to allocate entries for module '%s' functions\n",
+ mod->name);
}
static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
@@ -6478,7 +6989,7 @@ ftrace_func_address_lookup(struct ftrace_mod_map *mod_map,
if (off)
*off = addr - found_func->ip;
if (sym)
- strlcpy(sym, found_func->name, KSYM_NAME_LEN);
+ strscpy(sym, found_func->name, KSYM_NAME_LEN);
return found_func->name;
}
@@ -6514,6 +7025,7 @@ int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
{
struct ftrace_mod_map *mod_map;
struct ftrace_mod_func *mod_func;
+ int ret;
preempt_disable();
list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
@@ -6531,8 +7043,8 @@ int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
*value = mod_func->ip;
*type = 'T';
- strlcpy(name, mod_func->name, KSYM_NAME_LEN);
- strlcpy(module_name, mod_map->mod->name, MODULE_NAME_LEN);
+ strscpy(name, mod_func->name, KSYM_NAME_LEN);
+ strscpy(module_name, mod_map->mod->name, MODULE_NAME_LEN);
*exported = 1;
preempt_enable();
return 0;
@@ -6540,8 +7052,10 @@ int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
WARN_ON(1);
break;
}
+ ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
+ module_name, exported);
preempt_enable();
- return -ERANGE;
+ return ret;
}
#else
@@ -6553,6 +7067,18 @@ allocate_ftrace_mod_map(struct module *mod,
{
return NULL;
}
+int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
+ char *type, char *name, char *module_name,
+ int *exported)
+{
+ int ret;
+
+ preempt_disable();
+ ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
+ module_name, exported);
+ preempt_enable();
+ return ret;
+}
#endif /* CONFIG_MODULES */
struct ftrace_init_func {
@@ -6618,10 +7144,7 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
struct dyn_ftrace key;
struct ftrace_mod_map *mod_map = NULL;
struct ftrace_init_func *func, *func_next;
- struct list_head clear_hash;
- int order;
-
- INIT_LIST_HEAD(&clear_hash);
+ LIST_HEAD(clear_hash);
key.ip = start;
key.flags = end; /* overload flags, as it is unsigned long */
@@ -6657,9 +7180,10 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
ftrace_update_tot_cnt--;
if (!pg->index) {
*last_pg = pg->next;
- order = get_count_order(pg->size / ENTRIES_PER_PAGE);
- free_pages((unsigned long)pg->records, order);
- ftrace_number_of_pages -= 1 << order;
+ if (pg->records) {
+ free_pages((unsigned long)pg->records, pg->order);
+ ftrace_number_of_pages -= 1 << pg->order;
+ }
ftrace_number_of_groups--;
kfree(pg);
pg = container_of(last_pg, struct ftrace_page, next);
@@ -6685,9 +7209,16 @@ void __init ftrace_free_init_mem(void)
void *start = (void *)(&__init_begin);
void *end = (void *)(&__init_end);
+ ftrace_boot_snapshot();
+
ftrace_free_mem(NULL, start, end);
}
+int __init __weak ftrace_dyn_arch_init(void)
+{
+ return 0;
+}
+
void __init ftrace_init(void)
{
extern unsigned long __start_mcount_loc[];
@@ -6708,17 +7239,21 @@ void __init ftrace_init(void)
}
pr_info("ftrace: allocating %ld entries in %ld pages\n",
- count, count / ENTRIES_PER_PAGE + 1);
-
- last_ftrace_enabled = ftrace_enabled = 1;
+ count, DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
ret = ftrace_process_locs(NULL,
__start_mcount_loc,
__stop_mcount_loc);
+ if (ret) {
+ pr_warn("ftrace: failed to allocate entries for functions\n");
+ goto failed;
+ }
pr_info("ftrace: allocated %ld pages with %ld groups\n",
ftrace_number_of_pages, ftrace_number_of_groups);
+ last_ftrace_enabled = ftrace_enabled = 1;
+
set_ftrace_early_filters();
return;
@@ -6733,7 +7268,24 @@ void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
static void ftrace_update_trampoline(struct ftrace_ops *ops)
{
+ unsigned long trampoline = ops->trampoline;
+
arch_ftrace_update_trampoline(ops);
+ if (ops->trampoline && ops->trampoline != trampoline &&
+ (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) {
+ /* Add to kallsyms before the perf events */
+ ftrace_add_trampoline_to_kallsyms(ops);
+ perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
+ ops->trampoline, ops->trampoline_size, false,
+ FTRACE_TRAMPOLINE_SYM);
+ /*
+ * Record the perf text poke event after the ksymbol register
+ * event.
+ */
+ perf_event_text_poke((void *)ops->trampoline, NULL, 0,
+ (void *)ops->trampoline,
+ ops->trampoline_size);
+ }
}
void ftrace_init_trace_array(struct trace_array *tr)
@@ -6746,8 +7298,7 @@ void ftrace_init_trace_array(struct trace_array *tr)
struct ftrace_ops global_ops = {
.func = ftrace_stub,
- .flags = FTRACE_OPS_FL_RECURSION_SAFE |
- FTRACE_OPS_FL_INITIALIZED |
+ .flags = FTRACE_OPS_FL_INITIALIZED |
FTRACE_OPS_FL_PID,
};
@@ -6759,12 +7310,8 @@ static int __init ftrace_nodyn_init(void)
core_initcall(ftrace_nodyn_init);
static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
-static inline void ftrace_startup_enable(int command) { }
static inline void ftrace_startup_all(int command) { }
-# define ftrace_startup_sysctl() do { } while (0)
-# define ftrace_shutdown_sysctl() do { } while (0)
-
static void ftrace_update_trampoline(struct ftrace_ops *ops)
{
}
@@ -6797,20 +7344,20 @@ void ftrace_reset_array_ops(struct trace_array *tr)
static nokprobe_inline void
__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *ignored, struct pt_regs *regs)
+ struct ftrace_ops *ignored, struct ftrace_regs *fregs)
{
+ struct pt_regs *regs = ftrace_get_regs(fregs);
struct ftrace_ops *op;
int bit;
- bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
- if (bit < 0)
- return;
-
/*
- * Some of the ops may be dynamically allocated,
- * they must be freed after a synchronize_rcu().
+ * The ftrace_test_and_set_recursion() will disable preemption,
+ * which is required since some of the ops may be dynamically
+ * allocated, they must be freed after a synchronize_rcu().
*/
- preempt_disable_notrace();
+ bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
+ if (bit < 0)
+ return;
do_for_each_ftrace_op(op, ftrace_ops_list) {
/* Stub functions don't need to be called nor tested */
@@ -6819,8 +7366,6 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
/*
* Check the following for each ops before calling their func:
* if RCU flag is set, then rcu_is_watching() must be true
- * if PER_CPU is set, then ftrace_function_local_disable()
- * must be false
* Otherwise test if the ip matches the ops filter
*
* If any of the above fails then the op->func() is not executed.
@@ -6831,11 +7376,10 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
pr_warn("op=%p %pS\n", op, op);
goto out;
}
- op->func(ip, parent_ip, op, regs);
+ op->func(ip, parent_ip, op, fregs);
}
} while_for_each_ftrace_op(op);
out:
- preempt_enable_notrace();
trace_clear_recursion(bit);
}
@@ -6851,44 +7395,41 @@ out:
* Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
* An architecture can pass partial regs with ftrace_ops and still
* set the ARCH_SUPPORTS_FTRACE_OPS.
+ *
+ * In vmlinux.lds.h, ftrace_ops_list_func() is defined to be
+ * arch_ftrace_ops_list_func.
*/
#if ARCH_SUPPORTS_FTRACE_OPS
-static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *op, struct pt_regs *regs)
+void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct ftrace_regs *fregs)
{
- __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
+ __ftrace_ops_list_func(ip, parent_ip, NULL, fregs);
}
-NOKPROBE_SYMBOL(ftrace_ops_list_func);
#else
-static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
+void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
{
__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
}
-NOKPROBE_SYMBOL(ftrace_ops_no_ops);
#endif
+NOKPROBE_SYMBOL(arch_ftrace_ops_list_func);
/*
* If there's only one function registered but it does not support
- * recursion, needs RCU protection and/or requires per cpu handling, then
- * this function will be called by the mcount trampoline.
+ * recursion or needs RCU protection, then this function will be called
+ * by the mcount trampoline.
*/
static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *op, struct pt_regs *regs)
+ struct ftrace_ops *op, struct ftrace_regs *fregs)
{
int bit;
- if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching())
- return;
-
- bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
+ bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
if (bit < 0)
return;
- preempt_disable_notrace();
-
- op->func(ip, parent_ip, op, regs);
+ if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching())
+ op->func(ip, parent_ip, op, fregs);
- preempt_enable_notrace();
trace_clear_recursion(bit);
}
NOKPROBE_SYMBOL(ftrace_ops_assist_func);
@@ -6907,11 +7448,11 @@ NOKPROBE_SYMBOL(ftrace_ops_assist_func);
ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
{
/*
- * If the function does not handle recursion, needs to be RCU safe,
- * or does per cpu logic, then we need to call the assist handler.
+ * If the function does not handle recursion or needs to be RCU safe,
+ * then we need to call the assist handler.
*/
- if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE) ||
- ops->flags & FTRACE_OPS_FL_RCU)
+ if (ops->flags & (FTRACE_OPS_FL_RECURSION |
+ FTRACE_OPS_FL_RCU))
return ftrace_ops_assist_func;
return ops->func;
@@ -6919,7 +7460,9 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
static void
ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
- struct task_struct *prev, struct task_struct *next)
+ struct task_struct *prev,
+ struct task_struct *next,
+ unsigned int prev_state)
{
struct trace_array *tr = data;
struct trace_pid_list *pid_list;
@@ -6969,12 +7512,12 @@ void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
if (enable) {
register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
tr);
- register_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
+ register_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
tr);
} else {
unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
tr);
- unregister_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
+ unregister_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
tr);
}
}
@@ -7011,10 +7554,10 @@ static void clear_ftrace_pids(struct trace_array *tr, int type)
synchronize_rcu();
if ((type & TRACE_PIDS) && pid_list)
- trace_free_pid_list(pid_list);
+ trace_pid_list_free(pid_list);
if ((type & TRACE_NO_PIDS) && no_pid_list)
- trace_free_pid_list(no_pid_list);
+ trace_pid_list_free(no_pid_list);
}
void ftrace_clear_pids(struct trace_array *tr)
@@ -7255,7 +7798,7 @@ pid_write(struct file *filp, const char __user *ubuf,
if (filtered_pids) {
synchronize_rcu();
- trace_free_pid_list(filtered_pids);
+ trace_pid_list_free(filtered_pids);
} else if (pid_list && !other_pids) {
/* Register a probe to set whether to ignore the tracing of a task */
register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
@@ -7321,10 +7864,10 @@ static const struct file_operations ftrace_no_pid_fops = {
void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
- trace_create_file("set_ftrace_pid", 0644, d_tracer,
+ trace_create_file("set_ftrace_pid", TRACE_MODE_WRITE, d_tracer,
tr, &ftrace_pid_fops);
- trace_create_file("set_ftrace_notrace_pid", 0644, d_tracer,
- tr, &ftrace_no_pid_fops);
+ trace_create_file("set_ftrace_notrace_pid", TRACE_MODE_WRITE,
+ d_tracer, tr, &ftrace_no_pid_fops);
}
void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
@@ -7352,16 +7895,155 @@ void ftrace_kill(void)
}
/**
- * Test if ftrace is dead or not.
+ * ftrace_is_dead - Test if ftrace is dead or not.
+ *
+ * Returns 1 if ftrace is "dead", zero otherwise.
*/
int ftrace_is_dead(void)
{
return ftrace_disabled;
}
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+/*
+ * When registering ftrace_ops with IPMODIFY, it is necessary to make sure
+ * it doesn't conflict with any direct ftrace_ops. If there is existing
+ * direct ftrace_ops on a kernel function being patched, call
+ * FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER on it to enable sharing.
+ *
+ * @ops: ftrace_ops being registered.
+ *
+ * Returns:
+ * 0 on success;
+ * Negative on failure.
+ */
+static int prepare_direct_functions_for_ipmodify(struct ftrace_ops *ops)
+{
+ struct ftrace_func_entry *entry;
+ struct ftrace_hash *hash;
+ struct ftrace_ops *op;
+ int size, i, ret;
+
+ lockdep_assert_held_once(&direct_mutex);
+
+ if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
+ return 0;
+
+ hash = ops->func_hash->filter_hash;
+ size = 1 << hash->size_bits;
+ for (i = 0; i < size; i++) {
+ hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
+ unsigned long ip = entry->ip;
+ bool found_op = false;
+
+ mutex_lock(&ftrace_lock);
+ do_for_each_ftrace_op(op, ftrace_ops_list) {
+ if (!(op->flags & FTRACE_OPS_FL_DIRECT))
+ continue;
+ if (ops_references_ip(op, ip)) {
+ found_op = true;
+ break;
+ }
+ } while_for_each_ftrace_op(op);
+ mutex_unlock(&ftrace_lock);
+
+ if (found_op) {
+ if (!op->ops_func)
+ return -EBUSY;
+
+ ret = op->ops_func(op, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Similar to prepare_direct_functions_for_ipmodify, clean up after ops
+ * with IPMODIFY is unregistered. The cleanup is optional for most DIRECT
+ * ops.
+ */
+static void cleanup_direct_functions_after_ipmodify(struct ftrace_ops *ops)
+{
+ struct ftrace_func_entry *entry;
+ struct ftrace_hash *hash;
+ struct ftrace_ops *op;
+ int size, i;
+
+ if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
+ return;
+
+ mutex_lock(&direct_mutex);
+
+ hash = ops->func_hash->filter_hash;
+ size = 1 << hash->size_bits;
+ for (i = 0; i < size; i++) {
+ hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
+ unsigned long ip = entry->ip;
+ bool found_op = false;
+
+ mutex_lock(&ftrace_lock);
+ do_for_each_ftrace_op(op, ftrace_ops_list) {
+ if (!(op->flags & FTRACE_OPS_FL_DIRECT))
+ continue;
+ if (ops_references_ip(op, ip)) {
+ found_op = true;
+ break;
+ }
+ } while_for_each_ftrace_op(op);
+ mutex_unlock(&ftrace_lock);
+
+ /* The cleanup is optional, ignore any errors */
+ if (found_op && op->ops_func)
+ op->ops_func(op, FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER);
+ }
+ }
+ mutex_unlock(&direct_mutex);
+}
+
+#define lock_direct_mutex() mutex_lock(&direct_mutex)
+#define unlock_direct_mutex() mutex_unlock(&direct_mutex)
+
+#else /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
+
+static int prepare_direct_functions_for_ipmodify(struct ftrace_ops *ops)
+{
+ return 0;
+}
+
+static void cleanup_direct_functions_after_ipmodify(struct ftrace_ops *ops)
+{
+}
+
+#define lock_direct_mutex() do { } while (0)
+#define unlock_direct_mutex() do { } while (0)
+
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
+
+/*
+ * Similar to register_ftrace_function, except we don't lock direct_mutex.
+ */
+static int register_ftrace_function_nolock(struct ftrace_ops *ops)
+{
+ int ret;
+
+ ftrace_ops_init(ops);
+
+ mutex_lock(&ftrace_lock);
+
+ ret = ftrace_startup(ops, 0);
+
+ mutex_unlock(&ftrace_lock);
+
+ return ret;
+}
+
/**
* register_ftrace_function - register a function for profiling
- * @ops - ops structure that holds the function for profiling.
+ * @ops: ops structure that holds the function for profiling.
*
* Register a function to be called by all functions in the
* kernel.
@@ -7372,23 +8054,24 @@ int ftrace_is_dead(void)
*/
int register_ftrace_function(struct ftrace_ops *ops)
{
- int ret = -1;
-
- ftrace_ops_init(ops);
-
- mutex_lock(&ftrace_lock);
+ int ret;
- ret = ftrace_startup(ops, 0);
+ lock_direct_mutex();
+ ret = prepare_direct_functions_for_ipmodify(ops);
+ if (ret < 0)
+ goto out_unlock;
- mutex_unlock(&ftrace_lock);
+ ret = register_ftrace_function_nolock(ops);
+out_unlock:
+ unlock_direct_mutex();
return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_function);
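As a minimal illustration (the callback and ops names here are hypothetical, not from this patch; only the ftrace_regs-based callback signature used throughout this series is assumed), registering a callback looks roughly like:

	static void my_trace_callback(unsigned long ip, unsigned long parent_ip,
				      struct ftrace_ops *op, struct ftrace_regs *fregs)
	{
		/* Called for every function matching this ops' filter hash. */
	}

	static struct ftrace_ops my_trace_ops = {
		.func	= my_trace_callback,
	};

	/* Somewhere in init code: register_ftrace_function(&my_trace_ops); */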
/**
* unregister_ftrace_function - unregister a function for profiling.
- * @ops - ops structure that holds the function to unregister
+ * @ops: ops structure that holds the function to unregister
*
* Unregister a function that was added to be called by ftrace profiling.
*/
@@ -7400,10 +8083,127 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
ret = ftrace_shutdown(ops, 0);
mutex_unlock(&ftrace_lock);
+ cleanup_direct_functions_after_ipmodify(ops);
return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_function);
+static int symbols_cmp(const void *a, const void *b)
+{
+ const char **str_a = (const char **) a;
+ const char **str_b = (const char **) b;
+
+ return strcmp(*str_a, *str_b);
+}
+
+struct kallsyms_data {
+ unsigned long *addrs;
+ const char **syms;
+ size_t cnt;
+ size_t found;
+};
+
+/* This function gets called for all kernel and module symbols
+ * and returns 1 in case we resolved all the requested symbols,
+ * 0 otherwise.
+ */
+static int kallsyms_callback(void *data, const char *name, unsigned long addr)
+{
+ struct kallsyms_data *args = data;
+ const char **sym;
+ int idx;
+
+ sym = bsearch(&name, args->syms, args->cnt, sizeof(*args->syms), symbols_cmp);
+ if (!sym)
+ return 0;
+
+ idx = sym - args->syms;
+ if (args->addrs[idx])
+ return 0;
+
+ if (!ftrace_location(addr))
+ return 0;
+
+ args->addrs[idx] = addr;
+ args->found++;
+ return args->found == args->cnt ? 1 : 0;
+}
+
+/**
+ * ftrace_lookup_symbols - Lookup addresses for array of symbols
+ *
+ * @sorted_syms: array of symbol pointers to resolve,
+ * must be alphabetically sorted
+ * @cnt: number of symbols/addresses in @sorted_syms/@addrs arrays
+ * @addrs: array for storing resulting addresses
+ *
+ * This function looks up addresses for the array of symbols provided in
+ * the @sorted_syms array (must be alphabetically sorted) and stores them in
+ * @addrs array, which needs to be big enough to store at least @cnt
+ * addresses.
+ *
+ * This function returns 0 if all provided symbols are found,
+ * -ESRCH otherwise.
+ */
+int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs)
+{
+ struct kallsyms_data args;
+ int found_all;
+
+ memset(addrs, 0, sizeof(*addrs) * cnt);
+ args.addrs = addrs;
+ args.syms = sorted_syms;
+ args.cnt = cnt;
+ args.found = 0;
+
+ found_all = kallsyms_on_each_symbol(kallsyms_callback, &args);
+ if (found_all)
+ return 0;
+ found_all = module_kallsyms_on_each_symbol(NULL, kallsyms_callback, &args);
+ return found_all ? 0 : -ESRCH;
+}
+
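A rough caller sketch for the new helper (hypothetical function and symbol names; only the ftrace_lookup_symbols() signature above is assumed) would pass an alphabetically sorted array and a result buffer of the same length:

	static int example_resolve_addrs(void)
	{
		static const char *syms[] = { "schedule", "vfs_read" };	/* already sorted */
		unsigned long addrs[ARRAY_SIZE(syms)];

		/* Returns 0 only if every symbol resolved to an ftrace location. */
		return ftrace_lookup_symbols(syms, ARRAY_SIZE(syms), addrs);
	}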
+#ifdef CONFIG_SYSCTL
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+static void ftrace_startup_sysctl(void)
+{
+ int command;
+
+ if (unlikely(ftrace_disabled))
+ return;
+
+ /* Force update next time */
+ saved_ftrace_func = NULL;
+ /* ftrace_start_up is true if we want ftrace running */
+ if (ftrace_start_up) {
+ command = FTRACE_UPDATE_CALLS;
+ if (ftrace_graph_active)
+ command |= FTRACE_START_FUNC_RET;
+ ftrace_startup_enable(command);
+ }
+}
+
+static void ftrace_shutdown_sysctl(void)
+{
+ int command;
+
+ if (unlikely(ftrace_disabled))
+ return;
+
+ /* ftrace_start_up is true if ftrace is running */
+ if (ftrace_start_up) {
+ command = FTRACE_DISABLE_CALLS;
+ if (ftrace_graph_active)
+ command |= FTRACE_STOP_FUNC_RET;
+ ftrace_run_update_code(command);
+ }
+}
+#else
+# define ftrace_startup_sysctl() do { } while (0)
+# define ftrace_shutdown_sysctl() do { } while (0)
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
static bool is_permanent_ops_registered(void)
{
struct ftrace_ops *op;
@@ -7416,10 +8216,9 @@ static bool is_permanent_ops_registered(void)
return false;
}
-int
+static int
ftrace_enable_sysctl(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
int ret = -ENODEV;
@@ -7460,3 +8259,22 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
mutex_unlock(&ftrace_lock);
return ret;
}
+
+static struct ctl_table ftrace_sysctls[] = {
+ {
+ .procname = "ftrace_enabled",
+ .data = &ftrace_enabled,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = ftrace_enable_sysctl,
+ },
+ {}
+};
+
+static int __init ftrace_sysctl_init(void)
+{
+ register_sysctl_init("kernel", ftrace_sysctls);
+ return 0;
+}
+late_initcall(ftrace_sysctl_init);
+#endif
diff --git a/kernel/trace/ftrace_internal.h b/kernel/trace/ftrace_internal.h
index 382775edf690..5012c04f92c0 100644
--- a/kernel/trace/ftrace_internal.h
+++ b/kernel/trace/ftrace_internal.h
@@ -2,6 +2,9 @@
#ifndef _LINUX_KERNEL_FTRACE_INTERNAL_H
#define _LINUX_KERNEL_FTRACE_INTERNAL_H
+int __register_ftrace_function(struct ftrace_ops *ops);
+int __unregister_ftrace_function(struct ftrace_ops *ops);
+
#ifdef CONFIG_FUNCTION_TRACER
extern struct mutex ftrace_lock;
@@ -15,8 +18,6 @@ int ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs);
#else /* !CONFIG_DYNAMIC_FTRACE */
-int __register_ftrace_function(struct ftrace_ops *ops);
-int __unregister_ftrace_function(struct ftrace_ops *ops);
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(ops, command) \
({ \
diff --git a/kernel/trace/kprobe_event_gen_test.c b/kernel/trace/kprobe_event_gen_test.c
index 18b0f1cbb947..5a4b722b5045 100644
--- a/kernel/trace/kprobe_event_gen_test.c
+++ b/kernel/trace/kprobe_event_gen_test.c
@@ -21,7 +21,7 @@
* Then:
*
* # insmod kernel/trace/kprobe_event_gen_test.ko
- * # cat /sys/kernel/debug/tracing/trace
+ * # cat /sys/kernel/tracing/trace
*
* You should see many instances of the "gen_kprobe_test" and
* "gen_kretprobe_test" events in the trace buffer.
@@ -35,6 +35,49 @@
static struct trace_event_file *gen_kprobe_test;
static struct trace_event_file *gen_kretprobe_test;
+#define KPROBE_GEN_TEST_FUNC "do_sys_open"
+
+/* X86 */
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_32)
+#define KPROBE_GEN_TEST_ARG0 "dfd=%ax"
+#define KPROBE_GEN_TEST_ARG1 "filename=%dx"
+#define KPROBE_GEN_TEST_ARG2 "flags=%cx"
+#define KPROBE_GEN_TEST_ARG3 "mode=+4($stack)"
+
+/* ARM64 */
+#elif defined(CONFIG_ARM64)
+#define KPROBE_GEN_TEST_ARG0 "dfd=%x0"
+#define KPROBE_GEN_TEST_ARG1 "filename=%x1"
+#define KPROBE_GEN_TEST_ARG2 "flags=%x2"
+#define KPROBE_GEN_TEST_ARG3 "mode=%x3"
+
+/* ARM */
+#elif defined(CONFIG_ARM)
+#define KPROBE_GEN_TEST_ARG0 "dfd=%r0"
+#define KPROBE_GEN_TEST_ARG1 "filename=%r1"
+#define KPROBE_GEN_TEST_ARG2 "flags=%r2"
+#define KPROBE_GEN_TEST_ARG3 "mode=%r3"
+
+/* RISCV */
+#elif defined(CONFIG_RISCV)
+#define KPROBE_GEN_TEST_ARG0 "dfd=%a0"
+#define KPROBE_GEN_TEST_ARG1 "filename=%a1"
+#define KPROBE_GEN_TEST_ARG2 "flags=%a2"
+#define KPROBE_GEN_TEST_ARG3 "mode=%a3"
+
+/* others */
+#else
+#define KPROBE_GEN_TEST_ARG0 NULL
+#define KPROBE_GEN_TEST_ARG1 NULL
+#define KPROBE_GEN_TEST_ARG2 NULL
+#define KPROBE_GEN_TEST_ARG3 NULL
+#endif
+
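For orientation (an assumed equivalence, not text from the patch): on x86 the command built from these macros corresponds roughly to writing the following definition into /sys/kernel/tracing/kprobe_events:

	p:kprobes/gen_kprobe_test do_sys_open dfd=%ax filename=%dx flags=%cx mode=+4($stack)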
+static bool trace_event_file_is_valid(struct trace_event_file *input)
+{
+ return input && !IS_ERR(input);
+}
+
/*
* Test to make sure we can create a kprobe event, then add more
* fields.
@@ -58,23 +101,23 @@ static int __init test_gen_kprobe_cmd(void)
* fields.
*/
ret = kprobe_event_gen_cmd_start(&cmd, "gen_kprobe_test",
- "do_sys_open",
- "dfd=%ax", "filename=%dx");
+ KPROBE_GEN_TEST_FUNC,
+ KPROBE_GEN_TEST_ARG0, KPROBE_GEN_TEST_ARG1);
if (ret)
- goto free;
+ goto out;
/* Use kprobe_event_add_fields to add the rest of the fields */
- ret = kprobe_event_add_fields(&cmd, "flags=%cx", "mode=+4($stack)");
+ ret = kprobe_event_add_fields(&cmd, KPROBE_GEN_TEST_ARG2, KPROBE_GEN_TEST_ARG3);
if (ret)
- goto free;
+ goto out;
/*
* This actually creates the event.
*/
ret = kprobe_event_gen_cmd_end(&cmd);
if (ret)
- goto free;
+ goto out;
/*
* Now get the gen_kprobe_test event file. We need to prevent
@@ -97,13 +140,13 @@ static int __init test_gen_kprobe_cmd(void)
goto delete;
}
out:
+ kfree(buf);
return ret;
delete:
+ if (trace_event_file_is_valid(gen_kprobe_test))
+ gen_kprobe_test = NULL;
/* We got an error after creating the event, delete it */
- ret = kprobe_event_delete("gen_kprobe_test");
- free:
- kfree(buf);
-
+ kprobe_event_delete("gen_kprobe_test");
goto out;
}
@@ -128,17 +171,17 @@ static int __init test_gen_kretprobe_cmd(void)
* Define the kretprobe event.
*/
ret = kretprobe_event_gen_cmd_start(&cmd, "gen_kretprobe_test",
- "do_sys_open",
+ KPROBE_GEN_TEST_FUNC,
"$retval");
if (ret)
- goto free;
+ goto out;
/*
* This actually creates the event.
*/
ret = kretprobe_event_gen_cmd_end(&cmd);
if (ret)
- goto free;
+ goto out;
/*
* Now get the gen_kretprobe_test event file. We need to
@@ -162,13 +205,13 @@ static int __init test_gen_kretprobe_cmd(void)
goto delete;
}
out:
+ kfree(buf);
return ret;
delete:
+ if (trace_event_file_is_valid(gen_kretprobe_test))
+ gen_kretprobe_test = NULL;
/* We got an error after creating the event, delete it */
- ret = kprobe_event_delete("gen_kretprobe_test");
- free:
- kfree(buf);
-
+ kprobe_event_delete("gen_kretprobe_test");
goto out;
}
@@ -182,10 +225,12 @@ static int __init kprobe_event_gen_test_init(void)
ret = test_gen_kretprobe_cmd();
if (ret) {
- WARN_ON(trace_array_set_clr_event(gen_kretprobe_test->tr,
- "kprobes",
- "gen_kretprobe_test", false));
- trace_put_event_file(gen_kretprobe_test);
+ if (trace_event_file_is_valid(gen_kretprobe_test)) {
+ WARN_ON(trace_array_set_clr_event(gen_kretprobe_test->tr,
+ "kprobes",
+ "gen_kretprobe_test", false));
+ trace_put_event_file(gen_kretprobe_test);
+ }
WARN_ON(kprobe_event_delete("gen_kretprobe_test"));
}
@@ -194,24 +239,30 @@ static int __init kprobe_event_gen_test_init(void)
static void __exit kprobe_event_gen_test_exit(void)
{
- /* Disable the event or you can't remove it */
- WARN_ON(trace_array_set_clr_event(gen_kprobe_test->tr,
- "kprobes",
- "gen_kprobe_test", false));
+ if (trace_event_file_is_valid(gen_kprobe_test)) {
+ /* Disable the event or you can't remove it */
+ WARN_ON(trace_array_set_clr_event(gen_kprobe_test->tr,
+ "kprobes",
+ "gen_kprobe_test", false));
+
+ /* Now give the file and instance back */
+ trace_put_event_file(gen_kprobe_test);
+ }
- /* Now give the file and instance back */
- trace_put_event_file(gen_kprobe_test);
/* Now unregister and free the event */
WARN_ON(kprobe_event_delete("gen_kprobe_test"));
- /* Disable the event or you can't remove it */
- WARN_ON(trace_array_set_clr_event(gen_kprobe_test->tr,
- "kprobes",
- "gen_kretprobe_test", false));
+ if (trace_event_file_is_valid(gen_kretprobe_test)) {
+ /* Disable the event or you can't remove it */
+ WARN_ON(trace_array_set_clr_event(gen_kretprobe_test->tr,
+ "kprobes",
+ "gen_kretprobe_test", false));
+
+ /* Now give the file and instance back */
+ trace_put_event_file(gen_kretprobe_test);
+ }
- /* Now give the file and instance back */
- trace_put_event_file(gen_kretprobe_test);
/* Now unregister and free the event */
WARN_ON(kprobe_event_delete("gen_kretprobe_test"));
diff --git a/kernel/trace/pid_list.c b/kernel/trace/pid_list.c
new file mode 100644
index 000000000000..95106d02b32d
--- /dev/null
+++ b/kernel/trace/pid_list.c
@@ -0,0 +1,495 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 VMware Inc, Steven Rostedt <rostedt@goodmis.org>
+ */
+#include <linux/spinlock.h>
+#include <linux/irq_work.h>
+#include <linux/slab.h>
+#include "trace.h"
+
+/* See pid_list.h for details */
+
+static inline union lower_chunk *get_lower_chunk(struct trace_pid_list *pid_list)
+{
+ union lower_chunk *chunk;
+
+ lockdep_assert_held(&pid_list->lock);
+
+ if (!pid_list->lower_list)
+ return NULL;
+
+ chunk = pid_list->lower_list;
+ pid_list->lower_list = chunk->next;
+ pid_list->free_lower_chunks--;
+ WARN_ON_ONCE(pid_list->free_lower_chunks < 0);
+ chunk->next = NULL;
+ /*
+ * If a refill needs to happen, it can not happen here
+ * as the scheduler run queue locks are held.
+ */
+ if (pid_list->free_lower_chunks <= CHUNK_REALLOC)
+ irq_work_queue(&pid_list->refill_irqwork);
+
+ return chunk;
+}
+
+static inline union upper_chunk *get_upper_chunk(struct trace_pid_list *pid_list)
+{
+ union upper_chunk *chunk;
+
+ lockdep_assert_held(&pid_list->lock);
+
+ if (!pid_list->upper_list)
+ return NULL;
+
+ chunk = pid_list->upper_list;
+ pid_list->upper_list = chunk->next;
+ pid_list->free_upper_chunks--;
+ WARN_ON_ONCE(pid_list->free_upper_chunks < 0);
+ chunk->next = NULL;
+ /*
+ * If a refill needs to happen, it can not happen here
+ * as the scheduler run queue locks are held.
+ */
+ if (pid_list->free_upper_chunks <= CHUNK_REALLOC)
+ irq_work_queue(&pid_list->refill_irqwork);
+
+ return chunk;
+}
+
+static inline void put_lower_chunk(struct trace_pid_list *pid_list,
+ union lower_chunk *chunk)
+{
+ lockdep_assert_held(&pid_list->lock);
+
+ chunk->next = pid_list->lower_list;
+ pid_list->lower_list = chunk;
+ pid_list->free_lower_chunks++;
+}
+
+static inline void put_upper_chunk(struct trace_pid_list *pid_list,
+ union upper_chunk *chunk)
+{
+ lockdep_assert_held(&pid_list->lock);
+
+ chunk->next = pid_list->upper_list;
+ pid_list->upper_list = chunk;
+ pid_list->free_upper_chunks++;
+}
+
+static inline bool upper_empty(union upper_chunk *chunk)
+{
+ /*
+ * If chunk->data has no lower chunks, it will be the same
+ * as a zeroed bitmask. Use find_first_bit() to test it
+ * and if it doesn't find any bits set, then the array
+ * is empty.
+ */
+ int bit = find_first_bit((unsigned long *)chunk->data,
+ sizeof(chunk->data) * 8);
+ return bit >= sizeof(chunk->data) * 8;
+}
+
+static inline int pid_split(unsigned int pid, unsigned int *upper1,
+ unsigned int *upper2, unsigned int *lower)
+{
+ /* MAX_PID should cover all pids */
+ BUILD_BUG_ON(MAX_PID < PID_MAX_LIMIT);
+
+ /* In case a bad pid is passed in, then fail */
+ if (unlikely(pid >= MAX_PID))
+ return -1;
+
+ *upper1 = (pid >> UPPER1_SHIFT) & UPPER_MASK;
+ *upper2 = (pid >> UPPER2_SHIFT) & UPPER_MASK;
+ *lower = pid & LOWER_MASK;
+
+ return 0;
+}
+
+static inline unsigned int pid_join(unsigned int upper1,
+ unsigned int upper2, unsigned int lower)
+{
+ return ((upper1 & UPPER_MASK) << UPPER1_SHIFT) |
+ ((upper2 & UPPER_MASK) << UPPER2_SHIFT) |
+ (lower & LOWER_MASK);
+}
+
+/**
+ * trace_pid_list_is_set - test if the pid is set in the list
+ * @pid_list: The pid list to test
+ * @pid: The pid to see if set in the list.
+ *
+ * Tests if @pid is set in the @pid_list. This is usually called
+ * from the scheduler when a task is scheduled. Its pid is checked
+ * if it should be traced or not.
+ *
+ * Return true if the pid is in the list, false otherwise.
+ */
+bool trace_pid_list_is_set(struct trace_pid_list *pid_list, unsigned int pid)
+{
+ union upper_chunk *upper_chunk;
+ union lower_chunk *lower_chunk;
+ unsigned long flags;
+ unsigned int upper1;
+ unsigned int upper2;
+ unsigned int lower;
+ bool ret = false;
+
+ if (!pid_list)
+ return false;
+
+ if (pid_split(pid, &upper1, &upper2, &lower) < 0)
+ return false;
+
+ raw_spin_lock_irqsave(&pid_list->lock, flags);
+ upper_chunk = pid_list->upper[upper1];
+ if (upper_chunk) {
+ lower_chunk = upper_chunk->data[upper2];
+ if (lower_chunk)
+ ret = test_bit(lower, lower_chunk->data);
+ }
+ raw_spin_unlock_irqrestore(&pid_list->lock, flags);
+
+ return ret;
+}
+
+/**
+ * trace_pid_list_set - add a pid to the list
+ * @pid_list: The pid list to add the @pid to.
+ * @pid: The pid to add.
+ *
+ * Adds @pid to @pid_list. This is usually done explicitly by a user
+ * adding a task to be traced, or indirectly by the fork function
+ * when children should be traced and a task's pid is in the list.
+ *
+ * Return 0 on success, negative otherwise.
+ */
+int trace_pid_list_set(struct trace_pid_list *pid_list, unsigned int pid)
+{
+ union upper_chunk *upper_chunk;
+ union lower_chunk *lower_chunk;
+ unsigned long flags;
+ unsigned int upper1;
+ unsigned int upper2;
+ unsigned int lower;
+ int ret;
+
+ if (!pid_list)
+ return -ENODEV;
+
+ if (pid_split(pid, &upper1, &upper2, &lower) < 0)
+ return -EINVAL;
+
+ raw_spin_lock_irqsave(&pid_list->lock, flags);
+ upper_chunk = pid_list->upper[upper1];
+ if (!upper_chunk) {
+ upper_chunk = get_upper_chunk(pid_list);
+ if (!upper_chunk) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ pid_list->upper[upper1] = upper_chunk;
+ }
+ lower_chunk = upper_chunk->data[upper2];
+ if (!lower_chunk) {
+ lower_chunk = get_lower_chunk(pid_list);
+ if (!lower_chunk) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ upper_chunk->data[upper2] = lower_chunk;
+ }
+ set_bit(lower, lower_chunk->data);
+ ret = 0;
+ out:
+ raw_spin_unlock_irqrestore(&pid_list->lock, flags);
+ return ret;
+}
+
+/**
+ * trace_pid_list_clear - remove a pid from the list
+ * @pid_list: The pid list to remove the @pid from.
+ * @pid: The pid to remove.
+ *
+ * Removes @pid from @pid_list. This is usually done explicitly by a user
+ * removing tasks from tracing, or indirectly by the exit function
+ * when a task that is set to be traced exits.
+ *
+ * Return 0 on success, negative otherwise.
+ */
+int trace_pid_list_clear(struct trace_pid_list *pid_list, unsigned int pid)
+{
+ union upper_chunk *upper_chunk;
+ union lower_chunk *lower_chunk;
+ unsigned long flags;
+ unsigned int upper1;
+ unsigned int upper2;
+ unsigned int lower;
+
+ if (!pid_list)
+ return -ENODEV;
+
+ if (pid_split(pid, &upper1, &upper2, &lower) < 0)
+ return -EINVAL;
+
+ raw_spin_lock_irqsave(&pid_list->lock, flags);
+ upper_chunk = pid_list->upper[upper1];
+ if (!upper_chunk)
+ goto out;
+
+ lower_chunk = upper_chunk->data[upper2];
+ if (!lower_chunk)
+ goto out;
+
+ clear_bit(lower, lower_chunk->data);
+
+ /* if there's no more bits set, add it to the free list */
+ if (find_first_bit(lower_chunk->data, LOWER_MAX) >= LOWER_MAX) {
+ put_lower_chunk(pid_list, lower_chunk);
+ upper_chunk->data[upper2] = NULL;
+ if (upper_empty(upper_chunk)) {
+ put_upper_chunk(pid_list, upper_chunk);
+ pid_list->upper[upper1] = NULL;
+ }
+ }
+ out:
+ raw_spin_unlock_irqrestore(&pid_list->lock, flags);
+ return 0;
+}
+
+/**
+ * trace_pid_list_next - return the next pid in the list
+ * @pid_list: The pid list to examine.
+ * @pid: The pid to start from
+ * @next: The pointer to place the pid that is set starting from @pid.
+ *
+ * Looks for the next consecutive pid that is in @pid_list starting
+ * at the pid specified by @pid. If one is set (including @pid), then
+ * that pid is placed into @next.
+ *
+ * Return 0 when a pid is found, -1 if there are no more pids included.
+ */
+int trace_pid_list_next(struct trace_pid_list *pid_list, unsigned int pid,
+ unsigned int *next)
+{
+ union upper_chunk *upper_chunk;
+ union lower_chunk *lower_chunk;
+ unsigned long flags;
+ unsigned int upper1;
+ unsigned int upper2;
+ unsigned int lower;
+
+ if (!pid_list)
+ return -ENODEV;
+
+ if (pid_split(pid, &upper1, &upper2, &lower) < 0)
+ return -EINVAL;
+
+ raw_spin_lock_irqsave(&pid_list->lock, flags);
+ for (; upper1 <= UPPER_MASK; upper1++, upper2 = 0) {
+ upper_chunk = pid_list->upper[upper1];
+
+ if (!upper_chunk)
+ continue;
+
+ for (; upper2 <= UPPER_MASK; upper2++, lower = 0) {
+ lower_chunk = upper_chunk->data[upper2];
+ if (!lower_chunk)
+ continue;
+
+ lower = find_next_bit(lower_chunk->data, LOWER_MAX,
+ lower);
+ if (lower < LOWER_MAX)
+ goto found;
+ }
+ }
+
+ found:
+ raw_spin_unlock_irqrestore(&pid_list->lock, flags);
+ if (upper1 > UPPER_MASK)
+ return -1;
+
+ *next = pid_join(upper1, upper2, lower);
+ return 0;
+}
+
+/**
+ * trace_pid_list_first - return the first pid in the list
+ * @pid_list: The pid list to examine.
+ * @pid: The pointer to place the first found pid that is set.
+ *
+ * Looks for the first pid that is set in @pid_list, and places it
+ * into @pid if found.
+ *
+ * Return 0 when a pid is found, -1 if there are no pids set.
+ */
+int trace_pid_list_first(struct trace_pid_list *pid_list, unsigned int *pid)
+{
+ return trace_pid_list_next(pid_list, 0, pid);
+}
+
+static void pid_list_refill_irq(struct irq_work *iwork)
+{
+ struct trace_pid_list *pid_list = container_of(iwork, struct trace_pid_list,
+ refill_irqwork);
+ union upper_chunk *upper = NULL;
+ union lower_chunk *lower = NULL;
+ union upper_chunk **upper_next = &upper;
+ union lower_chunk **lower_next = &lower;
+ int upper_count;
+ int lower_count;
+ int ucnt = 0;
+ int lcnt = 0;
+
+ again:
+ raw_spin_lock(&pid_list->lock);
+ upper_count = CHUNK_ALLOC - pid_list->free_upper_chunks;
+ lower_count = CHUNK_ALLOC - pid_list->free_lower_chunks;
+ raw_spin_unlock(&pid_list->lock);
+
+ if (upper_count <= 0 && lower_count <= 0)
+ return;
+
+ while (upper_count-- > 0) {
+ union upper_chunk *chunk;
+
+ chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
+ if (!chunk)
+ break;
+ *upper_next = chunk;
+ upper_next = &chunk->next;
+ ucnt++;
+ }
+
+ while (lower_count-- > 0) {
+ union lower_chunk *chunk;
+
+ chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
+ if (!chunk)
+ break;
+ *lower_next = chunk;
+ lower_next = &chunk->next;
+ lcnt++;
+ }
+
+ raw_spin_lock(&pid_list->lock);
+ if (upper) {
+ *upper_next = pid_list->upper_list;
+ pid_list->upper_list = upper;
+ pid_list->free_upper_chunks += ucnt;
+ }
+ if (lower) {
+ *lower_next = pid_list->lower_list;
+ pid_list->lower_list = lower;
+ pid_list->free_lower_chunks += lcnt;
+ }
+ raw_spin_unlock(&pid_list->lock);
+
+ /*
+ * On success of allocating all the chunks, both counters
+ * will be less than zero. If they are not, then an allocation
+ * failed, and we should not try again.
+ */
+ if (upper_count >= 0 || lower_count >= 0)
+ return;
+ /*
+ * When the locks were released, free chunks could have
+ * been used and allocation needs to be done again. Might as
+ * well allocate it now.
+ */
+ goto again;
+}
+
+/**
+ * trace_pid_list_alloc - create a new pid_list
+ *
+ * Allocates a new pid_list to store pids into.
+ *
+ * Returns the pid_list on success, NULL otherwise.
+ */
+struct trace_pid_list *trace_pid_list_alloc(void)
+{
+ struct trace_pid_list *pid_list;
+ int i;
+
+ /* According to linux/thread.h, pids can be no bigger than 30 bits */
+ WARN_ON_ONCE(pid_max > (1 << 30));
+
+ pid_list = kzalloc(sizeof(*pid_list), GFP_KERNEL);
+ if (!pid_list)
+ return NULL;
+
+ init_irq_work(&pid_list->refill_irqwork, pid_list_refill_irq);
+
+ raw_spin_lock_init(&pid_list->lock);
+
+ for (i = 0; i < CHUNK_ALLOC; i++) {
+ union upper_chunk *chunk;
+
+ chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
+ if (!chunk)
+ break;
+ chunk->next = pid_list->upper_list;
+ pid_list->upper_list = chunk;
+ pid_list->free_upper_chunks++;
+ }
+
+ for (i = 0; i < CHUNK_ALLOC; i++) {
+ union lower_chunk *chunk;
+
+ chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
+ if (!chunk)
+ break;
+ chunk->next = pid_list->lower_list;
+ pid_list->lower_list = chunk;
+ pid_list->free_lower_chunks++;
+ }
+
+ return pid_list;
+}
+
+/**
+ * trace_pid_list_free - Frees an allocated pid_list.
+ *
+ * Frees the memory for a pid_list that was allocated.
+ */
+void trace_pid_list_free(struct trace_pid_list *pid_list)
+{
+ union upper_chunk *upper;
+ union lower_chunk *lower;
+ int i, j;
+
+ if (!pid_list)
+ return;
+
+ irq_work_sync(&pid_list->refill_irqwork);
+
+ while (pid_list->lower_list) {
+ union lower_chunk *chunk;
+
+ chunk = pid_list->lower_list;
+ pid_list->lower_list = pid_list->lower_list->next;
+ kfree(chunk);
+ }
+
+ while (pid_list->upper_list) {
+ union upper_chunk *chunk;
+
+ chunk = pid_list->upper_list;
+ pid_list->upper_list = pid_list->upper_list->next;
+ kfree(chunk);
+ }
+
+ for (i = 0; i < UPPER1_SIZE; i++) {
+ upper = pid_list->upper[i];
+ if (upper) {
+ for (j = 0; j < UPPER2_SIZE; j++) {
+ lower = upper->data[j];
+ kfree(lower);
+ }
+ kfree(upper);
+ }
+ }
+ kfree(pid_list);
+}
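A minimal usage sketch of this API (illustrative only; the function name and pid values are made up) allocates a list, marks a pid, tests it, walks the set pids, and frees the list:

	static void example_pid_list_usage(void)
	{
		struct trace_pid_list *pl = trace_pid_list_alloc();
		unsigned int pid;

		if (!pl)
			return;

		trace_pid_list_set(pl, 1234);		/* mark pid 1234 for tracing */
		if (trace_pid_list_is_set(pl, 1234))
			pr_info("pid 1234 is in the list\n");

		/* Walk every pid currently set in the list. */
		for (pid = 0; trace_pid_list_next(pl, pid, &pid) == 0; pid++)
			pr_info("set: %u\n", pid);

		trace_pid_list_free(pl);
	}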
diff --git a/kernel/trace/pid_list.h b/kernel/trace/pid_list.h
new file mode 100644
index 000000000000..62e73f1ac85f
--- /dev/null
+++ b/kernel/trace/pid_list.h
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Do not include this file directly. */
+
+#ifndef _TRACE_INTERNAL_PID_LIST_H
+#define _TRACE_INTERNAL_PID_LIST_H
+
+/*
+ * In order to keep track of what pids to trace, a tree is created much
+ * like page tables are used. This creates a sparse bit map, where
+ * the tree is filled in when needed. A PID is at most 30 bits (see
+ * linux/thread.h), and is broken up into 3 sections used to index the
+ * bit map. The 8 MSBs form the "upper1" section, the next 8 MSBs the
+ * "upper2" section, and the 14 LSBs the "lower" section.
+ *
+ * A trace_pid_list structure holds the "upper1" section, in an
+ * array of 256 pointers (1 or 2K in size) to "upper_chunk" unions, where
+ * each has an array of 256 pointers (1 or 2K in size) to the "lower_chunk"
+ * structures, where each has an array of size 2K bytes representing a bitmask
+ * of the 14 LSB of the PID (256 * 8 = 2048)
+ *
+ * When a trace_pid_list is allocated, it includes the 256 pointer array
+ * of the upper1 unions. Then a "cache" of upper and lower is allocated
+ * where these will be assigned as needed.
+ *
+ * When a bit is set in the pid_list bitmask, the pid to use has
+ * the 8 MSB masked, and this is used to index the array in the
+ * pid_list to find the next upper union. If the element is NULL,
+ * then one is retrieved from the upper_list cache. If none is
+ * available, then -ENOMEM is returned.
+ *
+ * The next 8 MSB is used to index into the "upper2" section. If this
+ * element is NULL, then it is retrieved from the lower_list cache.
+ * Again, if one is not available -ENOMEM is returned.
+ *
+ * Finally the 14 LSB of the PID is used to set the bit in the 16384
+ * bitmask (made up of 2K bytes).
+ *
+ * When the second upper section or the lower section has their last
+ * bit cleared, they are added back to the free list to be reused
+ * when needed.
+ */
+
+#define UPPER_BITS 8
+#define UPPER_MAX (1 << UPPER_BITS)
+#define UPPER1_SIZE (1 << UPPER_BITS)
+#define UPPER2_SIZE (1 << UPPER_BITS)
+
+#define LOWER_BITS 14
+#define LOWER_MAX (1 << LOWER_BITS)
+#define LOWER_SIZE (LOWER_MAX / BITS_PER_LONG)
+
+#define UPPER1_SHIFT (LOWER_BITS + UPPER_BITS)
+#define UPPER2_SHIFT LOWER_BITS
+#define LOWER_MASK (LOWER_MAX - 1)
+
+#define UPPER_MASK (UPPER_MAX - 1)
+
+/* According to linux/thread.h pids can not be bigger than or equal to 1 << 30 */
+#define MAX_PID (1 << 30)
+
+/* Just keep 6 chunks of both upper and lower in the cache on alloc */
+#define CHUNK_ALLOC 6
+
+/* Have 2 chunks free, trigger a refill of the cache */
+#define CHUNK_REALLOC 2
+
+union lower_chunk {
+ union lower_chunk *next;
+ unsigned long data[LOWER_SIZE]; // 2K in size
+};
+
+union upper_chunk {
+ union upper_chunk *next;
+ union lower_chunk *data[UPPER2_SIZE]; // 1 or 2K in size
+};
+
+struct trace_pid_list {
+ raw_spinlock_t lock;
+ struct irq_work refill_irqwork;
+ union upper_chunk *upper[UPPER1_SIZE]; // 1 or 2K in size
+ union upper_chunk *upper_list;
+ union lower_chunk *lower_list;
+ int free_upper_chunks;
+ int free_lower_chunks;
+};
+
+#endif /* _TRACE_INTERNAL_PID_LIST_H */
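A worked example of the split defined above (illustrative numbers only), showing that pid_split() and pid_join() are inverses:

	/*
	 * Example: pid = 1000000 (fits in 30 bits)
	 *   upper1 = (1000000 >> 22) & 0xff = 0
	 *   upper2 = (1000000 >> 14) & 0xff = 61
	 *   lower  =  1000000 & 0x3fff      = 576
	 * pid_join(0, 61, 576) = (61 << 14) | 576 = 1000000
	 */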
diff --git a/kernel/trace/preemptirq_delay_test.c b/kernel/trace/preemptirq_delay_test.c
index 312d1a0ca3b6..8c4ffd076162 100644
--- a/kernel/trace/preemptirq_delay_test.c
+++ b/kernel/trace/preemptirq_delay_test.c
@@ -21,13 +21,16 @@
static ulong delay = 100;
static char test_mode[12] = "irq";
static uint burst_size = 1;
+static int cpu_affinity = -1;
module_param_named(delay, delay, ulong, 0444);
module_param_string(test_mode, test_mode, 12, 0444);
module_param_named(burst_size, burst_size, uint, 0444);
+module_param_named(cpu_affinity, cpu_affinity, int, 0444);
MODULE_PARM_DESC(delay, "Period in microseconds (100 us default)");
MODULE_PARM_DESC(test_mode, "Mode of the test such as preempt, irq, or alternate (default irq)");
MODULE_PARM_DESC(burst_size, "The size of a burst (default 1)");
+MODULE_PARM_DESC(cpu_affinity, "CPU number the test runs on");
static struct completion done;
@@ -36,7 +39,9 @@ static struct completion done;
static void busy_wait(ulong time)
{
u64 start, end;
+
start = trace_clock_local();
+
do {
end = trace_clock_local();
if (kthread_should_stop())
@@ -47,6 +52,7 @@ static void busy_wait(ulong time)
static __always_inline void irqoff_test(void)
{
unsigned long flags;
+
local_irq_save(flags);
busy_wait(delay);
local_irq_restore(flags);
@@ -113,6 +119,14 @@ static int preemptirq_delay_run(void *data)
{
int i;
int s = MIN(burst_size, NR_TEST_FUNCS);
+ struct cpumask cpu_mask;
+
+ if (cpu_affinity > -1) {
+ cpumask_clear(&cpu_mask);
+ cpumask_set_cpu(cpu_affinity, &cpu_mask);
+ if (set_cpus_allowed_ptr(current, &cpu_mask))
+ pr_err("cpu_affinity:%d, failed\n", cpu_affinity);
+ }
for (i = 0; i < s; i++)
(testfuncs[i])(i);
diff --git a/kernel/trace/rethook.c b/kernel/trace/rethook.c
new file mode 100644
index 000000000000..fa03094e9e69
--- /dev/null
+++ b/kernel/trace/rethook.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define pr_fmt(fmt) "rethook: " fmt
+
+#include <linux/bug.h>
+#include <linux/kallsyms.h>
+#include <linux/kprobes.h>
+#include <linux/preempt.h>
+#include <linux/rethook.h>
+#include <linux/slab.h>
+
+/* Return hook list (shadow stack by list) */
+
+/*
+ * This function is called from delayed_put_task_struct() when a task is
+ * dead and cleaned up to recycle any kretprobe instances associated with
+ * this task. These left over instances represent probed functions that
+ * have been called but will never return.
+ */
+void rethook_flush_task(struct task_struct *tk)
+{
+ struct rethook_node *rhn;
+ struct llist_node *node;
+
+ node = __llist_del_all(&tk->rethooks);
+ while (node) {
+ rhn = container_of(node, struct rethook_node, llist);
+ node = node->next;
+ preempt_disable();
+ rethook_recycle(rhn);
+ preempt_enable();
+ }
+}
+
+static void rethook_free_rcu(struct rcu_head *head)
+{
+ struct rethook *rh = container_of(head, struct rethook, rcu);
+ objpool_fini(&rh->pool);
+}
+
+/**
+ * rethook_stop() - Stop using a rethook.
+ * @rh: the struct rethook to stop.
+ *
+ * Stop using a rethook to prepare for freeing it. If you want to wait for
+ * all running rethook handlers before calling rethook_free(), you need to
+ * call this first, wait for an RCU grace period, and then call rethook_free().
+ */
+void rethook_stop(struct rethook *rh)
+{
+ rcu_assign_pointer(rh->handler, NULL);
+}
+
+/**
+ * rethook_free() - Free struct rethook.
+ * @rh: the struct rethook to be freed.
+ *
+ * Free the rethook. Before calling this function, the user must ensure that
+ * @rh::data is cleaned up if needed (otherwise, the handler can still access
+ * it after this call). This function schedules @rh to be freed only after
+ * all rethook_node instances are freed (i.e. not immediately), and the
+ * caller must not touch @rh after calling this.
+ */
+void rethook_free(struct rethook *rh)
+{
+ rethook_stop(rh);
+
+ call_rcu(&rh->rcu, rethook_free_rcu);
+}
+
+static int rethook_init_node(void *nod, void *context)
+{
+ struct rethook_node *node = nod;
+
+ node->rethook = context;
+ return 0;
+}
+
+static int rethook_fini_pool(struct objpool_head *head, void *context)
+{
+ kfree(context);
+ return 0;
+}
+
+static inline rethook_handler_t rethook_get_handler(struct rethook *rh)
+{
+ return (rethook_handler_t)rcu_dereference_check(rh->handler,
+ rcu_read_lock_any_held());
+}
+
+/**
+ * rethook_alloc() - Allocate struct rethook.
+ * @data: a data to pass the @handler when hooking the return.
+ * @handler: the return hook callback function, must NOT be NULL
+ * @size: node size: rethook node and additional data
+ * @num: number of rethook nodes to be preallocated
+ *
+ * Allocate and initialize a new rethook with @data and @handler.
+ * Return pointer of new rethook, or error codes for failures.
+ *
+ * Note that @handler == NULL means this rethook is going to be freed.
+ */
+struct rethook *rethook_alloc(void *data, rethook_handler_t handler,
+ int size, int num)
+{
+ struct rethook *rh;
+
+ if (!handler || num <= 0 || size < sizeof(struct rethook_node))
+ return ERR_PTR(-EINVAL);
+
+ rh = kzalloc(sizeof(struct rethook), GFP_KERNEL);
+ if (!rh)
+ return ERR_PTR(-ENOMEM);
+
+ rh->data = data;
+ rcu_assign_pointer(rh->handler, handler);
+
+ /* initialize the objpool for rethook nodes */
+ if (objpool_init(&rh->pool, num, size, GFP_KERNEL, rh,
+ rethook_init_node, rethook_fini_pool)) {
+ kfree(rh);
+ return ERR_PTR(-ENOMEM);
+ }
+ return rh;
+}
+
+static void free_rethook_node_rcu(struct rcu_head *head)
+{
+ struct rethook_node *node = container_of(head, struct rethook_node, rcu);
+ struct rethook *rh = node->rethook;
+
+ objpool_drop(node, &rh->pool);
+}
+
+/**
+ * rethook_recycle() - return the node to rethook.
+ * @node: The struct rethook_node to be returned.
+ *
+ * Return back the @node to @node::rethook. If the @node::rethook is already
+ * marked as freed, this will free the @node.
+ */
+void rethook_recycle(struct rethook_node *node)
+{
+ rethook_handler_t handler;
+
+ handler = rethook_get_handler(node->rethook);
+ if (likely(handler))
+ objpool_push(node, &node->rethook->pool);
+ else
+ call_rcu(&node->rcu, free_rethook_node_rcu);
+}
+NOKPROBE_SYMBOL(rethook_recycle);
+
+/**
+ * rethook_try_get() - get an unused rethook node.
+ * @rh: The struct rethook which pools the nodes.
+ *
+ * Get an unused rethook node from @rh. If the node pool is empty, this
+ * will return NULL. Caller must disable preemption.
+ */
+struct rethook_node *rethook_try_get(struct rethook *rh)
+{
+ rethook_handler_t handler = rethook_get_handler(rh);
+
+ /* Check whether @rh is going to be freed. */
+ if (unlikely(!handler))
+ return NULL;
+
+ /*
+ * This expects that the caller sets up a rethook on a function entry.
+ * When the function returns, the rethook node is eventually reclaimed
+ * or released in rethook_recycle() with call_rcu().
+ * This means the caller must run in an RCU-available context.
+ */
+ if (unlikely(!rcu_is_watching()))
+ return NULL;
+
+ return (struct rethook_node *)objpool_pop(&rh->pool);
+}
+NOKPROBE_SYMBOL(rethook_try_get);
+
+/**
+ * rethook_hook() - Hook the current function return.
+ * @node: The struct rethook node to hook the function return.
+ * @regs: The struct pt_regs for the function entry.
+ * @mcount: True if this is called from mcount(ftrace) context.
+ *
+ * Hook the return of the currently running function. This must be called at
+ * function entry (or at least @regs must be the registers of the function
+ * entry). @mcount identifies the context: if this is called from an ftrace
+ * (mcount) callback, @mcount must be set true; if it is called from the real
+ * function entry (e.g. kprobes), @mcount must be set false.
+ * This is because the way the function return is hooked depends on the context.
+ */
+void rethook_hook(struct rethook_node *node, struct pt_regs *regs, bool mcount)
+{
+ arch_rethook_prepare(node, regs, mcount);
+ __llist_add(&node->llist, &current->rethooks);
+}
+NOKPROBE_SYMBOL(rethook_hook);
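
Tying rethook_try_get() and rethook_hook() together, here is a hedged sketch of a function-entry callback that arms a return hook, reusing the hypothetical my_rh pool and my_hook_data layout from the earlier sketch; the entry callback itself and its registration are assumptions, not part of this patch.

/* Sketch: arm a return hook from a function-entry callback. */
static void my_entry_handler(struct pt_regs *regs)
{
	struct my_hook_data *d;
	struct rethook_node *rhn;

	/* The surrounding probe callback is assumed to run with preemption disabled. */
	rhn = rethook_try_get(my_rh);
	if (!rhn)		/* pool exhausted, or the rethook is being freed */
		return;

	d = container_of(rhn, struct my_hook_data, node);
	d->entry_ts = ktime_get_ns();

	/* false: called from the real function entry, not an mcount callback */
	rethook_hook(rhn, regs, false);
}

On return, the arch trampoline invokes the registered handler and the node is recycled back to the pool by rethook_recycle().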
+
+/* This assumes the 'tsk' is the current task or is not running. */
+static unsigned long __rethook_find_ret_addr(struct task_struct *tsk,
+ struct llist_node **cur)
+{
+ struct rethook_node *rh = NULL;
+ struct llist_node *node = *cur;
+
+ if (!node)
+ node = tsk->rethooks.first;
+ else
+ node = node->next;
+
+ while (node) {
+ rh = container_of(node, struct rethook_node, llist);
+ if (rh->ret_addr != (unsigned long)arch_rethook_trampoline) {
+ *cur = node;
+ return rh->ret_addr;
+ }
+ node = node->next;
+ }
+ return 0;
+}
+NOKPROBE_SYMBOL(__rethook_find_ret_addr);
+
+/**
+ * rethook_find_ret_addr -- Find correct return address modified by rethook
+ * @tsk: Target task
+ * @frame: A frame pointer
+ * @cur: a storage of the loop cursor llist_node pointer for next call
+ *
+ * Find the correct return address modified by a rethook on @tsk, returned
+ * as an unsigned long.
+ * The @tsk must be 'current' or a task which is not running. @frame is a hint
+ * to get the correct return address - which is compared with the
+ * rethook::frame field. The @cur is a loop cursor for searching the
+ * kretprobe return addresses on the @tsk. The '*@cur' should be NULL at the
+ * first call, but '@cur' itself must NOT be NULL.
+ *
+ * Returns found address value or zero if not found.
+ */
+unsigned long rethook_find_ret_addr(struct task_struct *tsk, unsigned long frame,
+ struct llist_node **cur)
+{
+ struct rethook_node *rhn = NULL;
+ unsigned long ret;
+
+ if (WARN_ON_ONCE(!cur))
+ return 0;
+
+ if (WARN_ON_ONCE(tsk != current && task_is_running(tsk)))
+ return 0;
+
+ do {
+ ret = __rethook_find_ret_addr(tsk, cur);
+ if (!ret)
+ break;
+ rhn = container_of(*cur, struct rethook_node, llist);
+ } while (rhn->frame != frame);
+
+ return ret;
+}
+NOKPROBE_SYMBOL(rethook_find_ret_addr);
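
To make the cursor protocol above concrete, here is a hedged sketch of how a stack unwinder might use it; the helper name is an assumption, while arch_rethook_trampoline is the arch symbol already referenced in this file.

/* Sketch: recover the real return address when the unwinder hits the trampoline. */
static unsigned long fixup_ret_addr(struct task_struct *tsk, unsigned long addr,
				    unsigned long frame, struct llist_node **cur)
{
	if (addr == (unsigned long)arch_rethook_trampoline)
		return rethook_find_ret_addr(tsk, frame, cur);

	return addr;
}

The caller initializes 'struct llist_node *cur = NULL' once before walking the stack and passes increasing @frame values as it descends, so repeated trampoline hits resolve to successively deeper hooked frames.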
+
+void __weak arch_rethook_fixup_return(struct pt_regs *regs,
+ unsigned long correct_ret_addr)
+{
+ /*
+ * Do nothing by default. If an architecture uses a frame pointer to
+ * record the real return address on the stack, it should implement
+ * this function to fix up the return address so that stacktrace
+ * works from the rethook handler.
+ */
+}
+
+/* This function will be called from each arch-defined trampoline. */
+unsigned long rethook_trampoline_handler(struct pt_regs *regs,
+ unsigned long frame)
+{
+ struct llist_node *first, *node = NULL;
+ unsigned long correct_ret_addr;
+ rethook_handler_t handler;
+ struct rethook_node *rhn;
+
+ correct_ret_addr = __rethook_find_ret_addr(current, &node);
+ if (!correct_ret_addr) {
+ pr_err("rethook: Return address not found! Maybe there is a bug in the kernel\n");
+ BUG_ON(1);
+ }
+
+ instruction_pointer_set(regs, correct_ret_addr);
+
+ /*
+ * These loops must be protected from rethook_free_rcu() because they
+ * access 'rhn->rethook'.
+ */
+ preempt_disable_notrace();
+
+ /*
+ * Run the handler on the shadow stack. Do not unlink the list here because
+ * stackdump inside the handlers needs to decode it.
+ */
+ first = current->rethooks.first;
+ while (first) {
+ rhn = container_of(first, struct rethook_node, llist);
+ if (WARN_ON_ONCE(rhn->frame != frame))
+ break;
+ handler = rethook_get_handler(rhn->rethook);
+ if (handler)
+ handler(rhn, rhn->rethook->data,
+ correct_ret_addr, regs);
+
+ if (first == node)
+ break;
+ first = first->next;
+ }
+
+ /* Fixup registers for returning to correct address. */
+ arch_rethook_fixup_return(regs, correct_ret_addr);
+
+ /* Unlink used shadow stack */
+ first = current->rethooks.first;
+ current->rethooks.first = node->next;
+ node->next = NULL;
+
+ while (first) {
+ rhn = container_of(first, struct rethook_node, llist);
+ first = first->next;
+ rethook_recycle(rhn);
+ }
+ preempt_enable_notrace();
+
+ return correct_ret_addr;
+}
+NOKPROBE_SYMBOL(rethook_trampoline_handler);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 00867ff82412..aa332ace108b 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -4,6 +4,7 @@
*
* Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
*/
+#include <linux/trace_recursion.h>
#include <linux/trace_events.h>
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
@@ -26,8 +27,17 @@
#include <linux/cpu.h>
#include <linux/oom.h>
+#include <asm/local64.h>
#include <asm/local.h>
+/*
+ * The "absolute" timestamp in the buffer is only 59 bits.
+ * If a clock has the 5 MSBs set, it needs to be saved and
+ * reinserted.
+ */
+#define TS_MSB (0xf8ULL << 56)
+#define ABS_TS_MASK (~TS_MSB)
+
static void update_pages_handler(struct work_struct *work);
/*
@@ -129,7 +139,16 @@ int ring_buffer_print_entry_header(struct trace_seq *s)
#define RB_ALIGNMENT 4U
#define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE 8U /* two 32bit words */
-#define RB_ALIGN_DATA __aligned(RB_ALIGNMENT)
+
+#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
+# define RB_FORCE_8BYTE_ALIGNMENT 0
+# define RB_ARCH_ALIGNMENT RB_ALIGNMENT
+#else
+# define RB_FORCE_8BYTE_ALIGNMENT 1
+# define RB_ARCH_ALIGNMENT 8U
+#endif
+
+#define RB_ALIGN_DATA __aligned(RB_ARCH_ALIGNMENT)
/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
@@ -145,7 +164,7 @@ enum {
#define extended_time(event) \
(event->type_len >= RINGBUF_TYPE_TIME_EXTEND)
-static inline int rb_null_event(struct ring_buffer_event *event)
+static inline bool rb_null_event(struct ring_buffer_event *event)
{
return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}
@@ -270,21 +289,14 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data);
#define for_each_buffer_cpu(buffer, cpu) \
for_each_cpu(cpu, buffer->cpumask)
+#define for_each_online_buffer_cpu(buffer, cpu) \
+ for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)
+
#define TS_SHIFT 27
#define TS_MASK ((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST (~TS_MASK)
-/**
- * ring_buffer_event_time_stamp - return the event's extended timestamp
- * @event: the event to get the timestamp of
- *
- * Returns the extended timestamp associated with a data event.
- * An extended time_stamp is a 64-bit timestamp represented
- * internally in a special way that makes the best use of space
- * contained within a ring buffer event. This function decodes
- * it and maps it to a straight u64 value.
- */
-u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event)
+static u64 rb_event_time_stamp(struct ring_buffer_event *event)
{
u64 ts;
@@ -306,6 +318,11 @@ struct buffer_data_page {
unsigned char data[] RB_ALIGN_DATA; /* data of buffer page */
};
+struct buffer_data_read_page {
+ unsigned order; /* order of the page */
+ struct buffer_data_page *data; /* actual data, stored in this page */
+};
+
/*
* Note, the buffer_page list must be first. The buffer pages
* are allocated in cache lines, which means that each buffer
@@ -320,6 +337,7 @@ struct buffer_page {
unsigned read; /* index for next read */
local_t entries; /* entries on this page */
unsigned long real_end; /* real end of data */
+ unsigned order; /* order of the page */
struct buffer_data_page *page; /* Actual data page */
};
@@ -343,59 +361,23 @@ static void rb_init_page(struct buffer_data_page *bpage)
local_set(&bpage->commit, 0);
}
-/*
- * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
- * this issue out.
- */
+static __always_inline unsigned int rb_page_commit(struct buffer_page *bpage)
+{
+ return local_read(&bpage->page->commit);
+}
+
static void free_buffer_page(struct buffer_page *bpage)
{
- free_page((unsigned long)bpage->page);
+ free_pages((unsigned long)bpage->page, bpage->order);
kfree(bpage);
}
/*
* We need to fit the time_stamp delta into 27 bits.
*/
-static inline int test_time_stamp(u64 delta)
+static inline bool test_time_stamp(u64 delta)
{
- if (delta & TS_DELTA_TEST)
- return 1;
- return 0;
-}
-
-#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
-
-/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
-#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
-
-int ring_buffer_print_page_header(struct trace_seq *s)
-{
- struct buffer_data_page field;
-
- trace_seq_printf(s, "\tfield: u64 timestamp;\t"
- "offset:0;\tsize:%u;\tsigned:%u;\n",
- (unsigned int)sizeof(field.time_stamp),
- (unsigned int)is_signed_type(u64));
-
- trace_seq_printf(s, "\tfield: local_t commit;\t"
- "offset:%u;\tsize:%u;\tsigned:%u;\n",
- (unsigned int)offsetof(typeof(field), commit),
- (unsigned int)sizeof(field.commit),
- (unsigned int)is_signed_type(long));
-
- trace_seq_printf(s, "\tfield: int overwrite;\t"
- "offset:%u;\tsize:%u;\tsigned:%u;\n",
- (unsigned int)offsetof(typeof(field), commit),
- 1,
- (unsigned int)is_signed_type(long));
-
- trace_seq_printf(s, "\tfield: char data;\t"
- "offset:%u;\tsize:%u;\tsigned:%u;\n",
- (unsigned int)offsetof(typeof(field), data),
- (unsigned int)BUF_PAGE_SIZE,
- (unsigned int)is_signed_type(char));
-
- return !trace_seq_has_overflowed(s);
+ return !!(delta & TS_DELTA_TEST);
}
struct rb_irq_work {
@@ -413,21 +395,38 @@ struct rb_irq_work {
struct rb_event_info {
u64 ts;
u64 delta;
+ u64 before;
+ u64 after;
unsigned long length;
struct buffer_page *tail_page;
int add_timestamp;
};
/*
+ * Used for the add_timestamp
+ * NONE
+ * EXTEND - wants a time extend
+ * ABSOLUTE - the buffer requests all events to have absolute time stamps
+ * FORCE - force a full time stamp.
+ */
+enum {
+ RB_ADD_STAMP_NONE = 0,
+ RB_ADD_STAMP_EXTEND = BIT(1),
+ RB_ADD_STAMP_ABSOLUTE = BIT(2),
+ RB_ADD_STAMP_FORCE = BIT(3)
+};
+/*
* Used for which event context the event is in.
- * NMI = 0
- * IRQ = 1
- * SOFTIRQ = 2
- * NORMAL = 3
+ * TRANSITION = 0
+ * NMI = 1
+ * IRQ = 2
+ * SOFTIRQ = 3
+ * NORMAL = 4
*
* See trace_recursive_lock() comment below for more details.
*/
enum {
+ RB_CTX_TRANSITION,
RB_CTX_NMI,
RB_CTX_IRQ,
RB_CTX_SOFTIRQ,
@@ -435,6 +434,13 @@ enum {
RB_CTX_MAX
};
+struct rb_time_struct {
+ local64_t time;
+};
+typedef struct rb_time_struct rb_time_t;
+
+#define MAX_NEST 5
+
/*
* head_page == tail_page && head == tail then buffer is empty.
*/
@@ -465,13 +471,18 @@ struct ring_buffer_per_cpu {
local_t committing;
local_t commits;
local_t pages_touched;
+ local_t pages_lost;
local_t pages_read;
long last_pages_touch;
size_t shortest_full;
unsigned long read;
unsigned long read_bytes;
- u64 write_stamp;
+ rb_time_t write_stamp;
+ rb_time_t before_stamp;
+ u64 event_stamp[MAX_NEST];
u64 read_stamp;
+ /* pages removed since last reset */
+ unsigned long pages_removed;
/* ring buffer pages to update, > 0 to add, < 0 to remove */
long nr_pages_to_update;
struct list_head new_pages; /* new pages to add */
@@ -485,6 +496,7 @@ struct trace_buffer {
unsigned flags;
int cpus;
atomic_t record_disabled;
+ atomic_t resizing;
cpumask_var_t cpumask;
struct lock_class_key *reader_lock_key;
@@ -498,6 +510,10 @@ struct trace_buffer {
struct rb_irq_work irq_work;
bool time_stamp_abs;
+
+ unsigned int subbuf_size;
+ unsigned int subbuf_order;
+ unsigned int max_data_size;
};
struct ring_buffer_iter {
@@ -507,12 +523,163 @@ struct ring_buffer_iter {
struct buffer_page *head_page;
struct buffer_page *cache_reader_page;
unsigned long cache_read;
+ unsigned long cache_pages_removed;
u64 read_stamp;
u64 page_stamp;
struct ring_buffer_event *event;
+ size_t event_size;
int missed_events;
};
+int ring_buffer_print_page_header(struct trace_buffer *buffer, struct trace_seq *s)
+{
+ struct buffer_data_page field;
+
+ trace_seq_printf(s, "\tfield: u64 timestamp;\t"
+ "offset:0;\tsize:%u;\tsigned:%u;\n",
+ (unsigned int)sizeof(field.time_stamp),
+ (unsigned int)is_signed_type(u64));
+
+ trace_seq_printf(s, "\tfield: local_t commit;\t"
+ "offset:%u;\tsize:%u;\tsigned:%u;\n",
+ (unsigned int)offsetof(typeof(field), commit),
+ (unsigned int)sizeof(field.commit),
+ (unsigned int)is_signed_type(long));
+
+ trace_seq_printf(s, "\tfield: int overwrite;\t"
+ "offset:%u;\tsize:%u;\tsigned:%u;\n",
+ (unsigned int)offsetof(typeof(field), commit),
+ 1,
+ (unsigned int)is_signed_type(long));
+
+ trace_seq_printf(s, "\tfield: char data;\t"
+ "offset:%u;\tsize:%u;\tsigned:%u;\n",
+ (unsigned int)offsetof(typeof(field), data),
+ (unsigned int)buffer->subbuf_size,
+ (unsigned int)is_signed_type(char));
+
+ return !trace_seq_has_overflowed(s);
+}
+
+static inline void rb_time_read(rb_time_t *t, u64 *ret)
+{
+ *ret = local64_read(&t->time);
+}
+static void rb_time_set(rb_time_t *t, u64 val)
+{
+ local64_set(&t->time, val);
+}
+
+/*
+ * Enable this to make sure that the event passed to
+ * ring_buffer_event_time_stamp() is not committed and is also
+ * on the buffer that was passed in.
+ */
+//#define RB_VERIFY_EVENT
+#ifdef RB_VERIFY_EVENT
+static struct list_head *rb_list_head(struct list_head *list);
+static void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
+ void *event)
+{
+ struct buffer_page *page = cpu_buffer->commit_page;
+ struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page);
+ struct list_head *next;
+ long commit, write;
+ unsigned long addr = (unsigned long)event;
+ bool done = false;
+ int stop = 0;
+
+ /* Make sure the event exists and is not committed yet */
+ do {
+ if (page == tail_page || WARN_ON_ONCE(stop++ > 100))
+ done = true;
+ commit = local_read(&page->page->commit);
+ write = local_read(&page->write);
+ if (addr >= (unsigned long)&page->page->data[commit] &&
+ addr < (unsigned long)&page->page->data[write])
+ return;
+
+ next = rb_list_head(page->list.next);
+ page = list_entry(next, struct buffer_page, list);
+ } while (!done);
+ WARN_ON_ONCE(1);
+}
+#else
+static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
+ void *event)
+{
+}
+#endif
+
+/*
+ * The absolute time stamp drops the 5 MSBs and some clocks may
+ * require them. The rb_fix_abs_ts() takes a previous full
+ * time stamp, and adds the 5 MSBs of that time stamp onto the
+ * saved absolute time stamp. Then they are compared in case of
+ * the unlikely event that the latest time stamp incremented
+ * the 5 MSBs.
+ */
+static inline u64 rb_fix_abs_ts(u64 abs, u64 save_ts)
+{
+ if (save_ts & TS_MSB) {
+ abs |= save_ts & TS_MSB;
+ /* Check for overflow */
+ if (unlikely(abs < save_ts))
+ abs += 1ULL << 59;
+ }
+ return abs;
+}
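
To make the restore above concrete, a standalone worked sketch with made-up numbers (the TS_MSB mask from the top of this patch is re-stated here only for illustration):

/* Sketch: restoring the top 5 bits that a 59-bit absolute stamp dropped. */
#define EX_TS_MSB	(0xf8ULL << 56)

static unsigned long long ex_fix_abs_ts(unsigned long long abs,
					unsigned long long save_ts)
{
	if (save_ts & EX_TS_MSB) {
		abs |= save_ts & EX_TS_MSB;	/* put the saved MSBs back */
		if (abs < save_ts)		/* the low 59 bits wrapped */
			abs += 1ULL << 59;	/* account for the rollover */
	}
	return abs;
}
/*
 * e.g. save_ts = 0x0800000000001000, abs = 0x2000:
 * the restored value is 0x0800000000002000 and no rollover is added.
 */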
+
+static inline u64 rb_time_stamp(struct trace_buffer *buffer);
+
+/**
+ * ring_buffer_event_time_stamp - return the event's current time stamp
+ * @buffer: The buffer that the event is on
+ * @event: the event to get the time stamp of
+ *
+ * Note, this must be called after @event is reserved, and before it is
+ * committed to the ring buffer. It must also be called from the same
+ * context where the event was reserved (normal, softirq, irq, etc).
+ *
+ * Returns the time stamp associated with the current event.
+ * If the event has an extended time stamp, then that is used as
+ * the time stamp to return.
+ * In the highly unlikely case that the event was nested more than
+ * the max nesting, then the write_stamp of the buffer is returned,
+ * otherwise current time is returned, but that really neither of
+ * the last two cases should ever happen.
+ */
+u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
+ struct ring_buffer_event *event)
+{
+ struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()];
+ unsigned int nest;
+ u64 ts;
+
+ /* If the event includes an absolute time, then just use that */
+ if (event->type_len == RINGBUF_TYPE_TIME_STAMP) {
+ ts = rb_event_time_stamp(event);
+ return rb_fix_abs_ts(ts, cpu_buffer->tail_page->page->time_stamp);
+ }
+
+ nest = local_read(&cpu_buffer->committing);
+ verify_event(cpu_buffer, event);
+ if (WARN_ON_ONCE(!nest))
+ goto fail;
+
+ /* Read the current saved nesting level time stamp */
+ if (likely(--nest < MAX_NEST))
+ return cpu_buffer->event_stamp[nest];
+
+ /* Shouldn't happen, warn if it does */
+ WARN_ONCE(1, "nest (%d) greater than max", nest);
+
+ fail:
+ rb_time_read(&cpu_buffer->write_stamp, &ts);
+
+ return ts;
+}
+
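
A hedged usage sketch for the function above (not from this patch): a writer that wants to embed the event's own time stamp in its payload reads it between reserve and commit, from the same context. The payload struct and function name are hypothetical, and the commit step is elided because its exact form is not shown here.

/* Sketch: read an event's time stamp between reserve and commit. */
struct my_payload {			/* hypothetical event payload */
	u64	ts;
	int	value;
};

static void write_timestamped(struct trace_buffer *buffer, int value)
{
	struct ring_buffer_event *event;
	struct my_payload *p;

	event = ring_buffer_lock_reserve(buffer, sizeof(*p));
	if (!event)
		return;

	p = ring_buffer_event_data(event);
	p->value = value;
	/* Must be after the reserve and before the commit, in this context */
	p->ts = ring_buffer_event_time_stamp(buffer, event);

	/* ... then commit the event as usual ... */
}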
/**
* ring_buffer_nr_pages - get the number of buffer pages in the ring buffer
* @buffer: The ring_buffer to get the number of pages from
@@ -526,7 +693,7 @@ size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu)
}
/**
- * ring_buffer_nr_pages_dirty - get the number of used pages in the ring buffer
+ * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
* @buffer: The ring_buffer to get the number of pages from
* @cpu: The cpu of the ring_buffer to get the number of pages from
*
@@ -535,10 +702,18 @@ size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu)
size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
{
size_t read;
+ size_t lost;
size_t cnt;
read = local_read(&buffer->buffers[cpu]->pages_read);
+ lost = local_read(&buffer->buffers[cpu]->pages_lost);
cnt = local_read(&buffer->buffers[cpu]->pages_touched);
+
+ if (WARN_ON_ONCE(cnt < lost))
+ return 0;
+
+ cnt -= lost;
+
/* The reader can read an empty page, but not more than that */
if (cnt < read) {
WARN_ON_ONCE(read > cnt + 1);
@@ -548,6 +723,26 @@ size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
return cnt - read;
}
+static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full)
+{
+ struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
+ size_t nr_pages;
+ size_t dirty;
+
+ nr_pages = cpu_buffer->nr_pages;
+ if (!nr_pages || !full)
+ return true;
+
+ /*
+ * Add one as dirty will never equal nr_pages, as the sub-buffer
+ * that the writer is on is not counted as dirty.
+ * This is needed if "buffer_percent" is set to 100.
+ */
+ dirty = ring_buffer_nr_dirty_pages(buffer, cpu) + 1;
+
+ return (dirty * 100) >= (full * nr_pages);
+}
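
For example, a sketch of the arithmetic above with made-up numbers, restated outside the kernel context:

/* Sketch: the watermark test performed by full_hit(). */
static int ex_watermark_hit(unsigned long dirty_pages, unsigned long nr_pages,
			    int full /* percent, e.g. buffer_percent */)
{
	unsigned long dirty = dirty_pages + 1;	/* the writer's sub-buffer counts */

	return (dirty * 100) >= ((unsigned long)full * nr_pages);
	/* nr_pages = 8, full = 75: hit once dirty reaches 6, as 600 >= 600 */
}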
+
/*
* rb_wake_up_waiters - wake up tasks waiting for ring buffer input
*
@@ -559,17 +754,99 @@ static void rb_wake_up_waiters(struct irq_work *work)
struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);
wake_up_all(&rbwork->waiters);
- if (rbwork->wakeup_full) {
+ if (rbwork->full_waiters_pending || rbwork->wakeup_full) {
+ /* Only cpu_buffer sets the above flags */
+ struct ring_buffer_per_cpu *cpu_buffer =
+ container_of(rbwork, struct ring_buffer_per_cpu, irq_work);
+
+ /* Called from interrupt context */
+ raw_spin_lock(&cpu_buffer->reader_lock);
rbwork->wakeup_full = false;
+ rbwork->full_waiters_pending = false;
+
+ /* Waking up all waiters, they will reset the shortest full */
+ cpu_buffer->shortest_full = 0;
+ raw_spin_unlock(&cpu_buffer->reader_lock);
+
wake_up_all(&rbwork->full_waiters);
}
}
/**
+ * ring_buffer_wake_waiters - wake up any waiters on this ring buffer
+ * @buffer: The ring buffer to wake waiters on
+ * @cpu: The CPU buffer to wake waiters on
+ *
+ * When a file that represents a ring buffer is closing,
+ * it is prudent to wake up any waiters that are on it.
+ */
+void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
+{
+ struct ring_buffer_per_cpu *cpu_buffer;
+ struct rb_irq_work *rbwork;
+
+ if (!buffer)
+ return;
+
+ if (cpu == RING_BUFFER_ALL_CPUS) {
+
+ /* Wake up individual ones too. One level recursion */
+ for_each_buffer_cpu(buffer, cpu)
+ ring_buffer_wake_waiters(buffer, cpu);
+
+ rbwork = &buffer->irq_work;
+ } else {
+ if (WARN_ON_ONCE(!buffer->buffers))
+ return;
+ if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
+ return;
+
+ cpu_buffer = buffer->buffers[cpu];
+ /* The CPU buffer may not have been initialized yet */
+ if (!cpu_buffer)
+ return;
+ rbwork = &cpu_buffer->irq_work;
+ }
+
+ /* This can be called in any context */
+ irq_work_queue(&rbwork->work);
+}
+
+static bool rb_watermark_hit(struct trace_buffer *buffer, int cpu, int full)
+{
+ struct ring_buffer_per_cpu *cpu_buffer;
+ bool ret = false;
+
+ /* Reads of all CPUs always wait for any data */
+ if (cpu == RING_BUFFER_ALL_CPUS)
+ return !ring_buffer_empty(buffer);
+
+ cpu_buffer = buffer->buffers[cpu];
+
+ if (!ring_buffer_empty_cpu(buffer, cpu)) {
+ unsigned long flags;
+ bool pagebusy;
+
+ if (!full)
+ return true;
+
+ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
+ ret = !pagebusy && full_hit(buffer, cpu, full);
+
+ if (!cpu_buffer->shortest_full ||
+ cpu_buffer->shortest_full > full)
+ cpu_buffer->shortest_full = full;
+ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ }
+ return ret;
+}
+
+/**
* ring_buffer_wait - wait for input to the ring buffer
* @buffer: buffer to wait on
* @cpu: the cpu buffer to wait on
- * @full: wait until a full page is available, if @cpu != RING_BUFFER_ALL_CPUS
+ * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
*
* If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
* as data is added to any of the @buffer's cpu buffers. Otherwise
@@ -577,7 +854,7 @@ static void rb_wake_up_waiters(struct irq_work *work)
*/
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
{
- struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
+ struct ring_buffer_per_cpu *cpu_buffer;
DEFINE_WAIT(wait);
struct rb_irq_work *work;
int ret = 0;
@@ -598,77 +875,54 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
work = &cpu_buffer->irq_work;
}
+ if (full)
+ prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
+ else
+ prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
- while (true) {
- if (full)
- prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
- else
- prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
-
- /*
- * The events can happen in critical sections where
- * checking a work queue can cause deadlocks.
- * After adding a task to the queue, this flag is set
- * only to notify events to try to wake up the queue
- * using irq_work.
- *
- * We don't clear it even if the buffer is no longer
- * empty. The flag only causes the next event to run
- * irq_work to do the work queue wake up. The worse
- * that can happen if we race with !trace_empty() is that
- * an event will cause an irq_work to try to wake up
- * an empty queue.
- *
- * There's no reason to protect this flag either, as
- * the work queue and irq_work logic will do the necessary
- * synchronization for the wake ups. The only thing
- * that is necessary is that the wake up happens after
- * a task has been queued. It's OK for spurious wake ups.
- */
- if (full)
- work->full_waiters_pending = true;
- else
- work->waiters_pending = true;
-
- if (signal_pending(current)) {
- ret = -EINTR;
- break;
- }
-
- if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
- break;
-
- if (cpu != RING_BUFFER_ALL_CPUS &&
- !ring_buffer_empty_cpu(buffer, cpu)) {
- unsigned long flags;
- bool pagebusy;
- size_t nr_pages;
- size_t dirty;
-
- if (!full)
- break;
+ /*
+ * The events can happen in critical sections where
+ * checking a work queue can cause deadlocks.
+ * After adding a task to the queue, this flag is set
+ * only to notify events to try to wake up the queue
+ * using irq_work.
+ *
+ * We don't clear it even if the buffer is no longer
+ * empty. The flag only causes the next event to run
+ * irq_work to do the work queue wake up. The worst
+ * that can happen if we race with !trace_empty() is that
+ * an event will cause an irq_work to try to wake up
+ * an empty queue.
+ *
+ * There's no reason to protect this flag either, as
+ * the work queue and irq_work logic will do the necessary
+ * synchronization for the wake ups. The only thing
+ * that is necessary is that the wake up happens after
+ * a task has been queued. It's OK for spurious wake ups.
+ */
+ if (full)
+ work->full_waiters_pending = true;
+ else
+ work->waiters_pending = true;
- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
- pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
- nr_pages = cpu_buffer->nr_pages;
- dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
- if (!cpu_buffer->shortest_full ||
- cpu_buffer->shortest_full < full)
- cpu_buffer->shortest_full = full;
- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
- if (!pagebusy &&
- (!nr_pages || (dirty * 100) > full * nr_pages))
- break;
- }
+ if (rb_watermark_hit(buffer, cpu, full))
+ goto out;
- schedule();
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ goto out;
}
+ schedule();
+ out:
if (full)
finish_wait(&work->full_waiters, &wait);
else
finish_wait(&work->waiters, &wait);
+ if (!ret && !rb_watermark_hit(buffer, cpu, full) && signal_pending(current))
+ ret = -EINTR;
+
return ret;
}
@@ -678,6 +932,7 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
* @cpu: the cpu buffer to wait on
* @filp: the file descriptor
* @poll_table: The poll descriptor
+ * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
*
* If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
* as data is added to any of the @buffer's cpu buffers. Otherwise
@@ -687,23 +942,38 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
* zero otherwise.
*/
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
- struct file *filp, poll_table *poll_table)
+ struct file *filp, poll_table *poll_table, int full)
{
struct ring_buffer_per_cpu *cpu_buffer;
- struct rb_irq_work *work;
+ struct rb_irq_work *rbwork;
- if (cpu == RING_BUFFER_ALL_CPUS)
- work = &buffer->irq_work;
- else {
+ if (cpu == RING_BUFFER_ALL_CPUS) {
+ rbwork = &buffer->irq_work;
+ full = 0;
+ } else {
if (!cpumask_test_cpu(cpu, buffer->cpumask))
- return -EINVAL;
+ return EPOLLERR;
cpu_buffer = buffer->buffers[cpu];
- work = &cpu_buffer->irq_work;
+ rbwork = &cpu_buffer->irq_work;
+ }
+
+ if (full) {
+ unsigned long flags;
+
+ poll_wait(filp, &rbwork->full_waiters, poll_table);
+
+ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ rbwork->full_waiters_pending = true;
+ if (!cpu_buffer->shortest_full ||
+ cpu_buffer->shortest_full > full)
+ cpu_buffer->shortest_full = full;
+ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ } else {
+ poll_wait(filp, &rbwork->waiters, poll_table);
+ rbwork->waiters_pending = true;
}
- poll_wait(filp, &work->waiters, poll_table);
- work->waiters_pending = true;
/*
* There's a tight race between setting the waiters_pending and
* checking if the ring buffer is empty. Once the waiters_pending bit
@@ -719,6 +989,9 @@ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
*/
smp_mb();
+ if (full)
+ return full_hit(buffer, cpu, full) ? EPOLLIN | EPOLLRDNORM : 0;
+
if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
(cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
return EPOLLIN | EPOLLRDNORM;
@@ -746,11 +1019,19 @@ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
static inline u64 rb_time_stamp(struct trace_buffer *buffer)
{
+ u64 ts;
+
+ /* Skip retpolines :-( */
+ if (IS_ENABLED(CONFIG_RETPOLINE) && likely(buffer->clock == trace_clock_local))
+ ts = trace_clock_local();
+ else
+ ts = buffer->clock();
+
/* shift to debug/test normalization and TIME_EXTENTS */
- return buffer->clock() << DEBUG_SHIFT;
+ return ts << DEBUG_SHIFT;
}
-u64 ring_buffer_time_stamp(struct trace_buffer *buffer, int cpu)
+u64 ring_buffer_time_stamp(struct trace_buffer *buffer)
{
u64 time;
@@ -868,8 +1149,7 @@ static struct list_head *rb_list_head(struct list_head *list)
* its flags will be non zero.
*/
static inline int
-rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
- struct buffer_page *page, struct list_head *list)
+rb_is_head_page(struct buffer_page *page, struct list_head *list)
{
unsigned long val;
@@ -898,8 +1178,7 @@ static bool rb_is_reader_page(struct buffer_page *page)
/*
* rb_set_list_to_head - set a list_head to be pointing to head.
*/
-static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
- struct list_head *list)
+static void rb_set_list_to_head(struct list_head *list)
{
unsigned long *ptr;
@@ -922,7 +1201,7 @@ static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
/*
* Set the previous list pointer to have the HEAD flag.
*/
- rb_set_list_to_head(cpu_buffer, head->list.prev);
+ rb_set_list_to_head(head->list.prev);
}
static void rb_list_head_clear(struct list_head *list)
@@ -997,8 +1276,7 @@ static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
old_flag, RB_PAGE_NORMAL);
}
-static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
- struct buffer_page **bpage)
+static inline void rb_inc_page(struct buffer_page **bpage)
{
struct list_head *p = rb_list_head((*bpage)->list.next);
@@ -1030,11 +1308,11 @@ rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
*/
for (i = 0; i < 3; i++) {
do {
- if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
+ if (rb_is_head_page(page, page->list.prev)) {
cpu_buffer->head_page = page;
return page;
}
- rb_inc_page(cpu_buffer, &page);
+ rb_inc_page(&page);
} while (page != head);
}
@@ -1043,19 +1321,16 @@ rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
return NULL;
}
-static int rb_head_page_replace(struct buffer_page *old,
+static bool rb_head_page_replace(struct buffer_page *old,
struct buffer_page *new)
{
unsigned long *ptr = (unsigned long *)&old->list.prev->next;
unsigned long val;
- unsigned long ret;
val = *ptr & ~RB_FLAG_MASK;
val |= RB_PAGE_HEAD;
- ret = cmpxchg(ptr, val, (unsigned long)&new->list);
-
- return ret == val;
+ return try_cmpxchg(ptr, &val, (unsigned long)&new->list);
}
/*
@@ -1122,28 +1397,12 @@ static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
}
}
-static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
+static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
struct buffer_page *bpage)
{
unsigned long val = (unsigned long)bpage;
- if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
- return 1;
-
- return 0;
-}
-
-/**
- * rb_check_list - make sure a pointer to a list has the last bits zero
- */
-static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
- struct list_head *list)
-{
- if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
- return 1;
- if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
- return 1;
- return 0;
+ RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK);
}
/**
@@ -1153,42 +1412,32 @@ static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
* As a safety measure we check to make sure the data pages have not
* been corrupted.
*/
-static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
+static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
- struct list_head *head = cpu_buffer->pages;
- struct buffer_page *bpage, *tmp;
-
- /* Reset the head page if it exists */
- if (cpu_buffer->head_page)
- rb_set_head_page(cpu_buffer);
+ struct list_head *head = rb_list_head(cpu_buffer->pages);
+ struct list_head *tmp;
- rb_head_page_deactivate(cpu_buffer);
-
- if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
- return -1;
- if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
- return -1;
+ if (RB_WARN_ON(cpu_buffer,
+ rb_list_head(rb_list_head(head->next)->prev) != head))
+ return;
- if (rb_check_list(cpu_buffer, head))
- return -1;
+ if (RB_WARN_ON(cpu_buffer,
+ rb_list_head(rb_list_head(head->prev)->next) != head))
+ return;
- list_for_each_entry_safe(bpage, tmp, head, list) {
+ for (tmp = rb_list_head(head->next); tmp != head; tmp = rb_list_head(tmp->next)) {
if (RB_WARN_ON(cpu_buffer,
- bpage->list.next->prev != &bpage->list))
- return -1;
+ rb_list_head(rb_list_head(tmp->next)->prev) != tmp))
+ return;
+
if (RB_WARN_ON(cpu_buffer,
- bpage->list.prev->next != &bpage->list))
- return -1;
- if (rb_check_list(cpu_buffer, &bpage->list))
- return -1;
+ rb_list_head(rb_list_head(tmp->prev)->next) != tmp))
+ return;
}
-
- rb_head_page_activate(cpu_buffer);
-
- return 0;
}
-static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
+static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
+ long nr_pages, struct list_head *pages)
{
struct buffer_page *bpage, *tmp;
bool user_thread = current->mm != NULL;
@@ -1228,16 +1477,20 @@ static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
struct page *page;
bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
- mflags, cpu_to_node(cpu));
+ mflags, cpu_to_node(cpu_buffer->cpu));
if (!bpage)
goto free_pages;
+ rb_check_bpage(cpu_buffer, bpage);
+
list_add(&bpage->list, pages);
- page = alloc_pages_node(cpu_to_node(cpu), mflags, 0);
+ page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags,
+ cpu_buffer->buffer->subbuf_order);
if (!page)
goto free_pages;
bpage->page = page_address(page);
+ bpage->order = cpu_buffer->buffer->subbuf_order;
rb_init_page(bpage->page);
if (user_thread && fatal_signal_pending(current))
@@ -1266,7 +1519,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
WARN_ON(!nr_pages);
- if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
+ if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages))
return -ENOMEM;
/*
@@ -1316,7 +1569,8 @@ rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
rb_check_bpage(cpu_buffer, bpage);
cpu_buffer->reader_page = bpage;
- page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
+
+ page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, cpu_buffer->buffer->subbuf_order);
if (!page)
goto fail_free_reader;
bpage->page = page_address(page);
@@ -1350,11 +1604,13 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
struct list_head *head = cpu_buffer->pages;
struct buffer_page *bpage, *tmp;
- free_buffer_page(cpu_buffer->reader_page);
+ irq_work_sync(&cpu_buffer->irq_work.work);
- rb_head_page_deactivate(cpu_buffer);
+ free_buffer_page(cpu_buffer->reader_page);
if (head) {
+ rb_head_page_deactivate(cpu_buffer);
+
list_for_each_entry_safe(bpage, tmp, head, list) {
list_del_init(&bpage->list);
free_buffer_page(bpage);
@@ -1363,6 +1619,8 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
free_buffer_page(bpage);
}
+ free_page((unsigned long)cpu_buffer->free_page);
+
kfree(cpu_buffer);
}
@@ -1395,7 +1653,14 @@ struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
goto fail_free_buffer;
- nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
+ /* Default buffer page size - one system page */
+ buffer->subbuf_order = 0;
+ buffer->subbuf_size = PAGE_SIZE - BUF_PAGE_HDR_SIZE;
+
+ /* Max payload is buffer page size - header (8bytes) */
+ buffer->max_data_size = buffer->subbuf_size - (sizeof(u32) * 2);
+
+ nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size);
buffer->flags = flags;
buffer->clock = trace_clock_local;
buffer->reader_lock_key = key;
@@ -1456,6 +1721,8 @@ ring_buffer_free(struct trace_buffer *buffer)
cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
+ irq_work_sync(&buffer->irq_work.work);
+
for_each_buffer_cpu(buffer, cpu)
rb_free_cpu_buffer(buffer->buffers[cpu]);
@@ -1494,7 +1761,7 @@ static inline unsigned long rb_page_write(struct buffer_page *bpage)
return local_read(&bpage->write) & RB_WRITE_MASK;
}
-static int
+static bool
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
{
struct list_head *tail_page, *to_remove, *next_page;
@@ -1535,6 +1802,8 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
to_remove = rb_list_head(to_remove)->next;
head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
}
+ /* Read iterators need to reset themselves when some pages are removed */
+ cpu_buffer->pages_removed += nr_removed;
next_page = rb_list_head(to_remove)->next;
@@ -1556,12 +1825,6 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
cpu_buffer->head_page = list_entry(next_page,
struct buffer_page, list);
- /*
- * change read pointer to make sure any read iterators reset
- * themselves
- */
- cpu_buffer->read = 0;
-
/* pages are removed, resume tracing and then free the pages */
atomic_dec(&cpu_buffer->record_disabled);
raw_spin_unlock_irq(&cpu_buffer->reader_lock);
@@ -1577,7 +1840,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
cond_resched();
to_remove_page = tmp_iter_page;
- rb_inc_page(cpu_buffer, &tmp_iter_page);
+ rb_inc_page(&tmp_iter_page);
/* update the counters */
page_entries = rb_page_entries(to_remove_page);
@@ -1589,7 +1852,8 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
* Increment overrun to account for the lost events.
*/
local_add(page_entries, &cpu_buffer->overrun);
- local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
+ local_sub(rb_page_commit(to_remove_page), &cpu_buffer->entries_bytes);
+ local_inc(&cpu_buffer->pages_lost);
}
/*
@@ -1606,13 +1870,16 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
return nr_removed == 0;
}
-static int
+static bool
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
struct list_head *pages = &cpu_buffer->new_pages;
- int retries, success;
+ unsigned long flags;
+ bool success;
+ int retries;
- raw_spin_lock_irq(&cpu_buffer->reader_lock);
+ /* Can be called at early boot up, where interrupts must not be enabled */
+ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
/*
* We are holding the reader lock, so the reader page won't be swapped
* in the ring buffer. Now we are racing with the writer trying to
@@ -1628,15 +1895,16 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
* spinning.
*/
retries = 10;
- success = 0;
+ success = false;
while (retries--) {
- struct list_head *head_page, *prev_page, *r;
+ struct list_head *head_page, *prev_page;
struct list_head *last_page, *first_page;
struct list_head *head_page_with_bit;
+ struct buffer_page *hpage = rb_set_head_page(cpu_buffer);
- head_page = &rb_set_head_page(cpu_buffer)->list;
- if (!head_page)
+ if (!hpage)
break;
+ head_page = &hpage->list;
prev_page = head_page->prev;
first_page = pages->next;
@@ -1648,16 +1916,16 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
last_page->next = head_page_with_bit;
first_page->prev = prev_page;
- r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);
-
- if (r == head_page_with_bit) {
+ /* caution: head_page_with_bit gets updated on cmpxchg failure */
+ if (try_cmpxchg(&prev_page->next,
+ &head_page_with_bit, first_page)) {
/*
* yay, we replaced the page pointer to our new list,
* now, we just have to update to head page's prev
* pointer to point to end of list
*/
head_page->prev = last_page;
- success = 1;
+ success = true;
break;
}
}
@@ -1669,7 +1937,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
* tracing
*/
RB_WARN_ON(cpu_buffer, !success);
- raw_spin_unlock_irq(&cpu_buffer->reader_lock);
+ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
/* free pages if they weren't inserted */
if (!success) {
@@ -1685,7 +1953,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
- int success;
+ bool success;
if (cpu_buffer->nr_pages_to_update > 0)
success = rb_insert_pages(cpu_buffer);
@@ -1711,7 +1979,7 @@ static void update_pages_handler(struct work_struct *work)
* @size: the new size.
* @cpu_id: the cpu buffer to resize
*
- * Minimum size is 2 * BUF_PAGE_SIZE.
+ * Minimum size is 2 * buffer->subbuf_size.
*
* Returns 0 on success and < 0 on failure.
*/
@@ -1720,30 +1988,28 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long nr_pages;
- int cpu, err = 0;
+ int cpu, err;
/*
* Always succeed at resizing a non-existent buffer:
*/
if (!buffer)
- return size;
+ return 0;
/* Make sure the requested buffer exists */
if (cpu_id != RING_BUFFER_ALL_CPUS &&
!cpumask_test_cpu(cpu_id, buffer->cpumask))
- return size;
+ return 0;
- nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
+ nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size);
/* we need a minimum of two pages */
if (nr_pages < 2)
nr_pages = 2;
- size = nr_pages * BUF_PAGE_SIZE;
-
/* prevent another thread from changing buffer sizes */
mutex_lock(&buffer->mutex);
-
+ atomic_inc(&buffer->resizing);
if (cpu_id == RING_BUFFER_ALL_CPUS) {
/*
@@ -1775,15 +2041,17 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
* allocated without receiving ENOMEM
*/
INIT_LIST_HEAD(&cpu_buffer->new_pages);
- if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
- &cpu_buffer->new_pages, cpu)) {
+ if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update,
+ &cpu_buffer->new_pages)) {
/* not enough memory for new pages */
err = -ENOMEM;
goto out_err;
}
+
+ cond_resched();
}
- get_online_cpus();
+ cpus_read_lock();
/*
* Fire off all the required work handlers
* We can't schedule on offline CPUs, but it's not necessary
@@ -1799,8 +2067,16 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
rb_update_pages(cpu_buffer);
cpu_buffer->nr_pages_to_update = 0;
} else {
- schedule_work_on(cpu,
- &cpu_buffer->update_pages_work);
+ /* Run directly if possible. */
+ migrate_disable();
+ if (cpu != smp_processor_id()) {
+ migrate_enable();
+ schedule_work_on(cpu,
+ &cpu_buffer->update_pages_work);
+ } else {
+ update_pages_handler(&cpu_buffer->update_pages_work);
+ migrate_enable();
+ }
}
}
@@ -1815,12 +2091,8 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
cpu_buffer->nr_pages_to_update = 0;
}
- put_online_cpus();
+ cpus_read_unlock();
} else {
- /* Make sure this CPU has been initialized */
- if (!cpumask_test_cpu(cpu_id, buffer->cpumask))
- goto out;
-
cpu_buffer = buffer->buffers[cpu_id];
if (nr_pages == cpu_buffer->nr_pages)
@@ -1841,25 +2113,33 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
INIT_LIST_HEAD(&cpu_buffer->new_pages);
if (cpu_buffer->nr_pages_to_update > 0 &&
- __rb_allocate_pages(cpu_buffer->nr_pages_to_update,
- &cpu_buffer->new_pages, cpu_id)) {
+ __rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update,
+ &cpu_buffer->new_pages)) {
err = -ENOMEM;
goto out_err;
}
- get_online_cpus();
+ cpus_read_lock();
/* Can't run something on an offline CPU. */
if (!cpu_online(cpu_id))
rb_update_pages(cpu_buffer);
else {
- schedule_work_on(cpu_id,
- &cpu_buffer->update_pages_work);
- wait_for_completion(&cpu_buffer->update_done);
+ /* Run directly if possible. */
+ migrate_disable();
+ if (cpu_id == smp_processor_id()) {
+ rb_update_pages(cpu_buffer);
+ migrate_enable();
+ } else {
+ migrate_enable();
+ schedule_work_on(cpu_id,
+ &cpu_buffer->update_pages_work);
+ wait_for_completion(&cpu_buffer->update_done);
+ }
}
cpu_buffer->nr_pages_to_update = 0;
- put_online_cpus();
+ cpus_read_unlock();
}
out:
@@ -1886,8 +2166,9 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
atomic_dec(&buffer->record_disabled);
}
+ atomic_dec(&buffer->resizing);
mutex_unlock(&buffer->mutex);
- return size;
+ return 0;
out_err:
for_each_buffer_cpu(buffer, cpu) {
@@ -1906,6 +2187,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
}
}
out_err_unlock:
+ atomic_dec(&buffer->resizing);
mutex_unlock(&buffer->mutex);
return err;
}
@@ -1934,11 +2216,6 @@ rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->reader_page->read);
}
-static __always_inline unsigned rb_page_commit(struct buffer_page *bpage)
-{
- return local_read(&bpage->page->commit);
-}
-
static struct ring_buffer_event *
rb_iter_head_event(struct ring_buffer_iter *iter)
{
@@ -1957,6 +2234,11 @@ rb_iter_head_event(struct ring_buffer_iter *iter)
*/
commit = rb_page_commit(iter_head_page);
smp_rmb();
+
+ /* An event needs to be at least 8 bytes in size */
+ if (iter->head > commit - 8)
+ goto reset;
+
event = __rb_page_index(iter_head_page, iter->head);
length = rb_event_length(event);
@@ -1966,7 +2248,7 @@ rb_iter_head_event(struct ring_buffer_iter *iter)
*/
barrier();
- if ((iter->head + length) > commit || length > BUF_MAX_DATA_SIZE)
+ if ((iter->head + length) > commit || length > iter->event_size)
/* Writer corrupted the read? */
goto reset;
@@ -2006,11 +2288,13 @@ rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
}
static __always_inline unsigned
-rb_event_index(struct ring_buffer_event *event)
+rb_event_index(struct ring_buffer_per_cpu *cpu_buffer, struct ring_buffer_event *event)
{
unsigned long addr = (unsigned long)event;
- return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
+ addr &= (PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1;
+
+ return addr - BUF_PAGE_HDR_SIZE;
}
static void rb_inc_iter(struct ring_buffer_iter *iter)
@@ -2026,7 +2310,7 @@ static void rb_inc_iter(struct ring_buffer_iter *iter)
if (iter->head_page == cpu_buffer->reader_page)
iter->head_page = rb_set_head_page(cpu_buffer);
else
- rb_inc_page(cpu_buffer, &iter->head_page);
+ rb_inc_page(&iter->head_page);
iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
iter->head = 0;
@@ -2079,7 +2363,8 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
* the counters.
*/
local_add(entries, &cpu_buffer->overrun);
- local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
+ local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes);
+ local_inc(&cpu_buffer->pages_lost);
/*
* The entries will be zeroed out when we move the
@@ -2129,7 +2414,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
* want the outer most commit to reset it.
*/
new_head = next_page;
- rb_inc_page(cpu_buffer, &new_head);
+ rb_inc_page(&new_head);
ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
RB_PAGE_NORMAL);
@@ -2198,6 +2483,7 @@ static inline void
rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
unsigned long tail, struct rb_event_info *info)
{
+ unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size);
struct buffer_page *tail_page = info->tail_page;
struct ring_buffer_event *event;
unsigned long length = info->length;
@@ -2206,13 +2492,13 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
* Only the event that crossed the page boundary
* must fill the old tail_page with padding.
*/
- if (tail >= BUF_PAGE_SIZE) {
+ if (tail >= bsize) {
/*
* If the page was filled, then we still need
* to update the real_end. Reset it to zero
* and the reader will ignore it.
*/
- if (tail == BUF_PAGE_SIZE)
+ if (tail == bsize)
tail_page->real_end = 0;
local_sub(length, &tail_page->write);
@@ -2221,9 +2507,6 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
event = __rb_page_index(tail_page, tail);
- /* account for padding bytes */
- local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
-
/*
* Save the original length to the meta data.
* This will be used by the reader to add lost event
@@ -2237,30 +2520,40 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
* write counter enough to allow another writer to slip
* in on this page.
* We put in a discarded commit instead, to make sure
- * that this space is not used again.
+ * that this space is not used again, and this space will
+ * not be accounted into 'entries_bytes'.
*
* If we are less than the minimum size, we don't need to
* worry about it.
*/
- if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
+ if (tail > (bsize - RB_EVNT_MIN_SIZE)) {
/* No room for any events */
/* Mark the rest of the page with padding */
rb_event_set_padding(event);
+ /* Make sure the padding is visible before the write update */
+ smp_wmb();
+
/* Set the write back to the previous setting */
local_sub(length, &tail_page->write);
return;
}
/* Put in a discarded event */
- event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
+ event->array[0] = (bsize - tail) - RB_EVNT_HDR_SIZE;
event->type_len = RINGBUF_TYPE_PADDING;
/* time delta must be non zero */
event->time_delta = 1;
+ /* account for padding bytes */
+ local_add(bsize - tail, &cpu_buffer->entries_bytes);
+
+ /* Make sure the padding is visible before the tail_page->write update */
+ smp_wmb();
+
/* Set write to end of buffer */
- length = (tail + length) - BUF_PAGE_SIZE;
+ length = (tail + length) - bsize;
local_sub(length, &tail_page->write);
}
@@ -2281,7 +2574,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
next_page = tail_page;
- rb_inc_page(cpu_buffer, &next_page);
+ rb_inc_page(&next_page);
/*
* If for some reason, we had an interrupt storm that made
@@ -2307,7 +2600,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
* the buffer, unless the commit page is still on the
* reader page.
*/
- if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
+ if (rb_is_head_page(next_page, &tail_page->list)) {
/*
* If the commit is not on the reader page, then
@@ -2338,7 +2631,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
* have filled up the buffer with events
* from interrupts and such, and wrapped.
*
- * Note, if the tail page is also the on the
+ * Note, if the tail page is also on the
* reader_page, we let it move out.
*/
if (unlikely((cpu_buffer->commit_page !=
@@ -2372,9 +2665,10 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
return NULL;
}
-/* Slow path, do not inline */
-static noinline struct ring_buffer_event *
-rb_add_time_stamp(struct ring_buffer_event *event, u64 delta, bool abs)
+/* Slow path */
+static struct ring_buffer_event *
+rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
+ struct ring_buffer_event *event, u64 delta, bool abs)
{
if (abs)
event->type_len = RINGBUF_TYPE_TIME_STAMP;
@@ -2382,7 +2676,7 @@ rb_add_time_stamp(struct ring_buffer_event *event, u64 delta, bool abs)
event->type_len = RINGBUF_TYPE_TIME_EXTEND;
/* Not the first event on the page, or not delta? */
- if (abs || rb_event_index(event)) {
+ if (abs || rb_event_index(cpu_buffer, event)) {
event->time_delta = delta & TS_MASK;
event->array[0] = delta >> TS_SHIFT;
} else {
@@ -2394,8 +2688,72 @@ rb_add_time_stamp(struct ring_buffer_event *event, u64 delta, bool abs)
return skip_time_extend(event);
}
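
As a quick numeric illustration of the 27-bit split used above (a standalone sketch; TS_SHIFT and TS_MASK from earlier in this file are re-stated only for illustration):

/* Sketch: splitting an oversized delta across time_delta and array[0]. */
#define EX_TS_SHIFT	27
#define EX_TS_MASK	((1ULL << EX_TS_SHIFT) - 1)

static void ex_split_delta(unsigned long long delta,
			   unsigned int *time_delta, unsigned int *array0)
{
	*time_delta = delta & EX_TS_MASK;	/* low 27 bits, kept in the header */
	*array0     = delta >> EX_TS_SHIFT;	/* remaining high bits */
}
/* e.g. delta = 0x12345678: time_delta = 0x2345678, array0 = 0x2 */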
-static inline bool rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
- struct ring_buffer_event *event);
+#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+static inline bool sched_clock_stable(void)
+{
+ return true;
+}
+#endif
+
+static void
+rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
+ struct rb_event_info *info)
+{
+ u64 write_stamp;
+
+ WARN_ONCE(1, "Delta way too big! %llu ts=%llu before=%llu after=%llu write stamp=%llu\n%s",
+ (unsigned long long)info->delta,
+ (unsigned long long)info->ts,
+ (unsigned long long)info->before,
+ (unsigned long long)info->after,
+ (unsigned long long)({rb_time_read(&cpu_buffer->write_stamp, &write_stamp); write_stamp;}),
+ sched_clock_stable() ? "" :
+ "If you just came from a suspend/resume,\n"
+ "please switch to the trace global clock:\n"
+ " echo global > /sys/kernel/tracing/trace_clock\n"
+ "or add trace_clock=global to the kernel command line\n");
+}
+
+static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
+ struct ring_buffer_event **event,
+ struct rb_event_info *info,
+ u64 *delta,
+ unsigned int *length)
+{
+ bool abs = info->add_timestamp &
+ (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE);
+
+ if (unlikely(info->delta > (1ULL << 59))) {
+ /*
+ * Some timers can use more than 59 bits, and when a timestamp
+ * is added to the buffer, it will lose those bits.
+ */
+ if (abs && (info->ts & TS_MSB)) {
+ info->delta &= ABS_TS_MASK;
+
+ /* did the clock go backwards */
+ } else if (info->before == info->after && info->before > info->ts) {
+ /* not interrupted */
+ static int once;
+
+ /*
+ * This is possible with a recalibration of the TSC.
+ * Do not produce a call stack, but just report it.
+ */
+ if (!once) {
+ once++;
+ pr_warn("Ring buffer clock went backwards: %llu -> %llu\n",
+ info->before, info->ts);
+ }
+ } else
+ rb_check_timestamp(cpu_buffer, info);
+ if (!abs)
+ info->delta = 0;
+ }
+ *event = rb_add_time_stamp(cpu_buffer, *event, info->delta, abs);
+ *length -= RB_LEN_TIME_EXTEND;
+ *delta = 0;
+}
/**
* rb_update_event - update event type and data
@@ -2415,26 +2773,21 @@ rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
{
unsigned length = info->length;
u64 delta = info->delta;
+ unsigned int nest = local_read(&cpu_buffer->committing) - 1;
- /* Only a commit updates the timestamp */
- if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
- delta = 0;
+ if (!WARN_ON_ONCE(nest >= MAX_NEST))
+ cpu_buffer->event_stamp[nest] = info->ts;
/*
* If we need to add a timestamp, then we
* add it to the start of the reserved space.
*/
- if (unlikely(info->add_timestamp)) {
- bool abs = ring_buffer_time_stamp_abs(cpu_buffer->buffer);
-
- event = rb_add_time_stamp(event, abs ? info->delta : delta, abs);
- length -= RB_LEN_TIME_EXTEND;
- delta = 0;
- }
+ if (unlikely(info->add_timestamp))
+ rb_add_timestamp(cpu_buffer, &event, info, &delta, &length);
event->time_delta = delta;
length -= RB_EVNT_HDR_SIZE;
- if (length > RB_MAX_SMALL_DATA) {
+ if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
event->type_len = 0;
event->array[0] = length;
} else
@@ -2449,11 +2802,11 @@ static unsigned rb_calculate_event_length(unsigned length)
if (!length)
length++;
- if (length > RB_MAX_SMALL_DATA)
+ if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
length += sizeof(event.array[0]);
length += RB_EVNT_HDR_SIZE;
- length = ALIGN(length, RB_ALIGNMENT);
+ length = ALIGN(length, RB_ARCH_ALIGNMENT);
/*
* In case the time delta is larger than the 27 bits for it
@@ -2473,33 +2826,51 @@ static unsigned rb_calculate_event_length(unsigned length)
return length;
}
-#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-static inline bool sched_clock_stable(void)
-{
- return true;
-}
-#endif
-
-static inline int
+static inline bool
rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
struct ring_buffer_event *event)
{
unsigned long new_index, old_index;
struct buffer_page *bpage;
- unsigned long index;
unsigned long addr;
- new_index = rb_event_index(event);
+ new_index = rb_event_index(cpu_buffer, event);
old_index = new_index + rb_event_ts_length(event);
addr = (unsigned long)event;
- addr &= PAGE_MASK;
+ addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1);
bpage = READ_ONCE(cpu_buffer->tail_page);
+ /*
+ * Make sure the tail_page is still the same and
+ * the next write location is the end of this event
+ */
if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
unsigned long write_mask =
local_read(&bpage->write) & ~RB_WRITE_MASK;
unsigned long event_length = rb_event_length(event);
+
+ /*
+ * Make the before_stamp different from the write_stamp
+ * so that the next event adds an absolute
+ * value and does not rely on the saved write stamp, which
+ * is now going to be bogus.
+ *
+ * By setting the before_stamp to zero, the next event
+ * is not going to use the write_stamp and will instead
+ * create an absolute timestamp. This means there's no
+ * reason to update the write_stamp!
+ */
+ rb_time_set(&cpu_buffer->before_stamp, 0);
+
+ /*
+ * If an event were to come in now, it would see that the
+ * write_stamp and the before_stamp are different, and assume
+ * that this event just added itself before updating
+ * the write stamp. The interrupting event will fix the
+ * write stamp for us, and use an absolute timestamp.
+ */
+
/*
* This is on the tail page. It is possible that
* a write could come in and move the tail page
@@ -2508,16 +2879,17 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
*/
old_index += write_mask;
new_index += write_mask;
- index = local_cmpxchg(&bpage->write, old_index, new_index);
- if (index == old_index) {
+
+ /* caution: old_index gets updated on cmpxchg failure */
+ if (local_try_cmpxchg(&bpage->write, &old_index, new_index)) {
/* update counters */
local_sub(event_length, &cpu_buffer->entries_bytes);
- return 1;
+ return true;
}
}
/* could not discard */
- return 0;
+ return false;
}
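/*
 * Standalone sketch, not part of this patch: the core idea of the discard
 * above.  An event can only be discarded in place while it is still the most
 * recent reservation, so a single compare-and-swap that moves the write index
 * back either succeeds (nothing was reserved after it) or fails (the event is
 * left to become padding).  Index handling here is illustrative; the kernel
 * additionally folds the upper bits of bpage->write into the comparison and
 * zeroes before_stamp first, as the comments above explain.
 */
#include <stdatomic.h>
#include <stdbool.h>

static _Atomic unsigned long sketch_write_index;

static bool sketch_try_to_discard(unsigned long event_start, unsigned long event_len)
{
	unsigned long expected = event_start + event_len;

	/* Succeeds only if the write index still points right past this event. */
	return atomic_compare_exchange_strong(&sketch_write_index,
					      &expected, event_start);
}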
static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
@@ -2548,19 +2920,21 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
if (RB_WARN_ON(cpu_buffer,
rb_is_reader_page(cpu_buffer->tail_page)))
return;
+ /*
+ * No need for a memory barrier here, as the update
+ * of the tail_page did it for this page.
+ */
local_set(&cpu_buffer->commit_page->page->commit,
rb_page_write(cpu_buffer->commit_page));
- rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
- /* Only update the write stamp if the page has an event */
- if (rb_page_write(cpu_buffer->commit_page))
- cpu_buffer->write_stamp =
- cpu_buffer->commit_page->page->time_stamp;
+ rb_inc_page(&cpu_buffer->commit_page);
/* add barrier to keep gcc from optimizing too much */
barrier();
}
while (rb_commit_index(cpu_buffer) !=
rb_page_write(cpu_buffer->commit_page)) {
+ /* Make sure the readers see the content of what is committed. */
+ smp_wmb();
local_set(&cpu_buffer->commit_page->page->commit,
rb_page_write(cpu_buffer->commit_page));
RB_WARN_ON(cpu_buffer,
@@ -2626,64 +3000,15 @@ static inline void rb_event_discard(struct ring_buffer_event *event)
event->time_delta = 1;
}
-static __always_inline bool
-rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
- struct ring_buffer_event *event)
-{
- unsigned long addr = (unsigned long)event;
- unsigned long index;
-
- index = rb_event_index(event);
- addr &= PAGE_MASK;
-
- return cpu_buffer->commit_page->page == (void *)addr &&
- rb_commit_index(cpu_buffer) == index;
-}
-
-static __always_inline void
-rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
- struct ring_buffer_event *event)
-{
- u64 delta;
-
- /*
- * The event first in the commit queue updates the
- * time stamp.
- */
- if (rb_event_is_commit(cpu_buffer, event)) {
- /*
- * A commit event that is first on a page
- * updates the write timestamp with the page stamp
- */
- if (!rb_event_index(event))
- cpu_buffer->write_stamp =
- cpu_buffer->commit_page->page->time_stamp;
- else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
- delta = ring_buffer_event_time_stamp(event);
- cpu_buffer->write_stamp += delta;
- } else if (event->type_len == RINGBUF_TYPE_TIME_STAMP) {
- delta = ring_buffer_event_time_stamp(event);
- cpu_buffer->write_stamp = delta;
- } else
- cpu_buffer->write_stamp += event->time_delta;
- }
-}
-
-static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
- struct ring_buffer_event *event)
+static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer)
{
local_inc(&cpu_buffer->entries);
- rb_update_write_stamp(cpu_buffer, event);
rb_end_commit(cpu_buffer);
}
static __always_inline void
rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
{
- size_t nr_pages;
- size_t dirty;
- size_t full;
-
if (buffer->irq_work.waiters_pending) {
buffer->irq_work.waiters_pending = false;
/* irq_work_queue() supplies it's own memory barriers */
@@ -2707,10 +3032,7 @@ rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched);
- full = cpu_buffer->shortest_full;
- nr_pages = cpu_buffer->nr_pages;
- dirty = ring_buffer_nr_dirty_pages(buffer, cpu_buffer->cpu);
- if (full && nr_pages && (dirty * 100) <= full * nr_pages)
+ if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full))
return;
cpu_buffer->irq_work.wakeup_full = true;
@@ -2719,6 +3041,13 @@ rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
irq_work_queue(&cpu_buffer->irq_work.work);
}
+#ifdef CONFIG_RING_BUFFER_RECORD_RECURSION
+# define do_ring_buffer_record_recursion() \
+ do_ftrace_record_recursion(_THIS_IP_, _RET_IP_)
+#else
+# define do_ring_buffer_record_recursion() do { } while (0)
+#endif
+
/*
* The lock and unlock are done within a preempt disable section.
* The current_context per_cpu variable can only be modified
@@ -2729,10 +3058,10 @@ rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
* a bit of overhead in something as critical as function tracing,
* we use a bitmask trick.
*
- * bit 0 = NMI context
- * bit 1 = IRQ context
- * bit 2 = SoftIRQ context
- * bit 3 = normal context.
+ * bit 1 = NMI context
+ * bit 2 = IRQ context
+ * bit 3 = SoftIRQ context
+ * bit 4 = normal context.
*
* This works because this is the order of contexts that can
* preempt other contexts. A SoftIRQ never preempts an IRQ
@@ -2755,28 +3084,57 @@ rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
* The least significant bit can be cleared this way, and it
* just so happens that it is the same bit corresponding to
* the current context.
+ *
+ * Now the TRANSITION bit breaks the above slightly. The TRANSITION bit
+ * is set when a recursion is detected at the current context, and if
+ * the TRANSITION bit is already set, it will fail the recursion.
+ * This is needed because there's a lag between the changing of
+ * interrupt context and updating the preempt count. In this case,
+ * a false positive will be found. To handle this, one extra recursion
+ * is allowed, and this is done by the TRANSITION bit. If the TRANSITION
+ * bit is already set, then it is considered a recursion and the function
+ * ends. Otherwise, the TRANSITION bit is set, and that bit is returned.
+ *
+ * On the trace_recursive_unlock(), the TRANSITION bit will be the first
+ * to be cleared. Even if it wasn't the context that set it. That is,
+ * if an interrupt comes in while NORMAL bit is set and the ring buffer
+ * is called before preempt_count() is updated, since the check will
+ * be on the NORMAL bit, the TRANSITION bit will then be set. If an
+ * NMI then comes in, it will set the NMI bit, but when the NMI code
+ * does the trace_recursive_unlock() it will clear the TRANSITION bit
+ * and leave the NMI bit set. But this is fine, because the interrupt
+ * code that set the TRANSITION bit will then clear the NMI bit when it
+ * calls trace_recursive_unlock(). If another NMI comes in, it will
+ * set the TRANSITION bit and continue.
+ *
+ * Note: The TRANSITION bit only handles a single transition between contexts.
*/
-static __always_inline int
+static __always_inline bool
trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
{
unsigned int val = cpu_buffer->current_context;
- unsigned long pc = preempt_count();
- int bit;
+ int bit = interrupt_context_level();
- if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
- bit = RB_CTX_NORMAL;
- else
- bit = pc & NMI_MASK ? RB_CTX_NMI :
- pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ;
+ bit = RB_CTX_NORMAL - bit;
- if (unlikely(val & (1 << (bit + cpu_buffer->nest))))
- return 1;
+ if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) {
+ /*
+ * It is possible that this was called by transitioning
+ * between interrupt context, and preempt_count() has not
+ * been updated yet. In this case, use the TRANSITION bit.
+ */
+ bit = RB_CTX_TRANSITION;
+ if (val & (1 << (bit + cpu_buffer->nest))) {
+ do_ring_buffer_record_recursion();
+ return true;
+ }
+ }
val |= (1 << (bit + cpu_buffer->nest));
cpu_buffer->current_context = val;
- return 0;
+ return false;
}
static __always_inline void
@@ -2786,8 +3144,8 @@ trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->current_context - (1 << cpu_buffer->nest);
}
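/*
 * Standalone sketch, not part of this patch, of the per-context recursion
 * scheme documented above.  Names are illustrative; the kernel keeps these
 * bits in cpu_buffer->current_context, shifts them by cpu_buffer->nest for
 * ring_buffer_nest_start(), and derives the bit from interrupt_context_level().
 */
enum sketch_ctx { SK_CTX_TRANSITION, SK_CTX_NMI, SK_CTX_IRQ, SK_CTX_SOFTIRQ, SK_CTX_NORMAL };

static unsigned int sketch_context;	/* one bit per context level */

static int sketch_recursive_lock(enum sketch_ctx bit)
{
	if (sketch_context & (1U << bit)) {
		/* Possible context-transition lag: allow one extra level. */
		bit = SK_CTX_TRANSITION;
		if (sketch_context & (1U << bit))
			return -1;	/* genuine recursion, reject the write */
	}
	sketch_context |= 1U << bit;
	return 0;
}

static void sketch_recursive_unlock(void)
{
	/* Clear the lowest set bit: the innermost (current) context. */
	sketch_context &= sketch_context - 1;
}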
-/* The recursive locking above uses 4 bits */
-#define NESTED_BITS 4
+/* The recursive locking above uses 5 bits */
+#define NESTED_BITS 5
/**
* ring_buffer_nest_start - Allow to trace while nested
@@ -2838,21 +3196,19 @@ void ring_buffer_nest_end(struct trace_buffer *buffer)
/**
 * ring_buffer_unlock_commit - commit a reserved event
* @buffer: The buffer to commit to
- * @event: The event pointer to commit.
*
* This commits the data to the ring buffer, and releases any locks held.
*
* Must be paired with ring_buffer_lock_reserve.
*/
-int ring_buffer_unlock_commit(struct trace_buffer *buffer,
- struct ring_buffer_event *event)
+int ring_buffer_unlock_commit(struct trace_buffer *buffer)
{
struct ring_buffer_per_cpu *cpu_buffer;
int cpu = raw_smp_processor_id();
cpu_buffer = buffer->buffers[cpu];
- rb_commit(cpu_buffer, event);
+ rb_commit(cpu_buffer);
rb_wakeups(buffer, cpu_buffer);
@@ -2864,58 +3220,366 @@ int ring_buffer_unlock_commit(struct trace_buffer *buffer,
}
EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
-static noinline void
-rb_handle_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
- struct rb_event_info *info)
+/* Special value to validate all deltas on a page. */
+#define CHECK_FULL_PAGE 1L
+
+#ifdef CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS
+
+static const char *show_irq_str(int bits)
{
- WARN_ONCE(info->delta > (1ULL << 59),
- KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
- (unsigned long long)info->delta,
- (unsigned long long)info->ts,
- (unsigned long long)cpu_buffer->write_stamp,
- sched_clock_stable() ? "" :
- "If you just came from a suspend/resume,\n"
- "please switch to the trace global clock:\n"
- " echo global > /sys/kernel/debug/tracing/trace_clock\n"
- "or add trace_clock=global to the kernel command line\n");
- info->add_timestamp = 1;
+ const char *type[] = {
+ ".", // 0
+ "s", // 1
+ "h", // 2
+ "Hs", // 3
+ "n", // 4
+ "Ns", // 5
+ "Nh", // 6
+ "NHs", // 7
+ };
+
+ return type[bits];
+}
+
+/* Assume this is a trace event */
+static const char *show_flags(struct ring_buffer_event *event)
+{
+ struct trace_entry *entry;
+ int bits = 0;
+
+ if (rb_event_data_length(event) - RB_EVNT_HDR_SIZE < sizeof(*entry))
+ return "X";
+
+ entry = ring_buffer_event_data(event);
+
+ if (entry->flags & TRACE_FLAG_SOFTIRQ)
+ bits |= 1;
+
+ if (entry->flags & TRACE_FLAG_HARDIRQ)
+ bits |= 2;
+
+ if (entry->flags & TRACE_FLAG_NMI)
+ bits |= 4;
+
+ return show_irq_str(bits);
+}
+
+static const char *show_irq(struct ring_buffer_event *event)
+{
+ struct trace_entry *entry;
+
+ if (rb_event_data_length(event) - RB_EVNT_HDR_SIZE < sizeof(*entry))
+ return "";
+
+ entry = ring_buffer_event_data(event);
+ if (entry->flags & TRACE_FLAG_IRQS_OFF)
+ return "d";
+ return "";
+}
+
+static const char *show_interrupt_level(void)
+{
+ unsigned long pc = preempt_count();
+ unsigned char level = 0;
+
+ if (pc & SOFTIRQ_OFFSET)
+ level |= 1;
+
+ if (pc & HARDIRQ_MASK)
+ level |= 2;
+
+ if (pc & NMI_MASK)
+ level |= 4;
+
+ return show_irq_str(level);
}
+static void dump_buffer_page(struct buffer_data_page *bpage,
+ struct rb_event_info *info,
+ unsigned long tail)
+{
+ struct ring_buffer_event *event;
+ u64 ts, delta;
+ int e;
+
+ ts = bpage->time_stamp;
+ pr_warn(" [%lld] PAGE TIME STAMP\n", ts);
+
+ for (e = 0; e < tail; e += rb_event_length(event)) {
+
+ event = (struct ring_buffer_event *)(bpage->data + e);
+
+ switch (event->type_len) {
+
+ case RINGBUF_TYPE_TIME_EXTEND:
+ delta = rb_event_time_stamp(event);
+ ts += delta;
+ pr_warn(" 0x%x: [%lld] delta:%lld TIME EXTEND\n",
+ e, ts, delta);
+ break;
+
+ case RINGBUF_TYPE_TIME_STAMP:
+ delta = rb_event_time_stamp(event);
+ ts = rb_fix_abs_ts(delta, ts);
+ pr_warn(" 0x%x: [%lld] absolute:%lld TIME STAMP\n",
+ e, ts, delta);
+ break;
+
+ case RINGBUF_TYPE_PADDING:
+ ts += event->time_delta;
+ pr_warn(" 0x%x: [%lld] delta:%d PADDING\n",
+ e, ts, event->time_delta);
+ break;
+
+ case RINGBUF_TYPE_DATA:
+ ts += event->time_delta;
+ pr_warn(" 0x%x: [%lld] delta:%d %s%s\n",
+ e, ts, event->time_delta,
+ show_flags(event), show_irq(event));
+ break;
+
+ default:
+ break;
+ }
+ }
+ pr_warn("expected end:0x%lx last event actually ended at:0x%x\n", tail, e);
+}
+
+static DEFINE_PER_CPU(atomic_t, checking);
+static atomic_t ts_dump;
+
+#define buffer_warn_return(fmt, ...) \
+ do { \
+ /* If another report is happening, ignore this one */ \
+ if (atomic_inc_return(&ts_dump) != 1) { \
+ atomic_dec(&ts_dump); \
+ goto out; \
+ } \
+ atomic_inc(&cpu_buffer->record_disabled); \
+ pr_warn(fmt, ##__VA_ARGS__); \
+ dump_buffer_page(bpage, info, tail); \
+ atomic_dec(&ts_dump); \
+ /* There are some cases during boot up where this can happen */ \
+ if (WARN_ON_ONCE(system_state != SYSTEM_BOOTING)) \
+ /* Do not re-enable checking */ \
+ return; \
+ } while (0)
+
+/*
+ * Check if the current event time stamp matches the deltas on
+ * the buffer page.
+ */
+static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
+ struct rb_event_info *info,
+ unsigned long tail)
+{
+ struct ring_buffer_event *event;
+ struct buffer_data_page *bpage;
+ u64 ts, delta;
+ bool full = false;
+ int e;
+
+ bpage = info->tail_page->page;
+
+ if (tail == CHECK_FULL_PAGE) {
+ full = true;
+ tail = local_read(&bpage->commit);
+ } else if (info->add_timestamp &
+ (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)) {
+ /* Ignore events with absolute time stamps */
+ return;
+ }
+
+ /*
+ * Do not check the first event (skip possible extends too).
+ * Also do not check if previous events have not been committed.
+ */
+ if (tail <= 8 || tail > local_read(&bpage->commit))
+ return;
+
+ /*
+ * If this interrupted another event,
+ */
+ if (atomic_inc_return(this_cpu_ptr(&checking)) != 1)
+ goto out;
+
+ ts = bpage->time_stamp;
+
+ for (e = 0; e < tail; e += rb_event_length(event)) {
+
+ event = (struct ring_buffer_event *)(bpage->data + e);
+
+ switch (event->type_len) {
+
+ case RINGBUF_TYPE_TIME_EXTEND:
+ delta = rb_event_time_stamp(event);
+ ts += delta;
+ break;
+
+ case RINGBUF_TYPE_TIME_STAMP:
+ delta = rb_event_time_stamp(event);
+ delta = rb_fix_abs_ts(delta, ts);
+ if (delta < ts) {
+ buffer_warn_return("[CPU: %d]ABSOLUTE TIME WENT BACKWARDS: last ts: %lld absolute ts: %lld\n",
+ cpu_buffer->cpu, ts, delta);
+ }
+ ts = delta;
+ break;
+
+ case RINGBUF_TYPE_PADDING:
+ if (event->time_delta == 1)
+ break;
+ fallthrough;
+ case RINGBUF_TYPE_DATA:
+ ts += event->time_delta;
+ break;
+
+ default:
+ RB_WARN_ON(cpu_buffer, 1);
+ }
+ }
+ if ((full && ts > info->ts) ||
+ (!full && ts + info->delta != info->ts)) {
+ buffer_warn_return("[CPU: %d]TIME DOES NOT MATCH expected:%lld actual:%lld delta:%lld before:%lld after:%lld%s context:%s\n",
+ cpu_buffer->cpu,
+ ts + info->delta, info->ts, info->delta,
+ info->before, info->after,
+ full ? " (full)" : "", show_interrupt_level());
+ }
+out:
+ atomic_dec(this_cpu_ptr(&checking));
+}
+#else
+static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
+ struct rb_event_info *info,
+ unsigned long tail)
+{
+}
+#endif /* CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS */
+
static struct ring_buffer_event *
__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
struct rb_event_info *info)
{
struct ring_buffer_event *event;
struct buffer_page *tail_page;
- unsigned long tail, write;
-
- /*
- * If the time delta since the last event is too big to
- * hold in the time field of the event, then we append a
- * TIME EXTEND event ahead of the data event.
- */
- if (unlikely(info->add_timestamp))
- info->length += RB_LEN_TIME_EXTEND;
+ unsigned long tail, write, w;
/* Don't let the compiler play games with cpu_buffer->tail_page */
tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page);
- write = local_add_return(info->length, &tail_page->write);
+
+ /*A*/ w = local_read(&tail_page->write) & RB_WRITE_MASK;
+ barrier();
+ rb_time_read(&cpu_buffer->before_stamp, &info->before);
+ rb_time_read(&cpu_buffer->write_stamp, &info->after);
+ barrier();
+ info->ts = rb_time_stamp(cpu_buffer->buffer);
+
+ if ((info->add_timestamp & RB_ADD_STAMP_ABSOLUTE)) {
+ info->delta = info->ts;
+ } else {
+ /*
+ * If interrupting an event time update, we may need an
+ * absolute timestamp.
+ * Don't bother if this is the start of a new page (w == 0).
+ */
+ if (!w) {
+ /* Use the sub-buffer timestamp */
+ info->delta = 0;
+ } else if (unlikely(info->before != info->after)) {
+ info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND;
+ info->length += RB_LEN_TIME_EXTEND;
+ } else {
+ info->delta = info->ts - info->after;
+ if (unlikely(test_time_stamp(info->delta))) {
+ info->add_timestamp |= RB_ADD_STAMP_EXTEND;
+ info->length += RB_LEN_TIME_EXTEND;
+ }
+ }
+ }
+
+ /*B*/ rb_time_set(&cpu_buffer->before_stamp, info->ts);
+
+ /*C*/ write = local_add_return(info->length, &tail_page->write);
/* set write to only the index of the write */
write &= RB_WRITE_MASK;
+
tail = write - info->length;
+ /* See if we shot past the end of this buffer page */
+ if (unlikely(write > cpu_buffer->buffer->subbuf_size)) {
+ check_buffer(cpu_buffer, info, CHECK_FULL_PAGE);
+ return rb_move_tail(cpu_buffer, tail, info);
+ }
+
+ if (likely(tail == w)) {
+ /* Nothing interrupted us between A and C */
+ /*D*/ rb_time_set(&cpu_buffer->write_stamp, info->ts);
+ /*
+ * If something came in between C and D, the write stamp
+ * may now not be in sync. But that's fine as the before_stamp
+ * will be different and then next event will just be forced
+ * to use an absolute timestamp.
+ */
+ if (likely(!(info->add_timestamp &
+ (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
+ /* This did not interrupt any time update */
+ info->delta = info->ts - info->after;
+ else
+ /* Just use full timestamp for interrupting event */
+ info->delta = info->ts;
+ check_buffer(cpu_buffer, info, tail);
+ } else {
+ u64 ts;
+ /* SLOW PATH - Interrupted between A and C */
+
+ /* Save the old before_stamp */
+ rb_time_read(&cpu_buffer->before_stamp, &info->before);
+
+ /*
+ * Read a new timestamp and update the before_stamp to make
+ * the next event after this one force using an absolute
+ * timestamp. This is in case an interrupt were to come in
+ * between E and F.
+ */
+ ts = rb_time_stamp(cpu_buffer->buffer);
+ rb_time_set(&cpu_buffer->before_stamp, ts);
+
+ barrier();
+ /*E*/ rb_time_read(&cpu_buffer->write_stamp, &info->after);
+ barrier();
+ /*F*/ if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) &&
+ info->after == info->before && info->after < ts) {
+ /*
+ * Nothing came after this event between C and F, it is
+ * safe to use info->after for the delta as it
+ * matched info->before and is still valid.
+ */
+ info->delta = ts - info->after;
+ } else {
+ /*
+ * Interrupted between C and F:
+ * Lost the previous event's time stamp. Just set the
+ * delta to zero, and this will be the same time as
+ * the event this event interrupted. And the events that
+ * came after this will still be correct (as they would
+ * have built their delta on the previous event).
+ */
+ info->delta = 0;
+ }
+ info->ts = ts;
+ info->add_timestamp &= ~RB_ADD_STAMP_FORCE;
+ }
+
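/*
 * Reduced sketch, not part of this patch: the before_stamp/write_stamp
 * handshake that the A..F comments above describe, with the interrupt
 * recovery paths stripped out.  A writer that finds the two stamps unequal
 * knows it landed inside another writer's reservation, so it cannot trust a
 * delta computed from write_stamp and must emit an absolute time stamp.
 */
static unsigned long long sk_before_stamp, sk_write_stamp;

static unsigned long long sketch_event_delta(unsigned long long ts, int *use_absolute)
{
	unsigned long long after = sk_write_stamp;

	*use_absolute = (sk_before_stamp != after);	/* interrupted a reserve? */
	sk_before_stamp = ts;				/* "B": announce our reserve */
	/* ... the data space is reserved here ("C") ... */
	sk_write_stamp = ts;				/* "D": reserve finished */

	return *use_absolute ? ts : ts - after;
}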
/*
* If this is the first commit on the page, then it has the same
* timestamp as the page itself.
*/
- if (!tail && !ring_buffer_time_stamp_abs(cpu_buffer->buffer))
+ if (unlikely(!tail && !(info->add_timestamp &
+ (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
info->delta = 0;
- /* See if we shot pass the end of this buffer page */
- if (unlikely(write > BUF_PAGE_SIZE))
- return rb_move_tail(cpu_buffer, tail, info);
-
/* We reserved something on the buffer */
event = __rb_page_index(tail_page, tail);
@@ -2927,7 +3591,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
* If this is the first commit on the page, then update
* its timestamp.
*/
- if (!tail)
+ if (unlikely(!tail))
tail_page->page->time_stamp = info->ts;
/* account for these added bytes */
@@ -2944,9 +3608,16 @@ rb_reserve_next_event(struct trace_buffer *buffer,
struct ring_buffer_event *event;
struct rb_event_info info;
int nr_loops = 0;
- u64 diff;
+ int add_ts_default;
+
+ /* ring buffer does cmpxchg, make sure it is safe in NMI context */
+ if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) &&
+ (unlikely(in_nmi()))) {
+ return NULL;
+ }
rb_start_commit(cpu_buffer);
+ /* The commit page can not change after this */
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
/*
@@ -2964,8 +3635,18 @@ rb_reserve_next_event(struct trace_buffer *buffer,
#endif
info.length = rb_calculate_event_length(length);
+
+ if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) {
+ add_ts_default = RB_ADD_STAMP_ABSOLUTE;
+ info.length += RB_LEN_TIME_EXTEND;
+ if (info.length > cpu_buffer->buffer->max_data_size)
+ goto out_fail;
+ } else {
+ add_ts_default = RB_ADD_STAMP_NONE;
+ }
+
again:
- info.add_timestamp = 0;
+ info.add_timestamp = add_ts_default;
info.delta = 0;
/*
@@ -2980,35 +3661,16 @@ rb_reserve_next_event(struct trace_buffer *buffer,
if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
goto out_fail;
- info.ts = rb_time_stamp(cpu_buffer->buffer);
- diff = info.ts - cpu_buffer->write_stamp;
-
- /* make sure this diff is calculated here */
- barrier();
-
- if (ring_buffer_time_stamp_abs(buffer)) {
- info.delta = info.ts;
- rb_handle_timestamp(cpu_buffer, &info);
- } else /* Did the write stamp get updated already? */
- if (likely(info.ts >= cpu_buffer->write_stamp)) {
- info.delta = diff;
- if (unlikely(test_time_stamp(info.delta)))
- rb_handle_timestamp(cpu_buffer, &info);
- }
-
event = __rb_reserve_next(cpu_buffer, &info);
if (unlikely(PTR_ERR(event) == -EAGAIN)) {
- if (info.add_timestamp)
+ if (info.add_timestamp & (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND))
info.length -= RB_LEN_TIME_EXTEND;
goto again;
}
- if (!event)
- goto out_fail;
-
- return event;
-
+ if (likely(event))
+ return event;
out_fail:
rb_end_commit(cpu_buffer);
return NULL;
@@ -3052,7 +3714,7 @@ ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length)
if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
goto out;
- if (unlikely(length > BUF_MAX_DATA_SIZE))
+ if (unlikely(length > buffer->max_data_size))
goto out;
if (unlikely(trace_recursive_lock(cpu_buffer)))
@@ -3086,7 +3748,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
struct buffer_page *bpage = cpu_buffer->commit_page;
struct buffer_page *start;
- addr &= PAGE_MASK;
+ addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1);
/* Do the likely case first */
if (likely(bpage->page == (void *)addr)) {
@@ -3098,14 +3760,14 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
* Because the commit page may be on the reader page we
* start with the next page and check the end loop there.
*/
- rb_inc_page(cpu_buffer, &bpage);
+ rb_inc_page(&bpage);
start = bpage;
do {
if (bpage->page == (void *)addr) {
local_dec(&bpage->entries);
return;
}
- rb_inc_page(cpu_buffer, &bpage);
+ rb_inc_page(&bpage);
} while (bpage != start);
/* commit not part of this buffer?? */
@@ -3113,7 +3775,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
}
/**
- * ring_buffer_commit_discard - discard an event that has not been committed
+ * ring_buffer_discard_commit - discard an event that has not been committed
* @buffer: the ring buffer
* @event: non committed event to discard
*
@@ -3154,11 +3816,6 @@ void ring_buffer_discard_commit(struct trace_buffer *buffer,
if (rb_try_to_discard(cpu_buffer, event))
goto out;
- /*
- * The commit is still visible by the reader, so we
- * must still update the timestamp.
- */
- rb_update_write_stamp(cpu_buffer, event);
out:
rb_end_commit(cpu_buffer);
@@ -3207,7 +3864,7 @@ int ring_buffer_write(struct trace_buffer *buffer,
if (atomic_read(&cpu_buffer->record_disabled))
goto out;
- if (length > BUF_MAX_DATA_SIZE)
+ if (length > buffer->max_data_size)
goto out;
if (unlikely(trace_recursive_lock(cpu_buffer)))
@@ -3221,7 +3878,7 @@ int ring_buffer_write(struct trace_buffer *buffer,
memcpy(body, data, length);
- rb_commit(cpu_buffer, event);
+ rb_commit(cpu_buffer);
rb_wakeups(buffer, cpu_buffer);
@@ -3247,10 +3904,30 @@ static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
if (unlikely(!head))
return true;
- return reader->read == rb_page_commit(reader) &&
- (commit == reader ||
- (commit == head &&
- head->read == rb_page_commit(commit)));
+ /* Reader should exhaust content in reader page */
+ if (reader->read != rb_page_commit(reader))
+ return false;
+
+ /*
+ * If writers are committing on the reader page, knowing all
+ * committed content has been read, the ring buffer is empty.
+ */
+ if (commit == reader)
+ return true;
+
+ /*
+ * If writers are committing on a page other than reader page
+ * and head page, there should always be content to read.
+ */
+ if (commit != head)
+ return false;
+
+ /*
+ * Writers are committing on the head page; we just need
+ * to care about whether there is committed data, and the reader will
+ * swap reader page with head page when it is to read data.
+ */
+ return rb_page_commit(commit) == 0;
}
/**
@@ -3297,10 +3974,10 @@ void ring_buffer_record_off(struct trace_buffer *buffer)
unsigned int rd;
unsigned int new_rd;
+ rd = atomic_read(&buffer->record_disabled);
do {
- rd = atomic_read(&buffer->record_disabled);
new_rd = rd | RB_BUFFER_OFF;
- } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
+ } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd));
}
EXPORT_SYMBOL_GPL(ring_buffer_record_off);
@@ -3320,10 +3997,10 @@ void ring_buffer_record_on(struct trace_buffer *buffer)
unsigned int rd;
unsigned int new_rd;
+ rd = atomic_read(&buffer->record_disabled);
do {
- rd = atomic_read(&buffer->record_disabled);
new_rd = rd & ~RB_BUFFER_OFF;
- } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
+ } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd));
}
EXPORT_SYMBOL_GPL(ring_buffer_record_on);
@@ -3443,7 +4120,7 @@ u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu)
EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
/**
- * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
+ * ring_buffer_bytes_cpu - get the number of bytes unconsumed in a cpu buffer
* @buffer: The ring buffer
* @cpu: The per CPU buffer to read from.
*/
@@ -3621,6 +4298,7 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
iter->cache_reader_page = iter->head_page;
iter->cache_read = cpu_buffer->read;
+ iter->cache_pages_removed = cpu_buffer->pages_removed;
if (iter->head) {
iter->read_stamp = cpu_buffer->read_stamp;
@@ -3713,12 +4391,13 @@ rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
return;
case RINGBUF_TYPE_TIME_EXTEND:
- delta = ring_buffer_event_time_stamp(event);
+ delta = rb_event_time_stamp(event);
cpu_buffer->read_stamp += delta;
return;
case RINGBUF_TYPE_TIME_STAMP:
- delta = ring_buffer_event_time_stamp(event);
+ delta = rb_event_time_stamp(event);
+ delta = rb_fix_abs_ts(delta, cpu_buffer->read_stamp);
cpu_buffer->read_stamp = delta;
return;
@@ -3729,7 +4408,6 @@ rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
default:
RB_WARN_ON(cpu_buffer, 1);
}
- return;
}
static void
@@ -3743,12 +4421,13 @@ rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
return;
case RINGBUF_TYPE_TIME_EXTEND:
- delta = ring_buffer_event_time_stamp(event);
+ delta = rb_event_time_stamp(event);
iter->read_stamp += delta;
return;
case RINGBUF_TYPE_TIME_STAMP:
- delta = ring_buffer_event_time_stamp(event);
+ delta = rb_event_time_stamp(event);
+ delta = rb_fix_abs_ts(delta, iter->read_stamp);
iter->read_stamp = delta;
return;
@@ -3759,17 +4438,17 @@ rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
default:
RB_WARN_ON(iter->cpu_buffer, 1);
}
- return;
}
static struct buffer_page *
rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
struct buffer_page *reader = NULL;
+ unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size);
unsigned long overwrite;
unsigned long flags;
int nr_loops = 0;
- int ret;
+ bool ret;
local_irq_save(flags);
arch_spin_lock(&cpu_buffer->lock);
@@ -3832,7 +4511,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->pages = reader->list.prev;
/* The reader page will be pointing to the new head */
- rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
+ rb_set_list_to_head(&cpu_buffer->reader_page->list);
/*
* We want to make sure we read the overruns after we set up our
@@ -3871,7 +4550,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
* Now make the new head point back to the reader page.
*/
rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
- rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
+ rb_inc_page(&cpu_buffer->head_page);
local_inc(&cpu_buffer->pages_read);
@@ -3894,6 +4573,38 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
arch_spin_unlock(&cpu_buffer->lock);
local_irq_restore(flags);
+ /*
+ * The writer has preemption disabled, so wait for it. But not forever:
+ * 1 second is pretty much "forever".
+ */
+#define USECS_WAIT 1000000
+ for (nr_loops = 0; nr_loops < USECS_WAIT; nr_loops++) {
+ /* If the write is past the end of page, a writer is still updating it */
+ if (likely(!reader || rb_page_write(reader) <= bsize))
+ break;
+
+ udelay(1);
+
+ /* Get the latest version of the reader write value */
+ smp_rmb();
+ }
+
+ /* The writer is not moving forward? Something is wrong */
+ if (RB_WARN_ON(cpu_buffer, nr_loops == USECS_WAIT))
+ reader = NULL;
+
+ /*
+ * Make sure we see any padding after the write update
+ * (see rb_reset_tail()).
+ *
+ * In addition, a writer may be writing on the reader page
+ * if the page has not been fully filled, so the read barrier
+ * is also needed to make sure we see the content of what is
+ * committed by the writer (see rb_set_commit_to_write()).
+ */
+ smp_rmb();
+
return reader;
}
@@ -3918,6 +4629,7 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
length = rb_event_length(event);
cpu_buffer->reader_page->read += length;
+ cpu_buffer->read_bytes += length;
}
static void rb_advance_iter(struct ring_buffer_iter *iter)
@@ -4001,7 +4713,8 @@ rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
case RINGBUF_TYPE_TIME_STAMP:
if (ts) {
- *ts = ring_buffer_event_time_stamp(event);
+ *ts = rb_event_time_stamp(event);
+ *ts = rb_fix_abs_ts(*ts, reader->page->time_stamp);
ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
cpu_buffer->cpu, ts);
}
@@ -4042,12 +4755,13 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
buffer = cpu_buffer->buffer;
/*
- * Check if someone performed a consuming read to
- * the buffer. A consuming read invalidates the iterator
- * and we need to reset the iterator in this case.
+ * Check if someone performed a consuming read to the buffer
+ * or removed some pages from the buffer. In these cases,
+ * iterator was invalidated and we need to reset it.
*/
if (unlikely(iter->cache_read != cpu_buffer->read ||
- iter->cache_reader_page != cpu_buffer->reader_page))
+ iter->cache_reader_page != cpu_buffer->reader_page ||
+ iter->cache_pages_removed != cpu_buffer->pages_removed))
rb_iter_reset(iter);
again:
@@ -4092,7 +4806,8 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
case RINGBUF_TYPE_TIME_STAMP:
if (ts) {
- *ts = ring_buffer_event_time_stamp(event);
+ *ts = rb_event_time_stamp(event);
+ *ts = rb_fix_abs_ts(*ts, iter->head_page->page->time_stamp);
ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
cpu_buffer->cpu, ts);
}
@@ -4145,7 +4860,6 @@ rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
{
if (likely(locked))
raw_spin_unlock(&cpu_buffer->reader_lock);
- return;
}
/**
@@ -4309,7 +5023,9 @@ ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
if (!iter)
return NULL;
- iter->event = kmalloc(BUF_MAX_DATA_SIZE, flags);
+ /* Holds the entire event: data and meta data */
+ iter->event_size = buffer->subbuf_size;
+ iter->event = kmalloc(iter->event_size, flags);
if (!iter->event) {
kfree(iter);
return NULL;
@@ -4425,41 +5141,56 @@ EXPORT_SYMBOL_GPL(ring_buffer_iter_advance);
*/
unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu)
{
- /*
- * Earlier, this method returned
- * BUF_PAGE_SIZE * buffer->nr_pages
- * Since the nr_pages field is now removed, we have converted this to
- * return the per cpu buffer value.
- */
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return 0;
- return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
+ return buffer->subbuf_size * buffer->buffers[cpu]->nr_pages;
}
EXPORT_SYMBOL_GPL(ring_buffer_size);
+/**
+ * ring_buffer_max_event_size - return the max data size of an event
+ * @buffer: The ring buffer.
+ *
+ * Returns the maximum size an event can be.
+ */
+unsigned long ring_buffer_max_event_size(struct trace_buffer *buffer)
+{
+ /* If abs timestamp is requested, events have a timestamp too */
+ if (ring_buffer_time_stamp_abs(buffer))
+ return buffer->max_data_size - RB_LEN_TIME_EXTEND;
+ return buffer->max_data_size;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_max_event_size);
+
+static void rb_clear_buffer_page(struct buffer_page *page)
+{
+ local_set(&page->write, 0);
+ local_set(&page->entries, 0);
+ rb_init_page(page->page);
+ page->read = 0;
+}
+
static void
rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
{
+ struct buffer_page *page;
+
rb_head_page_deactivate(cpu_buffer);
cpu_buffer->head_page
= list_entry(cpu_buffer->pages, struct buffer_page, list);
- local_set(&cpu_buffer->head_page->write, 0);
- local_set(&cpu_buffer->head_page->entries, 0);
- local_set(&cpu_buffer->head_page->page->commit, 0);
-
- cpu_buffer->head_page->read = 0;
+ rb_clear_buffer_page(cpu_buffer->head_page);
+ list_for_each_entry(page, cpu_buffer->pages, list) {
+ rb_clear_buffer_page(page);
+ }
cpu_buffer->tail_page = cpu_buffer->head_page;
cpu_buffer->commit_page = cpu_buffer->head_page;
INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
INIT_LIST_HEAD(&cpu_buffer->new_pages);
- local_set(&cpu_buffer->reader_page->write, 0);
- local_set(&cpu_buffer->reader_page->entries, 0);
- local_set(&cpu_buffer->reader_page->page->commit, 0);
- cpu_buffer->reader_page->read = 0;
+ rb_clear_buffer_page(cpu_buffer->reader_page);
local_set(&cpu_buffer->entries_bytes, 0);
local_set(&cpu_buffer->overrun, 0);
@@ -4469,19 +5200,43 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
local_set(&cpu_buffer->committing, 0);
local_set(&cpu_buffer->commits, 0);
local_set(&cpu_buffer->pages_touched, 0);
+ local_set(&cpu_buffer->pages_lost, 0);
local_set(&cpu_buffer->pages_read, 0);
cpu_buffer->last_pages_touch = 0;
cpu_buffer->shortest_full = 0;
cpu_buffer->read = 0;
cpu_buffer->read_bytes = 0;
- cpu_buffer->write_stamp = 0;
- cpu_buffer->read_stamp = 0;
+ rb_time_set(&cpu_buffer->write_stamp, 0);
+ rb_time_set(&cpu_buffer->before_stamp, 0);
+
+ memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp));
cpu_buffer->lost_events = 0;
cpu_buffer->last_overrun = 0;
rb_head_page_activate(cpu_buffer);
+ cpu_buffer->pages_removed = 0;
+}
+
+/* Must have disabled the cpu buffer then done a synchronize_rcu */
+static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+
+ if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
+ goto out;
+
+ arch_spin_lock(&cpu_buffer->lock);
+
+ rb_reset_cpu(cpu_buffer);
+
+ arch_spin_unlock(&cpu_buffer->lock);
+
+ out:
+ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
/**
@@ -4492,35 +5247,71 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
- unsigned long flags;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return;
+ /* prevent another thread from changing buffer sizes */
+ mutex_lock(&buffer->mutex);
+
atomic_inc(&cpu_buffer->resize_disabled);
atomic_inc(&cpu_buffer->record_disabled);
/* Make sure all commits have finished */
synchronize_rcu();
- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ reset_disabled_cpu_buffer(cpu_buffer);
- if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
- goto out;
+ atomic_dec(&cpu_buffer->record_disabled);
+ atomic_dec(&cpu_buffer->resize_disabled);
- arch_spin_lock(&cpu_buffer->lock);
+ mutex_unlock(&buffer->mutex);
+}
+EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
- rb_reset_cpu(cpu_buffer);
+/* Flag to ensure proper resetting of atomic variables */
+#define RESET_BIT (1 << 30)
- arch_spin_unlock(&cpu_buffer->lock);
+/**
+ * ring_buffer_reset_online_cpus - reset a ring buffer per CPU buffer
+ * @buffer: The ring buffer to reset a per cpu buffer of
+ */
+void ring_buffer_reset_online_cpus(struct trace_buffer *buffer)
+{
+ struct ring_buffer_per_cpu *cpu_buffer;
+ int cpu;
- out:
- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ /* prevent another thread from changing buffer sizes */
+ mutex_lock(&buffer->mutex);
- atomic_dec(&cpu_buffer->record_disabled);
- atomic_dec(&cpu_buffer->resize_disabled);
+ for_each_online_buffer_cpu(buffer, cpu) {
+ cpu_buffer = buffer->buffers[cpu];
+
+ atomic_add(RESET_BIT, &cpu_buffer->resize_disabled);
+ atomic_inc(&cpu_buffer->record_disabled);
+ }
+
+ /* Make sure all commits have finished */
+ synchronize_rcu();
+
+ for_each_buffer_cpu(buffer, cpu) {
+ cpu_buffer = buffer->buffers[cpu];
+
+ /*
+ * If a CPU came online during the synchronize_rcu(), then
+ * ignore it.
+ */
+ if (!(atomic_read(&cpu_buffer->resize_disabled) & RESET_BIT))
+ continue;
+
+ reset_disabled_cpu_buffer(cpu_buffer);
+
+ atomic_dec(&cpu_buffer->record_disabled);
+ atomic_sub(RESET_BIT, &cpu_buffer->resize_disabled);
+ }
+
+ mutex_unlock(&buffer->mutex);
}
-EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
/**
* ring_buffer_reset - reset a ring buffer
@@ -4528,15 +5319,37 @@ EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
*/
void ring_buffer_reset(struct trace_buffer *buffer)
{
+ struct ring_buffer_per_cpu *cpu_buffer;
int cpu;
- for_each_buffer_cpu(buffer, cpu)
- ring_buffer_reset_cpu(buffer, cpu);
+ /* prevent another thread from changing buffer sizes */
+ mutex_lock(&buffer->mutex);
+
+ for_each_buffer_cpu(buffer, cpu) {
+ cpu_buffer = buffer->buffers[cpu];
+
+ atomic_inc(&cpu_buffer->resize_disabled);
+ atomic_inc(&cpu_buffer->record_disabled);
+ }
+
+ /* Make sure all commits have finished */
+ synchronize_rcu();
+
+ for_each_buffer_cpu(buffer, cpu) {
+ cpu_buffer = buffer->buffers[cpu];
+
+ reset_disabled_cpu_buffer(cpu_buffer);
+
+ atomic_dec(&cpu_buffer->record_disabled);
+ atomic_dec(&cpu_buffer->resize_disabled);
+ }
+
+ mutex_unlock(&buffer->mutex);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset);
/**
- * rind_buffer_empty - is the ring buffer empty?
+ * ring_buffer_empty - is the ring buffer empty?
* @buffer: The ring buffer to test
*/
bool ring_buffer_empty(struct trace_buffer *buffer)
@@ -4544,8 +5357,8 @@ bool ring_buffer_empty(struct trace_buffer *buffer)
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long flags;
bool dolock;
+ bool ret;
int cpu;
- int ret;
/* yes this is racy, but if you don't like the race, lock the buffer */
for_each_buffer_cpu(buffer, cpu) {
@@ -4574,7 +5387,7 @@ bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu)
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long flags;
bool dolock;
- int ret;
+ bool ret;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return true;
@@ -4620,6 +5433,9 @@ int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
goto out;
+ if (buffer_a->subbuf_order != buffer_b->subbuf_order)
+ goto out;
+
ret = -EAGAIN;
if (atomic_read(&buffer_a->record_disabled))
@@ -4649,6 +5465,15 @@ int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
if (local_read(&cpu_buffer_b->committing))
goto out_dec;
+ /*
+ * When resize is in progress, we cannot swap it because
+ * it will mess the state of the cpu buffer.
+ */
+ if (atomic_read(&buffer_a->resizing))
+ goto out_dec;
+ if (atomic_read(&buffer_b->resizing))
+ goto out_dec;
+
buffer_a->buffers[cpu] = cpu_buffer_b;
buffer_b->buffers[cpu] = cpu_buffer_a;
@@ -4682,40 +5507,48 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
* Returns:
* The page allocated, or ERR_PTR
*/
-void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu)
+struct buffer_data_read_page *
+ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
- struct buffer_data_page *bpage = NULL;
+ struct buffer_data_read_page *bpage = NULL;
unsigned long flags;
struct page *page;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return ERR_PTR(-ENODEV);
+ bpage = kzalloc(sizeof(*bpage), GFP_KERNEL);
+ if (!bpage)
+ return ERR_PTR(-ENOMEM);
+
+ bpage->order = buffer->subbuf_order;
cpu_buffer = buffer->buffers[cpu];
local_irq_save(flags);
arch_spin_lock(&cpu_buffer->lock);
if (cpu_buffer->free_page) {
- bpage = cpu_buffer->free_page;
+ bpage->data = cpu_buffer->free_page;
cpu_buffer->free_page = NULL;
}
arch_spin_unlock(&cpu_buffer->lock);
local_irq_restore(flags);
- if (bpage)
+ if (bpage->data)
goto out;
- page = alloc_pages_node(cpu_to_node(cpu),
- GFP_KERNEL | __GFP_NORETRY, 0);
- if (!page)
+ page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL | __GFP_NORETRY,
+ cpu_buffer->buffer->subbuf_order);
+ if (!page) {
+ kfree(bpage);
return ERR_PTR(-ENOMEM);
+ }
- bpage = page_address(page);
+ bpage->data = page_address(page);
out:
- rb_init_page(bpage);
+ rb_init_page(bpage->data);
return bpage;
}
@@ -4725,19 +5558,29 @@ EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
* ring_buffer_free_read_page - free an allocated read page
* @buffer: the buffer the page was allocate for
* @cpu: the cpu buffer the page came from
- * @data: the page to free
+ * @data_page: the page to free
*
* Free a page allocated from ring_buffer_alloc_read_page.
*/
-void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data)
+void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu,
+ struct buffer_data_read_page *data_page)
{
- struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
- struct buffer_data_page *bpage = data;
+ struct ring_buffer_per_cpu *cpu_buffer;
+ struct buffer_data_page *bpage = data_page->data;
struct page *page = virt_to_page(bpage);
unsigned long flags;
- /* If the page is still in use someplace else, we can't reuse it */
- if (page_ref_count(page) > 1)
+ if (!buffer || !buffer->buffers || !buffer->buffers[cpu])
+ return;
+
+ cpu_buffer = buffer->buffers[cpu];
+
+ /*
+ * If the page is still in use someplace else, or the order of the page
+ * is different from the subbuffer order of the buffer,
+ * we can't reuse it.
+ */
+ if (page_ref_count(page) > 1 || data_page->order != buffer->subbuf_order)
goto out;
local_irq_save(flags);
@@ -4752,7 +5595,8 @@ void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data
local_irq_restore(flags);
out:
- free_page((unsigned long)bpage);
+ free_pages((unsigned long)bpage, data_page->order);
+ kfree(data_page);
}
EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
@@ -4773,9 +5617,10 @@ EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
* rpage = ring_buffer_alloc_read_page(buffer, cpu);
* if (IS_ERR(rpage))
* return PTR_ERR(rpage);
- * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
+ * ret = ring_buffer_read_page(buffer, rpage, len, cpu, 0);
* if (ret >= 0)
- * process_page(rpage, ret);
+ * process_page(ring_buffer_read_page_data(rpage), ret);
+ * ring_buffer_free_read_page(buffer, cpu, rpage);
*
* When @full is set, the function will not return true unless
* the writer is off the reader page.
@@ -4790,7 +5635,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
* <0 if no data has been transferred.
*/
int ring_buffer_read_page(struct trace_buffer *buffer,
- void **data_page, size_t len, int cpu, int full)
+ struct buffer_data_read_page *data_page,
+ size_t len, int cpu, int full)
{
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
struct ring_buffer_event *event;
@@ -4815,10 +5661,12 @@ int ring_buffer_read_page(struct trace_buffer *buffer,
len -= BUF_PAGE_HDR_SIZE;
- if (!data_page)
+ if (!data_page || !data_page->data)
+ goto out;
+ if (data_page->order != buffer->subbuf_order)
goto out;
- bpage = *data_page;
+ bpage = data_page->data;
if (!bpage)
goto out;
@@ -4850,7 +5698,15 @@ int ring_buffer_read_page(struct trace_buffer *buffer,
unsigned int pos = 0;
unsigned int size;
- if (full)
+ /*
+ * If a full page is expected, this can still be returned
+ * if there's been a previous partial read and the
+ * rest of the page can be read and the commit page is off
+ * the reader page.
+ */
+ if (full &&
+ (!read || (len < (commit - read)) ||
+ cpu_buffer->reader_page == cpu_buffer->commit_page))
goto out_unlock;
if (len > (commit - read))
@@ -4899,16 +5755,16 @@ int ring_buffer_read_page(struct trace_buffer *buffer,
} else {
/* update the entry counter */
cpu_buffer->read += rb_page_entries(reader);
- cpu_buffer->read_bytes += BUF_PAGE_SIZE;
+ cpu_buffer->read_bytes += rb_page_commit(reader);
/* swap the pages */
rb_init_page(bpage);
bpage = reader->page;
- reader->page = *data_page;
+ reader->page = data_page->data;
local_set(&reader->write, 0);
local_set(&reader->entries, 0);
reader->read = 0;
- *data_page = bpage;
+ data_page->data = bpage;
/*
* Use the real_end for the data size,
@@ -4930,7 +5786,7 @@ int ring_buffer_read_page(struct trace_buffer *buffer,
/* If there is room at the end of the page to save the
* missed events, then record it there.
*/
- if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
+ if (buffer->subbuf_size - commit >= sizeof(missed_events)) {
memcpy(&bpage->data[commit], &missed_events,
sizeof(missed_events));
local_add(RB_MISSED_STORED, &bpage->commit);
@@ -4942,8 +5798,8 @@ int ring_buffer_read_page(struct trace_buffer *buffer,
/*
* This page may be off to user land. Zero it out here.
*/
- if (commit < BUF_PAGE_SIZE)
- memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
+ if (commit < buffer->subbuf_size)
+ memset(&bpage->data[commit], 0, buffer->subbuf_size - commit);
out_unlock:
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
@@ -4953,6 +5809,213 @@ int ring_buffer_read_page(struct trace_buffer *buffer,
}
EXPORT_SYMBOL_GPL(ring_buffer_read_page);
+/**
+ * ring_buffer_read_page_data - get pointer to the data in the page.
+ * @page: the page to get the data from
+ *
+ * Returns pointer to the actual data in this page.
+ */
+void *ring_buffer_read_page_data(struct buffer_data_read_page *page)
+{
+ return page->data;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_read_page_data);
+
+/**
+ * ring_buffer_subbuf_size_get - get size of the sub buffer.
+ * @buffer: the buffer to get the sub buffer size from
+ *
+ * Returns size of the sub buffer, in bytes.
+ */
+int ring_buffer_subbuf_size_get(struct trace_buffer *buffer)
+{
+ return buffer->subbuf_size + BUF_PAGE_HDR_SIZE;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_subbuf_size_get);
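/*
 * Usage sketch, not part of this patch: a consumer draining one sub-buffer
 * from a CPU with the buffer_data_read_page API introduced above.
 * process_page() is a placeholder for the caller's own handling, as in the
 * kerneldoc example for ring_buffer_read_page(); error handling is minimal.
 */
static int sketch_read_one_page(struct trace_buffer *buffer, int cpu)
{
	struct buffer_data_read_page *rpage;
	int size = ring_buffer_subbuf_size_get(buffer);
	int ret;

	rpage = ring_buffer_alloc_read_page(buffer, cpu);
	if (IS_ERR(rpage))
		return PTR_ERR(rpage);

	/* full == 0: accept a partially filled sub-buffer as well */
	ret = ring_buffer_read_page(buffer, rpage, size, cpu, 0);
	if (ret >= 0)
		process_page(ring_buffer_read_page_data(rpage), ret);

	ring_buffer_free_read_page(buffer, cpu, rpage);
	return ret;
}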
+
+/**
+ * ring_buffer_subbuf_order_get - get order of system sub pages in one buffer page.
+ * @buffer: The ring_buffer to get the system sub page order from
+ *
+ * By default, one ring buffer sub page equals one system page. This parameter
+ * is configurable, per ring buffer. The size of the ring buffer sub page can
+ * be extended, but must be a power-of-two number of system pages.
+ *
+ * Returns the order of buffer sub page size, in system pages:
+ * 0 means the sub buffer size is 1 system page and so forth.
+ * In case of an error < 0 is returned.
+ */
+int ring_buffer_subbuf_order_get(struct trace_buffer *buffer)
+{
+ if (!buffer)
+ return -EINVAL;
+
+ return buffer->subbuf_order;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_subbuf_order_get);
+
+/**
+ * ring_buffer_subbuf_order_set - set the size of ring buffer sub page.
+ * @buffer: The ring_buffer to set the new page size.
+ * @order: Order of the system pages in one sub buffer page
+ *
+ * By default, one ring buffer page equals one system page. This API can be
+ * used to set a new size of the ring buffer page. The size must be a
+ * power-of-two multiple of the system page size, which is why the input
+ * parameter @order is the order of system pages that are allocated for one
+ * ring buffer page:
+ * 0 - 1 system page
+ * 1 - 2 system pages
+ * 2 - 4 system pages
+ * ...
+ *
+ * Returns 0 on success or < 0 in case of an error.
+ */
+int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order)
+{
+ struct ring_buffer_per_cpu *cpu_buffer;
+ struct buffer_page *bpage, *tmp;
+ int old_order, old_size;
+ int nr_pages;
+ int psize;
+ int err;
+ int cpu;
+
+ if (!buffer || order < 0)
+ return -EINVAL;
+
+ if (buffer->subbuf_order == order)
+ return 0;
+
+ psize = (1 << order) * PAGE_SIZE;
+ if (psize <= BUF_PAGE_HDR_SIZE)
+ return -EINVAL;
+
+ /* Size of a subbuf cannot be greater than the write counter */
+ if (psize > RB_WRITE_MASK + 1)
+ return -EINVAL;
+
+ old_order = buffer->subbuf_order;
+ old_size = buffer->subbuf_size;
+
+ /* prevent another thread from changing buffer sizes */
+ mutex_lock(&buffer->mutex);
+ atomic_inc(&buffer->record_disabled);
+
+ /* Make sure all commits have finished */
+ synchronize_rcu();
+
+ buffer->subbuf_order = order;
+ buffer->subbuf_size = psize - BUF_PAGE_HDR_SIZE;
+
+ /* Make sure all new buffers are allocated, before deleting the old ones */
+ for_each_buffer_cpu(buffer, cpu) {
+
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ continue;
+
+ cpu_buffer = buffer->buffers[cpu];
+
+ /* Update the number of pages to match the new size */
+ nr_pages = old_size * buffer->buffers[cpu]->nr_pages;
+ nr_pages = DIV_ROUND_UP(nr_pages, buffer->subbuf_size);
+
+ /* we need a minimum of two pages */
+ if (nr_pages < 2)
+ nr_pages = 2;
+
+ cpu_buffer->nr_pages_to_update = nr_pages;
+
+ /* Include the reader page */
+ nr_pages++;
+
+ /* Allocate the new size buffer */
+ INIT_LIST_HEAD(&cpu_buffer->new_pages);
+ if (__rb_allocate_pages(cpu_buffer, nr_pages,
+ &cpu_buffer->new_pages)) {
+ /* not enough memory for new pages */
+ err = -ENOMEM;
+ goto error;
+ }
+ }
+
+ for_each_buffer_cpu(buffer, cpu) {
+
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ continue;
+
+ cpu_buffer = buffer->buffers[cpu];
+
+ /* Clear the head bit to make the link list normal to read */
+ rb_head_page_deactivate(cpu_buffer);
+
+ /* Now walk the list and free all the old sub buffers */
+ list_for_each_entry_safe(bpage, tmp, cpu_buffer->pages, list) {
+ list_del_init(&bpage->list);
+ free_buffer_page(bpage);
+ }
+ /* The above loop stopped on the last page needing to be freed */
+ bpage = list_entry(cpu_buffer->pages, struct buffer_page, list);
+ free_buffer_page(bpage);
+
+ /* Free the current reader page */
+ free_buffer_page(cpu_buffer->reader_page);
+
+ /* One page was allocated for the reader page */
+ cpu_buffer->reader_page = list_entry(cpu_buffer->new_pages.next,
+ struct buffer_page, list);
+ list_del_init(&cpu_buffer->reader_page->list);
+
+ /* The cpu_buffer pages are a linked list with no head */
+ cpu_buffer->pages = cpu_buffer->new_pages.next;
+ cpu_buffer->new_pages.next->prev = cpu_buffer->new_pages.prev;
+ cpu_buffer->new_pages.prev->next = cpu_buffer->new_pages.next;
+
+ /* Clear the new_pages list */
+ INIT_LIST_HEAD(&cpu_buffer->new_pages);
+
+ cpu_buffer->head_page
+ = list_entry(cpu_buffer->pages, struct buffer_page, list);
+ cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
+
+ cpu_buffer->nr_pages = cpu_buffer->nr_pages_to_update;
+ cpu_buffer->nr_pages_to_update = 0;
+
+ free_pages((unsigned long)cpu_buffer->free_page, old_order);
+ cpu_buffer->free_page = NULL;
+
+ rb_head_page_activate(cpu_buffer);
+
+ rb_check_pages(cpu_buffer);
+ }
+
+ atomic_dec(&buffer->record_disabled);
+ mutex_unlock(&buffer->mutex);
+
+ return 0;
+
+error:
+ buffer->subbuf_order = old_order;
+ buffer->subbuf_size = old_size;
+
+ atomic_dec(&buffer->record_disabled);
+ mutex_unlock(&buffer->mutex);
+
+ for_each_buffer_cpu(buffer, cpu) {
+ cpu_buffer = buffer->buffers[cpu];
+
+ if (!cpu_buffer->nr_pages_to_update)
+ continue;
+
+ list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, list) {
+ list_del_init(&bpage->list);
+ free_buffer_page(bpage);
+ }
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_subbuf_order_set);
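/*
 * Sketch, not part of this patch: the size that a given @order buys,
 * mirroring the psize computation in ring_buffer_subbuf_order_set() above.
 * page_size and hdr_size stand in for PAGE_SIZE and BUF_PAGE_HDR_SIZE.
 */
static long sketch_subbuf_data_size(int order, long page_size, long hdr_size)
{
	long psize = (1L << order) * page_size;	/* order 0 -> 1 page, 1 -> 2, 2 -> 4 ... */

	return psize - hdr_size;		/* bytes left for event data */
}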
+
/*
* We only allocate new buffers, never free them if the CPU goes down.
* If we were to free the buffer, then the user would lose any trace that was in
@@ -5111,7 +6174,7 @@ static __init int rb_write_something(struct rb_test_data *data, bool nested)
}
out:
- ring_buffer_unlock_commit(data->buffer, event);
+ ring_buffer_unlock_commit(data->buffer);
return 0;
}
@@ -5179,16 +6242,13 @@ static __init int test_ringbuffer(void)
rb_data[cpu].buffer = buffer;
rb_data[cpu].cpu = cpu;
rb_data[cpu].cnt = cpu;
- rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
- "rbtester/%d", cpu);
+ rb_threads[cpu] = kthread_run_on_cpu(rb_test, &rb_data[cpu],
+ cpu, "rbtester/%u");
if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
pr_cont("FAILED\n");
ret = PTR_ERR(rb_threads[cpu]);
goto out_free;
}
-
- kthread_bind(rb_threads[cpu], cpu);
- wake_up_process(rb_threads[cpu]);
}
/* Now create the rb hammer! */
@@ -5295,10 +6355,10 @@ static __init int test_ringbuffer(void)
pr_info(" total events: %ld\n", total_lost + total_read);
pr_info(" recorded len bytes: %ld\n", total_len);
pr_info(" recorded size bytes: %ld\n", total_size);
- if (total_lost)
+ if (total_lost) {
pr_info(" With dropped events, record len and size may not match\n"
" alloced and written from above\n");
- if (!total_lost) {
+ } else {
if (RB_WARN_ON(buffer, total_len != total_alloc ||
total_size != total_written))
break;
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
index 8df0aa810950..008187ebd7fe 100644
--- a/kernel/trace/ring_buffer_benchmark.c
+++ b/kernel/trace/ring_buffer_benchmark.c
@@ -45,8 +45,8 @@ MODULE_PARM_DESC(write_iteration, "# of writes between timestamp readings");
static int producer_nice = MAX_NICE;
static int consumer_nice = MAX_NICE;
-static int producer_fifo = -1;
-static int consumer_fifo = -1;
+static int producer_fifo;
+static int consumer_fifo;
module_param(producer_nice, int, 0644);
MODULE_PARM_DESC(producer_nice, "nice prio for producer");
@@ -55,10 +55,10 @@ module_param(consumer_nice, int, 0644);
MODULE_PARM_DESC(consumer_nice, "nice prio for consumer");
module_param(producer_fifo, int, 0644);
-MODULE_PARM_DESC(producer_fifo, "fifo prio for producer");
+MODULE_PARM_DESC(producer_fifo, "use fifo for producer: 0 - disabled, 1 - low prio, 2 - fifo");
module_param(consumer_fifo, int, 0644);
-MODULE_PARM_DESC(consumer_fifo, "fifo prio for consumer");
+MODULE_PARM_DESC(consumer_fifo, "use fifo for consumer: 0 - disabled, 1 - low prio, 2 - fifo");
static int read_events;
@@ -104,10 +104,11 @@ static enum event_status read_event(int cpu)
static enum event_status read_page(int cpu)
{
+ struct buffer_data_read_page *bpage;
struct ring_buffer_event *event;
struct rb_page *rpage;
unsigned long commit;
- void *bpage;
+ int page_size;
int *entry;
int ret;
int inc;
@@ -117,14 +118,15 @@ static enum event_status read_page(int cpu)
if (IS_ERR(bpage))
return EVENT_DROPPED;
- ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1);
+ page_size = ring_buffer_subbuf_size_get(buffer);
+ ret = ring_buffer_read_page(buffer, bpage, page_size, cpu, 1);
if (ret >= 0) {
- rpage = bpage;
+ rpage = ring_buffer_read_page_data(bpage);
/* The commit may have missed event flags set, clear them */
commit = local_read(&rpage->commit) & 0xfffff;
for (i = 0; i < commit && !test_error ; i += inc) {
- if (i >= (PAGE_SIZE - offsetof(struct rb_page, data))) {
+ if (i >= (page_size - offsetof(struct rb_page, data))) {
TEST_ERROR();
break;
}
@@ -258,7 +260,7 @@ static void ring_buffer_producer(void)
hit++;
entry = ring_buffer_event_data(event);
*entry = smp_processor_id();
- ring_buffer_unlock_commit(buffer, event);
+ ring_buffer_unlock_commit(buffer);
}
}
end_time = ktime_get();
@@ -303,22 +305,22 @@ static void ring_buffer_producer(void)
trace_printk("ERROR!\n");
if (!disable_reader) {
- if (consumer_fifo < 0)
+ if (consumer_fifo)
+ trace_printk("Running Consumer at SCHED_FIFO %s\n",
+ consumer_fifo == 1 ? "low" : "high");
+ else
trace_printk("Running Consumer at nice: %d\n",
consumer_nice);
- else
- trace_printk("Running Consumer at SCHED_FIFO %d\n",
- consumer_fifo);
}
- if (producer_fifo < 0)
+ if (producer_fifo)
+ trace_printk("Running Producer at SCHED_FIFO %s\n",
+ producer_fifo == 1 ? "low" : "high");
+ else
trace_printk("Running Producer at nice: %d\n",
producer_nice);
- else
- trace_printk("Running Producer at SCHED_FIFO %d\n",
- producer_fifo);
/* Let the user know that the test is running at low priority */
- if (producer_fifo < 0 && consumer_fifo < 0 &&
+ if (!producer_fifo && !consumer_fifo &&
producer_nice == MAX_NICE && consumer_nice == MAX_NICE)
trace_printk("WARNING!!! This test is running at lowest priority.\n");
@@ -455,21 +457,19 @@ static int __init ring_buffer_benchmark_init(void)
* Run them as low-prio background tasks by default:
*/
if (!disable_reader) {
- if (consumer_fifo >= 0) {
- struct sched_param param = {
- .sched_priority = consumer_fifo
- };
- sched_setscheduler(consumer, SCHED_FIFO, &param);
- } else
+ if (consumer_fifo >= 2)
+ sched_set_fifo(consumer);
+ else if (consumer_fifo == 1)
+ sched_set_fifo_low(consumer);
+ else
set_user_nice(consumer, consumer_nice);
}
- if (producer_fifo >= 0) {
- struct sched_param param = {
- .sched_priority = producer_fifo
- };
- sched_setscheduler(producer, SCHED_FIFO, &param);
- } else
+ if (producer_fifo >= 2)
+ sched_set_fifo(producer);
+ else if (producer_fifo == 1)
+ sched_set_fifo_low(producer);
+ else
set_user_nice(producer, producer_nice);
return 0;
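
With this change producer_fifo/consumer_fifo stop being raw SCHED_FIFO priorities and become a three-way selector: 0 keeps the thread at its nice value, 1 asks for a low SCHED_FIFO priority via sched_set_fifo_low(), and 2 (or more) asks for sched_set_fifo(), as the init hunk above applies to both threads. A minimal sketch of that selection logic, not part of the patch (the helper name is hypothetical):

static void rb_bench_set_prio(struct task_struct *task, int fifo, int nice)
{
	if (fifo >= 2)
		sched_set_fifo(task);		/* regular SCHED_FIFO priority */
	else if (fifo == 1)
		sched_set_fifo_low(task);	/* low SCHED_FIFO priority */
	else
		set_user_nice(task, nice);	/* stay SCHED_OTHER at "nice" */
}
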
diff --git a/kernel/trace/rv/Kconfig b/kernel/trace/rv/Kconfig
new file mode 100644
index 000000000000..831779607e84
--- /dev/null
+++ b/kernel/trace/rv/Kconfig
@@ -0,0 +1,78 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+config DA_MON_EVENTS
+ bool
+
+config DA_MON_EVENTS_IMPLICIT
+ select DA_MON_EVENTS
+ bool
+
+config DA_MON_EVENTS_ID
+ select DA_MON_EVENTS
+ bool
+
+menuconfig RV
+ bool "Runtime Verification"
+ depends on TRACING
+ help
+ Enable the kernel runtime verification infrastructure. RV is a
+ lightweight (yet rigorous) method that complements classical
+ exhaustive verification techniques (such as model checking and
+ theorem proving). RV works by analyzing the trace of the system's
+ actual execution, comparing it against a formal specification of
+ the system behavior.
+
+ For further information, see:
+ Documentation/trace/rv/runtime-verification.rst
+
+config RV_MON_WIP
+ depends on RV
+ depends on PREEMPT_TRACER
+ select DA_MON_EVENTS_IMPLICIT
+ bool "wip monitor"
+ help
+ Enable the wip (wakeup in preemptive) sample monitor, which
+ illustrates the usage of per-cpu monitors and one limitation of
+ the preempt_disable/enable events.
+
+ For further information, see:
+ Documentation/trace/rv/monitor_wip.rst
+
+config RV_MON_WWNR
+ depends on RV
+ select DA_MON_EVENTS_ID
+ bool "wwnr monitor"
+ help
+ Enable the wwnr (wakeup while not running) sample monitor, which
+ illustrates the usage of per-task monitors.
+ The model is broken on purpose: it serves to test reactors.
+
+ For further information, see:
+ Documentation/trace/rv/monitor_wwnr.rst
+
+config RV_REACTORS
+ bool "Runtime verification reactors"
+ default y
+ depends on RV
+ help
+ Enables the online runtime verification reactors. A runtime
+ monitor can cause a reaction to the detection of an exception
+ in the model's execution. By default, the monitors have
+ tracing reactions, printing the monitor output via tracepoints,
+ but other reactions can be added (on-demand) via this interface.
+
+config RV_REACT_PRINTK
+ bool "Printk reactor"
+ depends on RV_REACTORS
+ default y
+ help
+ Enables the printk reactor. The printk reactor emits a printk()
+ message if an exception is found.
+
+config RV_REACT_PANIC
+ bool "Panic reactor"
+ depends on RV_REACTORS
+ default y
+ help
+ Enables the panic reactor. The panic reactor emits a printk()
+ message if an exception is found and panic()s the system.
diff --git a/kernel/trace/rv/Makefile b/kernel/trace/rv/Makefile
new file mode 100644
index 000000000000..963d14875b45
--- /dev/null
+++ b/kernel/trace/rv/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_RV) += rv.o
+obj-$(CONFIG_RV_MON_WIP) += monitors/wip/wip.o
+obj-$(CONFIG_RV_MON_WWNR) += monitors/wwnr/wwnr.o
+obj-$(CONFIG_RV_REACTORS) += rv_reactors.o
+obj-$(CONFIG_RV_REACT_PRINTK) += reactor_printk.o
+obj-$(CONFIG_RV_REACT_PANIC) += reactor_panic.o
diff --git a/kernel/trace/rv/monitors/wip/wip.c b/kernel/trace/rv/monitors/wip/wip.c
new file mode 100644
index 000000000000..b2b49a27e886
--- /dev/null
+++ b/kernel/trace/rv/monitors/wip/wip.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/ftrace.h>
+#include <linux/tracepoint.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/rv.h>
+#include <rv/instrumentation.h>
+#include <rv/da_monitor.h>
+
+#define MODULE_NAME "wip"
+
+#include <trace/events/rv.h>
+#include <trace/events/sched.h>
+#include <trace/events/preemptirq.h>
+
+#include "wip.h"
+
+static struct rv_monitor rv_wip;
+DECLARE_DA_MON_PER_CPU(wip, unsigned char);
+
+static void handle_preempt_disable(void *data, unsigned long ip, unsigned long parent_ip)
+{
+ da_handle_event_wip(preempt_disable_wip);
+}
+
+static void handle_preempt_enable(void *data, unsigned long ip, unsigned long parent_ip)
+{
+ da_handle_start_event_wip(preempt_enable_wip);
+}
+
+static void handle_sched_waking(void *data, struct task_struct *task)
+{
+ da_handle_event_wip(sched_waking_wip);
+}
+
+static int enable_wip(void)
+{
+ int retval;
+
+ retval = da_monitor_init_wip();
+ if (retval)
+ return retval;
+
+ rv_attach_trace_probe("wip", preempt_enable, handle_preempt_enable);
+ rv_attach_trace_probe("wip", sched_waking, handle_sched_waking);
+ rv_attach_trace_probe("wip", preempt_disable, handle_preempt_disable);
+
+ return 0;
+}
+
+static void disable_wip(void)
+{
+ rv_wip.enabled = 0;
+
+ rv_detach_trace_probe("wip", preempt_disable, handle_preempt_disable);
+ rv_detach_trace_probe("wip", preempt_enable, handle_preempt_enable);
+ rv_detach_trace_probe("wip", sched_waking, handle_sched_waking);
+
+ da_monitor_destroy_wip();
+}
+
+static struct rv_monitor rv_wip = {
+ .name = "wip",
+ .description = "wakeup in preemptive per-cpu testing monitor.",
+ .enable = enable_wip,
+ .disable = disable_wip,
+ .reset = da_monitor_reset_all_wip,
+ .enabled = 0,
+};
+
+static int __init register_wip(void)
+{
+ rv_register_monitor(&rv_wip);
+ return 0;
+}
+
+static void __exit unregister_wip(void)
+{
+ rv_unregister_monitor(&rv_wip);
+}
+
+module_init(register_wip);
+module_exit(unregister_wip);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Daniel Bristot de Oliveira <bristot@kernel.org>");
+MODULE_DESCRIPTION("wip: wakeup in preemptive - per-cpu sample monitor.");
diff --git a/kernel/trace/rv/monitors/wip/wip.h b/kernel/trace/rv/monitors/wip/wip.h
new file mode 100644
index 000000000000..2e373f2c65ed
--- /dev/null
+++ b/kernel/trace/rv/monitors/wip/wip.h
@@ -0,0 +1,46 @@
+/*
+ * Automatically generated C representation of wip automaton
+ * For further information about this format, see kernel documentation:
+ * Documentation/trace/rv/deterministic_automata.rst
+ */
+
+enum states_wip {
+ preemptive_wip = 0,
+ non_preemptive_wip,
+ state_max_wip
+};
+
+#define INVALID_STATE state_max_wip
+
+enum events_wip {
+ preempt_disable_wip = 0,
+ preempt_enable_wip,
+ sched_waking_wip,
+ event_max_wip
+};
+
+struct automaton_wip {
+ char *state_names[state_max_wip];
+ char *event_names[event_max_wip];
+ unsigned char function[state_max_wip][event_max_wip];
+ unsigned char initial_state;
+ bool final_states[state_max_wip];
+};
+
+static const struct automaton_wip automaton_wip = {
+ .state_names = {
+ "preemptive",
+ "non_preemptive"
+ },
+ .event_names = {
+ "preempt_disable",
+ "preempt_enable",
+ "sched_waking"
+ },
+ .function = {
+ { non_preemptive_wip, INVALID_STATE, INVALID_STATE },
+ { INVALID_STATE, preemptive_wip, non_preemptive_wip },
+ },
+ .initial_state = preemptive_wip,
+ .final_states = { 1, 0 },
+};
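
The generated header encodes the model as a (state, event) transition table. A short worked example of how such a table is stepped, not part of the patch (the lookup helper is hypothetical; the DA monitor helpers included via <rv/da_monitor.h> do the equivalent):

/*
 * INVALID_STATE (== state_max_wip) marks an event that is not allowed in
 * the current state, i.e. a violation of the model.
 */
static inline unsigned char wip_next_state(unsigned char state, unsigned char event)
{
	return automaton_wip.function[state][event];
}

/*
 * For example:
 *   wip_next_state(preemptive_wip, preempt_disable_wip) == non_preemptive_wip
 *   wip_next_state(preemptive_wip, sched_waking_wip)    == INVALID_STATE
 * that is, a wakeup while the CPU is preemptive violates the wip model.
 */
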
diff --git a/kernel/trace/rv/monitors/wwnr/wwnr.c b/kernel/trace/rv/monitors/wwnr/wwnr.c
new file mode 100644
index 000000000000..0e43dd2db685
--- /dev/null
+++ b/kernel/trace/rv/monitors/wwnr/wwnr.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/ftrace.h>
+#include <linux/tracepoint.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/rv.h>
+#include <rv/instrumentation.h>
+#include <rv/da_monitor.h>
+
+#define MODULE_NAME "wwnr"
+
+#include <trace/events/rv.h>
+#include <trace/events/sched.h>
+
+#include "wwnr.h"
+
+static struct rv_monitor rv_wwnr;
+DECLARE_DA_MON_PER_TASK(wwnr, unsigned char);
+
+static void handle_switch(void *data, bool preempt, struct task_struct *p,
+ struct task_struct *n, unsigned int prev_state)
+{
+ /* start monitoring only after the first suspension */
+ if (prev_state == TASK_INTERRUPTIBLE)
+ da_handle_start_event_wwnr(p, switch_out_wwnr);
+ else
+ da_handle_event_wwnr(p, switch_out_wwnr);
+
+ da_handle_event_wwnr(n, switch_in_wwnr);
+}
+
+static void handle_wakeup(void *data, struct task_struct *p)
+{
+ da_handle_event_wwnr(p, wakeup_wwnr);
+}
+
+static int enable_wwnr(void)
+{
+ int retval;
+
+ retval = da_monitor_init_wwnr();
+ if (retval)
+ return retval;
+
+ rv_attach_trace_probe("wwnr", sched_switch, handle_switch);
+ rv_attach_trace_probe("wwnr", sched_wakeup, handle_wakeup);
+
+ return 0;
+}
+
+static void disable_wwnr(void)
+{
+ rv_wwnr.enabled = 0;
+
+ rv_detach_trace_probe("wwnr", sched_switch, handle_switch);
+ rv_detach_trace_probe("wwnr", sched_wakeup, handle_wakeup);
+
+ da_monitor_destroy_wwnr();
+}
+
+static struct rv_monitor rv_wwnr = {
+ .name = "wwnr",
+ .description = "wakeup while not running per-task testing model.",
+ .enable = enable_wwnr,
+ .disable = disable_wwnr,
+ .reset = da_monitor_reset_all_wwnr,
+ .enabled = 0,
+};
+
+static int __init register_wwnr(void)
+{
+ rv_register_monitor(&rv_wwnr);
+ return 0;
+}
+
+static void __exit unregister_wwnr(void)
+{
+ rv_unregister_monitor(&rv_wwnr);
+}
+
+module_init(register_wwnr);
+module_exit(unregister_wwnr);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Daniel Bristot de Oliveira <bristot@kernel.org>");
+MODULE_DESCRIPTION("wwnr: wakeup while not running monitor");
diff --git a/kernel/trace/rv/monitors/wwnr/wwnr.h b/kernel/trace/rv/monitors/wwnr/wwnr.h
new file mode 100644
index 000000000000..d0d9c4b8121b
--- /dev/null
+++ b/kernel/trace/rv/monitors/wwnr/wwnr.h
@@ -0,0 +1,46 @@
+/*
+ * Automatically generated C representation of wwnr automaton
+ * For further information about this format, see kernel documentation:
+ * Documentation/trace/rv/deterministic_automata.rst
+ */
+
+enum states_wwnr {
+ not_running_wwnr = 0,
+ running_wwnr,
+ state_max_wwnr
+};
+
+#define INVALID_STATE state_max_wwnr
+
+enum events_wwnr {
+ switch_in_wwnr = 0,
+ switch_out_wwnr,
+ wakeup_wwnr,
+ event_max_wwnr
+};
+
+struct automaton_wwnr {
+ char *state_names[state_max_wwnr];
+ char *event_names[event_max_wwnr];
+ unsigned char function[state_max_wwnr][event_max_wwnr];
+ unsigned char initial_state;
+ bool final_states[state_max_wwnr];
+};
+
+static const struct automaton_wwnr automaton_wwnr = {
+ .state_names = {
+ "not_running",
+ "running"
+ },
+ .event_names = {
+ "switch_in",
+ "switch_out",
+ "wakeup"
+ },
+ .function = {
+ { running_wwnr, INVALID_STATE, not_running_wwnr },
+ { INVALID_STATE, not_running_wwnr, INVALID_STATE },
+ },
+ .initial_state = not_running_wwnr,
+ .final_states = { 1, 0 },
+};
diff --git a/kernel/trace/rv/reactor_panic.c b/kernel/trace/rv/reactor_panic.c
new file mode 100644
index 000000000000..0186ff4cbd0b
--- /dev/null
+++ b/kernel/trace/rv/reactor_panic.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira <bristot@kernel.org>
+ *
+ * Panic RV reactor:
+ * Prints the exception msg to the kernel message log and panic().
+ */
+
+#include <linux/ftrace.h>
+#include <linux/tracepoint.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/rv.h>
+
+static void rv_panic_reaction(char *msg)
+{
+ panic(msg);
+}
+
+static struct rv_reactor rv_panic = {
+ .name = "panic",
+ .description = "panic the system if an exception is found.",
+ .react = rv_panic_reaction
+};
+
+static int __init register_react_panic(void)
+{
+ rv_register_reactor(&rv_panic);
+ return 0;
+}
+
+static void __exit unregister_react_panic(void)
+{
+ rv_unregister_reactor(&rv_panic);
+}
+
+module_init(register_react_panic);
+module_exit(unregister_react_panic);
+
+MODULE_AUTHOR("Daniel Bristot de Oliveira");
+MODULE_DESCRIPTION("panic rv reactor: panic if an exception is found.");
diff --git a/kernel/trace/rv/reactor_printk.c b/kernel/trace/rv/reactor_printk.c
new file mode 100644
index 000000000000..178759dbf89f
--- /dev/null
+++ b/kernel/trace/rv/reactor_printk.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira <bristot@kernel.org>
+ *
+ * Printk RV reactor:
+ * Prints the exception msg to the kernel message log.
+ */
+#include <linux/ftrace.h>
+#include <linux/tracepoint.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/rv.h>
+
+static void rv_printk_reaction(char *msg)
+{
+ printk_deferred(msg);
+}
+
+static struct rv_reactor rv_printk = {
+ .name = "printk",
+ .description = "prints the exception msg to the kernel message log.",
+ .react = rv_printk_reaction
+};
+
+static int __init register_react_printk(void)
+{
+ rv_register_reactor(&rv_printk);
+ return 0;
+}
+
+static void __exit unregister_react_printk(void)
+{
+ rv_unregister_reactor(&rv_printk);
+}
+
+module_init(register_react_printk);
+module_exit(unregister_react_printk);
+
+MODULE_AUTHOR("Daniel Bristot de Oliveira");
+MODULE_DESCRIPTION("printk rv reactor: printk if an exception is hit.");
diff --git a/kernel/trace/rv/rv.c b/kernel/trace/rv/rv.c
new file mode 100644
index 000000000000..2f68e93fff0b
--- /dev/null
+++ b/kernel/trace/rv/rv.c
@@ -0,0 +1,797 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira <bristot@kernel.org>
+ *
+ * This is the online Runtime Verification (RV) interface.
+ *
+ * RV is a lightweight (yet rigorous) method that complements classical
+ * exhaustive verification techniques (such as model checking and
+ * theorem proving) with a more practical approach to complex systems.
+ *
+ * RV works by analyzing the trace of the system's actual execution,
+ * comparing it against a formal specification of the system behavior.
+ * RV can give precise information on the runtime behavior of the
+ * monitored system while enabling the reaction for unexpected
+ * events, avoiding, for example, the propagation of a failure on
+ * safety-critical systems.
+ *
+ * The development of this interface roots in the development of the
+ * paper:
+ *
+ * De Oliveira, Daniel Bristot; Cucinotta, Tommaso; De Oliveira, Romulo
+ * Silva. Efficient formal verification for the Linux kernel. In:
+ * International Conference on Software Engineering and Formal Methods.
+ * Springer, Cham, 2019. p. 315-332.
+ *
+ * And:
+ *
+ * De Oliveira, Daniel Bristot, et al. Automata-based formal analysis
+ * and verification of the real-time Linux kernel. PhD Thesis, 2020.
+ *
+ * == Runtime monitor interface ==
+ *
+ * A monitor is the central part of the runtime verification of a system.
+ *
+ * The monitor stands in between the formal specification of the desired
+ * (or undesired) behavior, and the trace of the actual system.
+ *
+ * In Linux terms, the runtime verification monitors are encapsulated
+ * inside the "RV monitor" abstraction. A RV monitor includes a reference
+ * model of the system, a set of instances of the monitor (per-cpu monitor,
+ * per-task monitor, and so on), and the helper functions that glue the
+ * monitor to the system via trace. Generally, a monitor includes some form
+ * of trace output as a reaction for event parsing and exceptions,
+ * as depicted below:
+ *
+ * Linux +----- RV Monitor ----------------------------------+ Formal
+ * Realm | | Realm
+ * +-------------------+ +----------------+ +-----------------+
+ * | Linux kernel | | Monitor | | Reference |
+ * | Tracing | -> | Instance(s) | <- | Model |
+ * | (instrumentation) | | (verification) | | (specification) |
+ * +-------------------+ +----------------+ +-----------------+
+ * | | |
+ * | V |
+ * | +----------+ |
+ * | | Reaction | |
+ * | +--+--+--+-+ |
+ * | | | | |
+ * | | | +-> trace output ? |
+ * +------------------------|--|----------------------+
+ * | +----> panic ?
+ * +-------> <user-specified>
+ *
+ * This file implements the interface for loading RV monitors, and
+ * to control the verification session.
+ *
+ * == Registering monitors ==
+ *
+ * The struct rv_monitor defines a set of callback functions to control
+ * a verification session. For instance, when a given monitor is enabled,
+ * the "enable" callback function is called to hook the instrumentation
+ * functions to the kernel trace events. The "disable" function is called
+ * when disabling the verification session.
+ *
+ * An RV monitor is registered via:
+ * int rv_register_monitor(struct rv_monitor *monitor);
+ * And unregistered via:
+ * int rv_unregister_monitor(struct rv_monitor *monitor);
+ *
+ * == User interface ==
+ *
+ * The user interface resembles the kernel tracing interface. It presents
+ * these files:
+ *
+ * "available_monitors"
+ * - List the available monitors, one per line.
+ *
+ * For example:
+ * # cat available_monitors
+ * wip
+ * wwnr
+ *
+ * "enabled_monitors"
+ * - Lists the enabled monitors, one per line;
+ * - Writing to it enables a given monitor;
+ * - Writing a monitor name with a '!' prefix disables it;
+ * - Truncating the file disables all enabled monitors.
+ *
+ * For example:
+ * # cat enabled_monitors
+ * # echo wip > enabled_monitors
+ * # echo wwnr >> enabled_monitors
+ * # cat enabled_monitors
+ * wip
+ * wwnr
+ * # echo '!wip' >> enabled_monitors
+ * # cat enabled_monitors
+ * wwnr
+ * # echo > enabled_monitors
+ * # cat enabled_monitors
+ * #
+ *
+ * Note that more than one monitor can be enabled concurrently.
+ *
+ * "monitoring_on"
+ * - It is a general on/off switch for monitoring. Note
+ * that it does not disable enabled monitors or detach events,
+ * but stops the per-entity monitors from monitoring the events
+ * received from the instrumentation. It resembles the "tracing_on"
+ * switch.
+ *
+ * "monitors/"
+ * Each monitor will have its own directory inside "monitors/". There,
+ * the monitor-specific files will be presented.
+ * The "monitors/" directory resembles the "events" directory on
+ * tracefs.
+ *
+ * For example:
+ * # cd monitors/wip/
+ * # ls
+ * desc enable
+ * # cat desc
+ * auto-generated wakeup in preemptive monitor.
+ * # cat enable
+ * 0
+ *
+ * For further information, see:
+ * Documentation/trace/rv/runtime-verification.rst
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+
+#ifdef CONFIG_DA_MON_EVENTS
+#define CREATE_TRACE_POINTS
+#include <trace/events/rv.h>
+#endif
+
+#include "rv.h"
+
+DEFINE_MUTEX(rv_interface_lock);
+
+static struct rv_interface rv_root;
+
+struct dentry *get_monitors_root(void)
+{
+ return rv_root.monitors_dir;
+}
+
+/*
+ * Interface for the monitor register.
+ */
+static LIST_HEAD(rv_monitors_list);
+
+static int task_monitor_count;
+static bool task_monitor_slots[RV_PER_TASK_MONITORS];
+
+int rv_get_task_monitor_slot(void)
+{
+ int i;
+
+ lockdep_assert_held(&rv_interface_lock);
+
+ if (task_monitor_count == RV_PER_TASK_MONITORS)
+ return -EBUSY;
+
+ task_monitor_count++;
+
+ for (i = 0; i < RV_PER_TASK_MONITORS; i++) {
+ if (task_monitor_slots[i] == false) {
+ task_monitor_slots[i] = true;
+ return i;
+ }
+ }
+
+ WARN_ONCE(1, "RV task_monitor_count and slots are out of sync\n");
+
+ return -EINVAL;
+}
+
+void rv_put_task_monitor_slot(int slot)
+{
+ lockdep_assert_held(&rv_interface_lock);
+
+ if (slot < 0 || slot >= RV_PER_TASK_MONITORS) {
+ WARN_ONCE(1, "RV releasing an invalid slot!: %d\n", slot);
+ return;
+ }
+
+ WARN_ONCE(!task_monitor_slots[slot], "RV releasing unused task_monitor_slots: %d\n",
+ slot);
+
+ task_monitor_count--;
+ task_monitor_slots[slot] = false;
+}
+
+/*
+ * This section collects the monitor/ files and folders.
+ */
+static ssize_t monitor_enable_read_data(struct file *filp, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct rv_monitor_def *mdef = filp->private_data;
+ const char *buff;
+
+ buff = mdef->monitor->enabled ? "1\n" : "0\n";
+
+ return simple_read_from_buffer(user_buf, count, ppos, buff, strlen(buff)+1);
+}
+
+/*
+ * __rv_disable_monitor - disable an enabled monitor
+ */
+static int __rv_disable_monitor(struct rv_monitor_def *mdef, bool sync)
+{
+ lockdep_assert_held(&rv_interface_lock);
+
+ if (mdef->monitor->enabled) {
+ mdef->monitor->enabled = 0;
+ mdef->monitor->disable();
+
+ /*
+ * Wait for the execution of all events to finish.
+ * Otherwise, the data used by the monitor could
+ * be inconsistent, e.g., if the monitor is re-enabled.
+ */
+ if (sync)
+ tracepoint_synchronize_unregister();
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * rv_disable_monitor - disable a given runtime monitor
+ * @mdef: the monitor definition structure
+ *
+ * Returns 0 on success.
+ */
+int rv_disable_monitor(struct rv_monitor_def *mdef)
+{
+ __rv_disable_monitor(mdef, true);
+ return 0;
+}
+
+/**
+ * rv_enable_monitor - enable a given runtime monitor
+ * @mdef: the monitor definition structure
+ *
+ * Returns 0 on success, error otherwise.
+ */
+int rv_enable_monitor(struct rv_monitor_def *mdef)
+{
+ int retval;
+
+ lockdep_assert_held(&rv_interface_lock);
+
+ if (mdef->monitor->enabled)
+ return 0;
+
+ retval = mdef->monitor->enable();
+
+ if (!retval)
+ mdef->monitor->enabled = 1;
+
+ return retval;
+}
+
+/*
+ * interface for enabling/disabling a monitor.
+ */
+static ssize_t monitor_enable_write_data(struct file *filp, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct rv_monitor_def *mdef = filp->private_data;
+ int retval;
+ bool val;
+
+ retval = kstrtobool_from_user(user_buf, count, &val);
+ if (retval)
+ return retval;
+
+ mutex_lock(&rv_interface_lock);
+
+ if (val)
+ retval = rv_enable_monitor(mdef);
+ else
+ retval = rv_disable_monitor(mdef);
+
+ mutex_unlock(&rv_interface_lock);
+
+ return retval ? : count;
+}
+
+static const struct file_operations interface_enable_fops = {
+ .open = simple_open,
+ .llseek = no_llseek,
+ .write = monitor_enable_write_data,
+ .read = monitor_enable_read_data,
+};
+
+/*
+ * Interface to read monitors description.
+ */
+static ssize_t monitor_desc_read_data(struct file *filp, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct rv_monitor_def *mdef = filp->private_data;
+ char buff[256];
+
+ memset(buff, 0, sizeof(buff));
+
+ snprintf(buff, sizeof(buff), "%s\n", mdef->monitor->description);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buff, strlen(buff) + 1);
+}
+
+static const struct file_operations interface_desc_fops = {
+ .open = simple_open,
+ .llseek = no_llseek,
+ .read = monitor_desc_read_data,
+};
+
+/*
+ * During the registration of a monitor, this function creates
+ * the monitor dir, where the specific options of the monitor
+ * are exposed.
+ */
+static int create_monitor_dir(struct rv_monitor_def *mdef)
+{
+ struct dentry *root = get_monitors_root();
+ const char *name = mdef->monitor->name;
+ struct dentry *tmp;
+ int retval;
+
+ mdef->root_d = rv_create_dir(name, root);
+ if (!mdef->root_d)
+ return -ENOMEM;
+
+ tmp = rv_create_file("enable", RV_MODE_WRITE, mdef->root_d, mdef, &interface_enable_fops);
+ if (!tmp) {
+ retval = -ENOMEM;
+ goto out_remove_root;
+ }
+
+ tmp = rv_create_file("desc", RV_MODE_READ, mdef->root_d, mdef, &interface_desc_fops);
+ if (!tmp) {
+ retval = -ENOMEM;
+ goto out_remove_root;
+ }
+
+ retval = reactor_populate_monitor(mdef);
+ if (retval)
+ goto out_remove_root;
+
+ return 0;
+
+out_remove_root:
+ rv_remove(mdef->root_d);
+ return retval;
+}
+
+/*
+ * Available/Enable monitor shared seq functions.
+ */
+static int monitors_show(struct seq_file *m, void *p)
+{
+ struct rv_monitor_def *mon_def = p;
+
+ seq_printf(m, "%s\n", mon_def->monitor->name);
+ return 0;
+}
+
+/*
+ * Used by the seq file operations at the end of a read
+ * operation.
+ */
+static void monitors_stop(struct seq_file *m, void *p)
+{
+ mutex_unlock(&rv_interface_lock);
+}
+
+/*
+ * Available monitor seq functions.
+ */
+static void *available_monitors_start(struct seq_file *m, loff_t *pos)
+{
+ mutex_lock(&rv_interface_lock);
+ return seq_list_start(&rv_monitors_list, *pos);
+}
+
+static void *available_monitors_next(struct seq_file *m, void *p, loff_t *pos)
+{
+ return seq_list_next(p, &rv_monitors_list, pos);
+}
+
+/*
+ * Enable monitor seq functions.
+ */
+static void *enabled_monitors_next(struct seq_file *m, void *p, loff_t *pos)
+{
+ struct rv_monitor_def *m_def = p;
+
+ (*pos)++;
+
+ list_for_each_entry_continue(m_def, &rv_monitors_list, list) {
+ if (m_def->monitor->enabled)
+ return m_def;
+ }
+
+ return NULL;
+}
+
+static void *enabled_monitors_start(struct seq_file *m, loff_t *pos)
+{
+ struct rv_monitor_def *m_def;
+ loff_t l;
+
+ mutex_lock(&rv_interface_lock);
+
+ if (list_empty(&rv_monitors_list))
+ return NULL;
+
+ m_def = list_entry(&rv_monitors_list, struct rv_monitor_def, list);
+
+ for (l = 0; l <= *pos; ) {
+ m_def = enabled_monitors_next(m, m_def, &l);
+ if (!m_def)
+ break;
+ }
+
+ return m_def;
+}
+
+/*
+ * available/enabled monitors seq definition.
+ */
+static const struct seq_operations available_monitors_seq_ops = {
+ .start = available_monitors_start,
+ .next = available_monitors_next,
+ .stop = monitors_stop,
+ .show = monitors_show
+};
+
+static const struct seq_operations enabled_monitors_seq_ops = {
+ .start = enabled_monitors_start,
+ .next = enabled_monitors_next,
+ .stop = monitors_stop,
+ .show = monitors_show
+};
+
+/*
+ * available_monitors interface.
+ */
+static int available_monitors_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &available_monitors_seq_ops);
+};
+
+static const struct file_operations available_monitors_ops = {
+ .open = available_monitors_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release
+};
+
+/*
+ * enabled_monitors interface.
+ */
+static void disable_all_monitors(void)
+{
+ struct rv_monitor_def *mdef;
+ int enabled = 0;
+
+ mutex_lock(&rv_interface_lock);
+
+ list_for_each_entry(mdef, &rv_monitors_list, list)
+ enabled += __rv_disable_monitor(mdef, false);
+
+ if (enabled) {
+ /*
+ * Wait for the execution of all events to finish.
+ * Otherwise, the data used by the monitor could
+ * be inconsistent, e.g., if the monitor is re-enabled.
+ */
+ tracepoint_synchronize_unregister();
+ }
+
+ mutex_unlock(&rv_interface_lock);
+}
+
+static int enabled_monitors_open(struct inode *inode, struct file *file)
+{
+ if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
+ disable_all_monitors();
+
+ return seq_open(file, &enabled_monitors_seq_ops);
+};
+
+static ssize_t enabled_monitors_write(struct file *filp, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char buff[MAX_RV_MONITOR_NAME_SIZE + 2];
+ struct rv_monitor_def *mdef;
+ int retval = -EINVAL;
+ bool enable = true;
+ char *ptr;
+ int len;
+
+ if (count < 1 || count > MAX_RV_MONITOR_NAME_SIZE + 1)
+ return -EINVAL;
+
+ memset(buff, 0, sizeof(buff));
+
+ retval = simple_write_to_buffer(buff, sizeof(buff) - 1, ppos, user_buf, count);
+ if (retval < 0)
+ return -EFAULT;
+
+ ptr = strim(buff);
+
+ if (ptr[0] == '!') {
+ enable = false;
+ ptr++;
+ }
+
+ len = strlen(ptr);
+ if (!len)
+ return count;
+
+ mutex_lock(&rv_interface_lock);
+
+ retval = -EINVAL;
+
+ list_for_each_entry(mdef, &rv_monitors_list, list) {
+ if (strcmp(ptr, mdef->monitor->name) != 0)
+ continue;
+
+ /*
+ * Monitor found!
+ */
+ if (enable)
+ retval = rv_enable_monitor(mdef);
+ else
+ retval = rv_disable_monitor(mdef);
+
+ if (!retval)
+ retval = count;
+
+ break;
+ }
+
+ mutex_unlock(&rv_interface_lock);
+ return retval;
+}
+
+static const struct file_operations enabled_monitors_ops = {
+ .open = enabled_monitors_open,
+ .read = seq_read,
+ .write = enabled_monitors_write,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/*
+ * The monitoring_on global switch.
+ */
+static bool __read_mostly monitoring_on;
+
+/**
+ * rv_monitoring_on - checks if monitoring is on
+ *
+ * Returns 1 if on, 0 otherwise.
+ */
+bool rv_monitoring_on(void)
+{
+ /* Ensures that concurrent monitors read consistent monitoring_on */
+ smp_rmb();
+ return READ_ONCE(monitoring_on);
+}
+
+/*
+ * monitoring_on general switcher.
+ */
+static ssize_t monitoring_on_read_data(struct file *filp, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buff;
+
+ buff = rv_monitoring_on() ? "1\n" : "0\n";
+
+ return simple_read_from_buffer(user_buf, count, ppos, buff, strlen(buff) + 1);
+}
+
+static void turn_monitoring_off(void)
+{
+ WRITE_ONCE(monitoring_on, false);
+ /* Ensures that concurrent monitors read consistent monitoring_on */
+ smp_wmb();
+}
+
+static void reset_all_monitors(void)
+{
+ struct rv_monitor_def *mdef;
+
+ list_for_each_entry(mdef, &rv_monitors_list, list) {
+ if (mdef->monitor->enabled)
+ mdef->monitor->reset();
+ }
+}
+
+static void turn_monitoring_on(void)
+{
+ WRITE_ONCE(monitoring_on, true);
+ /* Ensures that concurrent monitors read consistent monitoring_on */
+ smp_wmb();
+}
+
+static void turn_monitoring_on_with_reset(void)
+{
+ lockdep_assert_held(&rv_interface_lock);
+
+ if (rv_monitoring_on())
+ return;
+
+ /*
+ * Monitors might be out of sync with the system if events were not
+ * processed because of !rv_monitoring_on().
+ *
+ * Reset all monitors, forcing a re-sync.
+ */
+ reset_all_monitors();
+ turn_monitoring_on();
+}
+
+static ssize_t monitoring_on_write_data(struct file *filp, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ int retval;
+ bool val;
+
+ retval = kstrtobool_from_user(user_buf, count, &val);
+ if (retval)
+ return retval;
+
+ mutex_lock(&rv_interface_lock);
+
+ if (val)
+ turn_monitoring_on_with_reset();
+ else
+ turn_monitoring_off();
+
+ /*
+ * Wait for the execution of all events to finish
+ * before returning to user-space.
+ */
+ tracepoint_synchronize_unregister();
+
+ mutex_unlock(&rv_interface_lock);
+
+ return count;
+}
+
+static const struct file_operations monitoring_on_fops = {
+ .open = simple_open,
+ .llseek = no_llseek,
+ .write = monitoring_on_write_data,
+ .read = monitoring_on_read_data,
+};
+
+static void destroy_monitor_dir(struct rv_monitor_def *mdef)
+{
+ reactor_cleanup_monitor(mdef);
+ rv_remove(mdef->root_d);
+}
+
+/**
+ * rv_register_monitor - register a rv monitor.
+ * @monitor: The rv_monitor to be registered.
+ *
+ * Returns 0 if successful, error otherwise.
+ */
+int rv_register_monitor(struct rv_monitor *monitor)
+{
+ struct rv_monitor_def *r;
+ int retval = 0;
+
+ if (strlen(monitor->name) >= MAX_RV_MONITOR_NAME_SIZE) {
+ pr_info("Monitor %s has a name longer than %d\n", monitor->name,
+ MAX_RV_MONITOR_NAME_SIZE);
+ return -1;
+ }
+
+ mutex_lock(&rv_interface_lock);
+
+ list_for_each_entry(r, &rv_monitors_list, list) {
+ if (strcmp(monitor->name, r->monitor->name) == 0) {
+ pr_info("Monitor %s is already registered\n", monitor->name);
+ retval = -1;
+ goto out_unlock;
+ }
+ }
+
+ r = kzalloc(sizeof(struct rv_monitor_def), GFP_KERNEL);
+ if (!r) {
+ retval = -ENOMEM;
+ goto out_unlock;
+ }
+
+ r->monitor = monitor;
+
+ retval = create_monitor_dir(r);
+ if (retval) {
+ kfree(r);
+ goto out_unlock;
+ }
+
+ list_add_tail(&r->list, &rv_monitors_list);
+
+out_unlock:
+ mutex_unlock(&rv_interface_lock);
+ return retval;
+}
+
+/**
+ * rv_unregister_monitor - unregister a rv monitor.
+ * @monitor: The rv_monitor to be unregistered.
+ *
+ * Returns 0 if successful, error otherwise.
+ */
+int rv_unregister_monitor(struct rv_monitor *monitor)
+{
+ struct rv_monitor_def *ptr, *next;
+
+ mutex_lock(&rv_interface_lock);
+
+ list_for_each_entry_safe(ptr, next, &rv_monitors_list, list) {
+ if (strcmp(monitor->name, ptr->monitor->name) == 0) {
+ rv_disable_monitor(ptr);
+ list_del(&ptr->list);
+ destroy_monitor_dir(ptr);
+ }
+ }
+
+ mutex_unlock(&rv_interface_lock);
+ return 0;
+}
+
+int __init rv_init_interface(void)
+{
+ struct dentry *tmp;
+ int retval;
+
+ rv_root.root_dir = rv_create_dir("rv", NULL);
+ if (!rv_root.root_dir)
+ goto out_err;
+
+ rv_root.monitors_dir = rv_create_dir("monitors", rv_root.root_dir);
+ if (!rv_root.monitors_dir)
+ goto out_err;
+
+ tmp = rv_create_file("available_monitors", RV_MODE_READ, rv_root.root_dir, NULL,
+ &available_monitors_ops);
+ if (!tmp)
+ goto out_err;
+
+ tmp = rv_create_file("enabled_monitors", RV_MODE_WRITE, rv_root.root_dir, NULL,
+ &enabled_monitors_ops);
+ if (!tmp)
+ goto out_err;
+
+ tmp = rv_create_file("monitoring_on", RV_MODE_WRITE, rv_root.root_dir, NULL,
+ &monitoring_on_fops);
+ if (!tmp)
+ goto out_err;
+ retval = init_rv_reactors(rv_root.root_dir);
+ if (retval)
+ goto out_err;
+
+ turn_monitoring_on();
+
+ return 0;
+
+out_err:
+ rv_remove(rv_root.root_dir);
+ printk(KERN_ERR "RV: Error while creating the RV interface\n");
+ return 1;
+}
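
Monitors are expected to honor the monitoring_on switch implemented above: rv_monitoring_on() is the check the per-monitor code performs before processing an event. A minimal sketch, not part of the patch (the handler name is hypothetical):

static void example_handle_event(void)
{
	if (!rv_monitoring_on())
		return;

	/* ... step the monitor's automaton for this event ... */
}
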
diff --git a/kernel/trace/rv/rv.h b/kernel/trace/rv/rv.h
new file mode 100644
index 000000000000..db6cb0913dbd
--- /dev/null
+++ b/kernel/trace/rv/rv.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/mutex.h>
+
+struct rv_interface {
+ struct dentry *root_dir;
+ struct dentry *monitors_dir;
+};
+
+#include "../trace.h"
+#include <linux/tracefs.h>
+#include <linux/rv.h>
+
+#define RV_MODE_WRITE TRACE_MODE_WRITE
+#define RV_MODE_READ TRACE_MODE_READ
+
+#define rv_create_dir tracefs_create_dir
+#define rv_create_file tracefs_create_file
+#define rv_remove tracefs_remove
+
+#define MAX_RV_MONITOR_NAME_SIZE 32
+#define MAX_RV_REACTOR_NAME_SIZE 32
+
+extern struct mutex rv_interface_lock;
+
+#ifdef CONFIG_RV_REACTORS
+struct rv_reactor_def {
+ struct list_head list;
+ struct rv_reactor *reactor;
+ /* protected by the monitor interface lock */
+ int counter;
+};
+#endif
+
+struct rv_monitor_def {
+ struct list_head list;
+ struct rv_monitor *monitor;
+ struct dentry *root_d;
+#ifdef CONFIG_RV_REACTORS
+ struct rv_reactor_def *rdef;
+ bool reacting;
+#endif
+ bool task_monitor;
+};
+
+struct dentry *get_monitors_root(void);
+int rv_disable_monitor(struct rv_monitor_def *mdef);
+int rv_enable_monitor(struct rv_monitor_def *mdef);
+
+#ifdef CONFIG_RV_REACTORS
+int reactor_populate_monitor(struct rv_monitor_def *mdef);
+void reactor_cleanup_monitor(struct rv_monitor_def *mdef);
+int init_rv_reactors(struct dentry *root_dir);
+#else
+static inline int reactor_populate_monitor(struct rv_monitor_def *mdef)
+{
+ return 0;
+}
+
+static inline void reactor_cleanup_monitor(struct rv_monitor_def *mdef)
+{
+ return;
+}
+
+static inline int init_rv_reactors(struct dentry *root_dir)
+{
+ return 0;
+}
+#endif
diff --git a/kernel/trace/rv/rv_reactors.c b/kernel/trace/rv/rv_reactors.c
new file mode 100644
index 000000000000..6aae106695b6
--- /dev/null
+++ b/kernel/trace/rv/rv_reactors.c
@@ -0,0 +1,510 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira <bristot@kernel.org>
+ *
+ * Runtime reactor interface.
+ *
+ * A runtime monitor can cause a reaction to the detection of an
+ * exception on the model's execution. By default, the monitors have
+ * tracing reactions, printing the monitor output via tracepoints.
+ * But other reactions can be added (on-demand) via this interface.
+ *
+ * == Registering reactors ==
+ *
+ * The struct rv_reactor defines a callback function to be executed
+ * when a model exception happens. The callback function
+ * receives a message to be (optionally) printed before executing
+ * the reaction.
+ *
+ * A RV reactor is registered via:
+ * int rv_register_reactor(struct rv_reactor *reactor)
+ * And unregistered via:
+ * int rv_unregister_reactor(struct rv_reactor *reactor)
+ *
+ * These functions are exported to modules, enabling reactors to be
+ * dynamically loaded.
+ *
+ * == User interface ==
+ *
+ * The user interface resembles the kernel tracing interface and
+ * presents these files:
+ *
+ * "available_reactors"
+ * - List the available reactors, one per line.
+ *
+ * For example:
+ * # cat available_reactors
+ * nop
+ * panic
+ * printk
+ *
+ * "reacting_on"
+ * - It is an on/off general switch for reactors, disabling
+ * all reactions.
+ *
+ * "monitors/MONITOR/reactors"
+ * - List available reactors, with the selected reaction for the given
+ * MONITOR inside []. The default one is the nop (no operation)
+ * reactor.
+ * - Writing the name of a reactor enables it for the given
+ * MONITOR.
+ *
+ * For example:
+ * # cat monitors/wip/reactors
+ * [nop]
+ * panic
+ * printk
+ * # echo panic > monitors/wip/reactors
+ * # cat monitors/wip/reactors
+ * nop
+ * [panic]
+ * printk
+ */
+
+#include <linux/slab.h>
+
+#include "rv.h"
+
+/*
+ * Interface for the reactor register.
+ */
+static LIST_HEAD(rv_reactors_list);
+
+static struct rv_reactor_def *get_reactor_rdef_by_name(char *name)
+{
+ struct rv_reactor_def *r;
+
+ list_for_each_entry(r, &rv_reactors_list, list) {
+ if (strcmp(name, r->reactor->name) == 0)
+ return r;
+ }
+ return NULL;
+}
+
+/*
+ * Available reactors seq functions.
+ */
+static int reactors_show(struct seq_file *m, void *p)
+{
+ struct rv_reactor_def *rea_def = p;
+
+ seq_printf(m, "%s\n", rea_def->reactor->name);
+ return 0;
+}
+
+static void reactors_stop(struct seq_file *m, void *p)
+{
+ mutex_unlock(&rv_interface_lock);
+}
+
+static void *reactors_start(struct seq_file *m, loff_t *pos)
+{
+ mutex_lock(&rv_interface_lock);
+ return seq_list_start(&rv_reactors_list, *pos);
+}
+
+static void *reactors_next(struct seq_file *m, void *p, loff_t *pos)
+{
+ return seq_list_next(p, &rv_reactors_list, pos);
+}
+
+/*
+ * available_reactors seq definition.
+ */
+static const struct seq_operations available_reactors_seq_ops = {
+ .start = reactors_start,
+ .next = reactors_next,
+ .stop = reactors_stop,
+ .show = reactors_show
+};
+
+/*
+ * available_reactors interface.
+ */
+static int available_reactors_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &available_reactors_seq_ops);
+};
+
+static const struct file_operations available_reactors_ops = {
+ .open = available_reactors_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release
+};
+
+/*
+ * Monitor's reactor file.
+ */
+static int monitor_reactor_show(struct seq_file *m, void *p)
+{
+ struct rv_monitor_def *mdef = m->private;
+ struct rv_reactor_def *rdef = p;
+
+ if (mdef->rdef == rdef)
+ seq_printf(m, "[%s]\n", rdef->reactor->name);
+ else
+ seq_printf(m, "%s\n", rdef->reactor->name);
+ return 0;
+}
+
+/*
+ * available_reactors seq definition.
+ */
+static const struct seq_operations monitor_reactors_seq_ops = {
+ .start = reactors_start,
+ .next = reactors_next,
+ .stop = reactors_stop,
+ .show = monitor_reactor_show
+};
+
+static void monitor_swap_reactors(struct rv_monitor_def *mdef, struct rv_reactor_def *rdef,
+ bool reacting)
+{
+ bool monitor_enabled;
+
+ /* nothing to do */
+ if (mdef->rdef == rdef)
+ return;
+
+ monitor_enabled = mdef->monitor->enabled;
+ if (monitor_enabled)
+ rv_disable_monitor(mdef);
+
+ /* swap reactor's usage */
+ mdef->rdef->counter--;
+ rdef->counter++;
+
+ mdef->rdef = rdef;
+ mdef->reacting = reacting;
+ mdef->monitor->react = rdef->reactor->react;
+
+ if (monitor_enabled)
+ rv_enable_monitor(mdef);
+}
+
+static ssize_t
+monitor_reactors_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char buff[MAX_RV_REACTOR_NAME_SIZE + 2];
+ struct rv_monitor_def *mdef;
+ struct rv_reactor_def *rdef;
+ struct seq_file *seq_f;
+ int retval = -EINVAL;
+ bool enable;
+ char *ptr;
+ int len;
+
+ if (count < 1 || count > MAX_RV_REACTOR_NAME_SIZE + 1)
+ return -EINVAL;
+
+ memset(buff, 0, sizeof(buff));
+
+ retval = simple_write_to_buffer(buff, sizeof(buff) - 1, ppos, user_buf, count);
+ if (retval < 0)
+ return -EFAULT;
+
+ ptr = strim(buff);
+
+ len = strlen(ptr);
+ if (!len)
+ return count;
+
+ /*
+ * See monitor_reactors_open()
+ */
+ seq_f = file->private_data;
+ mdef = seq_f->private;
+
+ mutex_lock(&rv_interface_lock);
+
+ retval = -EINVAL;
+
+ list_for_each_entry(rdef, &rv_reactors_list, list) {
+ if (strcmp(ptr, rdef->reactor->name) != 0)
+ continue;
+
+ if (rdef == get_reactor_rdef_by_name("nop"))
+ enable = false;
+ else
+ enable = true;
+
+ monitor_swap_reactors(mdef, rdef, enable);
+
+ retval = count;
+ break;
+ }
+
+ mutex_unlock(&rv_interface_lock);
+
+ return retval;
+}
+
+/*
+ * available_reactors interface.
+ */
+static int monitor_reactors_open(struct inode *inode, struct file *file)
+{
+ struct rv_monitor_def *mdef = inode->i_private;
+ struct seq_file *seq_f;
+ int ret;
+
+ ret = seq_open(file, &monitor_reactors_seq_ops);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * seq_open() stores the seq_file in file->private_data.
+ */
+ seq_f = file->private_data;
+
+ /*
+ * Copy the create file "private" data to the seq_file private data.
+ */
+ seq_f->private = mdef;
+
+ return 0;
+};
+
+static const struct file_operations monitor_reactors_ops = {
+ .open = monitor_reactors_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+ .write = monitor_reactors_write
+};
+
+static int __rv_register_reactor(struct rv_reactor *reactor)
+{
+ struct rv_reactor_def *r;
+
+ list_for_each_entry(r, &rv_reactors_list, list) {
+ if (strcmp(reactor->name, r->reactor->name) == 0) {
+ pr_info("Reactor %s is already registered\n", reactor->name);
+ return -EINVAL;
+ }
+ }
+
+ r = kzalloc(sizeof(struct rv_reactor_def), GFP_KERNEL);
+ if (!r)
+ return -ENOMEM;
+
+ r->reactor = reactor;
+ r->counter = 0;
+
+ list_add_tail(&r->list, &rv_reactors_list);
+
+ return 0;
+}
+
+/**
+ * rv_register_reactor - register a rv reactor.
+ * @reactor: The rv_reactor to be registered.
+ *
+ * Returns 0 if successful, error otherwise.
+ */
+int rv_register_reactor(struct rv_reactor *reactor)
+{
+ int retval = 0;
+
+ if (strlen(reactor->name) >= MAX_RV_REACTOR_NAME_SIZE) {
+ pr_info("Reactor %s has a name longer than %d\n",
+ reactor->name, MAX_RV_REACTOR_NAME_SIZE);
+ return -EINVAL;
+ }
+
+ mutex_lock(&rv_interface_lock);
+ retval = __rv_register_reactor(reactor);
+ mutex_unlock(&rv_interface_lock);
+ return retval;
+}
+
+/**
+ * rv_unregister_reactor - unregister a rv reactor.
+ * @reactor: The rv_reactor to be unregistered.
+ *
+ * Returns 0 if successful, error otherwise.
+ */
+int rv_unregister_reactor(struct rv_reactor *reactor)
+{
+ struct rv_reactor_def *ptr, *next;
+ int ret = 0;
+
+ mutex_lock(&rv_interface_lock);
+
+ list_for_each_entry_safe(ptr, next, &rv_reactors_list, list) {
+ if (strcmp(reactor->name, ptr->reactor->name) == 0) {
+
+ if (!ptr->counter) {
+ list_del(&ptr->list);
+ } else {
+ printk(KERN_WARNING
+ "rv: the rv_reactor %s is in use by %d monitor(s)\n",
+ ptr->reactor->name, ptr->counter);
+ printk(KERN_WARNING "rv: the rv_reactor %s cannot be removed\n",
+ ptr->reactor->name);
+ ret = -EBUSY;
+ break;
+ }
+ }
+ }
+
+ mutex_unlock(&rv_interface_lock);
+ return ret;
+}
+
+/*
+ * reacting_on interface.
+ */
+static bool __read_mostly reacting_on;
+
+/**
+ * rv_reacting_on - checks if reacting is on
+ *
+ * Returns 1 if on, 0 otherwise.
+ */
+bool rv_reacting_on(void)
+{
+ /* Ensures that concurrent monitors read consistent reacting_on */
+ smp_rmb();
+ return READ_ONCE(reacting_on);
+}
+
+static ssize_t reacting_on_read_data(struct file *filp,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buff;
+
+ buff = rv_reacting_on() ? "1\n" : "0\n";
+
+ return simple_read_from_buffer(user_buf, count, ppos, buff, strlen(buff)+1);
+}
+
+static void turn_reacting_off(void)
+{
+ WRITE_ONCE(reacting_on, false);
+ /* Ensures that concurrent monitors read consistent reacting_on */
+ smp_wmb();
+}
+
+static void turn_reacting_on(void)
+{
+ WRITE_ONCE(reacting_on, true);
+ /* Ensures that concurrent monitors read consistent reacting_on */
+ smp_wmb();
+}
+
+static ssize_t reacting_on_write_data(struct file *filp, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ int retval;
+ bool val;
+
+ retval = kstrtobool_from_user(user_buf, count, &val);
+ if (retval)
+ return retval;
+
+ mutex_lock(&rv_interface_lock);
+
+ if (val)
+ turn_reacting_on();
+ else
+ turn_reacting_off();
+
+ /*
+ * Wait for the execution of all events to finish
+ * before returning to user-space.
+ */
+ tracepoint_synchronize_unregister();
+
+ mutex_unlock(&rv_interface_lock);
+
+ return count;
+}
+
+static const struct file_operations reacting_on_fops = {
+ .open = simple_open,
+ .llseek = no_llseek,
+ .write = reacting_on_write_data,
+ .read = reacting_on_read_data,
+};
+
+/**
+ * reactor_populate_monitor - creates the per-monitor reactors file
+ * @mdef: monitor's definition.
+ *
+ * Returns 0 if successful, error otherwise.
+ */
+int reactor_populate_monitor(struct rv_monitor_def *mdef)
+{
+ struct dentry *tmp;
+
+ tmp = rv_create_file("reactors", RV_MODE_WRITE, mdef->root_d, mdef, &monitor_reactors_ops);
+ if (!tmp)
+ return -ENOMEM;
+
+ /*
+ * Configure as the rv_nop reactor.
+ */
+ mdef->rdef = get_reactor_rdef_by_name("nop");
+ mdef->rdef->counter++;
+ mdef->reacting = false;
+
+ return 0;
+}
+
+/**
+ * reactor_cleanup_monitor - cleanup a monitor reference
+ * @mdef: monitor's definition.
+ */
+void reactor_cleanup_monitor(struct rv_monitor_def *mdef)
+{
+ lockdep_assert_held(&rv_interface_lock);
+ mdef->rdef->counter--;
+ WARN_ON_ONCE(mdef->rdef->counter < 0);
+}
+
+/*
+ * Nop reactor register
+ */
+static void rv_nop_reaction(char *msg)
+{
+}
+
+static struct rv_reactor rv_nop = {
+ .name = "nop",
+ .description = "no-operation reactor: do nothing.",
+ .react = rv_nop_reaction
+};
+
+int init_rv_reactors(struct dentry *root_dir)
+{
+ struct dentry *available, *reacting;
+ int retval;
+
+ available = rv_create_file("available_reactors", RV_MODE_READ, root_dir, NULL,
+ &available_reactors_ops);
+ if (!available)
+ goto out_err;
+
+ reacting = rv_create_file("reacting_on", RV_MODE_WRITE, root_dir, NULL, &reacting_on_fops);
+ if (!reacting)
+ goto rm_available;
+
+ retval = __rv_register_reactor(&rv_nop);
+ if (retval)
+ goto rm_reacting;
+
+ turn_reacting_on();
+
+ return 0;
+
+rm_reacting:
+ rv_remove(reacting);
+rm_available:
+ rv_remove(available);
+out_err:
+ return -ENOMEM;
+}
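
monitor_swap_reactors() above installs the chosen reactor's callback as monitor->react, and rv_reacting_on() gates whether reactions fire at all. A sketch of the expected call site inside a monitor, not part of the patch (the helper name is hypothetical):

static void example_cond_react(struct rv_monitor *mon, char *msg)
{
	if (rv_reacting_on() && mon->react)
		mon->react(msg);
}
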
diff --git a/kernel/trace/synth_event_gen_test.c b/kernel/trace/synth_event_gen_test.c
index 7d56d621ffea..354c2117be43 100644
--- a/kernel/trace/synth_event_gen_test.c
+++ b/kernel/trace/synth_event_gen_test.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Test module for in-kernel sythetic event creation and generation.
+ * Test module for in-kernel synthetic event creation and generation.
*
* Copyright (C) 2019 Tom Zanussi <zanussi@kernel.org>
*/
@@ -22,7 +22,7 @@
* Then:
*
* # insmod kernel/trace/synth_event_gen_test.ko
- * # cat /sys/kernel/debug/tracing/trace
+ * # cat /sys/kernel/tracing/trace
*
* You should see several events in the trace buffer -
* "create_synth_test", "empty_synth_test", and several instances of
@@ -120,15 +120,13 @@ static int __init test_gen_synth_cmd(void)
/* Now generate a gen_synth_test event */
ret = synth_event_trace_array(gen_synth_test, vals, ARRAY_SIZE(vals));
- out:
+ free:
+ kfree(buf);
return ret;
delete:
/* We got an error after creating the event, delete it */
synth_event_delete("gen_synth_test");
- free:
- kfree(buf);
-
- goto out;
+ goto free;
}
/*
@@ -227,24 +225,24 @@ static int __init test_empty_synth_event(void)
/* Now trace an empty_synth_test event */
ret = synth_event_trace_array(empty_synth_test, vals, ARRAY_SIZE(vals));
- out:
+ free:
+ kfree(buf);
return ret;
delete:
/* We got an error after creating the event, delete it */
synth_event_delete("empty_synth_test");
- free:
- kfree(buf);
-
- goto out;
+ goto free;
}
static struct synth_field_desc create_synth_test_fields[] = {
{ .type = "pid_t", .name = "next_pid_field" },
{ .type = "char[16]", .name = "next_comm_field" },
{ .type = "u64", .name = "ts_ns" },
+ { .type = "char[]", .name = "dynstring_field_1" },
{ .type = "u64", .name = "ts_ms" },
{ .type = "unsigned int", .name = "cpu" },
{ .type = "char[64]", .name = "my_string_field" },
+ { .type = "char[]", .name = "dynstring_field_2" },
{ .type = "int", .name = "my_int_field" },
};
@@ -254,7 +252,7 @@ static struct synth_field_desc create_synth_test_fields[] = {
*/
static int __init test_create_synth_event(void)
{
- u64 vals[7];
+ u64 vals[9];
int ret;
/* Create the create_synth_test event with the fields above */
@@ -292,10 +290,12 @@ static int __init test_create_synth_event(void)
vals[0] = 777; /* next_pid_field */
vals[1] = (u64)(long)"tiddlywinks"; /* next_comm_field */
vals[2] = 1000000; /* ts_ns */
- vals[3] = 1000; /* ts_ms */
- vals[4] = raw_smp_processor_id(); /* cpu */
- vals[5] = (u64)(long)"thneed"; /* my_string_field */
- vals[6] = 398; /* my_int_field */
+ vals[3] = (u64)(long)"xrayspecs"; /* dynstring_field_1 */
+ vals[4] = 1000; /* ts_ms */
+ vals[5] = raw_smp_processor_id(); /* cpu */
+ vals[6] = (u64)(long)"thneed"; /* my_string_field */
+ vals[7] = (u64)(long)"kerplunk"; /* dynstring_field_2 */
+ vals[8] = 398; /* my_int_field */
/* Now generate a create_synth_test event */
ret = synth_event_trace_array(create_synth_test, vals, ARRAY_SIZE(vals));
@@ -303,7 +303,7 @@ static int __init test_create_synth_event(void)
return ret;
delete:
/* We got an error after creating the event, delete it */
- ret = synth_event_delete("create_synth_test");
+ synth_event_delete("create_synth_test");
goto out;
}
@@ -422,13 +422,15 @@ static int __init test_trace_synth_event(void)
int ret;
/* Trace some bogus values just for testing */
- ret = synth_event_trace(create_synth_test, 7, /* number of values */
+ ret = synth_event_trace(create_synth_test, 9, /* number of values */
(u64)444, /* next_pid_field */
(u64)(long)"clackers", /* next_comm_field */
(u64)1000000, /* ts_ns */
+ (u64)(long)"viewmaster",/* dynstring_field_1 */
(u64)1000, /* ts_ms */
(u64)raw_smp_processor_id(), /* cpu */
(u64)(long)"Thneed", /* my_string_field */
+ (u64)(long)"yoyos", /* dynstring_field_2 */
(u64)999); /* my_int_field */
return ret;
}
@@ -475,6 +477,17 @@ static int __init synth_event_gen_test_init(void)
ret = test_trace_synth_event();
WARN_ON(ret);
+
+ /* Disable when done */
+ trace_array_set_clr_event(gen_synth_test->tr,
+ "synthetic",
+ "gen_synth_test", false);
+ trace_array_set_clr_event(empty_synth_test->tr,
+ "synthetic",
+ "empty_synth_test", false);
+ trace_array_set_clr_event(create_synth_test->tr,
+ "synthetic",
+ "create_synth_test", false);
out:
return ret;
}
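
The cleanup added at the end of synth_event_gen_test_init() uses trace_array_set_clr_event(), which enables or disables a single event on a given trace array. A minimal sketch of the enable/disable pairing, not part of the patch (the trace array and event name are placeholders):

static int example_toggle_event(struct trace_array *tr)
{
	int ret;

	/* enable events/synthetic/my_synth_event on this trace array */
	ret = trace_array_set_clr_event(tr, "synthetic", "my_synth_event", true);
	if (ret)
		return ret;

	/* ... generate events ... */

	/* disable it again when done */
	return trace_array_set_clr_event(tr, "synthetic", "my_synth_event", false);
}
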
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index bb62269724d5..c9c898307348 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -19,7 +19,6 @@
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
-#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
@@ -39,6 +38,8 @@
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
+#include <linux/panic_notifier.h>
+#include <linux/kmemleak.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
@@ -49,15 +50,12 @@
#include <linux/irq_work.h>
#include <linux/workqueue.h>
+#include <asm/setup.h> /* COMMAND_LINE_SIZE */
+
#include "trace.h"
#include "trace_output.h"
-/*
- * On boot up, the ring buffer is set to the minimum size, so that
- * we do not waste memory on systems that are not using tracing.
- */
-bool ring_buffer_expanded;
-
+#ifdef CONFIG_FTRACE_STARTUP_TEST
/*
* We need to change this state when a selftest is running.
* A selftest will lurk into the ring-buffer to count the
@@ -68,13 +66,27 @@ bool ring_buffer_expanded;
static bool __read_mostly tracing_selftest_running;
/*
- * If a tracer is running, we do not want to run SELFTEST.
+ * If boot-time tracing including tracers/events via kernel cmdline
+ * is running, we do not want to run SELFTEST.
*/
bool __read_mostly tracing_selftest_disabled;
+void __init disable_tracing_selftest(const char *reason)
+{
+ if (!tracing_selftest_disabled) {
+ tracing_selftest_disabled = true;
+ pr_info("Ftrace startup test is disabled due to %s\n", reason);
+ }
+}
+#else
+#define tracing_selftest_running 0
+#define tracing_selftest_disabled 0
+#endif
+
/* Pipe tracepoints to printk */
-struct trace_iterator *tracepoint_print_iter;
+static struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
+static bool tracepoint_printk_stop_on_boot __initdata;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
/* For tracers that don't implement custom flags */
@@ -163,33 +175,41 @@ static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
int tracing_set_tracer(struct trace_array *tr, const char *buf);
-static void ftrace_trace_userstack(struct trace_buffer *buffer,
- unsigned long flags, int pc);
+static void ftrace_trace_userstack(struct trace_array *tr,
+ struct trace_buffer *buffer,
+ unsigned int trace_ctx);
#define MAX_TRACER_SIZE 100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;
static bool allocate_snapshot;
+static bool snapshot_at_boot;
+
+static char boot_instance_info[COMMAND_LINE_SIZE] __initdata;
+static int boot_instance_index;
+
+static char boot_snapshot_info[COMMAND_LINE_SIZE] __initdata;
+static int boot_snapshot_index;
static int __init set_cmdline_ftrace(char *str)
{
- strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
+ strscpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
default_bootup_tracer = bootup_tracer_buf;
/* We are using ftrace early, expand it */
- ring_buffer_expanded = true;
+ trace_set_ring_buffer_expanded(NULL);
return 1;
}
__setup("ftrace=", set_cmdline_ftrace);
static int __init set_ftrace_dump_on_oops(char *str)
{
- if (*str++ != '=' || !*str) {
+ if (*str++ != '=' || !*str || !strcmp("1", str)) {
ftrace_dump_on_oops = DUMP_ALL;
return 1;
}
- if (!strcmp("orig_cpu", str)) {
+ if (!strcmp("orig_cpu", str) || !strcmp("2", str)) {
ftrace_dump_on_oops = DUMP_ORIG;
return 1;
}
@@ -208,20 +228,59 @@ __setup("traceoff_on_warning", stop_trace_on_warning);
static int __init boot_alloc_snapshot(char *str)
{
- allocate_snapshot = true;
- /* We also need the main ring buffer expanded */
- ring_buffer_expanded = true;
+ char *slot = boot_snapshot_info + boot_snapshot_index;
+ int left = sizeof(boot_snapshot_info) - boot_snapshot_index;
+ int ret;
+
+ if (str[0] == '=') {
+ str++;
+ if (strlen(str) >= left)
+ return -1;
+
+ ret = snprintf(slot, left, "%s\t", str);
+ boot_snapshot_index += ret;
+ } else {
+ allocate_snapshot = true;
+ /* We also need the main ring buffer expanded */
+ trace_set_ring_buffer_expanded(NULL);
+ }
return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);
+static int __init boot_snapshot(char *str)
+{
+ snapshot_at_boot = true;
+ boot_alloc_snapshot(str);
+ return 1;
+}
+__setup("ftrace_boot_snapshot", boot_snapshot);
+
+
+static int __init boot_instance(char *str)
+{
+ char *slot = boot_instance_info + boot_instance_index;
+ int left = sizeof(boot_instance_info) - boot_instance_index;
+ int ret;
+
+ if (strlen(str) >= left)
+ return -1;
+
+ ret = snprintf(slot, left, "%s\t", str);
+ boot_instance_index += ret;
+
+ return 1;
+}
+__setup("trace_instance=", boot_instance);
+
+
static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static int __init set_trace_boot_options(char *str)
{
- strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
- return 0;
+ strscpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
+ return 1;
}
__setup("trace_options=", set_trace_boot_options);
@@ -230,20 +289,31 @@ static char *trace_boot_clock __initdata;
static int __init set_trace_boot_clock(char *str)
{
- strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
+ strscpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
trace_boot_clock = trace_boot_clock_buf;
- return 0;
+ return 1;
}
__setup("trace_clock=", set_trace_boot_clock);
static int __init set_tracepoint_printk(char *str)
{
+ /* Ignore the "tp_printk_stop_on_boot" param */
+ if (*str == '_')
+ return 0;
+
if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
tracepoint_printk = 1;
return 1;
}
__setup("tp_printk", set_tracepoint_printk);
+static int __init set_tracepoint_printk_stop(char *str)
+{
+ tracepoint_printk_stop_on_boot = true;
+ return 1;
+}
+__setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
+
unsigned long long ns2usecs(u64 nsec)
{
nsec += 500;
@@ -251,13 +321,153 @@ unsigned long long ns2usecs(u64 nsec)
return nsec;
}
+static void
+trace_process_export(struct trace_export *export,
+ struct ring_buffer_event *event, int flag)
+{
+ struct trace_entry *entry;
+ unsigned int size = 0;
+
+ if (export->flags & flag) {
+ entry = ring_buffer_event_data(event);
+ size = ring_buffer_event_length(event);
+ export->write(export, entry, size);
+ }
+}
+
+static DEFINE_MUTEX(ftrace_export_lock);
+
+static struct trace_export __rcu *ftrace_exports_list __read_mostly;
+
+static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
+static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
+static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
+
+static inline void ftrace_exports_enable(struct trace_export *export)
+{
+ if (export->flags & TRACE_EXPORT_FUNCTION)
+ static_branch_inc(&trace_function_exports_enabled);
+
+ if (export->flags & TRACE_EXPORT_EVENT)
+ static_branch_inc(&trace_event_exports_enabled);
+
+ if (export->flags & TRACE_EXPORT_MARKER)
+ static_branch_inc(&trace_marker_exports_enabled);
+}
+
+static inline void ftrace_exports_disable(struct trace_export *export)
+{
+ if (export->flags & TRACE_EXPORT_FUNCTION)
+ static_branch_dec(&trace_function_exports_enabled);
+
+ if (export->flags & TRACE_EXPORT_EVENT)
+ static_branch_dec(&trace_event_exports_enabled);
+
+ if (export->flags & TRACE_EXPORT_MARKER)
+ static_branch_dec(&trace_marker_exports_enabled);
+}
+
+static void ftrace_exports(struct ring_buffer_event *event, int flag)
+{
+ struct trace_export *export;
+
+ preempt_disable_notrace();
+
+ export = rcu_dereference_raw_check(ftrace_exports_list);
+ while (export) {
+ trace_process_export(export, event, flag);
+ export = rcu_dereference_raw_check(export->next);
+ }
+
+ preempt_enable_notrace();
+}
+
+static inline void
+add_trace_export(struct trace_export **list, struct trace_export *export)
+{
+ rcu_assign_pointer(export->next, *list);
+ /*
+ * We are entering export into the list but another
+ * CPU might be walking that list. We need to make sure
+ * the export->next pointer is valid before another CPU sees
+ * the export pointer included into the list.
+ */
+ rcu_assign_pointer(*list, export);
+}
+
+static inline int
+rm_trace_export(struct trace_export **list, struct trace_export *export)
+{
+ struct trace_export **p;
+
+ for (p = list; *p != NULL; p = &(*p)->next)
+ if (*p == export)
+ break;
+
+ if (*p != export)
+ return -1;
+
+ rcu_assign_pointer(*p, (*p)->next);
+
+ return 0;
+}
+
+static inline void
+add_ftrace_export(struct trace_export **list, struct trace_export *export)
+{
+ ftrace_exports_enable(export);
+
+ add_trace_export(list, export);
+}
+
+static inline int
+rm_ftrace_export(struct trace_export **list, struct trace_export *export)
+{
+ int ret;
+
+ ret = rm_trace_export(list, export);
+ ftrace_exports_disable(export);
+
+ return ret;
+}
+
+int register_ftrace_export(struct trace_export *export)
+{
+ if (WARN_ON_ONCE(!export->write))
+ return -1;
+
+ mutex_lock(&ftrace_export_lock);
+
+ add_ftrace_export(&ftrace_exports_list, export);
+
+ mutex_unlock(&ftrace_export_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(register_ftrace_export);
+
+int unregister_ftrace_export(struct trace_export *export)
+{
+ int ret;
+
+ mutex_lock(&ftrace_export_lock);
+
+ ret = rm_ftrace_export(&ftrace_exports_list, export);
+
+ mutex_unlock(&ftrace_export_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(unregister_ftrace_export);
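With the export list now driving a static key per TRACE_EXPORT_* class, a module can tap the function, event or marker streams through register_ftrace_export(). A minimal sketch (the write prototype is inferred from the export->write(export, entry, size) call above and should be checked against include/linux/trace.h; the module itself is hypothetical):

#include <linux/module.h>
#include <linux/trace.h>

/* Forward raw trace records to some out-of-band sink (stubbed here). */
static void my_export_write(struct trace_export *export, const void *entry,
			    unsigned int size)
{
	pr_debug("exported %u bytes of trace data\n", size);
}

static struct trace_export my_export = {
	.write	= my_export_write,
	.flags	= TRACE_EXPORT_EVENT,	/* or TRACE_EXPORT_FUNCTION / _MARKER */
};

static int __init my_export_init(void)
{
	return register_ftrace_export(&my_export);
}

static void __exit my_export_exit(void)
{
	unregister_ftrace_export(&my_export);
}

module_init(my_export_init);
module_exit(my_export_exit);
MODULE_LICENSE("GPL");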
+
/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS \
(FUNCTION_DEFAULT_FLAGS | \
TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
- TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
+ TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | \
+ TRACE_ITER_HASH_PTR)
/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
@@ -275,6 +485,13 @@ static struct trace_array global_trace = {
.trace_flags = TRACE_DEFAULT_FLAGS,
};
+void trace_set_ring_buffer_expanded(struct trace_array *tr)
+{
+ if (!tr)
+ tr = &global_trace;
+ tr->ring_buffer_expanded = true;
+}
+
LIST_HEAD(ftrace_trace_arrays);
int trace_array_get(struct trace_array *this_tr)
@@ -303,6 +520,7 @@ static void __trace_array_put(struct trace_array *this_tr)
/**
* trace_array_put - Decrement the reference counter for this trace array.
+ * @this_tr: pointer to the trace array
*
* NOTE: Use this when we no longer need the trace array returned by
* trace_array_get_by_name(). This ensures the trace array can be later
@@ -350,35 +568,23 @@ int call_filter_check_discard(struct trace_event_call *call, void *rec,
return 0;
}
-void trace_free_pid_list(struct trace_pid_list *pid_list)
-{
- vfree(pid_list->pids);
- kfree(pid_list);
-}
-
/**
* trace_find_filtered_pid - check if a pid exists in a filtered_pid list
* @filtered_pids: The list of pids to check
* @search_pid: The PID to find in @filtered_pids
*
- * Returns true if @search_pid is fonud in @filtered_pids, and false otherwis.
+ * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
*/
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
- /*
- * If pid_max changed after filtered_pids was created, we
- * by default ignore all pids greater than the previous pid_max.
- */
- if (search_pid >= filtered_pids->pid_max)
- return false;
-
- return test_bit(search_pid, filtered_pids->pids);
+ return trace_pid_list_is_set(filtered_pids, search_pid);
}
/**
* trace_ignore_this_task - should a task be ignored for tracing
* @filtered_pids: The list of pids to check
+ * @filtered_no_pids: The list of pids not to be traced
* @task: The task that should be ignored if not filtered
*
* Checks if @task should be traced or not from @filtered_pids.
@@ -391,7 +597,7 @@ trace_ignore_this_task(struct trace_pid_list *filtered_pids,
struct task_struct *task)
{
/*
- * If filterd_no_pids is not empty, and the task's pid is listed
+ * If filtered_no_pids is not empty, and the task's pid is listed
* in filtered_no_pids, then return true.
* Otherwise, if filtered_pids is empty, that means we can
* trace all tasks. If it has content, then only trace pids
@@ -429,15 +635,11 @@ void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
return;
}
- /* Sorry, but we don't support pid_max changing after setting */
- if (task->pid >= pid_list->pid_max)
- return;
-
/* "self" is set for forks, and NULL for exits */
if (self)
- set_bit(task->pid, pid_list->pids);
+ trace_pid_list_set(pid_list, task->pid);
else
- clear_bit(task->pid, pid_list->pids);
+ trace_pid_list_clear(pid_list, task->pid);
}
/**
@@ -454,18 +656,19 @@ void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
*/
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
- unsigned long pid = (unsigned long)v;
+ long pid = (unsigned long)v;
+ unsigned int next;
(*pos)++;
- /* pid already is +1 of the actual prevous bit */
- pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
+ /* pid already is +1 of the actual previous bit */
+ if (trace_pid_list_next(pid_list, pid, &next) < 0)
+ return NULL;
- /* Return pid + 1 to allow zero to be represented */
- if (pid < pid_list->pid_max)
- return (void *)(pid + 1);
+ pid = next;
- return NULL;
+ /* Return pid + 1 to allow zero to be represented */
+ return (void *)(pid + 1);
}
/**
@@ -482,12 +685,14 @@ void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
unsigned long pid;
+ unsigned int first;
loff_t l = 0;
- pid = find_first_bit(pid_list->pids, pid_list->pid_max);
- if (pid >= pid_list->pid_max)
+ if (trace_pid_list_first(pid_list, &first) < 0)
return NULL;
+ pid = first;
+
/* Return pid + 1 so that zero can be the exit value */
for (pid++; pid && l < *pos;
pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
@@ -523,7 +728,7 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
unsigned long val;
int nr_pids = 0;
ssize_t read = 0;
- ssize_t ret = 0;
+ ssize_t ret;
loff_t pos;
pid_t pid;
@@ -536,55 +741,48 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
* the user. If the operation fails, then the current list is
* not modified.
*/
- pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
+ pid_list = trace_pid_list_alloc();
if (!pid_list) {
trace_parser_put(&parser);
return -ENOMEM;
}
- pid_list->pid_max = READ_ONCE(pid_max);
-
- /* Only truncating will shrink pid_max */
- if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
- pid_list->pid_max = filtered_pids->pid_max;
-
- pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
- if (!pid_list->pids) {
- trace_parser_put(&parser);
- kfree(pid_list);
- return -ENOMEM;
- }
-
if (filtered_pids) {
/* copy the current bits to the new max */
- for_each_set_bit(pid, filtered_pids->pids,
- filtered_pids->pid_max) {
- set_bit(pid, pid_list->pids);
+ ret = trace_pid_list_first(filtered_pids, &pid);
+ while (!ret) {
+ trace_pid_list_set(pid_list, pid);
+ ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
nr_pids++;
}
}
+ ret = 0;
while (cnt > 0) {
pos = 0;
ret = trace_get_user(&parser, ubuf, cnt, &pos);
- if (ret < 0 || !trace_parser_loaded(&parser))
+ if (ret < 0)
break;
read += ret;
ubuf += ret;
cnt -= ret;
+ if (!trace_parser_loaded(&parser))
+ break;
+
ret = -EINVAL;
if (kstrtoul(parser.buffer, 0, &val))
break;
- if (val >= pid_list->pid_max)
- break;
pid = (pid_t)val;
- set_bit(pid, pid_list->pids);
+ if (trace_pid_list_set(pid_list, pid) < 0) {
+ ret = -1;
+ break;
+ }
nr_pids++;
trace_parser_clear(&parser);
@@ -593,14 +791,13 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
trace_parser_put(&parser);
if (ret < 0) {
- trace_free_pid_list(pid_list);
+ trace_pid_list_free(pid_list);
return ret;
}
if (!nr_pids) {
/* Cleared the list of pids */
- trace_free_pid_list(pid_list);
- read = ret;
+ trace_pid_list_free(pid_list);
pid_list = NULL;
}
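The conversion above replaces the old open-coded bitmap walk with the trace_pid_list_first()/trace_pid_list_next() pair, which return 0 while entries remain (see kernel/trace/pid_list.h). The same iteration pattern in isolation, as an illustrative sketch rather than kernel code:

static void dump_pid_list(struct trace_pid_list *pid_list)
{
	unsigned int pid;
	int ret;

	for (ret = trace_pid_list_first(pid_list, &pid); !ret;
	     ret = trace_pid_list_next(pid_list, pid + 1, &pid))
		pr_info("filtered pid: %u\n", pid);
}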
@@ -617,7 +814,7 @@ static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
if (!buf->buffer)
return trace_clock_local();
- ts = ring_buffer_time_stamp(buf->buffer, cpu);
+ ts = ring_buffer_time_stamp(buf->buffer);
ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
return ts;
@@ -629,7 +826,7 @@ u64 ftrace_now(int cpu)
}
/**
- * tracing_is_enabled - Show if global_trace has been disabled
+ * tracing_is_enabled - Show if global_trace has been enabled
*
* Shows if the global trace has been enabled or not. It uses the
* mirror flag "buffer_disabled" to be used in fast paths such as for
@@ -680,7 +877,7 @@ DEFINE_MUTEX(trace_types_lock);
* The content of events may become garbage if we allow other process consumes
* these events concurrently:
* A) the page of the consumed events may become a normal page
- * (not reader page) in ring buffer, and this page will be rewrited
+ * (not reader page) in ring buffer, and this page will be rewritten
* by events producer.
* B) The page of the consumed events may become a page for splice_read,
* and this page will be returned to system.
@@ -754,23 +951,23 @@ static inline void trace_access_lock_init(void)
#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct trace_buffer *buffer,
- unsigned long flags,
- int skip, int pc, struct pt_regs *regs);
+ unsigned int trace_ctx,
+ int skip, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
struct trace_buffer *buffer,
- unsigned long flags,
- int skip, int pc, struct pt_regs *regs);
+ unsigned int trace_ctx,
+ int skip, struct pt_regs *regs);
#else
static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
- unsigned long flags,
- int skip, int pc, struct pt_regs *regs)
+ unsigned int trace_ctx,
+ int skip, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
struct trace_buffer *buffer,
- unsigned long flags,
- int skip, int pc, struct pt_regs *regs)
+ unsigned int trace_ctx,
+ int skip, struct pt_regs *regs)
{
}
@@ -778,24 +975,24 @@ static inline void ftrace_trace_stack(struct trace_array *tr,
static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
- int type, unsigned long flags, int pc)
+ int type, unsigned int trace_ctx)
{
struct trace_entry *ent = ring_buffer_event_data(event);
- tracing_generic_entry_update(ent, type, flags, pc);
+ tracing_generic_entry_update(ent, type, trace_ctx);
}
static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct trace_buffer *buffer,
int type,
unsigned long len,
- unsigned long flags, int pc)
+ unsigned int trace_ctx)
{
struct ring_buffer_event *event;
event = ring_buffer_lock_reserve(buffer, len);
if (event != NULL)
- trace_event_setup(event, type, flags, pc);
+ trace_event_setup(event, type, trace_ctx);
return event;
}
@@ -841,40 +1038,37 @@ __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *ev
ring_buffer_write(buffer, event->array[0], &event->array[1]);
/* Release the temp buffer */
this_cpu_dec(trace_buffered_event_cnt);
+ /* ring_buffer_unlock_commit() enables preemption */
+ preempt_enable_notrace();
} else
- ring_buffer_unlock_commit(buffer, event);
+ ring_buffer_unlock_commit(buffer);
}
-/**
- * __trace_puts - write a constant string into the trace buffer.
- * @ip: The address of the caller
- * @str: The constant string to write
- * @size: The size of the string.
- */
-int __trace_puts(unsigned long ip, const char *str, int size)
+int __trace_array_puts(struct trace_array *tr, unsigned long ip,
+ const char *str, int size)
{
struct ring_buffer_event *event;
struct trace_buffer *buffer;
struct print_entry *entry;
- unsigned long irq_flags;
+ unsigned int trace_ctx;
int alloc;
- int pc;
- if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
+ if (!(tr->trace_flags & TRACE_ITER_PRINTK))
return 0;
- pc = preempt_count();
+ if (unlikely(tracing_selftest_running && tr == &global_trace))
+ return 0;
- if (unlikely(tracing_selftest_running || tracing_disabled))
+ if (unlikely(tracing_disabled))
return 0;
alloc = sizeof(*entry) + size + 2; /* possible \n added */
- local_save_flags(irq_flags);
- buffer = global_trace.array_buffer.buffer;
+ trace_ctx = tracing_gen_ctx();
+ buffer = tr->array_buffer.buffer;
ring_buffer_nest_start(buffer);
- event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
- irq_flags, pc);
+ event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
+ trace_ctx);
if (!event) {
size = 0;
goto out;
@@ -893,11 +1087,23 @@ int __trace_puts(unsigned long ip, const char *str, int size)
entry->buf[size] = '\0';
__buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
+ ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
out:
ring_buffer_nest_end(buffer);
return size;
}
+EXPORT_SYMBOL_GPL(__trace_array_puts);
+
+/**
+ * __trace_puts - write a constant string into the trace buffer.
+ * @ip: The address of the caller
+ * @str: The constant string to write
+ * @size: The size of the string.
+ */
+int __trace_puts(unsigned long ip, const char *str, int size)
+{
+ return __trace_array_puts(&global_trace, ip, str, size);
+}
EXPORT_SYMBOL_GPL(__trace_puts);
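__trace_array_puts() extends the old global-only helper so a string can be written into a specific instance's buffer. A rough usage sketch from module context, assuming the trace_array_get_by_name() prototype of this era takes just the instance name (check include/linux/trace.h; the instance name "foo" and the wrapper are hypothetical):

#include <linux/trace.h>

static void write_note_to_instance(void)
{
	struct trace_array *tr = trace_array_get_by_name("foo");

	if (!tr)
		return;

	__trace_array_puts(tr, _THIS_IP_, "hello from foo\n", 15);
	trace_array_put(tr);
}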
/**
@@ -910,25 +1116,22 @@ int __trace_bputs(unsigned long ip, const char *str)
struct ring_buffer_event *event;
struct trace_buffer *buffer;
struct bputs_entry *entry;
- unsigned long irq_flags;
+ unsigned int trace_ctx;
int size = sizeof(struct bputs_entry);
int ret = 0;
- int pc;
if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
return 0;
- pc = preempt_count();
-
if (unlikely(tracing_selftest_running || tracing_disabled))
return 0;
- local_save_flags(irq_flags);
+ trace_ctx = tracing_gen_ctx();
buffer = global_trace.array_buffer.buffer;
ring_buffer_nest_start(buffer);
event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
- irq_flags, pc);
+ trace_ctx);
if (!event)
goto out;
@@ -937,7 +1140,7 @@ int __trace_bputs(unsigned long ip, const char *str)
entry->str = str;
__buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
+ ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
ret = 1;
out:
@@ -954,22 +1157,22 @@ static void tracing_snapshot_instance_cond(struct trace_array *tr,
unsigned long flags;
if (in_nmi()) {
- internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
- internal_trace_puts("*** snapshot is being ignored ***\n");
+ trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
+ trace_array_puts(tr, "*** snapshot is being ignored ***\n");
return;
}
if (!tr->allocated_snapshot) {
- internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
- internal_trace_puts("*** stopping trace here! ***\n");
- tracing_off();
+ trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n");
+ trace_array_puts(tr, "*** stopping trace here! ***\n");
+ tracer_tracing_off(tr);
return;
}
/* Note, snapshot can not be used when the tracer uses it */
if (tracer->use_max_tr) {
- internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
- internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
+ trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
+ trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
return;
}
@@ -992,7 +1195,7 @@ void tracing_snapshot_instance(struct trace_array *tr)
*
* Note, make sure to allocate the snapshot with either
* a tracing_snapshot_alloc(), or by doing it manually
- * with: echo 1 > /sys/kernel/debug/tracing/snapshot
+ * with: echo 1 > /sys/kernel/tracing/snapshot
*
* If the snapshot buffer is not allocated, it will stop tracing.
* Basically making a permanent snapshot.
@@ -1025,7 +1228,7 @@ void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
/**
- * tracing_snapshot_cond_data - get the user data associated with a snapshot
+ * tracing_cond_snapshot_data - get the user data associated with a snapshot
* @tr: The tracing instance
*
* When the user enables a conditional snapshot using
@@ -1042,12 +1245,14 @@ void *tracing_cond_snapshot_data(struct trace_array *tr)
{
void *cond_data = NULL;
+ local_irq_disable();
arch_spin_lock(&tr->max_lock);
if (tr->cond_snapshot)
cond_data = tr->cond_snapshot->cond_data;
arch_spin_unlock(&tr->max_lock);
+ local_irq_enable();
return cond_data;
}
@@ -1059,10 +1264,17 @@ static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
+ int order;
int ret;
if (!tr->allocated_snapshot) {
+ /* Make the snapshot buffer have the same order as main buffer */
+ order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
+ ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
+ if (ret < 0)
+ return ret;
+
/* allocate spare buffer */
ret = resize_buffer_duplicate_size(&tr->max_buffer,
&tr->array_buffer, RING_BUFFER_ALL_CPUS);
@@ -1082,6 +1294,7 @@ static void free_snapshot(struct trace_array *tr)
* The max_tr ring buffer has some state (e.g. ring->clock) and
* we want preserve it.
*/
+ ring_buffer_subbuf_order_set(tr->max_buffer.buffer, 0);
ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
set_buffer_entries(&tr->max_buffer, 1);
tracing_reset_online_cpus(&tr->max_buffer);
@@ -1183,9 +1396,11 @@ int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
goto fail_unlock;
}
+ local_irq_disable();
arch_spin_lock(&tr->max_lock);
tr->cond_snapshot = cond_snapshot;
arch_spin_unlock(&tr->max_lock);
+ local_irq_enable();
mutex_unlock(&trace_types_lock);
@@ -1212,6 +1427,7 @@ int tracing_snapshot_cond_disable(struct trace_array *tr)
{
int ret = 0;
+ local_irq_disable();
arch_spin_lock(&tr->max_lock);
if (!tr->cond_snapshot)
@@ -1222,6 +1438,7 @@ int tracing_snapshot_cond_disable(struct trace_array *tr)
}
arch_spin_unlock(&tr->max_lock);
+ local_irq_enable();
return ret;
}
@@ -1264,6 +1481,7 @@ int tracing_snapshot_cond_disable(struct trace_array *tr)
return false;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
+#define free_snapshot(tr) do { } while (0)
#endif /* CONFIG_TRACER_SNAPSHOT */
void tracer_tracing_off(struct trace_array *tr)
@@ -1315,7 +1533,7 @@ void disable_trace_on_warning(void)
bool tracer_tracing_is_on(struct trace_array *tr)
{
if (tr->array_buffer.buffer)
- return ring_buffer_record_is_on(tr->array_buffer.buffer);
+ return ring_buffer_record_is_set_on(tr->array_buffer.buffer);
return !tr->buffer_disabled;
}
@@ -1335,10 +1553,12 @@ static int __init set_buf_size(char *str)
if (!str)
return 0;
buf_size = memparse(str, &str);
- /* nr_entries can not be zero */
- if (buf_size == 0)
- return 0;
- trace_buf_size = buf_size;
+ /*
+ * nr_entries can not be zero and the startup
+ * tests require some buffer space. Therefore
+ * ensure we have at least 4096 bytes of buffer.
+ */
+ trace_buf_size = max(4096UL, buf_size);
return 1;
}
__setup("trace_buf_size=", set_buf_size);
@@ -1372,7 +1592,7 @@ unsigned long nsecs_to_usecs(unsigned long nsecs)
#undef C
#define C(a, b) b
-/* These must match the bit postions in trace_iterator_flags */
+/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
TRACE_FLAGS
NULL
@@ -1391,6 +1611,7 @@ static struct {
{ ktime_get_mono_fast_ns, "mono", 1 },
{ ktime_get_raw_fast_ns, "mono_raw", 1 },
{ ktime_get_boot_fast_ns, "boot", 1 },
+ { ktime_get_tai_fast_ns, "tai", 1 },
ARCH_TRACE_CLOCKS
};
@@ -1519,23 +1740,24 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
int len;
- if (trace_seq_used(s) <= s->seq.readpos)
+ if (trace_seq_used(s) <= s->readpos)
return -EBUSY;
- len = trace_seq_used(s) - s->seq.readpos;
+ len = trace_seq_used(s) - s->readpos;
if (cnt > len)
cnt = len;
- memcpy(buf, s->buffer + s->seq.readpos, cnt);
+ memcpy(buf, s->buffer + s->readpos, cnt);
- s->seq.readpos += cnt;
+ s->readpos += cnt;
return cnt;
}
unsigned long __read_mostly tracing_thresh;
+
+#ifdef CONFIG_TRACER_MAX_TRACE
static const struct file_operations tracing_max_lat_fops;
-#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
- defined(CONFIG_FSNOTIFY)
+#ifdef LATENCY_FS_NOTIFY
static struct workqueue_struct *fsnotify_wq;
@@ -1543,8 +1765,7 @@ static void latency_fsnotify_workfn(struct work_struct *work)
{
struct trace_array *tr = container_of(work, struct trace_array,
fsnotify_work);
- fsnotify(tr->d_max_latency->d_inode, FS_MODIFY,
- tr->d_max_latency->d_inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+ fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
}
static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
@@ -1559,8 +1780,9 @@ static void trace_create_maxlat_file(struct trace_array *tr,
{
INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
- tr->d_max_latency = trace_create_file("tracing_max_latency", 0644,
- d_tracer, &tr->max_latency,
+ tr->d_max_latency = trace_create_file("tracing_max_latency",
+ TRACE_MODE_WRITE,
+ d_tracer, tr,
&tracing_max_lat_fops);
}
@@ -1589,19 +1811,14 @@ void latency_fsnotify(struct trace_array *tr)
irq_work_queue(&tr->fsnotify_irqwork);
}
-/*
- * (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
- * defined(CONFIG_FSNOTIFY)
- */
-#else
+#else /* !LATENCY_FS_NOTIFY */
#define trace_create_maxlat_file(tr, d_tracer) \
- trace_create_file("tracing_max_latency", 0644, d_tracer, \
- &tr->max_latency, &tracing_max_lat_fops)
+ trace_create_file("tracing_max_latency", TRACE_MODE_WRITE, \
+ d_tracer, tr, &tracing_max_lat_fops)
#endif
-#ifdef CONFIG_TRACER_MAX_TRACE
/*
* Copy the new maximum trace into the separate maximum-trace
* structure. (this way the maximum trace is permanently saved,
@@ -1676,15 +1893,19 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
ring_buffer_record_off(tr->max_buffer.buffer);
#ifdef CONFIG_TRACER_SNAPSHOT
- if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
- goto out_unlock;
+ if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
+ arch_spin_unlock(&tr->max_lock);
+ return;
+ }
#endif
swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
__update_max_tr(tr, tsk, cpu);
- out_unlock:
arch_spin_unlock(&tr->max_lock);
+
+ /* Any waiters on the old snapshot buffer need to wake up */
+ ring_buffer_wake_waiters(tr->array_buffer.buffer, RING_BUFFER_ALL_CPUS);
}
/**
@@ -1720,9 +1941,10 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
* place on this CPU. We fail to record, but we reset
* the max trace buffer (no one writes directly to it)
* and flag that it failed.
+ * Another reason could be that a resize is in progress.
*/
trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
- "Failed to swap buffers due to commit in progress\n");
+ "Failed to swap buffers due to commit or resize in progress\n");
}
WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
@@ -1730,16 +1952,28 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
__update_max_tr(tr, tsk, cpu);
arch_spin_unlock(&tr->max_lock);
}
+
#endif /* CONFIG_TRACER_MAX_TRACE */
static int wait_on_pipe(struct trace_iterator *iter, int full)
{
+ int ret;
+
/* Iterators are static, they should be filled or empty */
if (trace_buffer_iter(iter, iter->cpu_file))
return 0;
- return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
- full);
+ ret = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, full);
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+ /*
+ * Make sure this is still the snapshot buffer, as if a snapshot were
+ * to happen, this would now be the main buffer.
+ */
+ if (iter->snapshot)
+ iter->array_buffer = &iter->tr->max_buffer;
+#endif
+ return ret;
}
#ifdef CONFIG_FTRACE_STARTUP_TEST
@@ -1782,6 +2016,12 @@ static int run_tracer_selftest(struct tracer *type)
if (!selftests_can_run)
return save_selftest(type);
+ if (!tracing_is_on()) {
+ pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
+ type->name);
+ return 0;
+ }
+
/*
* Run a selftest on this tracer.
* Here we reset the trace buffer, and set the current
@@ -1796,7 +2036,7 @@ static int run_tracer_selftest(struct tracer *type)
#ifdef CONFIG_TRACER_MAX_TRACE
if (type->use_max_tr) {
/* If we expanded the buffers, make sure the max is expanded too */
- if (ring_buffer_expanded)
+ if (tr->ring_buffer_expanded)
ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
RING_BUFFER_ALL_CPUS);
tr->allocated_snapshot = true;
@@ -1822,7 +2062,7 @@ static int run_tracer_selftest(struct tracer *type)
tr->allocated_snapshot = false;
/* Shrink the max buffer again */
- if (ring_buffer_expanded)
+ if (tr->ring_buffer_expanded)
ring_buffer_resize(tr->max_buffer.buffer, 1,
RING_BUFFER_ALL_CPUS);
}
@@ -1832,6 +2072,24 @@ static int run_tracer_selftest(struct tracer *type)
return 0;
}
+static int do_run_tracer_selftest(struct tracer *type)
+{
+ int ret;
+
+ /*
+ * Tests can take a long time, especially if they are run one after the
+ * other, as does happen during bootup when all the tracers are
+ * registered. This could cause the soft lockup watchdog to trigger.
+ */
+ cond_resched();
+
+ tracing_selftest_running = true;
+ ret = run_tracer_selftest(type);
+ tracing_selftest_running = false;
+
+ return ret;
+}
+
static __init int init_trace_selftests(void)
{
struct trace_selftests *p, *n;
@@ -1883,6 +2141,10 @@ static inline int run_tracer_selftest(struct tracer *type)
{
return 0;
}
+static inline int do_run_tracer_selftest(struct tracer *type)
+{
+ return 0;
+}
#endif /* CONFIG_FTRACE_STARTUP_TEST */
static void add_tracer_options(struct trace_array *tr, struct tracer *t);
@@ -1918,8 +2180,6 @@ int __init register_tracer(struct tracer *type)
mutex_lock(&trace_types_lock);
- tracing_selftest_running = true;
-
for (t = trace_types; t; t = t->next) {
if (strcmp(type->name, t->name) == 0) {
/* already found */
@@ -1948,7 +2208,7 @@ int __init register_tracer(struct tracer *type)
/* store the tracer for __set_tracer_option */
type->flags->trace = type;
- ret = run_tracer_selftest(type);
+ ret = do_run_tracer_selftest(type);
if (ret < 0)
goto out;
@@ -1957,7 +2217,6 @@ int __init register_tracer(struct tracer *type)
add_tracer_options(&global_trace, type);
out:
- tracing_selftest_running = false;
mutex_unlock(&trace_types_lock);
if (ret || !default_bootup_tracer)
@@ -1974,11 +2233,7 @@ int __init register_tracer(struct tracer *type)
apply_trace_boot_options();
/* disable other selftests, since this will break it. */
- tracing_selftest_disabled = true;
-#ifdef CONFIG_FTRACE_STARTUP_TEST
- printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
- type->name);
-#endif
+ disable_tracing_selftest("running a tracer");
out_unlock:
return ret;
@@ -2003,7 +2258,6 @@ static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
void tracing_reset_online_cpus(struct array_buffer *buf)
{
struct trace_buffer *buffer = buf->buffer;
- int cpu;
if (!buffer)
return;
@@ -2015,17 +2269,18 @@ void tracing_reset_online_cpus(struct array_buffer *buf)
buf->time_start = buffer_ftrace_now(buf, buf->cpu);
- for_each_online_cpu(cpu)
- ring_buffer_reset_cpu(buffer, cpu);
+ ring_buffer_reset_online_cpus(buffer);
ring_buffer_record_enable(buffer);
}
/* Must have trace_types_lock held */
-void tracing_reset_all_online_cpus(void)
+void tracing_reset_all_online_cpus_unlocked(void)
{
struct trace_array *tr;
+ lockdep_assert_held(&trace_types_lock);
+
list_for_each_entry(tr, &ftrace_trace_arrays, list) {
if (!tr->clear_trace)
continue;
@@ -2037,23 +2292,39 @@ void tracing_reset_all_online_cpus(void)
}
}
+void tracing_reset_all_online_cpus(void)
+{
+ mutex_lock(&trace_types_lock);
+ tracing_reset_all_online_cpus_unlocked();
+ mutex_unlock(&trace_types_lock);
+}
+
+/*
+ * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
+ * is the tgid last observed corresponding to pid=i.
+ */
static int *tgid_map;
+/* The maximum valid index into tgid_map. */
+static size_t tgid_map_max;
+
#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
+/*
+ * Preemption must be disabled before acquiring trace_cmdline_lock.
+ * The various trace_arrays' max_lock must be acquired in a context
+ * where interrupt is disabled.
+ */
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer {
unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
unsigned *map_cmdline_to_pid;
unsigned cmdline_num;
int cmdline_idx;
- char *saved_cmdlines;
+ char saved_cmdlines[];
};
static struct saved_cmdlines_buffer *savedcmd;
-/* temporary disable recording */
-static atomic_t trace_record_taskinfo_disabled __read_mostly;
-
static inline char *get_saved_cmdlines(int idx)
{
return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
@@ -2064,47 +2335,60 @@ static inline void set_cmdline(int idx, const char *cmdline)
strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
}
-static int allocate_cmdlines_buffer(unsigned int val,
- struct saved_cmdlines_buffer *s)
+static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
{
+ int order = get_order(sizeof(*s) + s->cmdline_num * TASK_COMM_LEN);
+
+ kfree(s->map_cmdline_to_pid);
+ kmemleak_free(s);
+ free_pages((unsigned long)s, order);
+}
+
+static struct saved_cmdlines_buffer *allocate_cmdlines_buffer(unsigned int val)
+{
+ struct saved_cmdlines_buffer *s;
+ struct page *page;
+ int orig_size, size;
+ int order;
+
+ /* Figure out how much is needed to hold the given number of cmdlines */
+ orig_size = sizeof(*s) + val * TASK_COMM_LEN;
+ order = get_order(orig_size);
+ size = 1 << (order + PAGE_SHIFT);
+ page = alloc_pages(GFP_KERNEL, order);
+ if (!page)
+ return NULL;
+
+ s = page_address(page);
+ kmemleak_alloc(s, size, 1, GFP_KERNEL);
+ memset(s, 0, sizeof(*s));
+
+ /* Round up to actual allocation */
+ val = (size - sizeof(*s)) / TASK_COMM_LEN;
+ s->cmdline_num = val;
+
s->map_cmdline_to_pid = kmalloc_array(val,
sizeof(*s->map_cmdline_to_pid),
GFP_KERNEL);
- if (!s->map_cmdline_to_pid)
- return -ENOMEM;
-
- s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
- if (!s->saved_cmdlines) {
- kfree(s->map_cmdline_to_pid);
- return -ENOMEM;
+ if (!s->map_cmdline_to_pid) {
+ free_saved_cmdlines_buffer(s);
+ return NULL;
}
s->cmdline_idx = 0;
- s->cmdline_num = val;
memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
sizeof(s->map_pid_to_cmdline));
memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
val * sizeof(*s->map_cmdline_to_pid));
- return 0;
+ return s;
}
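Because the buffer is now taken straight from the page allocator, allocate_cmdlines_buffer() rounds the request up to a power-of-two number of pages (get_order()) and converts the slack beyond sizeof(*s) back into extra TASK_COMM_LEN-sized slots, so cmdline_num can come out noticeably larger than the count that was asked for. The kmemleak_alloc()/kmemleak_free() pairing is there because kmemleak does not track bare page allocations on its own.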
static int trace_create_savedcmd(void)
{
- int ret;
-
- savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
- if (!savedcmd)
- return -ENOMEM;
-
- ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
- if (ret < 0) {
- kfree(savedcmd);
- savedcmd = NULL;
- return -ENOMEM;
- }
+ savedcmd = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT);
- return 0;
+ return savedcmd ? 0 : -ENOMEM;
}
int is_tracing_stopped(void)
@@ -2112,13 +2396,7 @@ int is_tracing_stopped(void)
return global_trace.stop_count;
}
-/**
- * tracing_start - quick start of the tracer
- *
- * If tracing is enabled but was stopped by tracing_stop,
- * this will start the tracer back up.
- */
-void tracing_start(void)
+static void tracing_start_tr(struct trace_array *tr)
{
struct trace_buffer *buffer;
unsigned long flags;
@@ -2126,161 +2404,117 @@ void tracing_start(void)
if (tracing_disabled)
return;
- raw_spin_lock_irqsave(&global_trace.start_lock, flags);
- if (--global_trace.stop_count) {
- if (global_trace.stop_count < 0) {
+ raw_spin_lock_irqsave(&tr->start_lock, flags);
+ if (--tr->stop_count) {
+ if (WARN_ON_ONCE(tr->stop_count < 0)) {
/* Someone screwed up their debugging */
- WARN_ON_ONCE(1);
- global_trace.stop_count = 0;
+ tr->stop_count = 0;
}
goto out;
}
/* Prevent the buffers from switching */
- arch_spin_lock(&global_trace.max_lock);
+ arch_spin_lock(&tr->max_lock);
- buffer = global_trace.array_buffer.buffer;
+ buffer = tr->array_buffer.buffer;
if (buffer)
ring_buffer_record_enable(buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
- buffer = global_trace.max_buffer.buffer;
+ buffer = tr->max_buffer.buffer;
if (buffer)
ring_buffer_record_enable(buffer);
#endif
- arch_spin_unlock(&global_trace.max_lock);
-
- out:
- raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
-}
-
-static void tracing_start_tr(struct trace_array *tr)
-{
- struct trace_buffer *buffer;
- unsigned long flags;
-
- if (tracing_disabled)
- return;
-
- /* If global, we need to also start the max tracer */
- if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
- return tracing_start();
-
- raw_spin_lock_irqsave(&tr->start_lock, flags);
-
- if (--tr->stop_count) {
- if (tr->stop_count < 0) {
- /* Someone screwed up their debugging */
- WARN_ON_ONCE(1);
- tr->stop_count = 0;
- }
- goto out;
- }
-
- buffer = tr->array_buffer.buffer;
- if (buffer)
- ring_buffer_record_enable(buffer);
+ arch_spin_unlock(&tr->max_lock);
out:
raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
/**
- * tracing_stop - quick stop of the tracer
+ * tracing_start - quick start of the tracer
*
- * Light weight way to stop tracing. Use in conjunction with
- * tracing_start.
+ * If tracing is enabled but was stopped by tracing_stop,
+ * this will start the tracer back up.
*/
-void tracing_stop(void)
+void tracing_start(void)
+{
+ return tracing_start_tr(&global_trace);
+}
+
+static void tracing_stop_tr(struct trace_array *tr)
{
struct trace_buffer *buffer;
unsigned long flags;
- raw_spin_lock_irqsave(&global_trace.start_lock, flags);
- if (global_trace.stop_count++)
+ raw_spin_lock_irqsave(&tr->start_lock, flags);
+ if (tr->stop_count++)
goto out;
/* Prevent the buffers from switching */
- arch_spin_lock(&global_trace.max_lock);
+ arch_spin_lock(&tr->max_lock);
- buffer = global_trace.array_buffer.buffer;
+ buffer = tr->array_buffer.buffer;
if (buffer)
ring_buffer_record_disable(buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
- buffer = global_trace.max_buffer.buffer;
+ buffer = tr->max_buffer.buffer;
if (buffer)
ring_buffer_record_disable(buffer);
#endif
- arch_spin_unlock(&global_trace.max_lock);
+ arch_spin_unlock(&tr->max_lock);
out:
- raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
+ raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
-static void tracing_stop_tr(struct trace_array *tr)
+/**
+ * tracing_stop - quick stop of the tracer
+ *
+ * Light weight way to stop tracing. Use in conjunction with
+ * tracing_start.
+ */
+void tracing_stop(void)
{
- struct trace_buffer *buffer;
- unsigned long flags;
-
- /* If global, we need to also stop the max tracer */
- if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
- return tracing_stop();
-
- raw_spin_lock_irqsave(&tr->start_lock, flags);
- if (tr->stop_count++)
- goto out;
-
- buffer = tr->array_buffer.buffer;
- if (buffer)
- ring_buffer_record_disable(buffer);
-
- out:
- raw_spin_unlock_irqrestore(&tr->start_lock, flags);
+ return tracing_stop_tr(&global_trace);
}
static int trace_save_cmdline(struct task_struct *tsk)
{
- unsigned pid, idx;
+ unsigned tpid, idx;
/* treat recording of idle task as a success */
if (!tsk->pid)
return 1;
- if (unlikely(tsk->pid > PID_MAX_DEFAULT))
- return 0;
+ tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
/*
* It's not the end of the world if we don't get
* the lock, but we also don't want to spin
* nor do we want to disable interrupts,
* so if we miss here, then better luck next time.
+ *
+ * This is called from within the scheduler and wakeup paths, so
+ * interrupts had better be disabled and the run queue lock held.
*/
+ lockdep_assert_preemption_disabled();
if (!arch_spin_trylock(&trace_cmdline_lock))
return 0;
- idx = savedcmd->map_pid_to_cmdline[tsk->pid];
+ idx = savedcmd->map_pid_to_cmdline[tpid];
if (idx == NO_CMDLINE_MAP) {
idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
- /*
- * Check whether the cmdline buffer at idx has a pid
- * mapped. We are going to overwrite that entry so we
- * need to clear the map_pid_to_cmdline. Otherwise we
- * would read the new comm for the old pid.
- */
- pid = savedcmd->map_cmdline_to_pid[idx];
- if (pid != NO_CMDLINE_MAP)
- savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
-
- savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
- savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
-
+ savedcmd->map_pid_to_cmdline[tpid] = idx;
savedcmd->cmdline_idx = idx;
}
+ savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
set_cmdline(idx, tsk->comm);
arch_spin_unlock(&trace_cmdline_lock);
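Masking with PID_MAX_DEFAULT - 1 turns map_pid_to_cmdline into a small hash keyed on the low pid bits: with the usual PID_MAX_DEFAULT of 0x8000, pid 40000 and pid 7232 both land in slot 7232 (40000 & 32767). That is why the full pid is still stored in map_cmdline_to_pid above, and why __trace_find_cmdline() in the next hunk compares that stored pid against the one being looked up before trusting the cached comm, falling back to "<...>" on a collision.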
@@ -2291,6 +2525,7 @@ static int trace_save_cmdline(struct task_struct *tsk)
static void __trace_find_cmdline(int pid, char comm[])
{
unsigned map;
+ int tpid;
if (!pid) {
strcpy(comm, "<idle>");
@@ -2302,16 +2537,16 @@ static void __trace_find_cmdline(int pid, char comm[])
return;
}
- if (pid > PID_MAX_DEFAULT) {
- strcpy(comm, "<...>");
- return;
+ tpid = pid & (PID_MAX_DEFAULT - 1);
+ map = savedcmd->map_pid_to_cmdline[tpid];
+ if (map != NO_CMDLINE_MAP) {
+ tpid = savedcmd->map_cmdline_to_pid[map];
+ if (tpid == pid) {
+ strscpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
+ return;
+ }
}
-
- map = savedcmd->map_pid_to_cmdline[pid];
- if (map != NO_CMDLINE_MAP)
- strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
- else
- strcpy(comm, "<...>");
+ strcpy(comm, "<...>");
}
void trace_find_cmdline(int pid, char comm[])
@@ -2325,24 +2560,41 @@ void trace_find_cmdline(int pid, char comm[])
preempt_enable();
}
+static int *trace_find_tgid_ptr(int pid)
+{
+ /*
+ * Pairs with the smp_store_release in set_tracer_flag() to ensure that
+ * if we observe a non-NULL tgid_map then we also observe the correct
+ * tgid_map_max.
+ */
+ int *map = smp_load_acquire(&tgid_map);
+
+ if (unlikely(!map || pid > tgid_map_max))
+ return NULL;
+
+ return &map[pid];
+}
+
int trace_find_tgid(int pid)
{
- if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
- return 0;
+ int *ptr = trace_find_tgid_ptr(pid);
- return tgid_map[pid];
+ return ptr ? *ptr : 0;
}
static int trace_save_tgid(struct task_struct *tsk)
{
+ int *ptr;
+
/* treat recording of idle task as a success */
if (!tsk->pid)
return 1;
- if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
+ ptr = trace_find_tgid_ptr(tsk->pid);
+ if (!ptr)
return 0;
- tgid_map[tsk->pid] = tsk->tgid;
+ *ptr = tsk->tgid;
return 1;
}
@@ -2350,8 +2602,6 @@ static bool tracing_record_taskinfo_skip(int flags)
{
if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
return true;
- if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
- return true;
if (!__this_cpu_read(trace_taskinfo_save))
return true;
return false;
@@ -2440,36 +2690,46 @@ enum print_line_t trace_handle_return(struct trace_seq *s)
}
EXPORT_SYMBOL_GPL(trace_handle_return);
-void
-tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
- unsigned long flags, int pc)
+static unsigned short migration_disable_value(void)
{
- struct task_struct *tsk = current;
-
- entry->preempt_count = pc & 0xff;
- entry->pid = (tsk) ? tsk->pid : 0;
- entry->type = type;
- entry->flags =
-#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
- (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
+#if defined(CONFIG_SMP)
+ return current->migration_disabled;
#else
- TRACE_FLAG_IRQS_NOSUPPORT |
+ return 0;
#endif
- ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
- ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
- ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
- (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
- (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
}
-EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
+
+unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
+{
+ unsigned int trace_flags = irqs_status;
+ unsigned int pc;
+
+ pc = preempt_count();
+
+ if (pc & NMI_MASK)
+ trace_flags |= TRACE_FLAG_NMI;
+ if (pc & HARDIRQ_MASK)
+ trace_flags |= TRACE_FLAG_HARDIRQ;
+ if (in_serving_softirq())
+ trace_flags |= TRACE_FLAG_SOFTIRQ;
+ if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
+ trace_flags |= TRACE_FLAG_BH_OFF;
+
+ if (tif_need_resched())
+ trace_flags |= TRACE_FLAG_NEED_RESCHED;
+ if (test_preempt_need_resched())
+ trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
+ return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
+ (min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
+}
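The packed layout follows directly from the return expression: preempt depth in bits 0-3 (clamped to 15), migration-disable depth in bits 4-7, and the TRACE_FLAG_* bits starting at bit 16. A sketch of matching decoders (names are illustrative, not kernel API):

static inline unsigned int trace_ctx_preempt_depth(unsigned int trace_ctx)
{
	return trace_ctx & 0xf;
}

static inline unsigned int trace_ctx_migrate_disable(unsigned int trace_ctx)
{
	return (trace_ctx >> 4) & 0xf;
}

static inline unsigned int trace_ctx_flags(unsigned int trace_ctx)
{
	return trace_ctx >> 16;		/* TRACE_FLAG_* bits */
}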
struct ring_buffer_event *
trace_buffer_lock_reserve(struct trace_buffer *buffer,
int type,
unsigned long len,
- unsigned long flags, int pc)
+ unsigned int trace_ctx)
{
- return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
+ return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
}
DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
@@ -2504,8 +2764,11 @@ void trace_buffered_event_enable(void)
for_each_tracing_cpu(cpu) {
page = alloc_pages_node(cpu_to_node(cpu),
GFP_KERNEL | __GFP_NORETRY, 0);
- if (!page)
- goto failed;
+ /* This is just an optimization and can handle failures */
+ if (!page) {
+ pr_err("Failed to allocate event buffer\n");
+ break;
+ }
event = page_address(page);
memset(event, 0, sizeof(*event));
@@ -2514,15 +2777,11 @@ void trace_buffered_event_enable(void)
preempt_disable();
if (cpu == smp_processor_id() &&
- this_cpu_read(trace_buffered_event) !=
+ __this_cpu_read(trace_buffered_event) !=
per_cpu(trace_buffered_event, cpu))
WARN_ON_ONCE(1);
preempt_enable();
}
-
- return;
- failed:
- trace_buffered_event_disable();
}
static void enable_trace_buffered_event(void *data)
@@ -2557,11 +2816,9 @@ void trace_buffered_event_disable(void)
if (--trace_buffered_event_ref)
return;
- preempt_disable();
/* For each CPU, set the buffer as used. */
- smp_call_function_many(tracing_buffer_mask,
- disable_trace_buffered_event, NULL, 1);
- preempt_enable();
+ on_each_cpu_mask(tracing_buffer_mask, disable_trace_buffered_event,
+ NULL, true);
/* Wait for all current users to finish */
synchronize_rcu();
@@ -2570,17 +2827,19 @@ void trace_buffered_event_disable(void)
free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
per_cpu(trace_buffered_event, cpu) = NULL;
}
+
/*
- * Make sure trace_buffered_event is NULL before clearing
- * trace_buffered_event_cnt.
+ * Wait for all CPUs that may have started checking whether they can
+ * use their event buffer only after the previous synchronize_rcu()
+ * call and that still read a valid pointer from trace_buffered_event.
+ * They must not see a cleared trace_buffered_event_cnt, else they
+ * could wrongly decide to use the pointed-to buffer, which is now freed.
*/
- smp_wmb();
+ synchronize_rcu();
- preempt_disable();
- /* Do the work on each cpu */
- smp_call_function_many(tracing_buffer_mask,
- enable_trace_buffered_event, NULL, 1);
- preempt_enable();
+ /* For each CPU, relinquish the buffer */
+ on_each_cpu_mask(tracing_buffer_mask, enable_trace_buffered_event, NULL,
+ true);
}
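The sequence in trace_buffered_event_disable() is thus: mark every CPU's buffer as in use, wait for in-flight users, free and NULL the per-CPU pointers, wait again so no CPU can still pair a stale pointer with a cleared count, and only then drop the per-CPU counts back to their normal value; with the pointers now NULL, writers fall back to reserving directly in the ring buffer.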
static struct trace_buffer *temp_buffer;
@@ -2589,44 +2848,86 @@ struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
struct trace_event_file *trace_file,
int type, unsigned long len,
- unsigned long flags, int pc)
+ unsigned int trace_ctx)
{
struct ring_buffer_event *entry;
+ struct trace_array *tr = trace_file->tr;
int val;
- *current_rb = trace_file->tr->array_buffer.buffer;
-
- if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
- (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
- (entry = this_cpu_read(trace_buffered_event))) {
- /* Try to use the per cpu buffer first */
- val = this_cpu_inc_return(trace_buffered_event_cnt);
- if (val == 1) {
- trace_event_setup(entry, type, flags, pc);
- entry->array[0] = len;
- return entry;
+ *current_rb = tr->array_buffer.buffer;
+
+ if (!tr->no_filter_buffering_ref &&
+ (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
+ preempt_disable_notrace();
+ /*
+ * Filtering is on, so try to use the per cpu buffer first.
+ * This buffer will simulate a ring_buffer_event,
+ * where the type_len is zero and the array[0] will
+ * hold the full length.
+ * (see include/linux/ring_buffer.h for details on
+ * how the ring_buffer_event is structured).
+ *
+ * Using a temp buffer during filtering and copying it
+ * on a matched filter is quicker than writing directly
+ * into the ring buffer and then discarding it when
+ * it doesn't match. That is because the discard
+ * requires several atomic operations to get right.
+ * Copying on match and doing nothing on a failed match
+ * is still quicker than no copy on match, but having
+ * to discard out of the ring buffer on a failed match.
+ */
+ if ((entry = __this_cpu_read(trace_buffered_event))) {
+ int max_len = PAGE_SIZE - struct_size(entry, array, 1);
+
+ val = this_cpu_inc_return(trace_buffered_event_cnt);
+
+ /*
+ * Preemption is disabled, but interrupts and NMIs
+ * can still come in now. If that happens after
+ * the above increment, then it will have to go
+ * back to the old method of allocating the event
+ * on the ring buffer, and if the filter fails, it
+ * will have to call ring_buffer_discard_commit()
+ * to remove it.
+ *
+ * Need to also check the unlikely case that the
+ * length is bigger than the temp buffer size.
+ * If that happens, then the reserve is pretty much
+ * guaranteed to fail, as the ring buffer currently
+ * only allows events less than a page. But that may
+ * change in the future, so let the ring buffer reserve
+ * handle the failure in that case.
+ */
+ if (val == 1 && likely(len <= max_len)) {
+ trace_event_setup(entry, type, trace_ctx);
+ entry->array[0] = len;
+ /* Return with preemption disabled */
+ return entry;
+ }
+ this_cpu_dec(trace_buffered_event_cnt);
}
- this_cpu_dec(trace_buffered_event_cnt);
+ /* __trace_buffer_lock_reserve() disables preemption */
+ preempt_enable_notrace();
}
- entry = __trace_buffer_lock_reserve(*current_rb,
- type, len, flags, pc);
+ entry = __trace_buffer_lock_reserve(*current_rb, type, len,
+ trace_ctx);
/*
* If tracing is off, but we have triggers enabled
* we still need to look at the event data. Use the temp_buffer
- * to store the trace event for the tigger to use. It's recusive
+ * to store the trace event for the trigger to use. It's recursive
* safe and will not be recorded anywhere.
*/
if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
*current_rb = temp_buffer;
- entry = __trace_buffer_lock_reserve(*current_rb,
- type, len, flags, pc);
+ entry = __trace_buffer_lock_reserve(*current_rb, type, len,
+ trace_ctx);
}
return entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
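Note the preemption contract here: when the per-CPU buffered event is handed out, the function returns with preemption still disabled (see the comment above), and __buffer_unlock_commit() earlier in this file re-enables it after copying the temp event into the real ring buffer; on the normal path, ring_buffer_lock_reserve() inside __trace_buffer_lock_reserve() disables preemption and ring_buffer_unlock_commit() enables it again.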
-static DEFINE_SPINLOCK(tracepoint_iter_lock);
+static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock);
static DEFINE_MUTEX(tracepoint_printk_mutex);
static void output_printk(struct trace_event_buffer *fbuffer)
@@ -2654,14 +2955,14 @@ static void output_printk(struct trace_event_buffer *fbuffer)
event = &fbuffer->trace_file->event_call->event;
- spin_lock_irqsave(&tracepoint_iter_lock, flags);
+ raw_spin_lock_irqsave(&tracepoint_iter_lock, flags);
trace_seq_init(&iter->seq);
iter->ent = fbuffer->entry;
event_call->event.funcs->trace(iter, 0, event);
trace_seq_putc(&iter->seq, 0);
printk("%s", iter->seq.buffer);
- spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
+ raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
}
int tracepoint_printk_sysctl(struct ctl_table *table, int write,
@@ -2699,12 +3000,26 @@ int tracepoint_printk_sysctl(struct ctl_table *table, int write,
void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
{
+ enum event_trigger_type tt = ETT_NONE;
+ struct trace_event_file *file = fbuffer->trace_file;
+
+ if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
+ fbuffer->entry, &tt))
+ goto discard;
+
if (static_key_false(&tracepoint_printk_key.key))
output_printk(fbuffer);
- event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
- fbuffer->event, fbuffer->entry,
- fbuffer->flags, fbuffer->pc, fbuffer->regs);
+ if (static_branch_unlikely(&trace_event_exports_enabled))
+ ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
+
+ trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
+ fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
+
+discard:
+ if (tt)
+ event_triggers_post_call(file, tt);
}
EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
@@ -2720,7 +3035,7 @@ EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
void trace_buffer_unlock_commit_regs(struct trace_array *tr,
struct trace_buffer *buffer,
struct ring_buffer_event *event,
- unsigned long flags, int pc,
+ unsigned int trace_ctx,
struct pt_regs *regs)
{
__buffer_unlock_commit(buffer, event);
@@ -2731,8 +3046,8 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
* and mmiotrace, but that's ok if they lose a function or
* two. They are not that meaningful.
*/
- ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
- ftrace_trace_userstack(buffer, flags, pc);
+ ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
+ ftrace_trace_userstack(tr, buffer, trace_ctx);
}
/*
@@ -2745,133 +3060,9 @@ trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
__buffer_unlock_commit(buffer, event);
}
-static void
-trace_process_export(struct trace_export *export,
- struct ring_buffer_event *event)
-{
- struct trace_entry *entry;
- unsigned int size = 0;
-
- entry = ring_buffer_event_data(event);
- size = ring_buffer_event_length(event);
- export->write(export, entry, size);
-}
-
-static DEFINE_MUTEX(ftrace_export_lock);
-
-static struct trace_export __rcu *ftrace_exports_list __read_mostly;
-
-static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
-
-static inline void ftrace_exports_enable(void)
-{
- static_branch_enable(&ftrace_exports_enabled);
-}
-
-static inline void ftrace_exports_disable(void)
-{
- static_branch_disable(&ftrace_exports_enabled);
-}
-
-static void ftrace_exports(struct ring_buffer_event *event)
-{
- struct trace_export *export;
-
- preempt_disable_notrace();
-
- export = rcu_dereference_raw_check(ftrace_exports_list);
- while (export) {
- trace_process_export(export, event);
- export = rcu_dereference_raw_check(export->next);
- }
-
- preempt_enable_notrace();
-}
-
-static inline void
-add_trace_export(struct trace_export **list, struct trace_export *export)
-{
- rcu_assign_pointer(export->next, *list);
- /*
- * We are entering export into the list but another
- * CPU might be walking that list. We need to make sure
- * the export->next pointer is valid before another CPU sees
- * the export pointer included into the list.
- */
- rcu_assign_pointer(*list, export);
-}
-
-static inline int
-rm_trace_export(struct trace_export **list, struct trace_export *export)
-{
- struct trace_export **p;
-
- for (p = list; *p != NULL; p = &(*p)->next)
- if (*p == export)
- break;
-
- if (*p != export)
- return -1;
-
- rcu_assign_pointer(*p, (*p)->next);
-
- return 0;
-}
-
-static inline void
-add_ftrace_export(struct trace_export **list, struct trace_export *export)
-{
- if (*list == NULL)
- ftrace_exports_enable();
-
- add_trace_export(list, export);
-}
-
-static inline int
-rm_ftrace_export(struct trace_export **list, struct trace_export *export)
-{
- int ret;
-
- ret = rm_trace_export(list, export);
- if (*list == NULL)
- ftrace_exports_disable();
-
- return ret;
-}
-
-int register_ftrace_export(struct trace_export *export)
-{
- if (WARN_ON_ONCE(!export->write))
- return -1;
-
- mutex_lock(&ftrace_export_lock);
-
- add_ftrace_export(&ftrace_exports_list, export);
-
- mutex_unlock(&ftrace_export_lock);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(register_ftrace_export);
-
-int unregister_ftrace_export(struct trace_export *export)
-{
- int ret;
-
- mutex_lock(&ftrace_export_lock);
-
- ret = rm_ftrace_export(&ftrace_exports_list, export);
-
- mutex_unlock(&ftrace_export_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(unregister_ftrace_export);
-
void
-trace_function(struct trace_array *tr,
- unsigned long ip, unsigned long parent_ip, unsigned long flags,
- int pc)
+trace_function(struct trace_array *tr, unsigned long ip,
+	       unsigned long parent_ip, unsigned int trace_ctx)
{
struct trace_event_call *call = &event_function;
struct trace_buffer *buffer = tr->array_buffer.buffer;
@@ -2879,7 +3070,7 @@ trace_function(struct trace_array *tr,
struct ftrace_entry *entry;
event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
- flags, pc);
+ trace_ctx);
if (!event)
return;
entry = ring_buffer_event_data(event);
@@ -2887,8 +3078,8 @@ trace_function(struct trace_array *tr,
entry->parent_ip = parent_ip;
if (!call_filter_check_discard(call, entry, buffer, event)) {
- if (static_branch_unlikely(&ftrace_exports_enabled))
- ftrace_exports(event);
+ if (static_branch_unlikely(&trace_function_exports_enabled))
+ ftrace_exports(event, TRACE_EXPORT_FUNCTION);
__buffer_unlock_commit(buffer, event);
}
}
@@ -2913,8 +3104,8 @@ static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);
static void __ftrace_trace_stack(struct trace_buffer *buffer,
- unsigned long flags,
- int skip, int pc, struct pt_regs *regs)
+ unsigned int trace_ctx,
+ int skip, struct pt_regs *regs)
{
struct trace_event_call *call = &event_kernel_stack;
struct ring_buffer_event *event;
@@ -2932,18 +3123,12 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer,
skip++;
#endif
- /*
- * Since events can happen in NMIs there's no safe way to
- * use the per cpu ftrace_stacks. We reserve it and if an interrupt
- * or NMI comes in, it will just have to use the default
- * FTRACE_STACK_SIZE.
- */
preempt_disable_notrace();
stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
/* This should never happen. If it does, yell once and skip */
- if (WARN_ON_ONCE(stackidx > FTRACE_KSTACK_NESTING))
+ if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
goto out;
/*
@@ -2965,15 +3150,16 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer,
nr_entries = stack_trace_save(fstack->calls, size, skip);
}
- size = nr_entries * sizeof(unsigned long);
event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
- sizeof(*entry) + size, flags, pc);
+ struct_size(entry, caller, nr_entries),
+ trace_ctx);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
- memcpy(&entry->caller, fstack->calls, size);
entry->size = nr_entries;
+ memcpy(&entry->caller, fstack->calls,
+ flex_array_size(entry, caller, nr_entries));
if (!call_filter_check_discard(call, entry, buffer, event))
__buffer_unlock_commit(buffer, event);
@@ -2988,37 +3174,40 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer,
static inline void ftrace_trace_stack(struct trace_array *tr,
struct trace_buffer *buffer,
- unsigned long flags,
- int skip, int pc, struct pt_regs *regs)
+ unsigned int trace_ctx,
+ int skip, struct pt_regs *regs)
{
if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
return;
- __ftrace_trace_stack(buffer, flags, skip, pc, regs);
+ __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
}
-void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
- int pc)
+void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
+ int skip)
{
struct trace_buffer *buffer = tr->array_buffer.buffer;
if (rcu_is_watching()) {
- __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
+ __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
return;
}
+ if (WARN_ON_ONCE(IS_ENABLED(CONFIG_GENERIC_ENTRY)))
+ return;
+
/*
- * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
+ * When an NMI triggers, RCU is enabled via ct_nmi_enter(),
* but if the above rcu_is_watching() failed, then the NMI
- * triggered someplace critical, and rcu_irq_enter() should
+ * triggered someplace critical, and ct_irq_enter() should
* not be called from NMI.
*/
if (unlikely(in_nmi()))
return;
- rcu_irq_enter_irqson();
- __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
- rcu_irq_exit_irqson();
+ ct_irq_enter_irqson();
+ __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
+ ct_irq_exit_irqson();
}
/**
@@ -3027,19 +3216,15 @@ void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
*/
void trace_dump_stack(int skip)
{
- unsigned long flags;
-
if (tracing_disabled || tracing_selftest_running)
return;
- local_save_flags(flags);
-
#ifndef CONFIG_UNWINDER_ORC
/* Skip 1 to skip this function. */
skip++;
#endif
__ftrace_trace_stack(global_trace.array_buffer.buffer,
- flags, skip, preempt_count(), NULL);
+ tracing_gen_ctx(), skip, NULL);
}
EXPORT_SYMBOL_GPL(trace_dump_stack);
@@ -3047,13 +3232,14 @@ EXPORT_SYMBOL_GPL(trace_dump_stack);
static DEFINE_PER_CPU(int, user_stack_count);
static void
-ftrace_trace_userstack(struct trace_buffer *buffer, unsigned long flags, int pc)
+ftrace_trace_userstack(struct trace_array *tr,
+ struct trace_buffer *buffer, unsigned int trace_ctx)
{
struct trace_event_call *call = &event_user_stack;
struct ring_buffer_event *event;
struct userstack_entry *entry;
- if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
+ if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
return;
/*
@@ -3074,7 +3260,7 @@ ftrace_trace_userstack(struct trace_buffer *buffer, unsigned long flags, int pc)
__this_cpu_inc(user_stack_count);
event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
- sizeof(*entry), flags, pc);
+ sizeof(*entry), trace_ctx);
if (!event)
goto out_drop_count;
entry = ring_buffer_event_data(event);
@@ -3092,38 +3278,73 @@ ftrace_trace_userstack(struct trace_buffer *buffer, unsigned long flags, int pc)
preempt_enable();
}
#else /* CONFIG_USER_STACKTRACE_SUPPORT */
-static void ftrace_trace_userstack(struct trace_buffer *buffer,
- unsigned long flags, int pc)
+static void ftrace_trace_userstack(struct trace_array *tr,
+ struct trace_buffer *buffer,
+ unsigned int trace_ctx)
{
}
#endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
#endif /* CONFIG_STACKTRACE */
+static inline void
+func_repeats_set_delta_ts(struct func_repeats_entry *entry,
+ unsigned long long delta)
+{
+ entry->bottom_delta_ts = delta & U32_MAX;
+ entry->top_delta_ts = (delta >> 32);
+}
+
+void trace_last_func_repeats(struct trace_array *tr,
+ struct trace_func_repeats *last_info,
+ unsigned int trace_ctx)
+{
+ struct trace_buffer *buffer = tr->array_buffer.buffer;
+ struct func_repeats_entry *entry;
+ struct ring_buffer_event *event;
+ u64 delta;
+
+ event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
+ sizeof(*entry), trace_ctx);
+ if (!event)
+ return;
+
+ delta = ring_buffer_event_time_stamp(buffer, event) -
+ last_info->ts_last_call;
+
+ entry = ring_buffer_event_data(event);
+ entry->ip = last_info->ip;
+ entry->parent_ip = last_info->parent_ip;
+ entry->count = last_info->count;
+ func_repeats_set_delta_ts(entry, delta);
+
+ __buffer_unlock_commit(buffer, event);
+}
+
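/*
 * Editorial sketch, not part of the patch: func_repeats_set_delta_ts()
 * above splits the 64-bit delta across the two 32-bit fields of
 * struct func_repeats_entry. A consumer of the entry would rebuild the
 * delta with the inverse operation, roughly:
 */
static inline u64 func_repeats_get_delta_ts(struct func_repeats_entry *entry)
{
	/* Recombine the top and bottom 32-bit halves into the original delta */
	return ((u64)entry->top_delta_ts << 32) | entry->bottom_delta_ts;
}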
/* created for use with alloc_percpu */
struct trace_buffer_struct {
int nesting;
char buffer[4][TRACE_BUF_SIZE];
};
-static struct trace_buffer_struct *trace_percpu_buffer;
+static struct trace_buffer_struct __percpu *trace_percpu_buffer;
/*
- * Thise allows for lockless recording. If we're nested too deeply, then
+ * This allows for lockless recording. If we're nested too deeply, then
* this returns NULL.
*/
static char *get_trace_buf(void)
{
struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
- if (!buffer || buffer->nesting >= 4)
+ if (!trace_percpu_buffer || buffer->nesting >= 4)
return NULL;
buffer->nesting++;
/* Interrupts must see nesting incremented before we use the buffer */
barrier();
- return &buffer->buffer[buffer->nesting][0];
+ return &buffer->buffer[buffer->nesting - 1][0];
}
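/*
 * Editorial sketch, not part of the patch: callers pair get_trace_buf()
 * with put_trace_buf() while preemption is disabled, mirroring the
 * pattern trace_vbprintk() uses further down in this file:
 */
static void example_use_trace_buf(void)
{
	char *tbuffer;

	preempt_disable_notrace();
	tbuffer = get_trace_buf();	/* NULL when nested more than 4 deep */
	if (tbuffer) {
		/* format up to TRACE_BUF_SIZE bytes into tbuffer ... */
		put_trace_buf();
	}
	preempt_enable_notrace();
}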
static void put_trace_buf(void)
@@ -3135,7 +3356,10 @@ static void put_trace_buf(void)
static int alloc_percpu_trace_buffer(void)
{
- struct trace_buffer_struct *buffers;
+ struct trace_buffer_struct __percpu *buffers;
+
+ if (trace_percpu_buffer)
+ return 0;
buffers = alloc_percpu(struct trace_buffer_struct);
if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
@@ -3173,7 +3397,7 @@ void trace_printk_init_buffers(void)
pr_warn("**********************************************************\n");
/* Expand the buffers to set size */
- tracing_update_buffers();
+ tracing_update_buffers(&global_trace);
buffers_allocated = 1;
@@ -3220,9 +3444,9 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
struct trace_buffer *buffer;
struct trace_array *tr = &global_trace;
struct bprint_entry *entry;
- unsigned long flags;
+ unsigned int trace_ctx;
char *tbuffer;
- int len = 0, size, pc;
+ int len = 0, size;
if (unlikely(tracing_selftest_running || tracing_disabled))
return 0;
@@ -3230,7 +3454,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
/* Don't pollute graph traces with trace_vprintk internals */
pause_graph_tracing();
- pc = preempt_count();
+ trace_ctx = tracing_gen_ctx();
preempt_disable_notrace();
tbuffer = get_trace_buf();
@@ -3244,12 +3468,11 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
goto out_put;
- local_save_flags(flags);
size = sizeof(*entry) + sizeof(u32) * len;
buffer = tr->array_buffer.buffer;
ring_buffer_nest_start(buffer);
event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
- flags, pc);
+ trace_ctx);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
@@ -3259,7 +3482,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
memcpy(entry->buf, tbuffer, sizeof(u32) * len);
if (!call_filter_check_discard(call, entry, buffer, event)) {
__buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
+ ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
}
out:
@@ -3282,18 +3505,18 @@ __trace_array_vprintk(struct trace_buffer *buffer,
{
struct trace_event_call *call = &event_print;
struct ring_buffer_event *event;
- int len = 0, size, pc;
+ int len = 0, size;
struct print_entry *entry;
- unsigned long flags;
+ unsigned int trace_ctx;
char *tbuffer;
- if (tracing_disabled || tracing_selftest_running)
+ if (tracing_disabled)
return 0;
/* Don't pollute graph traces with trace_vprintk internals */
pause_graph_tracing();
- pc = preempt_count();
+ trace_ctx = tracing_gen_ctx();
preempt_disable_notrace();
@@ -3305,11 +3528,10 @@ __trace_array_vprintk(struct trace_buffer *buffer,
len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
- local_save_flags(flags);
size = sizeof(*entry) + len + 1;
ring_buffer_nest_start(buffer);
event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
- flags, pc);
+ trace_ctx);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
@@ -3318,7 +3540,7 @@ __trace_array_vprintk(struct trace_buffer *buffer,
memcpy(&entry->buf, tbuffer, len + 1);
if (!call_filter_check_discard(call, entry, buffer, event)) {
__buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
+ ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
}
out:
@@ -3336,9 +3558,32 @@ __printf(3, 0)
int trace_array_vprintk(struct trace_array *tr,
unsigned long ip, const char *fmt, va_list args)
{
+ if (tracing_selftest_running && tr == &global_trace)
+ return 0;
+
return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
}
+/**
+ * trace_array_printk - Print a message to a specific instance
+ * @tr: The instance trace_array descriptor
+ * @ip: The instruction pointer that this is called from.
+ * @fmt: The format to print (printf format)
+ *
+ * If a subsystem sets up its own instance, it has the right to
+ * printk strings into its tracing instance buffer using this
+ * function. Note, this function will not write into the top level
+ * buffer (use trace_printk() for that), as the top level buffer
+ * should only contain events that can be individually disabled.
+ * trace_printk() is only meant for debugging a kernel, and should
+ * never be incorporated in normal use.
+ *
+ * trace_array_printk() can be used, as it will not add noise to the
+ * top level tracing buffer.
+ *
+ * Note, trace_array_init_printk() must be called on @tr before this
+ * can be used.
+ */
__printf(3, 0)
int trace_array_printk(struct trace_array *tr,
unsigned long ip, const char *fmt, ...)
@@ -3346,12 +3591,16 @@ int trace_array_printk(struct trace_array *tr,
int ret;
va_list ap;
- if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
- return 0;
-
if (!tr)
return -ENOENT;
+ /* This is only allowed for created instances */
+ if (tr == &global_trace)
+ return 0;
+
+ if (!(tr->trace_flags & TRACE_ITER_PRINTK))
+ return 0;
+
va_start(ap, fmt);
ret = trace_array_vprintk(tr, ip, fmt, ap);
va_end(ap);
@@ -3359,6 +3608,27 @@ int trace_array_printk(struct trace_array *tr,
}
EXPORT_SYMBOL_GPL(trace_array_printk);
+/**
+ * trace_array_init_printk - Initialize buffers for trace_array_printk()
+ * @tr: The trace array to initialize the buffers for
+ *
+ * As trace_array_printk() only writes into instances, calls to it are OK
+ * to have in the kernel (unlike trace_printk()). This needs to be called
+ * before trace_array_printk() can be used on a trace_array.
+ */
+int trace_array_init_printk(struct trace_array *tr)
+{
+ if (!tr)
+ return -ENOENT;
+
+ /* This is only allowed for created instances */
+ if (tr == &global_trace)
+ return -EINVAL;
+
+ return alloc_percpu_trace_buffer();
+}
+EXPORT_SYMBOL_GPL(trace_array_init_printk);
+
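/*
 * Editorial sketch, not part of the patch: per the kernel-doc above, a
 * subsystem that owns its own trace instance initializes the printk
 * buffers once and then writes into that instance. How "my_tr" is
 * obtained (e.g. trace_array_get_by_name()) is outside this hunk and
 * its exact signature varies between kernel versions.
 */
static void example_instance_printk(struct trace_array *my_tr)
{
	if (trace_array_init_printk(my_tr))
		return;

	trace_array_printk(my_tr, _THIS_IP_, "subsystem state: %d\n", 42);
}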
__printf(3, 4)
int trace_array_printk_buf(struct trace_buffer *buffer,
unsigned long ip, const char *fmt, ...)
@@ -3476,8 +3746,300 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
return next;
}
+#define STATIC_FMT_BUF_SIZE 128
+static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
+
+char *trace_iter_expand_format(struct trace_iterator *iter)
+{
+ char *tmp;
+
+ /*
+ * iter->tr is NULL when used with tp_printk, which makes
+ * this get called where it is not safe to call krealloc().
+ */
+ if (!iter->tr || iter->fmt == static_fmt_buf)
+ return NULL;
+
+ tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
+ GFP_KERNEL);
+ if (tmp) {
+ iter->fmt_size += STATIC_FMT_BUF_SIZE;
+ iter->fmt = tmp;
+ }
+
+ return tmp;
+}
+
+/* Returns true if the string is safe to dereference from an event */
+static bool trace_safe_str(struct trace_iterator *iter, const char *str,
+ bool star, int len)
+{
+ unsigned long addr = (unsigned long)str;
+ struct trace_event *trace_event;
+ struct trace_event_call *event;
+
+ /* Ignore strings with no length */
+ if (star && !len)
+ return true;
+
+ /* OK if part of the event data */
+ if ((addr >= (unsigned long)iter->ent) &&
+ (addr < (unsigned long)iter->ent + iter->ent_size))
+ return true;
+
+ /* OK if part of the temp seq buffer */
+ if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
+ (addr < (unsigned long)iter->tmp_seq.buffer + TRACE_SEQ_BUFFER_SIZE))
+ return true;
+
+ /* Core rodata can not be freed */
+ if (is_kernel_rodata(addr))
+ return true;
+
+ if (trace_is_tracepoint_string(str))
+ return true;
+
+ /*
+ * Now this could be a module event, referencing core module
+ * data, which is OK.
+ */
+ if (!iter->ent)
+ return false;
+
+ trace_event = ftrace_find_event(iter->ent->type);
+ if (!trace_event)
+ return false;
+
+ event = container_of(trace_event, struct trace_event_call, event);
+ if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
+ return false;
+
+ /* Would rather have rodata, but this will suffice */
+ if (within_module_core(addr, event->module))
+ return true;
+
+ return false;
+}
+
+static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
+
+static int test_can_verify_check(const char *fmt, ...)
+{
+ char buf[16];
+ va_list ap;
+ int ret;
+
+ /*
+ * The verifier depends on vsnprintf() modifying the va_list that is
+ * passed to it, where it is passed by reference. Some architectures
+ * (like x86_32) pass it by value, which means that vsnprintf()
+ * does not modify the caller's va_list, and the verifier
+ * would then need to be able to understand all the values that
+ * vsnprintf can use. If it is passed by value, then the verifier
+ * is disabled.
+ */
+ va_start(ap, fmt);
+ vsnprintf(buf, 16, "%d", ap);
+ ret = va_arg(ap, int);
+ va_end(ap);
+
+ return ret;
+}
+
+static void test_can_verify(void)
+{
+ if (!test_can_verify_check("%d %d", 0, 1)) {
+ pr_info("trace event string verifier disabled\n");
+ static_branch_inc(&trace_no_verify);
+ }
+}
+
+/**
+ * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
+ * @iter: The iterator that holds the seq buffer and the event being printed
+ * @fmt: The format used to print the event
+ * @ap: The va_list holding the data to print from @fmt.
+ *
+ * This writes the data into the @iter->seq buffer using the data from
+ * @fmt and @ap. If the format has a %s, then the source of the string
+ * is examined to make sure it is safe to print, otherwise it will
+ * warn and print "[UNSAFE-MEMORY]" in place of the dereferenced string
+ * pointer.
+ */
+void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
+ va_list ap)
+{
+ const char *p = fmt;
+ const char *str;
+ int i, j;
+
+ if (WARN_ON_ONCE(!fmt))
+ return;
+
+ if (static_branch_unlikely(&trace_no_verify))
+ goto print;
+
+ /* Don't bother checking when doing a ftrace_dump() */
+ if (iter->fmt == static_fmt_buf)
+ goto print;
+
+ while (*p) {
+ bool star = false;
+ int len = 0;
+
+ j = 0;
+
+ /* We only care about %s and variants */
+ for (i = 0; p[i]; i++) {
+ if (i + 1 >= iter->fmt_size) {
+ /*
+ * If we can't expand the copy buffer,
+ * just print it.
+ */
+ if (!trace_iter_expand_format(iter))
+ goto print;
+ }
+
+ if (p[i] == '\\' && p[i+1]) {
+ i++;
+ continue;
+ }
+ if (p[i] == '%') {
+ /* Need to test cases like %08.*s */
+ for (j = 1; p[i+j]; j++) {
+ if (isdigit(p[i+j]) ||
+ p[i+j] == '.')
+ continue;
+ if (p[i+j] == '*') {
+ star = true;
+ continue;
+ }
+ break;
+ }
+ if (p[i+j] == 's')
+ break;
+ star = false;
+ }
+ j = 0;
+ }
+ /* If no %s found then just print normally */
+ if (!p[i])
+ break;
+
+ /* Copy up to the %s, and print that */
+ strncpy(iter->fmt, p, i);
+ iter->fmt[i] = '\0';
+ trace_seq_vprintf(&iter->seq, iter->fmt, ap);
+
+ /*
+ * If iter->seq is full, the above call no longer guarantees
+ * that ap is in sync with fmt processing, and further calls
+ * to va_arg() can return wrong positional arguments.
+ *
+ * Ensure that ap is no longer used in this case.
+ */
+ if (iter->seq.full) {
+ p = "";
+ break;
+ }
+
+ if (star)
+ len = va_arg(ap, int);
+
+ /* The ap now points to the string data of the %s */
+ str = va_arg(ap, const char *);
+
+ /*
+ * If you hit this warning, it is likely that the
+ * trace event in question used %s on a string that
+ * was saved at the time of the event, but may not be
+ * around when the trace is read. Use __string(),
+ * __assign_str() and __get_str() helpers in the TRACE_EVENT()
+ * instead. See samples/trace_events/trace-events-sample.h
+ * for reference.
+ */
+ if (WARN_ONCE(!trace_safe_str(iter, str, star, len),
+ "fmt: '%s' current_buffer: '%s'",
+ fmt, seq_buf_str(&iter->seq.seq))) {
+ int ret;
+
+ /* Try to safely read the string */
+ if (star) {
+ if (len + 1 > iter->fmt_size)
+ len = iter->fmt_size - 1;
+ if (len < 0)
+ len = 0;
+ ret = copy_from_kernel_nofault(iter->fmt, str, len);
+ iter->fmt[len] = 0;
+ star = false;
+ } else {
+ ret = strncpy_from_kernel_nofault(iter->fmt, str,
+ iter->fmt_size);
+ }
+ if (ret < 0)
+ trace_seq_printf(&iter->seq, "(0x%px)", str);
+ else
+ trace_seq_printf(&iter->seq, "(0x%px:%s)",
+ str, iter->fmt);
+ str = "[UNSAFE-MEMORY]";
+ strcpy(iter->fmt, "%s");
+ } else {
+ strncpy(iter->fmt, p + i, j + 1);
+ iter->fmt[j+1] = '\0';
+ }
+ if (star)
+ trace_seq_printf(&iter->seq, iter->fmt, len, str);
+ else
+ trace_seq_printf(&iter->seq, iter->fmt, str);
+
+ p += i + j + 1;
+ }
+ print:
+ if (*p)
+ trace_seq_vprintf(&iter->seq, p, ap);
+}
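/*
 * Editorial sketch, not part of the patch: the WARN_ONCE() above points
 * users at the __string()/__assign_str()/__get_str() helpers, which copy
 * the string into the event itself so the pointer handed to %s always
 * satisfies trace_safe_str(). A minimal TRACE_EVENT() fragment, as it
 * would appear in a trace header (see the referenced
 * samples/trace_events/trace-events-sample.h for the full boilerplate).
 * Note that __assign_str() historically took (dst, src); recent kernels
 * derive the source from __string() and take a single argument.
 */
TRACE_EVENT(sample_event,
	TP_PROTO(const char *name),
	TP_ARGS(name),
	TP_STRUCT__entry(
		__string(name, name)		/* reserve space inside the event */
	),
	TP_fast_assign(
		__assign_str(name, name);	/* copy the string at event time */
	),
	TP_printk("name=%s", __get_str(name))
);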
+
+const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
+{
+ const char *p, *new_fmt;
+ char *q;
+
+ if (WARN_ON_ONCE(!fmt))
+ return fmt;
+
+ if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
+ return fmt;
+
+ p = fmt;
+ new_fmt = q = iter->fmt;
+ while (*p) {
+ if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
+ if (!trace_iter_expand_format(iter))
+ return fmt;
+
+ q += iter->fmt - new_fmt;
+ new_fmt = iter->fmt;
+ }
+
+ *q++ = *p++;
+
+ /* Replace %p with %px */
+ if (p[-1] == '%') {
+ if (p[0] == '%') {
+ *q++ = *p++;
+ } else if (p[0] == 'p' && !isalnum(p[1])) {
+ *q++ = *p++;
+ *q++ = 'x';
+ }
+ }
+ }
+ *q = '\0';
+
+ return new_fmt;
+}
+
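/*
 * Editorial sketch, not part of the patch: with TRACE_ITER_HASH_PTR
 * cleared, trace_event_format() rewrites bare "%p" conversions to "%px"
 * so the raw address is printed instead of a hashed one; "%%" and
 * extended forms like "%ps" are left alone. With the flag set (the
 * default) the format is returned unchanged.
 */
static void example_format_rewrite(struct trace_iterator *iter)
{
	const char *fmt;

	fmt = trace_event_format(iter, "comm=%s ptr=%p flags=%%p\n");
	/* fmt now reads "comm=%s ptr=%px flags=%%p\n" */
}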
#define STATIC_TEMP_BUF_SIZE 128
-static char static_temp_buf[STATIC_TEMP_BUF_SIZE];
+static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
@@ -3507,13 +4069,15 @@ struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
if (iter->ent && iter->ent != iter->temp) {
if ((!iter->temp || iter->temp_size < iter->ent_size) &&
!WARN_ON_ONCE(iter->temp == static_temp_buf)) {
- kfree(iter->temp);
- iter->temp = kmalloc(iter->ent_size, GFP_KERNEL);
- if (!iter->temp)
+ void *temp;
+ temp = kmalloc(iter->ent_size, GFP_KERNEL);
+ if (!temp)
return NULL;
+ kfree(iter->temp);
+ iter->temp = temp;
+ iter->temp_size = iter->ent_size;
}
memcpy(iter->temp, iter->ent, iter->ent_size);
- iter->temp_size = iter->ent_size;
iter->ent = iter->temp;
}
entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
@@ -3610,15 +4174,16 @@ static void *s_start(struct seq_file *m, loff_t *pos)
loff_t l = 0;
int cpu;
- /*
- * copy the tracer to avoid using a global lock all around.
- * iter->trace is a copy of current_trace, the pointer to the
- * name may be used instead of a strcmp(), as iter->trace->name
- * will point to the same string as current_trace->name.
- */
mutex_lock(&trace_types_lock);
- if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
- *iter->trace = *tr->current_trace;
+ if (unlikely(tr->current_trace != iter->trace)) {
+ /* Close iter->trace before switching to the new current tracer */
+ if (iter->trace->close)
+ iter->trace->close(iter);
+ iter->trace = tr->current_trace;
+ /* Reopen the new current tracer */
+ if (iter->trace->open)
+ iter->trace->open(iter);
+ }
mutex_unlock(&trace_types_lock);
#ifdef CONFIG_TRACER_MAX_TRACE
@@ -3626,9 +4191,6 @@ static void *s_start(struct seq_file *m, loff_t *pos)
return ERR_PTR(-EBUSY);
#endif
- if (!iter->snapshot)
- atomic_inc(&trace_record_taskinfo_disabled);
-
if (*pos != iter->pos) {
iter->ent = NULL;
iter->cpu = 0;
@@ -3671,9 +4233,6 @@ static void s_stop(struct seq_file *m, void *p)
return;
#endif
- if (!iter->snapshot)
- atomic_dec(&trace_record_taskinfo_disabled);
-
trace_access_unlock(iter->cpu_file);
trace_event_read_unlock();
}
@@ -3743,14 +4302,15 @@ unsigned long trace_total_entries(struct trace_array *tr)
static void print_lat_help_header(struct seq_file *m)
{
- seq_puts(m, "# _------=> CPU# \n"
- "# / _-----=> irqs-off \n"
- "# | / _----=> need-resched \n"
- "# || / _---=> hardirq/softirq \n"
- "# ||| / _--=> preempt-depth \n"
- "# |||| / delay \n"
- "# cmd pid ||||| time | caller \n"
- "# \\ / ||||| \\ | / \n");
+ seq_puts(m, "# _------=> CPU# \n"
+ "# / _-----=> irqs-off/BH-disabled\n"
+ "# | / _----=> need-resched \n"
+ "# || / _---=> hardirq/softirq \n"
+ "# ||| / _--=> preempt-depth \n"
+ "# |||| / _-=> migrate-disable \n"
+ "# ||||| / delay \n"
+ "# cmd pid |||||| time | caller \n"
+ "# \\ / |||||| \\ | / \n");
}
static void print_event_info(struct array_buffer *buf, struct seq_file *m)
@@ -3771,26 +4331,27 @@ static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
print_event_info(buf, m);
- seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
- seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
+ seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
+ seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
}
static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
unsigned int flags)
{
bool tgid = flags & TRACE_ITER_RECORD_TGID;
- const char *space = " ";
- int prec = tgid ? 10 : 2;
+ static const char space[] = " ";
+ int prec = tgid ? 12 : 2;
print_event_info(buf, m);
- seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
- seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
- seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
- seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
- seq_printf(m, "# %.*s||| / delay\n", prec, space);
- seq_printf(m, "# TASK-PID %.*sCPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID ");
- seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | ");
+ seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space);
+ seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
+ seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
+ seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
+ seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space);
+ seq_printf(m, "# %.*s|||| / delay\n", prec, space);
+ seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
+ seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | ");
}
void
@@ -3802,9 +4363,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
struct tracer *type = iter->trace;
unsigned long entries;
unsigned long total;
- const char *name = "preemption";
-
- name = type->name;
+ const char *name = type->name;
get_total_entries(buf, &total, &entries);
@@ -3818,17 +4377,11 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
entries,
total,
buf->cpu,
-#if defined(CONFIG_PREEMPT_NONE)
- "server",
-#elif defined(CONFIG_PREEMPT_VOLUNTARY)
- "desktop",
-#elif defined(CONFIG_PREEMPT)
- "preempt",
-#elif defined(CONFIG_PREEMPT_RT)
- "preempt_rt",
-#else
+ preempt_model_none() ? "server" :
+ preempt_model_voluntary() ? "desktop" :
+ preempt_model_full() ? "preempt" :
+ preempt_model_rt() ? "preempt_rt" :
"unknown",
-#endif
/* These are reserved for later use */
0, 0, 0, 0);
#ifdef CONFIG_SMP
@@ -3908,8 +4461,11 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
if (trace_seq_has_overflowed(s))
return TRACE_TYPE_PARTIAL_LINE;
- if (event)
+ if (event) {
+ if (tr->trace_flags & TRACE_ITER_FIELDS)
+ return print_event_fields(iter, event);
return event->funcs->trace(iter, sym_flags, event);
+ }
trace_seq_printf(s, "Unknown type %d\n", entry->type);
@@ -4202,7 +4758,11 @@ static int s_show(struct seq_file *m, void *v)
iter->leftover = ret;
} else {
- print_trace_line(iter);
+ ret = print_trace_line(iter);
+ if (ret == TRACE_TYPE_PARTIAL_LINE) {
+ iter->seq.full = 0;
+ trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
+ }
ret = trace_print_seq(m, &iter->seq);
/*
* If we overflow the seq_file buffer, then it will
@@ -4235,6 +4795,25 @@ static const struct seq_operations tracer_seq_ops = {
.show = s_show,
};
+/*
+ * Note, as iter itself can be allocated and freed in different
+ * ways, this function is only used to free its content, and not
+ * the iterator itself. The only requirement for all the allocations
+ * is that they must zero all fields (kzalloc), as freeing works with
+ * either allocated content or NULL.
+ */
+static void free_trace_iter_content(struct trace_iterator *iter)
+{
+ /* The fmt is either NULL, allocated or points to static_fmt_buf */
+ if (iter->fmt != static_fmt_buf)
+ kfree(iter->fmt);
+
+ kfree(iter->temp);
+ kfree(iter->buffer_iter);
+ mutex_destroy(&iter->mutex);
+ free_cpumask_var(iter->started);
+}
+
static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
@@ -4267,15 +4846,17 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
iter->temp_size = 128;
/*
- * We make a copy of the current tracer to avoid concurrent
- * changes on it while we are reading.
+ * trace_event_printf() may need to modify the given format
+ * string to replace %p with %px so that it shows the real address
+ * instead of a hashed value. However, that is only needed for
+ * event tracing; other tracers may not need it. Defer the
+ * allocation until it is needed.
*/
- mutex_lock(&trace_types_lock);
- iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
- if (!iter->trace)
- goto fail;
+ iter->fmt = NULL;
+ iter->fmt_size = 0;
- *iter->trace = *tr->current_trace;
+ mutex_lock(&trace_types_lock);
+ iter->trace = tr->current_trace;
if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
goto fail;
@@ -4340,9 +4921,7 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
fail:
mutex_unlock(&trace_types_lock);
- kfree(iter->trace);
- kfree(iter->temp);
- kfree(iter->buffer_iter);
+ free_trace_iter_content(iter);
release:
seq_release_private(inode, file);
return ERR_PTR(-ENOMEM);
@@ -4383,6 +4962,60 @@ int tracing_open_generic_tr(struct inode *inode, struct file *filp)
return 0;
}
+/*
+ * The private pointer of the inode is the trace_event_file.
+ * Update the tr ref count associated to it.
+ */
+int tracing_open_file_tr(struct inode *inode, struct file *filp)
+{
+ struct trace_event_file *file = inode->i_private;
+ int ret;
+
+ ret = tracing_check_open_get_tr(file->tr);
+ if (ret)
+ return ret;
+
+ mutex_lock(&event_mutex);
+
+ /* Fail if the file is marked for removal */
+ if (file->flags & EVENT_FILE_FL_FREED) {
+ trace_array_put(file->tr);
+ ret = -ENODEV;
+ } else {
+ event_file_get(file);
+ }
+
+ mutex_unlock(&event_mutex);
+ if (ret)
+ return ret;
+
+ filp->private_data = inode->i_private;
+
+ return 0;
+}
+
+int tracing_release_file_tr(struct inode *inode, struct file *filp)
+{
+ struct trace_event_file *file = inode->i_private;
+
+ trace_array_put(file->tr);
+ event_file_put(file);
+
+ return 0;
+}
+
+int tracing_single_release_file_tr(struct inode *inode, struct file *filp)
+{
+ tracing_release_file_tr(inode, filp);
+ return single_release(inode, filp);
+}
+
+static int tracing_mark_open(struct inode *inode, struct file *filp)
+{
+ stream_open(inode, filp);
+ return tracing_open_generic_tr(inode, filp);
+}
+
static int tracing_release(struct inode *inode, struct file *file)
{
struct trace_array *tr = inode->i_private;
@@ -4415,17 +5048,13 @@ static int tracing_release(struct inode *inode, struct file *file)
mutex_unlock(&trace_types_lock);
- mutex_destroy(&iter->mutex);
- free_cpumask_var(iter->started);
- kfree(iter->temp);
- kfree(iter->trace);
- kfree(iter->buffer_iter);
+ free_trace_iter_content(iter);
seq_release_private(inode, file);
return 0;
}
-static int tracing_release_generic_tr(struct inode *inode, struct file *file)
+int tracing_release_generic_tr(struct inode *inode, struct file *file)
{
struct trace_array *tr = inode->i_private;
@@ -4612,6 +5241,8 @@ loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
static const struct file_operations tracing_fops = {
.open = tracing_open,
.read = seq_read,
+ .read_iter = seq_read_iter,
+ .splice_read = copy_splice_read,
.write = tracing_write_stub,
.llseek = tracing_lseek,
.release = tracing_release,
@@ -4671,11 +5302,17 @@ int tracing_set_cpumask(struct trace_array *tr,
!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
+#ifdef CONFIG_TRACER_MAX_TRACE
+ ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu);
+#endif
}
if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
cpumask_test_cpu(cpu, tracing_cpumask_new)) {
atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
+#ifdef CONFIG_TRACER_MAX_TRACE
+ ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu);
+#endif
}
}
arch_spin_unlock(&tr->max_lock);
@@ -4694,7 +5331,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
cpumask_var_t tracing_cpumask_new;
int err;
- if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
return -ENOMEM;
err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
@@ -4799,6 +5436,8 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
+ int *map;
+
if ((mask == TRACE_ITER_RECORD_TGID) ||
(mask == TRACE_ITER_RECORD_CMD))
lockdep_assert_held(&event_mutex);
@@ -4821,10 +5460,19 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
trace_event_enable_cmd_record(enabled);
if (mask == TRACE_ITER_RECORD_TGID) {
- if (!tgid_map)
- tgid_map = kvcalloc(PID_MAX_DEFAULT + 1,
- sizeof(*tgid_map),
- GFP_KERNEL);
+ if (!tgid_map) {
+ tgid_map_max = pid_max;
+ map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
+ GFP_KERNEL);
+
+ /*
+ * Pairs with smp_load_acquire() in
+ * trace_find_tgid_ptr() to ensure that if it observes
+ * the tgid_map we just allocated then it also observes
+ * the corresponding tgid_map_max value.
+ */
+ smp_store_release(&tgid_map, map);
+ }
if (!tgid_map) {
tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
return -ENOMEM;
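/*
 * Editorial sketch, not part of the patch: the smp_store_release() above
 * pairs with an smp_load_acquire() in trace_find_tgid_ptr(), which lives
 * outside this hunk. The load side would look roughly like this, so a
 * reader that sees the new tgid_map also sees the matching tgid_map_max:
 */
int *trace_find_tgid_ptr(int pid)
{
	/* Acquire pairs with the release of tgid_map in set_tracer_flag() */
	int *map = smp_load_acquire(&tgid_map);

	if (unlikely(!map || pid > tgid_map_max))
		return NULL;

	return &map[pid];
}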
@@ -4976,7 +5624,7 @@ static const char readme_msg[] =
" error_log\t- error log for failed commands (that support it)\n"
" buffer_size_kb\t- view and modify size of per cpu buffer\n"
" buffer_total_size_kb - view total size of all cpu buffers\n\n"
- " trace_clock\t\t-change the clock used to order events\n"
+ " trace_clock\t\t- change the clock used to order events\n"
" local: Per cpu clock but may not be synced across CPUs\n"
" global: Synced across CPUs but slows tracing down.\n"
" counter: Not a clock, but just an increment\n"
@@ -4985,7 +5633,7 @@ static const char readme_msg[] =
#ifdef CONFIG_X86_64
" x86-tsc: TSC cycle counter\n"
#endif
- "\n timestamp_mode\t-view the mode used to timestamp events\n"
+ "\n timestamp_mode\t- view the mode used to timestamp events\n"
" delta: Delta difference against a buffer-wide timestamp\n"
" absolute: Absolute (standalone) timestamp\n"
"\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
@@ -5073,37 +5721,52 @@ static const char readme_msg[] =
" uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
"\t\t\t Write into this file to define/undefine new trace events.\n"
#endif
-#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
+#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) || \
+ defined(CONFIG_FPROBE_EVENTS)
"\t accepts: event-definitions (one definition per line)\n"
- "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
- "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
+#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
+ "\t Format: p[:[<group>/][<event>]] <place> [<args>]\n"
+ "\t r[maxactive][:[<group>/][<event>]] <place> [<args>]\n"
+#endif
+#ifdef CONFIG_FPROBE_EVENTS
+ "\t f[:[<group>/][<event>]] <func-name>[%return] [<args>]\n"
+ "\t t[:[<group>/][<event>]] <tracepoint> [<args>]\n"
+#endif
#ifdef CONFIG_HIST_TRIGGERS
"\t s:[synthetic/]<event> <field> [<field>]\n"
#endif
- "\t -:[<group>/]<event>\n"
+ "\t e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>] [if <filter>]\n"
+ "\t -:[<group>/][<event>]\n"
#ifdef CONFIG_KPROBE_EVENTS
"\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
- "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
+ "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
#endif
#ifdef CONFIG_UPROBE_EVENTS
- " place (uprobe): <path>:<offset>[(ref_ctr_offset)]\n"
+ " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
#endif
"\t args: <name>=fetcharg[:type]\n"
- "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
+ "\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
+#ifdef CONFIG_PROBE_EVENTS_BTF_ARGS
"\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
+ "\t <argname>[->field[->field|.field...]],\n"
+#else
+ "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
+#endif
#else
"\t $stack<index>, $stack, $retval, $comm,\n"
#endif
"\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
- "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
+ "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, char, string, symbol,\n"
"\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
- "\t <type>\\[<array-size>\\]\n"
+ "\t symstr, <type>\\[<array-size>\\]\n"
#ifdef CONFIG_HIST_TRIGGERS
"\t field: <stype> <name>;\n"
"\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
"\t [unsigned] char/int/long\n"
#endif
+ "\t efield: For event probes ('e' types), the field is one of the fields\n"
+ "\t of the <attached-group>/<attached-event>.\n"
#endif
" events/\t\t- Directory containing all trace event subsystems:\n"
" enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
@@ -5151,18 +5814,34 @@ static const char readme_msg[] =
#ifdef CONFIG_HIST_TRIGGERS
" hist trigger\t- If set, event hits are aggregated into a hash table\n"
"\t Format: hist:keys=<field1[,field2,...]>\n"
+ "\t [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n"
"\t [:values=<field1[,field2,...]>]\n"
"\t [:sort=<field1[,field2,...]>]\n"
"\t [:size=#entries]\n"
"\t [:pause][:continue][:clear]\n"
"\t [:name=histname1]\n"
+ "\t [:nohitcount]\n"
"\t [:<handler>.<action>]\n"
"\t [if <filter>]\n\n"
+ "\t Note, special fields can be used as well:\n"
+ "\t common_timestamp - to record current timestamp\n"
+ "\t common_cpu - to record the CPU the event happened on\n"
+ "\n"
+ "\t A hist trigger variable can be:\n"
+ "\t - a reference to a field e.g. x=current_timestamp,\n"
+ "\t - a reference to another variable e.g. y=$x,\n"
+ "\t - a numeric literal: e.g. ms_per_sec=1000,\n"
+ "\t - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n"
+ "\n"
+ "\t hist trigger arithmetic expressions support addition(+), subtraction(-),\n"
+ "\t multiplication(*) and division(/) operators. An operand can be either a\n"
+ "\t variable reference, field or numeric literal.\n"
+ "\n"
"\t When a matching event is hit, an entry is added to a hash\n"
"\t table using the key(s) and value(s) named, and the value of a\n"
"\t sum called 'hitcount' is incremented. Keys and values\n"
"\t correspond to fields in the event's format description. Keys\n"
- "\t can be any field, or the special string 'stacktrace'.\n"
+ "\t can be any field, or the special string 'common_stacktrace'.\n"
"\t Compound keys consisting of up to two fields can be specified\n"
"\t by the 'keys' keyword. Values must correspond to numeric\n"
"\t fields. Sort keys consisting of up to two fields can be\n"
@@ -5187,7 +5866,10 @@ static const char readme_msg[] =
"\t .execname display a common_pid as a program name\n"
"\t .syscall display a syscall id as a syscall name\n"
"\t .log2 display log2 value rather than raw number\n"
- "\t .usecs display a common_timestamp in microseconds\n\n"
+ "\t .buckets=size display values in groups of size rather than raw number\n"
+ "\t .usecs display a common_timestamp in microseconds\n"
+ "\t .percent display a number of percentage value\n"
+ "\t .graph display a bar-graph of a value\n\n"
"\t The 'pause' parameter can be used to pause an existing hist\n"
"\t trigger or to start a hist trigger but not log any events\n"
"\t until told to do so. 'continue' can be used to start or\n"
@@ -5195,6 +5877,8 @@ static const char readme_msg[] =
"\t The 'clear' parameter will clear the contents of a running\n"
"\t hist trigger and leave its current paused/active state\n"
"\t unchanged.\n\n"
+ "\t The 'nohitcount' (or NOHC) parameter will suppress display of\n"
+ "\t raw hitcount in the histogram.\n\n"
"\t The enable_hist and disable_hist triggers can be used to\n"
"\t have one event conditionally start and stop another event's\n"
"\t already-attached hist trigger. The syntax is analogous to\n"
@@ -5210,7 +5894,12 @@ static const char readme_msg[] =
"\t trace(<synthetic_event>,param list) - generate synthetic event\n"
"\t save(field,...) - save current event fields\n"
#ifdef CONFIG_TRACER_SNAPSHOT
- "\t snapshot() - snapshot the trace buffer\n"
+ "\t snapshot() - snapshot the trace buffer\n\n"
+#endif
+#ifdef CONFIG_SYNTH_EVENTS
+ " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
+ "\t Write into this file to define/undefine new synthetic events.\n"
+ "\t example: echo 'myevent u64 lat; char name[]; long[] stack' >> synthetic_events\n"
#endif
#endif
;
@@ -5231,37 +5920,16 @@ static const struct file_operations tracing_readme_fops = {
static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
{
- int *ptr = v;
-
- if (*pos || m->count)
- ptr++;
-
- (*pos)++;
-
- for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
- if (trace_find_tgid(*ptr))
- return ptr;
- }
+ int pid = ++(*pos);
- return NULL;
+ return trace_find_tgid_ptr(pid);
}
static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
{
- void *v;
- loff_t l = 0;
+ int pid = *pos;
- if (!tgid_map)
- return NULL;
-
- v = &tgid_map[0];
- while (l <= *pos) {
- v = saved_tgids_next(m, v, &l);
- if (!v)
- return NULL;
- }
-
- return v;
+ return trace_find_tgid_ptr(pid);
}
static void saved_tgids_stop(struct seq_file *m, void *v)
@@ -5270,9 +5938,14 @@ static void saved_tgids_stop(struct seq_file *m, void *v)
static int saved_tgids_show(struct seq_file *m, void *v)
{
- int pid = (int *)v - tgid_map;
+ int *entry = (int *)v;
+ int pid = entry - tgid_map;
+ int tgid = *entry;
- seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
+ if (tgid == 0)
+ return SEQ_SKIP;
+
+ seq_printf(m, "%d %d\n", pid, tgid);
return 0;
}
@@ -5388,37 +6061,29 @@ tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
char buf[64];
int r;
+ preempt_disable();
arch_spin_lock(&trace_cmdline_lock);
r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
arch_spin_unlock(&trace_cmdline_lock);
+ preempt_enable();
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
-static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
-{
- kfree(s->saved_cmdlines);
- kfree(s->map_cmdline_to_pid);
- kfree(s);
-}
-
static int tracing_resize_saved_cmdlines(unsigned int val)
{
struct saved_cmdlines_buffer *s, *savedcmd_temp;
- s = kmalloc(sizeof(*s), GFP_KERNEL);
+ s = allocate_cmdlines_buffer(val);
if (!s)
return -ENOMEM;
- if (allocate_cmdlines_buffer(val, s) < 0) {
- kfree(s);
- return -ENOMEM;
- }
-
+ preempt_disable();
arch_spin_lock(&trace_cmdline_lock);
savedcmd_temp = savedcmd;
savedcmd = s;
arch_spin_unlock(&trace_cmdline_lock);
+ preempt_enable();
free_saved_cmdlines_buffer(savedcmd_temp);
return 0;
@@ -5606,7 +6271,7 @@ trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
static void trace_create_eval_file(struct dentry *d_tracer)
{
- trace_create_file("eval_map", 0444, d_tracer,
+ trace_create_file("eval_map", TRACE_MODE_READ, d_tracer,
NULL, &tracing_eval_map_fops);
}
@@ -5660,6 +6325,15 @@ static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
per_cpu_ptr(buf->data, cpu)->entries = val;
}
+static void update_buffer_entries(struct array_buffer *buf, int cpu)
+{
+ if (cpu == RING_BUFFER_ALL_CPUS) {
+ set_buffer_entries(buf, ring_buffer_size(buf->buffer, 0));
+ } else {
+ per_cpu_ptr(buf->data, cpu)->entries = ring_buffer_size(buf->buffer, cpu);
+ }
+}
+
#ifdef CONFIG_TRACER_MAX_TRACE
/* resize @tr's buffer to the size of @size_tr's entries */
static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
@@ -5698,19 +6372,21 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
* we use the size that was given, and we can forget about
* expanding it later.
*/
- ring_buffer_expanded = true;
+ trace_set_ring_buffer_expanded(tr);
/* May be called before buffers are initialized */
if (!tr->array_buffer.buffer)
return 0;
+ /* Do not allow tracing while resizing ring buffer */
+ tracing_stop_tr(tr);
+
ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
if (ret < 0)
- return ret;
+ goto out_start;
#ifdef CONFIG_TRACER_MAX_TRACE
- if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
- !tr->current_trace->use_max_tr)
+ if (!tr->allocated_snapshot)
goto out;
ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
@@ -5735,29 +6411,24 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
WARN_ON(1);
tracing_disabled = 1;
}
- return ret;
+ goto out_start;
}
- if (cpu == RING_BUFFER_ALL_CPUS)
- set_buffer_entries(&tr->max_buffer, size);
- else
- per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
+ update_buffer_entries(&tr->max_buffer, cpu);
out:
#endif /* CONFIG_TRACER_MAX_TRACE */
- if (cpu == RING_BUFFER_ALL_CPUS)
- set_buffer_entries(&tr->array_buffer, size);
- else
- per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
-
+ update_buffer_entries(&tr->array_buffer, cpu);
+ out_start:
+ tracing_start_tr(tr);
return ret;
}
ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
unsigned long size, int cpu_id)
{
- int ret = size;
+ int ret;
mutex_lock(&trace_types_lock);
@@ -5782,6 +6453,7 @@ out:
/**
* tracing_update_buffers - used by tracing facility to expand ring buffers
+ * @tr: The tracing instance
*
* To save on memory when the tracing is never used on a system with it
* configured in. The ring buffers are set to a minimum size. But once
@@ -5790,13 +6462,13 @@ out:
*
* This function is to be called when a tracer is about to be used.
*/
-int tracing_update_buffers(void)
+int tracing_update_buffers(struct trace_array *tr)
{
int ret = 0;
mutex_lock(&trace_types_lock);
- if (!ring_buffer_expanded)
- ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
+ if (!tr->ring_buffer_expanded)
+ ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
RING_BUFFER_ALL_CPUS);
mutex_unlock(&trace_types_lock);
@@ -5825,12 +6497,18 @@ static void tracing_set_nop(struct trace_array *tr)
tr->current_trace = &nop_trace;
}
+static bool tracer_options_updated;
+
static void add_tracer_options(struct trace_array *tr, struct tracer *t)
{
/* Only enable if the directory has been created already. */
if (!tr->dir)
return;
+ /* Only create trace option files after update_tracer_options finish */
+ if (!tracer_options_updated)
+ return;
+
create_trace_option_files(tr, t);
}
@@ -5844,7 +6522,7 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
mutex_lock(&trace_types_lock);
- if (!ring_buffer_expanded) {
+ if (!tr->ring_buffer_expanded) {
ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
RING_BUFFER_ALL_CPUS);
if (ret < 0)
@@ -5865,10 +6543,12 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
#ifdef CONFIG_TRACER_SNAPSHOT
if (t->use_max_tr) {
+ local_irq_disable();
arch_spin_lock(&tr->max_lock);
if (tr->cond_snapshot)
ret = -EBUSY;
arch_spin_unlock(&tr->max_lock);
+ local_irq_enable();
if (ret)
goto out;
}
@@ -5887,7 +6567,7 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
}
/* If trace pipe files are being read, we can't change the tracer */
- if (tr->current_trace->ref) {
+ if (tr->trace_ref) {
ret = -EBUSY;
goto out;
}
@@ -5899,12 +6579,12 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
if (tr->current_trace->reset)
tr->current_trace->reset(tr);
+#ifdef CONFIG_TRACER_MAX_TRACE
+ had_max_tr = tr->current_trace->use_max_tr;
+
/* Current trace needs to be nop_trace before synchronize_rcu */
tr->current_trace = &nop_trace;
-#ifdef CONFIG_TRACER_MAX_TRACE
- had_max_tr = tr->allocated_snapshot;
-
if (had_max_tr && !t->use_max_tr) {
/*
* We need to make sure that the update_max_tr sees that
@@ -5916,14 +6596,14 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
synchronize_rcu();
free_snapshot(tr);
}
-#endif
-#ifdef CONFIG_TRACER_MAX_TRACE
- if (t->use_max_tr && !had_max_tr) {
+ if (t->use_max_tr && !tr->allocated_snapshot) {
ret = tracing_alloc_snapshot_instance(tr);
if (ret < 0)
goto out;
}
+#else
+ tr->current_trace = &nop_trace;
#endif
if (t->init) {
@@ -5947,7 +6627,7 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
{
struct trace_array *tr = filp->private_data;
char buf[MAX_TRACER_SIZE+1];
- int i;
+ char *name;
size_t ret;
int err;
@@ -5961,11 +6641,9 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
buf[cnt] = 0;
- /* strip ending whitespace. */
- for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
- buf[i] = 0;
+ name = strim(buf);
- err = tracing_set_tracer(tr, buf);
+ err = tracing_set_tracer(tr, name);
if (err)
return err;
@@ -6036,28 +6714,58 @@ out:
return ret;
}
-#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
+#ifdef CONFIG_TRACER_MAX_TRACE
static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
+ struct trace_array *tr = filp->private_data;
+
+ return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos);
}
static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
+ struct trace_array *tr = filp->private_data;
+
+ return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos);
}
#endif
+static int open_pipe_on_cpu(struct trace_array *tr, int cpu)
+{
+ if (cpu == RING_BUFFER_ALL_CPUS) {
+ if (cpumask_empty(tr->pipe_cpumask)) {
+ cpumask_setall(tr->pipe_cpumask);
+ return 0;
+ }
+ } else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) {
+ cpumask_set_cpu(cpu, tr->pipe_cpumask);
+ return 0;
+ }
+ return -EBUSY;
+}
+
+static void close_pipe_on_cpu(struct trace_array *tr, int cpu)
+{
+ if (cpu == RING_BUFFER_ALL_CPUS) {
+ WARN_ON(!cpumask_full(tr->pipe_cpumask));
+ cpumask_clear(tr->pipe_cpumask);
+ } else {
+ WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask));
+ cpumask_clear_cpu(cpu, tr->pipe_cpumask);
+ }
+}
+
static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
struct trace_array *tr = inode->i_private;
struct trace_iterator *iter;
+ int cpu;
int ret;
ret = tracing_check_open_get_tr(tr);
@@ -6065,13 +6773,16 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
return ret;
mutex_lock(&trace_types_lock);
+ cpu = tracing_get_cpu(inode);
+ ret = open_pipe_on_cpu(tr, cpu);
+ if (ret)
+ goto fail_pipe_on_cpu;
/* create a buffer to store the information to pass to userspace */
iter = kzalloc(sizeof(*iter), GFP_KERNEL);
if (!iter) {
ret = -ENOMEM;
- __trace_array_put(tr);
- goto out;
+ goto fail_alloc_iter;
}
trace_seq_init(&iter->seq);
@@ -6094,7 +6805,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
iter->tr = tr;
iter->array_buffer = &tr->array_buffer;
- iter->cpu_file = tracing_get_cpu(inode);
+ iter->cpu_file = cpu;
mutex_init(&iter->mutex);
filp->private_data = iter;
@@ -6103,13 +6814,16 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
nonseekable_open(inode, filp);
- tr->current_trace->ref++;
-out:
+ tr->trace_ref++;
+
mutex_unlock(&trace_types_lock);
return ret;
fail:
kfree(iter);
+fail_alloc_iter:
+ close_pipe_on_cpu(tr, cpu);
+fail_pipe_on_cpu:
__trace_array_put(tr);
mutex_unlock(&trace_types_lock);
return ret;
@@ -6122,15 +6836,14 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
mutex_lock(&trace_types_lock);
- tr->current_trace->ref--;
+ tr->trace_ref--;
if (iter->trace->pipe_close)
iter->trace->pipe_close(iter);
-
+ close_pipe_on_cpu(tr, iter->cpu_file);
mutex_unlock(&trace_types_lock);
- free_cpumask_var(iter->started);
- mutex_destroy(&iter->mutex);
+ free_trace_iter_content(iter);
kfree(iter);
trace_array_put(tr);
@@ -6154,7 +6867,7 @@ trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_tabl
return EPOLLIN | EPOLLRDNORM;
else
return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
- filp, poll_table);
+ filp, poll_table, iter->tr->buffer_percent);
}
static __poll_t
@@ -6243,16 +6956,13 @@ waitagain:
goto out;
}
- if (cnt >= PAGE_SIZE)
- cnt = PAGE_SIZE - 1;
+ if (cnt >= TRACE_SEQ_BUFFER_SIZE)
+ cnt = TRACE_SEQ_BUFFER_SIZE - 1;
/* reset all but tr, trace, and overruns */
- memset(&iter->seq, 0,
- sizeof(struct trace_iterator) -
- offsetof(struct trace_iterator, seq));
+ trace_iterator_reset(iter);
cpumask_clear(iter->started);
trace_seq_init(&iter->seq);
- iter->pos = -1;
trace_event_read_lock();
trace_access_lock(iter->cpu_file);
@@ -6262,7 +6972,20 @@ waitagain:
ret = print_trace_line(iter);
if (ret == TRACE_TYPE_PARTIAL_LINE) {
- /* don't print partial lines */
+ /*
+ * If one print_trace_line() fills the entire trace_seq in one shot,
+ * trace_seq_to_user() will return -EBUSY because save_len == 0.
+ * In this case, we need to consume it; otherwise, the loop will peek
+ * this event again next time, resulting in an infinite loop.
+ */
+ if (save_len == 0) {
+ iter->seq.full = 0;
+ trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
+ trace_consume(iter);
+ break;
+ }
+
+ /* In other cases, don't print partial lines */
iter->seq.seq.len = save_len;
break;
}
@@ -6285,7 +7008,7 @@ waitagain:
/* Now copy what we have to the user */
sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
- if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
+ if (iter->seq.readpos >= trace_seq_used(&iter->seq))
trace_seq_init(&iter->seq);
/*
@@ -6471,7 +7194,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
}
if (buf_size_same) {
- if (!ring_buffer_expanded)
+ if (!tr->ring_buffer_expanded)
r = sprintf(buf, "%lu (expanded: %lu)\n",
size >> 10,
trace_buf_size >> 10);
@@ -6528,10 +7251,10 @@ tracing_total_entries_read(struct file *filp, char __user *ubuf,
mutex_lock(&trace_types_lock);
for_each_tracing_cpu(cpu) {
size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
- if (!ring_buffer_expanded)
+ if (!tr->ring_buffer_expanded)
expanded_size += trace_buf_size >> 10;
}
- if (ring_buffer_expanded)
+ if (tr->ring_buffer_expanded)
r = sprintf(buf, "%lu\n", size);
else
r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
@@ -6570,6 +7293,8 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp)
return 0;
}
+#define TRACE_MARKER_MAX_SIZE 4096
+
static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *fpos)
@@ -6579,9 +7304,9 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
enum event_trigger_type tt = ETT_NONE;
struct trace_buffer *buffer;
struct print_entry *entry;
- unsigned long irq_flags;
+ int meta_size;
ssize_t written;
- int size;
+ size_t size;
int len;
/* Used in tracing_mark_raw_write() as well */
@@ -6594,13 +7319,15 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
if (!(tr->trace_flags & TRACE_ITER_MARKERS))
return -EINVAL;
- if (cnt > TRACE_BUF_SIZE)
- cnt = TRACE_BUF_SIZE;
+ if ((ssize_t)cnt < 0)
+ return -EINVAL;
- BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
+ if (cnt > TRACE_MARKER_MAX_SIZE)
+ cnt = TRACE_MARKER_MAX_SIZE;
- local_save_flags(irq_flags);
- size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
+ meta_size = sizeof(*entry) + 2; /* add '\0' and possible '\n' */
+ again:
+ size = cnt + meta_size;
/* If less than "<faulted>", then make sure we can still add that */
if (cnt < FAULTED_SIZE)
@@ -6608,10 +7335,26 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
buffer = tr->array_buffer.buffer;
event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
- irq_flags, preempt_count());
- if (unlikely(!event))
+ tracing_gen_ctx());
+ if (unlikely(!event)) {
+ /*
+ * If the size was greater than what was allowed, then
+ * make it smaller and try again.
+ */
+ if (size > ring_buffer_max_event_size(buffer)) {
+ /* cnt < FAULTED size should never be bigger than max */
+ if (WARN_ON_ONCE(cnt < FAULTED_SIZE))
+ return -EBADF;
+ cnt = ring_buffer_max_event_size(buffer) - meta_size;
+ /* The above should only happen once */
+ if (WARN_ON_ONCE(cnt + meta_size == size))
+ return -EBADF;
+ goto again;
+ }
+
/* Ring buffer disabled, return as if not open for write */
return -EBADF;
+ }
entry = ring_buffer_event_data(event);
entry->ip = _THIS_IP_;
@@ -6623,12 +7366,11 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
written = -EFAULT;
} else
written = cnt;
- len = cnt;
if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
/* do not add \n before testing triggers, but add \0 */
entry->buf[cnt] = '\0';
- tt = event_triggers_call(tr->trace_marker_file, entry, event);
+ tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
}
if (entry->buf[cnt - 1] != '\n') {
@@ -6637,20 +7379,16 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
} else
entry->buf[cnt] = '\0';
+ if (static_branch_unlikely(&trace_marker_exports_enabled))
+ ftrace_exports(event, TRACE_EXPORT_MARKER);
__buffer_unlock_commit(buffer, event);
if (tt)
event_triggers_post_call(tr->trace_marker_file, tt);
- if (written > 0)
- *fpos += written;
-
return written;
}
-/* Limit it for now to 3K (including tag) */
-#define RAW_DATA_MAX_SIZE (1024*3)
-
static ssize_t
tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *fpos)
@@ -6659,7 +7397,6 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
struct ring_buffer_event *event;
struct trace_buffer *buffer;
struct raw_data_entry *entry;
- unsigned long irq_flags;
ssize_t written;
int size;
int len;
@@ -6673,22 +7410,20 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
return -EINVAL;
/* The marker must at least have a tag id */
- if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
+ if (cnt < sizeof(unsigned int))
return -EINVAL;
- if (cnt > TRACE_BUF_SIZE)
- cnt = TRACE_BUF_SIZE;
-
- BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
-
- local_save_flags(irq_flags);
size = sizeof(*entry) + cnt;
if (cnt < FAULT_SIZE_ID)
size += FAULT_SIZE_ID - cnt;
buffer = tr->array_buffer.buffer;
+
+ if (size > ring_buffer_max_event_size(buffer))
+ return -EINVAL;
+
event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
- irq_flags, preempt_count());
+ tracing_gen_ctx());
if (!event)
/* Ring buffer disabled, return as if not open for write */
return -EBADF;
@@ -6705,9 +7440,6 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
__buffer_unlock_commit(buffer, event);
- if (written > 0)
- *fpos += written;
-
return written;
}
@@ -6836,31 +7568,34 @@ static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
return ret;
}
-int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
+u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
+{
+ if (rbe == this_cpu_read(trace_buffered_event))
+ return ring_buffer_time_stamp(buffer);
+
+ return ring_buffer_event_time_stamp(buffer, rbe);
+}
+
+/*
+ * Set or disable using the per CPU trace_buffer_event when possible.
+ */
+int tracing_set_filter_buffering(struct trace_array *tr, bool set)
{
int ret = 0;
mutex_lock(&trace_types_lock);
- if (abs && tr->time_stamp_abs_ref++)
+ if (set && tr->no_filter_buffering_ref++)
goto out;
- if (!abs) {
- if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
+ if (!set) {
+ if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
ret = -EINVAL;
goto out;
}
- if (--tr->time_stamp_abs_ref)
- goto out;
+ --tr->no_filter_buffering_ref;
}
-
- ring_buffer_set_time_stamp_abs(tr->array_buffer.buffer, abs);
-
-#ifdef CONFIG_TRACER_MAX_TRACE
- if (tr->max_buffer.buffer)
- ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
-#endif
out:
mutex_unlock(&trace_types_lock);
@@ -6871,6 +7606,7 @@ struct ftrace_buffer_info {
struct trace_iterator iter;
void *spare;
unsigned int spare_cpu;
+ unsigned int spare_size;
unsigned int read;
};
@@ -6916,6 +7652,11 @@ out:
return ret;
}
+static void tracing_swap_cpu_buffer(void *tr)
+{
+ update_max_tr_single((struct trace_array *)tr, current, smp_processor_id());
+}
+
static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
loff_t *ppos)
@@ -6926,7 +7667,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
unsigned long val;
int ret;
- ret = tracing_update_buffers();
+ ret = tracing_update_buffers(tr);
if (ret < 0)
return ret;
@@ -6941,10 +7682,12 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
goto out;
}
+ local_irq_disable();
arch_spin_lock(&tr->max_lock);
if (tr->cond_snapshot)
ret = -EBUSY;
arch_spin_unlock(&tr->max_lock);
+ local_irq_enable();
if (ret)
goto out;
@@ -6972,13 +7715,15 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
ret = tracing_alloc_snapshot_instance(tr);
if (ret < 0)
break;
- local_irq_disable();
/* Now, we're going to swap */
- if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
+ if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
+ local_irq_disable();
update_max_tr(tr, current, smp_processor_id(), NULL);
- else
- update_max_tr_single(tr, current, iter->cpu_file);
- local_irq_enable();
+ local_irq_enable();
+ } else {
+ smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer,
+ (void *)tr, 1);
+ }
break;
default:
if (tr->allocated_snapshot) {
@@ -7057,20 +7802,22 @@ static const struct file_operations tracing_thresh_fops = {
.llseek = generic_file_llseek,
};
-#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
+#ifdef CONFIG_TRACER_MAX_TRACE
static const struct file_operations tracing_max_lat_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_generic_tr,
.read = tracing_max_lat_read,
.write = tracing_max_lat_write,
.llseek = generic_file_llseek,
+ .release = tracing_release_generic_tr,
};
#endif
static const struct file_operations set_tracer_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_generic_tr,
.read = tracing_set_trace_read,
.write = tracing_set_trace_write,
.llseek = generic_file_llseek,
+ .release = tracing_release_generic_tr,
};
static const struct file_operations tracing_pipe_fops = {
@@ -7104,16 +7851,14 @@ static const struct file_operations tracing_free_buffer_fops = {
};
static const struct file_operations tracing_mark_fops = {
- .open = tracing_open_generic_tr,
+ .open = tracing_mark_open,
.write = tracing_mark_write,
- .llseek = generic_file_llseek,
.release = tracing_release_generic_tr,
};
static const struct file_operations tracing_mark_raw_fops = {
- .open = tracing_open_generic_tr,
+ .open = tracing_mark_open,
.write = tracing_mark_raw_write,
- .llseek = generic_file_llseek,
.release = tracing_release_generic_tr,
};
@@ -7151,6 +7896,91 @@ static const struct file_operations snapshot_raw_fops = {
#endif /* CONFIG_TRACER_SNAPSHOT */
+/*
+ * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
+ * @filp: The active open file structure
+ * @ubuf: The userspace provided buffer holding the value to write
+ * @cnt: The maximum number of bytes to read
+ * @ppos: The current "file" position
+ *
+ * This function implements the write interface for a struct trace_min_max_param.
+ * The filp->private_data must point to a trace_min_max_param structure that
+ * defines where to write the value, the min and the max acceptable values,
+ * and a lock to protect the write.
+ */
+static ssize_t
+trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ struct trace_min_max_param *param = filp->private_data;
+ u64 val;
+ int err;
+
+ if (!param)
+ return -EFAULT;
+
+ err = kstrtoull_from_user(ubuf, cnt, 10, &val);
+ if (err)
+ return err;
+
+ if (param->lock)
+ mutex_lock(param->lock);
+
+ if (param->min && val < *param->min)
+ err = -EINVAL;
+
+ if (param->max && val > *param->max)
+ err = -EINVAL;
+
+ if (!err)
+ *param->val = val;
+
+ if (param->lock)
+ mutex_unlock(param->lock);
+
+ if (err)
+ return err;
+
+ return cnt;
+}
+
+/*
+ * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
+ * @filp: The active open file structure
+ * @ubuf: The userspace provided buffer to read value into
+ * @cnt: The maximum number of bytes to read
+ * @ppos: The current "file" position
+ *
+ * This function implements the read interface for a struct trace_min_max_param.
+ * The filp->private_data must point to a trace_min_max_param struct with valid
+ * data.
+ */
+static ssize_t
+trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ struct trace_min_max_param *param = filp->private_data;
+ char buf[U64_STR_SIZE];
+ int len;
+ u64 val;
+
+ if (!param)
+ return -EFAULT;
+
+ val = *param->val;
+
+ if (cnt > sizeof(buf))
+ cnt = sizeof(buf);
+
+ len = snprintf(buf, sizeof(buf), "%llu\n", val);
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
+}
+
+const struct file_operations trace_min_max_fops = {
+ .open = tracing_open_generic,
+ .read = trace_min_max_read,
+ .write = trace_min_max_write,
+};
+
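A hedged sketch of how a tracer might use these fops: the file's private data must be a struct trace_min_max_param (as declared alongside these fops and used by the osnoise tracer), pointing at the value and its optional bounds. All names below are illustrative:

static u64 example_val = 50;
static u64 example_min = 1;
static u64 example_max = 100;
static DEFINE_MUTEX(example_lock);

static struct trace_min_max_param example_param = {
	.lock	= &example_lock,
	.val	= &example_val,
	.min	= &example_min,
	.max	= &example_max,
};

static void example_create_threshold_file(struct dentry *parent)
{
	/* Reads show example_val; writes are clamped to [example_min, example_max] */
	trace_create_file("example_threshold", TRACE_MODE_WRITE, parent,
			  &example_param, &trace_min_max_fops);
}
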
#define TRACING_LOG_ERRS_MAX 8
#define TRACING_LOG_LOC_MAX 128
@@ -7159,7 +7989,7 @@ static const struct file_operations snapshot_raw_fops = {
struct err_info {
const char **errs; /* ptr to loc-specific array of err strings */
u8 type; /* index into errs -> specific err string */
- u8 pos; /* MAX_FILTER_STR_VAL = 256 */
+ u16 pos; /* caret position */
u64 ts;
};
@@ -7167,25 +7997,53 @@ struct tracing_log_err {
struct list_head list;
struct err_info info;
char loc[TRACING_LOG_LOC_MAX]; /* err location */
- char cmd[MAX_FILTER_STR_VAL]; /* what caused err */
+ char *cmd; /* what caused err */
};
static DEFINE_MUTEX(tracing_err_log_lock);
-static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
+static struct tracing_log_err *alloc_tracing_log_err(int len)
{
struct tracing_log_err *err;
+ err = kzalloc(sizeof(*err), GFP_KERNEL);
+ if (!err)
+ return ERR_PTR(-ENOMEM);
+
+ err->cmd = kzalloc(len, GFP_KERNEL);
+ if (!err->cmd) {
+ kfree(err);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return err;
+}
+
+static void free_tracing_log_err(struct tracing_log_err *err)
+{
+ kfree(err->cmd);
+ kfree(err);
+}
+
+static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr,
+ int len)
+{
+ struct tracing_log_err *err;
+ char *cmd;
+
if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
- err = kzalloc(sizeof(*err), GFP_KERNEL);
- if (!err)
- err = ERR_PTR(-ENOMEM);
- tr->n_err_log_entries++;
+ err = alloc_tracing_log_err(len);
+ if (PTR_ERR(err) != -ENOMEM)
+ tr->n_err_log_entries++;
return err;
}
-
+ cmd = kzalloc(len, GFP_KERNEL);
+ if (!cmd)
+ return ERR_PTR(-ENOMEM);
err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
+ kfree(err->cmd);
+ err->cmd = cmd;
list_del(&err->list);
return err;
@@ -7196,11 +8054,11 @@ static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
* @cmd: The tracing command that caused the error
* @str: The string to position the caret at within @cmd
*
- * Finds the position of the first occurence of @str within @cmd. The
+ * Finds the position of the first occurrence of @str within @cmd. The
* return value can be passed to tracing_log_err() for caret placement
* within @cmd.
*
- * Returns the index within @cmd of the first occurence of @str or 0
+ * Returns the index within @cmd of the first occurrence of @str or 0
* if @str was not found.
*/
unsigned int err_pos(char *cmd, const char *str)
@@ -7246,22 +8104,25 @@ unsigned int err_pos(char *cmd, const char *str)
*/
void tracing_log_err(struct trace_array *tr,
const char *loc, const char *cmd,
- const char **errs, u8 type, u8 pos)
+ const char **errs, u8 type, u16 pos)
{
struct tracing_log_err *err;
+ int len = 0;
if (!tr)
tr = &global_trace;
+ len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1;
+
mutex_lock(&tracing_err_log_lock);
- err = get_tracing_log_err(tr);
+ err = get_tracing_log_err(tr, len);
if (PTR_ERR(err) == -ENOMEM) {
mutex_unlock(&tracing_err_log_lock);
return;
}
snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
- snprintf(err->cmd, MAX_FILTER_STR_VAL,"\n" CMD_PREFIX "%s\n", cmd);
+ snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd);
err->info.errs = errs;
err->info.type = type;
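
With the command string now allocated per error, a caller can report arbitrarily long commands. A hedged sketch of how a command parser might combine err_pos() and tracing_log_err() (error table, strings and names below are illustrative):

static const char *example_errs[] = {
	"Missing colon",
	"Unknown field",
};

enum { EXAMPLE_ERR_MISSING_COLON, EXAMPLE_ERR_UNKNOWN_FIELD };

static void example_report(struct trace_array *tr, char *cmd, const char *bad_tok)
{
	/* The caret in the error_log file will point at @bad_tok within @cmd */
	tracing_log_err(tr, "example_cmd", cmd, example_errs,
			EXAMPLE_ERR_UNKNOWN_FIELD, err_pos(cmd, bad_tok));
}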
@@ -7279,7 +8140,7 @@ static void clear_tracing_err_log(struct trace_array *tr)
mutex_lock(&tracing_err_log_lock);
list_for_each_entry_safe(err, next, &tr->err_log, list) {
list_del(&err->list);
- kfree(err);
+ free_tracing_log_err(err);
}
tr->n_err_log_entries = 0;
@@ -7307,9 +8168,9 @@ static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
mutex_unlock(&tracing_err_log_lock);
}
-static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
+static void tracing_err_log_show_pos(struct seq_file *m, u16 pos)
{
- u8 i;
+ u16 i;
for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
seq_putc(m, ' ');
@@ -7392,7 +8253,7 @@ static const struct file_operations tracing_err_log_fops = {
.open = tracing_err_log_open,
.write = tracing_err_log_write,
.read = seq_read,
- .llseek = seq_lseek,
+ .llseek = tracing_lseek,
.release = tracing_err_log_release,
};
@@ -7406,7 +8267,7 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
if (ret)
return ret;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = kvzalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
trace_array_put(tr);
return -ENOMEM;
@@ -7424,7 +8285,7 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
filp->private_data = info;
- tr->current_trace->ref++;
+ tr->trace_ref++;
mutex_unlock(&trace_types_lock);
@@ -7450,6 +8311,8 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
{
struct ftrace_buffer_info *info = filp->private_data;
struct trace_iterator *iter = &info->iter;
+ void *trace_data;
+ int page_size;
ssize_t ret = 0;
ssize_t size;
@@ -7461,6 +8324,17 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
return -EBUSY;
#endif
+ page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
+
+ /* Make sure the spare matches the current sub buffer size */
+ if (info->spare) {
+ if (page_size != info->spare_size) {
+ ring_buffer_free_read_page(iter->array_buffer->buffer,
+ info->spare_cpu, info->spare);
+ info->spare = NULL;
+ }
+ }
+
if (!info->spare) {
info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
iter->cpu_file);
@@ -7469,19 +8343,20 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
info->spare = NULL;
} else {
info->spare_cpu = iter->cpu_file;
+ info->spare_size = page_size;
}
}
if (!info->spare)
return ret;
/* Do we have previous read data to read? */
- if (info->read < PAGE_SIZE)
+ if (info->read < page_size)
goto read;
again:
trace_access_lock(iter->cpu_file);
ret = ring_buffer_read_page(iter->array_buffer->buffer,
- &info->spare,
+ info->spare,
count,
iter->cpu_file, 0);
trace_access_unlock(iter->cpu_file);
@@ -7502,11 +8377,11 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
info->read = 0;
read:
- size = PAGE_SIZE - info->read;
+ size = page_size - info->read;
if (size > count)
size = count;
-
- ret = copy_to_user(ubuf, info->spare + info->read, size);
+ trace_data = ring_buffer_read_page_data(info->spare);
+ ret = copy_to_user(ubuf, trace_data + info->read, size);
if (ret == size)
return -EFAULT;
@@ -7518,6 +8393,20 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
return size;
}
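
With the sub-buffer rework, the spare page is treated as an opaque handle: ring_buffer_read_page() now takes the handle itself rather than its address, and the payload is reached through ring_buffer_read_page_data(). A condensed sketch of that flow, with the handle abbreviated to void * to match the ftrace_buffer_info field above:

static int example_read_one_subbuf(struct trace_buffer *buffer, int cpu, size_t len)
{
	void *spare, *data;
	int ret;

	spare = ring_buffer_alloc_read_page(buffer, cpu);
	if (IS_ERR(spare))
		return PTR_ERR(spare);

	ret = ring_buffer_read_page(buffer, spare, len, cpu, 0);
	if (ret >= 0) {
		data = ring_buffer_read_page_data(spare);
		/* consume 'data' here, up to the sub-buffer size */
	}

	ring_buffer_free_read_page(buffer, cpu, spare);
	return ret < 0 ? ret : 0;
}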
+static int tracing_buffers_flush(struct file *file, fl_owner_t id)
+{
+ struct ftrace_buffer_info *info = file->private_data;
+ struct trace_iterator *iter = &info->iter;
+
+ iter->wait_index++;
+ /* Make sure the waiters see the new wait_index */
+ smp_wmb();
+
+ ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
+
+ return 0;
+}
+
static int tracing_buffers_release(struct inode *inode, struct file *file)
{
struct ftrace_buffer_info *info = file->private_data;
@@ -7525,14 +8414,14 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
mutex_lock(&trace_types_lock);
- iter->tr->current_trace->ref--;
+ iter->tr->trace_ref--;
__trace_array_put(iter->tr);
if (info->spare)
ring_buffer_free_read_page(iter->array_buffer->buffer,
info->spare_cpu, info->spare);
- kfree(info);
+ kvfree(info);
mutex_unlock(&trace_types_lock);
@@ -7611,6 +8500,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
.spd_release = buffer_spd_release,
};
struct buffer_ref *ref;
+ int page_size;
int entries, i;
ssize_t ret = 0;
@@ -7619,13 +8509,14 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
return -EBUSY;
#endif
- if (*ppos & (PAGE_SIZE - 1))
+ page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
+ if (*ppos & (page_size - 1))
return -EINVAL;
- if (len & (PAGE_SIZE - 1)) {
- if (len < PAGE_SIZE)
+ if (len & (page_size - 1)) {
+ if (len < page_size)
return -EINVAL;
- len &= PAGE_MASK;
+ len &= (~(page_size - 1));
}
if (splice_grow_spd(pipe, &spd))
@@ -7635,7 +8526,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
trace_access_lock(iter->cpu_file);
entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
- for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
+ for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= page_size) {
struct page *page;
int r;
@@ -7656,7 +8547,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
}
ref->cpu = iter->cpu_file;
- r = ring_buffer_read_page(ref->buffer, &ref->page,
+ r = ring_buffer_read_page(ref->buffer, ref->page,
len, iter->cpu_file, 1);
if (r < 0) {
ring_buffer_free_read_page(ref->buffer, ref->cpu,
@@ -7665,14 +8556,14 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
break;
}
- page = virt_to_page(ref->page);
+ page = virt_to_page(ring_buffer_read_page_data(ref->page));
spd.pages[i] = page;
- spd.partial[i].len = PAGE_SIZE;
+ spd.partial[i].len = page_size;
spd.partial[i].offset = 0;
spd.partial[i].private = (unsigned long)ref;
spd.nr_pages++;
- *ppos += PAGE_SIZE;
+ *ppos += page_size;
entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
}
@@ -7682,6 +8573,8 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
/* did we read anything? */
if (!spd.nr_pages) {
+ long wait_index;
+
if (ret)
goto out;
@@ -7689,10 +8582,21 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
goto out;
- ret = wait_on_pipe(iter, iter->tr->buffer_percent);
+ wait_index = READ_ONCE(iter->wait_index);
+
+ ret = wait_on_pipe(iter, iter->snapshot ? 0 : iter->tr->buffer_percent);
if (ret)
goto out;
+ /* No need to wait after waking up when tracing is off */
+ if (!tracer_tracing_is_on(iter->tr))
+ goto out;
+
+ /* Make sure we see the new wait_index */
+ smp_rmb();
+ if (wait_index != iter->wait_index)
+ goto out;
+
goto again;
}
@@ -7703,12 +8607,35 @@ out:
return ret;
}
+/* An ioctl call with cmd 0 to the ring buffer file will wake up all waiters */
+static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct ftrace_buffer_info *info = file->private_data;
+ struct trace_iterator *iter = &info->iter;
+
+ if (cmd)
+ return -ENOIOCTLCMD;
+
+ mutex_lock(&trace_types_lock);
+
+ iter->wait_index++;
+ /* Make sure the waiters see the new wait_index */
+ smp_wmb();
+
+ ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
+
+ mutex_unlock(&trace_types_lock);
+ return 0;
+}
+
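The new ioctl gives user space a way to kick a reader that is blocked in read() or splice() on a trace_pipe_raw file descriptor, for instance from another thread that wants to shut the reader down. A minimal user-space sketch (the fd must be the same open file the reader is blocked on):

#include <sys/ioctl.h>

/* Wake any thread blocked on this per-CPU trace_pipe_raw fd */
static int wake_trace_reader(int fd)
{
	return ioctl(fd, 0, 0);	/* cmd 0 is the only accepted command */
}
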
static const struct file_operations tracing_buffers_fops = {
.open = tracing_buffers_open,
.read = tracing_buffers_read,
.poll = tracing_buffers_poll,
.release = tracing_buffers_release,
+ .flush = tracing_buffers_flush,
.splice_read = tracing_buffers_splice_read,
+ .unlocked_ioctl = tracing_buffers_ioctl,
.llseek = no_llseek,
};
@@ -7750,7 +8677,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
t, usec_rem);
- t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
+ t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
usec_rem = do_div(t, USEC_PER_SEC);
trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
} else {
@@ -7759,7 +8686,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
trace_seq_printf(s, "now ts: %llu\n",
- ring_buffer_time_stamp(trace_buf->buffer, cpu));
+ ring_buffer_time_stamp(trace_buf->buffer));
}
cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
@@ -8034,27 +8961,27 @@ tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
}
/* per cpu trace_pipe */
- trace_create_cpu_file("trace_pipe", 0444, d_cpu,
+ trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu,
tr, cpu, &tracing_pipe_fops);
/* per cpu trace */
- trace_create_cpu_file("trace", 0644, d_cpu,
+ trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu,
tr, cpu, &tracing_fops);
- trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
+ trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu,
tr, cpu, &tracing_buffers_fops);
- trace_create_cpu_file("stats", 0444, d_cpu,
+ trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu,
tr, cpu, &tracing_stats_fops);
- trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
+ trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
tr, cpu, &tracing_entries_fops);
#ifdef CONFIG_TRACER_SNAPSHOT
- trace_create_cpu_file("snapshot", 0644, d_cpu,
+ trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
tr, cpu, &snapshot_fops);
- trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
+ trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
tr, cpu, &snapshot_raw_fops);
#endif
}
@@ -8108,12 +9035,33 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
return cnt;
}
+static int tracing_open_options(struct inode *inode, struct file *filp)
+{
+ struct trace_option_dentry *topt = inode->i_private;
+ int ret;
+
+ ret = tracing_check_open_get_tr(topt->tr);
+ if (ret)
+ return ret;
+
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+static int tracing_release_options(struct inode *inode, struct file *file)
+{
+ struct trace_option_dentry *topt = file->private_data;
+
+ trace_array_put(topt->tr);
+ return 0;
+}
static const struct file_operations trace_options_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_options,
.read = trace_options_read,
.write = trace_options_write,
.llseek = generic_file_llseek,
+ .release = tracing_release_options,
};
/*
@@ -8260,8 +9208,8 @@ create_trace_option_file(struct trace_array *tr,
topt->opt = opt;
topt->tr = tr;
- topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
- &trace_options_fops);
+ topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
+ t_options, topt, &trace_options_fops);
}
@@ -8336,7 +9284,7 @@ create_trace_option_core_file(struct trace_array *tr,
if (!t_options)
return NULL;
- return trace_create_file(option, 0644, t_options,
+ return trace_create_file(option, TRACE_MODE_WRITE, t_options,
(void *)&tr->trace_flags_index[index],
&trace_options_core_fops);
}
@@ -8397,6 +9345,8 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
tracer_tracing_off(tr);
if (tr->current_trace->stop)
tr->current_trace->stop(tr);
+ /* Wake up any waiters */
+ ring_buffer_wake_waiters(buffer, RING_BUFFER_ALL_CPUS);
}
mutex_unlock(&trace_types_lock);
}
@@ -8443,9 +9393,6 @@ buffer_percent_write(struct file *filp, const char __user *ubuf,
if (val > 100)
return -EINVAL;
- if (!val)
- val = 1;
-
tr->buffer_percent = val;
(*ppos)++;
@@ -8461,6 +9408,103 @@ static const struct file_operations buffer_percent_fops = {
.llseek = default_llseek,
};
+static ssize_t
+buffer_subbuf_size_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ struct trace_array *tr = filp->private_data;
+ size_t size;
+ char buf[64];
+ int order;
+ int r;
+
+ order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
+ size = (PAGE_SIZE << order) / 1024;
+
+ r = sprintf(buf, "%zd\n", size);
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t
+buffer_subbuf_size_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct trace_array *tr = filp->private_data;
+ unsigned long val;
+ int old_order;
+ int order;
+ int pages;
+ int ret;
+
+ ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+ if (ret)
+ return ret;
+
+ val *= 1024; /* value passed in is in KB */
+
+ pages = DIV_ROUND_UP(val, PAGE_SIZE);
+ order = fls(pages - 1);
+
+ /* limit between 1 and 128 system pages */
+ if (order < 0 || order > 7)
+ return -EINVAL;
+
+ /* Do not allow tracing while changing the order of the ring buffer */
+ tracing_stop_tr(tr);
+
+ old_order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
+ if (old_order == order)
+ goto out;
+
+ ret = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, order);
+ if (ret)
+ goto out;
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+
+ if (!tr->allocated_snapshot)
+ goto out_max;
+
+ ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
+ if (ret) {
+ /* Put back the old order */
+ cnt = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, old_order);
+ if (WARN_ON_ONCE(cnt)) {
+ /*
+ * AARGH! We are left with different orders!
+ * The max buffer is our "snapshot" buffer.
+ * When a tracer needs a snapshot (one of the
+ * latency tracers), it swaps the max buffer
+ * with the saved snapshot. We succeeded in
+ * updating the order of the main buffer, but failed to
+ * update the order of the max buffer. But when we tried
+ * to reset the main buffer to the original size, we
+ * failed there too. This is very unlikely to
+ * happen, but if it does, warn and kill all
+ * tracing.
+ */
+ tracing_disabled = 1;
+ }
+ goto out;
+ }
+ out_max:
+#endif
+ (*ppos)++;
+ out:
+ if (ret)
+ cnt = ret;
+ tracing_start_tr(tr);
+ return cnt;
+}
+
+static const struct file_operations buffer_subbuf_size_fops = {
+ .open = tracing_open_generic_tr,
+ .read = buffer_subbuf_size_read,
+ .write = buffer_subbuf_size_write,
+ .release = tracing_release_generic_tr,
+ .llseek = default_llseek,
+};
+
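buffer_subbuf_size_kb accepts a size in kilobytes and rounds it up to a power-of-two number of system pages (order 0 through 7, i.e. 1 to 128 pages). A hedged user-space sketch of resizing the sub-buffers of the top-level instance (path assumes the usual tracefs mount point):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int set_subbuf_size_kb(const char *val)
{
	ssize_t ret;
	int fd;

	fd = open("/sys/kernel/tracing/buffer_subbuf_size_kb", O_WRONLY);
	if (fd < 0)
		return -1;

	/* e.g. "64" requests 64KB sub-buffers; the kernel rounds to a page order */
	ret = write(fd, val, strlen(val));
	close(fd);
	return ret < 0 ? -1 : 0;
}
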
static struct dentry *trace_instance_dir;
static void
@@ -8493,6 +9537,16 @@ allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size
return 0;
}
+static void free_trace_buffer(struct array_buffer *buf)
+{
+ if (buf->buffer) {
+ ring_buffer_free(buf->buffer);
+ buf->buffer = NULL;
+ free_percpu(buf->data);
+ buf->data = NULL;
+ }
+}
+
static int allocate_trace_buffers(struct trace_array *tr, int size)
{
int ret;
@@ -8505,34 +9559,17 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
ret = allocate_trace_buffer(tr, &tr->max_buffer,
allocate_snapshot ? size : 1);
if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
- ring_buffer_free(tr->array_buffer.buffer);
- tr->array_buffer.buffer = NULL;
- free_percpu(tr->array_buffer.data);
- tr->array_buffer.data = NULL;
+ free_trace_buffer(&tr->array_buffer);
return -ENOMEM;
}
tr->allocated_snapshot = allocate_snapshot;
- /*
- * Only the top level trace array gets its snapshot allocated
- * from the kernel command line.
- */
allocate_snapshot = false;
#endif
return 0;
}
-static void free_trace_buffer(struct array_buffer *buf)
-{
- if (buf->buffer) {
- ring_buffer_free(buf->buffer);
- buf->buffer = NULL;
- free_percpu(buf->data);
- buf->data = NULL;
- }
-}
-
static void free_trace_buffers(struct trace_array *tr)
{
if (!tr)
@@ -8565,6 +9602,7 @@ static void __update_tracer_options(struct trace_array *tr)
static void update_tracer_options(struct trace_array *tr)
{
mutex_lock(&trace_types_lock);
+ tracer_options_updated = true;
__update_tracer_options(tr);
mutex_unlock(&trace_types_lock);
}
@@ -8597,7 +9635,28 @@ struct trace_array *trace_array_find_get(const char *instance)
return tr;
}
-static struct trace_array *trace_array_create(const char *name)
+static int trace_array_create_dir(struct trace_array *tr)
+{
+ int ret;
+
+ tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
+ if (!tr->dir)
+ return -EINVAL;
+
+ ret = event_trace_add_tracer(tr->dir, tr);
+ if (ret) {
+ tracefs_remove(tr->dir);
+ return ret;
+ }
+
+ init_tracer_tracefs(tr, tr->dir);
+ __update_tracer_options(tr);
+
+ return ret;
+}
+
+static struct trace_array *
+trace_array_create_systems(const char *name, const char *systems)
{
struct trace_array *tr;
int ret;
@@ -8614,6 +9673,15 @@ static struct trace_array *trace_array_create(const char *name)
if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
goto out_free_tr;
+ if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL))
+ goto out_free_tr;
+
+ if (systems) {
+ tr->system_names = kstrdup_const(systems, GFP_KERNEL);
+ if (!tr->system_names)
+ goto out_free_tr;
+ }
+
tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
@@ -8632,38 +9700,46 @@ static struct trace_array *trace_array_create(const char *name)
if (allocate_trace_buffers(tr, trace_buf_size) < 0)
goto out_free_tr;
- tr->dir = tracefs_create_dir(name, trace_instance_dir);
- if (!tr->dir)
- goto out_free_tr;
+ /* The ring buffer is expanded by default */
+ trace_set_ring_buffer_expanded(tr);
- ret = event_trace_add_tracer(tr->dir, tr);
- if (ret) {
- tracefs_remove(tr->dir);
+ if (ftrace_allocate_ftrace_ops(tr) < 0)
goto out_free_tr;
- }
ftrace_init_trace_array(tr);
- init_tracer_tracefs(tr, tr->dir);
init_trace_flags_index(tr);
- __update_tracer_options(tr);
+
+ if (trace_instance_dir) {
+ ret = trace_array_create_dir(tr);
+ if (ret)
+ goto out_free_tr;
+ } else
+ __trace_early_add_events(tr);
list_add(&tr->list, &ftrace_trace_arrays);
tr->ref++;
-
return tr;
out_free_tr:
+ ftrace_free_ftrace_ops(tr);
free_trace_buffers(tr);
+ free_cpumask_var(tr->pipe_cpumask);
free_cpumask_var(tr->tracing_cpumask);
+ kfree_const(tr->system_names);
kfree(tr->name);
kfree(tr);
return ERR_PTR(ret);
}
+static struct trace_array *trace_array_create(const char *name)
+{
+ return trace_array_create_systems(name, NULL);
+}
+
static int instance_mkdir(const char *name)
{
struct trace_array *tr;
@@ -8689,6 +9765,7 @@ out_unlock:
/**
* trace_array_get_by_name - Create/Lookup a trace array, given its name.
* @name: The name of the trace array to be looked up/created.
+ * @systems: A list of systems to create event directories for (NULL for all)
*
* Returns pointer to trace array with given name.
* NULL, if it cannot be created.
@@ -8702,7 +9779,7 @@ out_unlock:
* trace_array_put() is called, user space can not delete it.
*
*/
-struct trace_array *trace_array_get_by_name(const char *name)
+struct trace_array *trace_array_get_by_name(const char *name, const char *systems)
{
struct trace_array *tr;
@@ -8714,7 +9791,7 @@ struct trace_array *trace_array_get_by_name(const char *name)
goto out_unlock;
}
- tr = trace_array_create(name);
+ tr = trace_array_create_systems(name, systems);
if (IS_ERR(tr))
tr = NULL;
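
With the extra argument, an in-kernel user can request an instance whose event directories are limited to a given set of systems; passing NULL keeps the previous behaviour of creating all of them. A hedged sketch of module usage (instance and system names are illustrative):

	struct trace_array *tr;

	tr = trace_array_get_by_name("my_instance", "sched");
	if (!tr)
		return -ENOMEM;

	/* ... enable events, write with trace_array_printk(), etc. ... */

	trace_array_put(tr);	/* allow user space to remove it later */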
@@ -8733,7 +9810,7 @@ static int __remove_instance(struct trace_array *tr)
int i;
/* Reference counter for a newly created trace array = 1. */
- if (tr->ref > 1 || (tr->current_trace && tr->current_trace->ref))
+ if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
return -EBUSY;
list_del(&tr->list);
@@ -8750,17 +9827,20 @@ static int __remove_instance(struct trace_array *tr)
ftrace_clear_pids(tr);
ftrace_destroy_function_files(tr);
tracefs_remove(tr->dir);
+ free_percpu(tr->last_func_repeats);
free_trace_buffers(tr);
+ clear_tracing_err_log(tr);
for (i = 0; i < tr->nr_topts; i++) {
kfree(tr->topts[i].topts);
}
kfree(tr->topts);
+ free_cpumask_var(tr->pipe_cpumask);
free_cpumask_var(tr->tracing_cpumask);
+ kfree_const(tr->system_names);
kfree(tr->name);
kfree(tr);
- tr = NULL;
return 0;
}
@@ -8814,41 +9894,56 @@ static int instance_rmdir(const char *name)
static __init void create_trace_instances(struct dentry *d_tracer)
{
+ struct trace_array *tr;
+
trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
instance_mkdir,
instance_rmdir);
if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
return;
+
+ mutex_lock(&event_mutex);
+ mutex_lock(&trace_types_lock);
+
+ list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+ if (!tr->name)
+ continue;
+ if (MEM_FAIL(trace_array_create_dir(tr) < 0,
+ "Failed to create instance directory\n"))
+ break;
+ }
+
+ mutex_unlock(&trace_types_lock);
+ mutex_unlock(&event_mutex);
}
static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
- struct trace_event_file *file;
int cpu;
- trace_create_file("available_tracers", 0444, d_tracer,
+ trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer,
tr, &show_traces_fops);
- trace_create_file("current_tracer", 0644, d_tracer,
+ trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer,
tr, &set_tracer_fops);
- trace_create_file("tracing_cpumask", 0644, d_tracer,
+ trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer,
tr, &tracing_cpumask_fops);
- trace_create_file("trace_options", 0644, d_tracer,
+ trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer,
tr, &tracing_iter_fops);
- trace_create_file("trace", 0644, d_tracer,
+ trace_create_file("trace", TRACE_MODE_WRITE, d_tracer,
tr, &tracing_fops);
- trace_create_file("trace_pipe", 0444, d_tracer,
+ trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer,
tr, &tracing_pipe_fops);
- trace_create_file("buffer_size_kb", 0644, d_tracer,
+ trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer,
tr, &tracing_entries_fops);
- trace_create_file("buffer_total_size_kb", 0444, d_tracer,
+ trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer,
tr, &tracing_total_entries_fops);
trace_create_file("free_buffer", 0200, d_tracer,
@@ -8857,32 +9952,31 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
trace_create_file("trace_marker", 0220, d_tracer,
tr, &tracing_mark_fops);
- file = __find_event_file(tr, "ftrace", "print");
- if (file && file->dir)
- trace_create_file("trigger", 0644, file->dir, file,
- &event_trigger_fops);
- tr->trace_marker_file = file;
+ tr->trace_marker_file = __find_event_file(tr, "ftrace", "print");
trace_create_file("trace_marker_raw", 0220, d_tracer,
tr, &tracing_mark_raw_fops);
- trace_create_file("trace_clock", 0644, d_tracer, tr,
+ trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr,
&trace_clock_fops);
- trace_create_file("tracing_on", 0644, d_tracer,
+ trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer,
tr, &rb_simple_fops);
- trace_create_file("timestamp_mode", 0444, d_tracer, tr,
+ trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr,
&trace_time_stamp_mode_fops);
tr->buffer_percent = 50;
- trace_create_file("buffer_percent", 0444, d_tracer,
+ trace_create_file("buffer_percent", TRACE_MODE_WRITE, d_tracer,
tr, &buffer_percent_fops);
+ trace_create_file("buffer_subbuf_size_kb", TRACE_MODE_WRITE, d_tracer,
+ tr, &buffer_subbuf_size_fops);
+
create_trace_options_dir(tr);
-#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
+#ifdef CONFIG_TRACER_MAX_TRACE
trace_create_maxlat_file(tr, d_tracer);
#endif
@@ -8890,11 +9984,11 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
MEM_FAIL(1, "Could not allocate function filter files");
#ifdef CONFIG_TRACER_SNAPSHOT
- trace_create_file("snapshot", 0644, d_tracer,
+ trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
tr, &snapshot_fops);
#endif
- trace_create_file("error_log", 0644, d_tracer,
+ trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
tr, &tracing_err_log_fops);
for_each_tracing_cpu(cpu)
@@ -8932,40 +10026,42 @@ static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore)
* directory. It is called via fs_initcall() by any of the boot up code
* and expects to return the dentry of the top level tracing directory.
*/
-struct dentry *tracing_init_dentry(void)
+int tracing_init_dentry(void)
{
struct trace_array *tr = &global_trace;
if (security_locked_down(LOCKDOWN_TRACEFS)) {
pr_warn("Tracing disabled due to lockdown\n");
- return ERR_PTR(-EPERM);
+ return -EPERM;
}
/* The top level trace array uses NULL as parent */
if (tr->dir)
- return NULL;
+ return 0;
- if (WARN_ON(!tracefs_initialized()) ||
- (IS_ENABLED(CONFIG_DEBUG_FS) &&
- WARN_ON(!debugfs_initialized())))
- return ERR_PTR(-ENODEV);
+ if (WARN_ON(!tracefs_initialized()))
+ return -ENODEV;
/*
* As there may still be users that expect the tracing
* files to exist in debugfs/tracing, we must automount
* the tracefs file system there, so older tools still
- * work with the newer kerenl.
+ * work with the newer kernel.
*/
tr->dir = debugfs_create_automount("tracing", NULL,
trace_automount, NULL);
- return NULL;
+ return 0;
}
extern struct trace_eval_map *__start_ftrace_eval_maps[];
extern struct trace_eval_map *__stop_ftrace_eval_maps[];
-static void __init trace_eval_init(void)
+static struct workqueue_struct *eval_map_wq __initdata;
+static struct work_struct eval_map_work __initdata;
+static struct work_struct tracerfs_init_work __initdata;
+
+static void __init eval_map_work_func(struct work_struct *work)
{
int len;
@@ -8973,6 +10069,35 @@ static void __init trace_eval_init(void)
trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
}
+static int __init trace_eval_init(void)
+{
+ INIT_WORK(&eval_map_work, eval_map_work_func);
+
+ eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
+ if (!eval_map_wq) {
+ pr_err("Unable to allocate eval_map_wq\n");
+ /* Do work here */
+ eval_map_work_func(&eval_map_work);
+ return -ENOMEM;
+ }
+
+ queue_work(eval_map_wq, &eval_map_work);
+ return 0;
+}
+
+subsys_initcall(trace_eval_init);
+
+static int __init trace_eval_sync(void)
+{
+ /* Make sure the eval map updates are finished */
+ if (eval_map_wq)
+ destroy_workqueue(eval_map_wq);
+ return 0;
+}
+
+late_initcall_sync(trace_eval_sync);
+
+
#ifdef CONFIG_MODULES
static void trace_module_add_evals(struct module *mod)
{
@@ -9035,7 +10160,7 @@ static int trace_module_notify(struct notifier_block *self,
break;
}
- return 0;
+ return NOTIFY_OK;
}
static struct notifier_block trace_module_nb = {
@@ -9044,91 +10169,104 @@ static struct notifier_block trace_module_nb = {
};
#endif /* CONFIG_MODULES */
-static __init int tracer_init_tracefs(void)
+static __init void tracer_init_tracefs_work_func(struct work_struct *work)
{
- struct dentry *d_tracer;
-
- trace_access_lock_init();
-
- d_tracer = tracing_init_dentry();
- if (IS_ERR(d_tracer))
- return 0;
event_trace_init();
- init_tracer_tracefs(&global_trace, d_tracer);
- ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
+ init_tracer_tracefs(&global_trace, NULL);
+ ftrace_init_tracefs_toplevel(&global_trace, NULL);
- trace_create_file("tracing_thresh", 0644, d_tracer,
+ trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL,
&global_trace, &tracing_thresh_fops);
- trace_create_file("README", 0444, d_tracer,
+ trace_create_file("README", TRACE_MODE_READ, NULL,
NULL, &tracing_readme_fops);
- trace_create_file("saved_cmdlines", 0444, d_tracer,
+ trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL,
NULL, &tracing_saved_cmdlines_fops);
- trace_create_file("saved_cmdlines_size", 0644, d_tracer,
+ trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL,
NULL, &tracing_saved_cmdlines_size_fops);
- trace_create_file("saved_tgids", 0444, d_tracer,
+ trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
NULL, &tracing_saved_tgids_fops);
- trace_eval_init();
-
- trace_create_eval_file(d_tracer);
+ trace_create_eval_file(NULL);
#ifdef CONFIG_MODULES
register_module_notifier(&trace_module_nb);
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
- trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
+ trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL,
NULL, &tracing_dyn_info_fops);
#endif
- create_trace_instances(d_tracer);
+ create_trace_instances(NULL);
update_tracer_options(&global_trace);
-
- return 0;
}
-static int trace_panic_handler(struct notifier_block *this,
- unsigned long event, void *unused)
+static __init int tracer_init_tracefs(void)
{
- if (ftrace_dump_on_oops)
- ftrace_dump(ftrace_dump_on_oops);
- return NOTIFY_OK;
-}
+ int ret;
-static struct notifier_block trace_panic_notifier = {
- .notifier_call = trace_panic_handler,
- .next = NULL,
- .priority = 150 /* priority: INT_MAX >= x >= 0 */
-};
+ trace_access_lock_init();
-static int trace_die_handler(struct notifier_block *self,
- unsigned long val,
- void *data)
-{
- switch (val) {
- case DIE_OOPS:
- if (ftrace_dump_on_oops)
- ftrace_dump(ftrace_dump_on_oops);
- break;
- default:
- break;
+ ret = tracing_init_dentry();
+ if (ret)
+ return 0;
+
+ if (eval_map_wq) {
+ INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func);
+ queue_work(eval_map_wq, &tracerfs_init_work);
+ } else {
+ tracer_init_tracefs_work_func(NULL);
}
- return NOTIFY_OK;
+
+ rv_init_interface();
+
+ return 0;
}
+fs_initcall(tracer_init_tracefs);
+
+static int trace_die_panic_handler(struct notifier_block *self,
+ unsigned long ev, void *unused);
+
+static struct notifier_block trace_panic_notifier = {
+ .notifier_call = trace_die_panic_handler,
+ .priority = INT_MAX - 1,
+};
+
static struct notifier_block trace_die_notifier = {
- .notifier_call = trace_die_handler,
- .priority = 200
+ .notifier_call = trace_die_panic_handler,
+ .priority = INT_MAX - 1,
};
/*
+ * The idea is to execute the following die/panic callback early, in order
+ * to avoid showing irrelevant information in the trace (like other panic
+ * notifier functions); we are the 2nd to run, after hung_task/rcu_stall
+ * warnings get disabled (to prevent potential log flooding).
+ */
+static int trace_die_panic_handler(struct notifier_block *self,
+ unsigned long ev, void *unused)
+{
+ if (!ftrace_dump_on_oops)
+ return NOTIFY_DONE;
+
+ /* The die notifier requires DIE_OOPS to trigger */
+ if (self == &trace_die_notifier && ev != DIE_OOPS)
+ return NOTIFY_DONE;
+
+ ftrace_dump(ftrace_dump_on_oops);
+
+ return NOTIFY_DONE;
+}
+
+/*
* printk is set to max of 1024, we really don't need it that big.
* Nothing should be printing 1000 characters anyway.
*/
@@ -9181,6 +10319,12 @@ void trace_init_global_iter(struct trace_iterator *iter)
/* Output in nanoseconds only if we are using a clock in nanoseconds. */
if (trace_clocks[iter->tr->clock_id].in_ns)
iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
+
+ /* Can not use kmalloc for iter.temp and iter.fmt */
+ iter->temp = static_temp_buf;
+ iter->temp_size = STATIC_TEMP_BUF_SIZE;
+ iter->fmt = static_fmt_buf;
+ iter->fmt_size = STATIC_FMT_BUF_SIZE;
}
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
@@ -9210,13 +10354,9 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
tracing_off();
local_irq_save(flags);
- printk_nmi_direct_enter();
/* Simulate the iterator */
trace_init_global_iter(&iter);
- /* Can not use kmalloc for iter.temp */
- iter.temp = static_temp_buf;
- iter.temp_size = STATIC_TEMP_BUF_SIZE;
for_each_tracing_cpu(cpu) {
atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
@@ -9250,7 +10390,7 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
}
/*
- * We need to stop all tracing on all CPUS to read the
+ * We need to stop all tracing on all CPUS to read
* the next buffer. This is a bit expensive, but is
* not done often. We fill all what we can read,
* and then release the locks again.
@@ -9290,35 +10430,15 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
}
atomic_dec(&dump_running);
- printk_nmi_direct_exit();
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
-int trace_run_command(const char *buf, int (*createfn)(int, char **))
-{
- char **argv;
- int argc, ret;
-
- argc = 0;
- ret = 0;
- argv = argv_split(GFP_KERNEL, buf, &argc);
- if (!argv)
- return -ENOMEM;
-
- if (argc)
- ret = createfn(argc, argv);
-
- argv_free(argv);
-
- return ret;
-}
-
#define WRITE_BUFSIZE 4096
ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos,
- int (*createfn)(int, char **))
+ int (*createfn)(const char *))
{
char *kbuf, *buf, *tmp;
int ret = 0;
@@ -9366,7 +10486,7 @@ ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
if (tmp)
*tmp = '\0';
- ret = trace_run_command(buf, createfn);
+ ret = createfn(buf);
if (ret)
goto out;
buf += size;
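
Since trace_run_command() is removed, each createfn now receives the raw command string and does its own argument splitting; the body of the removed helper maps almost directly onto the new callback shape. A hedged sketch (the dispatch function is hypothetical):

static int example_create_fn(const char *raw_command)
{
	char **argv;
	int argc = 0, ret = 0;

	argv = argv_split(GFP_KERNEL, raw_command, &argc);
	if (!argv)
		return -ENOMEM;

	if (argc)
		ret = example_dispatch(argc, argv);	/* hypothetical parser */

	argv_free(argv);
	return ret;
}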
@@ -9381,6 +10501,79 @@ out:
return ret;
}
+#ifdef CONFIG_TRACER_MAX_TRACE
+__init static bool tr_needs_alloc_snapshot(const char *name)
+{
+ char *test;
+ int len = strlen(name);
+ bool ret;
+
+ if (!boot_snapshot_index)
+ return false;
+
+ if (strncmp(name, boot_snapshot_info, len) == 0 &&
+ boot_snapshot_info[len] == '\t')
+ return true;
+
+ test = kmalloc(strlen(name) + 3, GFP_KERNEL);
+ if (!test)
+ return false;
+
+ sprintf(test, "\t%s\t", name);
+ ret = strstr(boot_snapshot_info, test) == NULL;
+ kfree(test);
+ return ret;
+}
+
+__init static void do_allocate_snapshot(const char *name)
+{
+ if (!tr_needs_alloc_snapshot(name))
+ return;
+
+ /*
+ * When allocate_snapshot is set, the next call to
+ * allocate_trace_buffers() (called by trace_array_get_by_name())
+ * will allocate the snapshot buffer. That will alse clear
+ * this flag.
+ */
+ allocate_snapshot = true;
+}
+#else
+static inline void do_allocate_snapshot(const char *name) { }
+#endif
+
+__init static void enable_instances(void)
+{
+ struct trace_array *tr;
+ char *curr_str;
+ char *str;
+ char *tok;
+
+ /* A tab is always appended */
+ boot_instance_info[boot_instance_index - 1] = '\0';
+ str = boot_instance_info;
+
+ while ((curr_str = strsep(&str, "\t"))) {
+
+ tok = strsep(&curr_str, ",");
+
+ if (IS_ENABLED(CONFIG_TRACER_MAX_TRACE))
+ do_allocate_snapshot(tok);
+
+ tr = trace_array_get_by_name(tok, NULL);
+ if (!tr) {
+ pr_warn("Failed to create instance buffer %s\n", curr_str);
+ continue;
+ }
+ /* Allow user space to delete it */
+ trace_array_put(tr);
+
+ while ((tok = strsep(&curr_str, ","))) {
+ early_enable_events(tr, tok, true);
+ }
+ }
+}
+
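enable_instances() consumes the boot_instance_info string built up by the trace_instance= kernel parameter: each tab-separated entry is an instance name, optionally followed by comma-separated event tokens that are handed to early_enable_events(). Assuming the tokens take the same forms accepted by set_event, a command line such as the following (event names illustrative) would create an instance and enable events in it at boot:

	trace_instance=foo,sched:sched_switch,irq:irq_handler_entry
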
__init static int tracer_alloc_buffers(void)
{
int ring_buf_size;
@@ -9393,7 +10586,7 @@ __init static int tracer_alloc_buffers(void)
}
/*
- * Make sure we don't accidently add more trace options
+ * Make sure we don't accidentally add more trace options
* than we have bits for.
*/
BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
@@ -9410,7 +10603,7 @@ __init static int tracer_alloc_buffers(void)
trace_printk_init_buffers();
/* To save memory, keep the ring buffer size to its minimum */
- if (ring_buffer_expanded)
+ if (global_trace.ring_buffer_expanded)
ring_buf_size = trace_buf_size;
else
ring_buf_size = 1;
@@ -9422,12 +10615,12 @@ __init static int tracer_alloc_buffers(void)
/*
* The prepare callbacks allocates some memory for the ring buffer. We
- * don't free the buffer if the if the CPU goes down. If we were to free
+ * don't free the buffer if the CPU goes down. If we were to free
* the buffer, then the user would lose any trace that was in the
* buffer. The memory will be removed once the "instance" is removed.
*/
ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
- "trace/RB:preapre", trace_rb_cpu_prepare,
+ "trace/RB:prepare", trace_rb_cpu_prepare,
NULL);
if (ret < 0)
goto out_free_cpumask;
@@ -9440,12 +10633,14 @@ __init static int tracer_alloc_buffers(void)
if (trace_create_savedcmd() < 0)
goto out_free_temp_buffer;
+ if (!zalloc_cpumask_var(&global_trace.pipe_cpumask, GFP_KERNEL))
+ goto out_free_savedcmd;
+
/* TODO: make the number of buffers hot pluggable with CPUS */
if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
- goto out_free_savedcmd;
+ goto out_free_pipe_cpumask;
}
-
if (global_trace.buffer_disabled)
tracing_off();
@@ -9494,8 +10689,12 @@ __init static int tracer_alloc_buffers(void)
register_snapshot_cmd();
+ test_can_verify();
+
return 0;
+out_free_pipe_cpumask:
+ free_cpumask_var(global_trace.pipe_cpumask);
out_free_savedcmd:
free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
@@ -9510,11 +10709,29 @@ out:
return ret;
}
+void __init ftrace_boot_snapshot(void)
+{
+#ifdef CONFIG_TRACER_MAX_TRACE
+ struct trace_array *tr;
+
+ if (!snapshot_at_boot)
+ return;
+
+ list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+ if (!tr->allocated_snapshot)
+ continue;
+
+ tracing_snapshot_instance(tr);
+ trace_array_puts(tr, "** Boot snapshot taken **\n");
+ }
+#endif
+}
+
void __init early_trace_init(void)
{
if (tracepoint_printk) {
tracepoint_print_iter =
- kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
+ kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
if (MEM_FAIL(!tracepoint_print_iter,
"Failed to allocate trace iterator\n"))
tracepoint_printk = 0;
@@ -9522,14 +10739,19 @@ void __init early_trace_init(void)
static_key_enable(&tracepoint_printk_key.key);
}
tracer_alloc_buffers();
+
+ init_events();
}
void __init trace_init(void)
{
trace_event_init();
+
+ if (boot_instance_index)
+ enable_instances();
}
-__init static int clear_boot_tracer(void)
+__init static void clear_boot_tracer(void)
{
/*
* The default tracer at boot buffer is an init section.
@@ -9539,26 +10761,21 @@ __init static int clear_boot_tracer(void)
* about to be freed.
*/
if (!default_bootup_tracer)
- return 0;
+ return;
printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
default_bootup_tracer);
default_bootup_tracer = NULL;
-
- return 0;
}
-fs_initcall(tracer_init_tracefs);
-late_initcall_sync(clear_boot_tracer);
-
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-__init static int tracing_set_default_clock(void)
+__init static void tracing_set_default_clock(void)
{
/* sched_clock_stable() is determined in late_initcall */
if (!trace_boot_clock && !sched_clock_stable()) {
if (security_locked_down(LOCKDOWN_TRACEFS)) {
pr_warn("Can not set tracing clock due to lockdown\n");
- return -EPERM;
+ return;
}
printk(KERN_WARNING
@@ -9568,8 +10785,21 @@ __init static int tracing_set_default_clock(void)
"on the kernel command line\n");
tracing_set_clock(&global_trace, "global");
}
+}
+#else
+static inline void tracing_set_default_clock(void) { }
+#endif
+
+__init static int late_trace_init(void)
+{
+ if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
+ static_key_disable(&tracepoint_printk_key.key);
+ tracepoint_printk = 0;
+ }
+ tracing_set_default_clock();
+ clear_boot_tracer();
return 0;
}
-late_initcall_sync(tracing_set_default_clock);
-#endif
+
+late_initcall_sync(late_trace_init);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 13db4000af3f..00f873910c5d 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -19,12 +19,19 @@
#include <linux/glob.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>
+#include <linux/ctype.h>
+#include <linux/once_lite.h>
+
+#include "pid_list.h"
#ifdef CONFIG_FTRACE_SYSCALLS
-#include <asm/unistd.h> /* For NR_SYSCALLS */
+#include <asm/unistd.h> /* For NR_syscalls */
#include <asm/syscall.h> /* some archs define it here */
#endif
+#define TRACE_MODE_WRITE 0640
+#define TRACE_MODE_READ 0440
+
enum trace_type {
__TRACE_FIRST_TYPE = 0,
@@ -43,7 +50,10 @@ enum trace_type {
TRACE_BLK,
TRACE_BPUTS,
TRACE_HWLAT,
+ TRACE_OSNOISE,
+ TRACE_TIMERLAT,
TRACE_RAW_DATA,
+ TRACE_FUNC_REPEATS,
__TRACE_LAST_TYPE,
};
@@ -67,12 +77,25 @@ enum trace_type {
#undef __array
#define __array(type, item, size) type item[size];
+/*
+ * For backward compatibility, older user space expects to see the
+ * kernel_stack event with a fixed size caller field. But today the fixed
+ * size is ignored by the kernel, and the real structure is dynamic.
+ * Expose to user space: "unsigned long caller[8];" but the real structure
+ * will be "unsigned long caller[] __counted_by(size)"
+ */
+#undef __stack_array
+#define __stack_array(type, item, size, field) type item[] __counted_by(field);
+
#undef __array_desc
#define __array_desc(type, container, item, size)
#undef __dynamic_array
#define __dynamic_array(type, item) type item[];
+#undef __rel_dynamic_array
+#define __rel_dynamic_array(type, item) type item[];
+
#undef F_STRUCT
#define F_STRUCT(args...) args
@@ -97,16 +120,14 @@ enum trace_type {
#include "trace_entries.h"
/* Use this for memory failure errors */
-#define MEM_FAIL(condition, fmt, ...) ({ \
- static bool __section(.data.once) __warned; \
- int __ret_warn_once = !!(condition); \
- \
- if (unlikely(__ret_warn_once && !__warned)) { \
- __warned = true; \
- pr_err("ERROR: " fmt, ##__VA_ARGS__); \
- } \
- unlikely(__ret_warn_once); \
-})
+#define MEM_FAIL(condition, fmt, ...) \
+ DO_ONCE_LITE_IF(condition, pr_err, "ERROR: " fmt, ##__VA_ARGS__)
+
+#define FAULT_STRING "(fault)"
+
+#define HIST_STACKTRACE_DEPTH 16
+#define HIST_STACKTRACE_SIZE (HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
+#define HIST_STACKTRACE_SKIP 5
/*
* syscalls are special, and need special handling, this is why
@@ -129,29 +150,25 @@ struct kprobe_trace_entry_head {
unsigned long ip;
};
+struct eprobe_trace_entry_head {
+ struct trace_entry ent;
+};
+
struct kretprobe_trace_entry_head {
struct trace_entry ent;
unsigned long func;
unsigned long ret_ip;
};
-/*
- * trace_flag_type is an enumeration that holds different
- * states when a trace occurs. These are:
- * IRQS_OFF - interrupts were disabled
- * IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
- * NEED_RESCHED - reschedule is requested
- * HARDIRQ - inside an interrupt handler
- * SOFTIRQ - inside a softirq handler
- */
-enum trace_flag_type {
- TRACE_FLAG_IRQS_OFF = 0x01,
- TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
- TRACE_FLAG_NEED_RESCHED = 0x04,
- TRACE_FLAG_HARDIRQ = 0x08,
- TRACE_FLAG_SOFTIRQ = 0x10,
- TRACE_FLAG_PREEMPT_RESCHED = 0x20,
- TRACE_FLAG_NMI = 0x40,
+struct fentry_trace_entry_head {
+ struct trace_entry ent;
+ unsigned long ip;
+};
+
+struct fexit_trace_entry_head {
+ struct trace_entry ent;
+ unsigned long func;
+ unsigned long ret_ip;
};
#define TRACE_BUF_SIZE 1024
@@ -205,10 +222,14 @@ struct trace_options {
struct trace_option_dentry *topts;
};
-struct trace_pid_list {
- int pid_max;
- unsigned long *pids;
-};
+struct trace_pid_list *trace_pid_list_alloc(void);
+void trace_pid_list_free(struct trace_pid_list *pid_list);
+bool trace_pid_list_is_set(struct trace_pid_list *pid_list, unsigned int pid);
+int trace_pid_list_set(struct trace_pid_list *pid_list, unsigned int pid);
+int trace_pid_list_clear(struct trace_pid_list *pid_list, unsigned int pid);
+int trace_pid_list_first(struct trace_pid_list *pid_list, unsigned int *pid);
+int trace_pid_list_next(struct trace_pid_list *pid_list, unsigned int pid,
+ unsigned int *next);
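The pid list is now an opaque structure managed by pid_list.c; callers walk it with the first/next pair, which (following the users in trace.c) return a negative value when no further pid is set and otherwise treat the passed-in pid as an inclusive starting point. A hedged iteration sketch:

static void example_walk(struct trace_pid_list *pid_list)
{
	unsigned int pid;

	if (trace_pid_list_first(pid_list, &pid) < 0)
		return;		/* no pids are set */

	do {
		/* 'pid' is set in the list */
	} while (trace_pid_list_next(pid_list, pid + 1, &pid) >= 0);
}
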
enum {
TRACE_PIDS = BIT(0),
@@ -246,7 +267,7 @@ typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data);
* tracing_snapshot_cond(tr, cond_data), the cond_data passed in is
* passed in turn to the cond_snapshot.update() function. That data
* can be compared by the update() implementation with the cond_data
- * contained wihin the struct cond_snapshot instance associated with
+ * contained within the struct cond_snapshot instance associated with
* the trace_array. Because the tr->max_lock is held throughout the
* update() call, the update() function can directly retrieve the
* cond_snapshot and cond_data associated with the per-instance
@@ -271,7 +292,7 @@ typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data);
* take the snapshot, by returning 'true' if so, 'false' if no
* snapshot should be taken. Because the max_lock is held for
* the duration of update(), the implementation is safe to
- * directly retrieven and save any implementation data it needs
+ * directly retrieve and save any implementation data it needs
* to in association with the snapshot.
*/
struct cond_snapshot {
@@ -280,6 +301,17 @@ struct cond_snapshot {
};
/*
+ * struct trace_func_repeats - used to keep track of the consecutive
+ * (on the same CPU) calls of a single function.
+ */
+struct trace_func_repeats {
+ unsigned long ip;
+ unsigned long parent_ip;
+ unsigned long count;
+ u64 ts_last_call;
+};
+
+/*
* The trace array - an array of per-CPU trace arrays. This is the
* highest level data structure that individual tracers deal with.
* They have on/off state as well:
@@ -303,7 +335,7 @@ struct trace_array {
struct array_buffer max_buffer;
bool allocated_snapshot;
#endif
-#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
+#ifdef CONFIG_TRACER_MAX_TRACE
unsigned long max_latency;
#ifdef CONFIG_FSNOTIFY
struct dentry *d_max_latency;
@@ -345,17 +377,21 @@ struct trace_array {
unsigned char trace_flags_index[TRACE_FLAGS_MAX_SIZE];
unsigned int flags;
raw_spinlock_t start_lock;
+ const char *system_names;
struct list_head err_log;
struct dentry *dir;
struct dentry *options;
struct dentry *percpu_dir;
- struct dentry *event_dir;
+ struct eventfs_inode *event_dir;
struct trace_options *topts;
struct list_head systems;
struct list_head events;
struct trace_event_file *trace_marker_file;
cpumask_var_t tracing_cpumask; /* only trace on set CPUs */
+ /* one per_cpu trace_pipe can be opened by only one user */
+ cpumask_var_t pipe_cpumask;
int ref;
+ int trace_ref;
#ifdef CONFIG_FUNCTION_TRACER
struct ftrace_ops *ops;
struct trace_pid_list __rcu *function_pids;
@@ -369,11 +405,17 @@ struct trace_array {
/* function tracing enabled */
int function_enabled;
#endif
- int time_stamp_abs_ref;
+ int no_filter_buffering_ref;
struct list_head hist_vars;
#ifdef CONFIG_TRACER_SNAPSHOT
struct cond_snapshot *cond_snapshot;
#endif
+ struct trace_func_repeats __percpu *last_func_repeats;
+ /*
+ * On boot up, the ring buffer is set to the minimum size, so that
+ * we do not waste memory on systems that are not using tracing.
+ */
+ bool ring_buffer_expanded;
};
enum {
@@ -389,7 +431,8 @@ extern int tracing_check_open_get_tr(struct trace_array *tr);
extern struct trace_array *trace_array_find(const char *instance);
extern struct trace_array *trace_array_find_get(const char *instance);
-extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs);
+extern u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe);
+extern int tracing_set_filter_buffering(struct trace_array *tr, bool set);
extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);
extern bool trace_clock_in_ns(struct trace_array *tr);
@@ -448,6 +491,8 @@ extern void __ftrace_bad_type(void);
IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \
IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS); \
IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT); \
+ IF_ASSIGN(var, ent, struct osnoise_entry, TRACE_OSNOISE);\
+ IF_ASSIGN(var, ent, struct timerlat_entry, TRACE_TIMERLAT);\
IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \
TRACE_MMIO_RW); \
@@ -458,6 +503,8 @@ extern void __ftrace_bad_type(void);
TRACE_GRAPH_ENT); \
IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \
TRACE_GRAPH_RET); \
+ IF_ASSIGN(var, ent, struct func_repeats_entry, \
+ TRACE_FUNC_REPEATS); \
__ftrace_bad_type(); \
} while (0)
@@ -547,7 +594,6 @@ struct tracer {
struct tracer *next;
struct tracer_flags *flags;
int enabled;
- int ref;
bool print_max;
bool allow_instances;
#ifdef CONFIG_TRACER_MAX_TRACE
@@ -557,163 +603,6 @@ struct tracer {
bool noboot;
};
-
-/* Only current can touch trace_recursion */
-
-/*
- * For function tracing recursion:
- * The order of these bits are important.
- *
- * When function tracing occurs, the following steps are made:
- * If arch does not support a ftrace feature:
- * call internal function (uses INTERNAL bits) which calls...
- * If callback is registered to the "global" list, the list
- * function is called and recursion checks the GLOBAL bits.
- * then this function calls...
- * The function callback, which can use the FTRACE bits to
- * check for recursion.
- *
- * Now if the arch does not suppport a feature, and it calls
- * the global list function which calls the ftrace callback
- * all three of these steps will do a recursion protection.
- * There's no reason to do one if the previous caller already
- * did. The recursion that we are protecting against will
- * go through the same steps again.
- *
- * To prevent the multiple recursion checks, if a recursion
- * bit is set that is higher than the MAX bit of the current
- * check, then we know that the check was made by the previous
- * caller, and we can skip the current check.
- */
-enum {
- /* Function recursion bits */
- TRACE_FTRACE_BIT,
- TRACE_FTRACE_NMI_BIT,
- TRACE_FTRACE_IRQ_BIT,
- TRACE_FTRACE_SIRQ_BIT,
-
- /* INTERNAL_BITs must be greater than FTRACE_BITs */
- TRACE_INTERNAL_BIT,
- TRACE_INTERNAL_NMI_BIT,
- TRACE_INTERNAL_IRQ_BIT,
- TRACE_INTERNAL_SIRQ_BIT,
-
- TRACE_BRANCH_BIT,
-/*
- * Abuse of the trace_recursion.
- * As we need a way to maintain state if we are tracing the function
- * graph in irq because we want to trace a particular function that
- * was called in irq context but we have irq tracing off. Since this
- * can only be modified by current, we can reuse trace_recursion.
- */
- TRACE_IRQ_BIT,
-
- /* Set if the function is in the set_graph_function file */
- TRACE_GRAPH_BIT,
-
- /*
- * In the very unlikely case that an interrupt came in
- * at a start of graph tracing, and we want to trace
- * the function in that interrupt, the depth can be greater
- * than zero, because of the preempted start of a previous
- * trace. In an even more unlikely case, depth could be 2
- * if a softirq interrupted the start of graph tracing,
- * followed by an interrupt preempting a start of graph
- * tracing in the softirq, and depth can even be 3
- * if an NMI came in at the start of an interrupt function
- * that preempted a softirq start of a function that
- * preempted normal context!!!! Luckily, it can't be
- * greater than 3, so the next two bits are a mask
- * of what the depth is when we set TRACE_GRAPH_BIT
- */
-
- TRACE_GRAPH_DEPTH_START_BIT,
- TRACE_GRAPH_DEPTH_END_BIT,
-
- /*
- * To implement set_graph_notrace, if this bit is set, we ignore
- * function graph tracing of called functions, until the return
- * function is called to clear it.
- */
- TRACE_GRAPH_NOTRACE_BIT,
-};
-
-#define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0)
-#define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
-#define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit)))
-
-#define trace_recursion_depth() \
- (((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3)
-#define trace_recursion_set_depth(depth) \
- do { \
- current->trace_recursion &= \
- ~(3 << TRACE_GRAPH_DEPTH_START_BIT); \
- current->trace_recursion |= \
- ((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT; \
- } while (0)
-
-#define TRACE_CONTEXT_BITS 4
-
-#define TRACE_FTRACE_START TRACE_FTRACE_BIT
-#define TRACE_FTRACE_MAX ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)
-
-#define TRACE_LIST_START TRACE_INTERNAL_BIT
-#define TRACE_LIST_MAX ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
-
-#define TRACE_CONTEXT_MASK TRACE_LIST_MAX
-
-static __always_inline int trace_get_context_bit(void)
-{
- int bit;
-
- if (in_interrupt()) {
- if (in_nmi())
- bit = 0;
-
- else if (in_irq())
- bit = 1;
- else
- bit = 2;
- } else
- bit = 3;
-
- return bit;
-}
-
-static __always_inline int trace_test_and_set_recursion(int start, int max)
-{
- unsigned int val = current->trace_recursion;
- int bit;
-
- /* A previous recursion check was made */
- if ((val & TRACE_CONTEXT_MASK) > max)
- return 0;
-
- bit = trace_get_context_bit() + start;
- if (unlikely(val & (1 << bit)))
- return -1;
-
- val |= 1 << bit;
- current->trace_recursion = val;
- barrier();
-
- return bit;
-}
-
-static __always_inline void trace_clear_recursion(int bit)
-{
- unsigned int val = current->trace_recursion;
-
- if (!bit)
- return;
-
- bit = 1 << bit;
- val &= ~bit;
-
- barrier();
- current->trace_recursion = val;
-}
-
static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
@@ -723,10 +612,14 @@ trace_buffer_iter(struct trace_iterator *iter, int cpu)
int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void tracing_reset_online_cpus(struct array_buffer *buf);
-void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
+void tracing_reset_all_online_cpus_unlocked(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
int tracing_open_generic_tr(struct inode *inode, struct file *filp);
+int tracing_release_generic_tr(struct inode *inode, struct file *file);
+int tracing_open_file_tr(struct inode *inode, struct file *filp);
+int tracing_release_file_tr(struct inode *inode, struct file *filp);
+int tracing_single_release_file_tr(struct inode *inode, struct file *filp);
bool tracing_is_disabled(void);
bool tracer_tracing_is_on(struct trace_array *tr);
void tracer_tracing_on(struct trace_array *tr);
@@ -737,7 +630,7 @@ struct dentry *trace_create_file(const char *name,
void *data,
const struct file_operations *fops);
-struct dentry *tracing_init_dentry(void);
+int tracing_init_dentry(void);
struct ring_buffer_event;
@@ -745,8 +638,7 @@ struct ring_buffer_event *
trace_buffer_lock_reserve(struct trace_buffer *buffer,
int type,
unsigned long len,
- unsigned long flags,
- int pc);
+ unsigned int trace_ctx);
struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
struct trace_array_cpu *data);
@@ -757,6 +649,12 @@ struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
struct ring_buffer_event *event);
+bool trace_is_tracepoint_string(const char *str);
+const char *trace_event_format(struct trace_iterator *iter, const char *fmt);
+void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
+ va_list ap) __printf(2, 0);
+char *trace_iter_expand_format(struct trace_iterator *iter);
+
int trace_empty(struct trace_iterator *iter);
void *trace_find_next_entry_inc(struct trace_iterator *iter);
@@ -771,15 +669,14 @@ unsigned long trace_total_entries(struct trace_array *tr);
void trace_function(struct trace_array *tr,
unsigned long ip,
unsigned long parent_ip,
- unsigned long flags, int pc);
+ unsigned int trace_ctx);
void trace_graph_function(struct trace_array *tr,
unsigned long ip,
unsigned long parent_ip,
- unsigned long flags, int pc);
+ unsigned int trace_ctx);
void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
-int trace_empty(struct trace_iterator *iter);
void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
@@ -819,7 +716,6 @@ void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
int trace_pid_show(struct seq_file *m, void *v);
-void trace_free_pid_list(struct trace_pid_list *pid_list);
int trace_pid_write(struct trace_pid_list *filtered_pids,
struct trace_pid_list **new_pid_list,
const char __user *ubuf, size_t cnt);
@@ -829,29 +725,31 @@ void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
void *cond_data);
void update_max_tr_single(struct trace_array *tr,
struct task_struct *tsk, int cpu);
-#endif /* CONFIG_TRACER_MAX_TRACE */
-#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
- defined(CONFIG_FSNOTIFY)
+#ifdef CONFIG_FSNOTIFY
+#define LATENCY_FS_NOTIFY
+#endif
+#endif /* CONFIG_TRACER_MAX_TRACE */
+#ifdef LATENCY_FS_NOTIFY
void latency_fsnotify(struct trace_array *tr);
-
#else
-
static inline void latency_fsnotify(struct trace_array *tr) { }
-
#endif
#ifdef CONFIG_STACKTRACE
-void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
- int pc);
+void __trace_stack(struct trace_array *tr, unsigned int trace_ctx, int skip);
#else
-static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
- int skip, int pc)
+static inline void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
+ int skip)
{
}
#endif /* CONFIG_STACKTRACE */
+void trace_last_func_repeats(struct trace_array *tr,
+ struct trace_func_repeats *last_info,
+ unsigned int trace_ctx);
+
extern u64 ftrace_now(int cpu);
extern void trace_find_cmdline(int pid, char comm[]);
@@ -871,10 +769,12 @@ extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);
-extern bool ring_buffer_expanded;
+extern void trace_set_ring_buffer_expanded(struct trace_array *tr);
extern bool tracing_selftest_disabled;
#ifdef CONFIG_FTRACE_STARTUP_TEST
+extern void __init disable_tracing_selftest(const char *reason);
+
extern int trace_selftest_startup_function(struct tracer *trace,
struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
@@ -898,6 +798,9 @@ extern int trace_selftest_startup_branch(struct tracer *trace,
*/
#define __tracer_data __refdata
#else
+static inline void __init disable_tracing_selftest(const char *reason)
+{
+}
/* Tracers are seldom changed. Optimize when selftests are disabled. */
#define __tracer_data __read_mostly
#endif /* CONFIG_FTRACE_STARTUP_TEST */
@@ -962,6 +865,8 @@ static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
#define TRACE_GRAPH_PRINT_TAIL 0x100
#define TRACE_GRAPH_SLEEP_TIME 0x200
#define TRACE_GRAPH_GRAPH_TIME 0x400
+#define TRACE_GRAPH_PRINT_RETVAL 0x800
+#define TRACE_GRAPH_PRINT_RETVAL_HEX 0x1000
#define TRACE_GRAPH_PRINT_FILL_SHIFT 28
#define TRACE_GRAPH_PRINT_FILL_MASK (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
@@ -982,10 +887,10 @@ extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter);
extern int __trace_graph_entry(struct trace_array *tr,
struct ftrace_graph_ent *trace,
- unsigned long flags, int pc);
+ unsigned int trace_ctx);
extern void __trace_graph_return(struct trace_array *tr,
struct ftrace_graph_ret *trace,
- unsigned long flags, int pc);
+ unsigned int trace_ctx);
#ifdef CONFIG_DYNAMIC_FTRACE
extern struct ftrace_hash __rcu *ftrace_graph_hash;
@@ -1026,7 +931,7 @@ static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
* is set, and called by an interrupt handler, we still
* want to trace it.
*/
- if (in_irq())
+ if (in_hardirq())
trace_recursion_set(TRACE_IRQ_BIT);
else
trace_recursion_clear(TRACE_IRQ_BIT);
@@ -1103,6 +1008,10 @@ print_graph_function_flags(struct trace_iterator *iter, u32 flags)
extern struct list_head ftrace_pids;
#ifdef CONFIG_FUNCTION_TRACER
+
+#define FTRACE_PID_IGNORE -1
+#define FTRACE_PID_TRACE -2
+
struct ftrace_func_command {
struct list_head list;
char *name;
@@ -1114,12 +1023,15 @@ struct ftrace_func_command {
extern bool ftrace_filter_param __initdata;
static inline int ftrace_trace_task(struct trace_array *tr)
{
- return !this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
+ return this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid) !=
+ FTRACE_PID_IGNORE;
}
extern int ftrace_is_dead(void);
int ftrace_create_function_files(struct trace_array *tr,
struct dentry *parent);
void ftrace_destroy_function_files(struct trace_array *tr);
+int ftrace_allocate_ftrace_ops(struct trace_array *tr);
+void ftrace_free_ftrace_ops(struct trace_array *tr);
void ftrace_init_global_array_ops(struct trace_array *tr);
void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
void ftrace_reset_array_ops(struct trace_array *tr);
@@ -1141,6 +1053,11 @@ ftrace_create_function_files(struct trace_array *tr,
{
return 0;
}
+static inline int ftrace_allocate_ftrace_ops(struct trace_array *tr)
+{
+ return 0;
+}
+static inline void ftrace_free_ftrace_ops(struct trace_array *tr) { }
static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
static inline __init void
ftrace_init_global_array_ops(struct trace_array *tr) { }
@@ -1318,6 +1235,7 @@ extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
C(HEX, "hex"), \
C(BIN, "bin"), \
C(BLOCK, "block"), \
+ C(FIELDS, "fields"), \
C(PRINTK, "trace_printk"), \
C(ANNOTATE, "annotate"), \
C(USERSTACKTRACE, "userstacktrace"), \
@@ -1333,6 +1251,7 @@ extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
C(MARKERS, "markers"), \
C(EVENT_FORK, "event-fork"), \
C(PAUSE_ON_TRACE, "pause-on-trace"), \
+ C(HASH_PTR, "hash-ptr"), /* Print hashed pointer */ \
FUNCTION_FLAGS \
FGRAPH_FLAGS \
STACK_FLAGS \
@@ -1394,7 +1313,15 @@ static inline void trace_branch_disable(void)
#endif /* CONFIG_BRANCH_TRACER */
/* set ring buffers to default size if not already done so */
-int tracing_update_buffers(void);
+int tracing_update_buffers(struct trace_array *tr);
+
+union trace_synth_field {
+ u8 as_u8;
+ u16 as_u16;
+ u32 as_u32;
+ u64 as_u64;
+ struct trace_dynamic_info as_dynamic;
+};
struct ftrace_event_field {
struct list_head link;
@@ -1404,6 +1331,7 @@ struct ftrace_event_field {
int offset;
int size;
int is_signed;
+ int len;
};
struct prog_entry;
@@ -1424,7 +1352,7 @@ struct trace_subsystem_dir {
struct list_head list;
struct event_subsystem *subsystem;
struct trace_array *tr;
- struct dentry *entry;
+ struct eventfs_inode *ei;
int ref_count;
int nr_events;
};
@@ -1436,15 +1364,15 @@ extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
void trace_buffer_unlock_commit_regs(struct trace_array *tr,
struct trace_buffer *buffer,
struct ring_buffer_event *event,
- unsigned long flags, int pc,
+ unsigned int trace_ctx,
struct pt_regs *regs);
static inline void trace_buffer_unlock_commit(struct trace_array *tr,
struct trace_buffer *buffer,
struct ring_buffer_event *event,
- unsigned long flags, int pc)
+ unsigned int trace_ctx)
{
- trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL);
+ trace_buffer_unlock_commit_regs(tr, buffer, event, trace_ctx, NULL);
}
DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
@@ -1452,22 +1380,26 @@ DECLARE_PER_CPU(int, trace_buffered_event_cnt);
void trace_buffered_event_disable(void);
void trace_buffered_event_enable(void);
+void early_enable_events(struct trace_array *tr, char *buf, bool disable_first);
+
static inline void
__trace_event_discard_commit(struct trace_buffer *buffer,
struct ring_buffer_event *event)
{
if (this_cpu_read(trace_buffered_event) == event) {
- /* Simply release the temp buffer */
+ /* Simply release the temp buffer and enable preemption */
this_cpu_dec(trace_buffered_event_cnt);
+ preempt_enable_notrace();
return;
}
+ /* ring_buffer_discard_commit() enables preemption */
ring_buffer_discard_commit(buffer, event);
}
/*
* Helper function for event_trigger_unlock_commit{_regs}().
* If there are event triggers attached to this event that requires
- * filtering against its fields, then they wil be called as the
+ * filtering against its fields, then they will be called as the
* entry already holds the field information of the current event.
*
* It also checks if the event should be discarded or not.
@@ -1487,26 +1419,37 @@ __event_trigger_test_discard(struct trace_event_file *file,
unsigned long eflags = file->flags;
if (eflags & EVENT_FILE_FL_TRIGGER_COND)
- *tt = event_triggers_call(file, entry, event);
+ *tt = event_triggers_call(file, buffer, entry, event);
- if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
- (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
- !filter_match_preds(file->filter, entry))) {
- __trace_event_discard_commit(buffer, event);
- return true;
- }
+ if (likely(!(file->flags & (EVENT_FILE_FL_SOFT_DISABLED |
+ EVENT_FILE_FL_FILTERED |
+ EVENT_FILE_FL_PID_FILTER))))
+ return false;
+
+ if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
+ goto discard;
+
+ if (file->flags & EVENT_FILE_FL_FILTERED &&
+ !filter_match_preds(file->filter, entry))
+ goto discard;
+
+ if ((file->flags & EVENT_FILE_FL_PID_FILTER) &&
+ trace_event_ignore_this_pid(file))
+ goto discard;
return false;
+ discard:
+ __trace_event_discard_commit(buffer, event);
+ return true;
}
/**
* event_trigger_unlock_commit - handle triggers and finish event commit
- * @file: The file pointer assoctiated to the event
+ * @file: The file pointer associated with the event
* @buffer: The ring buffer that the event is being written to
* @event: The event meta data in the ring buffer
* @entry: The event itself
- * @irq_flags: The state of the interrupts at the start of the event
- * @pc: The state of the preempt count at the start of the event.
+ * @trace_ctx: The tracing context flags.
*
* This is a helper function to handle triggers that require data
* from the event itself. It also tests the event against filters and
@@ -1516,45 +1459,12 @@ static inline void
event_trigger_unlock_commit(struct trace_event_file *file,
struct trace_buffer *buffer,
struct ring_buffer_event *event,
- void *entry, unsigned long irq_flags, int pc)
-{
- enum event_trigger_type tt = ETT_NONE;
-
- if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
- trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
-
- if (tt)
- event_triggers_post_call(file, tt);
-}
-
-/**
- * event_trigger_unlock_commit_regs - handle triggers and finish event commit
- * @file: The file pointer assoctiated to the event
- * @buffer: The ring buffer that the event is being written to
- * @event: The event meta data in the ring buffer
- * @entry: The event itself
- * @irq_flags: The state of the interrupts at the start of the event
- * @pc: The state of the preempt count at the start of the event.
- *
- * This is a helper function to handle triggers that require data
- * from the event itself. It also tests the event against filters and
- * if the event is soft disabled and should be discarded.
- *
- * Same as event_trigger_unlock_commit() but calls
- * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
- */
-static inline void
-event_trigger_unlock_commit_regs(struct trace_event_file *file,
- struct trace_buffer *buffer,
- struct ring_buffer_event *event,
- void *entry, unsigned long irq_flags, int pc,
- struct pt_regs *regs)
+ void *entry, unsigned int trace_ctx)
{
enum event_trigger_type tt = ETT_NONE;
if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
- trace_buffer_unlock_commit_regs(file->tr, buffer, event,
- irq_flags, pc, regs);
+ trace_buffer_unlock_commit(file->tr, buffer, event, trace_ctx);
if (tt)
event_triggers_post_call(file, tt);
@@ -1576,8 +1486,6 @@ event_trigger_unlock_commit_regs(struct trace_event_file *file,
struct filter_pred;
struct regex;
-typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);
-
typedef int (*regex_match_func)(char *str, struct regex *r, int len);
enum regex_type {
@@ -1596,20 +1504,10 @@ struct regex {
regex_match_func match;
};
-struct filter_pred {
- filter_pred_fn_t fn;
- u64 val;
- struct regex regex;
- unsigned short *ops;
- struct ftrace_event_field *field;
- int offset;
- int not;
- int op;
-};
-
static inline bool is_string_field(struct ftrace_event_field *field)
{
return field->filter_type == FILTER_DYN_STRING ||
+ field->filter_type == FILTER_RDYN_STRING ||
field->filter_type == FILTER_STATIC_STRING ||
field->filter_type == FILTER_PTR_STRING ||
field->filter_type == FILTER_COMM;
@@ -1644,8 +1542,10 @@ extern void trace_event_enable_cmd_record(bool enable);
extern void trace_event_enable_tgid_record(bool enable);
extern int event_trace_init(void);
+extern int init_events(void);
extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
extern int event_trace_del_tracer(struct trace_array *tr);
+extern void __trace_early_add_events(struct trace_array *tr);
extern struct trace_event_file *__find_event_file(struct trace_array *tr,
const char *system,
@@ -1678,9 +1578,14 @@ static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
extern int register_trigger_cmds(void);
extern void clear_event_triggers(struct trace_array *tr);
+enum {
+ EVENT_TRIGGER_FL_PROBE = BIT(0),
+};
+
struct event_trigger_data {
unsigned long count;
int ref;
+ int flags;
struct event_trigger_ops *ops;
struct event_command *cmd_ops;
struct event_filter __rcu *filter;
@@ -1707,24 +1612,20 @@ struct enable_trigger_data {
};
extern int event_enable_trigger_print(struct seq_file *m,
- struct event_trigger_ops *ops,
- struct event_trigger_data *data);
-extern void event_enable_trigger_free(struct event_trigger_ops *ops,
struct event_trigger_data *data);
-extern int event_enable_trigger_func(struct event_command *cmd_ops,
- struct trace_event_file *file,
- char *glob, char *cmd, char *param);
+extern void event_enable_trigger_free(struct event_trigger_data *data);
+extern int event_enable_trigger_parse(struct event_command *cmd_ops,
+ struct trace_event_file *file,
+ char *glob, char *cmd,
+ char *param_and_filter);
extern int event_enable_register_trigger(char *glob,
- struct event_trigger_ops *ops,
struct event_trigger_data *data,
struct trace_event_file *file);
extern void event_enable_unregister_trigger(char *glob,
- struct event_trigger_ops *ops,
struct event_trigger_data *test,
struct trace_event_file *file);
extern void trigger_data_free(struct event_trigger_data *data);
-extern int event_trigger_init(struct event_trigger_ops *ops,
- struct event_trigger_data *data);
+extern int event_trigger_init(struct event_trigger_data *data);
extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
int trigger_enable);
extern void update_cond_flag(struct trace_event_file *file);
@@ -1745,6 +1646,34 @@ get_named_trigger_data(struct event_trigger_data *data);
extern int register_event_command(struct event_command *cmd);
extern int unregister_event_command(struct event_command *cmd);
extern int register_trigger_hist_enable_disable_cmds(void);
+extern bool event_trigger_check_remove(const char *glob);
+extern bool event_trigger_empty_param(const char *param);
+extern int event_trigger_separate_filter(char *param_and_filter, char **param,
+ char **filter, bool param_required);
+extern struct event_trigger_data *
+event_trigger_alloc(struct event_command *cmd_ops,
+ char *cmd,
+ char *param,
+ void *private_data);
+extern int event_trigger_parse_num(char *trigger,
+ struct event_trigger_data *trigger_data);
+extern int event_trigger_set_filter(struct event_command *cmd_ops,
+ struct trace_event_file *file,
+ char *param,
+ struct event_trigger_data *trigger_data);
+extern void event_trigger_reset_filter(struct event_command *cmd_ops,
+ struct event_trigger_data *trigger_data);
+extern int event_trigger_register(struct event_command *cmd_ops,
+ struct trace_event_file *file,
+ char *glob,
+ struct event_trigger_data *trigger_data);
+extern void event_trigger_unregister(struct event_command *cmd_ops,
+ struct trace_event_file *file,
+ char *glob,
+ struct event_trigger_data *trigger_data);
+
+extern void event_file_get(struct trace_event_file *file);
+extern void event_file_put(struct trace_event_file *file);
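The block of helpers declared above factors the common steps of an event_command parse() callback into reusable pieces. As a rough, hypothetical sketch (my_cmd_parse() and its error handling are illustrative only, loosely modeled on how a trigger command might compose these helpers, not code from this patch):

	static int my_cmd_parse(struct event_command *cmd_ops,
				struct trace_event_file *file,
				char *glob, char *cmd, char *param_and_filter)
	{
		struct event_trigger_data *trigger_data;
		char *param, *filter;
		bool remove;
		int ret;

		remove = event_trigger_check_remove(glob);

		/* Split "param if filter" into its two halves */
		ret = event_trigger_separate_filter(param_and_filter, &param,
						    &filter, false);
		if (ret)
			return ret;

		trigger_data = event_trigger_alloc(cmd_ops, cmd, param, file);
		if (!trigger_data)
			return -ENOMEM;

		if (remove) {
			/* glob starts with '!' when a trigger is being removed */
			event_trigger_unregister(cmd_ops, file, glob + 1, trigger_data);
			kfree(trigger_data);
			return 0;
		}

		ret = event_trigger_parse_num(param, trigger_data);
		if (ret)
			goto out_free;

		ret = event_trigger_set_filter(cmd_ops, file, filter, trigger_data);
		if (ret < 0)
			goto out_free;

		ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
		if (ret)
			goto out_reset;

		return 0;

	 out_reset:
		event_trigger_reset_filter(cmd_ops, trigger_data);
	 out_free:
		kfree(trigger_data);
		return ret;
	}

The point of the sketch is that parse() now receives the combined param_and_filter string and uses event_trigger_separate_filter() to split it before the usual alloc/register steps.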
/**
* struct event_trigger_ops - callbacks for trace event triggers
@@ -1752,10 +1681,20 @@ extern int register_trigger_hist_enable_disable_cmds(void);
* The methods in this structure provide per-event trigger hooks for
* various trigger operations.
*
+ * The @init and @free methods are used during trigger setup and
+ * teardown, typically called from an event_command's @parse()
+ * function implementation.
+ *
+ * The @print method is used to print the trigger spec.
+ *
+ * The @trigger method is the function that actually implements the
+ * trigger and is called in the context of the triggering event
+ * whenever that event occurs.
+ *
* All the methods below, except for @init() and @free(), must be
* implemented.
*
- * @func: The trigger 'probe' function called when the triggering
+ * @trigger: The trigger 'probe' function called when the triggering
* event occurs. The data passed into this callback is the data
* that was supplied to the event_command @reg() function that
* registered the trigger (see struct event_command) along with
@@ -1784,15 +1723,13 @@ extern int register_trigger_hist_enable_disable_cmds(void);
* (see trace_event_triggers.c).
*/
struct event_trigger_ops {
- void (*func)(struct event_trigger_data *data,
- void *rec,
- struct ring_buffer_event *rbe);
- int (*init)(struct event_trigger_ops *ops,
- struct event_trigger_data *data);
- void (*free)(struct event_trigger_ops *ops,
- struct event_trigger_data *data);
+ void (*trigger)(struct event_trigger_data *data,
+ struct trace_buffer *buffer,
+ void *rec,
+ struct ring_buffer_event *rbe);
+ int (*init)(struct event_trigger_data *data);
+ void (*free)(struct event_trigger_data *data);
int (*print)(struct seq_file *m,
- struct event_trigger_ops *ops,
struct event_trigger_data *data);
};
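For orientation, a hypothetical event_trigger_ops instance wired to the reworked callbacks could look like the sketch below; my_trigger, my_hits and the print format are invented for illustration, while the count handling and ref-count based free follow the conventions suggested by struct event_trigger_data above.

	static atomic64_t my_hits = ATOMIC64_INIT(0);

	static void my_trigger(struct event_trigger_data *data,
			       struct trace_buffer *buffer,
			       void *rec, struct ring_buffer_event *rbe)
	{
		if (!data->count)
			return;			/* count exhausted: do nothing */
		if (data->count != -1)
			(data->count)--;
		atomic64_inc(&my_hits);		/* the actual "action" */
	}

	static int my_trigger_print(struct seq_file *m, struct event_trigger_data *data)
	{
		seq_puts(m, "my_trigger");
		if (data->count != -1)
			seq_printf(m, ":count=%lu", data->count);
		seq_putc(m, '\n');
		return 0;
	}

	static void my_trigger_free(struct event_trigger_data *data)
	{
		if (WARN_ON_ONCE(data->ref <= 0))
			return;
		if (!--data->ref)
			trigger_data_free(data);
	}

	static struct event_trigger_ops my_trigger_ops = {
		.trigger	= my_trigger,
		.print		= my_trigger_print,
		.init		= event_trigger_init,	/* generic ref-count init */
		.free		= my_trigger_free,
	};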
@@ -1835,7 +1772,7 @@ struct event_trigger_ops {
* All the methods below, except for @set_filter() and @unreg_all(),
* must be implemented.
*
- * @func: The callback function responsible for parsing and
+ * @parse: The callback function responsible for parsing and
* registering the trigger written to the 'trigger' file by the
* user. It allocates the trigger instance and registers it with
* the appropriate trace event. It makes use of the other
@@ -1870,21 +1807,24 @@ struct event_trigger_ops {
*
* @get_trigger_ops: The callback function invoked to retrieve the
* event_trigger_ops implementation associated with the command.
+ * This callback function allows a single event_command to
+ * support multiple trigger implementations via different sets of
+ * event_trigger_ops, depending on the value of the @param
+ * string.
*/
struct event_command {
struct list_head list;
char *name;
enum event_trigger_type trigger_type;
int flags;
- int (*func)(struct event_command *cmd_ops,
- struct trace_event_file *file,
- char *glob, char *cmd, char *params);
+ int (*parse)(struct event_command *cmd_ops,
+ struct trace_event_file *file,
+ char *glob, char *cmd,
+ char *param_and_filter);
int (*reg)(char *glob,
- struct event_trigger_ops *ops,
struct event_trigger_data *data,
struct trace_event_file *file);
void (*unreg)(char *glob,
- struct event_trigger_ops *ops,
struct event_trigger_data *data,
struct trace_event_file *file);
void (*unreg_all)(struct trace_event_file *file);
@@ -1968,15 +1908,14 @@ extern int tracing_set_cpumask(struct trace_array *tr,
#define MAX_EVENT_NAME_LEN 64
-extern int trace_run_command(const char *buf, int (*createfn)(int, char**));
extern ssize_t trace_parse_run_command(struct file *file,
const char __user *buffer, size_t count, loff_t *ppos,
- int (*createfn)(int, char**));
+ int (*createfn)(const char *));
extern unsigned int err_pos(char *cmd, const char *str);
extern void tracing_log_err(struct trace_array *tr,
const char *loc, const char *cmd,
- const char **errs, u8 type, u8 pos);
+ const char **errs, u8 type, u16 pos);
/*
* Normal trace_printk() and friends allocates special buffers
@@ -2057,8 +1996,6 @@ static inline void tracer_hardirqs_on(unsigned long a0, unsigned long a1) { }
static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif
-extern struct trace_iterator *tracepoint_print_iter;
-
/*
* Reset the state of the trace_iterator so that it can read consumed data.
* Normally, the trace_iterator is used for reading the data when it is not
@@ -2066,15 +2003,68 @@ extern struct trace_iterator *tracepoint_print_iter;
*/
static __always_inline void trace_iterator_reset(struct trace_iterator *iter)
{
- const size_t offset = offsetof(struct trace_iterator, seq);
+ memset_startat(iter, 0, seq);
+ iter->pos = -1;
+}
- /*
- * Keep gcc from complaining about overwriting more than just one
- * member in the structure.
- */
- memset((char *)iter + offset, 0, sizeof(struct trace_iterator) - offset);
+/* Check the name is good for event/group/fields */
+static inline bool __is_good_name(const char *name, bool hash_ok)
+{
+ if (!isalpha(*name) && *name != '_' && (!hash_ok || *name != '-'))
+ return false;
+ while (*++name != '\0') {
+ if (!isalpha(*name) && !isdigit(*name) && *name != '_' &&
+ (!hash_ok || *name != '-'))
+ return false;
+ }
+ return true;
+}
- iter->pos = -1;
+/* Check the name is good for event/group/fields */
+static inline bool is_good_name(const char *name)
+{
+ return __is_good_name(name, false);
+}
+
+/* Check the name is good for system */
+static inline bool is_good_system_name(const char *name)
+{
+ return __is_good_name(name, true);
}
+/* Convert certain expected symbols into '_' when generating event names */
+static inline void sanitize_event_name(char *name)
+{
+ while (*name++ != '\0')
+ if (*name == ':' || *name == '.')
+ *name = '_';
+}
+
+/*
+ * This is a generic way to read and write a u64 value from a file in tracefs.
+ *
+ * The value is stored in the variable pointed to by *val. The value needs
+ * to be at least *min and at most *max. The write is protected by an
+ * existing *lock.
+ */
+struct trace_min_max_param {
+ struct mutex *lock;
+ u64 *val;
+ u64 *min;
+ u64 *max;
+};
+
+#define U64_STR_SIZE 24 /* 20 digits max */
+
+extern const struct file_operations trace_min_max_fops;
+
+#ifdef CONFIG_RV
+extern int rv_init_interface(void);
+#else
+static inline int rv_init_interface(void)
+{
+ return 0;
+}
+#endif
+
#endif /* _LINUX_KERNEL_TRACE_H */
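One consumer-side illustration of the trace_min_max_param/trace_min_max_fops pair added above: a tracer can expose a bounded u64 knob in tracefs without writing its own read/write handlers. A minimal sketch, assuming the usual kernel headers; the my_period_* names and the "period_us" file are made up for this example:

	static u64 my_period_us  = 1000;
	static u64 my_period_min = 100;
	static u64 my_period_max = 10000000;
	static DEFINE_MUTEX(my_period_lock);

	static struct trace_min_max_param my_period_param = {
		.lock	= &my_period_lock,
		.val	= &my_period_us,
		.min	= &my_period_min,
		.max	= &my_period_max,
	};

	static void __init my_tracer_create_files(struct dentry *parent)
	{
		/* The generic handler enforces the [min, max] bounds on writes. */
		trace_create_file("period_us", TRACE_MODE_WRITE, parent,
				  &my_period_param, &trace_min_max_fops);
	}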
diff --git a/kernel/trace/trace_benchmark.c b/kernel/trace/trace_benchmark.c
index 2e9a4746ea85..54d5fa35c90a 100644
--- a/kernel/trace/trace_benchmark.c
+++ b/kernel/trace/trace_benchmark.c
@@ -31,7 +31,7 @@ static bool ok_to_run;
* it simply writes "START". As the first write is cold cache and
* the rest is hot, we save off that time in bm_first and it is
* reported as "first", which is shown in the second write to the
- * tracepoint. The "first" field is writen within the statics from
+ * tracepoint. The "first" field is written within the statics from
* then on but never changes.
*/
static void trace_do_benchmark(void)
@@ -51,7 +51,7 @@ static void trace_do_benchmark(void)
local_irq_disable();
start = trace_clock_local();
- trace_benchmark_event(bm_str);
+ trace_benchmark_event(bm_str, bm_last);
stop = trace_clock_local();
local_irq_enable();
@@ -112,7 +112,7 @@ static void trace_do_benchmark(void)
int i = 0;
/*
* stddev is the square of standard deviation but
- * we want the actualy number. Use the average
+ * we want the actual number. Use the average
* as our seed to find the std.
*
* The next try is:
@@ -155,7 +155,7 @@ static int benchmark_event_kthread(void *arg)
/*
* We don't go to sleep, but let others run as well.
- * This is bascially a "yield()" to let any task that
+ * This is basically a "yield()" to let any task that
* wants to run, schedule in, but if the CPU is idle,
* we'll keep burning cycles.
*
diff --git a/kernel/trace/trace_benchmark.h b/kernel/trace/trace_benchmark.h
index 79e6fbe5b365..c3e91060dc94 100644
--- a/kernel/trace/trace_benchmark.h
+++ b/kernel/trace/trace_benchmark.h
@@ -14,19 +14,21 @@ extern void trace_benchmark_unreg(void);
TRACE_EVENT_FN(benchmark_event,
- TP_PROTO(const char *str),
+ TP_PROTO(const char *str, u64 delta),
- TP_ARGS(str),
+ TP_ARGS(str, delta),
TP_STRUCT__entry(
__array( char, str, BENCHMARK_EVENT_STRLEN )
+ __field( u64, delta)
),
TP_fast_assign(
memcpy(__entry->str, str, BENCHMARK_EVENT_STRLEN);
+ __entry->delta = delta;
),
- TP_printk("%s", __entry->str),
+ TP_printk("%s delta=%llu", __entry->str, __entry->delta),
trace_benchmark_reg, trace_benchmark_unreg
);
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index fa0fc08c6ef8..dbe29b4c6a7a 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -31,7 +31,7 @@ trace_boot_set_instance_options(struct trace_array *tr, struct xbc_node *node)
/* Common ftrace options */
xbc_node_for_each_array_value(node, "options", anode, p) {
- if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf)) {
+ if (strscpy(buf, p, ARRAY_SIZE(buf)) < 0) {
pr_err("String is too long: %s\n", p);
continue;
}
@@ -40,6 +40,16 @@ trace_boot_set_instance_options(struct trace_array *tr, struct xbc_node *node)
pr_err("Failed to set option: %s\n", buf);
}
+ p = xbc_node_find_value(node, "tracing_on", NULL);
+ if (p && *p != '\0') {
+ if (kstrtoul(p, 10, &v))
+ pr_err("Failed to set tracing on: %s\n", p);
+ if (v)
+ tracer_tracing_on(tr);
+ else
+ tracer_tracing_off(tr);
+ }
+
p = xbc_node_find_value(node, "trace_clock", NULL);
if (p && *p != '\0') {
if (tracing_set_clock(tr, p) < 0)
@@ -77,7 +87,7 @@ trace_boot_enable_events(struct trace_array *tr, struct xbc_node *node)
const char *p;
xbc_node_for_each_array_value(node, "events", anode, p) {
- if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf)) {
+ if (strscpy(buf, p, ARRAY_SIZE(buf)) < 0) {
pr_err("String is too long: %s\n", p);
continue;
}
@@ -161,6 +171,293 @@ trace_boot_add_synth_event(struct xbc_node *node, const char *event)
}
#endif
+#ifdef CONFIG_HIST_TRIGGERS
+static int __init __printf(3, 4)
+append_printf(char **bufp, char *end, const char *fmt, ...)
+{
+ va_list args;
+ int ret;
+
+ if (*bufp == end)
+ return -ENOSPC;
+
+ va_start(args, fmt);
+ ret = vsnprintf(*bufp, end - *bufp, fmt, args);
+ if (ret < end - *bufp) {
+ *bufp += ret;
+ } else {
+ *bufp = end;
+ ret = -ERANGE;
+ }
+ va_end(args);
+
+ return ret;
+}
+
+static int __init
+append_str_nospace(char **bufp, char *end, const char *str)
+{
+ char *p = *bufp;
+ int len;
+
+ while (p < end - 1 && *str != '\0') {
+ if (!isspace(*str))
+ *(p++) = *str;
+ str++;
+ }
+ *p = '\0';
+ if (p == end - 1) {
+ *bufp = end;
+ return -ENOSPC;
+ }
+ len = p - *bufp;
+ *bufp = p;
+ return (int)len;
+}
+
+static int __init
+trace_boot_hist_add_array(struct xbc_node *hnode, char **bufp,
+ char *end, const char *key)
+{
+ struct xbc_node *anode;
+ const char *p;
+ char sep;
+
+ p = xbc_node_find_value(hnode, key, &anode);
+ if (p) {
+ if (!anode) {
+ pr_err("hist.%s requires value(s).\n", key);
+ return -EINVAL;
+ }
+
+ append_printf(bufp, end, ":%s", key);
+ sep = '=';
+ xbc_array_for_each_value(anode, p) {
+ append_printf(bufp, end, "%c%s", sep, p);
+ if (sep == '=')
+ sep = ',';
+ }
+ } else
+ return -ENOENT;
+
+ return 0;
+}
+
+static int __init
+trace_boot_hist_add_one_handler(struct xbc_node *hnode, char **bufp,
+ char *end, const char *handler,
+ const char *param)
+{
+ struct xbc_node *knode, *anode;
+ const char *p;
+ char sep;
+
+ /* Compose 'handler' parameter */
+ p = xbc_node_find_value(hnode, param, NULL);
+ if (!p) {
+ pr_err("hist.%s requires '%s' option.\n",
+ xbc_node_get_data(hnode), param);
+ return -EINVAL;
+ }
+ append_printf(bufp, end, ":%s(%s)", handler, p);
+
+ /* Compose 'action' parameter */
+ knode = xbc_node_find_subkey(hnode, "trace");
+ if (!knode)
+ knode = xbc_node_find_subkey(hnode, "save");
+
+ if (knode) {
+ anode = xbc_node_get_child(knode);
+ if (!anode || !xbc_node_is_value(anode)) {
+ pr_err("hist.%s.%s requires value(s).\n",
+ xbc_node_get_data(hnode),
+ xbc_node_get_data(knode));
+ return -EINVAL;
+ }
+
+ append_printf(bufp, end, ".%s", xbc_node_get_data(knode));
+ sep = '(';
+ xbc_array_for_each_value(anode, p) {
+ append_printf(bufp, end, "%c%s", sep, p);
+ if (sep == '(')
+ sep = ',';
+ }
+ append_printf(bufp, end, ")");
+ } else if (xbc_node_find_subkey(hnode, "snapshot")) {
+ append_printf(bufp, end, ".snapshot()");
+ } else {
+ pr_err("hist.%s requires an action.\n",
+ xbc_node_get_data(hnode));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __init
+trace_boot_hist_add_handlers(struct xbc_node *hnode, char **bufp,
+ char *end, const char *param)
+{
+ struct xbc_node *node;
+ const char *p, *handler;
+ int ret = 0;
+
+ handler = xbc_node_get_data(hnode);
+
+ xbc_node_for_each_subkey(hnode, node) {
+ p = xbc_node_get_data(node);
+ if (!isdigit(p[0]))
+ continue;
+ /* All digit started node should be instances. */
+ ret = trace_boot_hist_add_one_handler(node, bufp, end, handler, param);
+ if (ret < 0)
+ break;
+ }
+
+ if (xbc_node_find_subkey(hnode, param))
+ ret = trace_boot_hist_add_one_handler(hnode, bufp, end, handler, param);
+
+ return ret;
+}
+
+/*
+ * Histogram boottime tracing syntax.
+ *
+ * ftrace.[instance.INSTANCE.]event.GROUP.EVENT.hist[.N] {
+ * keys = <KEY>[,...]
+ * values = <VAL>[,...]
+ * sort = <SORT-KEY>[,...]
+ * size = <ENTRIES>
+ * name = <HISTNAME>
+ * var { <VAR> = <EXPR> ... }
+ * pause|continue|clear
+ * onmax|onchange[.N] { var = <VAR>; <ACTION> [= <PARAM>] }
+ * onmatch[.N] { event = <EVENT>; <ACTION> [= <PARAM>] }
+ * filter = <FILTER>
+ * }
+ *
+ * Where <ACTION> are;
+ *
+ * trace = <EVENT>, <ARG1>[, ...]
+ * save = <ARG1>[, ...]
+ * snapshot
+ */
+static int __init
+trace_boot_compose_hist_cmd(struct xbc_node *hnode, char *buf, size_t size)
+{
+ struct xbc_node *node, *knode;
+ char *end = buf + size;
+ const char *p;
+ int ret = 0;
+
+ append_printf(&buf, end, "hist");
+
+ ret = trace_boot_hist_add_array(hnode, &buf, end, "keys");
+ if (ret < 0) {
+ if (ret == -ENOENT)
+ pr_err("hist requires keys.\n");
+ return -EINVAL;
+ }
+
+ ret = trace_boot_hist_add_array(hnode, &buf, end, "values");
+ if (ret == -EINVAL)
+ return ret;
+ ret = trace_boot_hist_add_array(hnode, &buf, end, "sort");
+ if (ret == -EINVAL)
+ return ret;
+
+ p = xbc_node_find_value(hnode, "size", NULL);
+ if (p)
+ append_printf(&buf, end, ":size=%s", p);
+
+ p = xbc_node_find_value(hnode, "name", NULL);
+ if (p)
+ append_printf(&buf, end, ":name=%s", p);
+
+ node = xbc_node_find_subkey(hnode, "var");
+ if (node) {
+ xbc_node_for_each_key_value(node, knode, p) {
+ /* Expression must not include spaces. */
+ append_printf(&buf, end, ":%s=",
+ xbc_node_get_data(knode));
+ append_str_nospace(&buf, end, p);
+ }
+ }
+
+ /* Histogram control attributes (mutual exclusive) */
+ if (xbc_node_find_value(hnode, "pause", NULL))
+ append_printf(&buf, end, ":pause");
+ else if (xbc_node_find_value(hnode, "continue", NULL))
+ append_printf(&buf, end, ":continue");
+ else if (xbc_node_find_value(hnode, "clear", NULL))
+ append_printf(&buf, end, ":clear");
+
+ /* Histogram handler and actions */
+ node = xbc_node_find_subkey(hnode, "onmax");
+ if (node && trace_boot_hist_add_handlers(node, &buf, end, "var") < 0)
+ return -EINVAL;
+ node = xbc_node_find_subkey(hnode, "onchange");
+ if (node && trace_boot_hist_add_handlers(node, &buf, end, "var") < 0)
+ return -EINVAL;
+ node = xbc_node_find_subkey(hnode, "onmatch");
+ if (node && trace_boot_hist_add_handlers(node, &buf, end, "event") < 0)
+ return -EINVAL;
+
+ p = xbc_node_find_value(hnode, "filter", NULL);
+ if (p)
+ append_printf(&buf, end, " if %s", p);
+
+ if (buf == end) {
+ pr_err("hist exceeds the max command length.\n");
+ return -E2BIG;
+ }
+
+ return 0;
+}
+
+static void __init
+trace_boot_init_histograms(struct trace_event_file *file,
+ struct xbc_node *hnode, char *buf, size_t size)
+{
+ struct xbc_node *node;
+ const char *p;
+ char *tmp;
+
+ xbc_node_for_each_subkey(hnode, node) {
+ p = xbc_node_get_data(node);
+ if (!isdigit(p[0]))
+ continue;
+ /* All digit started node should be instances. */
+ if (trace_boot_compose_hist_cmd(node, buf, size) == 0) {
+ tmp = kstrdup(buf, GFP_KERNEL);
+ if (!tmp)
+ return;
+ if (trigger_process_regex(file, buf) < 0)
+ pr_err("Failed to apply hist trigger: %s\n", tmp);
+ kfree(tmp);
+ }
+ }
+
+ if (xbc_node_find_subkey(hnode, "keys")) {
+ if (trace_boot_compose_hist_cmd(hnode, buf, size) == 0) {
+ tmp = kstrdup(buf, GFP_KERNEL);
+ if (!tmp)
+ return;
+ if (trigger_process_regex(file, buf) < 0)
+ pr_err("Failed to apply hist trigger: %s\n", tmp);
+ kfree(tmp);
+ }
+ }
+}
+#else
+static void __init
+trace_boot_init_histograms(struct trace_event_file *file,
+ struct xbc_node *hnode, char *buf, size_t size)
+{
+ /* do nothing */
+}
+#endif
+
static void __init
trace_boot_init_one_event(struct trace_array *tr, struct xbc_node *gnode,
struct xbc_node *enode)
@@ -189,18 +486,24 @@ trace_boot_init_one_event(struct trace_array *tr, struct xbc_node *gnode,
p = xbc_node_find_value(enode, "filter", NULL);
if (p && *p != '\0') {
- if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf))
+ if (strscpy(buf, p, ARRAY_SIZE(buf)) < 0)
pr_err("filter string is too long: %s\n", p);
else if (apply_event_filter(file, buf) < 0)
pr_err("Failed to apply filter: %s\n", buf);
}
- xbc_node_for_each_array_value(enode, "actions", anode, p) {
- if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf))
- pr_err("action string is too long: %s\n", p);
- else if (trigger_process_regex(file, buf) < 0)
- pr_err("Failed to apply an action: %s\n", buf);
- }
+ if (IS_ENABLED(CONFIG_HIST_TRIGGERS)) {
+ xbc_node_for_each_array_value(enode, "actions", anode, p) {
+ if (strscpy(buf, p, ARRAY_SIZE(buf)) < 0)
+ pr_err("action string is too long: %s\n", p);
+ else if (trigger_process_regex(file, buf) < 0)
+ pr_err("Failed to apply an action: %s\n", p);
+ }
+ anode = xbc_node_find_subkey(enode, "hist");
+ if (anode)
+ trace_boot_init_histograms(file, anode, buf, ARRAY_SIZE(buf));
+ } else if (xbc_node_find_value(enode, "actions", NULL))
+ pr_err("Failed to apply event actions because CONFIG_HIST_TRIGGERS is not set.\n");
if (xbc_node_find_value(enode, "enable", NULL)) {
if (trace_event_enable_disable(file, 1, 0) < 0)
@@ -215,14 +518,37 @@ static void __init
trace_boot_init_events(struct trace_array *tr, struct xbc_node *node)
{
struct xbc_node *gnode, *enode;
+ bool enable, enable_all = false;
+ const char *data;
- node = xbc_node_find_child(node, "event");
+ node = xbc_node_find_subkey(node, "event");
if (!node)
return;
/* per-event key starts with "event.GROUP.EVENT" */
- xbc_node_for_each_child(node, gnode)
- xbc_node_for_each_child(gnode, enode)
+ xbc_node_for_each_subkey(node, gnode) {
+ data = xbc_node_get_data(gnode);
+ if (!strcmp(data, "enable")) {
+ enable_all = true;
+ continue;
+ }
+ enable = false;
+ xbc_node_for_each_subkey(gnode, enode) {
+ data = xbc_node_get_data(enode);
+ if (!strcmp(data, "enable")) {
+ enable = true;
+ continue;
+ }
trace_boot_init_one_event(tr, gnode, enode);
+ }
+ /* Event enablement must be done after event settings */
+ if (enable) {
+ data = xbc_node_get_data(gnode);
+ trace_array_set_clr_event(tr, data, NULL, true);
+ }
+ }
+ /* Ditto */
+ if (enable_all)
+ trace_array_set_clr_event(tr, NULL, NULL, true);
}
#else
#define trace_boot_enable_events(tr, node) do {} while (0)
@@ -274,6 +600,12 @@ trace_boot_enable_tracer(struct trace_array *tr, struct xbc_node *node)
if (tracing_set_tracer(tr, p) < 0)
pr_err("Failed to set given tracer: %s\n", p);
}
+
+ /* Since tracer can free snapshot buffer, allocate snapshot here.*/
+ if (xbc_node_find_value(node, "alloc_snapshot", NULL)) {
+ if (tracing_alloc_snapshot_instance(tr) < 0)
+ pr_err("Failed to allocate snapshot buffer\n");
+ }
}
static void __init
@@ -292,16 +624,16 @@ trace_boot_init_instances(struct xbc_node *node)
struct trace_array *tr;
const char *p;
- node = xbc_node_find_child(node, "instance");
+ node = xbc_node_find_subkey(node, "instance");
if (!node)
return;
- xbc_node_for_each_child(node, inode) {
+ xbc_node_for_each_subkey(node, inode) {
p = xbc_node_get_data(inode);
if (!p || *p == '\0')
continue;
- tr = trace_array_get_by_name(p);
+ tr = trace_array_get_by_name(p, NULL);
if (!tr) {
pr_err("Failed to get trace instance %s\n", p);
continue;
@@ -328,7 +660,12 @@ static int __init trace_boot_init(void)
trace_boot_init_one_instance(tr, trace_node);
trace_boot_init_instances(trace_node);
+ disable_tracing_selftest("running boot-time tracing");
+
return 0;
}
-
-fs_initcall(trace_boot_init);
+/*
+ * Start tracing at the end of core-initcall, so that it starts tracing
+ * from the beginning of postcore_initcall.
+ */
+core_initcall_sync(trace_boot_init);
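Tying the trace_boot additions together, a bootconfig fragment along these lines (hypothetical instance and histogram settings, written against the syntax documented above for trace_boot_compose_hist_cmd() and the new tracing_on option) would create an instance, attach a histogram to an event, enable it and turn tracing on:

	ftrace.instance.foo {
		tracing_on = 1
		event.sched.sched_waking {
			enable
			hist {
				keys = pid
				values = prio
				sort = prio
			}
		}
	}

At boot, trace_boot_init_instances() creates the "foo" instance, trace_boot_init_one_event() applies the hist trigger, and the event is enabled only after its other settings have been applied.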
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index eff099123aa2..e47fdb4c92fb 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -37,7 +37,7 @@ probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
struct ring_buffer_event *event;
struct trace_branch *entry;
unsigned long flags;
- int pc;
+ unsigned int trace_ctx;
const char *p;
if (current->trace_recursion & TRACE_BRANCH_BIT)
@@ -59,10 +59,10 @@ probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
if (atomic_read(&data->disabled))
goto out;
- pc = preempt_count();
+ trace_ctx = tracing_gen_ctx_flags(flags);
buffer = tr->array_buffer.buffer;
event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
- sizeof(*entry), flags, pc);
+ sizeof(*entry), trace_ctx);
if (!event)
goto out;
diff --git a/kernel/trace/trace_btf.c b/kernel/trace/trace_btf.c
new file mode 100644
index 000000000000..5bbdbcbbde3c
--- /dev/null
+++ b/kernel/trace/trace_btf.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/btf.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "trace_btf.h"
+
+/*
+ * Find a function proto type by name, and return the btf_type with its btf
+ * in *@btf_p. Return NULL if not found.
+ * Note that caller has to call btf_put(*@btf_p) after using the btf_type.
+ */
+const struct btf_type *btf_find_func_proto(const char *func_name, struct btf **btf_p)
+{
+ const struct btf_type *t;
+ s32 id;
+
+ id = bpf_find_btf_id(func_name, BTF_KIND_FUNC, btf_p);
+ if (id < 0)
+ return NULL;
+
+ /* Get BTF_KIND_FUNC type */
+ t = btf_type_by_id(*btf_p, id);
+ if (!t || !btf_type_is_func(t))
+ goto err;
+
+ /* The type of BTF_KIND_FUNC is BTF_KIND_FUNC_PROTO */
+ t = btf_type_by_id(*btf_p, t->type);
+ if (!t || !btf_type_is_func_proto(t))
+ goto err;
+
+ return t;
+err:
+ btf_put(*btf_p);
+ return NULL;
+}
+
+/*
+ * Get the function's parameters and store the number of parameters in *nr.
+ * This can return NULL if the function has no parameters.
+ * It can return ERR_PTR(-EINVAL) if @func_proto is not a function proto type.
+ */
+const struct btf_param *btf_get_func_param(const struct btf_type *func_proto, s32 *nr)
+{
+ if (!btf_type_is_func_proto(func_proto))
+ return ERR_PTR(-EINVAL);
+
+ *nr = btf_type_vlen(func_proto);
+ if (*nr > 0)
+ return (const struct btf_param *)(func_proto + 1);
+ else
+ return NULL;
+}
+
+#define BTF_ANON_STACK_MAX 16
+
+struct btf_anon_stack {
+ u32 tid;
+ u32 offset;
+};
+
+/*
+ * Find a member of data structure/union by name and return it.
+ * Return NULL if not found, or an ERR_PTR() value on invalid parameters or
+ * allocation failure.
+ * If the member is a member of an anonymous union/structure, the offset
+ * of that anonymous union/structure is stored into @anon_offset. Caller
+ * can calculate the correct offset from the root data structure by
+ * adding anon_offset to the member's offset.
+ */
+const struct btf_member *btf_find_struct_member(struct btf *btf,
+ const struct btf_type *type,
+ const char *member_name,
+ u32 *anon_offset)
+{
+ struct btf_anon_stack *anon_stack;
+ const struct btf_member *member;
+ u32 tid, cur_offset = 0;
+ const char *name;
+ int i, top = 0;
+
+ anon_stack = kcalloc(BTF_ANON_STACK_MAX, sizeof(*anon_stack), GFP_KERNEL);
+ if (!anon_stack)
+ return ERR_PTR(-ENOMEM);
+
+retry:
+ if (!btf_type_is_struct(type)) {
+ member = ERR_PTR(-EINVAL);
+ goto out;
+ }
+
+ for_each_member(i, type, member) {
+ if (!member->name_off) {
+ /* Anonymous union/struct: push it for later use */
+ if (btf_type_skip_modifiers(btf, member->type, &tid) &&
+ top < BTF_ANON_STACK_MAX) {
+ anon_stack[top].tid = tid;
+ anon_stack[top++].offset =
+ cur_offset + member->offset;
+ }
+ } else {
+ name = btf_name_by_offset(btf, member->name_off);
+ if (name && !strcmp(member_name, name)) {
+ if (anon_offset)
+ *anon_offset = cur_offset;
+ goto out;
+ }
+ }
+ }
+ if (top > 0) {
+ /* Pop from the anonymous stack and retry */
+ tid = anon_stack[--top].tid;
+ cur_offset = anon_stack[top].offset;
+ type = btf_type_by_id(btf, tid);
+ goto retry;
+ }
+ member = NULL;
+
+out:
+ kfree(anon_stack);
+ return member;
+}
+
diff --git a/kernel/trace/trace_btf.h b/kernel/trace/trace_btf.h
new file mode 100644
index 000000000000..4bc44bc261e6
--- /dev/null
+++ b/kernel/trace/trace_btf.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/btf.h>
+
+const struct btf_type *btf_find_func_proto(const char *func_name,
+ struct btf **btf_p);
+const struct btf_param *btf_get_func_param(const struct btf_type *func_proto,
+ s32 *nr);
+const struct btf_member *btf_find_struct_member(struct btf *btf,
+ const struct btf_type *type,
+ const char *member_name,
+ u32 *anon_offset);
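As a rough usage sketch of these BTF helpers (count_func_params() is a hypothetical caller, not part of this series; it assumes <linux/btf.h> and <linux/err.h>):

	static int count_func_params(const char *func_name)
	{
		const struct btf_type *proto;
		const struct btf_param *params;
		struct btf *btf;
		s32 nr = 0;

		proto = btf_find_func_proto(func_name, &btf);
		if (!proto)
			return -ENOENT;

		params = btf_get_func_param(proto, &nr);
		if (IS_ERR(params))
			nr = PTR_ERR(params);

		/* btf_find_func_proto() took a reference on the btf; drop it. */
		btf_put(btf);
		return nr;
	}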
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index aaf6793ededa..4702efb00ff2 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -95,33 +95,49 @@ u64 notrace trace_clock_global(void)
{
unsigned long flags;
int this_cpu;
- u64 now;
+ u64 now, prev_time;
raw_local_irq_save(flags);
this_cpu = raw_smp_processor_id();
- now = sched_clock_cpu(this_cpu);
+
/*
- * If in an NMI context then dont risk lockups and return the
- * cpu_clock() time:
+ * The global clock "guarantees" that the events are ordered
+ * between CPUs. But if two events on two different CPUS call
+ * trace_clock_global at roughly the same time, it really does
+ * not matter which one gets the earlier time. Just make sure
+ * that the same CPU will always show a monotonic clock.
+ *
+ * Use a read memory barrier to get the latest written
+ * time that was recorded.
*/
- if (unlikely(in_nmi()))
- goto out;
+ smp_rmb();
+ prev_time = READ_ONCE(trace_clock_struct.prev_time);
+ now = sched_clock_cpu(this_cpu);
- arch_spin_lock(&trace_clock_struct.lock);
+ /* Make sure that now is always greater than or equal to prev_time */
+ if ((s64)(now - prev_time) < 0)
+ now = prev_time;
/*
- * TODO: if this happens often then maybe we should reset
- * my_scd->clock to prev_time+1, to make sure
- * we start ticking with the local clock from now on?
+ * If in an NMI context then don't risk lockups and simply return
+ * the current time.
*/
- if ((s64)(now - trace_clock_struct.prev_time) < 0)
- now = trace_clock_struct.prev_time + 1;
+ if (unlikely(in_nmi()))
+ goto out;
- trace_clock_struct.prev_time = now;
+ /* Tracing can cause strange recursion, always use a try lock */
+ if (arch_spin_trylock(&trace_clock_struct.lock)) {
+ /* Reread prev_time in case it was already updated */
+ prev_time = READ_ONCE(trace_clock_struct.prev_time);
+ if ((s64)(now - prev_time) < 0)
+ now = prev_time;
- arch_spin_unlock(&trace_clock_struct.lock);
+ trace_clock_struct.prev_time = now;
+ /* The unlock acts as the wmb for the above rmb */
+ arch_spin_unlock(&trace_clock_struct.lock);
+ }
out:
raw_local_irq_restore(flags);
diff --git a/kernel/trace/trace_dynevent.c b/kernel/trace/trace_dynevent.c
index 9f2e8520b748..4376887e0d8a 100644
--- a/kernel/trace/trace_dynevent.c
+++ b/kernel/trace/trace_dynevent.c
@@ -13,11 +13,49 @@
#include <linux/tracefs.h>
#include "trace.h"
+#include "trace_output.h" /* for trace_event_sem */
#include "trace_dynevent.h"
static DEFINE_MUTEX(dyn_event_ops_mutex);
static LIST_HEAD(dyn_event_ops_list);
+bool trace_event_dyn_try_get_ref(struct trace_event_call *dyn_call)
+{
+ struct trace_event_call *call;
+ bool ret = false;
+
+ if (WARN_ON_ONCE(!(dyn_call->flags & TRACE_EVENT_FL_DYNAMIC)))
+ return false;
+
+ down_read(&trace_event_sem);
+ list_for_each_entry(call, &ftrace_events, list) {
+ if (call == dyn_call) {
+ atomic_inc(&dyn_call->refcnt);
+ ret = true;
+ }
+ }
+ up_read(&trace_event_sem);
+ return ret;
+}
+
+void trace_event_dyn_put_ref(struct trace_event_call *call)
+{
+ if (WARN_ON_ONCE(!(call->flags & TRACE_EVENT_FL_DYNAMIC)))
+ return;
+
+ if (WARN_ON_ONCE(atomic_read(&call->refcnt) <= 0)) {
+ atomic_set(&call->refcnt, 0);
+ return;
+ }
+
+ atomic_dec(&call->refcnt);
+}
+
+bool trace_event_dyn_busy(struct trace_event_call *call)
+{
+ return atomic_read(&call->refcnt) != 0;
+}
+
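The refcount helpers above give code outside this file a way to pin a dynamic event while it is being used. A hedged sketch of the intended calling pattern (my_use_event() is invented for illustration):

	static int my_use_event(struct trace_event_call *call)
	{
		if ((call->flags & TRACE_EVENT_FL_DYNAMIC) &&
		    !trace_event_dyn_try_get_ref(call))
			return -ENOENT;		/* the event was removed under us */

		/* ... use 'call'; the reference keeps it from being freed ... */

		if (call->flags & TRACE_EVENT_FL_DYNAMIC)
			trace_event_dyn_put_ref(call);
		return 0;
	}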
int dyn_event_register(struct dyn_event_operations *ops)
{
if (!ops || !ops->create || !ops->show || !ops->is_busy ||
@@ -31,23 +69,31 @@ int dyn_event_register(struct dyn_event_operations *ops)
return 0;
}
-int dyn_event_release(int argc, char **argv, struct dyn_event_operations *type)
+int dyn_event_release(const char *raw_command, struct dyn_event_operations *type)
{
struct dyn_event *pos, *n;
char *system = NULL, *event, *p;
- int ret = -ENOENT;
+ int argc, ret = -ENOENT;
+ char **argv;
+
+ argv = argv_split(GFP_KERNEL, raw_command, &argc);
+ if (!argv)
+ return -ENOMEM;
if (argv[0][0] == '-') {
- if (argv[0][1] != ':')
- return -EINVAL;
+ if (argv[0][1] != ':') {
+ ret = -EINVAL;
+ goto out;
+ }
event = &argv[0][2];
} else {
event = strchr(argv[0], ':');
- if (!event)
- return -EINVAL;
+ if (!event) {
+ ret = -EINVAL;
+ goto out;
+ }
event++;
}
- argc--; argv++;
p = strchr(event, '/');
if (p) {
@@ -55,37 +101,41 @@ int dyn_event_release(int argc, char **argv, struct dyn_event_operations *type)
event = p + 1;
*p = '\0';
}
- if (event[0] == '\0')
- return -EINVAL;
+ if (!system && event[0] == '\0') {
+ ret = -EINVAL;
+ goto out;
+ }
mutex_lock(&event_mutex);
for_each_dyn_event_safe(pos, n) {
if (type && type != pos->ops)
continue;
if (!pos->ops->match(system, event,
- argc, (const char **)argv, pos))
+ argc - 1, (const char **)argv + 1, pos))
continue;
ret = pos->ops->free(pos);
if (ret)
break;
}
+ tracing_reset_all_online_cpus();
mutex_unlock(&event_mutex);
-
+out:
+ argv_free(argv);
return ret;
}
-static int create_dyn_event(int argc, char **argv)
+static int create_dyn_event(const char *raw_command)
{
struct dyn_event_operations *ops;
int ret = -ENODEV;
- if (argv[0][0] == '-' || argv[0][0] == '!')
- return dyn_event_release(argc, argv, NULL);
+ if (raw_command[0] == '-' || raw_command[0] == '!')
+ return dyn_event_release(raw_command, NULL);
mutex_lock(&dyn_event_ops_mutex);
list_for_each_entry(ops, &dyn_event_ops_list, list) {
- ret = ops->create(argc, (const char **)argv);
+ ret = ops->create(raw_command);
if (!ret || ret != -ECANCELED)
break;
}
@@ -165,6 +215,7 @@ int dyn_events_release_all(struct dyn_event_operations *type)
break;
}
out:
+ tracing_reset_all_online_cpus();
mutex_unlock(&event_mutex);
return ret;
@@ -206,19 +257,14 @@ static const struct file_operations dynamic_events_ops = {
/* Make a tracefs interface for controlling dynamic events */
static __init int init_dynamic_event(void)
{
- struct dentry *d_tracer;
- struct dentry *entry;
+ int ret;
- d_tracer = tracing_init_dentry();
- if (IS_ERR(d_tracer))
+ ret = tracing_init_dentry();
+ if (ret)
return 0;
- entry = tracefs_create_file("dynamic_events", 0644, d_tracer,
- NULL, &dynamic_events_ops);
-
- /* Event list interface */
- if (!entry)
- pr_warn("Could not create tracefs 'dynamic_events' entry\n");
+ trace_create_file("dynamic_events", TRACE_MODE_WRITE, NULL,
+ NULL, &dynamic_events_ops);
return 0;
}
@@ -276,7 +322,7 @@ int dynevent_arg_add(struct dynevent_cmd *cmd,
* arguments of the form 'type variable_name;' or 'x+y'.
*
* The lhs argument string will be appended to the current cmd string,
- * followed by an operator, if applicable, followd by the rhs string,
+ * followed by an operator, if applicable, followed by the rhs string,
* followed finally by a separator, if applicable. Before the
* argument is added, the @check_arg function, if present, will be
* used to check the sanity of the current arg strings.
@@ -402,7 +448,7 @@ void dynevent_arg_init(struct dynevent_arg *arg,
* whitespace, all followed by a separator, if applicable. After the
* first arg string is successfully appended to the command string,
* the optional @operator is appended, followed by the second arg and
- * and optional @separator. If no separator was specified when
+ * optional @separator. If no separator was specified when
* initializing the arg, a space will be appended.
*/
void dynevent_arg_pair_init(struct dynevent_arg_pair *arg_pair,
diff --git a/kernel/trace/trace_dynevent.h b/kernel/trace/trace_dynevent.h
index d6857a254ede..936477a111d3 100644
--- a/kernel/trace/trace_dynevent.h
+++ b/kernel/trace/trace_dynevent.h
@@ -29,17 +29,17 @@ struct dyn_event;
* @show: Showing method. This is invoked when user reads the event definitions
* via dynamic_events interface.
* @is_busy: Check whether given event is busy so that it can not be deleted.
- * Return true if it is busy, otherwides false.
- * @free: Delete the given event. Return 0 if success, otherwides error.
+ * Return true if it is busy, otherwise false.
+ * @free: Delete the given event. Return 0 if success, otherwise error.
* @match: Check whether given event and system name match this event. The argc
- * and argv is used for exact match. Return true if it matches, otherwides
+ * and argv is used for exact match. Return true if it matches, otherwise
* false.
*
* Except for @create, these methods are called under holding event_mutex.
*/
struct dyn_event_operations {
struct list_head list;
- int (*create)(int argc, const char *argv[]);
+ int (*create)(const char *raw_command);
int (*show)(struct seq_file *m, struct dyn_event *ev);
bool (*is_busy)(struct dyn_event *ev);
int (*free)(struct dyn_event *ev);
@@ -76,13 +76,15 @@ int dyn_event_init(struct dyn_event *ev, struct dyn_event_operations *ops)
return 0;
}
-static inline int dyn_event_add(struct dyn_event *ev)
+static inline int dyn_event_add(struct dyn_event *ev,
+ struct trace_event_call *call)
{
lockdep_assert_held(&event_mutex);
if (!ev || !ev->ops)
return -EINVAL;
+ call->flags |= TRACE_EVENT_FL_DYNAMIC;
list_add_tail(&ev->list, &dyn_event_list);
return 0;
}
@@ -97,7 +99,7 @@ void *dyn_event_seq_start(struct seq_file *m, loff_t *pos);
void *dyn_event_seq_next(struct seq_file *m, void *v, loff_t *pos);
void dyn_event_seq_stop(struct seq_file *m, void *v);
int dyn_events_release_all(struct dyn_event_operations *type);
-int dyn_event_release(int argc, char **argv, struct dyn_event_operations *type);
+int dyn_event_release(const char *raw_command, struct dyn_event_operations *type);
/*
* for_each_dyn_event - iterate over the dyn_event list
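With create() now taking the raw command string, a dynamic-event backend registers itself roughly as sketched below. Everything here is hypothetical (the my_backend_* names, the 'm' prefix and the trivial stubs are invented); the one convention taken from the code above is that create() returns -ECANCELED when the command is not for this backend, so the next registered backend gets a try.

	static int my_backend_create(const char *raw_command)
	{
		if (raw_command[0] != 'm')
			return -ECANCELED;	/* not ours, let other backends try */
		/* parse raw_command (e.g. with argv_split()) and register the event */
		return 0;
	}

	static int my_backend_show(struct seq_file *m, struct dyn_event *ev)
	{
		seq_puts(m, "m:example\n");
		return 0;
	}

	static bool my_backend_is_busy(struct dyn_event *ev)
	{
		return false;			/* placeholder: nothing pins it */
	}

	static int my_backend_free(struct dyn_event *ev)
	{
		/* placeholder: undo whatever create() allocated */
		return 0;
	}

	static bool my_backend_match(const char *system, const char *event,
				     int argc, const char **argv, struct dyn_event *ev)
	{
		return !strcmp(event, "example");
	}

	static struct dyn_event_operations my_backend_ops = {
		.create		= my_backend_create,
		.show		= my_backend_show,
		.is_busy	= my_backend_is_busy,
		.free		= my_backend_free,
		.match		= my_backend_match,
	};

	static int __init my_backend_init(void)
	{
		return dyn_event_register(&my_backend_ops);
	}
	core_initcall(my_backend_init);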
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index 18c4a58aff79..c47422b20908 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -32,7 +32,7 @@
* to be deciphered for the format file. Although these macros
* may become out of sync with the internal structure, they
* will create a compile error if it happens. Since the
- * internel structures are just tracing helpers, this is not
+ * internal structures are just tracing helpers, this is not
* an issue.
*
* When an internal structure is used, it should use:
@@ -86,6 +86,8 @@ FTRACE_ENTRY_PACKED(funcgraph_entry, ftrace_graph_ent_entry,
);
/* Function return entry */
+#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
+
FTRACE_ENTRY_PACKED(funcgraph_exit, ftrace_graph_ret_entry,
TRACE_GRAPH_RET,
@@ -93,10 +95,32 @@ FTRACE_ENTRY_PACKED(funcgraph_exit, ftrace_graph_ret_entry,
F_STRUCT(
__field_struct( struct ftrace_graph_ret, ret )
__field_packed( unsigned long, ret, func )
- __field_packed( unsigned long, ret, overrun )
+ __field_packed( unsigned long, ret, retval )
+ __field_packed( int, ret, depth )
+ __field_packed( unsigned int, ret, overrun )
__field_packed( unsigned long long, ret, calltime)
__field_packed( unsigned long long, ret, rettime )
+ ),
+
+ F_printk("<-- %ps (%d) (start: %llx end: %llx) over: %d retval: %lx",
+ (void *)__entry->func, __entry->depth,
+ __entry->calltime, __entry->rettime,
+ __entry->overrun, __entry->retval)
+);
+
+#else
+
+FTRACE_ENTRY_PACKED(funcgraph_exit, ftrace_graph_ret_entry,
+
+ TRACE_GRAPH_RET,
+
+ F_STRUCT(
+ __field_struct( struct ftrace_graph_ret, ret )
+ __field_packed( unsigned long, ret, func )
__field_packed( int, ret, depth )
+ __field_packed( unsigned int, ret, overrun )
+ __field_packed( unsigned long long, ret, calltime)
+ __field_packed( unsigned long long, ret, rettime )
),
F_printk("<-- %ps (%d) (start: %llx end: %llx) over: %d",
@@ -105,6 +129,8 @@ FTRACE_ENTRY_PACKED(funcgraph_exit, ftrace_graph_ret_entry,
__entry->depth)
);
+#endif
+
/*
* Context switch trace entry - which task (and prio) we switched from/to:
*
@@ -164,7 +190,7 @@ FTRACE_ENTRY(kernel_stack, stack_entry,
F_STRUCT(
__field( int, size )
- __array( unsigned long, caller, FTRACE_STACK_ENTRIES )
+ __stack_array( unsigned long, caller, FTRACE_STACK_ENTRIES, size)
),
F_printk("\t=> %ps\n\t=> %ps\n\t=> %ps\n"
@@ -338,3 +364,66 @@ FTRACE_ENTRY(hwlat, hwlat_entry,
__entry->nmi_total_ts,
__entry->nmi_count)
);
+
+#define FUNC_REPEATS_GET_DELTA_TS(entry) \
+ (((u64)(entry)->top_delta_ts << 32) | (entry)->bottom_delta_ts) \
+
+FTRACE_ENTRY(func_repeats, func_repeats_entry,
+
+ TRACE_FUNC_REPEATS,
+
+ F_STRUCT(
+ __field( unsigned long, ip )
+ __field( unsigned long, parent_ip )
+ __field( u16 , count )
+ __field( u16 , top_delta_ts )
+ __field( u32 , bottom_delta_ts )
+ ),
+
+ F_printk(" %ps <-%ps\t(repeats:%u delta: -%llu)",
+ (void *)__entry->ip,
+ (void *)__entry->parent_ip,
+ __entry->count,
+ FUNC_REPEATS_GET_DELTA_TS(__entry))
+);
+
+FTRACE_ENTRY(osnoise, osnoise_entry,
+
+ TRACE_OSNOISE,
+
+ F_STRUCT(
+ __field( u64, noise )
+ __field( u64, runtime )
+ __field( u64, max_sample )
+ __field( unsigned int, hw_count )
+ __field( unsigned int, nmi_count )
+ __field( unsigned int, irq_count )
+ __field( unsigned int, softirq_count )
+ __field( unsigned int, thread_count )
+ ),
+
+ F_printk("noise:%llu\tmax_sample:%llu\thw:%u\tnmi:%u\tirq:%u\tsoftirq:%u\tthread:%u\n",
+ __entry->noise,
+ __entry->max_sample,
+ __entry->hw_count,
+ __entry->nmi_count,
+ __entry->irq_count,
+ __entry->softirq_count,
+ __entry->thread_count)
+);
+
+FTRACE_ENTRY(timerlat, timerlat_entry,
+
+ TRACE_TIMERLAT,
+
+ F_STRUCT(
+ __field( unsigned int, seqnum )
+ __field( int, context )
+ __field( u64, timer_latency )
+ ),
+
+ F_printk("seq:%u\tcontext:%d\ttimer_latency:%llu\n",
+ __entry->seqnum,
+ __entry->context,
+ __entry->timer_latency)
+);
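The new func_repeats entry above splits its timestamp delta into a 16-bit top half and a 32-bit bottom half, and FUNC_REPEATS_GET_DELTA_TS() stitches them back together. A minimal standalone sketch of that packing, assuming the delta fits in 48 bits as the field sizes imply (userspace C; the demo_* names are invented):

#include <stdint.h>
#include <stdio.h>

/* Mirror of the split used by func_repeats_entry: 16-bit top, 32-bit bottom. */
struct demo_func_repeats {
	uint16_t top_delta_ts;
	uint32_t bottom_delta_ts;
};

/* Pack an up-to-48-bit delta into the two fields. */
static void demo_set_delta(struct demo_func_repeats *e, uint64_t delta)
{
	e->top_delta_ts = (uint16_t)(delta >> 32);
	e->bottom_delta_ts = (uint32_t)delta;
}

/* Same recombination as FUNC_REPEATS_GET_DELTA_TS(). */
static uint64_t demo_get_delta(const struct demo_func_repeats *e)
{
	return ((uint64_t)e->top_delta_ts << 32) | e->bottom_delta_ts;
}

int main(void)
{
	struct demo_func_repeats e;

	demo_set_delta(&e, 0x123456789abULL);   /* a 48-bit nanosecond delta */
	printf("delta = %#llx\n", (unsigned long long)demo_get_delta(&e));
	return 0;
}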
diff --git a/kernel/trace/trace_eprobe.c b/kernel/trace/trace_eprobe.c
new file mode 100644
index 000000000000..03c851f57969
--- /dev/null
+++ b/kernel/trace/trace_eprobe.c
@@ -0,0 +1,984 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * event probes
+ *
+ * Part of this code was copied from kernel/trace/trace_kprobe.c written by
+ * Masami Hiramatsu <mhiramat@kernel.org>
+ *
+ * Copyright (C) 2021, VMware Inc, Steven Rostedt <rostedt@goodmis.org>
+ * Copyright (C) 2021, VMware Inc, Tzvetomir Stoyanov <tz.stoyanov@gmail.com>
+ *
+ */
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/ftrace.h>
+
+#include "trace_dynevent.h"
+#include "trace_probe.h"
+#include "trace_probe_tmpl.h"
+#include "trace_probe_kernel.h"
+
+#define EPROBE_EVENT_SYSTEM "eprobes"
+
+struct trace_eprobe {
+ /* tracepoint system */
+ const char *event_system;
+
+ /* tracepoint event */
+ const char *event_name;
+
+ /* filter string for the tracepoint */
+ char *filter_str;
+
+ struct trace_event_call *event;
+
+ struct dyn_event devent;
+ struct trace_probe tp;
+};
+
+struct eprobe_data {
+ struct trace_event_file *file;
+ struct trace_eprobe *ep;
+};
+
+
+#define for_each_trace_eprobe_tp(ep, _tp) \
+ list_for_each_entry(ep, trace_probe_probe_list(_tp), tp.list)
+
+static int __trace_eprobe_create(int argc, const char *argv[]);
+
+static void trace_event_probe_cleanup(struct trace_eprobe *ep)
+{
+ if (!ep)
+ return;
+ trace_probe_cleanup(&ep->tp);
+ kfree(ep->event_name);
+ kfree(ep->event_system);
+ if (ep->event)
+ trace_event_put_ref(ep->event);
+ kfree(ep->filter_str);
+ kfree(ep);
+}
+
+static struct trace_eprobe *to_trace_eprobe(struct dyn_event *ev)
+{
+ return container_of(ev, struct trace_eprobe, devent);
+}
+
+static int eprobe_dyn_event_create(const char *raw_command)
+{
+ return trace_probe_create(raw_command, __trace_eprobe_create);
+}
+
+static int eprobe_dyn_event_show(struct seq_file *m, struct dyn_event *ev)
+{
+ struct trace_eprobe *ep = to_trace_eprobe(ev);
+ int i;
+
+ seq_printf(m, "e:%s/%s", trace_probe_group_name(&ep->tp),
+ trace_probe_name(&ep->tp));
+ seq_printf(m, " %s.%s", ep->event_system, ep->event_name);
+
+ for (i = 0; i < ep->tp.nr_args; i++)
+ seq_printf(m, " %s=%s", ep->tp.args[i].name, ep->tp.args[i].comm);
+ seq_putc(m, '\n');
+
+ return 0;
+}
+
+static int unregister_trace_eprobe(struct trace_eprobe *ep)
+{
+ /* If other probes are on the event, just unregister eprobe */
+ if (trace_probe_has_sibling(&ep->tp))
+ goto unreg;
+
+ /* Enabled event can not be unregistered */
+ if (trace_probe_is_enabled(&ep->tp))
+ return -EBUSY;
+
+ /* Will fail if probe is being used by ftrace or perf */
+ if (trace_probe_unregister_event_call(&ep->tp))
+ return -EBUSY;
+
+unreg:
+ dyn_event_remove(&ep->devent);
+ trace_probe_unlink(&ep->tp);
+
+ return 0;
+}
+
+static int eprobe_dyn_event_release(struct dyn_event *ev)
+{
+ struct trace_eprobe *ep = to_trace_eprobe(ev);
+ int ret = unregister_trace_eprobe(ep);
+
+ if (!ret)
+ trace_event_probe_cleanup(ep);
+ return ret;
+}
+
+static bool eprobe_dyn_event_is_busy(struct dyn_event *ev)
+{
+ struct trace_eprobe *ep = to_trace_eprobe(ev);
+
+ return trace_probe_is_enabled(&ep->tp);
+}
+
+static bool eprobe_dyn_event_match(const char *system, const char *event,
+ int argc, const char **argv, struct dyn_event *ev)
+{
+ struct trace_eprobe *ep = to_trace_eprobe(ev);
+ const char *slash;
+
+ /*
+ * We match the following:
+ * event only - match all eprobes with event name
+ * system and event only - match all system/event probes
+ * system only - match all system probes
+ *
+ * The following are matched when more arguments are given:
+ *
+ * attached system/event - If the arg has the system and event
+ * the probe is attached to, match
+ * probes with the attachment.
+ *
+ * If any more args are given, then it requires a full match.
+ */
+
+ /*
+ * If system exists, but this probe is not part of that system
+ * do not match.
+ */
+ if (system && strcmp(trace_probe_group_name(&ep->tp), system) != 0)
+ return false;
+
+ /* Must match the event name */
+ if (event[0] != '\0' && strcmp(trace_probe_name(&ep->tp), event) != 0)
+ return false;
+
+ /* No arguments match all */
+ if (argc < 1)
+ return true;
+
+ /* First argument is the system/event the probe is attached to */
+
+ slash = strchr(argv[0], '/');
+ if (!slash)
+ slash = strchr(argv[0], '.');
+ if (!slash)
+ return false;
+
+ if (strncmp(ep->event_system, argv[0], slash - argv[0]))
+ return false;
+ if (strcmp(ep->event_name, slash + 1))
+ return false;
+
+ argc--;
+ argv++;
+
+ /* If there are no other args, then match */
+ if (argc < 1)
+ return true;
+
+ return trace_probe_match_command_args(&ep->tp, argc, argv);
+}
+
+static struct dyn_event_operations eprobe_dyn_event_ops = {
+ .create = eprobe_dyn_event_create,
+ .show = eprobe_dyn_event_show,
+ .is_busy = eprobe_dyn_event_is_busy,
+ .free = eprobe_dyn_event_release,
+ .match = eprobe_dyn_event_match,
+};
+
+static struct trace_eprobe *alloc_event_probe(const char *group,
+ const char *this_event,
+ struct trace_event_call *event,
+ int nargs)
+{
+ struct trace_eprobe *ep;
+ const char *event_name;
+ const char *sys_name;
+ int ret = -ENOMEM;
+
+ if (!event)
+ return ERR_PTR(-ENODEV);
+
+ sys_name = event->class->system;
+ event_name = trace_event_name(event);
+
+ ep = kzalloc(struct_size(ep, tp.args, nargs), GFP_KERNEL);
+ if (!ep) {
+ trace_event_put_ref(event);
+ goto error;
+ }
+ ep->event = event;
+ ep->event_name = kstrdup(event_name, GFP_KERNEL);
+ if (!ep->event_name)
+ goto error;
+ ep->event_system = kstrdup(sys_name, GFP_KERNEL);
+ if (!ep->event_system)
+ goto error;
+
+ ret = trace_probe_init(&ep->tp, this_event, group, false);
+ if (ret < 0)
+ goto error;
+
+ dyn_event_init(&ep->devent, &eprobe_dyn_event_ops);
+ return ep;
+error:
+ trace_event_probe_cleanup(ep);
+ return ERR_PTR(ret);
+}
+
+static int eprobe_event_define_fields(struct trace_event_call *event_call)
+{
+ struct eprobe_trace_entry_head field;
+ struct trace_probe *tp;
+
+ tp = trace_probe_primary_from_call(event_call);
+ if (WARN_ON_ONCE(!tp))
+ return -ENOENT;
+
+ return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
+}
+
+static struct trace_event_fields eprobe_fields_array[] = {
+ { .type = TRACE_FUNCTION_TYPE,
+ .define_fields = eprobe_event_define_fields },
+ {}
+};
+
+/* Event entry printers */
+static enum print_line_t
+print_eprobe_event(struct trace_iterator *iter, int flags,
+ struct trace_event *event)
+{
+ struct eprobe_trace_entry_head *field;
+ struct trace_event_call *pevent;
+ struct trace_event *probed_event;
+ struct trace_seq *s = &iter->seq;
+ struct trace_eprobe *ep;
+ struct trace_probe *tp;
+ unsigned int type;
+
+ field = (struct eprobe_trace_entry_head *)iter->ent;
+ tp = trace_probe_primary_from_call(
+ container_of(event, struct trace_event_call, event));
+ if (WARN_ON_ONCE(!tp))
+ goto out;
+
+ ep = container_of(tp, struct trace_eprobe, tp);
+ type = ep->event->event.type;
+
+ trace_seq_printf(s, "%s: (", trace_probe_name(tp));
+
+ probed_event = ftrace_find_event(type);
+ if (probed_event) {
+ pevent = container_of(probed_event, struct trace_event_call, event);
+ trace_seq_printf(s, "%s.%s", pevent->class->system,
+ trace_event_name(pevent));
+ } else {
+ trace_seq_printf(s, "%u", type);
+ }
+
+ trace_seq_putc(s, ')');
+
+ if (trace_probe_print_args(s, tp->args, tp->nr_args,
+ (u8 *)&field[1], field) < 0)
+ goto out;
+
+ trace_seq_putc(s, '\n');
+ out:
+ return trace_handle_return(s);
+}
+
+static nokprobe_inline unsigned long
+get_event_field(struct fetch_insn *code, void *rec)
+{
+ struct ftrace_event_field *field = code->data;
+ unsigned long val;
+ void *addr;
+
+ addr = rec + field->offset;
+
+ if (is_string_field(field)) {
+ switch (field->filter_type) {
+ case FILTER_DYN_STRING:
+ val = (unsigned long)(rec + (*(unsigned int *)addr & 0xffff));
+ break;
+ case FILTER_RDYN_STRING:
+ val = (unsigned long)(addr + (*(unsigned int *)addr & 0xffff));
+ break;
+ case FILTER_STATIC_STRING:
+ val = (unsigned long)addr;
+ break;
+ case FILTER_PTR_STRING:
+ val = (unsigned long)(*(char *)addr);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+ return val;
+ }
+
+ switch (field->size) {
+ case 1:
+ if (field->is_signed)
+ val = *(char *)addr;
+ else
+ val = *(unsigned char *)addr;
+ break;
+ case 2:
+ if (field->is_signed)
+ val = *(short *)addr;
+ else
+ val = *(unsigned short *)addr;
+ break;
+ case 4:
+ if (field->is_signed)
+ val = *(int *)addr;
+ else
+ val = *(unsigned int *)addr;
+ break;
+ default:
+ if (field->is_signed)
+ val = *(long *)addr;
+ else
+ val = *(unsigned long *)addr;
+ break;
+ }
+ return val;
+}
+
+static int get_eprobe_size(struct trace_probe *tp, void *rec)
+{
+ struct fetch_insn *code;
+ struct probe_arg *arg;
+ int i, len, ret = 0;
+
+ for (i = 0; i < tp->nr_args; i++) {
+ arg = tp->args + i;
+ if (arg->dynamic) {
+ unsigned long val;
+
+ code = arg->code;
+ retry:
+ switch (code->op) {
+ case FETCH_OP_TP_ARG:
+ val = get_event_field(code, rec);
+ break;
+ case FETCH_NOP_SYMBOL: /* Ignore a place holder */
+ code++;
+ goto retry;
+ default:
+ if (process_common_fetch_insn(code, &val) < 0)
+ continue;
+ }
+ code++;
+ len = process_fetch_insn_bottom(code, val, NULL, NULL);
+ if (len > 0)
+ ret += len;
+ }
+ }
+
+ return ret;
+}
+
+/* Kprobe specific fetch functions */
+
+/* Note that we don't verify it, since the code does not come from user space */
+static int
+process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
+ void *base)
+{
+ unsigned long val;
+ int ret;
+
+ retry:
+ switch (code->op) {
+ case FETCH_OP_TP_ARG:
+ val = get_event_field(code, rec);
+ break;
+ case FETCH_NOP_SYMBOL: /* Ignore a place holder */
+ code++;
+ goto retry;
+ default:
+ ret = process_common_fetch_insn(code, &val);
+ if (ret < 0)
+ return ret;
+ }
+ code++;
+ return process_fetch_insn_bottom(code, val, dest, base);
+}
+NOKPROBE_SYMBOL(process_fetch_insn)
+
+/* eprobe handler */
+static inline void
+__eprobe_trace_func(struct eprobe_data *edata, void *rec)
+{
+ struct eprobe_trace_entry_head *entry;
+ struct trace_event_call *call = trace_probe_event_call(&edata->ep->tp);
+ struct trace_event_buffer fbuffer;
+ int dsize;
+
+ if (WARN_ON_ONCE(call != edata->file->event_call))
+ return;
+
+ if (trace_trigger_soft_disabled(edata->file))
+ return;
+
+ dsize = get_eprobe_size(&edata->ep->tp, rec);
+
+ entry = trace_event_buffer_reserve(&fbuffer, edata->file,
+ sizeof(*entry) + edata->ep->tp.size + dsize);
+
+ if (!entry)
+ return;
+
+ entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
+ store_trace_args(&entry[1], &edata->ep->tp, rec, sizeof(*entry), dsize);
+
+ trace_event_buffer_commit(&fbuffer);
+}
+
+/*
+ * The event probe implementation uses event triggers to get access to
+ * the event it is attached to, but is not an actual trigger. The below
+ * functions are just stubs to fulfill what is needed to use the trigger
+ * infrastructure.
+ */
+static int eprobe_trigger_init(struct event_trigger_data *data)
+{
+ return 0;
+}
+
+static void eprobe_trigger_free(struct event_trigger_data *data)
+{
+
+}
+
+static int eprobe_trigger_print(struct seq_file *m,
+ struct event_trigger_data *data)
+{
+ /* Do not print eprobe event triggers */
+ return 0;
+}
+
+static void eprobe_trigger_func(struct event_trigger_data *data,
+ struct trace_buffer *buffer, void *rec,
+ struct ring_buffer_event *rbe)
+{
+ struct eprobe_data *edata = data->private_data;
+
+ if (unlikely(!rec))
+ return;
+
+ __eprobe_trace_func(edata, rec);
+}
+
+static struct event_trigger_ops eprobe_trigger_ops = {
+ .trigger = eprobe_trigger_func,
+ .print = eprobe_trigger_print,
+ .init = eprobe_trigger_init,
+ .free = eprobe_trigger_free,
+};
+
+static int eprobe_trigger_cmd_parse(struct event_command *cmd_ops,
+ struct trace_event_file *file,
+ char *glob, char *cmd,
+ char *param_and_filter)
+{
+ return -1;
+}
+
+static int eprobe_trigger_reg_func(char *glob,
+ struct event_trigger_data *data,
+ struct trace_event_file *file)
+{
+ return -1;
+}
+
+static void eprobe_trigger_unreg_func(char *glob,
+ struct event_trigger_data *data,
+ struct trace_event_file *file)
+{
+
+}
+
+static struct event_trigger_ops *eprobe_trigger_get_ops(char *cmd,
+ char *param)
+{
+ return &eprobe_trigger_ops;
+}
+
+static struct event_command event_trigger_cmd = {
+ .name = "eprobe",
+ .trigger_type = ETT_EVENT_EPROBE,
+ .flags = EVENT_CMD_FL_NEEDS_REC,
+ .parse = eprobe_trigger_cmd_parse,
+ .reg = eprobe_trigger_reg_func,
+ .unreg = eprobe_trigger_unreg_func,
+ .unreg_all = NULL,
+ .get_trigger_ops = eprobe_trigger_get_ops,
+ .set_filter = NULL,
+};
+
+static struct event_trigger_data *
+new_eprobe_trigger(struct trace_eprobe *ep, struct trace_event_file *file)
+{
+ struct event_trigger_data *trigger;
+ struct event_filter *filter = NULL;
+ struct eprobe_data *edata;
+ int ret;
+
+ edata = kzalloc(sizeof(*edata), GFP_KERNEL);
+ trigger = kzalloc(sizeof(*trigger), GFP_KERNEL);
+ if (!trigger || !edata) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ trigger->flags = EVENT_TRIGGER_FL_PROBE;
+ trigger->count = -1;
+ trigger->ops = &eprobe_trigger_ops;
+
+ /*
+ * EVENT PROBE triggers are not registered as commands with
+ * register_event_command(), as they are not controlled by the user
+ * from the trigger file
+ */
+ trigger->cmd_ops = &event_trigger_cmd;
+
+ INIT_LIST_HEAD(&trigger->list);
+
+ if (ep->filter_str) {
+ ret = create_event_filter(file->tr, ep->event,
+ ep->filter_str, false, &filter);
+ if (ret)
+ goto error;
+ }
+ RCU_INIT_POINTER(trigger->filter, filter);
+
+ edata->file = file;
+ edata->ep = ep;
+ trigger->private_data = edata;
+
+ return trigger;
+error:
+ free_event_filter(filter);
+ kfree(edata);
+ kfree(trigger);
+ return ERR_PTR(ret);
+}
+
+static int enable_eprobe(struct trace_eprobe *ep,
+ struct trace_event_file *eprobe_file)
+{
+ struct event_trigger_data *trigger;
+ struct trace_event_file *file;
+ struct trace_array *tr = eprobe_file->tr;
+
+ file = find_event_file(tr, ep->event_system, ep->event_name);
+ if (!file)
+ return -ENOENT;
+ trigger = new_eprobe_trigger(ep, eprobe_file);
+ if (IS_ERR(trigger))
+ return PTR_ERR(trigger);
+
+ list_add_tail_rcu(&trigger->list, &file->triggers);
+
+ trace_event_trigger_enable_disable(file, 1);
+ update_cond_flag(file);
+
+ return 0;
+}
+
+static struct trace_event_functions eprobe_funcs = {
+ .trace = print_eprobe_event
+};
+
+static int disable_eprobe(struct trace_eprobe *ep,
+ struct trace_array *tr)
+{
+ struct event_trigger_data *trigger = NULL, *iter;
+ struct trace_event_file *file;
+ struct event_filter *filter;
+ struct eprobe_data *edata;
+
+ file = find_event_file(tr, ep->event_system, ep->event_name);
+ if (!file)
+ return -ENOENT;
+
+ list_for_each_entry(iter, &file->triggers, list) {
+ if (!(iter->flags & EVENT_TRIGGER_FL_PROBE))
+ continue;
+ edata = iter->private_data;
+ if (edata->ep == ep) {
+ trigger = iter;
+ break;
+ }
+ }
+ if (!trigger)
+ return -ENODEV;
+
+ list_del_rcu(&trigger->list);
+
+ trace_event_trigger_enable_disable(file, 0);
+ update_cond_flag(file);
+
+ /* Make sure nothing is using the edata or trigger */
+ tracepoint_synchronize_unregister();
+
+ filter = rcu_access_pointer(trigger->filter);
+
+ if (filter)
+ free_event_filter(filter);
+ kfree(edata);
+ kfree(trigger);
+
+ return 0;
+}
+
+static int enable_trace_eprobe(struct trace_event_call *call,
+ struct trace_event_file *file)
+{
+ struct trace_probe *tp;
+ struct trace_eprobe *ep;
+ bool enabled;
+ int ret = 0;
+ int cnt = 0;
+
+ tp = trace_probe_primary_from_call(call);
+ if (WARN_ON_ONCE(!tp))
+ return -ENODEV;
+ enabled = trace_probe_is_enabled(tp);
+
+ /* This also changes "enabled" state */
+ if (file) {
+ ret = trace_probe_add_file(tp, file);
+ if (ret)
+ return ret;
+ } else
+ trace_probe_set_flag(tp, TP_FLAG_PROFILE);
+
+ if (enabled)
+ return 0;
+
+ for_each_trace_eprobe_tp(ep, tp) {
+ ret = enable_eprobe(ep, file);
+ if (ret)
+ break;
+ enabled = true;
+ cnt++;
+ }
+
+ if (ret) {
+ /* Failed to enable one of them. Roll back all */
+ if (enabled) {
+ /*
+ * It's a bug if one failed for something other than memory
+ * not being available but another eprobe succeeded.
+ */
+ WARN_ON_ONCE(ret != -ENOMEM);
+
+ for_each_trace_eprobe_tp(ep, tp) {
+ disable_eprobe(ep, file->tr);
+ if (!--cnt)
+ break;
+ }
+ }
+ if (file)
+ trace_probe_remove_file(tp, file);
+ else
+ trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
+ }
+
+ return ret;
+}
+
+static int disable_trace_eprobe(struct trace_event_call *call,
+ struct trace_event_file *file)
+{
+ struct trace_probe *tp;
+ struct trace_eprobe *ep;
+
+ tp = trace_probe_primary_from_call(call);
+ if (WARN_ON_ONCE(!tp))
+ return -ENODEV;
+
+ if (file) {
+ if (!trace_probe_get_file_link(tp, file))
+ return -ENOENT;
+ if (!trace_probe_has_single_file(tp))
+ goto out;
+ trace_probe_clear_flag(tp, TP_FLAG_TRACE);
+ } else
+ trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
+
+ if (!trace_probe_is_enabled(tp)) {
+ for_each_trace_eprobe_tp(ep, tp)
+ disable_eprobe(ep, file->tr);
+ }
+
+ out:
+ if (file)
+ /*
+ * Synchronization is done in the function below. For a perf event,
+ * file == NULL and perf_trace_event_unreg() calls
+ * tracepoint_synchronize_unregister() to synchronize the event,
+ * so we don't need to care about it here.
+ */
+ trace_probe_remove_file(tp, file);
+
+ return 0;
+}
+
+static int eprobe_register(struct trace_event_call *event,
+ enum trace_reg type, void *data)
+{
+ struct trace_event_file *file = data;
+
+ switch (type) {
+ case TRACE_REG_REGISTER:
+ return enable_trace_eprobe(event, file);
+ case TRACE_REG_UNREGISTER:
+ return disable_trace_eprobe(event, file);
+#ifdef CONFIG_PERF_EVENTS
+ case TRACE_REG_PERF_REGISTER:
+ case TRACE_REG_PERF_UNREGISTER:
+ case TRACE_REG_PERF_OPEN:
+ case TRACE_REG_PERF_CLOSE:
+ case TRACE_REG_PERF_ADD:
+ case TRACE_REG_PERF_DEL:
+ return 0;
+#endif
+ }
+ return 0;
+}
+
+static inline void init_trace_eprobe_call(struct trace_eprobe *ep)
+{
+ struct trace_event_call *call = trace_probe_event_call(&ep->tp);
+
+ call->flags = TRACE_EVENT_FL_EPROBE;
+ call->event.funcs = &eprobe_funcs;
+ call->class->fields_array = eprobe_fields_array;
+ call->class->reg = eprobe_register;
+}
+
+static struct trace_event_call *
+find_and_get_event(const char *system, const char *event_name)
+{
+ struct trace_event_call *tp_event;
+ const char *name;
+
+ list_for_each_entry(tp_event, &ftrace_events, list) {
+ /* Skip other probes and ftrace events */
+ if (tp_event->flags &
+ (TRACE_EVENT_FL_IGNORE_ENABLE |
+ TRACE_EVENT_FL_KPROBE |
+ TRACE_EVENT_FL_UPROBE |
+ TRACE_EVENT_FL_EPROBE))
+ continue;
+ if (!tp_event->class->system ||
+ strcmp(system, tp_event->class->system))
+ continue;
+ name = trace_event_name(tp_event);
+ if (!name || strcmp(event_name, name))
+ continue;
+ if (!trace_event_try_get_ref(tp_event))
+ return NULL;
+ return tp_event;
+ }
+ return NULL;
+}
+
+static int trace_eprobe_tp_update_arg(struct trace_eprobe *ep, const char *argv[], int i)
+{
+ struct traceprobe_parse_context ctx = {
+ .event = ep->event,
+ .flags = TPARG_FL_KERNEL | TPARG_FL_TEVENT,
+ };
+ int ret;
+
+ ret = traceprobe_parse_probe_arg(&ep->tp, i, argv[i], &ctx);
+ /* Handle symbols "@" */
+ if (!ret)
+ ret = traceprobe_update_arg(&ep->tp.args[i]);
+
+ traceprobe_finish_parse(&ctx);
+ return ret;
+}
+
+static int trace_eprobe_parse_filter(struct trace_eprobe *ep, int argc, const char *argv[])
+{
+ struct event_filter *dummy = NULL;
+ int i, ret, len = 0;
+ char *p;
+
+ if (argc == 0) {
+ trace_probe_log_err(0, NO_EP_FILTER);
+ return -EINVAL;
+ }
+
+ /* Recover the filter string */
+ for (i = 0; i < argc; i++)
+ len += strlen(argv[i]) + 1;
+
+ ep->filter_str = kzalloc(len, GFP_KERNEL);
+ if (!ep->filter_str)
+ return -ENOMEM;
+
+ p = ep->filter_str;
+ for (i = 0; i < argc; i++) {
+ if (i)
+ ret = snprintf(p, len, " %s", argv[i]);
+ else
+ ret = snprintf(p, len, "%s", argv[i]);
+ p += ret;
+ len -= ret;
+ }
+
+ /*
+ * Ensure the filter string can be parsed correctly. Note, this
+ * filter string is for the original event, not for the eprobe.
+ */
+ ret = create_event_filter(top_trace_array(), ep->event, ep->filter_str,
+ true, &dummy);
+ free_event_filter(dummy);
+ if (ret)
+ goto error;
+
+ return 0;
+error:
+ kfree(ep->filter_str);
+ ep->filter_str = NULL;
+ return ret;
+}
+
+static int __trace_eprobe_create(int argc, const char *argv[])
+{
+ /*
+ * Argument syntax:
+ * e[:[GRP/][ENAME]] SYSTEM.EVENT [FETCHARGS] [if FILTER]
+ * Fetch args (no space):
+ * <name>=$<field>[:TYPE]
+ */
+ const char *event = NULL, *group = EPROBE_EVENT_SYSTEM;
+ const char *sys_event = NULL, *sys_name = NULL;
+ struct trace_event_call *event_call;
+ struct trace_eprobe *ep = NULL;
+ char buf1[MAX_EVENT_NAME_LEN];
+ char buf2[MAX_EVENT_NAME_LEN];
+ char gbuf[MAX_EVENT_NAME_LEN];
+ int ret = 0, filter_idx = 0;
+ int i, filter_cnt;
+
+ if (argc < 2 || argv[0][0] != 'e')
+ return -ECANCELED;
+
+ trace_probe_log_init("event_probe", argc, argv);
+
+ event = strchr(&argv[0][1], ':');
+ if (event) {
+ event++;
+ ret = traceprobe_parse_event_name(&event, &group, gbuf,
+ event - argv[0]);
+ if (ret)
+ goto parse_error;
+ }
+
+ trace_probe_log_set_index(1);
+ sys_event = argv[1];
+ ret = traceprobe_parse_event_name(&sys_event, &sys_name, buf2, 0);
+ if (ret || !sys_event || !sys_name) {
+ trace_probe_log_err(0, NO_EVENT_INFO);
+ goto parse_error;
+ }
+
+ if (!event) {
+ strscpy(buf1, sys_event, MAX_EVENT_NAME_LEN);
+ event = buf1;
+ }
+
+ for (i = 2; i < argc; i++) {
+ if (!strcmp(argv[i], "if")) {
+ filter_idx = i + 1;
+ filter_cnt = argc - filter_idx;
+ argc = i;
+ break;
+ }
+ }
+
+ mutex_lock(&event_mutex);
+ event_call = find_and_get_event(sys_name, sys_event);
+ ep = alloc_event_probe(group, event, event_call, argc - 2);
+ mutex_unlock(&event_mutex);
+
+ if (IS_ERR(ep)) {
+ ret = PTR_ERR(ep);
+ if (ret == -ENODEV)
+ trace_probe_log_err(0, BAD_ATTACH_EVENT);
+ /* This must return -ENOMEM or missing event, else there is a bug */
+ WARN_ON_ONCE(ret != -ENOMEM && ret != -ENODEV);
+ ep = NULL;
+ goto error;
+ }
+
+ if (filter_idx) {
+ trace_probe_log_set_index(filter_idx);
+ ret = trace_eprobe_parse_filter(ep, filter_cnt, argv + filter_idx);
+ if (ret)
+ goto parse_error;
+ } else
+ ep->filter_str = NULL;
+
+ argc -= 2; argv += 2;
+ /* parse arguments */
+ for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
+ trace_probe_log_set_index(i + 2);
+ ret = trace_eprobe_tp_update_arg(ep, argv, i);
+ if (ret)
+ goto error;
+ }
+ ret = traceprobe_set_print_fmt(&ep->tp, PROBE_PRINT_EVENT);
+ if (ret < 0)
+ goto error;
+ init_trace_eprobe_call(ep);
+ mutex_lock(&event_mutex);
+ ret = trace_probe_register_event_call(&ep->tp);
+ if (ret) {
+ if (ret == -EEXIST) {
+ trace_probe_log_set_index(0);
+ trace_probe_log_err(0, EVENT_EXIST);
+ }
+ mutex_unlock(&event_mutex);
+ goto error;
+ }
+ ret = dyn_event_add(&ep->devent, &ep->tp.event->call);
+ mutex_unlock(&event_mutex);
+ return ret;
+parse_error:
+ ret = -EINVAL;
+error:
+ trace_event_probe_cleanup(ep);
+ return ret;
+}
+
+/*
+ * Register dynevent at core_initcall. This allows the kernel to set up
+ * eprobe events in postcore_initcall without tracefs.
+ */
+static __init int trace_events_eprobe_init_early(void)
+{
+ int err = 0;
+
+ err = dyn_event_register(&eprobe_dyn_event_ops);
+ if (err)
+ pr_warn("Could not register eprobe_dyn_event_ops\n");
+
+ return err;
+}
+core_initcall(trace_events_eprobe_init_early);
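get_event_field() in the new trace_eprobe.c reads a field straight out of the attached event's record using only its offset, size, and signedness. Below is a simplified, userspace-only sketch of that scalar decode (the demo_field struct and sample record are invented for illustration; the real code additionally handles the string filter types):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Minimal stand-in for struct ftrace_event_field: just what the decode needs. */
struct demo_field {
	int offset;
	int size;
	int is_signed;
};

/* Read a scalar field out of a raw record the way get_event_field() does for
 * non-string fields: switch on size, honour signedness. */
static long demo_get_field(const struct demo_field *f, const void *rec)
{
	const void *addr = (const char *)rec + f->offset;

	switch (f->size) {
	case 1:
		return f->is_signed ? *(const int8_t *)addr : *(const uint8_t *)addr;
	case 2:
		return f->is_signed ? *(const int16_t *)addr : *(const uint16_t *)addr;
	case 4:
		return f->is_signed ? *(const int32_t *)addr : *(const uint32_t *)addr;
	default:
		return f->is_signed ? *(const long *)addr : (long)*(const unsigned long *)addr;
	}
}

int main(void)
{
	unsigned char rec[8] = { 0 };
	int16_t prio = -5;
	struct demo_field f = { .offset = 2, .size = 2, .is_signed = 1 };

	memcpy(rec + 2, &prio, sizeof(prio));  /* pretend this is a trace record */
	printf("decoded field = %ld\n", demo_get_field(&f, rec));
	return 0;
}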
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 643e0b19920d..05e791241812 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -16,7 +16,7 @@ static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];
/*
* Force it to be aligned to unsigned long to avoid misaligned accesses
- * suprises
+ * surprises
*/
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
perf_trace_t;
@@ -157,7 +157,7 @@ static void perf_trace_event_unreg(struct perf_event *p_event)
int i;
if (--tp_event->perf_refcount > 0)
- goto out;
+ return;
tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);
@@ -176,8 +176,6 @@ static void perf_trace_event_unreg(struct perf_event *p_event)
perf_trace_buf[i] = NULL;
}
}
-out:
- module_put(tp_event->mod);
}
static int perf_trace_event_open(struct perf_event *p_event)
@@ -224,10 +222,10 @@ int perf_trace_init(struct perf_event *p_event)
list_for_each_entry(tp_event, &ftrace_events, list) {
if (tp_event->event.type == event_id &&
tp_event->class && tp_event->class->reg &&
- try_module_get(tp_event->mod)) {
+ trace_event_try_get_ref(tp_event)) {
ret = perf_trace_event_init(tp_event, p_event);
if (ret)
- module_put(tp_event->mod);
+ trace_event_put_ref(tp_event);
break;
}
}
@@ -241,6 +239,7 @@ void perf_trace_destroy(struct perf_event *p_event)
mutex_lock(&event_mutex);
perf_trace_event_close(p_event);
perf_trace_event_unreg(p_event);
+ trace_event_put_ref(p_event->tp_event);
mutex_unlock(&event_mutex);
}
@@ -252,16 +251,12 @@ int perf_kprobe_init(struct perf_event *p_event, bool is_retprobe)
struct trace_event_call *tp_event;
if (p_event->attr.kprobe_func) {
- func = kzalloc(KSYM_NAME_LEN, GFP_KERNEL);
- if (!func)
- return -ENOMEM;
- ret = strncpy_from_user(
- func, u64_to_user_ptr(p_event->attr.kprobe_func),
- KSYM_NAME_LEN);
- if (ret == KSYM_NAME_LEN)
- ret = -E2BIG;
- if (ret < 0)
- goto out;
+ func = strndup_user(u64_to_user_ptr(p_event->attr.kprobe_func),
+ KSYM_NAME_LEN);
+ if (IS_ERR(func)) {
+ ret = PTR_ERR(func);
+ return (ret == -EINVAL) ? -E2BIG : ret;
+ }
if (func[0] == '\0') {
kfree(func);
@@ -292,6 +287,7 @@ void perf_kprobe_destroy(struct perf_event *p_event)
mutex_lock(&event_mutex);
perf_trace_event_close(p_event);
perf_trace_event_unreg(p_event);
+ trace_event_put_ref(p_event->tp_event);
mutex_unlock(&event_mutex);
destroy_local_trace_kprobe(p_event->tp_event);
@@ -347,6 +343,7 @@ void perf_uprobe_destroy(struct perf_event *p_event)
mutex_lock(&event_mutex);
perf_trace_event_close(p_event);
perf_trace_event_unreg(p_event);
+ trace_event_put_ref(p_event->tp_event);
mutex_unlock(&event_mutex);
destroy_local_trace_uprobe(p_event->tp_event);
}
@@ -400,7 +397,8 @@ void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp)
BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));
if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
- "perf buffer not large enough"))
+ "perf buffer not large enough, wanted %d, have %d",
+ size, PERF_MAX_TRACE_SIZE))
return NULL;
*rctxp = rctx = perf_swevent_get_recursion_context();
@@ -421,28 +419,33 @@ NOKPROBE_SYMBOL(perf_trace_buf_alloc);
void perf_trace_buf_update(void *record, u16 type)
{
struct trace_entry *entry = record;
- int pc = preempt_count();
- unsigned long flags;
- local_save_flags(flags);
- tracing_generic_entry_update(entry, type, flags, pc);
+ tracing_generic_entry_update(entry, type, tracing_gen_ctx());
}
NOKPROBE_SYMBOL(perf_trace_buf_update);
#ifdef CONFIG_FUNCTION_TRACER
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *ops, struct pt_regs *pt_regs)
+ struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
struct ftrace_entry *entry;
struct perf_event *event;
struct hlist_head head;
struct pt_regs regs;
int rctx;
+ int bit;
- if ((unsigned long)ops->private != smp_processor_id())
+ if (!rcu_is_watching())
+ return;
+
+ bit = ftrace_test_recursion_trylock(ip, parent_ip);
+ if (bit < 0)
return;
+ if ((unsigned long)ops->private != smp_processor_id())
+ goto out;
+
event = container_of(ops, struct perf_event, ftrace_ops);
/*
@@ -463,13 +466,15 @@ perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
entry = perf_trace_buf_alloc(ENTRY_SIZE, NULL, &rctx);
if (!entry)
- return;
+ goto out;
entry->ip = ip;
entry->parent_ip = parent_ip;
perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN,
1, &regs, &head, NULL);
+out:
+ ftrace_test_recursion_unlock(bit);
#undef ENTRY_SIZE
}
@@ -477,7 +482,6 @@ static int perf_ftrace_function_register(struct perf_event *event)
{
struct ftrace_ops *ops = &event->ftrace_ops;
- ops->flags = FTRACE_OPS_FL_RCU;
ops->func = perf_ftrace_function_call;
ops->private = (void *)(unsigned long)nr_cpu_ids;
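The perf_ftrace_function_call() hunk above wraps the callback body in ftrace_test_recursion_trylock()/ftrace_test_recursion_unlock() so a re-entrant call bails out instead of recursing. As a rough userspace analog of that guard pattern only (a thread-local flag standing in for the kernel's per-context recursion bits; demo_* names invented):

#include <stdio.h>
#include <stdbool.h>

/* Very rough analog of ftrace_test_recursion_trylock(): a per-thread flag
 * instead of the kernel's per-context recursion bitmask. */
static _Thread_local bool demo_in_callback;

static bool demo_recursion_trylock(void)
{
	if (demo_in_callback)
		return false;	/* already inside the callback: refuse */
	demo_in_callback = true;
	return true;
}

static void demo_recursion_unlock(void)
{
	demo_in_callback = false;
}

static void demo_callback(int depth);

/* Something the callback does that might re-enter the callback itself. */
static void demo_do_work(int depth)
{
	if (depth < 3)
		demo_callback(depth + 1);
}

static void demo_callback(int depth)
{
	if (!demo_recursion_trylock()) {
		printf("recursion at depth %d suppressed\n", depth);
		return;
	}
	printf("callback running at depth %d\n", depth);
	demo_do_work(depth);
	demo_recursion_unlock();
}

int main(void)
{
	demo_callback(0);
	return 0;
}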
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index f6f55682d3e2..7c364b87352e 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -38,6 +38,15 @@ DEFINE_MUTEX(event_mutex);
LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_generic_fields);
static LIST_HEAD(ftrace_common_fields);
+static bool eventdir_initialized;
+
+static LIST_HEAD(module_strings);
+
+struct module_string {
+ struct list_head next;
+ struct module *module;
+ char *str;
+};
#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)
@@ -105,7 +114,7 @@ trace_find_event_field(struct trace_event_call *call, char *name)
static int __trace_define_field(struct list_head *head, const char *type,
const char *name, int offset, int size,
- int is_signed, int filter_type)
+ int is_signed, int filter_type, int len)
{
struct ftrace_event_field *field;
@@ -124,6 +133,7 @@ static int __trace_define_field(struct list_head *head, const char *type,
field->offset = offset;
field->size = size;
field->is_signed = is_signed;
+ field->len = len;
list_add(&field->link, head);
@@ -141,14 +151,28 @@ int trace_define_field(struct trace_event_call *call, const char *type,
head = trace_get_fields(call);
return __trace_define_field(head, type, name, offset, size,
- is_signed, filter_type);
+ is_signed, filter_type, 0);
}
EXPORT_SYMBOL_GPL(trace_define_field);
+static int trace_define_field_ext(struct trace_event_call *call, const char *type,
+ const char *name, int offset, int size, int is_signed,
+ int filter_type, int len)
+{
+ struct list_head *head;
+
+ if (WARN_ON(!call->class))
+ return 0;
+
+ head = trace_get_fields(call);
+ return __trace_define_field(head, type, name, offset, size,
+ is_signed, filter_type, len);
+}
+
#define __generic_field(type, item, filter_type) \
ret = __trace_define_field(&ftrace_generic_fields, #type, \
#item, 0, 0, is_signed_type(type), \
- filter_type); \
+ filter_type, 0); \
if (ret) \
return ret;
@@ -157,7 +181,7 @@ EXPORT_SYMBOL_GPL(trace_define_field);
"common_" #item, \
offsetof(typeof(ent), item), \
sizeof(ent.item), \
- is_signed_type(type), FILTER_OTHER); \
+ is_signed_type(type), FILTER_OTHER, 0); \
if (ret) \
return ret;
@@ -167,8 +191,11 @@ static int trace_define_generic_fields(void)
__generic_field(int, CPU, FILTER_CPU);
__generic_field(int, cpu, FILTER_CPU);
+ __generic_field(int, common_cpu, FILTER_CPU);
__generic_field(char *, COMM, FILTER_COMM);
__generic_field(char *, comm, FILTER_COMM);
+ __generic_field(char *, stacktrace, FILTER_STACKTRACE);
+ __generic_field(char *, STACKTRACE, FILTER_STACKTRACE);
return ret;
}
@@ -180,6 +207,7 @@ static int trace_define_common_fields(void)
__common_field(unsigned short, type);
__common_field(unsigned char, flags);
+ /* Holds both preempt_count and migrate_disable */
__common_field(unsigned char, preempt_count);
__common_field(int, pid);
@@ -216,6 +244,221 @@ int trace_event_get_offsets(struct trace_event_call *call)
return tail->offset + tail->size;
}
+/*
+ * Check if the referenced field is an array and return true,
+ * as arrays are OK to dereference.
+ */
+static bool test_field(const char *fmt, struct trace_event_call *call)
+{
+ struct trace_event_fields *field = call->class->fields_array;
+ const char *array_descriptor;
+ const char *p = fmt;
+ int len;
+
+ if (!(len = str_has_prefix(fmt, "REC->")))
+ return false;
+ fmt += len;
+ for (p = fmt; *p; p++) {
+ if (!isalnum(*p) && *p != '_')
+ break;
+ }
+ len = p - fmt;
+
+ for (; field->type; field++) {
+ if (strncmp(field->name, fmt, len) ||
+ field->name[len])
+ continue;
+ array_descriptor = strchr(field->type, '[');
+ /* This is an array and is OK to dereference. */
+ return array_descriptor != NULL;
+ }
+ return false;
+}
+
+/*
+ * Examine the print fmt of the event looking for unsafe dereference
+ * pointers using %p* that could be recorded in the trace event and
+ * much later referenced after the pointer was freed. Dereferencing
+ * pointers is OK if they dereference into the event itself.
+ */
+static void test_event_printk(struct trace_event_call *call)
+{
+ u64 dereference_flags = 0;
+ bool first = true;
+ const char *fmt, *c, *r, *a;
+ int parens = 0;
+ char in_quote = 0;
+ int start_arg = 0;
+ int arg = 0;
+ int i;
+
+ fmt = call->print_fmt;
+
+ if (!fmt)
+ return;
+
+ for (i = 0; fmt[i]; i++) {
+ switch (fmt[i]) {
+ case '\\':
+ i++;
+ if (!fmt[i])
+ return;
+ continue;
+ case '"':
+ case '\'':
+ /*
+ * The print fmt starts with a string that
+ * is processed first to find %p* usage,
+ * then after the first string, the print fmt
+ * contains arguments that are used to check
+ * if the dereferenced %p* usage is safe.
+ */
+ if (first) {
+ if (fmt[i] == '\'')
+ continue;
+ if (in_quote) {
+ arg = 0;
+ first = false;
+ /*
+ * If there were no %p* uses,
+ * the fmt is OK.
+ */
+ if (!dereference_flags)
+ return;
+ }
+ }
+ if (in_quote) {
+ if (in_quote == fmt[i])
+ in_quote = 0;
+ } else {
+ in_quote = fmt[i];
+ }
+ continue;
+ case '%':
+ if (!first || !in_quote)
+ continue;
+ i++;
+ if (!fmt[i])
+ return;
+ switch (fmt[i]) {
+ case '%':
+ continue;
+ case 'p':
+ /* Find dereferencing fields */
+ switch (fmt[i + 1]) {
+ case 'B': case 'R': case 'r':
+ case 'b': case 'M': case 'm':
+ case 'I': case 'i': case 'E':
+ case 'U': case 'V': case 'N':
+ case 'a': case 'd': case 'D':
+ case 'g': case 't': case 'C':
+ case 'O': case 'f':
+ if (WARN_ONCE(arg == 63,
+ "Too many args for event: %s",
+ trace_event_name(call)))
+ return;
+ dereference_flags |= 1ULL << arg;
+ }
+ break;
+ default:
+ {
+ bool star = false;
+ int j;
+
+ /* Increment arg if %*s exists. */
+ for (j = 0; fmt[i + j]; j++) {
+ if (isdigit(fmt[i + j]) ||
+ fmt[i + j] == '.')
+ continue;
+ if (fmt[i + j] == '*') {
+ star = true;
+ continue;
+ }
+ if ((fmt[i + j] == 's') && star)
+ arg++;
+ break;
+ }
+ break;
+ } /* default */
+
+ } /* switch */
+ arg++;
+ continue;
+ case '(':
+ if (in_quote)
+ continue;
+ parens++;
+ continue;
+ case ')':
+ if (in_quote)
+ continue;
+ parens--;
+ if (WARN_ONCE(parens < 0,
+ "Paren mismatch for event: %s\narg='%s'\n%*s",
+ trace_event_name(call),
+ fmt + start_arg,
+ (i - start_arg) + 5, "^"))
+ return;
+ continue;
+ case ',':
+ if (in_quote || parens)
+ continue;
+ i++;
+ while (isspace(fmt[i]))
+ i++;
+ start_arg = i;
+ if (!(dereference_flags & (1ULL << arg)))
+ goto next_arg;
+
+ /* Find the REC-> in the argument */
+ c = strchr(fmt + i, ',');
+ r = strstr(fmt + i, "REC->");
+ if (r && (!c || r < c)) {
+ /*
+ * Addresses of events on the buffer,
+ * or an array on the buffer is
+ * OK to dereference.
+ * There are ways to fool this, but
+ * this is to catch common mistakes,
+ * not malicious code.
+ */
+ a = strchr(fmt + i, '&');
+ if ((a && (a < r)) || test_field(r, call))
+ dereference_flags &= ~(1ULL << arg);
+ } else if ((r = strstr(fmt + i, "__get_dynamic_array(")) &&
+ (!c || r < c)) {
+ dereference_flags &= ~(1ULL << arg);
+ } else if ((r = strstr(fmt + i, "__get_sockaddr(")) &&
+ (!c || r < c)) {
+ dereference_flags &= ~(1ULL << arg);
+ }
+
+ next_arg:
+ i--;
+ arg++;
+ }
+ }
+
+ /*
+ * If you triggered the below warning, the trace event reported
+ * uses an unsafe dereference pointer %p*. As the data stored
+ * at the trace event time may no longer exist when the trace
+ * event is printed, dereferencing to the original source is
+ * unsafe. The source of the dereference must be copied into the
+ * event itself, and the dereference must access the copy instead.
+ */
+ if (WARN_ON_ONCE(dereference_flags)) {
+ arg = 1;
+ while (!(dereference_flags & 1)) {
+ dereference_flags >>= 1;
+ arg++;
+ }
+ pr_warn("event %s has unsafe dereference of argument %d\n",
+ trace_event_name(call), arg);
+ pr_warn("print_fmt: %s\n", fmt);
+ }
+}
+
int trace_event_raw_init(struct trace_event_call *call)
{
int id;
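test_event_printk() above scans an event's print_fmt for %p extensions that dereference their argument and then checks whether each flagged argument stays inside the event record. A tiny standalone sketch of just the format-scanning half (userspace C, demo_* names invented; the real check also walks the argument list for REC-> and __get_dynamic_array() uses):

#include <stdio.h>
#include <string.h>

/* Walk a print format and count %p extensions that dereference their
 * argument, using the same specifier set as the case list above. */
static int demo_count_deref_specifiers(const char *fmt)
{
	static const char deref[] = "BRrbMmIiEUVNadDgtCOf";
	int hits = 0;

	for (const char *p = fmt; (p = strchr(p, '%')) != NULL; p++) {
		if (p[1] == '%') {		/* literal percent */
			p++;
			continue;
		}
		if (p[1] == 'p' && p[2] && strchr(deref, p[2]))
			hits++;			/* e.g. %pI4, %pM dereference the pointer */
	}
	return hits;
}

int main(void)
{
	const char *fmt = "dev=%s addr=%pM ip=%pI4 flags=%x";

	printf("%d dereferencing %%p* specifiers in \"%s\"\n",
	       demo_count_deref_specifiers(fmt), fmt);
	return 0;
}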
@@ -224,6 +467,8 @@ int trace_event_raw_init(struct trace_event_call *call)
if (!id)
return -ENODEV;
+ test_event_printk(call);
+
return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);
@@ -257,22 +502,19 @@ void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
trace_event_ignore_this_pid(trace_file))
return NULL;
- local_save_flags(fbuffer->flags);
- fbuffer->pc = preempt_count();
/*
* If CONFIG_PREEMPTION is enabled, then the tracepoint itself disables
* preemption (adding one to the preempt_count). Since we are
* interested in the preempt_count at the time the tracepoint was
* hit, we need to subtract one to offset the increment.
*/
- if (IS_ENABLED(CONFIG_PREEMPTION))
- fbuffer->pc--;
+ fbuffer->trace_ctx = tracing_gen_ctx_dec();
fbuffer->trace_file = trace_file;
fbuffer->event =
trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
event_call->event.type, len,
- fbuffer->flags, fbuffer->pc);
+ fbuffer->trace_ctx);
if (!fbuffer->event)
return NULL;
@@ -369,7 +611,6 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file,
{
struct trace_event_call *call = file->event_call;
struct trace_array *tr = file->tr;
- unsigned long file_flags = file->flags;
int ret = 0;
int disable;
@@ -393,6 +634,8 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file,
break;
disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
+ /* Disable use of trace_buffered_event */
+ trace_buffered_event_disable();
} else
disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE);
@@ -431,6 +674,8 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file,
if (atomic_inc_return(&file->sm_ref) > 1)
break;
set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
+ /* Enable use of trace_buffered_event */
+ trace_buffered_event_enable();
}
if (!(file->flags & EVENT_FILE_FL_ENABLED)) {
@@ -470,15 +715,6 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file,
break;
}
- /* Enable or disable use of trace_buffered_event */
- if ((file_flags & EVENT_FILE_FL_SOFT_DISABLED) !=
- (file->flags & EVENT_FILE_FL_SOFT_DISABLED)) {
- if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
- trace_buffered_event_enable();
- else
- trace_buffered_event_disable();
- }
-
return ret;
}
@@ -538,19 +774,21 @@ void trace_event_follow_fork(struct trace_array *tr, bool enable)
if (enable) {
register_trace_prio_sched_process_fork(event_filter_pid_sched_process_fork,
tr, INT_MIN);
- register_trace_prio_sched_process_exit(event_filter_pid_sched_process_exit,
+ register_trace_prio_sched_process_free(event_filter_pid_sched_process_exit,
tr, INT_MAX);
} else {
unregister_trace_sched_process_fork(event_filter_pid_sched_process_fork,
tr);
- unregister_trace_sched_process_exit(event_filter_pid_sched_process_exit,
+ unregister_trace_sched_process_free(event_filter_pid_sched_process_exit,
tr);
}
}
static void
event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
- struct task_struct *prev, struct task_struct *next)
+ struct task_struct *prev,
+ struct task_struct *next,
+ unsigned int prev_state)
{
struct trace_array *tr = data;
struct trace_pid_list *no_pid_list;
@@ -574,7 +812,9 @@ event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
static void
event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
- struct task_struct *prev, struct task_struct *next)
+ struct task_struct *prev,
+ struct task_struct *next,
+ unsigned int prev_state)
{
struct trace_array *tr = data;
struct trace_pid_list *no_pid_list;
@@ -676,10 +916,10 @@ static void __ftrace_clear_event_pids(struct trace_array *tr, int type)
tracepoint_synchronize_unregister();
if ((type & TRACE_PIDS) && pid_list)
- trace_free_pid_list(pid_list);
+ trace_pid_list_free(pid_list);
if ((type & TRACE_NO_PIDS) && no_pid_list)
- trace_free_pid_list(no_pid_list);
+ trace_pid_list_free(no_pid_list);
}
static void ftrace_clear_event_pids(struct trace_array *tr, int type)
@@ -744,32 +984,41 @@ static void remove_subsystem(struct trace_subsystem_dir *dir)
return;
if (!--dir->nr_events) {
- tracefs_remove(dir->entry);
+ eventfs_remove_dir(dir->ei);
list_del(&dir->list);
__put_system_dir(dir);
}
}
-static void remove_event_file_dir(struct trace_event_file *file)
+void event_file_get(struct trace_event_file *file)
{
- struct dentry *dir = file->dir;
- struct dentry *child;
+ atomic_inc(&file->ref);
+}
- if (dir) {
- spin_lock(&dir->d_lock); /* probably unneeded */
- list_for_each_entry(child, &dir->d_subdirs, d_child) {
- if (d_really_is_positive(child)) /* probably unneeded */
- d_inode(child)->i_private = NULL;
- }
- spin_unlock(&dir->d_lock);
+void event_file_put(struct trace_event_file *file)
+{
+ if (WARN_ON_ONCE(!atomic_read(&file->ref))) {
+ if (file->flags & EVENT_FILE_FL_FREED)
+ kmem_cache_free(file_cachep, file);
+ return;
+ }
- tracefs_remove(dir);
+ if (atomic_dec_and_test(&file->ref)) {
+ /* Count should only go to zero when it is freed */
+ if (WARN_ON_ONCE(!(file->flags & EVENT_FILE_FL_FREED)))
+ return;
+ kmem_cache_free(file_cachep, file);
}
+}
+static void remove_event_file_dir(struct trace_event_file *file)
+{
+ eventfs_remove_dir(file->ei);
list_del(&file->list);
remove_subsystem(file->system);
free_event_filter(file->filter);
- kmem_cache_free(file_cachep, file);
+ file->flags |= EVENT_FILE_FL_FREED;
+ event_file_put(file);
}
/*
@@ -939,7 +1188,7 @@ ftrace_event_write(struct file *file, const char __user *ubuf,
if (!cnt)
return 0;
- ret = tracing_update_buffers();
+ ret = tracing_update_buffers(tr);
if (ret < 0)
return ret;
@@ -1142,7 +1391,7 @@ event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
flags = file->flags;
mutex_unlock(&event_mutex);
- if (!file)
+ if (!file || flags & EVENT_FILE_FL_FREED)
return -ENODEV;
if (flags & EVENT_FILE_FL_ENABLED &&
@@ -1170,18 +1419,20 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
if (ret)
return ret;
- ret = tracing_update_buffers();
- if (ret < 0)
- return ret;
-
switch (val) {
case 0:
case 1:
ret = -ENODEV;
mutex_lock(&event_mutex);
file = event_file_data(filp);
- if (likely(file))
+ if (likely(file && !(file->flags & EVENT_FILE_FL_FREED))) {
+ ret = tracing_update_buffers(file->tr);
+ if (ret < 0) {
+ mutex_unlock(&event_mutex);
+ return ret;
+ }
ret = ftrace_event_enable_disable(file, val);
+ }
mutex_unlock(&event_mutex);
break;
@@ -1211,7 +1462,8 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
mutex_lock(&event_mutex);
list_for_each_entry(file, &tr->events, list) {
call = file->event_call;
- if (!trace_event_name(call) || !call->class || !call->class->reg)
+ if ((call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) ||
+ !trace_event_name(call) || !call->class || !call->class->reg)
continue;
if (system && strcmp(call->class->system, system->name) != 0)
@@ -1254,7 +1506,7 @@ system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
if (ret)
return ret;
- ret = tracing_update_buffers();
+ ret = tracing_update_buffers(dir->tr);
if (ret < 0)
return ret;
@@ -1358,12 +1610,17 @@ static int f_show(struct seq_file *m, void *v)
seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
field->type, field->name, field->offset,
field->size, !!field->is_signed);
- else
- seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
+ else if (field->len)
+ seq_printf(m, "\tfield:%.*s %s[%d];\toffset:%u;\tsize:%u;\tsigned:%d;\n",
(int)(array_descriptor - field->type),
field->type, field->name,
- array_descriptor, field->offset,
+ field->len, field->offset,
field->size, !!field->is_signed);
+ else
+ seq_printf(m, "\tfield:%.*s %s[];\toffset:%u;\tsize:%u;\tsigned:%d;\n",
+ (int)(array_descriptor - field->type),
+ field->type, field->name,
+ field->offset, field->size, !!field->is_signed);
return 0;
}
@@ -1448,7 +1705,7 @@ event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
mutex_lock(&event_mutex);
file = event_file_data(filp);
- if (file)
+ if (file && !(file->flags & EVENT_FILE_FL_FREED))
print_event_filter(file, s);
mutex_unlock(&event_mutex);
@@ -1495,9 +1752,9 @@ static LIST_HEAD(event_subsystems);
static int subsystem_open(struct inode *inode, struct file *filp)
{
+ struct trace_subsystem_dir *dir = NULL, *iter_dir;
+ struct trace_array *tr = NULL, *iter_tr;
struct event_subsystem *system = NULL;
- struct trace_subsystem_dir *dir = NULL; /* Initialize for gcc */
- struct trace_array *tr;
int ret;
if (tracing_is_disabled())
@@ -1506,10 +1763,12 @@ static int subsystem_open(struct inode *inode, struct file *filp)
/* Make sure the system still exists */
mutex_lock(&event_mutex);
mutex_lock(&trace_types_lock);
- list_for_each_entry(tr, &ftrace_trace_arrays, list) {
- list_for_each_entry(dir, &tr->systems, list) {
- if (dir == inode->i_private) {
+ list_for_each_entry(iter_tr, &ftrace_trace_arrays, list) {
+ list_for_each_entry(iter_dir, &iter_tr->systems, list) {
+ if (iter_dir == inode->i_private) {
/* Don't open systems with no events */
+ tr = iter_tr;
+ dir = iter_dir;
if (dir->nr_events) {
__get_system_dir(dir);
system = dir->subsystem;
@@ -1525,9 +1784,6 @@ static int subsystem_open(struct inode *inode, struct file *filp)
if (!system)
return -ENODEV;
- /* Some versions of gcc think dir can be uninitialized here */
- WARN_ON(!dir);
-
/* Still need to increment the ref count of the system */
if (trace_array_get(tr) < 0) {
put_system(dir);
@@ -1637,9 +1893,33 @@ subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
}
static ssize_t
-show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+show_header_page_file(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ struct trace_array *tr = filp->private_data;
+ struct trace_seq *s;
+ int r;
+
+ if (*ppos)
+ return 0;
+
+ s = kmalloc(sizeof(*s), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ trace_seq_init(s);
+
+ ring_buffer_print_page_header(tr->array_buffer.buffer, s);
+ r = simple_read_from_buffer(ubuf, cnt, ppos,
+ s->buffer, trace_seq_used(s));
+
+ kfree(s);
+
+ return r;
+}
+
+static ssize_t
+show_header_event_file(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
- int (*func)(struct trace_seq *s) = filp->private_data;
struct trace_seq *s;
int r;
@@ -1652,7 +1932,7 @@ show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
trace_seq_init(s);
- func(s);
+ ring_buffer_print_entry_header(s);
r = simple_read_from_buffer(ubuf, cnt, ppos,
s->buffer, trace_seq_used(s));
@@ -1724,7 +2004,7 @@ event_pid_write(struct file *filp, const char __user *ubuf,
if (!cnt)
return 0;
- ret = tracing_update_buffers();
+ ret = tracing_update_buffers(tr);
if (ret < 0)
return ret;
@@ -1757,7 +2037,7 @@ event_pid_write(struct file *filp, const char __user *ubuf,
if (filtered_pids) {
tracepoint_synchronize_unregister();
- trace_free_pid_list(filtered_pids);
+ trace_pid_list_free(filtered_pids);
} else if (pid_list && !other_pids) {
register_pid_events(tr);
}
@@ -1858,9 +2138,10 @@ static const struct file_operations ftrace_set_event_notrace_pid_fops = {
};
static const struct file_operations ftrace_enable_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_file_tr,
.read = event_enable_read,
.write = event_enable_write,
+ .release = tracing_release_file_tr,
.llseek = default_llseek,
};
@@ -1877,9 +2158,10 @@ static const struct file_operations ftrace_event_id_fops = {
};
static const struct file_operations ftrace_event_filter_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_file_tr,
.read = event_filter_read,
.write = event_filter_write,
+ .release = tracing_release_file_tr,
.llseek = default_llseek,
};
@@ -1907,10 +2189,18 @@ static const struct file_operations ftrace_tr_enable_fops = {
.release = subsystem_release,
};
-static const struct file_operations ftrace_show_header_fops = {
- .open = tracing_open_generic,
- .read = show_header,
+static const struct file_operations ftrace_show_header_page_fops = {
+ .open = tracing_open_generic_tr,
+ .read = show_header_page_file,
+ .llseek = default_llseek,
+ .release = tracing_release_generic_tr,
+};
+
+static const struct file_operations ftrace_show_header_event_fops = {
+ .open = tracing_open_generic_tr,
+ .read = show_header_event_file,
.llseek = default_llseek,
+ .release = tracing_release_generic_tr,
};
static int
@@ -2032,8 +2322,6 @@ create_new_subsystem(const char *name)
if (!system->name)
goto out_free;
- system->filter = NULL;
-
system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
if (!system->filter)
goto out_free;
@@ -2048,13 +2336,40 @@ create_new_subsystem(const char *name)
return NULL;
}
-static struct dentry *
+static int system_callback(const char *name, umode_t *mode, void **data,
+ const struct file_operations **fops)
+{
+ if (strcmp(name, "filter") == 0)
+ *fops = &ftrace_subsystem_filter_fops;
+
+ else if (strcmp(name, "enable") == 0)
+ *fops = &ftrace_system_enable_fops;
+
+ else
+ return 0;
+
+ *mode = TRACE_MODE_WRITE;
+ return 1;
+}
+
+static struct eventfs_inode *
event_subsystem_dir(struct trace_array *tr, const char *name,
- struct trace_event_file *file, struct dentry *parent)
+ struct trace_event_file *file, struct eventfs_inode *parent)
{
+ struct event_subsystem *system, *iter;
struct trace_subsystem_dir *dir;
- struct event_subsystem *system;
- struct dentry *entry;
+ struct eventfs_inode *ei;
+ int nr_entries;
+ static struct eventfs_entry system_entries[] = {
+ {
+ .name = "filter",
+ .callback = system_callback,
+ },
+ {
+ .name = "enable",
+ .callback = system_callback,
+ }
+ };
/* First see if we did not already create this dir */
list_for_each_entry(dir, &tr->systems, list) {
@@ -2062,18 +2377,18 @@ event_subsystem_dir(struct trace_array *tr, const char *name,
if (strcmp(system->name, name) == 0) {
dir->nr_events++;
file->system = dir;
- return dir->entry;
+ return dir->ei;
}
}
/* Now see if the system itself exists. */
- list_for_each_entry(system, &event_subsystems, list) {
- if (strcmp(system->name, name) == 0)
+ system = NULL;
+ list_for_each_entry(iter, &event_subsystems, list) {
+ if (strcmp(iter->name, name) == 0) {
+ system = iter;
break;
+ }
}
- /* Reset system variable when not found */
- if (&system->list == &event_subsystems)
- system = NULL;
dir = kmalloc(sizeof(*dir), GFP_KERNEL);
if (!dir)
@@ -2086,33 +2401,29 @@ event_subsystem_dir(struct trace_array *tr, const char *name,
} else
__get_system(system);
- dir->entry = tracefs_create_dir(name, parent);
- if (!dir->entry) {
+ /* ftrace only has directories no files */
+ if (strcmp(name, "ftrace") == 0)
+ nr_entries = 0;
+ else
+ nr_entries = ARRAY_SIZE(system_entries);
+
+ ei = eventfs_create_dir(name, parent, system_entries, nr_entries, dir);
+ if (IS_ERR(ei)) {
pr_warn("Failed to create system directory %s\n", name);
__put_system(system);
goto out_free;
}
+ dir->ei = ei;
dir->tr = tr;
dir->ref_count = 1;
dir->nr_events = 1;
dir->subsystem = system;
file->system = dir;
- entry = tracefs_create_file("filter", 0644, dir->entry, dir,
- &ftrace_subsystem_filter_fops);
- if (!entry) {
- kfree(system->filter);
- system->filter = NULL;
- pr_warn("Could not create tracefs '%s/filter' entry\n", name);
- }
-
- trace_create_file("enable", 0644, dir->entry, dir,
- &ftrace_system_enable_fops);
-
list_add(&dir->list, &tr->systems);
- return dir->entry;
+ return dir->ei;
out_free:
kfree(dir);
@@ -2124,43 +2435,10 @@ event_subsystem_dir(struct trace_array *tr, const char *name,
}
static int
-event_create_dir(struct dentry *parent, struct trace_event_file *file)
+event_define_fields(struct trace_event_call *call)
{
- struct trace_event_call *call = file->event_call;
- struct trace_array *tr = file->tr;
struct list_head *head;
- struct dentry *d_events;
- const char *name;
- int ret;
-
- /*
- * If the trace point header did not define TRACE_SYSTEM
- * then the system would be called "TRACE_SYSTEM".
- */
- if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
- d_events = event_subsystem_dir(tr, call->class->system, file, parent);
- if (!d_events)
- return -ENOMEM;
- } else
- d_events = parent;
-
- name = trace_event_name(call);
- file->dir = tracefs_create_dir(name, d_events);
- if (!file->dir) {
- pr_warn("Could not create tracefs '%s' directory\n", name);
- return -1;
- }
-
- if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
- trace_create_file("enable", 0644, file->dir, file,
- &ftrace_enable_fops);
-
-#ifdef CONFIG_PERF_EVENTS
- if (call->event.type && call->class->reg)
- trace_create_file("id", 0444, file->dir,
- (void *)(long)call->event.type,
- &ftrace_event_id_fops);
-#endif
+ int ret = 0;
/*
* Other events may have the same class. Only update
@@ -2173,54 +2451,184 @@ event_create_dir(struct dentry *parent, struct trace_event_file *file)
for (; field->type; field++) {
if (field->type == TRACE_FUNCTION_TYPE) {
- ret = field->define_fields(call);
+ field->define_fields(call);
break;
}
offset = ALIGN(offset, field->align);
- ret = trace_define_field(call, field->type, field->name,
+ ret = trace_define_field_ext(call, field->type, field->name,
offset, field->size,
- field->is_signed, field->filter_type);
- if (ret)
+ field->is_signed, field->filter_type,
+ field->len);
+ if (WARN_ON_ONCE(ret)) {
+ pr_err("error code is %d\n", ret);
break;
+ }
offset += field->size;
}
- if (ret < 0) {
- pr_warn("Could not initialize trace point events/%s\n",
- name);
- return -1;
- }
+ }
+
+ return ret;
+}
+
+static int event_callback(const char *name, umode_t *mode, void **data,
+ const struct file_operations **fops)
+{
+ struct trace_event_file *file = *data;
+ struct trace_event_call *call = file->event_call;
+
+ if (strcmp(name, "format") == 0) {
+ *mode = TRACE_MODE_READ;
+ *fops = &ftrace_event_format_fops;
+ *data = call;
+ return 1;
}
/*
* Only event directories that can be enabled should have
- * triggers or filters.
+ * triggers or filters, with the exception of the "print"
+ * event that can have a "trigger" file.
*/
if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) {
- trace_create_file("filter", 0644, file->dir, file,
- &ftrace_event_filter_fops);
+ if (call->class->reg && strcmp(name, "enable") == 0) {
+ *mode = TRACE_MODE_WRITE;
+ *fops = &ftrace_enable_fops;
+ return 1;
+ }
+
+ if (strcmp(name, "filter") == 0) {
+ *mode = TRACE_MODE_WRITE;
+ *fops = &ftrace_event_filter_fops;
+ return 1;
+ }
+ }
+
+ if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) ||
+ strcmp(trace_event_name(call), "print") == 0) {
+ if (strcmp(name, "trigger") == 0) {
+ *mode = TRACE_MODE_WRITE;
+ *fops = &event_trigger_fops;
+ return 1;
+ }
+ }
- trace_create_file("trigger", 0644, file->dir, file,
- &event_trigger_fops);
+#ifdef CONFIG_PERF_EVENTS
+ if (call->event.type && call->class->reg &&
+ strcmp(name, "id") == 0) {
+ *mode = TRACE_MODE_READ;
+ *data = (void *)(long)call->event.type;
+ *fops = &ftrace_event_id_fops;
+ return 1;
}
+#endif
#ifdef CONFIG_HIST_TRIGGERS
- trace_create_file("hist", 0444, file->dir, file,
- &event_hist_fops);
+ if (strcmp(name, "hist") == 0) {
+ *mode = TRACE_MODE_READ;
+ *fops = &event_hist_fops;
+ return 1;
+ }
#endif
#ifdef CONFIG_HIST_TRIGGERS_DEBUG
- trace_create_file("hist_debug", 0444, file->dir, file,
- &event_hist_debug_fops);
+ if (strcmp(name, "hist_debug") == 0) {
+ *mode = TRACE_MODE_READ;
+ *fops = &event_hist_debug_fops;
+ return 1;
+ }
+#endif
+#ifdef CONFIG_TRACE_EVENT_INJECT
+ if (call->event.type && call->class->reg &&
+ strcmp(name, "inject") == 0) {
+ *mode = 0200;
+ *fops = &event_inject_fops;
+ return 1;
+ }
#endif
- trace_create_file("format", 0444, file->dir, call,
- &ftrace_event_format_fops);
+ return 0;
+}
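For illustration, this is what the callback above resolves to for a typical event directory; the mapping is derived solely from the branches of event_callback() shown here:

	/* name        mode               fops                        *data       */
	/* "format"    TRACE_MODE_READ    &ftrace_event_format_fops   call        */
	/* "enable"    TRACE_MODE_WRITE   &ftrace_enable_fops         file        */
	/* "filter"    TRACE_MODE_WRITE   &ftrace_event_filter_fops   file        */
	/* "trigger"   TRACE_MODE_WRITE   &event_trigger_fops         file        */
	/* "id"        TRACE_MODE_READ    &ftrace_event_id_fops       event type  */
	/* A branch that returns 0 (e.g. "enable" on an IGNORE_ENABLE event) means
	   eventfs simply does not create that file for the event. */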
+static int
+event_create_dir(struct eventfs_inode *parent, struct trace_event_file *file)
+{
+ struct trace_event_call *call = file->event_call;
+ struct trace_array *tr = file->tr;
+ struct eventfs_inode *e_events;
+ struct eventfs_inode *ei;
+ const char *name;
+ int nr_entries;
+ int ret;
+ static struct eventfs_entry event_entries[] = {
+ {
+ .name = "enable",
+ .callback = event_callback,
+ },
+ {
+ .name = "filter",
+ .callback = event_callback,
+ },
+ {
+ .name = "trigger",
+ .callback = event_callback,
+ },
+ {
+ .name = "format",
+ .callback = event_callback,
+ },
+#ifdef CONFIG_PERF_EVENTS
+ {
+ .name = "id",
+ .callback = event_callback,
+ },
+#endif
+#ifdef CONFIG_HIST_TRIGGERS
+ {
+ .name = "hist",
+ .callback = event_callback,
+ },
+#endif
+#ifdef CONFIG_HIST_TRIGGERS_DEBUG
+ {
+ .name = "hist_debug",
+ .callback = event_callback,
+ },
+#endif
#ifdef CONFIG_TRACE_EVENT_INJECT
- if (call->event.type && call->class->reg)
- trace_create_file("inject", 0200, file->dir, file,
- &event_inject_fops);
+ {
+ .name = "inject",
+ .callback = event_callback,
+ },
#endif
+ };
+
+ /*
+ * If the trace point header did not define TRACE_SYSTEM
+ * then the system would be called "TRACE_SYSTEM". This should
+ * never happen.
+ */
+ if (WARN_ON_ONCE(strcmp(call->class->system, TRACE_SYSTEM) == 0))
+ return -ENODEV;
+
+ e_events = event_subsystem_dir(tr, call->class->system, file, parent);
+ if (!e_events)
+ return -ENOMEM;
+
+ nr_entries = ARRAY_SIZE(event_entries);
+
+ name = trace_event_name(call);
+ ei = eventfs_create_dir(name, e_events, event_entries, nr_entries, file);
+ if (IS_ERR(ei)) {
+ pr_warn("Could not create tracefs '%s' directory\n", name);
+ return -1;
+ }
+
+ file->ei = ei;
+
+ ret = event_define_fields(call);
+ if (ret < 0) {
+ pr_warn("Could not initialize trace point events/%s\n", name);
+ return ret;
+ }
return 0;
}
@@ -2301,7 +2709,10 @@ __register_event(struct trace_event_call *call, struct module *mod)
return ret;
list_add(&call->list, &ftrace_events);
- call->mod = mod;
+ if (call->flags & TRACE_EVENT_FL_DYNAMIC)
+ atomic_set(&call->refcnt, 0);
+ else
+ call->module = mod;
return 0;
}
@@ -2404,6 +2815,76 @@ static void update_event_printk(struct trace_event_call *call,
}
}
+static void add_str_to_module(struct module *module, char *str)
+{
+ struct module_string *modstr;
+
+ modstr = kmalloc(sizeof(*modstr), GFP_KERNEL);
+
+ /*
+ * If we failed to allocate memory here, then we'll just
+ * let the str memory leak when the module is removed.
+ * If this fails to allocate, there are worse problems than
+ * a leaked string on module removal.
+ */
+ if (WARN_ON_ONCE(!modstr))
+ return;
+
+ modstr->module = module;
+ modstr->str = str;
+
+ list_add(&modstr->next, &module_strings);
+}
+
+static void update_event_fields(struct trace_event_call *call,
+ struct trace_eval_map *map)
+{
+ struct ftrace_event_field *field;
+ struct list_head *head;
+ char *ptr;
+ char *str;
+ int len = strlen(map->eval_string);
+
+ /* Dynamic events should never have field maps */
+ if (WARN_ON_ONCE(call->flags & TRACE_EVENT_FL_DYNAMIC))
+ return;
+
+ head = trace_get_fields(call);
+ list_for_each_entry(field, head, link) {
+ ptr = strchr(field->type, '[');
+ if (!ptr)
+ continue;
+ ptr++;
+
+ if (!isalpha(*ptr) && *ptr != '_')
+ continue;
+
+ if (strncmp(map->eval_string, ptr, len) != 0)
+ continue;
+
+ str = kstrdup(field->type, GFP_KERNEL);
+ if (WARN_ON_ONCE(!str))
+ return;
+ ptr = str + (ptr - field->type);
+ ptr = eval_replace(ptr, map, len);
+ /* enum/sizeof string smaller than value */
+ if (WARN_ON_ONCE(!ptr)) {
+ kfree(str);
+ continue;
+ }
+
+ /*
+ * If the event is part of a module, then we need to free the string
+ * when the module is removed. Otherwise, it will stay allocated
+ * until a reboot.
+ */
+ if (call->module)
+ add_str_to_module(call->module, str);
+
+ field->type = str;
+ }
+}
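A worked example may make update_event_fields() clearer. When an event declares an array field whose size is an enum, the recorded type string still carries the enum name; this function rewrites it with the numeric value from the eval map so the "format" file stays parseable. The names below are illustrative, not taken from this patch:

	/* before:  field->type == "char buf[EXAMPLE_MAX_LEN]"
	 * map:     .eval_string = "EXAMPLE_MAX_LEN", .eval_value = 32
	 * after:   field->type == "char buf[32]"   (a kstrdup()'d copy; freed on
	 *          module removal via add_str_to_module() when call->module is set)
	 */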
+
void trace_event_eval_update(struct trace_eval_map **map, int len)
{
struct trace_event_call *call, *p;
@@ -2422,9 +2903,9 @@ void trace_event_eval_update(struct trace_eval_map **map, int len)
}
/*
- * Since calls are grouped by systems, the likelyhood that the
+ * Since calls are grouped by systems, the likelihood that the
* next call in the iteration belongs to the same system as the
- * previous call is high. As an optimization, we skip seaching
+ * previous call is high. As an optimization, we skip searching
* for a map[] that matches the call's system if the last call
* was from the same system. That's what last_i is for. If the
* call has the same system as the previous call, then last_i
@@ -2439,21 +2920,59 @@ void trace_event_eval_update(struct trace_eval_map **map, int len)
first = false;
}
update_event_printk(call, map[i]);
+ update_event_fields(call, map[i]);
}
}
+ cond_resched();
}
up_write(&trace_event_sem);
}
+static bool event_in_systems(struct trace_event_call *call,
+ const char *systems)
+{
+ const char *system;
+ const char *p;
+
+ if (!systems)
+ return true;
+
+ system = call->class->system;
+ p = strstr(systems, system);
+ if (!p)
+ return false;
+
+ if (p != systems && !isspace(*(p - 1)) && *(p - 1) != ',')
+ return false;
+
+ p += strlen(system);
+ return !*p || isspace(*p) || *p == ',';
+}
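event_in_systems() is a boundary-checked substring match of the event's system name against the instance's system_names list, so partial names do not match. Illustrative inputs (system names are hypothetical):

	/* systems = "sched,irq"   system = "sched"  -> true  (terminated by ',')
	 * systems = "sched irq"   system = "irq"    -> true  (preceded by a space)
	 * systems = "scheduler"   system = "sched"  -> false (no ',', space or
	 *                                              end-of-string after the match)
	 * systems = NULL                            -> true  (no restriction)
	 */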
+
static struct trace_event_file *
trace_create_new_event(struct trace_event_call *call,
struct trace_array *tr)
{
+ struct trace_pid_list *no_pid_list;
+ struct trace_pid_list *pid_list;
struct trace_event_file *file;
+ unsigned int first;
+
+ if (!event_in_systems(call, tr->system_names))
+ return NULL;
file = kmem_cache_alloc(file_cachep, GFP_TRACE);
if (!file)
- return NULL;
+ return ERR_PTR(-ENOMEM);
+
+ pid_list = rcu_dereference_protected(tr->filtered_pids,
+ lockdep_is_held(&event_mutex));
+ no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
+ lockdep_is_held(&event_mutex));
+
+ if (!trace_pid_list_first(pid_list, &first) ||
+ !trace_pid_list_first(no_pid_list, &first))
+ file->flags |= EVENT_FILE_FL_PID_FILTER;
file->event_call = call;
file->tr = tr;
@@ -2461,10 +2980,47 @@ trace_create_new_event(struct trace_event_call *call,
atomic_set(&file->tm_ref, 0);
INIT_LIST_HEAD(&file->triggers);
list_add(&file->list, &tr->events);
+ event_file_get(file);
return file;
}
+#define MAX_BOOT_TRIGGERS 32
+
+static struct boot_triggers {
+ const char *event;
+ char *trigger;
+} bootup_triggers[MAX_BOOT_TRIGGERS];
+
+static char bootup_trigger_buf[COMMAND_LINE_SIZE];
+static int nr_boot_triggers;
+
+static __init int setup_trace_triggers(char *str)
+{
+ char *trigger;
+ char *buf;
+ int i;
+
+ strscpy(bootup_trigger_buf, str, COMMAND_LINE_SIZE);
+ trace_set_ring_buffer_expanded(NULL);
+ disable_tracing_selftest("running event triggers");
+
+ buf = bootup_trigger_buf;
+ for (i = 0; i < MAX_BOOT_TRIGGERS; i++) {
+ trigger = strsep(&buf, ",");
+ if (!trigger)
+ break;
+ bootup_triggers[i].event = strsep(&trigger, ".");
+ bootup_triggers[i].trigger = trigger;
+ if (!bootup_triggers[i].trigger)
+ break;
+ }
+
+ nr_boot_triggers = i;
+ return 1;
+}
+__setup("trace_trigger=", setup_trace_triggers);
+
/* Add an event to a trace directory */
static int
__trace_add_new_event(struct trace_event_call *call, struct trace_array *tr)
@@ -2472,26 +3028,72 @@ __trace_add_new_event(struct trace_event_call *call, struct trace_array *tr)
struct trace_event_file *file;
file = trace_create_new_event(call, tr);
+ /*
+ * trace_create_new_event() returns ERR_PTR(-ENOMEM) if the allocation
+ * failed, or NULL if the event is not part of the tr->system_names.
+ * When the event is not part of the tr->system_names, return zero, not
+ * an error.
+ */
if (!file)
- return -ENOMEM;
+ return 0;
+
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ if (eventdir_initialized)
+ return event_create_dir(tr->event_dir, file);
+ else
+ return event_define_fields(call);
+}
+
+static void trace_early_triggers(struct trace_event_file *file, const char *name)
+{
+ int ret;
+ int i;
- return event_create_dir(tr->event_dir, file);
+ for (i = 0; i < nr_boot_triggers; i++) {
+ if (strcmp(name, bootup_triggers[i].event))
+ continue;
+ mutex_lock(&event_mutex);
+ ret = trigger_process_regex(file, bootup_triggers[i].trigger);
+ mutex_unlock(&event_mutex);
+ if (ret)
+ pr_err("Failed to register trigger '%s' on event %s\n",
+ bootup_triggers[i].trigger,
+ bootup_triggers[i].event);
+ }
}
/*
- * Just create a decriptor for early init. A descriptor is required
+ * Just create a descriptor for early init. A descriptor is required
* for enabling events at boot. We want to enable events before
* the filesystem is initialized.
*/
-static __init int
+static int
__trace_early_add_new_event(struct trace_event_call *call,
struct trace_array *tr)
{
struct trace_event_file *file;
+ int ret;
file = trace_create_new_event(call, tr);
+ /*
+ * trace_create_new_event() returns ERR_PTR(-ENOMEM) if the allocation
+ * failed, or NULL if the event is not part of the tr->system_names.
+ * When the event is not part of the tr->system_names, return zero, not
+ * an error.
+ */
if (!file)
- return -ENOMEM;
+ return 0;
+
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ ret = event_define_fields(call);
+ if (ret)
+ return ret;
+
+ trace_early_triggers(file, trace_event_name(call));
return 0;
}
@@ -2514,6 +3116,7 @@ int trace_add_event_call(struct trace_event_call *call)
mutex_unlock(&trace_types_lock);
return ret;
}
+EXPORT_SYMBOL_GPL(trace_add_event_call);
/*
* Must be called under locking of trace_types_lock, event_mutex and
@@ -2545,7 +3148,10 @@ static int probe_remove_event_call(struct trace_event_call *call)
* TRACE_REG_UNREGISTER.
*/
if (file->flags & EVENT_FILE_FL_ENABLED)
- return -EBUSY;
+ goto busy;
+
+ if (file->flags & EVENT_FILE_FL_WAS_ENABLED)
+ tr->clear_trace = true;
/*
* The do_for_each_event_file_safe() is
* a double loop. After finding the call for this
@@ -2558,6 +3164,12 @@ static int probe_remove_event_call(struct trace_event_call *call)
__trace_remove_event_call(call);
return 0;
+ busy:
+ /* No need to clear the trace now */
+ list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+ tr->clear_trace = false;
+ }
+ return -EBUSY;
}
/* Remove an event_call */
@@ -2575,6 +3187,7 @@ int trace_remove_event_call(struct trace_event_call *call)
return ret;
}
+EXPORT_SYMBOL_GPL(trace_remove_event_call);
#define for_each_event(event, start, end) \
for (event = start; \
@@ -2609,12 +3222,23 @@ static void trace_module_add_events(struct module *mod)
static void trace_module_remove_events(struct module *mod)
{
struct trace_event_call *call, *p;
+ struct module_string *modstr, *m;
down_write(&trace_event_sem);
list_for_each_entry_safe(call, p, &ftrace_events, list) {
- if (call->mod == mod)
+ if ((call->flags & TRACE_EVENT_FL_DYNAMIC) || !call->module)
+ continue;
+ if (call->module == mod)
__trace_remove_event_call(call);
}
+ /* Check for any strings allocated for this module */
+ list_for_each_entry_safe(modstr, m, &module_strings, next) {
+ if (modstr->module != mod)
+ continue;
+ list_del(&modstr->next);
+ kfree(modstr->str);
+ kfree(modstr);
+ }
up_write(&trace_event_sem);
/*
@@ -2625,7 +3249,7 @@ static void trace_module_remove_events(struct module *mod)
* over from this module may be passed to the new module events and
* unexpected results may occur.
*/
- tracing_reset_all_online_cpus();
+ tracing_reset_all_online_cpus_unlocked();
}
static int trace_module_notify(struct notifier_block *self,
@@ -2646,7 +3270,7 @@ static int trace_module_notify(struct notifier_block *self,
mutex_unlock(&trace_types_lock);
mutex_unlock(&event_mutex);
- return 0;
+ return NOTIFY_OK;
}
static struct notifier_block trace_module_nb = {
@@ -2755,7 +3379,7 @@ struct trace_event_file *trace_get_event_file(const char *instance,
}
/* Don't let event modules unload while in use */
- ret = try_module_get(file->event_call->mod);
+ ret = trace_event_try_get_ref(file->event_call);
if (!ret) {
trace_array_put(tr);
ret = -EBUSY;
@@ -2785,7 +3409,7 @@ EXPORT_SYMBOL_GPL(trace_get_event_file);
void trace_put_event_file(struct trace_event_file *file)
{
mutex_lock(&event_mutex);
- module_put(file->event_call->mod);
+ trace_event_put_ref(file->event_call);
mutex_unlock(&event_mutex);
trace_array_put(file->tr);
@@ -2920,7 +3544,7 @@ static int free_probe_data(void *data)
if (!edata->ref) {
/* Remove the SOFT_MODE flag */
__ftrace_event_enable_disable(edata->file, 0, 1);
- module_put(edata->file->event_call->mod);
+ trace_event_put_ref(edata->file->event_call);
kfree(edata);
}
return 0;
@@ -3053,7 +3677,7 @@ event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,
out_reg:
/* Don't let event modules unload while probe registered */
- ret = try_module_get(file->event_call->mod);
+ ret = trace_event_try_get_ref(file->event_call);
if (!ret) {
ret = -EBUSY;
goto out_free;
@@ -3083,7 +3707,7 @@ event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,
out_disable:
__ftrace_event_enable_disable(file, 0, 1);
out_put:
- module_put(file->event_call->mod);
+ trace_event_put_ref(file->event_call);
out_free:
kfree(data);
goto out;
@@ -3116,14 +3740,13 @@ static inline int register_event_cmds(void) { return 0; }
#endif /* CONFIG_DYNAMIC_FTRACE */
/*
- * The top level array has already had its trace_event_file
- * descriptors created in order to allow for early events to
- * be recorded. This function is called after the tracefs has been
- * initialized, and we now have to create the files associated
- * to the events.
+ * The top level array and trace arrays created by boot-time tracing
+ * have already had their trace_event_file descriptors created in order
+ * to allow for early events to be recorded.
+ * This function is called after the tracefs has been initialized,
+ * and we now have to create the files associated to the events.
*/
-static __init void
-__trace_early_add_event_dirs(struct trace_array *tr)
+static void __trace_early_add_event_dirs(struct trace_array *tr)
{
struct trace_event_file *file;
int ret;
@@ -3138,20 +3761,20 @@ __trace_early_add_event_dirs(struct trace_array *tr)
}
/*
- * For early boot up, the top trace array requires to have
- * a list of events that can be enabled. This must be done before
- * the filesystem is set up in order to allow events to be traced
- * early.
+ * For early boot up, the top trace array and the trace arrays created
+ * by boot-time tracing need to have a list of events that can be
+ * enabled. This must be done before the filesystem is set up in order
+ * to allow events to be traced early.
*/
-static __init void
-__trace_early_add_events(struct trace_array *tr)
+void __trace_early_add_events(struct trace_array *tr)
{
struct trace_event_call *call;
int ret;
list_for_each_entry(call, &ftrace_events, list) {
/* Early boot up should not have any modules loaded */
- if (WARN_ON_ONCE(call->mod))
+ if (!(call->flags & TRACE_EVENT_FL_DYNAMIC) &&
+ WARN_ON_ONCE(call->module))
continue;
ret = __trace_early_add_new_event(call, tr);
@@ -3186,67 +3809,82 @@ static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
static __init int setup_trace_event(char *str)
{
- strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
- ring_buffer_expanded = true;
- tracing_selftest_disabled = true;
+ strscpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
+ trace_set_ring_buffer_expanded(NULL);
+ disable_tracing_selftest("running event tracing");
return 1;
}
__setup("trace_event=", setup_trace_event);
+static int events_callback(const char *name, umode_t *mode, void **data,
+ const struct file_operations **fops)
+{
+ if (strcmp(name, "enable") == 0) {
+ *mode = TRACE_MODE_WRITE;
+ *fops = &ftrace_tr_enable_fops;
+ return 1;
+ }
+
+ if (strcmp(name, "header_page") == 0) {
+ *mode = TRACE_MODE_READ;
+ *fops = &ftrace_show_header_page_fops;
+
+ } else if (strcmp(name, "header_event") == 0) {
+ *mode = TRACE_MODE_READ;
+ *fops = &ftrace_show_header_event_fops;
+ } else
+ return 0;
+
+ return 1;
+}
+
/* Expects to have event_mutex held when called */
static int
create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
{
- struct dentry *d_events;
+ struct eventfs_inode *e_events;
struct dentry *entry;
-
- entry = tracefs_create_file("set_event", 0644, parent,
- tr, &ftrace_set_event_fops);
- if (!entry) {
- pr_warn("Could not create tracefs 'set_event' entry\n");
+ int nr_entries;
+ static struct eventfs_entry events_entries[] = {
+ {
+ .name = "enable",
+ .callback = events_callback,
+ },
+ {
+ .name = "header_page",
+ .callback = events_callback,
+ },
+ {
+ .name = "header_event",
+ .callback = events_callback,
+ },
+ };
+
+ entry = trace_create_file("set_event", TRACE_MODE_WRITE, parent,
+ tr, &ftrace_set_event_fops);
+ if (!entry)
return -ENOMEM;
- }
- d_events = tracefs_create_dir("events", parent);
- if (!d_events) {
- pr_warn("Could not create tracefs 'events' directory\n");
- return -ENOMEM;
- }
+ nr_entries = ARRAY_SIZE(events_entries);
- entry = trace_create_file("enable", 0644, d_events,
- tr, &ftrace_tr_enable_fops);
- if (!entry) {
- pr_warn("Could not create tracefs 'enable' entry\n");
+ e_events = eventfs_create_events_dir("events", parent, events_entries,
+ nr_entries, tr);
+ if (IS_ERR(e_events)) {
+ pr_warn("Could not create tracefs 'events' directory\n");
return -ENOMEM;
}
 /* These are not as crucial, just warn if they are not created */
- entry = tracefs_create_file("set_event_pid", 0644, parent,
- tr, &ftrace_set_event_pid_fops);
- if (!entry)
- pr_warn("Could not create tracefs 'set_event_pid' entry\n");
-
- entry = tracefs_create_file("set_event_notrace_pid", 0644, parent,
- tr, &ftrace_set_event_notrace_pid_fops);
- if (!entry)
- pr_warn("Could not create tracefs 'set_event_notrace_pid' entry\n");
-
- /* ring buffer internal formats */
- entry = trace_create_file("header_page", 0444, d_events,
- ring_buffer_print_page_header,
- &ftrace_show_header_fops);
- if (!entry)
- pr_warn("Could not create tracefs 'header_page' entry\n");
+ trace_create_file("set_event_pid", TRACE_MODE_WRITE, parent,
+ tr, &ftrace_set_event_pid_fops);
- entry = trace_create_file("header_event", 0444, d_events,
- ring_buffer_print_entry_header,
- &ftrace_show_header_fops);
- if (!entry)
- pr_warn("Could not create tracefs 'header_event' entry\n");
+ trace_create_file("set_event_notrace_pid",
+ TRACE_MODE_WRITE, parent, tr,
+ &ftrace_set_event_notrace_pid_fops);
- tr->event_dir = d_events;
+ tr->event_dir = e_events;
return 0;
}
@@ -3258,7 +3896,7 @@ create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
*
* When a new instance is created, it needs to set up its events
* directory, as well as other files associated with events. It also
- * creates the event hierachry in the @parent/events directory.
+ * creates the event hierarchy in the @parent/events directory.
*
* Returns 0 on success.
*
@@ -3275,7 +3913,11 @@ int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
goto out;
down_write(&trace_event_sem);
- __trace_add_event_dirs(tr);
+ /* If tr already has the event list, it is initialized in early boot. */
+ if (unlikely(!list_empty(&tr->events)))
+ __trace_early_add_event_dirs(tr);
+ else
+ __trace_add_event_dirs(tr);
up_write(&trace_event_sem);
out:
@@ -3326,7 +3968,7 @@ int event_trace_del_tracer(struct trace_array *tr)
down_write(&trace_event_sem);
__trace_remove_event_dirs(tr);
- tracefs_remove(tr->event_dir);
+ eventfs_remove_events_dir(tr->event_dir);
up_write(&trace_event_sem);
tr->event_dir = NULL;
@@ -3341,10 +3983,9 @@ static __init int event_trace_memsetup(void)
return 0;
}
-static __init void
-early_enable_events(struct trace_array *tr, bool disable_first)
+__init void
+early_enable_events(struct trace_array *tr, char *buf, bool disable_first)
{
- char *buf = bootup_event_buf;
char *token;
int ret;
@@ -3387,6 +4028,8 @@ static __init int event_trace_enable(void)
list_add(&call->list, &ftrace_events);
}
+ register_trigger_cmds();
+
/*
* We need the top trace array to have a working set of trace
* points at early init, before the debug files and directories
@@ -3395,13 +4038,12 @@ static __init int event_trace_enable(void)
*/
__trace_early_add_events(tr);
- early_enable_events(tr, false);
+ early_enable_events(tr, bootup_event_buf, false);
trace_printk_start_comm();
register_event_cmds();
- register_trigger_cmds();
return 0;
}
@@ -3411,10 +4053,10 @@ static __init int event_trace_enable(void)
* initialize events and perhaps start any events that are on the
* command line. Unfortunately, there are some events that will not
* start this early, like the system call tracepoints that need
- * to set the TIF_SYSCALL_TRACEPOINT flag of pid 1. But event_trace_enable()
- * is called before pid 1 starts, and this flag is never set, making
- * the syscall tracepoint never get reached, but the event is enabled
- * regardless (and not doing anything).
+ * to set the %SYSCALL_WORK_SYSCALL_TRACEPOINT flag of pid 1. But
+ * event_trace_enable() is called before pid 1 starts, and this flag
+ * is never set, making the syscall tracepoint never get reached, but
+ * the event is enabled regardless (and not doing anything).
*/
static __init int event_trace_enable_again(void)
{
@@ -3424,40 +4066,38 @@ static __init int event_trace_enable_again(void)
if (!tr)
return -ENODEV;
- early_enable_events(tr, true);
+ early_enable_events(tr, bootup_event_buf, true);
return 0;
}
early_initcall(event_trace_enable_again);
+/* Init fields which are not related to the tracefs */
+static __init int event_trace_init_fields(void)
+{
+ if (trace_define_generic_fields())
+ pr_warn("tracing: Failed to allocated generic fields");
+
+ if (trace_define_common_fields())
+ pr_warn("tracing: Failed to allocate common fields");
+
+ return 0;
+}
+
__init int event_trace_init(void)
{
struct trace_array *tr;
- struct dentry *d_tracer;
- struct dentry *entry;
int ret;
tr = top_trace_array();
if (!tr)
return -ENODEV;
- d_tracer = tracing_init_dentry();
- if (IS_ERR(d_tracer))
- return 0;
+ trace_create_file("available_events", TRACE_MODE_READ,
+ NULL, tr, &ftrace_avail_fops);
- entry = tracefs_create_file("available_events", 0444, d_tracer,
- tr, &ftrace_avail_fops);
- if (!entry)
- pr_warn("Could not create tracefs 'available_events' entry\n");
-
- if (trace_define_generic_fields())
- pr_warn("tracing: Failed to allocated generic fields");
-
- if (trace_define_common_fields())
- pr_warn("tracing: Failed to allocate common fields");
-
- ret = early_event_add_tracer(d_tracer, tr);
+ ret = early_event_add_tracer(NULL, tr);
if (ret)
return ret;
@@ -3466,6 +4106,9 @@ __init int event_trace_init(void)
if (ret)
pr_warn("Failed to register trace events module notifier\n");
#endif
+
+ eventdir_initialized = true;
+
return 0;
}
@@ -3474,6 +4117,7 @@ void __init trace_event_init(void)
event_trace_memsetup();
init_ftrace_syscalls();
event_trace_enable();
+ event_trace_init_fields();
}
#ifdef CONFIG_EVENT_TRACE_STARTUP_TEST
@@ -3651,17 +4295,16 @@ static struct trace_event_file event_trace_file __initdata;
static void __init
function_test_events_call(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *op, struct pt_regs *pt_regs)
+ struct ftrace_ops *op, struct ftrace_regs *regs)
{
struct trace_buffer *buffer;
struct ring_buffer_event *event;
struct ftrace_entry *entry;
- unsigned long flags;
+ unsigned int trace_ctx;
long disabled;
int cpu;
- int pc;
- pc = preempt_count();
+ trace_ctx = tracing_gen_ctx();
preempt_disable_notrace();
cpu = raw_smp_processor_id();
disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
@@ -3669,11 +4312,9 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip,
if (disabled != 1)
goto out;
- local_save_flags(flags);
-
event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file,
TRACE_FN, sizeof(*entry),
- flags, pc);
+ trace_ctx);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
@@ -3681,7 +4322,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip,
entry->parent_ip = parent_ip;
event_trigger_unlock_commit(&event_trace_file, buffer, event,
- entry, flags, pc);
+ entry, trace_ctx);
out:
atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
preempt_enable_notrace();
@@ -3690,7 +4331,6 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip,
static struct ftrace_ops trace_ops __initdata =
{
.func = function_test_events_call,
- .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
static __init void event_trace_self_test_with_function(void)
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index bf44f6bbd0c3..0c611b281a5b 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -5,6 +5,7 @@
* Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
*/
+#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
@@ -42,6 +43,52 @@ enum filter_op_ids { OPS };
static const char * ops[] = { OPS };
+enum filter_pred_fn {
+ FILTER_PRED_FN_NOP,
+ FILTER_PRED_FN_64,
+ FILTER_PRED_FN_64_CPUMASK,
+ FILTER_PRED_FN_S64,
+ FILTER_PRED_FN_U64,
+ FILTER_PRED_FN_32,
+ FILTER_PRED_FN_32_CPUMASK,
+ FILTER_PRED_FN_S32,
+ FILTER_PRED_FN_U32,
+ FILTER_PRED_FN_16,
+ FILTER_PRED_FN_16_CPUMASK,
+ FILTER_PRED_FN_S16,
+ FILTER_PRED_FN_U16,
+ FILTER_PRED_FN_8,
+ FILTER_PRED_FN_8_CPUMASK,
+ FILTER_PRED_FN_S8,
+ FILTER_PRED_FN_U8,
+ FILTER_PRED_FN_COMM,
+ FILTER_PRED_FN_STRING,
+ FILTER_PRED_FN_STRLOC,
+ FILTER_PRED_FN_STRRELLOC,
+ FILTER_PRED_FN_PCHAR_USER,
+ FILTER_PRED_FN_PCHAR,
+ FILTER_PRED_FN_CPU,
+ FILTER_PRED_FN_CPU_CPUMASK,
+ FILTER_PRED_FN_CPUMASK,
+ FILTER_PRED_FN_CPUMASK_CPU,
+ FILTER_PRED_FN_FUNCTION,
+ FILTER_PRED_FN_,
+ FILTER_PRED_TEST_VISITED,
+};
+
+struct filter_pred {
+ struct regex *regex;
+ struct cpumask *mask;
+ unsigned short *ops;
+ struct ftrace_event_field *field;
+ u64 val;
+ u64 val2;
+ enum filter_pred_fn fn_num;
+ int offset;
+ int not;
+ int op;
+};
+
/*
* pred functions are OP_LE, OP_LT, OP_GE, OP_GT, and OP_BAND
* pred_funcs_##type below must match the order of them above.
@@ -55,6 +102,8 @@ static const char * ops[] = { OPS };
C(TOO_MANY_OPEN, "Too many '('"), \
C(TOO_MANY_CLOSE, "Too few '('"), \
C(MISSING_QUOTE, "Missing matching quote"), \
+ C(MISSING_BRACE_OPEN, "Missing '{'"), \
+ C(MISSING_BRACE_CLOSE, "Missing '}'"), \
C(OPERAND_TOO_LONG, "Operand too long"), \
C(EXPECT_STRING, "Expecting string field"), \
C(EXPECT_DIGIT, "Expecting numeric field"), \
@@ -64,8 +113,10 @@ static const char * ops[] = { OPS };
C(BAD_SUBSYS_FILTER, "Couldn't find or set field in one of a subsystem's events"), \
C(TOO_MANY_PREDS, "Too many terms in predicate expression"), \
C(INVALID_FILTER, "Meaningless filter expression"), \
+ C(INVALID_CPULIST, "Invalid cpulist"), \
C(IP_FIELD_ONLY, "Only 'ip' field is supported for function trace"), \
C(INVALID_VALUE, "Invalid value (did you forget quotes)?"), \
+ C(NO_FUNCTION, "Function not found"), \
C(ERRNO, "Error"), \
C(NO_FILTER, "No filter found")
@@ -91,7 +142,7 @@ static bool is_not(const char *str)
}
/**
- * prog_entry - a singe entry in the filter program
+ * struct prog_entry - a single entry in the filter program
* @target: Index to jump to on a branch (actually one minus the index)
* @when_to_branch: The value of the result of the predicate to do a branch
* @pred: The predicate to execute.
@@ -103,16 +154,16 @@ struct prog_entry {
};
/**
- * update_preds- assign a program entry a label target
+ * update_preds - assign a program entry a label target
* @prog: The program array
* @N: The index of the current entry in @prog
- * @when_to_branch: What to assign a program entry for its branch condition
+ * @invert: What to assign a program entry for its branch condition
*
* The program entry at @N has a target that points to the index of a program
* entry that can have its target and when_to_branch fields updated.
* Update the current program entry denoted by index @N target field to be
* that of the updated entry. This will denote the entry to update if
- * we are processing an "||" after an "&&"
+ * we are processing an "||" after an "&&".
*/
static void update_preds(struct prog_entry *prog, int N, int invert)
{
@@ -146,6 +197,15 @@ enum {
PROCESS_OR = 4,
};
+static void free_predicate(struct filter_pred *pred)
+{
+ if (pred) {
+ kfree(pred->regex);
+ kfree(pred->mask);
+ kfree(pred);
+ }
+}
+
/*
* Without going into a formal proof, this explains the method that is used in
* parsing the logical expressions.
@@ -256,7 +316,7 @@ enum {
* is "&&" we don't call update_preds(). Instead continue to "c". As the
* next token after "c" is not "&&" but the end of input, we first process the
* "&&" by calling update_preds() for the "&&" then we process the "||" by
- * callin updates_preds() with the values for processing "||".
+ * calling update_preds() with the values for processing "||".
*
* What does that mean? What update_preds() does is to first save the "target"
* of the program entry indexed by the current program entry's "target"
@@ -296,7 +356,7 @@ enum {
* and "FALSE" the program entry after that, we are now done with the first
* pass.
*
- * Making the above "a || b && c" have a progam of:
+ * Making the above "a || b && c" have a program of:
* prog[0] = { "a", 1, 2 }
* prog[1] = { "b", 0, 2 }
* prog[2] = { "c", 0, 3 }
@@ -390,7 +450,7 @@ enum {
* F: return FALSE
*
* As "r = a; if (!r) goto n5;" is obviously the same as
- * "if (!a) goto n5;" without doing anything we can interperate the
+ * "if (!a) goto n5;" without doing anything we can interpret the
* program as:
* n1: if (!a) goto n5;
* n2: if (!b) goto n5;
@@ -499,7 +559,7 @@ predicate_parse(const char *str, int nr_parens, int nr_preds,
ptr++;
break;
}
- /* fall through */
+ fallthrough;
default:
parse_error(pe, FILT_ERR_TOO_MANY_PREDS,
next - str);
@@ -583,50 +643,118 @@ out_free:
kfree(inverts);
if (prog_stack) {
for (i = 0; prog_stack[i].pred; i++)
- kfree(prog_stack[i].pred);
+ free_predicate(prog_stack[i].pred);
kfree(prog_stack);
}
return ERR_PTR(ret);
}
+static inline int
+do_filter_cpumask(int op, const struct cpumask *mask, const struct cpumask *cmp)
+{
+ switch (op) {
+ case OP_EQ:
+ return cpumask_equal(mask, cmp);
+ case OP_NE:
+ return !cpumask_equal(mask, cmp);
+ case OP_BAND:
+ return cpumask_intersects(mask, cmp);
+ default:
+ return 0;
+ }
+}
+
+/* Optimisation of do_filter_cpumask() for scalar fields */
+static inline int
+do_filter_scalar_cpumask(int op, unsigned int cpu, const struct cpumask *mask)
+{
+ /*
+ * Per the weight-of-one cpumask optimisations, the mask passed in this
+ * function has a weight >= 2, so it is never equal to a single scalar.
+ */
+ switch (op) {
+ case OP_EQ:
+ return false;
+ case OP_NE:
+ return true;
+ case OP_BAND:
+ return cpumask_test_cpu(cpu, mask);
+ default:
+ return 0;
+ }
+}
+
+static inline int
+do_filter_cpumask_scalar(int op, const struct cpumask *mask, unsigned int cpu)
+{
+ switch (op) {
+ case OP_EQ:
+ return cpumask_test_cpu(cpu, mask) &&
+ cpumask_nth(1, mask) >= nr_cpu_ids;
+ case OP_NE:
+ return !cpumask_test_cpu(cpu, mask) ||
+ cpumask_nth(1, mask) < nr_cpu_ids;
+ case OP_BAND:
+ return cpumask_test_cpu(cpu, mask);
+ default:
+ return 0;
+ }
+}
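The OP_EQ case above encodes "the recorded cpumask is exactly the single requested CPU"; a small worked example with illustrative values:

	/* do_filter_cpumask_scalar(OP_EQ, mask, cpu):
	 *   field mask = {3},   cpu = 3:  bit 3 set, no second set bit  -> match
	 *   field mask = {3,7}, cpu = 3:  second set bit (7) exists     -> no match
	 *   field mask = {7},   cpu = 3:  bit 3 clear                   -> no match
	 */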
+
+enum pred_cmp_types {
+ PRED_CMP_TYPE_NOP,
+ PRED_CMP_TYPE_LT,
+ PRED_CMP_TYPE_LE,
+ PRED_CMP_TYPE_GT,
+ PRED_CMP_TYPE_GE,
+ PRED_CMP_TYPE_BAND,
+};
+
#define DEFINE_COMPARISON_PRED(type) \
-static int filter_pred_LT_##type(struct filter_pred *pred, void *event) \
-{ \
- type *addr = (type *)(event + pred->offset); \
- type val = (type)pred->val; \
- return *addr < val; \
-} \
-static int filter_pred_LE_##type(struct filter_pred *pred, void *event) \
-{ \
- type *addr = (type *)(event + pred->offset); \
- type val = (type)pred->val; \
- return *addr <= val; \
-} \
-static int filter_pred_GT_##type(struct filter_pred *pred, void *event) \
-{ \
- type *addr = (type *)(event + pred->offset); \
- type val = (type)pred->val; \
- return *addr > val; \
-} \
-static int filter_pred_GE_##type(struct filter_pred *pred, void *event) \
+static int filter_pred_##type(struct filter_pred *pred, void *event) \
{ \
- type *addr = (type *)(event + pred->offset); \
- type val = (type)pred->val; \
- return *addr >= val; \
-} \
-static int filter_pred_BAND_##type(struct filter_pred *pred, void *event) \
-{ \
- type *addr = (type *)(event + pred->offset); \
- type val = (type)pred->val; \
- return !!(*addr & val); \
-} \
-static const filter_pred_fn_t pred_funcs_##type[] = { \
- filter_pred_LE_##type, \
- filter_pred_LT_##type, \
- filter_pred_GE_##type, \
- filter_pred_GT_##type, \
- filter_pred_BAND_##type, \
-};
+ switch (pred->op) { \
+ case OP_LT: { \
+ type *addr = (type *)(event + pred->offset); \
+ type val = (type)pred->val; \
+ return *addr < val; \
+ } \
+ case OP_LE: { \
+ type *addr = (type *)(event + pred->offset); \
+ type val = (type)pred->val; \
+ return *addr <= val; \
+ } \
+ case OP_GT: { \
+ type *addr = (type *)(event + pred->offset); \
+ type val = (type)pred->val; \
+ return *addr > val; \
+ } \
+ case OP_GE: { \
+ type *addr = (type *)(event + pred->offset); \
+ type val = (type)pred->val; \
+ return *addr >= val; \
+ } \
+ case OP_BAND: { \
+ type *addr = (type *)(event + pred->offset); \
+ type val = (type)pred->val; \
+ return !!(*addr & val); \
+ } \
+ default: \
+ return 0; \
+ } \
+}
+
+#define DEFINE_CPUMASK_COMPARISON_PRED(size) \
+static int filter_pred_##size##_cpumask(struct filter_pred *pred, void *event) \
+{ \
+ u##size *addr = (u##size *)(event + pred->offset); \
+ unsigned int cpu = *addr; \
+ \
+ if (cpu >= nr_cpu_ids) \
+ return 0; \
+ \
+ return do_filter_scalar_cpumask(pred->op, cpu, pred->mask); \
+}
#define DEFINE_EQUALITY_PRED(size) \
static int filter_pred_##size(struct filter_pred *pred, void *event) \
@@ -649,37 +777,112 @@ DEFINE_COMPARISON_PRED(u16);
DEFINE_COMPARISON_PRED(s8);
DEFINE_COMPARISON_PRED(u8);
+DEFINE_CPUMASK_COMPARISON_PRED(64);
+DEFINE_CPUMASK_COMPARISON_PRED(32);
+DEFINE_CPUMASK_COMPARISON_PRED(16);
+DEFINE_CPUMASK_COMPARISON_PRED(8);
+
DEFINE_EQUALITY_PRED(64);
DEFINE_EQUALITY_PRED(32);
DEFINE_EQUALITY_PRED(16);
DEFINE_EQUALITY_PRED(8);
+/* user space strings temp buffer */
+#define USTRING_BUF_SIZE 1024
+
+struct ustring_buffer {
+ char buffer[USTRING_BUF_SIZE];
+};
+
+static __percpu struct ustring_buffer *ustring_per_cpu;
+
+static __always_inline char *test_string(char *str)
+{
+ struct ustring_buffer *ubuf;
+ char *kstr;
+
+ if (!ustring_per_cpu)
+ return NULL;
+
+ ubuf = this_cpu_ptr(ustring_per_cpu);
+ kstr = ubuf->buffer;
+
+ /* For safety, do not trust the string pointer */
+ if (!strncpy_from_kernel_nofault(kstr, str, USTRING_BUF_SIZE))
+ return NULL;
+ return kstr;
+}
+
+static __always_inline char *test_ustring(char *str)
+{
+ struct ustring_buffer *ubuf;
+ char __user *ustr;
+ char *kstr;
+
+ if (!ustring_per_cpu)
+ return NULL;
+
+ ubuf = this_cpu_ptr(ustring_per_cpu);
+ kstr = ubuf->buffer;
+
+ /* user space address? */
+ ustr = (char __user *)str;
+ if (!strncpy_from_user_nofault(kstr, ustr, USTRING_BUF_SIZE))
+ return NULL;
+
+ return kstr;
+}
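test_string() and test_ustring() copy the event's string through a per-CPU scratch buffer using the _nofault helpers, so a stale or user-space pointer makes the predicate evaluate to "no match" rather than faulting in the tracing path. The buffer is only allocated once a pointer-string filter is installed (see the FILTER_PRED_FN_PCHAR* setup in parse_pred() below):

	/* ustring_per_cpu == NULL -> test_string() returns NULL
	 *                            -> filter_pred_pchar() returns 0 ("no match")
	 * bad pointer             -> strncpy_from_kernel_nofault() fails -> same
	 */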
+
/* Filter predicate for fixed sized arrays of characters */
static int filter_pred_string(struct filter_pred *pred, void *event)
{
char *addr = (char *)(event + pred->offset);
int cmp, match;
- cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len);
+ cmp = pred->regex->match(addr, pred->regex, pred->regex->field_len);
match = cmp ^ pred->not;
return match;
}
-/* Filter predicate for char * pointers */
-static int filter_pred_pchar(struct filter_pred *pred, void *event)
+static __always_inline int filter_pchar(struct filter_pred *pred, char *str)
{
- char **addr = (char **)(event + pred->offset);
int cmp, match;
- int len = strlen(*addr) + 1; /* including tailing '\0' */
+ int len;
- cmp = pred->regex.match(*addr, &pred->regex, len);
+ len = strlen(str) + 1; /* including trailing '\0' */
+ cmp = pred->regex->match(str, pred->regex, len);
match = cmp ^ pred->not;
return match;
}
+/* Filter predicate for char * pointers */
+static int filter_pred_pchar(struct filter_pred *pred, void *event)
+{
+ char **addr = (char **)(event + pred->offset);
+ char *str;
+
+ str = test_string(*addr);
+ if (!str)
+ return 0;
+
+ return filter_pchar(pred, str);
+}
+
+/* Filter predicate for char * pointers in user space */
+static int filter_pred_pchar_user(struct filter_pred *pred, void *event)
+{
+ char **addr = (char **)(event + pred->offset);
+ char *str;
+
+ str = test_ustring(*addr);
+ if (!str)
+ return 0;
+
+ return filter_pchar(pred, str);
+}
/*
* Filter predicate for dynamic sized arrays of characters.
@@ -699,7 +902,30 @@ static int filter_pred_strloc(struct filter_pred *pred, void *event)
char *addr = (char *)(event + str_loc);
int cmp, match;
- cmp = pred->regex.match(addr, &pred->regex, str_len);
+ cmp = pred->regex->match(addr, pred->regex, str_len);
+
+ match = cmp ^ pred->not;
+
+ return match;
+}
+
+/*
+ * Filter predicate for relative dynamic sized arrays of characters.
+ * These are implemented through a list of strings at the end
+ * of the entry, the same as for dynamic strings.
+ * The difference is that the relative one records the location offset
+ * from the field itself, not the event entry.
+ */
+static int filter_pred_strrelloc(struct filter_pred *pred, void *event)
+{
+ u32 *item = (u32 *)(event + pred->offset);
+ u32 str_item = *item;
+ int str_loc = str_item & 0xffff;
+ int str_len = str_item >> 16;
+ char *addr = (char *)(&item[1]) + str_loc;
+ int cmp, match;
+
+ cmp = pred->regex->match(addr, pred->regex, str_len);
match = cmp ^ pred->not;
@@ -732,19 +958,55 @@ static int filter_pred_cpu(struct filter_pred *pred, void *event)
}
}
+/* Filter predicate for current CPU vs user-provided cpumask */
+static int filter_pred_cpu_cpumask(struct filter_pred *pred, void *event)
+{
+ int cpu = raw_smp_processor_id();
+
+ return do_filter_scalar_cpumask(pred->op, cpu, pred->mask);
+}
+
+/* Filter predicate for cpumask field vs user-provided cpumask */
+static int filter_pred_cpumask(struct filter_pred *pred, void *event)
+{
+ u32 item = *(u32 *)(event + pred->offset);
+ int loc = item & 0xffff;
+ const struct cpumask *mask = (event + loc);
+ const struct cpumask *cmp = pred->mask;
+
+ return do_filter_cpumask(pred->op, mask, cmp);
+}
+
+/* Filter predicate for cpumask field vs user-provided scalar */
+static int filter_pred_cpumask_cpu(struct filter_pred *pred, void *event)
+{
+ u32 item = *(u32 *)(event + pred->offset);
+ int loc = item & 0xffff;
+ const struct cpumask *mask = (event + loc);
+ unsigned int cpu = pred->val;
+
+ return do_filter_cpumask_scalar(pred->op, mask, cpu);
+}
+
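These predicates back the CPUS{...} filter syntax parsed further down in this patch (the "CPUS" branch of parse_pred()). Filter strings of roughly the following shapes would select them; the field names are illustrative:

	/* "mask_field & CPUS{2-5}"   cpumask field vs cpulist     -> filter_pred_cpumask()
	 * "mask_field == CPUS{3}"    cpumask field vs single CPU  -> filter_pred_cpumask_cpu()
	 * "CPU & CPUS{16-23}"        current CPU vs cpulist       -> filter_pred_cpu_cpumask()
	 */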
/* Filter predicate for COMM. */
static int filter_pred_comm(struct filter_pred *pred, void *event)
{
int cmp;
- cmp = pred->regex.match(current->comm, &pred->regex,
+ cmp = pred->regex->match(current->comm, pred->regex,
TASK_COMM_LEN);
return cmp ^ pred->not;
}
-static int filter_pred_none(struct filter_pred *pred, void *event)
+/* Filter predicate for functions. */
+static int filter_pred_function(struct filter_pred *pred, void *event)
{
- return 0;
+ unsigned long *addr = (unsigned long *)(event + pred->offset);
+ unsigned long start = (unsigned long)pred->val;
+ unsigned long end = (unsigned long)pred->val2;
+ int ret = *addr >= start && *addr < end;
+
+ return pred->op == OP_EQ ? ret : !ret;
}
/*
@@ -756,7 +1018,7 @@ static int filter_pred_none(struct filter_pred *pred, void *event)
*
* Note:
* - @str might not be NULL-terminated if it's of type DYN_STRING
- * or STATIC_STRING, unless @len is zero.
+ * RDYN_STRING, or STATIC_STRING, unless @len is zero.
*/
static int regex_match_full(char *str, struct regex *r, int len)
@@ -861,7 +1123,7 @@ enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
static void filter_build_regex(struct filter_pred *pred)
{
- struct regex *r = &pred->regex;
+ struct regex *r = pred->regex;
char *search;
enum regex_type type = MATCH_FULL;
@@ -892,6 +1154,19 @@ static void filter_build_regex(struct filter_pred *pred)
}
}
+
+#ifdef CONFIG_FTRACE_STARTUP_TEST
+static int test_pred_visited_fn(struct filter_pred *pred, void *event);
+#else
+static int test_pred_visited_fn(struct filter_pred *pred, void *event)
+{
+ return 0;
+}
+#endif
+
+
+static int filter_pred_fn_call(struct filter_pred *pred, void *event);
+
/* return 1 if event matches, 0 otherwise (discard) */
int filter_match_preds(struct event_filter *filter, void *rec)
{
@@ -909,7 +1184,7 @@ int filter_match_preds(struct event_filter *filter, void *rec)
for (i = 0; prog[i].pred; i++) {
struct filter_pred *pred = prog[i].pred;
- int match = pred->fn(pred, rec);
+ int match = filter_pred_fn_call(pred, rec);
if (match == prog[i].when_to_branch)
i = prog[i].target;
}
@@ -1013,7 +1288,7 @@ static void free_prog(struct event_filter *filter)
return;
for (i = 0; prog[i].pred; i++)
- kfree(prog[i].pred);
+ free_predicate(prog[i].pred);
kfree(prog);
}
@@ -1080,8 +1355,15 @@ static void filter_free_subsystem_filters(struct trace_subsystem_dir *dir,
int filter_assign_type(const char *type)
{
- if (strstr(type, "__data_loc") && strstr(type, "char"))
- return FILTER_DYN_STRING;
+ if (strstr(type, "__data_loc")) {
+ if (strstr(type, "char"))
+ return FILTER_DYN_STRING;
+ if (strstr(type, "cpumask_t"))
+ return FILTER_CPUMASK;
+ }
+
+ if (strstr(type, "__rel_loc") && strstr(type, "char"))
+ return FILTER_RDYN_STRING;
if (strchr(type, '[') && strstr(type, "char"))
return FILTER_STATIC_STRING;
@@ -1092,10 +1374,10 @@ int filter_assign_type(const char *type)
return FILTER_OTHER;
}
-static filter_pred_fn_t select_comparison_fn(enum filter_op_ids op,
- int field_size, int field_is_signed)
+static enum filter_pred_fn select_comparison_fn(enum filter_op_ids op,
+ int field_size, int field_is_signed)
{
- filter_pred_fn_t fn = NULL;
+ enum filter_pred_fn fn = FILTER_PRED_FN_NOP;
int pred_func_index = -1;
switch (op) {
@@ -1104,50 +1386,115 @@ static filter_pred_fn_t select_comparison_fn(enum filter_op_ids op,
break;
default:
if (WARN_ON_ONCE(op < PRED_FUNC_START))
- return NULL;
+ return fn;
pred_func_index = op - PRED_FUNC_START;
if (WARN_ON_ONCE(pred_func_index > PRED_FUNC_MAX))
- return NULL;
+ return fn;
}
switch (field_size) {
case 8:
if (pred_func_index < 0)
- fn = filter_pred_64;
+ fn = FILTER_PRED_FN_64;
else if (field_is_signed)
- fn = pred_funcs_s64[pred_func_index];
+ fn = FILTER_PRED_FN_S64;
else
- fn = pred_funcs_u64[pred_func_index];
+ fn = FILTER_PRED_FN_U64;
break;
case 4:
if (pred_func_index < 0)
- fn = filter_pred_32;
+ fn = FILTER_PRED_FN_32;
else if (field_is_signed)
- fn = pred_funcs_s32[pred_func_index];
+ fn = FILTER_PRED_FN_S32;
else
- fn = pred_funcs_u32[pred_func_index];
+ fn = FILTER_PRED_FN_U32;
break;
case 2:
if (pred_func_index < 0)
- fn = filter_pred_16;
+ fn = FILTER_PRED_FN_16;
else if (field_is_signed)
- fn = pred_funcs_s16[pred_func_index];
+ fn = FILTER_PRED_FN_S16;
else
- fn = pred_funcs_u16[pred_func_index];
+ fn = FILTER_PRED_FN_U16;
break;
case 1:
if (pred_func_index < 0)
- fn = filter_pred_8;
+ fn = FILTER_PRED_FN_8;
else if (field_is_signed)
- fn = pred_funcs_s8[pred_func_index];
+ fn = FILTER_PRED_FN_S8;
else
- fn = pred_funcs_u8[pred_func_index];
+ fn = FILTER_PRED_FN_U8;
break;
}
return fn;
}
+
+static int filter_pred_fn_call(struct filter_pred *pred, void *event)
+{
+ switch (pred->fn_num) {
+ case FILTER_PRED_FN_64:
+ return filter_pred_64(pred, event);
+ case FILTER_PRED_FN_64_CPUMASK:
+ return filter_pred_64_cpumask(pred, event);
+ case FILTER_PRED_FN_S64:
+ return filter_pred_s64(pred, event);
+ case FILTER_PRED_FN_U64:
+ return filter_pred_u64(pred, event);
+ case FILTER_PRED_FN_32:
+ return filter_pred_32(pred, event);
+ case FILTER_PRED_FN_32_CPUMASK:
+ return filter_pred_32_cpumask(pred, event);
+ case FILTER_PRED_FN_S32:
+ return filter_pred_s32(pred, event);
+ case FILTER_PRED_FN_U32:
+ return filter_pred_u32(pred, event);
+ case FILTER_PRED_FN_16:
+ return filter_pred_16(pred, event);
+ case FILTER_PRED_FN_16_CPUMASK:
+ return filter_pred_16_cpumask(pred, event);
+ case FILTER_PRED_FN_S16:
+ return filter_pred_s16(pred, event);
+ case FILTER_PRED_FN_U16:
+ return filter_pred_u16(pred, event);
+ case FILTER_PRED_FN_8:
+ return filter_pred_8(pred, event);
+ case FILTER_PRED_FN_8_CPUMASK:
+ return filter_pred_8_cpumask(pred, event);
+ case FILTER_PRED_FN_S8:
+ return filter_pred_s8(pred, event);
+ case FILTER_PRED_FN_U8:
+ return filter_pred_u8(pred, event);
+ case FILTER_PRED_FN_COMM:
+ return filter_pred_comm(pred, event);
+ case FILTER_PRED_FN_STRING:
+ return filter_pred_string(pred, event);
+ case FILTER_PRED_FN_STRLOC:
+ return filter_pred_strloc(pred, event);
+ case FILTER_PRED_FN_STRRELLOC:
+ return filter_pred_strrelloc(pred, event);
+ case FILTER_PRED_FN_PCHAR_USER:
+ return filter_pred_pchar_user(pred, event);
+ case FILTER_PRED_FN_PCHAR:
+ return filter_pred_pchar(pred, event);
+ case FILTER_PRED_FN_CPU:
+ return filter_pred_cpu(pred, event);
+ case FILTER_PRED_FN_CPU_CPUMASK:
+ return filter_pred_cpu_cpumask(pred, event);
+ case FILTER_PRED_FN_CPUMASK:
+ return filter_pred_cpumask(pred, event);
+ case FILTER_PRED_FN_CPUMASK_CPU:
+ return filter_pred_cpumask_cpu(pred, event);
+ case FILTER_PRED_FN_FUNCTION:
+ return filter_pred_function(pred, event);
+ case FILTER_PRED_TEST_VISITED:
+ return test_pred_visited_fn(pred, event);
+ default:
+ return 0;
+ }
+}
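This dispatcher is the other half of replacing the old pred->fn function pointer with the pred->fn_num enum: select_comparison_fn() now returns an enum value and every call site goes through this switch. The reason for dropping the indirect call is not stated in this hunk, so take it as an assumption; the pattern in isolation looks like this (names illustrative):

	enum widget_fn { WIDGET_FN_NOP, WIDGET_FN_DOUBLE, WIDGET_FN_SQUARE };

	static int widget_fn_call(enum widget_fn fn, int x)
	{
		/* one enum tag per handler replaces a stored function pointer */
		switch (fn) {
		case WIDGET_FN_DOUBLE:
			return 2 * x;
		case WIDGET_FN_SQUARE:
			return x * x;
		case WIDGET_FN_NOP:
		default:
			return 0;
		}
	}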
+
/* Called when a predicate is encountered by predicate_parse() */
static int parse_pred(const char *str, void *data,
int pos, struct filter_parse_error *pe,
@@ -1156,8 +1503,14 @@ static int parse_pred(const char *str, void *data,
struct trace_event_call *call = data;
struct ftrace_event_field *field;
struct filter_pred *pred = NULL;
+ unsigned long offset;
+ unsigned long size;
+ unsigned long ip;
char num_buf[24]; /* Big enough to hold an address */
char *field_name;
+ char *name;
+ bool function = false;
+ bool ustring = false;
char q;
u64 val;
int len;
@@ -1192,6 +1545,18 @@ static int parse_pred(const char *str, void *data,
return -EINVAL;
}
+ /* See if the field is a user space string */
+ if ((len = str_has_prefix(str + i, ".ustring"))) {
+ ustring = true;
+ i += len;
+ }
+
+ /* See if the field is a kernel function name */
+ if ((len = str_has_prefix(str + i, ".function"))) {
+ function = true;
+ i += len;
+ }
+
while (isspace(str[i]))
i++;
@@ -1222,7 +1587,71 @@ static int parse_pred(const char *str, void *data,
pred->offset = field->offset;
pred->op = op;
- if (ftrace_event_is_function(call)) {
+ if (function) {
+ /* The field must be the same size as long */
+ if (field->size != sizeof(long)) {
+ parse_error(pe, FILT_ERR_ILLEGAL_FIELD_OP, pos + i);
+ goto err_free;
+ }
+
+ /* Function only works with '==' or '!=' and an unquoted string */
+ switch (op) {
+ case OP_NE:
+ case OP_EQ:
+ break;
+ default:
+ parse_error(pe, FILT_ERR_INVALID_OP, pos + i);
+ goto err_free;
+ }
+
+ if (isdigit(str[i])) {
+ /* We allow 0xDEADBEEF */
+ while (isalnum(str[i]))
+ i++;
+
+ len = i - s;
+ /* 0xfeedfacedeadbeef is 18 chars max */
+ if (len >= sizeof(num_buf)) {
+ parse_error(pe, FILT_ERR_OPERAND_TOO_LONG, pos + i);
+ goto err_free;
+ }
+
+ strncpy(num_buf, str + s, len);
+ num_buf[len] = 0;
+
+ ret = kstrtoul(num_buf, 0, &ip);
+ if (ret) {
+ parse_error(pe, FILT_ERR_INVALID_VALUE, pos + i);
+ goto err_free;
+ }
+ } else {
+ s = i;
+ for (; str[i] && !isspace(str[i]); i++)
+ ;
+
+ len = i - s;
+ name = kmemdup_nul(str + s, len, GFP_KERNEL);
+ if (!name)
+ goto err_mem;
+ ip = kallsyms_lookup_name(name);
+ kfree(name);
+ if (!ip) {
+ parse_error(pe, FILT_ERR_NO_FUNCTION, pos + i);
+ goto err_free;
+ }
+ }
+
+ /* Now find the function start and end address */
+ if (!kallsyms_lookup_size_offset(ip, &size, &offset)) {
+ parse_error(pe, FILT_ERR_NO_FUNCTION, pos + i);
+ goto err_free;
+ }
+
+ pred->fn_num = FILTER_PRED_FN_FUNCTION;
+ pred->val = ip - offset;
+ pred->val2 = pred->val + size;
+
+ } else if (ftrace_event_is_function(call)) {
/*
* Perf does things different with function events.
* It only allows an "ip" field, and expects a string.
@@ -1234,7 +1663,7 @@ static int parse_pred(const char *str, void *data,
parse_error(pe, FILT_ERR_IP_FIELD_ONLY, pos + i);
goto err_free;
}
- pred->fn = filter_pred_none;
+ pred->fn_num = FILTER_PRED_FN_NOP;
/*
* Quotes are not required, but if they exist then we need
@@ -1261,9 +1690,130 @@ static int parse_pred(const char *str, void *data,
goto err_free;
}
- pred->regex.len = len;
- strncpy(pred->regex.pattern, str + s, len);
- pred->regex.pattern[len] = 0;
+ pred->regex = kzalloc(sizeof(*pred->regex), GFP_KERNEL);
+ if (!pred->regex)
+ goto err_mem;
+ pred->regex->len = len;
+ strncpy(pred->regex->pattern, str + s, len);
+ pred->regex->pattern[len] = 0;
+
+ } else if (!strncmp(str + i, "CPUS", 4)) {
+ unsigned int maskstart;
+ bool single;
+ char *tmp;
+
+ switch (field->filter_type) {
+ case FILTER_CPUMASK:
+ case FILTER_CPU:
+ case FILTER_OTHER:
+ break;
+ default:
+ parse_error(pe, FILT_ERR_ILLEGAL_FIELD_OP, pos + i);
+ goto err_free;
+ }
+
+ switch (op) {
+ case OP_EQ:
+ case OP_NE:
+ case OP_BAND:
+ break;
+ default:
+ parse_error(pe, FILT_ERR_ILLEGAL_FIELD_OP, pos + i);
+ goto err_free;
+ }
+
+ /* Skip CPUS */
+ i += 4;
+ if (str[i++] != '{') {
+ parse_error(pe, FILT_ERR_MISSING_BRACE_OPEN, pos + i);
+ goto err_free;
+ }
+ maskstart = i;
+
+ /* Walk the cpulist until closing } */
+ for (; str[i] && str[i] != '}'; i++)
+ ;
+
+ if (str[i] != '}') {
+ parse_error(pe, FILT_ERR_MISSING_BRACE_CLOSE, pos + i);
+ goto err_free;
+ }
+
+ if (maskstart == i) {
+ parse_error(pe, FILT_ERR_INVALID_CPULIST, pos + i);
+ goto err_free;
+ }
+
+ /* Copy the cpulist between { and } */
+ tmp = kmalloc((i - maskstart) + 1, GFP_KERNEL);
+ if (!tmp)
+ goto err_mem;
+
+ strscpy(tmp, str + maskstart, (i - maskstart) + 1);
+ pred->mask = kzalloc(cpumask_size(), GFP_KERNEL);
+ if (!pred->mask) {
+ kfree(tmp);
+ goto err_mem;
+ }
+
+ /* Now parse it */
+ if (cpulist_parse(tmp, pred->mask)) {
+ kfree(tmp);
+ parse_error(pe, FILT_ERR_INVALID_CPULIST, pos + i);
+ goto err_free;
+ }
+ kfree(tmp);
+
+ /* Move along */
+ i++;
+
+ /*
+ * Optimisation: if the user-provided mask has a weight of one
+ * then we can treat it as a scalar input.
+ */
+ single = cpumask_weight(pred->mask) == 1;
+ if (single) {
+ pred->val = cpumask_first(pred->mask);
+ kfree(pred->mask);
+ pred->mask = NULL;
+ }
+
+ if (field->filter_type == FILTER_CPUMASK) {
+ pred->fn_num = single ?
+ FILTER_PRED_FN_CPUMASK_CPU :
+ FILTER_PRED_FN_CPUMASK;
+ } else if (field->filter_type == FILTER_CPU) {
+ if (single) {
+ if (pred->op == OP_BAND)
+ pred->op = OP_EQ;
+
+ pred->fn_num = FILTER_PRED_FN_CPU;
+ } else {
+ pred->fn_num = FILTER_PRED_FN_CPU_CPUMASK;
+ }
+ } else if (single) {
+ if (pred->op == OP_BAND)
+ pred->op = OP_EQ;
+
+ pred->fn_num = select_comparison_fn(pred->op, field->size, false);
+ if (pred->op == OP_NE)
+ pred->not = 1;
+ } else {
+ switch (field->size) {
+ case 8:
+ pred->fn_num = FILTER_PRED_FN_64_CPUMASK;
+ break;
+ case 4:
+ pred->fn_num = FILTER_PRED_FN_32_CPUMASK;
+ break;
+ case 2:
+ pred->fn_num = FILTER_PRED_FN_16_CPUMASK;
+ break;
+ case 1:
+ pred->fn_num = FILTER_PRED_FN_8_CPUMASK;
+ break;
+ }
+ }
/* This is either a string, or an integer */
} else if (str[i] == '\'' || str[i] == '"') {
@@ -1273,7 +1823,7 @@ static int parse_pred(const char *str, void *data,
switch (op) {
case OP_NE:
pred->not = 1;
- /* Fall through */
+ fallthrough;
case OP_GLOB:
case OP_EQ:
break;
@@ -1305,23 +1855,40 @@ static int parse_pred(const char *str, void *data,
goto err_free;
}
- pred->regex.len = len;
- strncpy(pred->regex.pattern, str + s, len);
- pred->regex.pattern[len] = 0;
+ pred->regex = kzalloc(sizeof(*pred->regex), GFP_KERNEL);
+ if (!pred->regex)
+ goto err_mem;
+ pred->regex->len = len;
+ strncpy(pred->regex->pattern, str + s, len);
+ pred->regex->pattern[len] = 0;
filter_build_regex(pred);
if (field->filter_type == FILTER_COMM) {
- pred->fn = filter_pred_comm;
+ pred->fn_num = FILTER_PRED_FN_COMM;
} else if (field->filter_type == FILTER_STATIC_STRING) {
- pred->fn = filter_pred_string;
- pred->regex.field_len = field->size;
+ pred->fn_num = FILTER_PRED_FN_STRING;
+ pred->regex->field_len = field->size;
- } else if (field->filter_type == FILTER_DYN_STRING)
- pred->fn = filter_pred_strloc;
- else
- pred->fn = filter_pred_pchar;
+ } else if (field->filter_type == FILTER_DYN_STRING) {
+ pred->fn_num = FILTER_PRED_FN_STRLOC;
+ } else if (field->filter_type == FILTER_RDYN_STRING)
+ pred->fn_num = FILTER_PRED_FN_STRRELLOC;
+ else {
+
+ if (!ustring_per_cpu) {
+ /* Once allocated, keep it around for good */
+ ustring_per_cpu = alloc_percpu(struct ustring_buffer);
+ if (!ustring_per_cpu)
+ goto err_mem;
+ }
+
+ if (ustring)
+ pred->fn_num = FILTER_PRED_FN_PCHAR_USER;
+ else
+ pred->fn_num = FILTER_PRED_FN_PCHAR;
+ }
/* go past the last quote */
i++;
@@ -1368,10 +1935,10 @@ static int parse_pred(const char *str, void *data,
pred->val = val;
if (field->filter_type == FILTER_CPU)
- pred->fn = filter_pred_cpu;
+ pred->fn_num = FILTER_PRED_FN_CPU;
else {
- pred->fn = select_comparison_fn(pred->op, field->size,
- field->is_signed);
+ pred->fn_num = select_comparison_fn(pred->op, field->size,
+ field->is_signed);
if (pred->op == OP_NE)
pred->not = 1;
}
@@ -1385,8 +1952,11 @@ static int parse_pred(const char *str, void *data,
return i;
err_free:
- kfree(pred);
+ free_predicate(pred);
return -EINVAL;
+err_mem:
+ free_predicate(pred);
+ return -ENOMEM;
}
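The ".function" suffix handled above lets a pointer-sized field be compared against a function's address range: the operand may be a plain symbol name (resolved with kallsyms_lookup_name()) or a numeric address, and kallsyms_lookup_size_offset() turns it into a [start, end) range; only '==' and '!=' are accepted. A sketch of the resulting filter strings, with an assumed "call_site" field and arbitrary symbol/address:

	/* "call_site.function == vfs_read"            match addresses inside vfs_read()
	 * "call_site.function != 0xffffffff81234560"  exclude the function that
	 *                                             contains that address
	 */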
enum {
@@ -1561,27 +2131,6 @@ static inline void event_clear_filter(struct trace_event_file *file)
RCU_INIT_POINTER(file->filter, NULL);
}
-static inline void
-event_set_no_set_filter_flag(struct trace_event_file *file)
-{
- file->flags |= EVENT_FILE_FL_NO_SET_FILTER;
-}
-
-static inline void
-event_clear_no_set_filter_flag(struct trace_event_file *file)
-{
- file->flags &= ~EVENT_FILE_FL_NO_SET_FILTER;
-}
-
-static inline bool
-event_no_set_filter_flag(struct trace_event_file *file)
-{
- if (file->flags & EVENT_FILE_FL_NO_SET_FILTER)
- return true;
-
- return false;
-}
-
struct filter_list {
struct list_head list;
struct event_filter *filter;
@@ -1714,8 +2263,9 @@ static void create_filter_finish(struct filter_parse_error *pe)
/**
* create_filter - create a filter for a trace_event_call
+ * @tr: the trace array associated with these events
* @call: trace_event_call to create a filter for
- * @filter_str: filter string
+ * @filter_string: filter string
* @set_str: remember @filter_str and enable detailed error in filter
* @filterp: out param for created filter (always updated on return)
* Must be a pointer that references a NULL pointer.
@@ -1762,8 +2312,8 @@ int create_event_filter(struct trace_array *tr,
}
/**
- * create_system_filter - create a filter for an event_subsystem
- * @system: event_subsystem to create a filter for
+ * create_system_filter - create a filter for an event subsystem
+ * @dir: the descriptor for the subsystem directory
* @filter_str: filter string
* @filterp: out param for created filter (always updated on return)
*
@@ -1771,7 +2321,6 @@ int create_event_filter(struct trace_array *tr,
* and always remembers @filter_str.
*/
static int create_system_filter(struct trace_subsystem_dir *dir,
- struct trace_array *tr,
char *filter_str, struct event_filter **filterp)
{
struct filter_parse_error *pe = NULL;
@@ -1779,13 +2328,13 @@ static int create_system_filter(struct trace_subsystem_dir *dir,
err = create_filter_start(filter_str, true, &pe, filterp);
if (!err) {
- err = process_system_preds(dir, tr, pe, filter_str);
+ err = process_system_preds(dir, dir->tr, pe, filter_str);
if (!err) {
/* System filters just show a default message */
kfree((*filterp)->filter_string);
(*filterp)->filter_string = NULL;
} else {
- append_filter_err(tr, pe, *filterp);
+ append_filter_err(dir->tr, pe, *filterp);
}
}
create_filter_finish(pe);
@@ -1800,6 +2349,9 @@ int apply_event_filter(struct trace_event_file *file, char *filter_string)
struct event_filter *filter = NULL;
int err;
+ if (file->flags & EVENT_FILE_FL_FREED)
+ return -ENODEV;
+
if (!strcmp(strstrip(filter_string), "0")) {
filter_disable(file);
filter = event_filter(file);
@@ -1873,7 +2425,7 @@ int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
goto out_unlock;
}
- err = create_system_filter(dir, tr, filter_string, &filter);
+ err = create_system_filter(dir, filter_string, &filter);
if (filter) {
/*
* No event actually uses the system filter
@@ -1950,7 +2502,7 @@ static int __ftrace_function_set_filter(int filter, char *buf, int len,
/*
* The 'ip' field could have multiple filters set, separated
* either by space or comma. We first cut the filter and apply
- * all pieces separatelly.
+ * all pieces separately.
*/
re = ftrace_function_filter_re(buf, len, &re_cnt);
if (!re)
@@ -1999,8 +2551,8 @@ static int ftrace_function_set_filter_pred(struct filter_pred *pred,
return ret;
return __ftrace_function_set_filter(pred->op == OP_EQ,
- pred->regex.pattern,
- pred->regex.len,
+ pred->regex->pattern,
+ pred->regex->len,
data);
}
@@ -2196,7 +2748,7 @@ static void update_pred_fn(struct event_filter *filter, char *fields)
struct filter_pred *pred = prog[i].pred;
struct ftrace_event_field *field = pred->field;
- WARN_ON_ONCE(!pred->fn);
+ WARN_ON_ONCE(pred->fn_num == FILTER_PRED_FN_NOP);
if (!field) {
WARN_ONCE(1, "all leafs should have field defined %d", i);
@@ -2206,7 +2758,7 @@ static void update_pred_fn(struct event_filter *filter, char *fields)
if (!strchr(fields, *field->name))
continue;
- pred->fn = test_pred_visited_fn;
+ pred->fn_num = FILTER_PRED_TEST_VISITED;
}
}
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 0b933546142e..6ece1308d36a 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -65,7 +65,12 @@
C(INVALID_SORT_MODIFIER,"Invalid sort modifier"), \
C(EMPTY_SORT_FIELD, "Empty sort field"), \
C(TOO_MANY_SORT_FIELDS, "Too many sort fields (Max = 2)"), \
- C(INVALID_SORT_FIELD, "Sort field must be a key or a val"),
+ C(INVALID_SORT_FIELD, "Sort field must be a key or a val"), \
+ C(INVALID_STR_OPERAND, "String type can not be an operand in expression"), \
+ C(EXPECT_NUMBER, "Expecting numeric literal"), \
+ C(UNARY_MINUS_SUBEXPR, "Unary minus not supported in sub-expressions"), \
+ C(DIVISION_BY_ZERO, "Division by zero"), \
+ C(NEED_NOHC_VAL, "Non-hitcount value is required for 'nohitcount'"),
#undef C
#define C(a, b) HIST_ERR_##a
@@ -81,18 +86,56 @@ struct hist_field;
typedef u64 (*hist_field_fn_t) (struct hist_field *field,
struct tracing_map_elt *elt,
+ struct trace_buffer *buffer,
struct ring_buffer_event *rbe,
void *event);
#define HIST_FIELD_OPERANDS_MAX 2
#define HIST_FIELDS_MAX (TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX)
#define HIST_ACTIONS_MAX 8
+#define HIST_CONST_DIGITS_MAX 21
+#define HIST_DIV_SHIFT 20 /* For optimizing division by constants */
enum field_op_id {
FIELD_OP_NONE,
FIELD_OP_PLUS,
FIELD_OP_MINUS,
FIELD_OP_UNARY_MINUS,
+ FIELD_OP_DIV,
+ FIELD_OP_MULT,
+};
+
+enum hist_field_fn {
+ HIST_FIELD_FN_NOP,
+ HIST_FIELD_FN_VAR_REF,
+ HIST_FIELD_FN_COUNTER,
+ HIST_FIELD_FN_CONST,
+ HIST_FIELD_FN_LOG2,
+ HIST_FIELD_FN_BUCKET,
+ HIST_FIELD_FN_TIMESTAMP,
+ HIST_FIELD_FN_CPU,
+ HIST_FIELD_FN_STRING,
+ HIST_FIELD_FN_DYNSTRING,
+ HIST_FIELD_FN_RELDYNSTRING,
+ HIST_FIELD_FN_PSTRING,
+ HIST_FIELD_FN_S64,
+ HIST_FIELD_FN_U64,
+ HIST_FIELD_FN_S32,
+ HIST_FIELD_FN_U32,
+ HIST_FIELD_FN_S16,
+ HIST_FIELD_FN_U16,
+ HIST_FIELD_FN_S8,
+ HIST_FIELD_FN_U8,
+ HIST_FIELD_FN_UMINUS,
+ HIST_FIELD_FN_MINUS,
+ HIST_FIELD_FN_PLUS,
+ HIST_FIELD_FN_DIV,
+ HIST_FIELD_FN_MULT,
+ HIST_FIELD_FN_DIV_POWER2,
+ HIST_FIELD_FN_DIV_NOT_POWER2,
+ HIST_FIELD_FN_DIV_MULT_SHIFT,
+ HIST_FIELD_FN_EXECNAME,
+ HIST_FIELD_FN_STACK,
};
/*
@@ -114,14 +157,15 @@ struct hist_var {
struct hist_field {
struct ftrace_event_field *field;
unsigned long flags;
- hist_field_fn_t fn;
+ unsigned long buckets;
+ const char *type;
+ struct hist_field *operands[HIST_FIELD_OPERANDS_MAX];
+ struct hist_trigger_data *hist_data;
+ enum hist_field_fn fn_num;
unsigned int ref;
unsigned int size;
unsigned int offset;
unsigned int is_signed;
- const char *type;
- struct hist_field *operands[HIST_FIELD_OPERANDS_MAX];
- struct hist_trigger_data *hist_data;
/*
* Variable fields contain variable-specific info in var.
@@ -147,18 +191,33 @@ struct hist_field {
*/
unsigned int var_ref_idx;
bool read_once;
+
+ unsigned int var_str_idx;
+
+ /* Numeric literals are represented as u64 */
+ u64 constant;
+ /* Used to optimize division by constants */
+ u64 div_multiplier;
};
-static u64 hist_field_none(struct hist_field *field,
+static u64 hist_fn_call(struct hist_field *hist_field,
+ struct tracing_map_elt *elt,
+ struct trace_buffer *buffer,
+ struct ring_buffer_event *rbe,
+ void *event);
+
+static u64 hist_field_const(struct hist_field *field,
struct tracing_map_elt *elt,
+ struct trace_buffer *buffer,
struct ring_buffer_event *rbe,
void *event)
{
- return 0;
+ return field->constant;
}
static u64 hist_field_counter(struct hist_field *field,
struct tracing_map_elt *elt,
+ struct trace_buffer *buffer,
struct ring_buffer_event *rbe,
void *event)
{
@@ -167,6 +226,7 @@ static u64 hist_field_counter(struct hist_field *field,
static u64 hist_field_string(struct hist_field *hist_field,
struct tracing_map_elt *elt,
+ struct trace_buffer *buffer,
struct ring_buffer_event *rbe,
void *event)
{
@@ -177,6 +237,7 @@ static u64 hist_field_string(struct hist_field *hist_field,
static u64 hist_field_dynstring(struct hist_field *hist_field,
struct tracing_map_elt *elt,
+ struct trace_buffer *buffer,
struct ring_buffer_event *rbe,
void *event)
{
@@ -187,8 +248,23 @@ static u64 hist_field_dynstring(struct hist_field *hist_field,
return (u64)(unsigned long)addr;
}
+static u64 hist_field_reldynstring(struct hist_field *hist_field,
+ struct tracing_map_elt *elt,
+ struct trace_buffer *buffer,
+ struct ring_buffer_event *rbe,
+ void *event)
+{
+ u32 *item = event + hist_field->field->offset;
+ u32 str_item = *item;
+ int str_loc = str_item & 0xffff;
+ char *addr = (char *)&item[1] + str_loc;
+
+ return (u64)(unsigned long)addr;
+}
+
static u64 hist_field_pstring(struct hist_field *hist_field,
struct tracing_map_elt *elt,
+ struct trace_buffer *buffer,
struct ring_buffer_event *rbe,
void *event)
{
@@ -199,52 +275,177 @@ static u64 hist_field_pstring(struct hist_field *hist_field,
static u64 hist_field_log2(struct hist_field *hist_field,
struct tracing_map_elt *elt,
+ struct trace_buffer *buffer,
struct ring_buffer_event *rbe,
void *event)
{
struct hist_field *operand = hist_field->operands[0];
- u64 val = operand->fn(operand, elt, rbe, event);
+ u64 val = hist_fn_call(operand, elt, buffer, rbe, event);
return (u64) ilog2(roundup_pow_of_two(val));
}
+static u64 hist_field_bucket(struct hist_field *hist_field,
+ struct tracing_map_elt *elt,
+ struct trace_buffer *buffer,
+ struct ring_buffer_event *rbe,
+ void *event)
+{
+ struct hist_field *operand = hist_field->operands[0];
+ unsigned long buckets = hist_field->buckets;
+
+ u64 val = hist_fn_call(operand, elt, buffer, rbe, event);
+
+ if (WARN_ON_ONCE(!buckets))
+ return val;
+
+ if (val >= LONG_MAX)
+ val = div64_ul(val, buckets);
+ else
+ val = (u64)((unsigned long)val / buckets);
+ return val * buckets;
+}
+
static u64 hist_field_plus(struct hist_field *hist_field,
struct tracing_map_elt *elt,
+ struct trace_buffer *buffer,
struct ring_buffer_event *rbe,
void *event)
{
struct hist_field *operand1 = hist_field->operands[0];
struct hist_field *operand2 = hist_field->operands[1];
- u64 val1 = operand1->fn(operand1, elt, rbe, event);
- u64 val2 = operand2->fn(operand2, elt, rbe, event);
+ u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event);
+ u64 val2 = hist_fn_call(operand2, elt, buffer, rbe, event);
return val1 + val2;
}
static u64 hist_field_minus(struct hist_field *hist_field,
struct tracing_map_elt *elt,
+ struct trace_buffer *buffer,
struct ring_buffer_event *rbe,
void *event)
{
struct hist_field *operand1 = hist_field->operands[0];
struct hist_field *operand2 = hist_field->operands[1];
- u64 val1 = operand1->fn(operand1, elt, rbe, event);
- u64 val2 = operand2->fn(operand2, elt, rbe, event);
+ u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event);
+ u64 val2 = hist_fn_call(operand2, elt, buffer, rbe, event);
return val1 - val2;
}
+static u64 hist_field_div(struct hist_field *hist_field,
+ struct tracing_map_elt *elt,
+ struct trace_buffer *buffer,
+ struct ring_buffer_event *rbe,
+ void *event)
+{
+ struct hist_field *operand1 = hist_field->operands[0];
+ struct hist_field *operand2 = hist_field->operands[1];
+
+ u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event);
+ u64 val2 = hist_fn_call(operand2, elt, buffer, rbe, event);
+
+ /* Return -1 for the undefined case */
+ if (!val2)
+ return -1;
+
+ /* Use shift if the divisor is a power of 2 */
+ if (!(val2 & (val2 - 1)))
+ return val1 >> __ffs64(val2);
+
+ return div64_u64(val1, val2);
+}
+
+static u64 div_by_power_of_two(struct hist_field *hist_field,
+ struct tracing_map_elt *elt,
+ struct trace_buffer *buffer,
+ struct ring_buffer_event *rbe,
+ void *event)
+{
+ struct hist_field *operand1 = hist_field->operands[0];
+ struct hist_field *operand2 = hist_field->operands[1];
+
+ u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event);
+
+ return val1 >> __ffs64(operand2->constant);
+}
+
+static u64 div_by_not_power_of_two(struct hist_field *hist_field,
+ struct tracing_map_elt *elt,
+ struct trace_buffer *buffer,
+ struct ring_buffer_event *rbe,
+ void *event)
+{
+ struct hist_field *operand1 = hist_field->operands[0];
+ struct hist_field *operand2 = hist_field->operands[1];
+
+ u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event);
+
+ return div64_u64(val1, operand2->constant);
+}
+
+static u64 div_by_mult_and_shift(struct hist_field *hist_field,
+ struct tracing_map_elt *elt,
+ struct trace_buffer *buffer,
+ struct ring_buffer_event *rbe,
+ void *event)
+{
+ struct hist_field *operand1 = hist_field->operands[0];
+ struct hist_field *operand2 = hist_field->operands[1];
+
+ u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event);
+
+ /*
+ * If the divisor is a constant, do a multiplication and shift instead.
+ *
+ * Choose Z = some power of 2. If Y <= Z, then:
+ * X / Y = (X * (Z / Y)) / Z
+ *
+ * (Z / Y) is a constant (mult) which is calculated at parse time, so:
+ * X / Y = (X * mult) / Z
+ *
+ * The division by Z can be replaced by a shift since Z is a power of 2:
+ * X / Y = (X * mult) >> HIST_DIV_SHIFT
+ *
+	 * As long as X < Z, the results will not be off by more than 1.
+ */
+ if (val1 < (1 << HIST_DIV_SHIFT)) {
+ u64 mult = operand2->div_multiplier;
+
+ return (val1 * mult + ((1 << HIST_DIV_SHIFT) - 1)) >> HIST_DIV_SHIFT;
+ }
+
+ return div64_u64(val1, operand2->constant);
+}
+
+static u64 hist_field_mult(struct hist_field *hist_field,
+ struct tracing_map_elt *elt,
+ struct trace_buffer *buffer,
+ struct ring_buffer_event *rbe,
+ void *event)
+{
+ struct hist_field *operand1 = hist_field->operands[0];
+ struct hist_field *operand2 = hist_field->operands[1];
+
+ u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event);
+ u64 val2 = hist_fn_call(operand2, elt, buffer, rbe, event);
+
+ return val1 * val2;
+}
+
static u64 hist_field_unary_minus(struct hist_field *hist_field,
struct tracing_map_elt *elt,
+ struct trace_buffer *buffer,
struct ring_buffer_event *rbe,
void *event)
{
struct hist_field *operand = hist_field->operands[0];
- s64 sval = (s64)operand->fn(operand, elt, rbe, event);
+ s64 sval = (s64)hist_fn_call(operand, elt, buffer, rbe, event);
u64 val = (u64)-sval;
return val;
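
The multiply-and-shift comment in div_by_mult_and_shift() above becomes clearer with numbers plugged in. A stand-alone sketch using Z = 1 << 20 (the value HIST_DIV_SHIFT implies) and an arbitrary example divisor of 1000:

/* Worked example of the constant-divisor trick; illustration, not kernel code. */
#include <stdio.h>
#include <inttypes.h>

#define SHIFT 20				/* Z = 1 << SHIFT */

int main(void)
{
	uint64_t y = 1000;			/* constant divisor (Y) */
	uint64_t mult = (1ULL << SHIFT) / y;	/* Z / Y = 1048, computed once */
	uint64_t x = 5000;			/* dividend (X), must stay below Z */
	uint64_t approx = (x * mult + ((1ULL << SHIFT) - 1)) >> SHIFT;

	/* prints "exact 5 approx 5"; the result is never off by more than 1 */
	printf("exact %" PRIu64 " approx %" PRIu64 "\n", x / y, approx);
	return 0;
}

When the constant divisor happens to be a power of two, the patch takes the even cheaper pure-shift path in div_by_power_of_two() instead.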
@@ -253,6 +454,7 @@ static u64 hist_field_unary_minus(struct hist_field *hist_field,
#define DEFINE_HIST_FIELD_FN(type) \
static u64 hist_field_##type(struct hist_field *hist_field, \
struct tracing_map_elt *elt, \
+ struct trace_buffer *buffer, \
struct ring_buffer_event *rbe, \
void *event) \
{ \
@@ -279,10 +481,6 @@ DEFINE_HIST_FIELD_FN(u8);
#define for_each_hist_key_field(i, hist_data) \
for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)
-#define HIST_STACKTRACE_DEPTH 16
-#define HIST_STACKTRACE_SIZE (HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
-#define HIST_STACKTRACE_SKIP 5
-
#define HITCOUNT_IDX 0
#define HIST_KEY_SIZE_MAX (MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
@@ -304,6 +502,10 @@ enum hist_field_flags {
HIST_FIELD_FL_VAR_REF = 1 << 14,
HIST_FIELD_FL_CPU = 1 << 15,
HIST_FIELD_FL_ALIAS = 1 << 16,
+ HIST_FIELD_FL_BUCKET = 1 << 17,
+ HIST_FIELD_FL_CONST = 1 << 18,
+ HIST_FIELD_FL_PERCENT = 1 << 19,
+ HIST_FIELD_FL_GRAPH = 1 << 20,
};
struct var_defs {
@@ -322,6 +524,7 @@ struct hist_trigger_attrs {
bool cont;
bool clear;
bool ts_in_usecs;
+ bool no_hitcount;
unsigned int map_bits;
char *assignment_str[TRACING_MAP_VARS_MAX];
@@ -349,6 +552,7 @@ struct hist_trigger_data {
unsigned int n_keys;
unsigned int n_fields;
unsigned int n_vars;
+ unsigned int n_var_str;
unsigned int key_size;
struct tracing_map_sort_key sort_keys[TRACING_MAP_SORT_KEYS_MAX];
unsigned int n_sort_keys;
@@ -377,7 +581,8 @@ struct hist_trigger_data {
struct action_data;
typedef void (*action_fn_t) (struct hist_trigger_data *hist_data,
- struct tracing_map_elt *elt, void *rec,
+ struct tracing_map_elt *elt,
+ struct trace_buffer *buffer, void *rec,
struct ring_buffer_event *rbe, void *key,
struct action_data *data, u64 *var_ref_vals);
@@ -413,7 +618,7 @@ struct action_data {
* event param, and is passed to the synthetic event
* invocation.
*/
- unsigned int var_ref_idx[TRACING_MAP_VARS_MAX];
+ unsigned int var_ref_idx[SYNTH_FIELDS_MAX];
struct synth_event *synth_event;
bool use_trace_keyword;
char *synth_event_name;
@@ -469,7 +674,8 @@ struct track_data {
struct hist_elt_data {
char *comm;
u64 *var_ref_vals;
- char *field_var_str[SYNTH_FIELDS_MAX];
+ char **field_var_str;
+ int n_field_var_str;
};
struct snapshot_context {
@@ -477,6 +683,25 @@ struct snapshot_context {
void *key;
};
+/*
+ * Returns the specific division function to use if the divisor
+ * is constant. This avoids extra branches when the trigger is hit.
+ */
+static enum hist_field_fn hist_field_get_div_fn(struct hist_field *divisor)
+{
+ u64 div = divisor->constant;
+
+ if (!(div & (div - 1)))
+ return HIST_FIELD_FN_DIV_POWER2;
+
+ /* If the divisor is too large, do a regular division */
+ if (div > (1 << HIST_DIV_SHIFT))
+ return HIST_FIELD_FN_DIV_NOT_POWER2;
+
+ divisor->div_multiplier = div64_u64((u64)(1 << HIST_DIV_SHIFT), div);
+ return HIST_FIELD_FN_DIV_MULT_SHIFT;
+}
+
static void track_data_free(struct track_data *track_data)
{
struct hist_elt_data *elt_data;
@@ -532,11 +757,16 @@ static struct track_data *track_data_alloc(unsigned int key_len,
return data;
}
-static char last_cmd[MAX_FILTER_STR_VAL];
+#define HIST_PREFIX "hist:"
+
+static char *last_cmd;
static char last_cmd_loc[MAX_FILTER_STR_VAL];
static int errpos(char *str)
{
+ if (!str || !last_cmd)
+ return 0;
+
return err_pos(last_cmd, str);
}
@@ -548,8 +778,11 @@ static void last_cmd_set(struct trace_event_file *file, char *str)
if (!str)
return;
- strcpy(last_cmd, "hist:");
- strncat(last_cmd, str, MAX_FILTER_STR_VAL - 1 - sizeof("hist:"));
+ kfree(last_cmd);
+
+ last_cmd = kasprintf(GFP_KERNEL, HIST_PREFIX "%s", str);
+ if (!last_cmd)
+ return;
if (file) {
call = file->event_call;
@@ -562,18 +795,22 @@ static void last_cmd_set(struct trace_event_file *file, char *str)
}
if (system)
- snprintf(last_cmd_loc, MAX_FILTER_STR_VAL, "hist:%s:%s", system, name);
+ snprintf(last_cmd_loc, MAX_FILTER_STR_VAL, HIST_PREFIX "%s:%s", system, name);
}
-static void hist_err(struct trace_array *tr, u8 err_type, u8 err_pos)
+static void hist_err(struct trace_array *tr, u8 err_type, u16 err_pos)
{
+ if (!last_cmd)
+ return;
+
tracing_log_err(tr, last_cmd_loc, last_cmd, err_text,
err_type, err_pos);
}
static void hist_err_clear(void)
{
- last_cmd[0] = '\0';
+ if (last_cmd)
+ last_cmd[0] = '\0';
last_cmd_loc[0] = '\0';
}
@@ -605,7 +842,8 @@ static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals,
}
static void action_trace(struct hist_trigger_data *hist_data,
- struct tracing_map_elt *elt, void *rec,
+ struct tracing_map_elt *elt,
+ struct trace_buffer *buffer, void *rec,
struct ring_buffer_event *rbe, void *key,
struct action_data *data, u64 *var_ref_vals)
{
@@ -621,13 +859,14 @@ struct hist_var_data {
static u64 hist_field_timestamp(struct hist_field *hist_field,
struct tracing_map_elt *elt,
+ struct trace_buffer *buffer,
struct ring_buffer_event *rbe,
void *event)
{
struct hist_trigger_data *hist_data = hist_field->hist_data;
struct trace_array *tr = hist_data->event_file->tr;
- u64 ts = ring_buffer_event_time_stamp(rbe);
+ u64 ts = ring_buffer_event_time_stamp(buffer, rbe);
if (hist_data->attrs->ts_in_usecs && trace_clock_in_ns(tr))
ts = ns2usecs(ts);
@@ -637,6 +876,7 @@ static u64 hist_field_timestamp(struct hist_field *hist_field,
static u64 hist_field_cpu(struct hist_field *hist_field,
struct tracing_map_elt *elt,
+ struct trace_buffer *buffer,
struct ring_buffer_event *rbe,
void *event)
{
@@ -737,7 +977,7 @@ static struct hist_field *find_any_var_ref(struct hist_trigger_data *hist_data,
* A trigger can define one or more variables. If any one of them is
* currently referenced by any other trigger, this function will
* determine that.
-
+ *
* Typically used to determine whether or not a trigger can be removed
* - if there are any references to a trigger's variables, it cannot.
*
@@ -1017,6 +1257,7 @@ static struct hist_field *find_event_var(struct hist_trigger_data *hist_data,
static u64 hist_field_var_ref(struct hist_field *hist_field,
struct tracing_map_elt *elt,
+ struct trace_buffer *buffer,
struct ring_buffer_event *rbe,
void *event)
{
@@ -1083,16 +1324,20 @@ static const char *hist_field_name(struct hist_field *field,
{
const char *field_name = "";
+ if (WARN_ON_ONCE(!field))
+ return field_name;
+
if (level > 1)
return field_name;
if (field->field)
field_name = field->field->name;
else if (field->flags & HIST_FIELD_FL_LOG2 ||
- field->flags & HIST_FIELD_FL_ALIAS)
+ field->flags & HIST_FIELD_FL_ALIAS ||
+ field->flags & HIST_FIELD_FL_BUCKET)
field_name = hist_field_name(field->operands[0], ++level);
else if (field->flags & HIST_FIELD_FL_CPU)
- field_name = "cpu";
+ field_name = "common_cpu";
else if (field->flags & HIST_FIELD_FL_EXPR ||
field->flags & HIST_FIELD_FL_VAR_REF) {
if (field->system) {
@@ -1108,6 +1353,13 @@ static const char *hist_field_name(struct hist_field *field,
field_name = field->name;
} else if (field->flags & HIST_FIELD_FL_TIMESTAMP)
field_name = "common_timestamp";
+ else if (field->flags & HIST_FIELD_FL_STACKTRACE) {
+ if (field->field)
+ field_name = field->field->name;
+ else
+ field_name = "common_stacktrace";
+ } else if (field->flags & HIST_FIELD_FL_HITCOUNT)
+ field_name = "hitcount";
if (field_name == NULL)
field_name = "";
@@ -1115,38 +1367,32 @@ static const char *hist_field_name(struct hist_field *field,
return field_name;
}
-static hist_field_fn_t select_value_fn(int field_size, int field_is_signed)
+static enum hist_field_fn select_value_fn(int field_size, int field_is_signed)
{
- hist_field_fn_t fn = NULL;
-
switch (field_size) {
case 8:
if (field_is_signed)
- fn = hist_field_s64;
+ return HIST_FIELD_FN_S64;
else
- fn = hist_field_u64;
- break;
+ return HIST_FIELD_FN_U64;
case 4:
if (field_is_signed)
- fn = hist_field_s32;
+ return HIST_FIELD_FN_S32;
else
- fn = hist_field_u32;
- break;
+ return HIST_FIELD_FN_U32;
case 2:
if (field_is_signed)
- fn = hist_field_s16;
+ return HIST_FIELD_FN_S16;
else
- fn = hist_field_u16;
- break;
+ return HIST_FIELD_FN_U16;
case 1:
if (field_is_signed)
- fn = hist_field_s8;
+ return HIST_FIELD_FN_S8;
else
- fn = hist_field_u8;
- break;
+ return HIST_FIELD_FN_U8;
}
- return fn;
+ return HIST_FIELD_FN_NOP;
}
static int parse_map_size(char *str)
@@ -1304,7 +1550,10 @@ parse_hist_trigger_attrs(struct trace_array *tr, char *trigger_str)
ret = parse_assignment(tr, str, attrs);
if (ret)
goto free;
- } else if (strcmp(str, "pause") == 0)
+ } else if (strcmp(str, "nohitcount") == 0 ||
+ strcmp(str, "NOHC") == 0)
+ attrs->no_hitcount = true;
+ else if (strcmp(str, "pause") == 0)
attrs->pause = true;
else if ((strcmp(str, "cont") == 0) ||
(strcmp(str, "continue") == 0))
@@ -1357,9 +1606,11 @@ static void hist_elt_data_free(struct hist_elt_data *elt_data)
{
unsigned int i;
- for (i = 0; i < SYNTH_FIELDS_MAX; i++)
+ for (i = 0; i < elt_data->n_field_var_str; i++)
kfree(elt_data->field_var_str[i]);
+ kfree(elt_data->field_var_str);
+
kfree(elt_data->comm);
kfree(elt_data);
}
@@ -1376,17 +1627,17 @@ static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt)
struct hist_trigger_data *hist_data = elt->map->private_data;
unsigned int size = TASK_COMM_LEN;
struct hist_elt_data *elt_data;
- struct hist_field *key_field;
+ struct hist_field *hist_field;
unsigned int i, n_str;
elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
if (!elt_data)
return -ENOMEM;
- for_each_hist_key_field(i, hist_data) {
- key_field = hist_data->fields[i];
+ for_each_hist_field(i, hist_data) {
+ hist_field = hist_data->fields[i];
- if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
+ if (hist_field->flags & HIST_FIELD_FL_EXECNAME) {
elt_data->comm = kzalloc(size, GFP_KERNEL);
if (!elt_data->comm) {
kfree(elt_data);
@@ -1396,10 +1647,24 @@ static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt)
}
}
- n_str = hist_data->n_field_var_str + hist_data->n_save_var_str;
+ n_str = hist_data->n_field_var_str + hist_data->n_save_var_str +
+ hist_data->n_var_str;
+ if (n_str > SYNTH_FIELDS_MAX) {
+ hist_elt_data_free(elt_data);
+ return -EINVAL;
+ }
+
+ BUILD_BUG_ON(STR_VAR_LEN_MAX & (sizeof(u64) - 1));
size = STR_VAR_LEN_MAX;
+ elt_data->field_var_str = kcalloc(n_str, sizeof(char *), GFP_KERNEL);
+ if (!elt_data->field_var_str) {
+ hist_elt_data_free(elt_data);
+ return -EINVAL;
+ }
+ elt_data->n_field_var_str = n_str;
+
for (i = 0; i < n_str; i++) {
elt_data->field_var_str[i] = kzalloc(size, GFP_KERNEL);
if (!elt_data->field_var_str[i]) {
@@ -1443,8 +1708,16 @@ static const char *get_hist_field_flags(struct hist_field *hist_field)
flags_str = "syscall";
else if (hist_field->flags & HIST_FIELD_FL_LOG2)
flags_str = "log2";
+ else if (hist_field->flags & HIST_FIELD_FL_BUCKET)
+ flags_str = "buckets";
else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP_USECS)
flags_str = "usecs";
+ else if (hist_field->flags & HIST_FIELD_FL_PERCENT)
+ flags_str = "percent";
+ else if (hist_field->flags & HIST_FIELD_FL_GRAPH)
+ flags_str = "graph";
+ else if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
+ flags_str = "stacktrace";
return flags_str;
}
@@ -1453,6 +1726,12 @@ static void expr_field_str(struct hist_field *field, char *expr)
{
if (field->flags & HIST_FIELD_FL_VAR_REF)
strcat(expr, "$");
+ else if (field->flags & HIST_FIELD_FL_CONST) {
+ char str[HIST_CONST_DIGITS_MAX];
+
+ snprintf(str, HIST_CONST_DIGITS_MAX, "%llu", field->constant);
+ strcat(expr, str);
+ }
strcat(expr, hist_field_name(field, 0));
@@ -1508,6 +1787,12 @@ static char *expr_str(struct hist_field *field, unsigned int level)
case FIELD_OP_PLUS:
strcat(expr, "+");
break;
+ case FIELD_OP_DIV:
+ strcat(expr, "/");
+ break;
+ case FIELD_OP_MULT:
+ strcat(expr, "*");
+ break;
default:
kfree(expr);
return NULL;
@@ -1518,27 +1803,92 @@ static char *expr_str(struct hist_field *field, unsigned int level)
return expr;
}
-static int contains_operator(char *str)
+/*
+ * If field_op != FIELD_OP_NONE, *sep points to the root operator
+ * of the expression tree to be evaluated.
+ */
+static int contains_operator(char *str, char **sep)
{
enum field_op_id field_op = FIELD_OP_NONE;
- char *op;
+ char *minus_op, *plus_op, *div_op, *mult_op;
- op = strpbrk(str, "+-");
- if (!op)
- return FIELD_OP_NONE;
- switch (*op) {
- case '-':
- if (*str == '-')
+ /*
+ * Report the last occurrence of the operators first, so that the
+ * expression is evaluated left to right. This is important since
+ * subtraction and division are not associative.
+ *
+	 * e.g.
+	 *	64/8/4/2 is 1, i.e. 64/8/4/2 = ((64/8)/4)/2
+	 *	14-7-5-2 is 0, i.e. 14-7-5-2 = ((14-7)-5)-2
+ */
+
+ /*
+ * First, find lower precedence addition and subtraction
+ * since the expression will be evaluated recursively.
+ */
+ minus_op = strrchr(str, '-');
+ if (minus_op) {
+ /*
+ * Unary minus is not supported in sub-expressions. If
+ * present, it is always the next root operator.
+ */
+ if (minus_op == str) {
field_op = FIELD_OP_UNARY_MINUS;
- else
- field_op = FIELD_OP_MINUS;
- break;
- case '+':
- field_op = FIELD_OP_PLUS;
- break;
- default:
- break;
+ goto out;
+ }
+
+ field_op = FIELD_OP_MINUS;
+ }
+
+ plus_op = strrchr(str, '+');
+ if (plus_op || minus_op) {
+ /*
+	 * For operators of the same precedence use the rightmost as the
+ * root, so that the expression is evaluated left to right.
+ */
+ if (plus_op > minus_op)
+ field_op = FIELD_OP_PLUS;
+ goto out;
+ }
+
+ /*
+ * Multiplication and division have higher precedence than addition and
+ * subtraction.
+ */
+ div_op = strrchr(str, '/');
+ if (div_op)
+ field_op = FIELD_OP_DIV;
+
+ mult_op = strrchr(str, '*');
+ /*
+	 * For operators of the same precedence use the rightmost as the
+ * root, so that the expression is evaluated left to right.
+ */
+ if (mult_op > div_op)
+ field_op = FIELD_OP_MULT;
+
+out:
+ if (sep) {
+ switch (field_op) {
+ case FIELD_OP_UNARY_MINUS:
+ case FIELD_OP_MINUS:
+ *sep = minus_op;
+ break;
+ case FIELD_OP_PLUS:
+ *sep = plus_op;
+ break;
+ case FIELD_OP_DIV:
+ *sep = div_op;
+ break;
+ case FIELD_OP_MULT:
+ *sep = mult_op;
+ break;
+ case FIELD_OP_NONE:
+ default:
+ *sep = NULL;
+ break;
+ }
}
return field_op;
@@ -1556,7 +1906,9 @@ static void __destroy_hist_field(struct hist_field *hist_field)
kfree(hist_field->var.name);
kfree(hist_field->name);
- kfree(hist_field->type);
+
+ /* Can likely be a const */
+ kfree_const(hist_field->type);
kfree(hist_field->system);
kfree(hist_field->event_name);
@@ -1606,12 +1958,19 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
goto out; /* caller will populate */
if (flags & HIST_FIELD_FL_VAR_REF) {
- hist_field->fn = hist_field_var_ref;
+ hist_field->fn_num = HIST_FIELD_FN_VAR_REF;
goto out;
}
if (flags & HIST_FIELD_FL_HITCOUNT) {
- hist_field->fn = hist_field_counter;
+ hist_field->fn_num = HIST_FIELD_FN_COUNTER;
+ hist_field->size = sizeof(u64);
+ hist_field->type = "u64";
+ goto out;
+ }
+
+ if (flags & HIST_FIELD_FL_CONST) {
+ hist_field->fn_num = HIST_FIELD_FN_CONST;
hist_field->size = sizeof(u64);
hist_field->type = kstrdup("u64", GFP_KERNEL);
if (!hist_field->type)
@@ -1620,66 +1979,77 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
}
if (flags & HIST_FIELD_FL_STACKTRACE) {
- hist_field->fn = hist_field_none;
+ if (field)
+ hist_field->fn_num = HIST_FIELD_FN_STACK;
+ else
+ hist_field->fn_num = HIST_FIELD_FN_NOP;
+ hist_field->size = HIST_STACKTRACE_SIZE;
+ hist_field->type = kstrdup_const("unsigned long[]", GFP_KERNEL);
+ if (!hist_field->type)
+ goto free;
goto out;
}
- if (flags & HIST_FIELD_FL_LOG2) {
- unsigned long fl = flags & ~HIST_FIELD_FL_LOG2;
- hist_field->fn = hist_field_log2;
+ if (flags & (HIST_FIELD_FL_LOG2 | HIST_FIELD_FL_BUCKET)) {
+ unsigned long fl = flags & ~(HIST_FIELD_FL_LOG2 | HIST_FIELD_FL_BUCKET);
+ hist_field->fn_num = flags & HIST_FIELD_FL_LOG2 ? HIST_FIELD_FN_LOG2 :
+ HIST_FIELD_FN_BUCKET;
hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL);
+ if (!hist_field->operands[0])
+ goto free;
hist_field->size = hist_field->operands[0]->size;
- hist_field->type = kstrdup(hist_field->operands[0]->type, GFP_KERNEL);
+ hist_field->type = kstrdup_const(hist_field->operands[0]->type, GFP_KERNEL);
if (!hist_field->type)
goto free;
goto out;
}
if (flags & HIST_FIELD_FL_TIMESTAMP) {
- hist_field->fn = hist_field_timestamp;
+ hist_field->fn_num = HIST_FIELD_FN_TIMESTAMP;
hist_field->size = sizeof(u64);
- hist_field->type = kstrdup("u64", GFP_KERNEL);
- if (!hist_field->type)
- goto free;
+ hist_field->type = "u64";
goto out;
}
if (flags & HIST_FIELD_FL_CPU) {
- hist_field->fn = hist_field_cpu;
+ hist_field->fn_num = HIST_FIELD_FN_CPU;
hist_field->size = sizeof(int);
- hist_field->type = kstrdup("unsigned int", GFP_KERNEL);
- if (!hist_field->type)
- goto free;
+ hist_field->type = "unsigned int";
goto out;
}
if (WARN_ON_ONCE(!field))
goto out;
- if (is_string_field(field)) {
+ /* Pointers to strings are just pointers and dangerous to dereference */
+ if (is_string_field(field) &&
+ (field->filter_type != FILTER_PTR_STRING)) {
flags |= HIST_FIELD_FL_STRING;
hist_field->size = MAX_FILTER_STR_VAL;
- hist_field->type = kstrdup(field->type, GFP_KERNEL);
+ hist_field->type = kstrdup_const(field->type, GFP_KERNEL);
if (!hist_field->type)
goto free;
- if (field->filter_type == FILTER_STATIC_STRING)
- hist_field->fn = hist_field_string;
- else if (field->filter_type == FILTER_DYN_STRING)
- hist_field->fn = hist_field_dynstring;
+ if (field->filter_type == FILTER_STATIC_STRING) {
+ hist_field->fn_num = HIST_FIELD_FN_STRING;
+ hist_field->size = field->size;
+ } else if (field->filter_type == FILTER_DYN_STRING) {
+ hist_field->fn_num = HIST_FIELD_FN_DYNSTRING;
+ } else if (field->filter_type == FILTER_RDYN_STRING)
+ hist_field->fn_num = HIST_FIELD_FN_RELDYNSTRING;
else
- hist_field->fn = hist_field_pstring;
+ hist_field->fn_num = HIST_FIELD_FN_PSTRING;
} else {
hist_field->size = field->size;
hist_field->is_signed = field->is_signed;
- hist_field->type = kstrdup(field->type, GFP_KERNEL);
+ hist_field->type = kstrdup_const(field->type, GFP_KERNEL);
if (!hist_field->type)
goto free;
- hist_field->fn = select_value_fn(field->size,
- field->is_signed);
- if (!hist_field->fn) {
+ hist_field->fn_num = select_value_fn(field->size,
+ field->is_signed);
+ if (hist_field->fn_num == HIST_FIELD_FN_NOP) {
destroy_hist_field(hist_field, 0);
return NULL;
}
@@ -1759,7 +2129,7 @@ static int init_var_ref(struct hist_field *ref_field,
}
}
- ref_field->type = kstrdup(var_field->type, GFP_KERNEL);
+ ref_field->type = kstrdup_const(var_field->type, GFP_KERNEL);
if (!ref_field->type) {
err = -ENOMEM;
goto free;
@@ -1768,8 +2138,11 @@ static int init_var_ref(struct hist_field *ref_field,
return err;
free:
kfree(ref_field->system);
+ ref_field->system = NULL;
kfree(ref_field->event_name);
+ ref_field->event_name = NULL;
kfree(ref_field->name);
+ ref_field->name = NULL;
goto out;
}
@@ -1822,7 +2195,9 @@ static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data,
return ref_field;
}
}
-
+ /* Sanity check to avoid out-of-bound write on 'hist_data->var_refs' */
+ if (hist_data->n_var_refs >= TRACING_MAP_VARS_MAX)
+ return NULL;
ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL);
if (ref_field) {
if (init_var_ref(ref_field, var_field, system, event_name)) {
@@ -1856,7 +2231,7 @@ static char *field_name_from_var(struct hist_trigger_data *hist_data,
if (strcmp(var_name, name) == 0) {
field = hist_data->attrs->var_defs.expr[i];
- if (contains_operator(field) || is_var_ref(field))
+ if (contains_operator(field, NULL) || is_var_ref(field))
continue;
return field;
}
@@ -1917,7 +2292,7 @@ static struct hist_field *parse_var_ref(struct hist_trigger_data *hist_data,
static struct ftrace_event_field *
parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
- char *field_str, unsigned long *flags)
+ char *field_str, unsigned long *flags, unsigned long *buckets)
{
struct ftrace_event_field *field = NULL;
char *field_name, *modifier, *str;
@@ -1933,18 +2308,47 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
*flags |= HIST_FIELD_FL_HEX;
else if (strcmp(modifier, "sym") == 0)
*flags |= HIST_FIELD_FL_SYM;
- else if (strcmp(modifier, "sym-offset") == 0)
+ /*
+ * 'sym-offset' occurrences in the trigger string are modified
+ * to 'symXoffset' to simplify arithmetic expression parsing.
+ */
+ else if (strcmp(modifier, "symXoffset") == 0)
*flags |= HIST_FIELD_FL_SYM_OFFSET;
else if ((strcmp(modifier, "execname") == 0) &&
(strcmp(field_name, "common_pid") == 0))
*flags |= HIST_FIELD_FL_EXECNAME;
else if (strcmp(modifier, "syscall") == 0)
*flags |= HIST_FIELD_FL_SYSCALL;
+ else if (strcmp(modifier, "stacktrace") == 0)
+ *flags |= HIST_FIELD_FL_STACKTRACE;
else if (strcmp(modifier, "log2") == 0)
*flags |= HIST_FIELD_FL_LOG2;
else if (strcmp(modifier, "usecs") == 0)
*flags |= HIST_FIELD_FL_TIMESTAMP_USECS;
- else {
+ else if (strncmp(modifier, "bucket", 6) == 0) {
+ int ret;
+
+ modifier += 6;
+
+ if (*modifier == 's')
+ modifier++;
+ if (*modifier != '=')
+ goto error;
+ modifier++;
+ ret = kstrtoul(modifier, 0, buckets);
+ if (ret || !(*buckets))
+ goto error;
+ *flags |= HIST_FIELD_FL_BUCKET;
+ } else if (strncmp(modifier, "percent", 7) == 0) {
+ if (*flags & (HIST_FIELD_FL_VAR | HIST_FIELD_FL_KEY))
+ goto error;
+ *flags |= HIST_FIELD_FL_PERCENT;
+ } else if (strncmp(modifier, "graph", 5) == 0) {
+ if (*flags & (HIST_FIELD_FL_VAR | HIST_FIELD_FL_KEY))
+ goto error;
+ *flags |= HIST_FIELD_FL_GRAPH;
+ } else {
+ error:
hist_err(tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(modifier));
field = ERR_PTR(-EINVAL);
goto out;
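
For orientation, the "buckets=N" (or "bucket=N") modifier parsed above simply rounds each sampled value down to the start of its N-sized bucket, as hist_field_bucket() earlier in this diff shows. A tiny stand-alone illustration with made-up numbers:

/* Illustration of buckets=100 rounding; not kernel code. */
#include <stdio.h>

int main(void)
{
	unsigned long buckets = 100;	/* from a hypothetical ".buckets=100" modifier */
	unsigned long vals[] = { 7, 1234, 100, 199 };

	for (unsigned int i = 0; i < sizeof(vals) / sizeof(vals[0]); i++)
		printf("%lu -> %lu\n", vals[i], vals[i] / buckets * buckets);
	return 0;	/* 7 -> 0, 1234 -> 1200, 100 -> 100, 199 -> 100 */
}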
@@ -1956,14 +2360,32 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
hist_data->enable_timestamps = true;
if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS)
hist_data->attrs->ts_in_usecs = true;
- } else if (strcmp(field_name, "cpu") == 0)
+ } else if (strcmp(field_name, "common_stacktrace") == 0) {
+ *flags |= HIST_FIELD_FL_STACKTRACE;
+ } else if (strcmp(field_name, "common_cpu") == 0)
*flags |= HIST_FIELD_FL_CPU;
+ else if (strcmp(field_name, "hitcount") == 0)
+ *flags |= HIST_FIELD_FL_HITCOUNT;
else {
field = trace_find_event_field(file->event_call, field_name);
if (!field || !field->size) {
- hist_err(tr, HIST_ERR_FIELD_NOT_FOUND, errpos(field_name));
- field = ERR_PTR(-EINVAL);
- goto out;
+ /*
+ * For backward compatibility, if field_name
+ * was "cpu" or "stacktrace", then we treat this
+ * the same as common_cpu and common_stacktrace
+ * respectively. This also works for "CPU", and
+ * "STACKTRACE".
+ */
+ if (field && field->filter_type == FILTER_CPU) {
+ *flags |= HIST_FIELD_FL_CPU;
+ } else if (field && field->filter_type == FILTER_STACKTRACE) {
+ *flags |= HIST_FIELD_FL_STACKTRACE;
+ } else {
+ hist_err(tr, HIST_ERR_FIELD_NOT_FOUND,
+ errpos(field_name));
+ field = ERR_PTR(-EINVAL);
+ goto out;
+ }
}
}
out:
@@ -1983,7 +2405,7 @@ static struct hist_field *create_alias(struct hist_trigger_data *hist_data,
if (!alias)
return NULL;
- alias->fn = var_ref->fn;
+ alias->fn_num = var_ref->fn_num;
alias->operands[0] = var_ref;
if (init_var_ref(alias, var_ref, var_ref->system, var_ref->event_name)) {
@@ -1996,6 +2418,29 @@ static struct hist_field *create_alias(struct hist_trigger_data *hist_data,
return alias;
}
+static struct hist_field *parse_const(struct hist_trigger_data *hist_data,
+ char *str, char *var_name,
+ unsigned long *flags)
+{
+ struct trace_array *tr = hist_data->event_file->tr;
+ struct hist_field *field = NULL;
+ u64 constant;
+
+ if (kstrtoull(str, 0, &constant)) {
+ hist_err(tr, HIST_ERR_EXPECT_NUMBER, errpos(str));
+ return NULL;
+ }
+
+ *flags |= HIST_FIELD_FL_CONST;
+ field = create_hist_field(hist_data, NULL, *flags, var_name);
+ if (!field)
+ return NULL;
+
+ field->constant = constant;
+
+ return field;
+}
+
static struct hist_field *parse_atom(struct hist_trigger_data *hist_data,
struct trace_event_file *file, char *str,
unsigned long *flags, char *var_name)
@@ -2003,8 +2448,18 @@ static struct hist_field *parse_atom(struct hist_trigger_data *hist_data,
char *s, *ref_system = NULL, *ref_event = NULL, *ref_var = str;
struct ftrace_event_field *field = NULL;
struct hist_field *hist_field = NULL;
+ unsigned long buckets = 0;
int ret = 0;
+ if (isdigit(str[0])) {
+ hist_field = parse_const(hist_data, str, var_name, flags);
+ if (!hist_field) {
+ ret = -EINVAL;
+ goto out;
+ }
+ return hist_field;
+ }
+
s = strchr(str, '.');
if (s) {
s = strchr(++s, '.');
@@ -2040,7 +2495,7 @@ static struct hist_field *parse_atom(struct hist_trigger_data *hist_data,
} else
str = s;
- field = parse_field(hist_data, file, str, flags);
+ field = parse_field(hist_data, file, str, flags, &buckets);
if (IS_ERR(field)) {
ret = PTR_ERR(field);
goto out;
@@ -2051,6 +2506,7 @@ static struct hist_field *parse_atom(struct hist_trigger_data *hist_data,
ret = -ENOMEM;
goto out;
}
+ hist_field->buckets = buckets;
return hist_field;
out:
@@ -2060,21 +2516,24 @@ static struct hist_field *parse_atom(struct hist_trigger_data *hist_data,
static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
struct trace_event_file *file,
char *str, unsigned long flags,
- char *var_name, unsigned int level);
+ char *var_name, unsigned int *n_subexprs);
static struct hist_field *parse_unary(struct hist_trigger_data *hist_data,
struct trace_event_file *file,
char *str, unsigned long flags,
- char *var_name, unsigned int level)
+ char *var_name, unsigned int *n_subexprs)
{
struct hist_field *operand1, *expr = NULL;
unsigned long operand_flags;
int ret = 0;
char *s;
+ /* Unary minus operator, increment n_subexprs */
+ ++*n_subexprs;
+
/* we support only -(xxx) i.e. explicit parens required */
- if (level > 3) {
+ if (*n_subexprs > 3) {
hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str));
ret = -EINVAL;
goto free;
@@ -2091,8 +2550,16 @@ static struct hist_field *parse_unary(struct hist_trigger_data *hist_data,
}
s = strrchr(str, ')');
- if (s)
+ if (s) {
+ /* unary minus not supported in sub-expressions */
+ if (*(s+1) != '\0') {
+ hist_err(file->tr, HIST_ERR_UNARY_MINUS_SUBEXPR,
+ errpos(str));
+ ret = -EINVAL;
+ goto free;
+ }
*s = '\0';
+ }
else {
ret = -EINVAL; /* no closing ')' */
goto free;
@@ -2106,19 +2573,28 @@ static struct hist_field *parse_unary(struct hist_trigger_data *hist_data,
}
operand_flags = 0;
- operand1 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level);
+ operand1 = parse_expr(hist_data, file, str, operand_flags, NULL, n_subexprs);
if (IS_ERR(operand1)) {
ret = PTR_ERR(operand1);
goto free;
}
+ if (operand1->flags & HIST_FIELD_FL_STRING) {
+ /* String type can not be the operand of unary operator. */
+ hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str));
+ destroy_hist_field(operand1, 0);
+ ret = -EINVAL;
+ goto free;
+ }
expr->flags |= operand1->flags &
(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
- expr->fn = hist_field_unary_minus;
+ expr->fn_num = HIST_FIELD_FN_UMINUS;
expr->operands[0] = operand1;
+ expr->size = operand1->size;
+ expr->is_signed = operand1->is_signed;
expr->operator = FIELD_OP_UNARY_MINUS;
expr->name = expr_str(expr, 0);
- expr->type = kstrdup(operand1->type, GFP_KERNEL);
+ expr->type = kstrdup_const(operand1->type, GFP_KERNEL);
if (!expr->type) {
ret = -ENOMEM;
goto free;
@@ -2130,9 +2606,15 @@ static struct hist_field *parse_unary(struct hist_trigger_data *hist_data,
return ERR_PTR(ret);
}
+/*
+ * If the operands are var refs, return pointers to the
+ * variable(s) referenced in var1 and var2, else NULL.
+ */
static int check_expr_operands(struct trace_array *tr,
struct hist_field *operand1,
- struct hist_field *operand2)
+ struct hist_field *operand2,
+ struct hist_field **var1,
+ struct hist_field **var2)
{
unsigned long operand1_flags = operand1->flags;
unsigned long operand2_flags = operand2->flags;
@@ -2145,6 +2627,7 @@ static int check_expr_operands(struct trace_array *tr,
if (!var)
return -EINVAL;
operand1_flags = var->flags;
+ *var1 = var;
}
if ((operand2_flags & HIST_FIELD_FL_VAR_REF) ||
@@ -2155,6 +2638,7 @@ static int check_expr_operands(struct trace_array *tr,
if (!var)
return -EINVAL;
operand2_flags = var->flags;
+ *var2 = var;
}
if ((operand1_flags & HIST_FIELD_FL_TIMESTAMP_USECS) !=
@@ -2169,64 +2653,102 @@ static int check_expr_operands(struct trace_array *tr,
static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
struct trace_event_file *file,
char *str, unsigned long flags,
- char *var_name, unsigned int level)
+ char *var_name, unsigned int *n_subexprs)
{
struct hist_field *operand1 = NULL, *operand2 = NULL, *expr = NULL;
- unsigned long operand_flags;
+ struct hist_field *var1 = NULL, *var2 = NULL;
+ unsigned long operand_flags, operand2_flags;
int field_op, ret = -EINVAL;
char *sep, *operand1_str;
+ enum hist_field_fn op_fn;
+ bool combine_consts;
- if (level > 3) {
+ if (*n_subexprs > 3) {
hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str));
return ERR_PTR(-EINVAL);
}
- field_op = contains_operator(str);
+ field_op = contains_operator(str, &sep);
if (field_op == FIELD_OP_NONE)
return parse_atom(hist_data, file, str, &flags, var_name);
if (field_op == FIELD_OP_UNARY_MINUS)
- return parse_unary(hist_data, file, str, flags, var_name, ++level);
+ return parse_unary(hist_data, file, str, flags, var_name, n_subexprs);
- switch (field_op) {
- case FIELD_OP_MINUS:
- sep = "-";
- break;
- case FIELD_OP_PLUS:
- sep = "+";
- break;
- default:
- goto free;
- }
+ /* Binary operator found, increment n_subexprs */
+ ++*n_subexprs;
- operand1_str = strsep(&str, sep);
- if (!operand1_str || !str)
- goto free;
+ /* Split the expression string at the root operator */
+ if (!sep)
+ return ERR_PTR(-EINVAL);
+
+ *sep = '\0';
+ operand1_str = str;
+ str = sep+1;
+
+ /* Binary operator requires both operands */
+ if (*operand1_str == '\0' || *str == '\0')
+ return ERR_PTR(-EINVAL);
operand_flags = 0;
- operand1 = parse_atom(hist_data, file, operand1_str,
- &operand_flags, NULL);
- if (IS_ERR(operand1)) {
- ret = PTR_ERR(operand1);
- operand1 = NULL;
- goto free;
+
+ /* LHS of string is an expression e.g. a+b in a+b+c */
+ operand1 = parse_expr(hist_data, file, operand1_str, operand_flags, NULL, n_subexprs);
+ if (IS_ERR(operand1))
+ return ERR_CAST(operand1);
+
+ if (operand1->flags & HIST_FIELD_FL_STRING) {
+ hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(operand1_str));
+ ret = -EINVAL;
+ goto free_op1;
}
- /* rest of string could be another expression e.g. b+c in a+b+c */
+ /* RHS of string is another expression e.g. c in a+b+c */
operand_flags = 0;
- operand2 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level);
+ operand2 = parse_expr(hist_data, file, str, operand_flags, NULL, n_subexprs);
if (IS_ERR(operand2)) {
ret = PTR_ERR(operand2);
- operand2 = NULL;
- goto free;
+ goto free_op1;
+ }
+ if (operand2->flags & HIST_FIELD_FL_STRING) {
+ hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str));
+ ret = -EINVAL;
+ goto free_operands;
}
- ret = check_expr_operands(file->tr, operand1, operand2);
+ switch (field_op) {
+ case FIELD_OP_MINUS:
+ op_fn = HIST_FIELD_FN_MINUS;
+ break;
+ case FIELD_OP_PLUS:
+ op_fn = HIST_FIELD_FN_PLUS;
+ break;
+ case FIELD_OP_DIV:
+ op_fn = HIST_FIELD_FN_DIV;
+ break;
+ case FIELD_OP_MULT:
+ op_fn = HIST_FIELD_FN_MULT;
+ break;
+ default:
+ ret = -EINVAL;
+ goto free_operands;
+ }
+
+ ret = check_expr_operands(file->tr, operand1, operand2, &var1, &var2);
if (ret)
- goto free;
+ goto free_operands;
- flags |= HIST_FIELD_FL_EXPR;
+ operand_flags = var1 ? var1->flags : operand1->flags;
+ operand2_flags = var2 ? var2->flags : operand2->flags;
+
+ /*
+ * If both operands are constant, the expression can be
+ * collapsed to a single constant.
+ */
+ combine_consts = operand_flags & operand2_flags & HIST_FIELD_FL_CONST;
+
+ flags |= combine_consts ? HIST_FIELD_FL_CONST : HIST_FIELD_FL_EXPR;
flags |= operand1->flags &
(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
@@ -2234,40 +2756,81 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
expr = create_hist_field(hist_data, NULL, flags, var_name);
if (!expr) {
ret = -ENOMEM;
- goto free;
+ goto free_operands;
}
operand1->read_once = true;
operand2->read_once = true;
+ /* The operands are now owned and free'd by 'expr' */
expr->operands[0] = operand1;
expr->operands[1] = operand2;
- expr->operator = field_op;
- expr->name = expr_str(expr, 0);
- expr->type = kstrdup(operand1->type, GFP_KERNEL);
- if (!expr->type) {
- ret = -ENOMEM;
- goto free;
+
+ if (field_op == FIELD_OP_DIV &&
+ operand2_flags & HIST_FIELD_FL_CONST) {
+ u64 divisor = var2 ? var2->constant : operand2->constant;
+
+ if (!divisor) {
+ hist_err(file->tr, HIST_ERR_DIVISION_BY_ZERO, errpos(str));
+ ret = -EDOM;
+ goto free_expr;
+ }
+
+ /*
+ * Copy the divisor here so we don't have to look it up
+ * later if this is a var ref
+ */
+ operand2->constant = divisor;
+ op_fn = hist_field_get_div_fn(operand2);
}
- switch (field_op) {
- case FIELD_OP_MINUS:
- expr->fn = hist_field_minus;
- break;
- case FIELD_OP_PLUS:
- expr->fn = hist_field_plus;
- break;
- default:
- ret = -EINVAL;
- goto free;
+ expr->fn_num = op_fn;
+
+ if (combine_consts) {
+ if (var1)
+ expr->operands[0] = var1;
+ if (var2)
+ expr->operands[1] = var2;
+
+ expr->constant = hist_fn_call(expr, NULL, NULL, NULL, NULL);
+ expr->fn_num = HIST_FIELD_FN_CONST;
+
+ expr->operands[0] = NULL;
+ expr->operands[1] = NULL;
+
+ /*
+ * var refs won't be destroyed immediately
+ * See: destroy_hist_field()
+ */
+ destroy_hist_field(operand2, 0);
+ destroy_hist_field(operand1, 0);
+
+ expr->name = expr_str(expr, 0);
+ } else {
+ /* The operand sizes should be the same, so just pick one */
+ expr->size = operand1->size;
+ expr->is_signed = operand1->is_signed;
+
+ expr->operator = field_op;
+ expr->type = kstrdup_const(operand1->type, GFP_KERNEL);
+ if (!expr->type) {
+ ret = -ENOMEM;
+ goto free_expr;
+ }
+
+ expr->name = expr_str(expr, 0);
}
return expr;
- free:
- destroy_hist_field(operand1, 0);
+
+free_operands:
destroy_hist_field(operand2, 0);
- destroy_hist_field(expr, 0);
+free_op1:
+ destroy_hist_field(operand1, 0);
+ return ERR_PTR(ret);
+free_expr:
+ destroy_hist_field(expr, 0);
return ERR_PTR(ret);
}
@@ -2289,9 +2852,10 @@ static char *find_trigger_filter(struct hist_trigger_data *hist_data,
}
static struct event_command trigger_hist_cmd;
-static int event_hist_trigger_func(struct event_command *cmd_ops,
- struct trace_event_file *file,
- char *glob, char *cmd, char *param);
+static int event_hist_trigger_parse(struct event_command *cmd_ops,
+ struct trace_event_file *file,
+ char *glob, char *cmd,
+ char *param_and_filter);
static bool compatible_keys(struct hist_trigger_data *target_hist_data,
struct hist_trigger_data *hist_data,
@@ -2389,7 +2953,7 @@ find_synthetic_field_var(struct hist_trigger_data *target_hist_data,
* events. However, for convenience, users are allowed to directly
* specify an event field in an action, which will be automatically
* converted into a variable on their behalf.
-
+ *
* If a user specifies a field on an event that isn't the event the
* histogram currently being defined (the target event histogram), the
* only way that can be accomplished is if a new hist trigger is
@@ -2408,12 +2972,12 @@ create_field_var_hist(struct hist_trigger_data *target_hist_data,
char *subsys_name, char *event_name, char *field_name)
{
struct trace_array *tr = target_hist_data->event_file->tr;
- struct hist_field *event_var = ERR_PTR(-EINVAL);
struct hist_trigger_data *hist_data;
unsigned int i, n, first = true;
struct field_var_hist *var_hist;
struct trace_event_file *file;
struct hist_field *key_field;
+ struct hist_field *event_var;
char *saved_filter;
char *cmd;
int ret;
@@ -2494,8 +3058,8 @@ create_field_var_hist(struct hist_trigger_data *target_hist_data,
var_hist->hist_data = hist_data;
/* Create the new histogram with our variable */
- ret = event_hist_trigger_func(&trigger_hist_cmd, file,
- "", "hist", cmd);
+ ret = event_hist_trigger_parse(&trigger_hist_cmd, file,
+ "", "hist", cmd);
if (ret) {
kfree(cmd);
kfree(var_hist->cmd);
@@ -2551,6 +3115,7 @@ find_target_event_var(struct hist_trigger_data *hist_data,
}
static inline void __update_field_vars(struct tracing_map_elt *elt,
+ struct trace_buffer *buffer,
struct ring_buffer_event *rbe,
void *rec,
struct field_var **field_vars,
@@ -2561,19 +3126,37 @@ static inline void __update_field_vars(struct tracing_map_elt *elt,
unsigned int i, j, var_idx;
u64 var_val;
+ /* Make sure stacktrace can fit in the string variable length */
+ BUILD_BUG_ON((HIST_STACKTRACE_DEPTH + 1) * sizeof(long) >= STR_VAR_LEN_MAX);
+
for (i = 0, j = field_var_str_start; i < n_field_vars; i++) {
struct field_var *field_var = field_vars[i];
struct hist_field *var = field_var->var;
struct hist_field *val = field_var->val;
- var_val = val->fn(val, elt, rbe, rec);
+ var_val = hist_fn_call(val, elt, buffer, rbe, rec);
var_idx = var->var.idx;
- if (val->flags & HIST_FIELD_FL_STRING) {
+ if (val->flags & (HIST_FIELD_FL_STRING |
+ HIST_FIELD_FL_STACKTRACE)) {
char *str = elt_data->field_var_str[j++];
char *val_str = (char *)(uintptr_t)var_val;
-
- strscpy(str, val_str, STR_VAR_LEN_MAX);
+ unsigned int size;
+
+ if (val->flags & HIST_FIELD_FL_STRING) {
+ size = min(val->size, STR_VAR_LEN_MAX);
+ strscpy(str, val_str, size);
+ } else {
+ char *stack_start = str + sizeof(unsigned long);
+ int e;
+
+ e = stack_trace_save((void *)stack_start,
+ HIST_STACKTRACE_DEPTH,
+ HIST_STACKTRACE_SKIP);
+ if (e < HIST_STACKTRACE_DEPTH - 1)
+ ((unsigned long *)stack_start)[e] = 0;
+ *((unsigned long *)str) = e;
+ }
var_val = (u64)(uintptr_t)str;
}
tracing_map_set_var(elt, var_idx, var_val);
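
The stack trace saved into string-variable storage above uses a small inline layout: the first unsigned long holds the number of captured entries (zero-terminated when it does not fill the buffer), and the entries follow. A minimal sketch of walking such a buffer back, assuming only that layout:

/* Reads the [count][entry...] layout implied by __update_field_vars(); sketch only. */
#include <stdio.h>

static void print_saved_stack(const unsigned long *buf)
{
	unsigned long count = buf[0];		/* number of entries stored first */

	for (unsigned long i = 0; i < count; i++)
		printf("  [%lu] %#lx\n", i, buf[1 + i]);
}

int main(void)
{
	/* fabricated addresses purely for demonstration */
	unsigned long demo[] = { 3, 0xc0100010UL, 0xc0100200UL, 0xc0100400UL };

	print_saved_stack(demo);
	return 0;
}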
@@ -2582,19 +3165,21 @@ static inline void __update_field_vars(struct tracing_map_elt *elt,
static void update_field_vars(struct hist_trigger_data *hist_data,
struct tracing_map_elt *elt,
+ struct trace_buffer *buffer,
struct ring_buffer_event *rbe,
void *rec)
{
- __update_field_vars(elt, rbe, rec, hist_data->field_vars,
+ __update_field_vars(elt, buffer, rbe, rec, hist_data->field_vars,
hist_data->n_field_vars, 0);
}
static void save_track_data_vars(struct hist_trigger_data *hist_data,
- struct tracing_map_elt *elt, void *rec,
+ struct tracing_map_elt *elt,
+ struct trace_buffer *buffer, void *rec,
struct ring_buffer_event *rbe, void *key,
struct action_data *data, u64 *var_ref_vals)
{
- __update_field_vars(elt, rbe, rec, hist_data->save_vars,
+ __update_field_vars(elt, buffer, rbe, rec, hist_data->save_vars,
hist_data->n_save_vars, hist_data->n_field_var_str);
}
@@ -2629,10 +3214,10 @@ static struct hist_field *create_var(struct hist_trigger_data *hist_data,
var->var.hist_data = var->hist_data = hist_data;
var->size = size;
var->var.name = kstrdup(name, GFP_KERNEL);
- var->type = kstrdup(type, GFP_KERNEL);
+ var->type = kstrdup_const(type, GFP_KERNEL);
if (!var->var.name || !var->type) {
+ kfree_const(var->type);
kfree(var->var.name);
- kfree(var->type);
kfree(var);
var = ERR_PTR(-ENOMEM);
}
@@ -2699,7 +3284,7 @@ static struct field_var *create_field_var(struct hist_trigger_data *hist_data,
* events. However, for convenience, users are allowed to directly
* specify an event field in an action, which will be automatically
* converted into a variable on their behalf.
-
+ *
* This function creates a field variable with the name var_name on
* the hist trigger currently being defined on the target event. If
* subsys_name and event_name are specified, this function simply
@@ -2770,12 +3355,14 @@ static void save_track_val(struct hist_trigger_data *hist_data,
}
static void save_track_data(struct hist_trigger_data *hist_data,
- struct tracing_map_elt *elt, void *rec,
+ struct tracing_map_elt *elt,
+ struct trace_buffer *buffer, void *rec,
struct ring_buffer_event *rbe, void *key,
struct action_data *data, u64 *var_ref_vals)
{
if (data->track_data.save_data)
- data->track_data.save_data(hist_data, elt, rec, rbe, key, data, var_ref_vals);
+ data->track_data.save_data(hist_data, elt, buffer, rec, rbe,
+ key, data, var_ref_vals);
}
static bool check_track_val(struct tracing_map_elt *elt,
@@ -2826,7 +3413,8 @@ static bool cond_snapshot_update(struct trace_array *tr, void *cond_data)
}
static void save_track_data_snapshot(struct hist_trigger_data *hist_data,
- struct tracing_map_elt *elt, void *rec,
+ struct tracing_map_elt *elt,
+ struct trace_buffer *buffer, void *rec,
struct ring_buffer_event *rbe, void *key,
struct action_data *data,
u64 *var_ref_vals)
@@ -2895,7 +3483,8 @@ static bool cond_snapshot_update(struct trace_array *tr, void *cond_data)
return false;
}
static void save_track_data_snapshot(struct hist_trigger_data *hist_data,
- struct tracing_map_elt *elt, void *rec,
+ struct tracing_map_elt *elt,
+ struct trace_buffer *buffer, void *rec,
struct ring_buffer_event *rbe, void *key,
struct action_data *data,
u64 *var_ref_vals) {}
@@ -2937,7 +3526,8 @@ static void track_data_print(struct seq_file *m,
}
static void ontrack_action(struct hist_trigger_data *hist_data,
- struct tracing_map_elt *elt, void *rec,
+ struct tracing_map_elt *elt,
+ struct trace_buffer *buffer, void *rec,
struct ring_buffer_event *rbe, void *key,
struct action_data *data, u64 *var_ref_vals)
{
@@ -2945,7 +3535,8 @@ static void ontrack_action(struct hist_trigger_data *hist_data,
if (check_track_val(elt, data, var_val)) {
save_track_val(hist_data, elt, data, var_val);
- save_track_data(hist_data, elt, rec, rbe, key, data, var_ref_vals);
+ save_track_data(hist_data, elt, buffer, rec, rbe,
+ key, data, var_ref_vals);
}
}
@@ -3053,6 +3644,7 @@ static int parse_action_params(struct trace_array *tr, char *params,
while (params) {
if (data->n_params >= SYNTH_FIELDS_MAX) {
hist_err(tr, HIST_ERR_TOO_MANY_PARAMS, 0);
+ ret = -EINVAL;
goto out;
}
@@ -3263,7 +3855,8 @@ static void save_field_var(struct hist_trigger_data *hist_data,
{
hist_data->field_vars[hist_data->n_field_vars++] = field_var;
- if (field_var->val->flags & HIST_FIELD_FL_STRING)
+ /* Stack traces are saved in the string storage too */
+ if (field_var->val->flags & (HIST_FIELD_FL_STRING | HIST_FIELD_FL_STACKTRACE))
hist_data->n_field_var_str++;
}
@@ -3279,9 +3872,21 @@ static int check_synth_field(struct synth_event *event,
field = event->fields[field_pos];
+ /*
+ * A dynamic string synth field can accept static or
+ * dynamic. A static string synth field can only accept a
+ * same-sized static string, which is checked for later.
+ */
+ if (strstr(hist_field->type, "char[") && field->is_string
+ && field->is_dynamic)
+ return 0;
+
+ if (strstr(hist_field->type, "long[") && field->is_stack)
+ return 0;
+
if (strcmp(field->type, hist_field->type) != 0) {
if (field->size != hist_field->size ||
- field->is_signed != hist_field->is_signed)
+ (!field->is_string && field->is_signed != hist_field->is_signed))
return -EINVAL;
}
@@ -3336,7 +3941,7 @@ trace_action_create_field_var(struct hist_trigger_data *hist_data,
} else {
field_var = NULL;
/*
- * If no explicit system.event is specfied, default to
+ * If no explicit system.event is specified, default to
* looking for fields on the onmatch(system.event.xxx)
* event.
*/
@@ -3345,6 +3950,8 @@ trace_action_create_field_var(struct hist_trigger_data *hist_data,
event = data->match_data.event;
}
+ if (!event)
+ goto free;
/*
* At this point, we're looking at a field on another
* event. Because we can't modify a hist trigger on
@@ -3378,6 +3985,10 @@ static int trace_action_create(struct hist_trigger_data *hist_data,
lockdep_assert_held(&event_mutex);
+ /* Sanity check to avoid out-of-bound write on 'data->var_ref_idx' */
+ if (data->n_params > SYNTH_FIELDS_MAX)
+ return -EINVAL;
+
if (data->use_trace_keyword)
synth_event_name = data->synth_event_name;
else
@@ -3441,6 +4052,7 @@ static int trace_action_create(struct hist_trigger_data *hist_data,
var_ref_idx = find_var_ref_idx(hist_data, var_ref);
if (WARN_ON(var_ref_idx < 0)) {
+ kfree(p);
ret = var_ref_idx;
goto err;
}
@@ -3526,7 +4138,8 @@ static int action_create(struct hist_trigger_data *hist_data,
}
hist_data->save_vars[hist_data->n_save_vars++] = field_var;
- if (field_var->val->flags & HIST_FIELD_FL_STRING)
+ if (field_var->val->flags &
+ (HIST_FIELD_FL_STRING | HIST_FIELD_FL_STACKTRACE))
hist_data->n_save_var_str++;
kfree(param);
}
@@ -3614,14 +4227,29 @@ static int __create_val_field(struct hist_trigger_data *hist_data,
unsigned long flags)
{
struct hist_field *hist_field;
- int ret = 0;
+ int ret = 0, n_subexprs = 0;
- hist_field = parse_expr(hist_data, file, field_str, flags, var_name, 0);
+ hist_field = parse_expr(hist_data, file, field_str, flags, var_name, &n_subexprs);
if (IS_ERR(hist_field)) {
ret = PTR_ERR(hist_field);
goto out;
}
+ /* values and variables should not have some modifiers */
+ if (hist_field->flags & HIST_FIELD_FL_VAR) {
+ /* Variable */
+ if (hist_field->flags & (HIST_FIELD_FL_GRAPH | HIST_FIELD_FL_PERCENT |
+ HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2))
+ goto err;
+ } else {
+ /* Value */
+ if (hist_field->flags & (HIST_FIELD_FL_GRAPH | HIST_FIELD_FL_PERCENT |
+ HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2 |
+ HIST_FIELD_FL_SYM | HIST_FIELD_FL_SYM_OFFSET |
+ HIST_FIELD_FL_SYSCALL | HIST_FIELD_FL_STACKTRACE))
+ goto err;
+ }
+
hist_data->fields[val_idx] = hist_field;
++hist_data->n_vals;
@@ -3631,6 +4259,9 @@ static int __create_val_field(struct hist_trigger_data *hist_data,
ret = -EINVAL;
out:
return ret;
+ err:
+ hist_err(file->tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(field_str));
+ return -EINVAL;
}
static int create_val_field(struct hist_trigger_data *hist_data,
@@ -3644,6 +4275,124 @@ static int create_val_field(struct hist_trigger_data *hist_data,
return __create_val_field(hist_data, val_idx, file, NULL, field_str, 0);
}
+static const char no_comm[] = "(no comm)";
+
+static u64 hist_field_execname(struct hist_field *hist_field,
+ struct tracing_map_elt *elt,
+ struct trace_buffer *buffer,
+ struct ring_buffer_event *rbe,
+ void *event)
+{
+ struct hist_elt_data *elt_data;
+
+ if (WARN_ON_ONCE(!elt))
+ return (u64)(unsigned long)no_comm;
+
+ elt_data = elt->private_data;
+
+ if (WARN_ON_ONCE(!elt_data->comm))
+ return (u64)(unsigned long)no_comm;
+
+ return (u64)(unsigned long)(elt_data->comm);
+}
+
+static u64 hist_field_stack(struct hist_field *hist_field,
+ struct tracing_map_elt *elt,
+ struct trace_buffer *buffer,
+ struct ring_buffer_event *rbe,
+ void *event)
+{
+ u32 str_item = *(u32 *)(event + hist_field->field->offset);
+ int str_loc = str_item & 0xffff;
+ char *addr = (char *)(event + str_loc);
+
+ return (u64)(unsigned long)addr;
+}
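
The two helpers above read string/stack fields by unpacking the 32-bit __data_loc word stored in the record: the low 16 bits hold the payload's offset inside the event, the high 16 bits hold its length (the same ">> 16" shows up later in add_to_key()). A minimal userspace sketch of that packing, with a made-up record layout rather than the real kernel structures:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct rec {                    /* stand-in for a trace event record */
            uint32_t str_item;      /* __data_loc word: len << 16 | offset */
            char data[60];
    };

    int main(void)
    {
            struct rec rec = { 0 };
            const char *payload = "ls";
            uint16_t offset = offsetof(struct rec, data);   /* from start of record */
            uint16_t len = strlen(payload) + 1;

            memcpy((char *)&rec + offset, payload, len);
            rec.str_item = ((uint32_t)len << 16) | offset;

            /* What hist_field_stack()/add_to_key() do: unpack offset and length. */
            printf("offset=%u len=%u str=%s\n",
                   rec.str_item & 0xffff, rec.str_item >> 16,
                   (char *)&rec + (rec.str_item & 0xffff));
            return 0;
    }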
+
+static u64 hist_fn_call(struct hist_field *hist_field,
+ struct tracing_map_elt *elt,
+ struct trace_buffer *buffer,
+ struct ring_buffer_event *rbe,
+ void *event)
+{
+ switch (hist_field->fn_num) {
+ case HIST_FIELD_FN_VAR_REF:
+ return hist_field_var_ref(hist_field, elt, buffer, rbe, event);
+ case HIST_FIELD_FN_COUNTER:
+ return hist_field_counter(hist_field, elt, buffer, rbe, event);
+ case HIST_FIELD_FN_CONST:
+ return hist_field_const(hist_field, elt, buffer, rbe, event);
+ case HIST_FIELD_FN_LOG2:
+ return hist_field_log2(hist_field, elt, buffer, rbe, event);
+ case HIST_FIELD_FN_BUCKET:
+ return hist_field_bucket(hist_field, elt, buffer, rbe, event);
+ case HIST_FIELD_FN_TIMESTAMP:
+ return hist_field_timestamp(hist_field, elt, buffer, rbe, event);
+ case HIST_FIELD_FN_CPU:
+ return hist_field_cpu(hist_field, elt, buffer, rbe, event);
+ case HIST_FIELD_FN_STRING:
+ return hist_field_string(hist_field, elt, buffer, rbe, event);
+ case HIST_FIELD_FN_DYNSTRING:
+ return hist_field_dynstring(hist_field, elt, buffer, rbe, event);
+ case HIST_FIELD_FN_RELDYNSTRING:
+ return hist_field_reldynstring(hist_field, elt, buffer, rbe, event);
+ case HIST_FIELD_FN_PSTRING:
+ return hist_field_pstring(hist_field, elt, buffer, rbe, event);
+ case HIST_FIELD_FN_S64:
+ return hist_field_s64(hist_field, elt, buffer, rbe, event);
+ case HIST_FIELD_FN_U64:
+ return hist_field_u64(hist_field, elt, buffer, rbe, event);
+ case HIST_FIELD_FN_S32:
+ return hist_field_s32(hist_field, elt, buffer, rbe, event);
+ case HIST_FIELD_FN_U32:
+ return hist_field_u32(hist_field, elt, buffer, rbe, event);
+ case HIST_FIELD_FN_S16:
+ return hist_field_s16(hist_field, elt, buffer, rbe, event);
+ case HIST_FIELD_FN_U16:
+ return hist_field_u16(hist_field, elt, buffer, rbe, event);
+ case HIST_FIELD_FN_S8:
+ return hist_field_s8(hist_field, elt, buffer, rbe, event);
+ case HIST_FIELD_FN_U8:
+ return hist_field_u8(hist_field, elt, buffer, rbe, event);
+ case HIST_FIELD_FN_UMINUS:
+ return hist_field_unary_minus(hist_field, elt, buffer, rbe, event);
+ case HIST_FIELD_FN_MINUS:
+ return hist_field_minus(hist_field, elt, buffer, rbe, event);
+ case HIST_FIELD_FN_PLUS:
+ return hist_field_plus(hist_field, elt, buffer, rbe, event);
+ case HIST_FIELD_FN_DIV:
+ return hist_field_div(hist_field, elt, buffer, rbe, event);
+ case HIST_FIELD_FN_MULT:
+ return hist_field_mult(hist_field, elt, buffer, rbe, event);
+ case HIST_FIELD_FN_DIV_POWER2:
+ return div_by_power_of_two(hist_field, elt, buffer, rbe, event);
+ case HIST_FIELD_FN_DIV_NOT_POWER2:
+ return div_by_not_power_of_two(hist_field, elt, buffer, rbe, event);
+ case HIST_FIELD_FN_DIV_MULT_SHIFT:
+ return div_by_mult_and_shift(hist_field, elt, buffer, rbe, event);
+ case HIST_FIELD_FN_EXECNAME:
+ return hist_field_execname(hist_field, elt, buffer, rbe, event);
+ case HIST_FIELD_FN_STACK:
+ return hist_field_stack(hist_field, elt, buffer, rbe, event);
+ default:
+ return 0;
+ }
+}
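
hist_fn_call() above replaces the old per-field function pointer (hist_field->fn, still visible in the '-' lines further down) with an enum plus one big switch, so every field evaluation becomes a direct call. A stripped-down sketch of that dispatch pattern; the enum values and helpers here are invented for illustration only:

    #include <stdio.h>

    enum field_fn { FN_COUNTER, FN_LOG2, FN_CONST };

    struct field { enum field_fn fn_num; unsigned long long constant; };

    static unsigned long long fn_counter(struct field *f) { (void)f; return 1; }
    static unsigned long long fn_log2(struct field *f)    { return 63 - __builtin_clzll(f->constant); }
    static unsigned long long fn_const(struct field *f)   { return f->constant; }

    /* One switch instead of an indirect call through f->fn. */
    static unsigned long long field_call(struct field *f)
    {
            switch (f->fn_num) {
            case FN_COUNTER: return fn_counter(f);
            case FN_LOG2:    return fn_log2(f);
            case FN_CONST:   return fn_const(f);
            default:         return 0;
            }
    }

    int main(void)
    {
            struct field f = { .fn_num = FN_LOG2, .constant = 4096 };
            printf("%llu\n", field_call(&f));   /* prints 12 */
            return 0;
    }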
+
+/* Convert a var that points to common_pid.execname to a string */
+static void update_var_execname(struct hist_field *hist_field)
+{
+ hist_field->flags = HIST_FIELD_FL_STRING | HIST_FIELD_FL_VAR |
+ HIST_FIELD_FL_EXECNAME;
+ hist_field->size = MAX_FILTER_STR_VAL;
+ hist_field->is_signed = 0;
+
+ kfree_const(hist_field->type);
+ hist_field->type = "char[]";
+
+ hist_field->fn_num = HIST_FIELD_FN_EXECNAME;
+}
+
static int create_var_field(struct hist_trigger_data *hist_data,
unsigned int val_idx,
struct trace_event_file *file,
@@ -3651,6 +4400,7 @@ static int create_var_field(struct hist_trigger_data *hist_data,
{
struct trace_array *tr = hist_data->event_file->tr;
unsigned long flags = 0;
+ int ret;
if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
return -EINVAL;
@@ -3665,14 +4415,23 @@ static int create_var_field(struct hist_trigger_data *hist_data,
if (WARN_ON(hist_data->n_vars > TRACING_MAP_VARS_MAX))
return -EINVAL;
- return __create_val_field(hist_data, val_idx, file, var_name, expr_str, flags);
+ ret = __create_val_field(hist_data, val_idx, file, var_name, expr_str, flags);
+
+ if (!ret && hist_data->fields[val_idx]->flags & HIST_FIELD_FL_EXECNAME)
+ update_var_execname(hist_data->fields[val_idx]);
+
+ if (!ret && hist_data->fields[val_idx]->flags &
+ (HIST_FIELD_FL_STRING | HIST_FIELD_FL_STACKTRACE))
+ hist_data->fields[val_idx]->var_str_idx = hist_data->n_var_str++;
+
+ return ret;
}
static int create_val_fields(struct hist_trigger_data *hist_data,
struct trace_event_file *file)
{
+ unsigned int i, j = 1, n_hitcount = 0;
char *fields_str, *field_str;
- unsigned int i, j = 1;
int ret;
ret = create_hitcount_val(hist_data);
@@ -3689,8 +4448,10 @@ static int create_val_fields(struct hist_trigger_data *hist_data,
if (!field_str)
break;
- if (strcmp(field_str, "hitcount") == 0)
- continue;
+ if (strcmp(field_str, "hitcount") == 0) {
+ if (!n_hitcount++)
+ continue;
+ }
ret = create_val_field(hist_data, j++, file, field_str);
if (ret)
@@ -3700,6 +4461,12 @@ static int create_val_fields(struct hist_trigger_data *hist_data,
if (fields_str && (strcmp(fields_str, "hitcount") != 0))
ret = -EINVAL;
out:
+ /* There is only raw hitcount but nohitcount suppresses it. */
+ if (j == 1 && hist_data->attrs->no_hitcount) {
+ hist_err(hist_data->event_file->tr, HIST_ERR_NEED_NOHC_VAL, 0);
+ ret = -ENOENT;
+ }
+
return ret;
}
@@ -3713,7 +4480,7 @@ static int create_key_field(struct hist_trigger_data *hist_data,
struct hist_field *hist_field = NULL;
unsigned long flags = 0;
unsigned int key_size;
- int ret = 0;
+ int ret = 0, n_subexprs = 0;
if (WARN_ON(key_idx >= HIST_FIELDS_MAX))
return -EINVAL;
@@ -3726,7 +4493,7 @@ static int create_key_field(struct hist_trigger_data *hist_data,
hist_field = create_hist_field(hist_data, NULL, flags, NULL);
} else {
hist_field = parse_expr(hist_data, file, field_str, flags,
- NULL, 0);
+ NULL, &n_subexprs);
if (IS_ERR(hist_field)) {
ret = PTR_ERR(hist_field);
goto out;
@@ -3866,6 +4633,7 @@ static int parse_var_defs(struct hist_trigger_data *hist_data)
s = kstrdup(field_str, GFP_KERNEL);
if (!s) {
kfree(hist_data->attrs->var_defs.name[n_vars]);
+ hist_data->attrs->var_defs.name[n_vars] = NULL;
ret = -ENOMEM;
goto free;
}
@@ -3889,7 +4657,7 @@ static int create_hist_fields(struct hist_trigger_data *hist_data,
ret = parse_var_defs(hist_data);
if (ret)
- goto out;
+ return ret;
ret = create_val_fields(hist_data, file);
if (ret)
@@ -3900,8 +4668,7 @@ static int create_hist_fields(struct hist_trigger_data *hist_data,
goto out;
ret = create_key_fields(hist_data, file);
- if (ret)
- goto out;
+
out:
free_var_defs(hist_data);
@@ -4038,36 +4805,35 @@ static int parse_actions(struct hist_trigger_data *hist_data)
int len;
for (i = 0; i < hist_data->attrs->n_actions; i++) {
+ enum handler_id hid = 0;
+ char *action_str;
+
str = hist_data->attrs->action_str[i];
- if ((len = str_has_prefix(str, "onmatch("))) {
- char *action_str = str + len;
+ if ((len = str_has_prefix(str, "onmatch(")))
+ hid = HANDLER_ONMATCH;
+ else if ((len = str_has_prefix(str, "onmax(")))
+ hid = HANDLER_ONMAX;
+ else if ((len = str_has_prefix(str, "onchange(")))
+ hid = HANDLER_ONCHANGE;
- data = onmatch_parse(tr, action_str);
- if (IS_ERR(data)) {
- ret = PTR_ERR(data);
- break;
- }
- } else if ((len = str_has_prefix(str, "onmax("))) {
- char *action_str = str + len;
+ action_str = str + len;
- data = track_data_parse(hist_data, action_str,
- HANDLER_ONMAX);
- if (IS_ERR(data)) {
- ret = PTR_ERR(data);
- break;
- }
- } else if ((len = str_has_prefix(str, "onchange("))) {
- char *action_str = str + len;
+ switch (hid) {
+ case HANDLER_ONMATCH:
+ data = onmatch_parse(tr, action_str);
+ break;
+ case HANDLER_ONMAX:
+ case HANDLER_ONCHANGE:
+ data = track_data_parse(hist_data, action_str, hid);
+ break;
+ default:
+ data = ERR_PTR(-EINVAL);
+ break;
+ }
- data = track_data_parse(hist_data, action_str,
- HANDLER_ONCHANGE);
- if (IS_ERR(data)) {
- ret = PTR_ERR(data);
- break;
- }
- } else {
- ret = -EINVAL;
+ if (IS_ERR(data)) {
+ ret = PTR_ERR(data);
break;
}
@@ -4291,7 +5057,7 @@ static int create_tracing_map_fields(struct hist_trigger_data *hist_data)
if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
cmp_fn = tracing_map_cmp_none;
- else if (!field)
+ else if (!field || hist_field->flags & HIST_FIELD_FL_CPU)
cmp_fn = tracing_map_cmp_num(hist_field->size,
hist_field->is_signed);
else if (is_string_field(field))
@@ -4376,7 +5142,8 @@ create_hist_data(unsigned int map_bits,
}
static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
- struct tracing_map_elt *elt, void *rec,
+ struct tracing_map_elt *elt,
+ struct trace_buffer *buffer, void *rec,
struct ring_buffer_event *rbe,
u64 *var_ref_vals)
{
@@ -4390,9 +5157,40 @@ static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
for_each_hist_val_field(i, hist_data) {
hist_field = hist_data->fields[i];
- hist_val = hist_field->fn(hist_field, elt, rbe, rec);
+ hist_val = hist_fn_call(hist_field, elt, buffer, rbe, rec);
if (hist_field->flags & HIST_FIELD_FL_VAR) {
var_idx = hist_field->var.idx;
+
+ if (hist_field->flags &
+ (HIST_FIELD_FL_STRING | HIST_FIELD_FL_STACKTRACE)) {
+ unsigned int str_start, var_str_idx, idx;
+ char *str, *val_str;
+ unsigned int size;
+
+ str_start = hist_data->n_field_var_str +
+ hist_data->n_save_var_str;
+ var_str_idx = hist_field->var_str_idx;
+ idx = str_start + var_str_idx;
+
+ str = elt_data->field_var_str[idx];
+ val_str = (char *)(uintptr_t)hist_val;
+
+ if (hist_field->flags & HIST_FIELD_FL_STRING) {
+ size = min(hist_field->size, STR_VAR_LEN_MAX);
+ strscpy(str, val_str, size);
+ } else {
+ char *stack_start = str + sizeof(unsigned long);
+ int e;
+
+ e = stack_trace_save((void *)stack_start,
+ HIST_STACKTRACE_DEPTH,
+ HIST_STACKTRACE_SKIP);
+ if (e < HIST_STACKTRACE_DEPTH - 1)
+ ((unsigned long *)stack_start)[e] = 0;
+ *((unsigned long *)str) = e;
+ }
+ hist_val = (u64)(uintptr_t)str;
+ }
tracing_map_set_var(elt, var_idx, hist_val);
continue;
}
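
When a variable holds a stack trace, the hunk above writes it into the element's string storage as a counted array: the first unsigned long is the number of entries returned by stack_trace_save(), followed by the entries themselves (the key-handling code later reads it back the same way). A small userspace sketch of that [count][entries...] layout, using a fake capture function in place of stack_trace_save():

    #include <stdio.h>

    #define DEPTH 16

    /* Stand-in for stack_trace_save(): fills 'store', returns the count. */
    static int fake_stack_save(unsigned long *store, int max)
    {
            int n = 3;
            for (int i = 0; i < n && i < max; i++)
                    store[i] = 0x1000 + i;
            return n;
    }

    int main(void)
    {
            unsigned long buf[1 + DEPTH];           /* [count][entries...] */
            unsigned long *entries = buf + 1;
            int n = fake_stack_save(entries, DEPTH);

            if (n < DEPTH - 1)
                    entries[n] = 0;                 /* zero-terminate like the hunk */
            buf[0] = n;

            /* Reader side: first slot is the count, the rest are the frames. */
            for (unsigned long i = 0; i < buf[0]; i++)
                    printf("frame %lu: %#lx\n", i, entries[i]);
            return 0;
    }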
@@ -4402,13 +5200,13 @@ static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
for_each_hist_key_field(i, hist_data) {
hist_field = hist_data->fields[i];
if (hist_field->flags & HIST_FIELD_FL_VAR) {
- hist_val = hist_field->fn(hist_field, elt, rbe, rec);
+ hist_val = hist_fn_call(hist_field, elt, buffer, rbe, rec);
var_idx = hist_field->var.idx;
tracing_map_set_var(elt, var_idx, hist_val);
}
}
- update_field_vars(hist_data, elt, rbe, rec);
+ update_field_vars(hist_data, elt, buffer, rbe, rec);
}
static inline void add_to_key(char *compound_key, void *key,
@@ -4420,10 +5218,9 @@ static inline void add_to_key(char *compound_key, void *key,
struct ftrace_event_field *field;
field = key_field->field;
- if (field->filter_type == FILTER_DYN_STRING)
+ if (field->filter_type == FILTER_DYN_STRING ||
+ field->filter_type == FILTER_RDYN_STRING)
size = *(u32 *)(rec + field->offset) >> 16;
- else if (field->filter_type == FILTER_PTR_STRING)
- size = strlen(key);
else if (field->filter_type == FILTER_STATIC_STRING)
size = field->size;
@@ -4438,7 +5235,8 @@ static inline void add_to_key(char *compound_key, void *key,
static void
hist_trigger_actions(struct hist_trigger_data *hist_data,
- struct tracing_map_elt *elt, void *rec,
+ struct tracing_map_elt *elt,
+ struct trace_buffer *buffer, void *rec,
struct ring_buffer_event *rbe, void *key,
u64 *var_ref_vals)
{
@@ -4447,11 +5245,12 @@ hist_trigger_actions(struct hist_trigger_data *hist_data,
for (i = 0; i < hist_data->n_actions; i++) {
data = hist_data->actions[i];
- data->fn(hist_data, elt, rec, rbe, key, data, var_ref_vals);
+ data->fn(hist_data, elt, buffer, rec, rbe, key, data, var_ref_vals);
}
}
-static void event_hist_trigger(struct event_trigger_data *data, void *rec,
+static void event_hist_trigger(struct event_trigger_data *data,
+ struct trace_buffer *buffer, void *rec,
struct ring_buffer_event *rbe)
{
struct hist_trigger_data *hist_data = data->private_data;
@@ -4465,6 +5264,9 @@ static void event_hist_trigger(struct event_trigger_data *data, void *rec,
void *key = NULL;
unsigned int i;
+ if (unlikely(!rbe))
+ return;
+
memset(compound_key, 0, hist_data->key_size);
for_each_hist_key_field(i, hist_data) {
@@ -4472,11 +5274,20 @@ static void event_hist_trigger(struct event_trigger_data *data, void *rec,
if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
memset(entries, 0, HIST_STACKTRACE_SIZE);
- stack_trace_save(entries, HIST_STACKTRACE_DEPTH,
- HIST_STACKTRACE_SKIP);
+ if (key_field->field) {
+ unsigned long *stack, n_entries;
+
+ field_contents = hist_fn_call(key_field, elt, buffer, rbe, rec);
+ stack = (unsigned long *)(long)field_contents;
+ n_entries = *stack;
+ memcpy(entries, ++stack, n_entries * sizeof(unsigned long));
+ } else {
+ stack_trace_save(entries, HIST_STACKTRACE_DEPTH,
+ HIST_STACKTRACE_SKIP);
+ }
key = entries;
} else {
- field_contents = key_field->fn(key_field, elt, rbe, rec);
+ field_contents = hist_fn_call(key_field, elt, buffer, rbe, rec);
if (key_field->flags & HIST_FIELD_FL_STRING) {
key = (void *)(unsigned long)field_contents;
use_compound_key = true;
@@ -4499,17 +5310,16 @@ static void event_hist_trigger(struct event_trigger_data *data, void *rec,
if (!elt)
return;
- hist_trigger_elt_update(hist_data, elt, rec, rbe, var_ref_vals);
+ hist_trigger_elt_update(hist_data, elt, buffer, rec, rbe, var_ref_vals);
if (resolve_var_refs(hist_data, key, var_ref_vals, true))
- hist_trigger_actions(hist_data, elt, rec, rbe, key, var_ref_vals);
+ hist_trigger_actions(hist_data, elt, buffer, rec, rbe, key, var_ref_vals);
}
static void hist_trigger_stacktrace_print(struct seq_file *m,
unsigned long *stacktrace_entries,
unsigned int max_entries)
{
- char str[KSYM_SYMBOL_LEN];
unsigned int spaces = 8;
unsigned int i;
@@ -4518,8 +5328,7 @@ static void hist_trigger_stacktrace_print(struct seq_file *m,
return;
seq_printf(m, "%*c", 1 + spaces, ' ');
- sprint_symbol(str, stacktrace_entries[i]);
- seq_printf(m, "%s\n", str);
+ seq_printf(m, "%pS\n", (void*)stacktrace_entries[i]);
}
}
@@ -4529,7 +5338,6 @@ static void hist_trigger_print_key(struct seq_file *m,
struct tracing_map_elt *elt)
{
struct hist_field *key_field;
- char str[KSYM_SYMBOL_LEN];
bool multiline = false;
const char *field_name;
unsigned int i;
@@ -4550,14 +5358,12 @@ static void hist_trigger_print_key(struct seq_file *m,
seq_printf(m, "%s: %llx", field_name, uval);
} else if (key_field->flags & HIST_FIELD_FL_SYM) {
uval = *(u64 *)(key + key_field->offset);
- sprint_symbol_no_offset(str, uval);
- seq_printf(m, "%s: [%llx] %-45s", field_name,
- uval, str);
+ seq_printf(m, "%s: [%llx] %-45ps", field_name,
+ uval, (void *)(uintptr_t)uval);
} else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) {
uval = *(u64 *)(key + key_field->offset);
- sprint_symbol(str, uval);
- seq_printf(m, "%s: [%llx] %-55s", field_name,
- uval, str);
+ seq_printf(m, "%s: [%llx] %-55pS", field_name,
+ uval, (void *)(uintptr_t)uval);
} else if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
struct hist_elt_data *elt_data = elt->private_data;
char *comm;
@@ -4581,7 +5387,10 @@ static void hist_trigger_print_key(struct seq_file *m,
seq_printf(m, "%s: %-30s[%3llu]", field_name,
syscall_name, uval);
} else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
- seq_puts(m, "stacktrace:\n");
+ if (key_field->field)
+ seq_printf(m, "%s.stacktrace", key_field->field->name);
+ else
+ seq_puts(m, "common_stacktrace:\n");
hist_trigger_stacktrace_print(m,
key + key_field->offset,
HIST_STACKTRACE_DEPTH);
@@ -4589,6 +5398,11 @@ static void hist_trigger_print_key(struct seq_file *m,
} else if (key_field->flags & HIST_FIELD_FL_LOG2) {
seq_printf(m, "%s: ~ 2^%-2llu", field_name,
*(u64 *)(key + key_field->offset));
+ } else if (key_field->flags & HIST_FIELD_FL_BUCKET) {
+ unsigned long buckets = key_field->buckets;
+ uval = *(u64 *)(key + key_field->offset);
+ seq_printf(m, "%s: ~ %llu-%llu", field_name,
+ uval, uval + buckets -1);
} else if (key_field->flags & HIST_FIELD_FL_STRING) {
seq_printf(m, "%s: %-50s", field_name,
(char *)(key + key_field->offset));
@@ -4604,33 +5418,101 @@ static void hist_trigger_print_key(struct seq_file *m,
seq_puts(m, "}");
}
+/* Get the 100 times of the percentage of @val in @total */
+static inline unsigned int __get_percentage(u64 val, u64 total)
+{
+ if (!total)
+ goto div0;
+
+ if (val < (U64_MAX / 10000))
+ return (unsigned int)div64_ul(val * 10000, total);
+
+ total = div64_u64(total, 10000);
+ if (!total)
+ goto div0;
+
+ return (unsigned int)div64_ul(val, total);
+div0:
+ return val ? UINT_MAX : 0;
+}
+
+#define BAR_CHAR '#'
+
+static inline const char *__fill_bar_str(char *buf, int size, u64 val, u64 max)
+{
+ unsigned int len = __get_percentage(val, max);
+ int i;
+
+ if (len == UINT_MAX) {
+ snprintf(buf, size, "[ERROR]");
+ return buf;
+ }
+
+ len = len * size / 10000;
+ for (i = 0; i < len && i < size; i++)
+ buf[i] = BAR_CHAR;
+ while (i < size)
+ buf[i++] = ' ';
+ buf[size] = '\0';
+
+ return buf;
+}
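
__get_percentage() returns the ratio scaled by 10000 (percent with two decimal places) and signals division-by-zero or overflow with UINT_MAX, and __fill_bar_str() turns that ratio into a fixed-width '#' bar. A plain C sketch of the same fixed-point arithmetic, without the kernel div64 helpers:

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Ratio of val to total, scaled by 10000 (percent * 100). */
    static unsigned int get_percentage(uint64_t val, uint64_t total)
    {
            if (!total)
                    return val ? UINT_MAX : 0;
            if (val < UINT64_MAX / 10000)
                    return (unsigned int)(val * 10000 / total);
            total /= 10000;
            return total ? (unsigned int)(val / total) : UINT_MAX;
    }

    static void fill_bar(char *buf, int size, uint64_t val, uint64_t max)
    {
            unsigned int pc = get_percentage(val, max);
            int i, len;

            if (pc == UINT_MAX) {
                    snprintf(buf, size, "[ERROR]");
                    return;
            }
            len = (int)((uint64_t)pc * size / 10000);
            for (i = 0; i < len && i < size; i++)
                    buf[i] = '#';
            while (i < size)
                    buf[i++] = ' ';
            buf[size] = '\0';
    }

    int main(void)
    {
            char bar[21];
            unsigned int pc = get_percentage(1234, 5000);   /* 24.68% */

            printf("%u.%02u%%\n", pc / 100, pc % 100);
            fill_bar(bar, 20, 1234, 5000);
            printf("[%s]\n", bar);
            return 0;
    }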
+
+struct hist_val_stat {
+ u64 max;
+ u64 total;
+};
+
+static void hist_trigger_print_val(struct seq_file *m, unsigned int idx,
+ const char *field_name, unsigned long flags,
+ struct hist_val_stat *stats,
+ struct tracing_map_elt *elt)
+{
+ u64 val = tracing_map_read_sum(elt, idx);
+ unsigned int pc;
+ char bar[21];
+
+ if (flags & HIST_FIELD_FL_PERCENT) {
+ pc = __get_percentage(val, stats[idx].total);
+ if (pc == UINT_MAX)
+ seq_printf(m, " %s (%%):[ERROR]", field_name);
+ else
+ seq_printf(m, " %s (%%): %3u.%02u", field_name,
+ pc / 100, pc % 100);
+ } else if (flags & HIST_FIELD_FL_GRAPH) {
+ seq_printf(m, " %s: %20s", field_name,
+ __fill_bar_str(bar, 20, val, stats[idx].max));
+ } else if (flags & HIST_FIELD_FL_HEX) {
+ seq_printf(m, " %s: %10llx", field_name, val);
+ } else {
+ seq_printf(m, " %s: %10llu", field_name, val);
+ }
+}
+
static void hist_trigger_entry_print(struct seq_file *m,
struct hist_trigger_data *hist_data,
+ struct hist_val_stat *stats,
void *key,
struct tracing_map_elt *elt)
{
const char *field_name;
- unsigned int i;
+ unsigned int i = HITCOUNT_IDX;
+ unsigned long flags;
hist_trigger_print_key(m, hist_data, key, elt);
- seq_printf(m, " hitcount: %10llu",
- tracing_map_read_sum(elt, HITCOUNT_IDX));
+ /* At first, show the raw hitcount if !nohitcount */
+ if (!hist_data->attrs->no_hitcount)
+ hist_trigger_print_val(m, i, "hitcount", 0, stats, elt);
for (i = 1; i < hist_data->n_vals; i++) {
field_name = hist_field_name(hist_data->fields[i], 0);
-
- if (hist_data->fields[i]->flags & HIST_FIELD_FL_VAR ||
- hist_data->fields[i]->flags & HIST_FIELD_FL_EXPR)
+ flags = hist_data->fields[i]->flags;
+ if (flags & HIST_FIELD_FL_VAR || flags & HIST_FIELD_FL_EXPR)
continue;
- if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) {
- seq_printf(m, " %s: %10llx", field_name,
- tracing_map_read_sum(elt, i));
- } else {
- seq_printf(m, " %s: %10llu", field_name,
- tracing_map_read_sum(elt, i));
- }
+ seq_puts(m, " ");
+ hist_trigger_print_val(m, i, field_name, flags, stats, elt);
}
print_actions(m, hist_data, elt);
@@ -4643,7 +5525,9 @@ static int print_entries(struct seq_file *m,
{
struct tracing_map_sort_entry **sort_entries = NULL;
struct tracing_map *map = hist_data->map;
- int i, n_entries;
+ int i, j, n_entries;
+ struct hist_val_stat *stats = NULL;
+ u64 val;
n_entries = tracing_map_sort_entries(map, hist_data->sort_keys,
hist_data->n_sort_keys,
@@ -4651,11 +5535,34 @@ static int print_entries(struct seq_file *m,
if (n_entries < 0)
return n_entries;
+ /* Calculate the max and the total for each field if needed. */
+ for (j = 0; j < hist_data->n_vals; j++) {
+ if (!(hist_data->fields[j]->flags &
+ (HIST_FIELD_FL_PERCENT | HIST_FIELD_FL_GRAPH)))
+ continue;
+ if (!stats) {
+ stats = kcalloc(hist_data->n_vals, sizeof(*stats),
+ GFP_KERNEL);
+ if (!stats) {
+ n_entries = -ENOMEM;
+ goto out;
+ }
+ }
+ for (i = 0; i < n_entries; i++) {
+ val = tracing_map_read_sum(sort_entries[i]->elt, j);
+ stats[j].total += val;
+ if (stats[j].max < val)
+ stats[j].max = val;
+ }
+ }
+
for (i = 0; i < n_entries; i++)
- hist_trigger_entry_print(m, hist_data,
+ hist_trigger_entry_print(m, hist_data, stats,
sort_entries[i]->key,
sort_entries[i]->elt);
+ kfree(stats);
+out:
tracing_map_destroy_sort_entries(sort_entries, n_entries);
return n_entries;
@@ -4671,7 +5578,7 @@ static void hist_trigger_show(struct seq_file *m,
seq_puts(m, "\n\n");
seq_puts(m, "# event histogram\n#\n# trigger info: ");
- data->ops->print(m, data->ops, data);
+ data->ops->print(m, data);
seq_puts(m, "#\n\n");
hist_data = data->private_data;
@@ -4715,10 +5622,12 @@ static int event_hist_open(struct inode *inode, struct file *file)
{
int ret;
- ret = security_locked_down(LOCKDOWN_TRACEFS);
+ ret = tracing_open_file_tr(inode, file);
if (ret)
return ret;
+ /* Clear private_data to avoid warning in single_open() */
+ file->private_data = NULL;
return single_open(file, hist_show, file);
}
@@ -4726,7 +5635,7 @@ const struct file_operations event_hist_fops = {
.open = event_hist_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = single_release,
+ .release = tracing_single_release_file_tr,
};
#ifdef CONFIG_HIST_TRIGGERS_DEBUG
@@ -4748,6 +5657,8 @@ static void hist_field_debug_show_flags(struct seq_file *m,
if (flags & HIST_FIELD_FL_ALIAS)
seq_puts(m, " HIST_FIELD_FL_ALIAS\n");
+ else if (flags & HIST_FIELD_FL_CONST)
+ seq_puts(m, " HIST_FIELD_FL_CONST\n");
}
static int hist_field_debug_show(struct seq_file *m,
@@ -4769,6 +5680,9 @@ static int hist_field_debug_show(struct seq_file *m,
field->var.idx);
}
+ if (field->flags & HIST_FIELD_FL_CONST)
+ seq_printf(m, " constant: %llu\n", field->constant);
+
if (field->flags & HIST_FIELD_FL_ALIAS)
seq_printf(m, " var_ref_idx (into hist_data->var_refs[]): %u\n",
field->var_ref_idx);
@@ -4898,7 +5812,7 @@ static void hist_trigger_debug_show(struct seq_file *m,
seq_puts(m, "\n\n");
seq_puts(m, "# event histogram\n#\n# trigger info: ");
- data->ops->print(m, data->ops, data);
+ data->ops->print(m, data);
seq_puts(m, "#\n\n");
hist_data = data->private_data;
@@ -4987,10 +5901,12 @@ static int event_hist_debug_open(struct inode *inode, struct file *file)
{
int ret;
- ret = security_locked_down(LOCKDOWN_TRACEFS);
+ ret = tracing_open_file_tr(inode, file);
if (ret)
return ret;
+ /* Clear private_data to avoid warning in single_open() */
+ file->private_data = NULL;
return single_open(file, hist_debug_show, file);
}
@@ -4998,7 +5914,7 @@ const struct file_operations event_hist_debug_fops = {
.open = event_hist_debug_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = single_release,
+ .release = tracing_single_release_file_tr,
};
#endif
@@ -5010,7 +5926,9 @@ static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
seq_printf(m, "%s=", hist_field->var.name);
if (hist_field->flags & HIST_FIELD_FL_CPU)
- seq_puts(m, "cpu");
+ seq_puts(m, "common_cpu");
+ else if (hist_field->flags & HIST_FIELD_FL_CONST)
+ seq_printf(m, "%llu", hist_field->constant);
else if (field_name) {
if (hist_field->flags & HIST_FIELD_FL_VAR_REF ||
hist_field->flags & HIST_FIELD_FL_ALIAS)
@@ -5021,25 +5939,28 @@ static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
if (hist_field->flags) {
if (!(hist_field->flags & HIST_FIELD_FL_VAR_REF) &&
- !(hist_field->flags & HIST_FIELD_FL_EXPR)) {
+ !(hist_field->flags & HIST_FIELD_FL_EXPR) &&
+ !(hist_field->flags & HIST_FIELD_FL_STACKTRACE)) {
const char *flags = get_hist_field_flags(hist_field);
if (flags)
seq_printf(m, ".%s", flags);
}
}
+ if (hist_field->buckets)
+ seq_printf(m, "=%ld", hist_field->buckets);
}
static int event_hist_trigger_print(struct seq_file *m,
- struct event_trigger_ops *ops,
struct event_trigger_data *data)
{
struct hist_trigger_data *hist_data = data->private_data;
struct hist_field *field;
bool have_var = false;
+ bool show_val = false;
unsigned int i;
- seq_puts(m, "hist:");
+ seq_puts(m, HIST_PREFIX);
if (data->name)
seq_printf(m, "%s:", data->name);
@@ -5052,9 +5973,12 @@ static int event_hist_trigger_print(struct seq_file *m,
if (i > hist_data->n_vals)
seq_puts(m, ",");
- if (field->flags & HIST_FIELD_FL_STACKTRACE)
- seq_puts(m, "stacktrace");
- else
+ if (field->flags & HIST_FIELD_FL_STACKTRACE) {
+ if (field->field)
+ seq_printf(m, "%s.stacktrace", field->field->name);
+ else
+ seq_puts(m, "common_stacktrace");
+ } else
hist_field_print(m, field);
}
@@ -5067,12 +5991,16 @@ static int event_hist_trigger_print(struct seq_file *m,
continue;
}
- if (i == HITCOUNT_IDX)
+ if (i == HITCOUNT_IDX) {
+ if (hist_data->attrs->no_hitcount)
+ continue;
seq_puts(m, "hitcount");
- else {
- seq_puts(m, ",");
+ } else {
+ if (show_val)
+ seq_puts(m, ",");
hist_field_print(m, field);
}
+ show_val = true;
}
if (have_var) {
@@ -5123,6 +6051,8 @@ static int event_hist_trigger_print(struct seq_file *m,
seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits));
if (hist_data->enable_timestamps)
seq_printf(m, ":clock=%s", hist_data->attrs->clock);
+ if (hist_data->attrs->no_hitcount)
+ seq_puts(m, ":nohitcount");
print_actions_spec(m, hist_data);
@@ -5139,8 +6069,7 @@ static int event_hist_trigger_print(struct seq_file *m,
return 0;
}
-static int event_hist_trigger_init(struct event_trigger_ops *ops,
- struct event_trigger_data *data)
+static int event_hist_trigger_init(struct event_trigger_data *data)
{
struct hist_trigger_data *hist_data = data->private_data;
@@ -5162,13 +6091,13 @@ static void unregister_field_var_hists(struct hist_trigger_data *hist_data)
for (i = 0; i < hist_data->n_field_var_hists; i++) {
file = hist_data->field_var_hists[i]->hist_data->event_file;
cmd = hist_data->field_var_hists[i]->cmd;
- ret = event_hist_trigger_func(&trigger_hist_cmd, file,
- "!hist", "hist", cmd);
+ ret = event_hist_trigger_parse(&trigger_hist_cmd, file,
+ "!hist", "hist", cmd);
+ WARN_ON_ONCE(ret < 0);
}
}
-static void event_hist_trigger_free(struct event_trigger_ops *ops,
- struct event_trigger_data *data)
+static void event_hist_trigger_free(struct event_trigger_data *data)
{
struct hist_trigger_data *hist_data = data->private_data;
@@ -5191,31 +6120,29 @@ static void event_hist_trigger_free(struct event_trigger_ops *ops,
}
static struct event_trigger_ops event_hist_trigger_ops = {
- .func = event_hist_trigger,
+ .trigger = event_hist_trigger,
.print = event_hist_trigger_print,
.init = event_hist_trigger_init,
.free = event_hist_trigger_free,
};
-static int event_hist_trigger_named_init(struct event_trigger_ops *ops,
- struct event_trigger_data *data)
+static int event_hist_trigger_named_init(struct event_trigger_data *data)
{
data->ref++;
save_named_trigger(data->named_data->name, data);
- event_hist_trigger_init(ops, data->named_data);
+ event_hist_trigger_init(data->named_data);
return 0;
}
-static void event_hist_trigger_named_free(struct event_trigger_ops *ops,
- struct event_trigger_data *data)
+static void event_hist_trigger_named_free(struct event_trigger_data *data)
{
if (WARN_ON_ONCE(data->ref <= 0))
return;
- event_hist_trigger_free(ops, data->named_data);
+ event_hist_trigger_free(data->named_data);
data->ref--;
if (!data->ref) {
@@ -5225,7 +6152,7 @@ static void event_hist_trigger_named_free(struct event_trigger_ops *ops,
}
static struct event_trigger_ops event_hist_trigger_named_ops = {
- .func = event_hist_trigger,
+ .trigger = event_hist_trigger,
.print = event_hist_trigger_print,
.init = event_hist_trigger_named_init,
.free = event_hist_trigger_named_free,
@@ -5342,7 +6269,49 @@ static bool hist_trigger_match(struct event_trigger_data *data,
return true;
}
-static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
+static bool existing_hist_update_only(char *glob,
+ struct event_trigger_data *data,
+ struct trace_event_file *file)
+{
+ struct hist_trigger_data *hist_data = data->private_data;
+ struct event_trigger_data *test, *named_data = NULL;
+ bool updated = false;
+
+ if (!hist_data->attrs->pause && !hist_data->attrs->cont &&
+ !hist_data->attrs->clear)
+ goto out;
+
+ if (hist_data->attrs->name) {
+ named_data = find_named_trigger(hist_data->attrs->name);
+ if (named_data) {
+ if (!hist_trigger_match(data, named_data, named_data,
+ true))
+ goto out;
+ }
+ }
+
+ if (hist_data->attrs->name && !named_data)
+ goto out;
+
+ list_for_each_entry(test, &file->triggers, list) {
+ if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
+ if (!hist_trigger_match(data, test, named_data, false))
+ continue;
+ if (hist_data->attrs->pause)
+ test->paused = true;
+ else if (hist_data->attrs->cont)
+ test->paused = false;
+ else if (hist_data->attrs->clear)
+ hist_clear(test);
+ updated = true;
+ goto out;
+ }
+ }
+ out:
+ return updated;
+}
+
+static int hist_register_trigger(char *glob,
struct event_trigger_data *data,
struct trace_event_file *file)
{
@@ -5370,19 +6339,11 @@ static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
list_for_each_entry(test, &file->triggers, list) {
if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
- if (!hist_trigger_match(data, test, named_data, false))
- continue;
- if (hist_data->attrs->pause)
- test->paused = true;
- else if (hist_data->attrs->cont)
- test->paused = false;
- else if (hist_data->attrs->clear)
- hist_clear(test);
- else {
+ if (hist_trigger_match(data, test, named_data, false)) {
hist_err(tr, HIST_ERR_TRIGGER_EEXIST, 0);
ret = -EEXIST;
+ goto out;
}
- goto out;
}
}
new:
@@ -5402,7 +6363,7 @@ static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
}
if (data->ops->init) {
- ret = data->ops->init(data->ops, data);
+ ret = data->ops->init(data);
if (ret < 0)
goto out;
}
@@ -5416,13 +6377,11 @@ static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
goto out;
}
- tracing_set_time_stamp_abs(file->tr, true);
+ tracing_set_filter_buffering(file->tr, true);
}
if (named_data)
destroy_hist_data(hist_data);
-
- ret++;
out:
return ret;
}
@@ -5494,24 +6453,23 @@ static bool hist_trigger_check_refs(struct event_trigger_data *data,
return false;
}
-static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
+static void hist_unregister_trigger(char *glob,
struct event_trigger_data *data,
struct trace_event_file *file)
{
+ struct event_trigger_data *test = NULL, *iter, *named_data = NULL;
struct hist_trigger_data *hist_data = data->private_data;
- struct event_trigger_data *test, *named_data = NULL;
- bool unregistered = false;
lockdep_assert_held(&event_mutex);
if (hist_data->attrs->name)
named_data = find_named_trigger(hist_data->attrs->name);
- list_for_each_entry(test, &file->triggers, list) {
- if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
- if (!hist_trigger_match(data, test, named_data, false))
+ list_for_each_entry(iter, &file->triggers, list) {
+ if (iter->cmd_ops->trigger_type == ETT_EVENT_HIST) {
+ if (!hist_trigger_match(data, iter, named_data, false))
continue;
- unregistered = true;
+ test = iter;
list_del_rcu(&test->list);
trace_event_trigger_enable_disable(file, 0);
update_cond_flag(file);
@@ -5519,12 +6477,12 @@ static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
}
}
- if (unregistered && test->ops->free)
- test->ops->free(test->ops, test);
+ if (test && test->ops->free)
+ test->ops->free(test);
if (hist_data->enable_timestamps) {
- if (!hist_data->remove || unregistered)
- tracing_set_time_stamp_abs(file->tr, false);
+ if (!hist_data->remove || test)
+ tracing_set_filter_buffering(file->tr, false);
}
}
@@ -5571,74 +6529,86 @@ static void hist_unreg_all(struct trace_event_file *file)
update_cond_flag(file);
if (hist_data->enable_timestamps)
- tracing_set_time_stamp_abs(file->tr, false);
+ tracing_set_filter_buffering(file->tr, false);
if (test->ops->free)
- test->ops->free(test->ops, test);
+ test->ops->free(test);
}
}
}
-static int event_hist_trigger_func(struct event_command *cmd_ops,
- struct trace_event_file *file,
- char *glob, char *cmd, char *param)
+static int event_hist_trigger_parse(struct event_command *cmd_ops,
+ struct trace_event_file *file,
+ char *glob, char *cmd,
+ char *param_and_filter)
{
unsigned int hist_trigger_bits = TRACING_MAP_BITS_DEFAULT;
struct event_trigger_data *trigger_data;
struct hist_trigger_attrs *attrs;
- struct event_trigger_ops *trigger_ops;
struct hist_trigger_data *hist_data;
+ char *param, *filter, *p, *start;
struct synth_event *se;
const char *se_name;
- bool remove = false;
- char *trigger, *p;
+ bool remove;
int ret = 0;
lockdep_assert_held(&event_mutex);
- if (glob && strlen(glob)) {
+ if (WARN_ON(!glob))
+ return -EINVAL;
+
+ if (glob[0]) {
hist_err_clear();
- last_cmd_set(file, param);
+ last_cmd_set(file, param_and_filter);
}
- if (!param)
- return -EINVAL;
+ remove = event_trigger_check_remove(glob);
- if (glob[0] == '!')
- remove = true;
+ if (event_trigger_empty_param(param_and_filter))
+ return -EINVAL;
/*
* separate the trigger from the filter (k:v [if filter])
* allowing for whitespace in the trigger
*/
- p = trigger = param;
+ p = param = param_and_filter;
do {
p = strstr(p, "if");
if (!p)
break;
- if (p == param)
+ if (p == param_and_filter)
return -EINVAL;
if (*(p - 1) != ' ' && *(p - 1) != '\t') {
p++;
continue;
}
- if (p >= param + strlen(param) - (sizeof("if") - 1) - 1)
+ if (p >= param_and_filter + strlen(param_and_filter) - (sizeof("if") - 1) - 1)
return -EINVAL;
if (*(p + sizeof("if") - 1) != ' ' && *(p + sizeof("if") - 1) != '\t') {
p++;
continue;
}
break;
- } while (p);
+ } while (1);
if (!p)
- param = NULL;
+ filter = NULL;
else {
*(p - 1) = '\0';
- param = strstrip(p);
- trigger = strstrip(trigger);
+ filter = strstrip(p);
+ param = strstrip(param);
+ }
+
+ /*
+ * To simplify arithmetic expression parsing, replace occurrences of
+ * '.sym-offset' modifier with '.symXoffset'
+ */
+ start = strstr(param, ".sym-offset");
+ while (start) {
+ *(start + 4) = 'X';
+ start = strstr(start + 11, ".sym-offset");
}
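
The new loop above rewrites every ".sym-offset" modifier in the trigger string to ".symXoffset" before the expression parser sees it, so the '-' inside the modifier cannot be mistaken for a subtraction operator (the parser presumably maps the spelling back when it handles the modifier). The same in-place rewrite as a standalone sketch:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char param[] = "keys=ip.sym-offset:vals=hitcount";
            char *start = strstr(param, ".sym-offset");

            while (start) {
                    start[4] = 'X';                          /* ".sym-offset" -> ".symXoffset" */
                    start = strstr(start + 11, ".sym-offset");
            }
            printf("%s\n", param);   /* keys=ip.symXoffset:vals=hitcount */
            return 0;
    }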
- attrs = parse_hist_trigger_attrs(file->tr, trigger);
+ attrs = parse_hist_trigger_attrs(file->tr, param);
if (IS_ERR(attrs))
return PTR_ERR(attrs);
@@ -5651,29 +6621,15 @@ static int event_hist_trigger_func(struct event_command *cmd_ops,
return PTR_ERR(hist_data);
}
- trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
-
- trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
+ trigger_data = event_trigger_alloc(cmd_ops, cmd, param, hist_data);
if (!trigger_data) {
ret = -ENOMEM;
goto out_free;
}
- trigger_data->count = -1;
- trigger_data->ops = trigger_ops;
- trigger_data->cmd_ops = cmd_ops;
-
- INIT_LIST_HEAD(&trigger_data->list);
- RCU_INIT_POINTER(trigger_data->filter, NULL);
-
- trigger_data->private_data = hist_data;
-
- /* if param is non-empty, it's supposed to be a filter */
- if (param && cmd_ops->set_filter) {
- ret = cmd_ops->set_filter(param, trigger_data, file);
- if (ret < 0)
- goto out_free;
- }
+ ret = event_trigger_set_filter(cmd_ops, file, filter, trigger_data);
+ if (ret < 0)
+ goto out_free;
if (remove) {
if (!have_hist_trigger_match(trigger_data, file))
@@ -5684,7 +6640,7 @@ static int event_hist_trigger_func(struct event_command *cmd_ops,
goto out_free;
}
- cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
+ event_trigger_unregister(cmd_ops, file, glob+1, trigger_data);
se_name = trace_event_name(file->event_call);
se = find_synth_event(se_name);
if (se)
@@ -5693,29 +6649,26 @@ static int event_hist_trigger_func(struct event_command *cmd_ops,
goto out_free;
}
- ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
- /*
- * The above returns on success the # of triggers registered,
- * but if it didn't register any it returns zero. Consider no
- * triggers registered a failure too.
- */
- if (!ret) {
- if (!(attrs->pause || attrs->cont || attrs->clear))
- ret = -ENOENT;
+ if (existing_hist_update_only(glob, trigger_data, file))
goto out_free;
- } else if (ret < 0)
+
+ ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
+ if (ret < 0)
goto out_free;
if (get_named_trigger_data(trigger_data))
goto enable;
- if (has_hist_vars(hist_data))
- save_hist_vars(hist_data);
-
ret = create_actions(hist_data);
if (ret)
goto out_unreg;
+ if (has_hist_vars(hist_data) || hist_data->n_var_refs) {
+ ret = save_hist_vars(hist_data);
+ if (ret)
+ goto out_unreg;
+ }
+
ret = tracing_map_init(hist_data->map);
if (ret)
goto out_unreg;
@@ -5728,18 +6681,15 @@ enable:
se = find_synth_event(se_name);
if (se)
se->ref++;
- /* Just return zero, not the number of registered triggers */
- ret = 0;
out:
- if (ret == 0)
+ if (ret == 0 && glob[0])
hist_err_clear();
return ret;
out_unreg:
- cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
+ event_trigger_unregister(cmd_ops, file, glob+1, trigger_data);
out_free:
- if (cmd_ops->set_filter)
- cmd_ops->set_filter(NULL, trigger_data, NULL);
+ event_trigger_reset_filter(cmd_ops, trigger_data);
remove_hist_vars(hist_data);
@@ -5753,7 +6703,7 @@ static struct event_command trigger_hist_cmd = {
.name = "hist",
.trigger_type = ETT_EVENT_HIST,
.flags = EVENT_CMD_FL_NEEDS_REC,
- .func = event_hist_trigger_func,
+ .parse = event_hist_trigger_parse,
.reg = hist_register_trigger,
.unreg = hist_unregister_trigger,
.unreg_all = hist_unreg_all,
@@ -5772,7 +6722,8 @@ __init int register_trigger_hist_cmd(void)
}
static void
-hist_enable_trigger(struct event_trigger_data *data, void *rec,
+hist_enable_trigger(struct event_trigger_data *data,
+ struct trace_buffer *buffer, void *rec,
struct ring_buffer_event *event)
{
struct enable_trigger_data *enable_data = data->private_data;
@@ -5790,7 +6741,8 @@ hist_enable_trigger(struct event_trigger_data *data, void *rec,
}
static void
-hist_enable_count_trigger(struct event_trigger_data *data, void *rec,
+hist_enable_count_trigger(struct event_trigger_data *data,
+ struct trace_buffer *buffer, void *rec,
struct ring_buffer_event *event)
{
if (!data->count)
@@ -5799,32 +6751,32 @@ hist_enable_count_trigger(struct event_trigger_data *data, void *rec,
if (data->count != -1)
(data->count)--;
- hist_enable_trigger(data, rec, event);
+ hist_enable_trigger(data, buffer, rec, event);
}
static struct event_trigger_ops hist_enable_trigger_ops = {
- .func = hist_enable_trigger,
+ .trigger = hist_enable_trigger,
.print = event_enable_trigger_print,
.init = event_trigger_init,
.free = event_enable_trigger_free,
};
static struct event_trigger_ops hist_enable_count_trigger_ops = {
- .func = hist_enable_count_trigger,
+ .trigger = hist_enable_count_trigger,
.print = event_enable_trigger_print,
.init = event_trigger_init,
.free = event_enable_trigger_free,
};
static struct event_trigger_ops hist_disable_trigger_ops = {
- .func = hist_enable_trigger,
+ .trigger = hist_enable_trigger,
.print = event_enable_trigger_print,
.init = event_trigger_init,
.free = event_enable_trigger_free,
};
static struct event_trigger_ops hist_disable_count_trigger_ops = {
- .func = hist_enable_count_trigger,
+ .trigger = hist_enable_count_trigger,
.print = event_enable_trigger_print,
.init = event_trigger_init,
.free = event_enable_trigger_free,
@@ -5858,7 +6810,7 @@ static void hist_enable_unreg_all(struct trace_event_file *file)
update_cond_flag(file);
trace_event_trigger_enable_disable(file, 0);
if (test->ops->free)
- test->ops->free(test->ops, test);
+ test->ops->free(test);
}
}
}
@@ -5866,7 +6818,7 @@ static void hist_enable_unreg_all(struct trace_event_file *file)
static struct event_command trigger_hist_enable_cmd = {
.name = ENABLE_HIST_STR,
.trigger_type = ETT_HIST_ENABLE,
- .func = event_enable_trigger_func,
+ .parse = event_enable_trigger_parse,
.reg = event_enable_register_trigger,
.unreg = event_enable_unregister_trigger,
.unreg_all = hist_enable_unreg_all,
@@ -5877,7 +6829,7 @@ static struct event_command trigger_hist_enable_cmd = {
static struct event_command trigger_hist_disable_cmd = {
.name = DISABLE_HIST_STR,
.trigger_type = ETT_HIST_ENABLE,
- .func = event_enable_trigger_func,
+ .parse = event_enable_trigger_parse,
.reg = event_enable_register_trigger,
.unreg = event_enable_unregister_trigger,
.unreg_all = hist_enable_unreg_all,
diff --git a/kernel/trace/trace_events_inject.c b/kernel/trace/trace_events_inject.c
index 22bcf7c51d1e..8650562bdaa9 100644
--- a/kernel/trace/trace_events_inject.c
+++ b/kernel/trace/trace_events_inject.c
@@ -168,10 +168,14 @@ static void *trace_alloc_entry(struct trace_event_call *call, int *size)
continue;
if (field->filter_type == FILTER_STATIC_STRING)
continue;
- if (field->filter_type == FILTER_DYN_STRING) {
+ if (field->filter_type == FILTER_DYN_STRING ||
+ field->filter_type == FILTER_RDYN_STRING) {
u32 *str_item;
int str_loc = entry_size & 0xffff;
+ if (field->filter_type == FILTER_RDYN_STRING)
+ str_loc -= field->offset + field->size;
+
str_item = (u32 *)(entry + field->offset);
*str_item = str_loc; /* string length is 0. */
} else {
@@ -192,7 +196,6 @@ static void *trace_alloc_entry(struct trace_event_call *call, int *size)
static int parse_entry(char *str, struct trace_event_call *call, void **pentry)
{
struct ftrace_event_field *field;
- unsigned long irq_flags;
void *entry = NULL;
int entry_size;
u64 val = 0;
@@ -203,9 +206,8 @@ static int parse_entry(char *str, struct trace_event_call *call, void **pentry)
if (!entry)
return -ENOMEM;
- local_save_flags(irq_flags);
- tracing_generic_entry_update(entry, call->event.type, irq_flags,
- preempt_count());
+ tracing_generic_entry_update(entry, call->event.type,
+ tracing_gen_ctx());
while ((len = parse_field(str, call, &field, &val)) > 0) {
if (is_function_field(field))
@@ -215,8 +217,9 @@ static int parse_entry(char *str, struct trace_event_call *call, void **pentry)
char *addr = (char *)(unsigned long) val;
if (field->filter_type == FILTER_STATIC_STRING) {
- strlcpy(entry + field->offset, addr, field->size);
- } else if (field->filter_type == FILTER_DYN_STRING) {
+ strscpy(entry + field->offset, addr, field->size);
+ } else if (field->filter_type == FILTER_DYN_STRING ||
+ field->filter_type == FILTER_RDYN_STRING) {
int str_len = strlen(addr) + 1;
int str_loc = entry_size & 0xffff;
u32 *str_item;
@@ -229,8 +232,10 @@ static int parse_entry(char *str, struct trace_event_call *call, void **pentry)
}
entry = *pentry;
- strlcpy(entry + (entry_size - str_len), addr, str_len);
+ strscpy(entry + (entry_size - str_len), addr, str_len);
str_item = (u32 *)(entry + field->offset);
+ if (field->filter_type == FILTER_RDYN_STRING)
+ str_loc -= field->offset + field->size;
*str_item = (str_len << 16) | str_loc;
} else {
char **paddr;
@@ -323,7 +328,8 @@ event_inject_read(struct file *file, char __user *buf, size_t size,
}
const struct file_operations event_inject_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_file_tr,
.read = event_inject_read,
.write = event_inject_write,
+ .release = tracing_release_file_tr,
};
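
For the new FILTER_RDYN_STRING case, the trace_events_inject.c hunks above store the string location relative to the end of the field itself rather than from the start of the record, which is what the "str_loc -= field->offset + field->size" adjustment does before the length is packed in. A hypothetical before/after calculation, with made-up offsets, just to make the arithmetic concrete:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical layout: 4-byte __rel_loc field at offset 8,
             * string payload appended at offset 40 of the record. */
            uint32_t field_offset = 8, field_size = 4;
            uint32_t str_len = 6;
            uint32_t abs_loc = 40;                                    /* from start of record */
            uint32_t rel_loc = abs_loc - (field_offset + field_size); /* from end of field */
            uint32_t item = (str_len << 16) | rel_loc;

            printf("absolute=%u relative=%u packed=%#x\n", abs_loc, rel_loc, item);
            return 0;
    }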
diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
index c6cca0d1d584..c82b401a294d 100644
--- a/kernel/trace/trace_events_synth.c
+++ b/kernel/trace/trace_events_synth.c
@@ -17,10 +17,74 @@
/* for gfp flag names */
#include <linux/trace_events.h>
#include <trace/events/mmflags.h>
+#include "trace_probe.h"
+#include "trace_probe_kernel.h"
#include "trace_synth.h"
-static int create_synth_event(int argc, const char **argv);
+#undef ERRORS
+#define ERRORS \
+ C(BAD_NAME, "Illegal name"), \
+ C(INVALID_CMD, "Command must be of the form: <name> field[;field] ..."),\
+ C(INVALID_DYN_CMD, "Command must be of the form: s or -:[synthetic/]<name> field[;field] ..."),\
+ C(EVENT_EXISTS, "Event already exists"), \
+ C(TOO_MANY_FIELDS, "Too many fields"), \
+ C(INCOMPLETE_TYPE, "Incomplete type"), \
+ C(INVALID_TYPE, "Invalid type"), \
+ C(INVALID_FIELD, "Invalid field"), \
+ C(INVALID_ARRAY_SPEC, "Invalid array specification"),
+
+#undef C
+#define C(a, b) SYNTH_ERR_##a
+
+enum { ERRORS };
+
+#undef C
+#define C(a, b) b
+
+static const char *err_text[] = { ERRORS };
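
The ERRORS/C() block above is the usual X-macro trick: one list expands once into the SYNTH_ERR_* enum and once into the matching err_text[] strings, so the two can never drift out of sync. A minimal, self-contained version of the same pattern (names shortened for the sketch):

    #include <stdio.h>

    #define ERRORS                                   \
            C(BAD_NAME,     "Illegal name"),         \
            C(TOO_MANY,     "Too many fields"),

    #undef C
    #define C(a, b) ERR_##a
    enum { ERRORS ERR_MAX };

    #undef C
    #define C(a, b) b
    static const char *err_text[] = { ERRORS };

    int main(void)
    {
            printf("%d: %s\n", ERR_TOO_MANY, err_text[ERR_TOO_MANY]);
            return 0;
    }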
+
+static DEFINE_MUTEX(lastcmd_mutex);
+static char *last_cmd;
+
+static int errpos(const char *str)
+{
+ int ret = 0;
+
+ mutex_lock(&lastcmd_mutex);
+ if (!str || !last_cmd)
+ goto out;
+
+ ret = err_pos(last_cmd, str);
+ out:
+ mutex_unlock(&lastcmd_mutex);
+ return ret;
+}
+
+static void last_cmd_set(const char *str)
+{
+ if (!str)
+ return;
+
+ mutex_lock(&lastcmd_mutex);
+ kfree(last_cmd);
+ last_cmd = kstrdup(str, GFP_KERNEL);
+ mutex_unlock(&lastcmd_mutex);
+}
+
+static void synth_err(u8 err_type, u16 err_pos)
+{
+ mutex_lock(&lastcmd_mutex);
+ if (!last_cmd)
+ goto out;
+
+ tracing_log_err(NULL, "synthetic_events", last_cmd, err_text,
+ err_type, err_pos);
+ out:
+ mutex_unlock(&lastcmd_mutex);
+}
+
+static int create_synth_event(const char *raw_command);
static int synth_event_show(struct seq_file *m, struct dyn_event *ev);
static int synth_event_release(struct dyn_event *ev);
static bool synth_event_is_busy(struct dyn_event *ev);
@@ -63,7 +127,7 @@ static bool synth_event_match(const char *system, const char *event,
struct synth_trace_event {
struct trace_entry ent;
- u64 fields[];
+ union trace_synth_field fields[];
};
static int synth_event_define_fields(struct trace_event_call *call)
@@ -88,7 +152,7 @@ static int synth_event_define_fields(struct trace_event_call *call)
event->fields[i]->offset = n_u64;
- if (event->fields[i]->is_string) {
+ if (event->fields[i]->is_string && !event->fields[i]->is_dynamic) {
offset += STR_VAR_LEN_MAX;
n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
} else {
@@ -120,6 +184,14 @@ static int synth_field_is_string(char *type)
return false;
}
+static int synth_field_is_stack(char *type)
+{
+ if (strstr(type, "long[") != NULL)
+ return true;
+
+ return false;
+}
+
static int synth_field_string_size(char *type)
{
char buf[4], *end, *start;
@@ -132,13 +204,16 @@ static int synth_field_string_size(char *type)
start += sizeof("char[") - 1;
end = strchr(type, ']');
- if (!end || end < start)
+ if (!end || end < start || type + strlen(type) > end + 1)
return -EINVAL;
len = end - start;
if (len > 3)
return -EINVAL;
+ if (len == 0)
+ return 0; /* variable-length string */
+
strncpy(buf, start, len);
buf[len] = '\0';
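
synth_field_string_size() now also accepts the bare "char[]" spelling (returning 0 to mark a variable-length string) and rejects trailing characters after the ']' via the "type + strlen(type) > end + 1" check. A rough userspace rendering of that parse; the kernel function goes on to convert the digits with kstrtouint(), which atoi() stands in for here:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Returns N for "char[N]", 0 for "char[]", -1 for anything malformed. */
    static int string_size(const char *type)
    {
            const char *start = strstr(type, "char[");
            const char *end;

            if (!start)
                    return -1;
            start += strlen("char[");
            end = strchr(type, ']');
            if (!end || end < start || type + strlen(type) > end + 1)
                    return -1;                       /* no ']' or trailing junk */
            if (end == start)
                    return 0;                        /* "char[]": dynamic string */
            if (end - start > 3)
                    return -1;                       /* at most 3 digits */
            return atoi(start);                      /* kernel uses kstrtouint() */
    }

    int main(void)
    {
            printf("%d %d %d\n", string_size("char[16]"),
                   string_size("char[]"), string_size("char[16]x"));
            return 0;
    }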
@@ -184,12 +259,16 @@ static int synth_field_size(char *type)
size = sizeof(long);
else if (strcmp(type, "unsigned long") == 0)
size = sizeof(unsigned long);
+ else if (strcmp(type, "bool") == 0)
+ size = sizeof(bool);
else if (strcmp(type, "pid_t") == 0)
size = sizeof(pid_t);
else if (strcmp(type, "gfp_t") == 0)
size = sizeof(gfp_t);
else if (synth_field_is_string(type))
size = synth_field_string_size(type);
+ else if (synth_field_is_stack(type))
+ size = 0;
return size;
}
@@ -226,11 +305,15 @@ static const char *synth_field_fmt(char *type)
fmt = "%ld";
else if (strcmp(type, "unsigned long") == 0)
fmt = "%lu";
+ else if (strcmp(type, "bool") == 0)
+ fmt = "%d";
else if (strcmp(type, "pid_t") == 0)
fmt = "%d";
else if (strcmp(type, "gfp_t") == 0)
fmt = "%x";
else if (synth_field_is_string(type))
+ fmt = "%.*s";
+ else if (synth_field_is_stack(type))
fmt = "%s";
return fmt;
@@ -238,23 +321,23 @@ static const char *synth_field_fmt(char *type)
static void print_synth_event_num_val(struct trace_seq *s,
char *print_fmt, char *name,
- int size, u64 val, char *space)
+ int size, union trace_synth_field *val, char *space)
{
switch (size) {
case 1:
- trace_seq_printf(s, print_fmt, name, (u8)val, space);
+ trace_seq_printf(s, print_fmt, name, val->as_u8, space);
break;
case 2:
- trace_seq_printf(s, print_fmt, name, (u16)val, space);
+ trace_seq_printf(s, print_fmt, name, val->as_u16, space);
break;
case 4:
- trace_seq_printf(s, print_fmt, name, (u32)val, space);
+ trace_seq_printf(s, print_fmt, name, val->as_u32, space);
break;
default:
- trace_seq_printf(s, print_fmt, name, val, space);
+ trace_seq_printf(s, print_fmt, name, val->as_u64, space);
break;
}
}
@@ -267,7 +350,7 @@ static enum print_line_t print_synth_event(struct trace_iterator *iter,
struct trace_seq *s = &iter->seq;
struct synth_trace_event *entry;
struct synth_event *se;
- unsigned int i, n_u64;
+ unsigned int i, j, n_u64;
char print_fmt[32];
const char *fmt;
@@ -290,10 +373,29 @@ static enum print_line_t print_synth_event(struct trace_iterator *iter,
/* parameter values */
if (se->fields[i]->is_string) {
- trace_seq_printf(s, print_fmt, se->fields[i]->name,
- (char *)&entry->fields[n_u64],
- i == se->n_fields - 1 ? "" : " ");
- n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
+ if (se->fields[i]->is_dynamic) {
+ union trace_synth_field *data = &entry->fields[n_u64];
+
+ trace_seq_printf(s, print_fmt, se->fields[i]->name,
+ STR_VAR_LEN_MAX,
+ (char *)entry + data->as_dynamic.offset,
+ i == se->n_fields - 1 ? "" : " ");
+ n_u64++;
+ } else {
+ trace_seq_printf(s, print_fmt, se->fields[i]->name,
+ STR_VAR_LEN_MAX,
+ (char *)&entry->fields[n_u64].as_u64,
+ i == se->n_fields - 1 ? "" : " ");
+ n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
+ }
+ } else if (se->fields[i]->is_stack) {
+ union trace_synth_field *data = &entry->fields[n_u64];
+ unsigned long *p = (void *)entry + data->as_dynamic.offset;
+
+ trace_seq_printf(s, "%s=STACK:\n", se->fields[i]->name);
+ for (j = 1; j < data->as_dynamic.len / sizeof(long); j++)
+ trace_seq_printf(s, "=> %pS\n", (void *)p[j]);
+ n_u64++;
} else {
struct trace_print_flags __flags[] = {
__def_gfpflag_names, {-1, NULL} };
@@ -302,13 +404,13 @@ static enum print_line_t print_synth_event(struct trace_iterator *iter,
print_synth_event_num_val(s, print_fmt,
se->fields[i]->name,
se->fields[i]->size,
- entry->fields[n_u64],
+ &entry->fields[n_u64],
space);
if (strcmp(se->fields[i]->type, "gfp_t") == 0) {
trace_seq_puts(s, " (");
trace_print_flags_seq(s, "|",
- entry->fields[n_u64],
+ entry->fields[n_u64].as_u64,
__flags);
trace_seq_putc(s, ')');
}
@@ -325,16 +427,91 @@ static struct trace_event_functions synth_event_funcs = {
.trace = print_synth_event
};
+static unsigned int trace_string(struct synth_trace_event *entry,
+ struct synth_event *event,
+ char *str_val,
+ bool is_dynamic,
+ unsigned int data_size,
+ unsigned int *n_u64)
+{
+ unsigned int len = 0;
+ char *str_field;
+ int ret;
+
+ if (is_dynamic) {
+ union trace_synth_field *data = &entry->fields[*n_u64];
+
+ len = fetch_store_strlen((unsigned long)str_val);
+ data->as_dynamic.offset = struct_size(entry, fields, event->n_u64) + data_size;
+ data->as_dynamic.len = len;
+
+ ret = fetch_store_string((unsigned long)str_val, &entry->fields[*n_u64], entry);
+
+ (*n_u64)++;
+ } else {
+ str_field = (char *)&entry->fields[*n_u64].as_u64;
+
+#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+ if ((unsigned long)str_val < TASK_SIZE)
+ ret = strncpy_from_user_nofault(str_field, (const void __user *)str_val, STR_VAR_LEN_MAX);
+ else
+#endif
+ ret = strncpy_from_kernel_nofault(str_field, str_val, STR_VAR_LEN_MAX);
+
+ if (ret < 0)
+ strcpy(str_field, FAULT_STRING);
+
+ (*n_u64) += STR_VAR_LEN_MAX / sizeof(u64);
+ }
+
+ return len;
+}
+
+static unsigned int trace_stack(struct synth_trace_event *entry,
+ struct synth_event *event,
+ long *stack,
+ unsigned int data_size,
+ unsigned int *n_u64)
+{
+ union trace_synth_field *data = &entry->fields[*n_u64];
+ unsigned int len;
+ u32 data_offset;
+ void *data_loc;
+
+ data_offset = struct_size(entry, fields, event->n_u64);
+ data_offset += data_size;
+
+ for (len = 0; len < HIST_STACKTRACE_DEPTH; len++) {
+ if (!stack[len])
+ break;
+ }
+
+ len *= sizeof(long);
+
+ /* Find the dynamic section to copy the stack into. */
+ data_loc = (void *)entry + data_offset;
+ memcpy(data_loc, stack, len);
+
+ /* Fill in the field that holds the offset/len combo */
+
+ data->as_dynamic.offset = data_offset;
+ data->as_dynamic.len = len;
+
+ (*n_u64)++;
+
+ return len;
+}
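
trace_string() and trace_stack() above append their payloads after the fixed fields[] array of the synthetic event and record an (offset, len) pair in the field's own slot; the offset is struct_size(entry, fields, event->n_u64) plus whatever dynamic data was already written. A userspace sketch of that record layout, with simplified stand-ins for the kernel structures (the pair is packed into one u64 here, unlike the kernel's union trace_synth_field):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define N_U64 2                 /* number of fixed 8-byte field slots */

    struct fake_entry {
            uint64_t fields[N_U64]; /* fixed part: one slot per field */
            char data[64];          /* dynamic part: strings/stacks appended here */
    };

    /* Append a string, record {offset, len} in the field slot. */
    static unsigned int add_string(struct fake_entry *e, int slot,
                                   const char *s, unsigned int data_size)
    {
            unsigned int len = strlen(s) + 1;
            unsigned int offset = sizeof(e->fields) + data_size;

            memcpy((char *)e + offset, s, len);
            e->fields[slot] = ((uint64_t)len << 32) | offset;
            return len;
    }

    int main(void)
    {
            struct fake_entry e = { 0 };
            unsigned int data_size = 0;

            data_size += add_string(&e, 0, "first", data_size);
            data_size += add_string(&e, 1, "second", data_size);

            for (int i = 0; i < N_U64; i++) {
                    uint32_t off = (uint32_t)e.fields[i];
                    printf("field %d: off=%u len=%u '%s'\n", i, off,
                           (uint32_t)(e.fields[i] >> 32), (char *)&e + off);
            }
            return 0;
    }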
+
static notrace void trace_event_raw_event_synth(void *__data,
u64 *var_ref_vals,
unsigned int *var_ref_idx)
{
+ unsigned int i, n_u64, val_idx, len, data_size = 0;
struct trace_event_file *trace_file = __data;
struct synth_trace_event *entry;
struct trace_event_buffer fbuffer;
struct trace_buffer *buffer;
struct synth_event *event;
- unsigned int i, n_u64, val_idx;
int fields_size = 0;
event = trace_file->event_call->data;
@@ -344,6 +521,24 @@ static notrace void trace_event_raw_event_synth(void *__data,
fields_size = event->n_u64 * sizeof(u64);
+ for (i = 0; i < event->n_dynamic_fields; i++) {
+ unsigned int field_pos = event->dynamic_fields[i]->field_pos;
+ char *str_val;
+
+ val_idx = var_ref_idx[field_pos];
+ str_val = (char *)(long)var_ref_vals[val_idx];
+
+ if (event->dynamic_fields[i]->is_stack) {
+ /* reserve one extra element for size */
+ len = *((unsigned long *)str_val) + 1;
+ len *= sizeof(unsigned long);
+ } else {
+ len = fetch_store_strlen((unsigned long)str_val);
+ }
+
+ fields_size += len;
+ }
+
/*
* Avoid ring buffer recursion detection, as this event
* is being performed within another event.
@@ -360,29 +555,36 @@ static notrace void trace_event_raw_event_synth(void *__data,
val_idx = var_ref_idx[i];
if (event->fields[i]->is_string) {
char *str_val = (char *)(long)var_ref_vals[val_idx];
- char *str_field = (char *)&entry->fields[n_u64];
- strscpy(str_field, str_val, STR_VAR_LEN_MAX);
- n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
+ len = trace_string(entry, event, str_val,
+ event->fields[i]->is_dynamic,
+ data_size, &n_u64);
+ data_size += len; /* only dynamic string increments */
+ } else if (event->fields[i]->is_stack) {
+ long *stack = (long *)(long)var_ref_vals[val_idx];
+
+ len = trace_stack(entry, event, stack,
+ data_size, &n_u64);
+ data_size += len;
} else {
struct synth_field *field = event->fields[i];
u64 val = var_ref_vals[val_idx];
switch (field->size) {
case 1:
- *(u8 *)&entry->fields[n_u64] = (u8)val;
+ entry->fields[n_u64].as_u8 = (u8)val;
break;
case 2:
- *(u16 *)&entry->fields[n_u64] = (u16)val;
+ entry->fields[n_u64].as_u16 = (u16)val;
break;
case 4:
- *(u32 *)&entry->fields[n_u64] = (u32)val;
+ entry->fields[n_u64].as_u32 = (u32)val;
break;
default:
- entry->fields[n_u64] = val;
+ entry->fields[n_u64].as_u64 = val;
break;
}
n_u64++;
@@ -422,8 +624,16 @@ static int __set_synth_event_print_fmt(struct synth_event *event,
pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
for (i = 0; i < event->n_fields; i++) {
- pos += snprintf(buf + pos, LEN_OR_ZERO,
- ", REC->%s", event->fields[i]->name);
+ if (event->fields[i]->is_string &&
+ event->fields[i]->is_dynamic)
+ pos += snprintf(buf + pos, LEN_OR_ZERO,
+ ", __get_str(%s)", event->fields[i]->name);
+ else if (event->fields[i]->is_stack)
+ pos += snprintf(buf + pos, LEN_OR_ZERO,
+ ", __get_stacktrace(%s)", event->fields[i]->name);
+ else
+ pos += snprintf(buf + pos, LEN_OR_ZERO,
+ ", REC->%s", event->fields[i]->name);
}
#undef LEN_OR_ZERO
@@ -459,28 +669,53 @@ static void free_synth_field(struct synth_field *field)
kfree(field);
}
-static struct synth_field *parse_synth_field(int argc, const char **argv,
- int *consumed)
+static int check_field_version(const char *prefix, const char *field_type,
+ const char *field_name)
{
- struct synth_field *field;
- const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
- int len, ret = 0;
+ /*
+ * For backward compatibility, the old synthetic event command
+ * format did not require semicolons, and in order to not
+ * break user space, that old format must still work. If a new
+ * feature is added, then the format that uses the new feature
+ * will be required to have semicolons, as nothing that uses
+ * the old format would be using the new, yet to be created,
+ * feature. When a new feature is added, this will detect it,
+ * and return a number greater than 1, and require the format
+ * to use semicolons.
+ */
+ return 1;
+}
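In practice this means both of the following spellings are accepted today (event and field names are illustrative), while any future field feature that returns a version greater than 1 would reject the semicolon-free form:

	/*
	 *   echo 'myevent u64 lat; pid_t pid' > synthetic_events
	 *   echo 'myevent u64 lat pid_t pid'  > synthetic_events
	 */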
- if (field_type[0] == ';')
- field_type++;
+static struct synth_field *parse_synth_field(int argc, char **argv,
+ int *consumed, int *field_version)
+{
+ const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
+ struct synth_field *field;
+ int len, ret = -ENOMEM;
+ struct seq_buf s;
+ ssize_t size;
if (!strcmp(field_type, "unsigned")) {
- if (argc < 3)
+ if (argc < 3) {
+ synth_err(SYNTH_ERR_INCOMPLETE_TYPE, errpos(field_type));
return ERR_PTR(-EINVAL);
+ }
prefix = "unsigned ";
field_type = argv[1];
field_name = argv[2];
- *consumed = 3;
+ *consumed += 3;
} else {
field_name = argv[1];
- *consumed = 2;
+ *consumed += 2;
+ }
+
+ if (!field_name) {
+ synth_err(SYNTH_ERR_INVALID_FIELD, errpos(field_type));
+ return ERR_PTR(-EINVAL);
}
+ *field_version = check_field_version(prefix, field_type, field_name);
+
field = kzalloc(sizeof(*field), GFP_KERNEL);
if (!field)
return ERR_PTR(-ENOMEM);
@@ -489,48 +724,85 @@ static struct synth_field *parse_synth_field(int argc, const char **argv,
array = strchr(field_name, '[');
if (array)
len -= strlen(array);
- else if (field_name[len - 1] == ';')
- len--;
field->name = kmemdup_nul(field_name, len, GFP_KERNEL);
- if (!field->name) {
- ret = -ENOMEM;
+ if (!field->name)
+ goto free;
+
+ if (!is_good_name(field->name)) {
+ synth_err(SYNTH_ERR_BAD_NAME, errpos(field_name));
+ ret = -EINVAL;
goto free;
}
- if (field_type[0] == ';')
- field_type++;
len = strlen(field_type) + 1;
+
if (array)
len += strlen(array);
+
if (prefix)
len += strlen(prefix);
field->type = kzalloc(len, GFP_KERNEL);
- if (!field->type) {
- ret = -ENOMEM;
+ if (!field->type)
goto free;
- }
+
+ seq_buf_init(&s, field->type, len);
if (prefix)
- strcat(field->type, prefix);
- strcat(field->type, field_type);
- if (array) {
- strcat(field->type, array);
- if (field->type[len - 1] == ';')
- field->type[len - 1] = '\0';
- }
+ seq_buf_puts(&s, prefix);
+ seq_buf_puts(&s, field_type);
+ if (array)
+ seq_buf_puts(&s, array);
+ if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
+ goto free;
+
+ s.buffer[s.len] = '\0';
- field->size = synth_field_size(field->type);
- if (!field->size) {
+ size = synth_field_size(field->type);
+ if (size < 0) {
+ if (array)
+ synth_err(SYNTH_ERR_INVALID_ARRAY_SPEC, errpos(field_name));
+ else
+ synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
ret = -EINVAL;
goto free;
+ } else if (size == 0) {
+ if (synth_field_is_string(field->type) ||
+ synth_field_is_stack(field->type)) {
+ char *type;
+
+ len = sizeof("__data_loc ") + strlen(field->type) + 1;
+ type = kzalloc(len, GFP_KERNEL);
+ if (!type)
+ goto free;
+
+ seq_buf_init(&s, type, len);
+ seq_buf_puts(&s, "__data_loc ");
+ seq_buf_puts(&s, field->type);
+
+ if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
+ goto free;
+ s.buffer[s.len] = '\0';
+
+ kfree(field->type);
+ field->type = type;
+
+ field->is_dynamic = true;
+ size = sizeof(u64);
+ } else {
+ synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
+ ret = -EINVAL;
+ goto free;
+ }
}
+ field->size = size;
if (synth_field_is_string(field->type))
field->is_string = true;
+ else if (synth_field_is_stack(field->type))
+ field->is_stack = true;
field->is_signed = synth_field_signed(field->type);
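The rewritten type makes the dynamic nature visible to the format-parsing code. As an illustration (the field name is hypothetical), a field declared as 'char[] msg' ends up with:

	/*
	 *   field->type       == "__data_loc char[]"
	 *   field->size       == sizeof(u64)
	 *   field->is_dynamic == true
	 */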
-
out:
return field;
free:
@@ -629,10 +901,9 @@ static int register_synth_event(struct synth_event *event)
}
ret = set_synth_event_print_fmt(call);
- if (ret < 0) {
+ /* unregister_trace_event() will be called inside */
+ if (ret < 0)
trace_remove_event_call(call);
- goto err;
- }
out:
return ret;
err:
@@ -661,6 +932,7 @@ static void free_synth_event(struct synth_event *event)
free_synth_field(event->fields[i]);
kfree(event->fields);
+ kfree(event->dynamic_fields);
kfree(event->name);
kfree(event->class.system);
free_synth_tracepoint(event->tp);
@@ -671,8 +943,8 @@ static void free_synth_event(struct synth_event *event)
static struct synth_event *alloc_synth_event(const char *name, int n_fields,
struct synth_field **fields)
{
+ unsigned int i, j, n_dynamic_fields = 0;
struct synth_event *event;
- unsigned int i;
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (!event) {
@@ -694,11 +966,31 @@ static struct synth_event *alloc_synth_event(const char *name, int n_fields,
goto out;
}
+ for (i = 0; i < n_fields; i++)
+ if (fields[i]->is_dynamic)
+ n_dynamic_fields++;
+
+ if (n_dynamic_fields) {
+ event->dynamic_fields = kcalloc(n_dynamic_fields,
+ sizeof(*event->dynamic_fields),
+ GFP_KERNEL);
+ if (!event->dynamic_fields) {
+ free_synth_event(event);
+ event = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+ }
+
dyn_event_init(&event->devent, &synth_event_ops);
- for (i = 0; i < n_fields; i++)
+ for (i = 0, j = 0; i < n_fields; i++) {
+ fields[i]->field_pos = i;
event->fields[i] = fields[i];
+ if (fields[i]->is_dynamic)
+ event->dynamic_fields[j++] = fields[i];
+ }
+ event->n_dynamic_fields = j;
event->n_fields = n_fields;
out:
return event;
@@ -710,6 +1002,10 @@ static int synth_event_check_arg_fn(void *data)
int size;
size = synth_field_size((char *)arg_pair->lhs);
+ if (size == 0) {
+ if (strstr((char *)arg_pair->lhs, "["))
+ return 0;
+ }
return size ? 0 : -EINVAL;
}
@@ -842,7 +1138,7 @@ EXPORT_SYMBOL_GPL(synth_event_add_fields);
* @cmd: A pointer to the dynevent_cmd struct representing the new event
* @name: The name of the synthetic event
* @mod: The module creating the event, NULL if not created from a module
- * @args: Variable number of arg (pairs), one pair for each field
+ * @...: Variable number of arg (pairs), one pair for each field
*
* NOTE: Users normally won't want to call this function directly, but
* rather use the synth_event_gen_cmd_start() wrapper, which
@@ -914,6 +1210,7 @@ EXPORT_SYMBOL_GPL(__synth_event_gen_cmd_start);
* synth_event_gen_cmd_array_start - Start synthetic event command from an array
* @cmd: A pointer to the dynevent_cmd struct representing the new event
* @name: The name of the synthetic event
+ * @mod: The module creating the event, NULL if not created from a module
* @fields: An array of type/name field descriptions
* @n_fields: The number of field descriptions contained in the fields array
*
@@ -971,11 +1268,13 @@ int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd, const char *name,
}
EXPORT_SYMBOL_GPL(synth_event_gen_cmd_array_start);
-static int __create_synth_event(int argc, const char *name, const char **argv)
+static int __create_synth_event(const char *name, const char *raw_fields)
{
+ char **argv, *field_str, *tmp_fields, *saved_fields = NULL;
struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
+ int consumed, cmd_version = 1, n_fields_this_loop;
+ int i, argc, n_fields = 0, ret = 0;
struct synth_event *event = NULL;
- int i, consumed = 0, n_fields = 0, ret = 0;
/*
* Argument syntax:
@@ -984,35 +1283,100 @@ static int __create_synth_event(int argc, const char *name, const char **argv)
* where 'field' = type field_name
*/
- if (name[0] == '\0' || argc < 1)
+ if (name[0] == '\0') {
+ synth_err(SYNTH_ERR_INVALID_CMD, 0);
return -EINVAL;
+ }
+
+ if (!is_good_name(name)) {
+ synth_err(SYNTH_ERR_BAD_NAME, errpos(name));
+ return -EINVAL;
+ }
mutex_lock(&event_mutex);
event = find_synth_event(name);
if (event) {
+ synth_err(SYNTH_ERR_EVENT_EXISTS, errpos(name));
ret = -EEXIST;
- goto out;
+ goto err;
}
- for (i = 0; i < argc - 1; i++) {
- if (strcmp(argv[i], ";") == 0)
- continue;
- if (n_fields == SYNTH_FIELDS_MAX) {
- ret = -EINVAL;
+ tmp_fields = saved_fields = kstrdup(raw_fields, GFP_KERNEL);
+ if (!tmp_fields) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ while ((field_str = strsep(&tmp_fields, ";")) != NULL) {
+ argv = argv_split(GFP_KERNEL, field_str, &argc);
+ if (!argv) {
+ ret = -ENOMEM;
goto err;
}
- field = parse_synth_field(argc - i, &argv[i], &consumed);
- if (IS_ERR(field)) {
- ret = PTR_ERR(field);
+ if (!argc) {
+ argv_free(argv);
+ continue;
+ }
+
+ n_fields_this_loop = 0;
+ consumed = 0;
+ while (argc > consumed) {
+ int field_version;
+
+ field = parse_synth_field(argc - consumed,
+ argv + consumed, &consumed,
+ &field_version);
+ if (IS_ERR(field)) {
+ ret = PTR_ERR(field);
+ goto err_free_arg;
+ }
+
+ /*
+ * Track the highest version of any field we
+ * found in the command.
+ */
+ if (field_version > cmd_version)
+ cmd_version = field_version;
+
+ /*
+ * Now sort out what is and isn't valid for
+ * each supported version.
+ *
+ * If we see more than 1 field per loop, it
+ * means we have multiple fields between
+ * semicolons, and that's something we no
+ * longer support in a version 2 or greater
+ * command.
+ */
+ if (cmd_version > 1 && n_fields_this_loop >= 1) {
+ synth_err(SYNTH_ERR_INVALID_CMD, errpos(field_str));
+ ret = -EINVAL;
+ goto err_free_arg;
+ }
+
+ if (n_fields == SYNTH_FIELDS_MAX) {
+ synth_err(SYNTH_ERR_TOO_MANY_FIELDS, 0);
+ ret = -EINVAL;
+ goto err_free_arg;
+ }
+ fields[n_fields++] = field;
+
+ n_fields_this_loop++;
+ }
+ argv_free(argv);
+
+ if (consumed < argc) {
+ synth_err(SYNTH_ERR_INVALID_CMD, 0);
+ ret = -EINVAL;
goto err;
}
- fields[n_fields++] = field;
- i += consumed - 1;
+
}
- if (i < argc && strcmp(argv[i], ";") != 0) {
+ if (n_fields == 0) {
+ synth_err(SYNTH_ERR_INVALID_CMD, 0);
ret = -EINVAL;
goto err;
}
@@ -1025,13 +1389,17 @@ static int __create_synth_event(int argc, const char *name, const char **argv)
}
ret = register_synth_event(event);
if (!ret)
- dyn_event_add(&event->devent);
+ dyn_event_add(&event->devent, &event->call);
else
free_synth_event(event);
out:
mutex_unlock(&event_mutex);
+ kfree(saved_fields);
+
return ret;
+ err_free_arg:
+ argv_free(argv);
err:
for (i = 0; i < n_fields; i++)
free_synth_field(fields[i]);
@@ -1041,7 +1409,7 @@ static int __create_synth_event(int argc, const char *name, const char **argv)
/**
* synth_event_create - Create a new synthetic event
- * @name: The name of the new sythetic event
+ * @name: The name of the new synthetic event
* @fields: An array of type/name field descriptions
* @n_fields: The number of field descriptions contained in the fields array
* @mod: The module creating the event, NULL if not created from a module
@@ -1094,13 +1462,15 @@ static int destroy_synth_event(struct synth_event *se)
int ret;
if (se->ref)
- ret = -EBUSY;
- else {
- ret = unregister_synth_event(se);
- if (!ret) {
- dyn_event_remove(&se->devent);
- free_synth_event(se);
- }
+ return -EBUSY;
+
+ if (trace_event_dyn_busy(&se->call))
+ return -EBUSY;
+
+ ret = unregister_synth_event(se);
+ if (!ret) {
+ dyn_event_remove(&se->devent);
+ free_synth_event(se);
}
return ret;
@@ -1108,7 +1478,7 @@ static int destroy_synth_event(struct synth_event *se)
/**
* synth_event_delete - Delete a synthetic event
- * @event_name: The name of the new sythetic event
+ * @event_name: The name of the new synthetic event
*
* Delete a synthetic event that was created with synth_event_create().
*
@@ -1129,7 +1499,6 @@ int synth_event_delete(const char *event_name)
mutex_unlock(&event_mutex);
if (mod) {
- mutex_lock(&trace_types_lock);
/*
* It is safest to reset the ring buffer if the module
* being unloaded registered any events that were
@@ -1141,26 +1510,85 @@ int synth_event_delete(const char *event_name)
* occur.
*/
tracing_reset_all_online_cpus();
- mutex_unlock(&trace_types_lock);
}
return ret;
}
EXPORT_SYMBOL_GPL(synth_event_delete);
-static int create_or_delete_synth_event(int argc, char **argv)
+static int check_command(const char *raw_command)
{
- const char *name = argv[0];
- int ret;
+ char **argv = NULL, *cmd, *saved_cmd, *name_and_field;
+ int argc, ret = 0;
+
+ cmd = saved_cmd = kstrdup(raw_command, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ name_and_field = strsep(&cmd, ";");
+ if (!name_and_field) {
+ ret = -EINVAL;
+ goto free;
+ }
+
+ if (name_and_field[0] == '!')
+ goto free;
+
+ argv = argv_split(GFP_KERNEL, name_and_field, &argc);
+ if (!argv) {
+ ret = -ENOMEM;
+ goto free;
+ }
+ argv_free(argv);
+
+ if (argc < 3)
+ ret = -EINVAL;
+free:
+ kfree(saved_cmd);
+
+ return ret;
+}
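A rough sketch of what this check accepts and rejects (event and field names hypothetical):

	/*
	 *   "myevent u64 lat; pid_t pid"  ->  0        (name plus at least one type/name pair)
	 *   "myevent"                     ->  -EINVAL  (fewer than three words before the first ';')
	 *   "!myevent"                    ->  0        (deletions are passed through unchecked)
	 */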
+
+static int create_or_delete_synth_event(const char *raw_command)
+{
+ char *name = NULL, *fields, *p;
+ int ret = 0;
+
+ raw_command = skip_spaces(raw_command);
+ if (raw_command[0] == '\0')
+ return ret;
+
+ last_cmd_set(raw_command);
+
+ ret = check_command(raw_command);
+ if (ret) {
+ synth_err(SYNTH_ERR_INVALID_CMD, 0);
+ return ret;
+ }
+
+ p = strpbrk(raw_command, " \t");
+ if (!p && raw_command[0] != '!') {
+ synth_err(SYNTH_ERR_INVALID_CMD, 0);
+ ret = -EINVAL;
+ goto free;
+ }
+
+ name = kmemdup_nul(raw_command, p ? p - raw_command : strlen(raw_command), GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
- /* trace_run_command() ensures argc != 0 */
if (name[0] == '!') {
ret = synth_event_delete(name + 1);
- return ret;
+ goto free;
}
- ret = __create_synth_event(argc - 1, name, (const char **)argv + 1);
- return ret == -ECANCELED ? -EINVAL : ret;
+ fields = skip_spaces(p);
+
+ ret = __create_synth_event(name, fields);
+free:
+ kfree(name);
+
+ return ret;
}
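For reference, this is the path exercised by the tracefs 'synthetic_events' file; a minimal usage sketch (event and field names illustrative):

	/*
	 *   echo 'wakeup_latency u64 lat; pid_t pid; int prio' > synthetic_events
	 *   echo '!wakeup_latency' > synthetic_events
	 */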
static int synth_event_run_command(struct dynevent_cmd *cmd)
@@ -1168,7 +1596,7 @@ static int synth_event_run_command(struct dynevent_cmd *cmd)
struct synth_event *se;
int ret;
- ret = trace_run_command(cmd->seq.buffer, create_or_delete_synth_event);
+ ret = create_or_delete_synth_event(cmd->seq.buffer);
if (ret)
return ret;
@@ -1198,10 +1626,9 @@ void synth_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
EXPORT_SYMBOL_GPL(synth_event_cmd_init);
static inline int
-__synth_event_trace_start(struct trace_event_file *file,
- struct synth_event_trace_state *trace_state)
+__synth_event_trace_init(struct trace_event_file *file,
+ struct synth_event_trace_state *trace_state)
{
- int entry_size, fields_size = 0;
int ret = 0;
memset(trace_state, '\0', sizeof(*trace_state));
@@ -1211,8 +1638,8 @@ __synth_event_trace_start(struct trace_event_file *file,
* ENABLED bit is set (which attaches the probe thus allowing
* this code to be called, etc). Because this is called
* directly by the user, we don't have that but we still need
- * to honor not logging when disabled. For the the iterated
- * trace case, we save the enabed state upon start and just
+ * to honor not logging when disabled. For the iterated
+ * trace case, we save the enabled state upon start and just
* ignore the following data calls.
*/
if (!(file->flags & EVENT_FILE_FL_ENABLED) ||
@@ -1223,8 +1650,20 @@ __synth_event_trace_start(struct trace_event_file *file,
}
trace_state->event = file->event_call->data;
+out:
+ return ret;
+}
+
+static inline int
+__synth_event_trace_start(struct trace_event_file *file,
+ struct synth_event_trace_state *trace_state,
+ int dynamic_fields_size)
+{
+ int entry_size, fields_size = 0;
+ int ret = 0;
fields_size = trace_state->event->n_u64 * sizeof(u64);
+ fields_size += dynamic_fields_size;
/*
* Avoid ring buffer recursion detection, as this event
@@ -1241,7 +1680,7 @@ __synth_event_trace_start(struct trace_event_file *file,
ring_buffer_nest_end(trace_state->buffer);
ret = -EINVAL;
}
-out:
+
return ret;
}
@@ -1257,7 +1696,7 @@ __synth_event_trace_end(struct synth_event_trace_state *trace_state)
* synth_event_trace - Trace a synthetic event
* @file: The trace_event_file representing the synthetic event
* @n_vals: The number of values in vals
- * @args: Variable number of args containing the event values
+ * @...: Variable number of args containing the event values
*
* Trace a synthetic event using the values passed in the variable
* argument list.
@@ -1274,23 +1713,46 @@ __synth_event_trace_end(struct synth_event_trace_state *trace_state)
*/
int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...)
{
+ unsigned int i, n_u64, len, data_size = 0;
struct synth_event_trace_state state;
- unsigned int i, n_u64;
va_list args;
int ret;
- ret = __synth_event_trace_start(file, &state);
+ ret = __synth_event_trace_init(file, &state);
if (ret) {
if (ret == -ENOENT)
ret = 0; /* just disabled, not really an error */
return ret;
}
+ if (state.event->n_dynamic_fields) {
+ va_start(args, n_vals);
+
+ for (i = 0; i < state.event->n_fields; i++) {
+ u64 val = va_arg(args, u64);
+
+ if (state.event->fields[i]->is_string &&
+ state.event->fields[i]->is_dynamic) {
+ char *str_val = (char *)(long)val;
+
+ data_size += strlen(str_val) + 1;
+ }
+ }
+
+ va_end(args);
+ }
+
+ ret = __synth_event_trace_start(file, &state, data_size);
+ if (ret)
+ return ret;
+
if (n_vals != state.event->n_fields) {
ret = -EINVAL;
goto out;
}
+ data_size = 0;
+
va_start(args, n_vals);
for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
u64 val;
@@ -1299,28 +1761,29 @@ int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...)
if (state.event->fields[i]->is_string) {
char *str_val = (char *)(long)val;
- char *str_field = (char *)&state.entry->fields[n_u64];
- strscpy(str_field, str_val, STR_VAR_LEN_MAX);
- n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
+ len = trace_string(state.entry, state.event, str_val,
+ state.event->fields[i]->is_dynamic,
+ data_size, &n_u64);
+ data_size += len; /* only dynamic string increments */
} else {
struct synth_field *field = state.event->fields[i];
switch (field->size) {
case 1:
- *(u8 *)&state.entry->fields[n_u64] = (u8)val;
+ state.entry->fields[n_u64].as_u8 = (u8)val;
break;
case 2:
- *(u16 *)&state.entry->fields[n_u64] = (u16)val;
+ state.entry->fields[n_u64].as_u16 = (u16)val;
break;
case 4:
- *(u32 *)&state.entry->fields[n_u64] = (u32)val;
+ state.entry->fields[n_u64].as_u32 = (u32)val;
break;
default:
- state.entry->fields[n_u64] = val;
+ state.entry->fields[n_u64].as_u64 = val;
break;
}
n_u64++;
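A minimal in-kernel call for an event with two static fields (field names and values hypothetical); every value is passed widened to u64:

	synth_event_trace(file, 2, (u64)lat, (u64)current->pid);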
@@ -1355,48 +1818,65 @@ EXPORT_SYMBOL_GPL(synth_event_trace);
int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
unsigned int n_vals)
{
+ unsigned int i, n_u64, field_pos, len, data_size = 0;
struct synth_event_trace_state state;
- unsigned int i, n_u64;
+ char *str_val;
int ret;
- ret = __synth_event_trace_start(file, &state);
+ ret = __synth_event_trace_init(file, &state);
if (ret) {
if (ret == -ENOENT)
ret = 0; /* just disabled, not really an error */
return ret;
}
+ if (state.event->n_dynamic_fields) {
+ for (i = 0; i < state.event->n_dynamic_fields; i++) {
+ field_pos = state.event->dynamic_fields[i]->field_pos;
+ str_val = (char *)(long)vals[field_pos];
+ len = strlen(str_val) + 1;
+ data_size += len;
+ }
+ }
+
+ ret = __synth_event_trace_start(file, &state, data_size);
+ if (ret)
+ return ret;
+
if (n_vals != state.event->n_fields) {
ret = -EINVAL;
goto out;
}
+ data_size = 0;
+
for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
if (state.event->fields[i]->is_string) {
char *str_val = (char *)(long)vals[i];
- char *str_field = (char *)&state.entry->fields[n_u64];
- strscpy(str_field, str_val, STR_VAR_LEN_MAX);
- n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
+ len = trace_string(state.entry, state.event, str_val,
+ state.event->fields[i]->is_dynamic,
+ data_size, &n_u64);
+ data_size += len; /* only dynamic string increments */
} else {
struct synth_field *field = state.event->fields[i];
u64 val = vals[i];
switch (field->size) {
case 1:
- *(u8 *)&state.entry->fields[n_u64] = (u8)val;
+ state.entry->fields[n_u64].as_u8 = (u8)val;
break;
case 2:
- *(u16 *)&state.entry->fields[n_u64] = (u16)val;
+ state.entry->fields[n_u64].as_u16 = (u16)val;
break;
case 4:
- *(u32 *)&state.entry->fields[n_u64] = (u32)val;
+ state.entry->fields[n_u64].as_u32 = (u32)val;
break;
default:
- state.entry->fields[n_u64] = val;
+ state.entry->fields[n_u64].as_u64 = val;
break;
}
n_u64++;
@@ -1445,9 +1925,17 @@ int synth_event_trace_start(struct trace_event_file *file,
if (!trace_state)
return -EINVAL;
- ret = __synth_event_trace_start(file, trace_state);
- if (ret == -ENOENT)
- ret = 0; /* just disabled, not really an error */
+ ret = __synth_event_trace_init(file, trace_state);
+ if (ret) {
+ if (ret == -ENOENT)
+ ret = 0; /* just disabled, not really an error */
+ return ret;
+ }
+
+ if (trace_state->event->n_dynamic_fields)
+ return -ENOTSUPP;
+
+ ret = __synth_event_trace_start(file, trace_state, 0);
return ret;
}
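For events without dynamic fields the piecewise API is still usable as before; a minimal in-kernel sketch under that assumption (values hypothetical):

	struct synth_event_trace_state state;

	if (!synth_event_trace_start(file, &state)) {
		synth_event_add_next_val(42, &state);		/* first field  */
		synth_event_add_next_val(current->pid, &state);	/* second field */
		synth_event_trace_end(&state);
	}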
@@ -1508,6 +1996,11 @@ static int __synth_event_add_val(const char *field_name, u64 val,
char *str_val = (char *)(long)val;
char *str_field;
+ if (field->is_dynamic) { /* add_val can't do dynamic strings */
+ ret = -EINVAL;
+ goto out;
+ }
+
if (!str_val) {
ret = -EINVAL;
goto out;
@@ -1518,19 +2011,19 @@ static int __synth_event_add_val(const char *field_name, u64 val,
} else {
switch (field->size) {
case 1:
- *(u8 *)&trace_state->entry->fields[field->offset] = (u8)val;
+ trace_state->entry->fields[field->offset].as_u8 = (u8)val;
break;
case 2:
- *(u16 *)&trace_state->entry->fields[field->offset] = (u16)val;
+ trace_state->entry->fields[field->offset].as_u16 = (u16)val;
break;
case 4:
- *(u32 *)&trace_state->entry->fields[field->offset] = (u32)val;
+ trace_state->entry->fields[field->offset].as_u32 = (u32)val;
break;
default:
- trace_state->entry->fields[field->offset] = val;
+ trace_state->entry->fields[field->offset].as_u64 = val;
break;
}
}
@@ -1576,7 +2069,7 @@ EXPORT_SYMBOL_GPL(synth_event_add_next_val);
/**
* synth_event_add_val - Add a named field's value to an open synth trace
* @field_name: The name of the synthetic event field value to set
- * @val: The value to set the next field to
+ * @val: The value to set the named field to
* @trace_state: A pointer to object tracking the piecewise trace state
*
* Set the value of the named field in an event that's been opened by
@@ -1639,23 +2132,60 @@ int synth_event_trace_end(struct synth_event_trace_state *trace_state)
}
EXPORT_SYMBOL_GPL(synth_event_trace_end);
-static int create_synth_event(int argc, const char **argv)
+static int create_synth_event(const char *raw_command)
{
- const char *name = argv[0];
- int len;
+ char *fields, *p;
+ const char *name;
+ int len, ret = 0;
+
+ raw_command = skip_spaces(raw_command);
+ if (raw_command[0] == '\0')
+ return ret;
+
+ last_cmd_set(raw_command);
+ name = raw_command;
+
+ /* Don't try to process if not our system */
if (name[0] != 's' || name[1] != ':')
return -ECANCELED;
name += 2;
+ p = strpbrk(raw_command, " \t");
+ if (!p) {
+ synth_err(SYNTH_ERR_INVALID_CMD, 0);
+ return -EINVAL;
+ }
+
+ fields = skip_spaces(p);
+
/* This interface accepts group name prefix */
if (strchr(name, '/')) {
len = str_has_prefix(name, SYNTH_SYSTEM "/");
- if (len == 0)
+ if (len == 0) {
+ synth_err(SYNTH_ERR_INVALID_DYN_CMD, 0);
return -EINVAL;
+ }
name += len;
}
- return __create_synth_event(argc - 1, name, argv + 1);
+
+ len = name - raw_command;
+
+ ret = check_command(raw_command + len);
+ if (ret) {
+ synth_err(SYNTH_ERR_INVALID_CMD, 0);
+ return ret;
+ }
+
+ name = kmemdup_nul(raw_command + len, p - raw_command - len, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ ret = __create_synth_event(name, fields);
+
+ kfree(name);
+
+ return ret;
}
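The same parsing is reachable from the 'dynamic_events' interface, where the command carries an 's:' prefix and optionally the synthetic group name; an illustrative sketch (names hypothetical):

	/*
	 *   echo 's:wakeup_latency u64 lat; pid_t pid;' >> dynamic_events
	 *   echo 's:synthetic/wakeup_latency2 u64 lat;' >> dynamic_events
	 */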
static int synth_event_release(struct dyn_event *ev)
@@ -1666,6 +2196,9 @@ static int synth_event_release(struct dyn_event *ev)
if (event->ref)
return -EBUSY;
+ if (trace_event_dyn_busy(&event->call))
+ return -EBUSY;
+
ret = unregister_synth_event(event);
if (ret)
return ret;
@@ -1679,14 +2212,22 @@ static int __synth_event_show(struct seq_file *m, struct synth_event *event)
{
struct synth_field *field;
unsigned int i;
+ char *type, *t;
seq_printf(m, "%s\t", event->name);
for (i = 0; i < event->n_fields; i++) {
field = event->fields[i];
+ type = field->type;
+ t = strstr(type, "__data_loc");
+ if (t) { /* __data_loc belongs in format but not event desc */
+ t += sizeof("__data_loc");
+ type = t;
+ }
+
/* parameter values */
- seq_printf(m, "%s %s%s", field->type, field->name,
+ seq_printf(m, "%s %s%s", type, field->name,
i == event->n_fields - 1 ? "" : "; ");
}
@@ -1754,26 +2295,32 @@ static const struct file_operations synth_events_fops = {
.release = seq_release,
};
-static __init int trace_events_synth_init(void)
+/*
+ * Register dynevent at core_initcall. This allows kernel to setup kprobe
+ * events in postcore_initcall without tracefs.
+ */
+static __init int trace_events_synth_init_early(void)
{
- struct dentry *entry = NULL;
- struct dentry *d_tracer;
int err = 0;
err = dyn_event_register(&synth_event_ops);
- if (err) {
+ if (err)
pr_warn("Could not register synth_event_ops\n");
- return err;
- }
- d_tracer = tracing_init_dentry();
- if (IS_ERR(d_tracer)) {
- err = PTR_ERR(d_tracer);
+ return err;
+}
+core_initcall(trace_events_synth_init_early);
+
+static __init int trace_events_synth_init(void)
+{
+ struct dentry *entry = NULL;
+ int err = 0;
+ err = tracing_init_dentry();
+ if (err)
goto err;
- }
- entry = tracefs_create_file("synthetic_events", 0644, d_tracer,
- NULL, &synth_events_fops);
+ entry = tracefs_create_file("synthetic_events", TRACE_MODE_WRITE,
+ NULL, NULL, &synth_events_fops);
if (!entry) {
err = -ENODEV;
goto err;
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index f725802160c0..b33c3861fbbb 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -31,7 +31,9 @@ void trigger_data_free(struct event_trigger_data *data)
/**
* event_triggers_call - Call triggers associated with a trace event
* @file: The trace_event_file associated with the event
+ * @buffer: The ring buffer that the event is being written to
* @rec: The trace entry for the event, NULL for unconditional invocation
+ * @event: The event meta data in the ring buffer
*
* For each trigger associated with an event, invoke the trigger
* function registered with the associated trigger command. If rec is
@@ -53,7 +55,8 @@ void trigger_data_free(struct event_trigger_data *data)
* any trigger that should be deferred, ETT_NONE if nothing to defer.
*/
enum event_trigger_type
-event_triggers_call(struct trace_event_file *file, void *rec,
+event_triggers_call(struct trace_event_file *file,
+ struct trace_buffer *buffer, void *rec,
struct ring_buffer_event *event)
{
struct event_trigger_data *data;
@@ -67,7 +70,7 @@ event_triggers_call(struct trace_event_file *file, void *rec,
if (data->paused)
continue;
if (!rec) {
- data->ops->func(data, rec, event);
+ data->ops->trigger(data, buffer, rec, event);
continue;
}
filter = rcu_dereference_sched(data->filter);
@@ -77,12 +80,26 @@ event_triggers_call(struct trace_event_file *file, void *rec,
tt |= data->cmd_ops->trigger_type;
continue;
}
- data->ops->func(data, rec, event);
+ data->ops->trigger(data, buffer, rec, event);
}
return tt;
}
EXPORT_SYMBOL_GPL(event_triggers_call);
+bool __trace_trigger_soft_disabled(struct trace_event_file *file)
+{
+ unsigned long eflags = file->flags;
+
+ if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
+ event_triggers_call(file, NULL, NULL, NULL);
+ if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
+ return true;
+ if (eflags & EVENT_FILE_FL_PID_FILTER)
+ return trace_event_ignore_this_pid(file);
+ return false;
+}
+EXPORT_SYMBOL_GPL(__trace_trigger_soft_disabled);
+
/**
* event_triggers_post_call - Call 'post_triggers' for a trace event
* @file: The trace_event_file associated with the event
@@ -105,7 +122,7 @@ event_triggers_post_call(struct trace_event_file *file,
if (data->paused)
continue;
if (data->cmd_ops->trigger_type & tt)
- data->ops->func(data, NULL, NULL);
+ data->ops->trigger(data, NULL, NULL, NULL);
}
}
EXPORT_SYMBOL_GPL(event_triggers_post_call);
@@ -123,6 +140,19 @@ static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
return seq_list_next(t, &event_file->triggers, pos);
}
+static bool check_user_trigger(struct trace_event_file *file)
+{
+ struct event_trigger_data *data;
+
+ list_for_each_entry_rcu(data, &file->triggers, list,
+ lockdep_is_held(&event_mutex)) {
+ if (data->flags & EVENT_TRIGGER_FL_PROBE)
+ continue;
+ return true;
+ }
+ return false;
+}
+
static void *trigger_start(struct seq_file *m, loff_t *pos)
{
struct trace_event_file *event_file;
@@ -133,7 +163,7 @@ static void *trigger_start(struct seq_file *m, loff_t *pos)
if (unlikely(!event_file))
return ERR_PTR(-ENODEV);
- if (list_empty(&event_file->triggers))
+ if (list_empty(&event_file->triggers) || !check_user_trigger(event_file))
return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;
return seq_list_start(&event_file->triggers, *pos);
@@ -161,7 +191,7 @@ static int trigger_show(struct seq_file *m, void *v)
}
data = list_entry(v, struct event_trigger_data, list);
- data->ops->print(m, data->ops, data);
+ data->ops->print(m, data);
return 0;
}
@@ -232,7 +262,7 @@ int trigger_process_regex(struct trace_event_file *file, char *buff)
mutex_lock(&trigger_cmd_mutex);
list_for_each_entry(p, &trigger_commands, list) {
if (strcmp(p->name, command) == 0) {
- ret = p->func(p, file, buff, command, next);
+ ret = p->parse(p, file, buff, command, next);
goto out_unlock;
}
}
@@ -405,7 +435,6 @@ event_trigger_print(const char *name, struct seq_file *m,
/**
* event_trigger_init - Generic event_trigger_ops @init implementation
- * @ops: The trigger ops associated with the trigger
* @data: Trigger-specific data
*
* Common implementation of event trigger initialization.
@@ -415,8 +444,7 @@ event_trigger_print(const char *name, struct seq_file *m,
*
* Return: 0 on success, errno otherwise
*/
-int event_trigger_init(struct event_trigger_ops *ops,
- struct event_trigger_data *data)
+int event_trigger_init(struct event_trigger_data *data)
{
data->ref++;
return 0;
@@ -424,7 +452,6 @@ int event_trigger_init(struct event_trigger_ops *ops,
/**
* event_trigger_free - Generic event_trigger_ops @free implementation
- * @ops: The trigger ops associated with the trigger
* @data: Trigger-specific data
*
* Common implementation of event trigger de-initialization.
@@ -433,8 +460,7 @@ int event_trigger_init(struct event_trigger_ops *ops,
* implementations.
*/
static void
-event_trigger_free(struct event_trigger_ops *ops,
- struct event_trigger_data *data)
+event_trigger_free(struct event_trigger_data *data)
{
if (WARN_ON_ONCE(data->ref <= 0))
return;
@@ -488,7 +514,7 @@ clear_event_triggers(struct trace_array *tr)
trace_event_trigger_enable_disable(file, 0);
list_del_rcu(&data->list);
if (data->ops->free)
- data->ops->free(data->ops, data);
+ data->ops->free(data);
}
}
}
@@ -527,7 +553,6 @@ void update_cond_flag(struct trace_event_file *file)
/**
* register_trigger - Generic event_command @reg implementation
* @glob: The raw string used to register the trigger
- * @ops: The trigger ops associated with the trigger
* @data: Trigger-specific data to associate with the trigger
* @file: The trace_event_file associated with the event
*
@@ -538,7 +563,7 @@ void update_cond_flag(struct trace_event_file *file)
*
* Return: 0 on success, errno otherwise
*/
-static int register_trigger(char *glob, struct event_trigger_ops *ops,
+static int register_trigger(char *glob,
struct event_trigger_data *data,
struct trace_event_file *file)
{
@@ -555,19 +580,18 @@ static int register_trigger(char *glob, struct event_trigger_ops *ops,
}
if (data->ops->init) {
- ret = data->ops->init(data->ops, data);
+ ret = data->ops->init(data);
if (ret < 0)
goto out;
}
list_add_rcu(&data->list, &file->triggers);
- ret++;
update_cond_flag(file);
- if (trace_event_trigger_enable_disable(file, 1) < 0) {
+ ret = trace_event_trigger_enable_disable(file, 1);
+ if (ret < 0) {
list_del_rcu(&data->list);
update_cond_flag(file);
- ret--;
}
out:
return ret;
@@ -576,7 +600,6 @@ out:
/**
* unregister_trigger - Generic event_command @unreg implementation
* @glob: The raw string used to register the trigger
- * @ops: The trigger ops associated with the trigger
* @test: Trigger-specific data used to find the trigger to remove
* @file: The trace_event_file associated with the event
*
@@ -585,18 +608,17 @@ out:
* Usually used directly as the @unreg method in event command
* implementations.
*/
-static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
+static void unregister_trigger(char *glob,
struct event_trigger_data *test,
struct trace_event_file *file)
{
- struct event_trigger_data *data;
- bool unregistered = false;
+ struct event_trigger_data *data = NULL, *iter;
lockdep_assert_held(&event_mutex);
- list_for_each_entry(data, &file->triggers, list) {
- if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
- unregistered = true;
+ list_for_each_entry(iter, &file->triggers, list) {
+ if (iter->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
+ data = iter;
list_del_rcu(&data->list);
trace_event_trigger_enable_disable(file, 0);
update_cond_flag(file);
@@ -604,117 +626,407 @@ static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
}
}
- if (unregistered && data->ops->free)
- data->ops->free(data->ops, data);
+ if (data && data->ops->free)
+ data->ops->free(data);
}
+/*
+ * Event trigger parsing helper functions.
+ *
+ * These functions help make it easier to write an event trigger
+ * parsing function i.e. the struct event_command.parse() callback
+ * function responsible for parsing and registering a trigger command
+ * written to the 'trigger' file.
+ *
+ * A trigger command (or just 'trigger' for short) takes the form:
+ * [trigger] [if filter]
+ *
+ * The struct event_command.parse() callback (and other struct
+ * event_command functions) refer to several components of a trigger
+ * command. Those same components are referenced by the event trigger
+ * parsing helper functions defined below. These components are:
+ *
+ * cmd - the trigger command name
+ * glob - the trigger command name optionally prefaced with '!'
+ * param_and_filter - text following cmd and ':'
+ * param - text following cmd and ':' and stripped of filter
+ * filter - the optional filter text following (and including) 'if'
+ *
+ * To illustrate the use of these components, here are some concrete
+ * examples. For the following triggers:
+ *
+ * echo 'traceon:5 if pid == 0' > trigger
+ * - 'traceon' is both cmd and glob
+ * - '5 if pid == 0' is the param_and_filter
+ * - '5' is the param
+ * - 'if pid == 0' is the filter
+ *
+ * echo 'enable_event:sys:event:n' > trigger
+ * - 'enable_event' is both cmd and glob
+ * - 'sys:event:n' is the param_and_filter
+ * - 'sys:event:n' is the param
+ * - there is no filter
+ *
+ * echo 'hist:keys=pid if prio > 50' > trigger
+ * - 'hist' is both cmd and glob
+ * - 'keys=pid if prio > 50' is the param_and_filter
+ * - 'keys=pid' is the param
+ * - 'if prio > 50' is the filter
+ *
+ * echo '!enable_event:sys:event:n' > trigger
+ * - 'enable_event' the cmd
+ * - '!enable_event' is the glob
+ * - 'sys:event:n' is the param_and_filter
+ * - 'sys:event:n' is the param
+ * - there is no filter
+ *
+ * echo 'traceoff' > trigger
+ * - 'traceoff' is both cmd and glob
+ * - there is no param_and_filter
+ * - there is no param
+ * - there is no filter
+ *
+ * There are a few different categories of event trigger covered by
+ * these helpers:
+ *
+ * - triggers that don't require a parameter e.g. traceon
+ * - triggers that do require a parameter e.g. enable_event and hist
+ * - triggers that though they may not require a param may support an
+ * optional 'n' param (n = number of times the trigger should fire)
+ * e.g.: traceon:5 or enable_event:sys:event:n
+ * - triggers that do not support an 'n' param e.g. hist
+ *
+ * These functions can be used or ignored as necessary - it all
+ * depends on the complexity of the trigger, and the granularity of
+ * the functions supported reflects the fact that some implementations
+ * may need to customize certain aspects of their implementations and
+ * won't need certain functions. For instance, the hist trigger
+ * implementation doesn't use event_trigger_separate_filter() because
+ * it has special requirements for handling the filter.
+ */
+
/**
- * event_trigger_callback - Generic event_command @func implementation
- * @cmd_ops: The command ops, used for trigger registration
- * @file: The trace_event_file associated with the event
- * @glob: The raw string used to register the trigger
- * @cmd: The cmd portion of the string used to register the trigger
- * @param: The params portion of the string used to register the trigger
+ * event_trigger_check_remove - check whether an event trigger specifies remove
+ * @glob: The trigger command string, with optional remove(!) operator
*
- * Common implementation for event command parsing and trigger
- * instantiation.
+ * The event trigger callback implementations pass in 'glob' as a
+ * parameter. This is the command name either with or without a
+ * remove(!) operator. This function simply parses the glob and
+ * determines whether the command corresponds to a trigger removal or
+ * a trigger addition.
*
- * Usually used directly as the @func method in event command
- * implementations.
+ * Return: true if this is a remove command, false otherwise
+ */
+bool event_trigger_check_remove(const char *glob)
+{
+ return (glob && glob[0] == '!') ? true : false;
+}
+
+/**
+ * event_trigger_empty_param - check whether the param is empty
+ * @param: The trigger param string
+ *
+ * The event trigger callback implementations pass in 'param' as a
+ * parameter. This corresponds to the string following the command
+ * name minus the command name. This function can be called by a
+ * callback implementation for any command that requires a param; a
+ * callback that doesn't require a param can ignore it.
+ *
+ * Return: true if this is an empty param, false otherwise
+ */
+bool event_trigger_empty_param(const char *param)
+{
+ return !param;
+}
+
+/**
+ * event_trigger_separate_filter - separate an event trigger from a filter
+ * @param_and_filter: String containing trigger and possibly filter
+ * @param: outparam, will be filled with a pointer to the trigger
+ * @filter: outparam, will be filled with a pointer to the filter
+ * @param_required: Specifies whether or not the param string is required
+ *
+ * Given a param string of the form '[trigger] [if filter]', this
+ * function separates the filter from the trigger and returns the
+ * trigger in @param and the filter in @filter. Either the @param
+ * or the @filter may be set to NULL by this function - if not set to
+ * NULL, they will contain strings corresponding to the trigger and
+ * filter.
+ *
+ * There are two cases that need to be handled with respect to the
+ * passed-in param: either the param is required, or it is not
+ * required. If @param_required is set, and there's no param, it will
+ * return -EINVAL. If @param_required is not set and there's a param
+ * that starts with a number, that corresponds to the case of a
+ * trigger with :n (n = number of times the trigger should fire) and
+ * the parsing continues normally; otherwise the function just returns
+ * and assumes param just contains a filter and there's nothing else
+ * to do.
*
* Return: 0 on success, errno otherwise
*/
-static int
-event_trigger_callback(struct event_command *cmd_ops,
- struct trace_event_file *file,
- char *glob, char *cmd, char *param)
+int event_trigger_separate_filter(char *param_and_filter, char **param,
+ char **filter, bool param_required)
{
- struct event_trigger_data *trigger_data;
- struct event_trigger_ops *trigger_ops;
- char *trigger = NULL;
- char *number;
- int ret;
+ int ret = 0;
- /* separate the trigger from the filter (t:n [if filter]) */
- if (param && isdigit(param[0])) {
- trigger = strsep(&param, " \t");
- if (param) {
- param = skip_spaces(param);
- if (!*param)
- param = NULL;
- }
+ *param = *filter = NULL;
+
+ if (!param_and_filter) {
+ if (param_required)
+ ret = -EINVAL;
+ goto out;
}
- trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
+ /*
+ * Here we check for an optional param. The only legal
+ * optional param is :n, and if that's the case, continue
+ * below. Otherwise we assume what's left is a filter and
+ * return it as the filter string for the caller to deal with.
+ */
+ if (!param_required && param_and_filter && !isdigit(param_and_filter[0])) {
+ *filter = param_and_filter;
+ goto out;
+ }
+
+ /*
+ * Separate the param from the filter (param [if filter]).
+ * Here we have either an optional :n param or a required
+ * param and an optional filter.
+ */
+ *param = strsep(&param_and_filter, " \t");
+
+ /*
+ * Here we have a filter, though it may be empty.
+ */
+ if (param_and_filter) {
+ *filter = skip_spaces(param_and_filter);
+ if (!**filter)
+ *filter = NULL;
+ }
+out:
+ return ret;
+}
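A quick sketch of the resulting split for a few hypothetical inputs (param_required is false unless noted):

	/*
	 *   "5 if pid == 0"   ->  param = "5",  filter = "if pid == 0"
	 *   "if prio > 50"    ->  param = NULL, filter = "if prio > 50"
	 *   NULL (required)   ->  -EINVAL
	 */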
+
+/**
+ * event_trigger_alloc - allocate and init event_trigger_data for a trigger
+ * @cmd_ops: The event_command operations for the trigger
+ * @cmd: The cmd string
+ * @param: The param string
+ * @private_data: User data to associate with the event trigger
+ *
+ * Allocate an event_trigger_data instance and initialize it. The
+ * @cmd_ops are used along with the @cmd and @param to get the
+ * trigger_ops to assign to the event_trigger_data. @private_data can
+ * also be passed in and associated with the event_trigger_data.
+ *
+ * Use event_trigger_free() to free an event_trigger_data object.
+ *
+ * Return: The trigger_data object on success, NULL otherwise
+ */
+struct event_trigger_data *event_trigger_alloc(struct event_command *cmd_ops,
+ char *cmd,
+ char *param,
+ void *private_data)
+{
+ struct event_trigger_data *trigger_data;
+ struct event_trigger_ops *trigger_ops;
+
+ trigger_ops = cmd_ops->get_trigger_ops(cmd, param);
- ret = -ENOMEM;
trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
if (!trigger_data)
- goto out;
+ return NULL;
trigger_data->count = -1;
trigger_data->ops = trigger_ops;
trigger_data->cmd_ops = cmd_ops;
- trigger_data->private_data = file;
+ trigger_data->private_data = private_data;
+
INIT_LIST_HEAD(&trigger_data->list);
INIT_LIST_HEAD(&trigger_data->named_list);
+ RCU_INIT_POINTER(trigger_data->filter, NULL);
- if (glob[0] == '!') {
- cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
- kfree(trigger_data);
- ret = 0;
- goto out;
- }
+ return trigger_data;
+}
+
+/**
+ * event_trigger_parse_num - parse and return the number param for a trigger
+ * @param: The param string
+ * @trigger_data: The trigger_data for the trigger
+ *
+ * Parse the :n (n = number of times the trigger should fire) param
+ * and set the count variable in the trigger_data to the parsed count.
+ *
+ * Return: 0 on success, errno otherwise
+ */
+int event_trigger_parse_num(char *param,
+ struct event_trigger_data *trigger_data)
+{
+ char *number;
+ int ret = 0;
- if (trigger) {
- number = strsep(&trigger, ":");
+ if (param) {
+ number = strsep(&param, ":");
- ret = -EINVAL;
if (!strlen(number))
- goto out_free;
+ return -EINVAL;
/*
* We use the callback data field (which is a pointer)
* as our counter.
*/
ret = kstrtoul(number, 0, &trigger_data->count);
- if (ret)
- goto out_free;
}
- if (!param) /* if param is non-empty, it's supposed to be a filter */
- goto out_reg;
+ return ret;
+}
+
+/**
+ * event_trigger_set_filter - set an event trigger's filter
+ * @cmd_ops: The event_command operations for the trigger
+ * @file: The event file for the trigger's event
+ * @param: The string containing the filter
+ * @trigger_data: The trigger_data for the trigger
+ *
+ * Set the filter for the trigger. If the filter is NULL, just return
+ * without error.
+ *
+ * Return: 0 on success, errno otherwise
+ */
+int event_trigger_set_filter(struct event_command *cmd_ops,
+ struct trace_event_file *file,
+ char *param,
+ struct event_trigger_data *trigger_data)
+{
+ if (param && cmd_ops->set_filter)
+ return cmd_ops->set_filter(param, trigger_data, file);
+
+ return 0;
+}
+
+/**
+ * event_trigger_reset_filter - reset an event trigger's filter
+ * @cmd_ops: The event_command operations for the trigger
+ * @trigger_data: The trigger_data for the trigger
+ *
+ * Reset the filter for the trigger to no filter.
+ */
+void event_trigger_reset_filter(struct event_command *cmd_ops,
+ struct event_trigger_data *trigger_data)
+{
+ if (cmd_ops->set_filter)
+ cmd_ops->set_filter(NULL, trigger_data, NULL);
+}
+
+/**
+ * event_trigger_register - register an event trigger
+ * @cmd_ops: The event_command operations for the trigger
+ * @file: The event file for the trigger's event
+ * @glob: The trigger command string, with optional remove(!) operator
+ * @trigger_data: The trigger_data for the trigger
+ *
+ * Register an event trigger. The @cmd_ops are used to call the
+ * cmd_ops->reg() function which actually does the registration.
+ *
+ * Return: 0 on success, errno otherwise
+ */
+int event_trigger_register(struct event_command *cmd_ops,
+ struct trace_event_file *file,
+ char *glob,
+ struct event_trigger_data *trigger_data)
+{
+ return cmd_ops->reg(glob, trigger_data, file);
+}
+
+/**
+ * event_trigger_unregister - unregister an event trigger
+ * @cmd_ops: The event_command operations for the trigger
+ * @file: The event file for the trigger's event
+ * @glob: The trigger command string, with optional remove(!) operator
+ * @trigger_data: The trigger_data for the trigger
+ *
+ * Unregister an event trigger. The @cmd_ops are used to call the
+ * cmd_ops->unreg() function which actually does the unregistration.
+ */
+void event_trigger_unregister(struct event_command *cmd_ops,
+ struct trace_event_file *file,
+ char *glob,
+ struct event_trigger_data *trigger_data)
+{
+ cmd_ops->unreg(glob, trigger_data, file);
+}
+
+/*
+ * End event trigger parsing helper functions.
+ */
+
+/**
+ * event_trigger_parse - Generic event_command @parse implementation
+ * @cmd_ops: The command ops, used for trigger registration
+ * @file: The trace_event_file associated with the event
+ * @glob: The raw string used to register the trigger
+ * @cmd: The cmd portion of the string used to register the trigger
+ * @param_and_filter: The param and filter portion of the string used to register the trigger
+ *
+ * Common implementation for event command parsing and trigger
+ * instantiation.
+ *
+ * Usually used directly as the @parse method in event command
+ * implementations.
+ *
+ * Return: 0 on success, errno otherwise
+ */
+static int
+event_trigger_parse(struct event_command *cmd_ops,
+ struct trace_event_file *file,
+ char *glob, char *cmd, char *param_and_filter)
+{
+ struct event_trigger_data *trigger_data;
+ char *param, *filter;
+ bool remove;
+ int ret;
+
+ remove = event_trigger_check_remove(glob);
+
+ ret = event_trigger_separate_filter(param_and_filter, &param, &filter, false);
+ if (ret)
+ return ret;
+
+ ret = -ENOMEM;
+ trigger_data = event_trigger_alloc(cmd_ops, cmd, param, file);
+ if (!trigger_data)
+ goto out;
+
+ if (remove) {
+ event_trigger_unregister(cmd_ops, file, glob+1, trigger_data);
+ kfree(trigger_data);
+ ret = 0;
+ goto out;
+ }
- if (!cmd_ops->set_filter)
- goto out_reg;
+ ret = event_trigger_parse_num(param, trigger_data);
+ if (ret)
+ goto out_free;
- ret = cmd_ops->set_filter(param, trigger_data, file);
+ ret = event_trigger_set_filter(cmd_ops, file, filter, trigger_data);
if (ret < 0)
goto out_free;
- out_reg:
/* Up the trigger_data count to make sure reg doesn't free it on failure */
- event_trigger_init(trigger_ops, trigger_data);
- ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
- /*
- * The above returns on success the # of functions enabled,
- * but if it didn't find any functions it returns zero.
- * Consider no functions a failure too.
- */
- if (!ret) {
- cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
- ret = -ENOENT;
- } else if (ret > 0)
- ret = 0;
+ event_trigger_init(trigger_data);
+
+ ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
+ if (ret)
+ goto out_free;
/* Down the counter of trigger_data or free it if not used anymore */
- event_trigger_free(trigger_ops, trigger_data);
+ event_trigger_free(trigger_data);
out:
return ret;
out_free:
- if (cmd_ops->set_filter)
- cmd_ops->set_filter(NULL, trigger_data, NULL);
+ event_trigger_reset_filter(cmd_ops, trigger_data);
kfree(trigger_data);
goto out;
}
@@ -757,7 +1069,14 @@ int set_trigger_filter(char *filter_str,
/* The filter is for the 'trigger' event, not the triggered event */
ret = create_event_filter(file->tr, file->event_call,
- filter_str, false, &filter);
+ filter_str, true, &filter);
+
+ /* Only enabled set_str for error handling */
+ if (filter) {
+ kfree(filter->filter_string);
+ filter->filter_string = NULL;
+ }
+
/*
* If create_event_filter() fails, filter still needs to be freed.
* Which the calling code will do with data->filter.
@@ -768,8 +1087,14 @@ int set_trigger_filter(char *filter_str,
rcu_assign_pointer(data->filter, filter);
if (tmp) {
- /* Make sure the call is done with the filter */
- tracepoint_synchronize_unregister();
+ /*
+ * Make sure the call is done with the filter.
+ * It is possible that a filter could fail at boot up,
+ * and then this path will be called. Avoid the synchronization
+ * in that case.
+ */
+ if (system_state != SYSTEM_BOOTING)
+ tracepoint_synchronize_unregister();
free_event_filter(tmp);
}
@@ -915,7 +1240,8 @@ void unpause_named_trigger(struct event_trigger_data *data)
/**
* set_named_trigger_data - Associate common named trigger data
- * @data: The trigger data of a named trigger to unpause
+ * @data: The trigger data to associate
+ * @named_data: The common named trigger to be associated
*
* Named triggers are sets of triggers that share a common set of
* trigger data. The first named trigger registered with a given name
@@ -937,9 +1263,20 @@ get_named_trigger_data(struct event_trigger_data *data)
}
static void
-traceon_trigger(struct event_trigger_data *data, void *rec,
+traceon_trigger(struct event_trigger_data *data,
+ struct trace_buffer *buffer, void *rec,
struct ring_buffer_event *event)
{
+ struct trace_event_file *file = data->private_data;
+
+ if (file) {
+ if (tracer_tracing_is_on(file->tr))
+ return;
+
+ tracer_tracing_on(file->tr);
+ return;
+ }
+
if (tracing_is_on())
return;
@@ -947,11 +1284,19 @@ traceon_trigger(struct event_trigger_data *data, void *rec,
}
static void
-traceon_count_trigger(struct event_trigger_data *data, void *rec,
+traceon_count_trigger(struct event_trigger_data *data,
+ struct trace_buffer *buffer, void *rec,
struct ring_buffer_event *event)
{
- if (tracing_is_on())
- return;
+ struct trace_event_file *file = data->private_data;
+
+ if (file) {
+ if (tracer_tracing_is_on(file->tr))
+ return;
+ } else {
+ if (tracing_is_on())
+ return;
+ }
if (!data->count)
return;
@@ -959,13 +1304,27 @@ traceon_count_trigger(struct event_trigger_data *data, void *rec,
if (data->count != -1)
(data->count)--;
- tracing_on();
+ if (file)
+ tracer_tracing_on(file->tr);
+ else
+ tracing_on();
}
static void
-traceoff_trigger(struct event_trigger_data *data, void *rec,
+traceoff_trigger(struct event_trigger_data *data,
+ struct trace_buffer *buffer, void *rec,
struct ring_buffer_event *event)
{
+ struct trace_event_file *file = data->private_data;
+
+ if (file) {
+ if (!tracer_tracing_is_on(file->tr))
+ return;
+
+ tracer_tracing_off(file->tr);
+ return;
+ }
+
if (!tracing_is_on())
return;
@@ -973,11 +1332,19 @@ traceoff_trigger(struct event_trigger_data *data, void *rec,
}
static void
-traceoff_count_trigger(struct event_trigger_data *data, void *rec,
+traceoff_count_trigger(struct event_trigger_data *data,
+ struct trace_buffer *buffer, void *rec,
struct ring_buffer_event *event)
{
- if (!tracing_is_on())
- return;
+ struct trace_event_file *file = data->private_data;
+
+ if (file) {
+ if (!tracer_tracing_is_on(file->tr))
+ return;
+ } else {
+ if (!tracing_is_on())
+ return;
+ }
if (!data->count)
return;
@@ -985,48 +1352,49 @@ traceoff_count_trigger(struct event_trigger_data *data, void *rec,
if (data->count != -1)
(data->count)--;
- tracing_off();
+ if (file)
+ tracer_tracing_off(file->tr);
+ else
+ tracing_off();
}
static int
-traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
- struct event_trigger_data *data)
+traceon_trigger_print(struct seq_file *m, struct event_trigger_data *data)
{
return event_trigger_print("traceon", m, (void *)data->count,
data->filter_str);
}
static int
-traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
- struct event_trigger_data *data)
+traceoff_trigger_print(struct seq_file *m, struct event_trigger_data *data)
{
return event_trigger_print("traceoff", m, (void *)data->count,
data->filter_str);
}
static struct event_trigger_ops traceon_trigger_ops = {
- .func = traceon_trigger,
+ .trigger = traceon_trigger,
.print = traceon_trigger_print,
.init = event_trigger_init,
.free = event_trigger_free,
};
static struct event_trigger_ops traceon_count_trigger_ops = {
- .func = traceon_count_trigger,
+ .trigger = traceon_count_trigger,
.print = traceon_trigger_print,
.init = event_trigger_init,
.free = event_trigger_free,
};
static struct event_trigger_ops traceoff_trigger_ops = {
- .func = traceoff_trigger,
+ .trigger = traceoff_trigger,
.print = traceoff_trigger_print,
.init = event_trigger_init,
.free = event_trigger_free,
};
static struct event_trigger_ops traceoff_count_trigger_ops = {
- .func = traceoff_count_trigger,
+ .trigger = traceoff_count_trigger,
.print = traceoff_trigger_print,
.init = event_trigger_init,
.free = event_trigger_free,
@@ -1051,7 +1419,7 @@ onoff_get_trigger_ops(char *cmd, char *param)
static struct event_command trigger_traceon_cmd = {
.name = "traceon",
.trigger_type = ETT_TRACE_ONOFF,
- .func = event_trigger_callback,
+ .parse = event_trigger_parse,
.reg = register_trigger,
.unreg = unregister_trigger,
.get_trigger_ops = onoff_get_trigger_ops,
@@ -1062,7 +1430,7 @@ static struct event_command trigger_traceoff_cmd = {
.name = "traceoff",
.trigger_type = ETT_TRACE_ONOFF,
.flags = EVENT_CMD_FL_POST_TRIGGER,
- .func = event_trigger_callback,
+ .parse = event_trigger_parse,
.reg = register_trigger,
.unreg = unregister_trigger,
.get_trigger_ops = onoff_get_trigger_ops,
@@ -1071,7 +1439,8 @@ static struct event_command trigger_traceoff_cmd = {
#ifdef CONFIG_TRACER_SNAPSHOT
static void
-snapshot_trigger(struct event_trigger_data *data, void *rec,
+snapshot_trigger(struct event_trigger_data *data,
+ struct trace_buffer *buffer, void *rec,
struct ring_buffer_event *event)
{
struct trace_event_file *file = data->private_data;
@@ -1083,7 +1452,8 @@ snapshot_trigger(struct event_trigger_data *data, void *rec,
}
static void
-snapshot_count_trigger(struct event_trigger_data *data, void *rec,
+snapshot_count_trigger(struct event_trigger_data *data,
+ struct trace_buffer *buffer, void *rec,
struct ring_buffer_event *event)
{
if (!data->count)
@@ -1092,37 +1462,38 @@ snapshot_count_trigger(struct event_trigger_data *data, void *rec,
if (data->count != -1)
(data->count)--;
- snapshot_trigger(data, rec, event);
+ snapshot_trigger(data, buffer, rec, event);
}
static int
-register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
+register_snapshot_trigger(char *glob,
struct event_trigger_data *data,
struct trace_event_file *file)
{
- if (tracing_alloc_snapshot_instance(file->tr) != 0)
- return 0;
+ int ret = tracing_alloc_snapshot_instance(file->tr);
- return register_trigger(glob, ops, data, file);
+ if (ret < 0)
+ return ret;
+
+ return register_trigger(glob, data, file);
}
static int
-snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
- struct event_trigger_data *data)
+snapshot_trigger_print(struct seq_file *m, struct event_trigger_data *data)
{
return event_trigger_print("snapshot", m, (void *)data->count,
data->filter_str);
}
static struct event_trigger_ops snapshot_trigger_ops = {
- .func = snapshot_trigger,
+ .trigger = snapshot_trigger,
.print = snapshot_trigger_print,
.init = event_trigger_init,
.free = event_trigger_free,
};
static struct event_trigger_ops snapshot_count_trigger_ops = {
- .func = snapshot_count_trigger,
+ .trigger = snapshot_count_trigger,
.print = snapshot_trigger_print,
.init = event_trigger_init,
.free = event_trigger_free,
@@ -1137,7 +1508,7 @@ snapshot_get_trigger_ops(char *cmd, char *param)
static struct event_command trigger_snapshot_cmd = {
.name = "snapshot",
.trigger_type = ETT_SNAPSHOT,
- .func = event_trigger_callback,
+ .parse = event_trigger_parse,
.reg = register_snapshot_trigger,
.unreg = unregister_trigger,
.get_trigger_ops = snapshot_get_trigger_ops,
@@ -1176,14 +1547,21 @@ static __init int register_trigger_snapshot_cmd(void) { return 0; }
#endif
static void
-stacktrace_trigger(struct event_trigger_data *data, void *rec,
+stacktrace_trigger(struct event_trigger_data *data,
+ struct trace_buffer *buffer, void *rec,
struct ring_buffer_event *event)
{
- trace_dump_stack(STACK_SKIP);
+ struct trace_event_file *file = data->private_data;
+
+ if (file)
+ __trace_stack(file->tr, tracing_gen_ctx(), STACK_SKIP);
+ else
+ trace_dump_stack(STACK_SKIP);
}
static void
-stacktrace_count_trigger(struct event_trigger_data *data, void *rec,
+stacktrace_count_trigger(struct event_trigger_data *data,
+ struct trace_buffer *buffer, void *rec,
struct ring_buffer_event *event)
{
if (!data->count)
@@ -1192,26 +1570,25 @@ stacktrace_count_trigger(struct event_trigger_data *data, void *rec,
if (data->count != -1)
(data->count)--;
- stacktrace_trigger(data, rec, event);
+ stacktrace_trigger(data, buffer, rec, event);
}
static int
-stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
- struct event_trigger_data *data)
+stacktrace_trigger_print(struct seq_file *m, struct event_trigger_data *data)
{
return event_trigger_print("stacktrace", m, (void *)data->count,
data->filter_str);
}
static struct event_trigger_ops stacktrace_trigger_ops = {
- .func = stacktrace_trigger,
+ .trigger = stacktrace_trigger,
.print = stacktrace_trigger_print,
.init = event_trigger_init,
.free = event_trigger_free,
};
static struct event_trigger_ops stacktrace_count_trigger_ops = {
- .func = stacktrace_count_trigger,
+ .trigger = stacktrace_count_trigger,
.print = stacktrace_trigger_print,
.init = event_trigger_init,
.free = event_trigger_free,
@@ -1227,7 +1604,7 @@ static struct event_command trigger_stacktrace_cmd = {
.name = "stacktrace",
.trigger_type = ETT_STACKTRACE,
.flags = EVENT_CMD_FL_POST_TRIGGER,
- .func = event_trigger_callback,
+ .parse = event_trigger_parse,
.reg = register_trigger,
.unreg = unregister_trigger,
.get_trigger_ops = stacktrace_get_trigger_ops,
@@ -1254,7 +1631,8 @@ static __init void unregister_trigger_traceon_traceoff_cmds(void)
}
static void
-event_enable_trigger(struct event_trigger_data *data, void *rec,
+event_enable_trigger(struct event_trigger_data *data,
+ struct trace_buffer *buffer, void *rec,
struct ring_buffer_event *event)
{
struct enable_trigger_data *enable_data = data->private_data;
@@ -1266,7 +1644,8 @@ event_enable_trigger(struct event_trigger_data *data, void *rec,
}
static void
-event_enable_count_trigger(struct event_trigger_data *data, void *rec,
+event_enable_count_trigger(struct event_trigger_data *data,
+ struct trace_buffer *buffer, void *rec,
struct ring_buffer_event *event)
{
struct enable_trigger_data *enable_data = data->private_data;
@@ -1281,11 +1660,10 @@ event_enable_count_trigger(struct event_trigger_data *data, void *rec,
if (data->count != -1)
(data->count)--;
- event_enable_trigger(data, rec, event);
+ event_enable_trigger(data, buffer, rec, event);
}
int event_enable_trigger_print(struct seq_file *m,
- struct event_trigger_ops *ops,
struct event_trigger_data *data)
{
struct enable_trigger_data *enable_data = data->private_data;
@@ -1310,8 +1688,7 @@ int event_enable_trigger_print(struct seq_file *m,
return 0;
}
-void event_enable_trigger_free(struct event_trigger_ops *ops,
- struct event_trigger_data *data)
+void event_enable_trigger_free(struct event_trigger_data *data)
{
struct enable_trigger_data *enable_data = data->private_data;
@@ -1322,75 +1699,69 @@ void event_enable_trigger_free(struct event_trigger_ops *ops,
if (!data->ref) {
/* Remove the SOFT_MODE flag */
trace_event_enable_disable(enable_data->file, 0, 1);
- module_put(enable_data->file->event_call->mod);
+ trace_event_put_ref(enable_data->file->event_call);
trigger_data_free(data);
kfree(enable_data);
}
}
static struct event_trigger_ops event_enable_trigger_ops = {
- .func = event_enable_trigger,
+ .trigger = event_enable_trigger,
.print = event_enable_trigger_print,
.init = event_trigger_init,
.free = event_enable_trigger_free,
};
static struct event_trigger_ops event_enable_count_trigger_ops = {
- .func = event_enable_count_trigger,
+ .trigger = event_enable_count_trigger,
.print = event_enable_trigger_print,
.init = event_trigger_init,
.free = event_enable_trigger_free,
};
static struct event_trigger_ops event_disable_trigger_ops = {
- .func = event_enable_trigger,
+ .trigger = event_enable_trigger,
.print = event_enable_trigger_print,
.init = event_trigger_init,
.free = event_enable_trigger_free,
};
static struct event_trigger_ops event_disable_count_trigger_ops = {
- .func = event_enable_count_trigger,
+ .trigger = event_enable_count_trigger,
.print = event_enable_trigger_print,
.init = event_trigger_init,
.free = event_enable_trigger_free,
};
-int event_enable_trigger_func(struct event_command *cmd_ops,
- struct trace_event_file *file,
- char *glob, char *cmd, char *param)
+int event_enable_trigger_parse(struct event_command *cmd_ops,
+ struct trace_event_file *file,
+ char *glob, char *cmd, char *param_and_filter)
{
struct trace_event_file *event_enable_file;
struct enable_trigger_data *enable_data;
struct event_trigger_data *trigger_data;
- struct event_trigger_ops *trigger_ops;
struct trace_array *tr = file->tr;
+ char *param, *filter;
+ bool enable, remove;
const char *system;
const char *event;
bool hist = false;
- char *trigger;
- char *number;
- bool enable;
int ret;
- if (!param)
- return -EINVAL;
+ remove = event_trigger_check_remove(glob);
- /* separate the trigger from the filter (s:e:n [if filter]) */
- trigger = strsep(&param, " \t");
- if (!trigger)
+ if (event_trigger_empty_param(param_and_filter))
return -EINVAL;
- if (param) {
- param = skip_spaces(param);
- if (!*param)
- param = NULL;
- }
- system = strsep(&trigger, ":");
- if (!trigger)
+ ret = event_trigger_separate_filter(param_and_filter, &param, &filter, true);
+ if (ret)
+ return ret;
+
+ system = strsep(&param, ":");
+ if (!param)
return -EINVAL;
- event = strsep(&trigger, ":");
+ event = strsep(&param, ":");
ret = -EINVAL;
event_enable_file = find_event_file(tr, system, event);
@@ -1406,32 +1777,24 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
#else
enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
- trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
-
ret = -ENOMEM;
- trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
- if (!trigger_data)
- goto out;
enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
- if (!enable_data) {
- kfree(trigger_data);
+ if (!enable_data)
goto out;
- }
-
- trigger_data->count = -1;
- trigger_data->ops = trigger_ops;
- trigger_data->cmd_ops = cmd_ops;
- INIT_LIST_HEAD(&trigger_data->list);
- RCU_INIT_POINTER(trigger_data->filter, NULL);
enable_data->hist = hist;
enable_data->enable = enable;
enable_data->file = event_enable_file;
- trigger_data->private_data = enable_data;
- if (glob[0] == '!') {
- cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
+ trigger_data = event_trigger_alloc(cmd_ops, cmd, param, enable_data);
+ if (!trigger_data) {
+ kfree(enable_data);
+ goto out;
+ }
+
+ if (remove) {
+ event_trigger_unregister(cmd_ops, file, glob+1, trigger_data);
kfree(trigger_data);
kfree(enable_data);
ret = 0;
@@ -1439,37 +1802,18 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
}
/* Up the trigger_data count to make sure nothing frees it on failure */
- event_trigger_init(trigger_ops, trigger_data);
+ event_trigger_init(trigger_data);
- if (trigger) {
- number = strsep(&trigger, ":");
-
- ret = -EINVAL;
- if (!strlen(number))
- goto out_free;
-
- /*
- * We use the callback data field (which is a pointer)
- * as our counter.
- */
- ret = kstrtoul(number, 0, &trigger_data->count);
- if (ret)
- goto out_free;
- }
-
- if (!param) /* if param is non-empty, it's supposed to be a filter */
- goto out_reg;
-
- if (!cmd_ops->set_filter)
- goto out_reg;
+ ret = event_trigger_parse_num(param, trigger_data);
+ if (ret)
+ goto out_free;
- ret = cmd_ops->set_filter(param, trigger_data, file);
+ ret = event_trigger_set_filter(cmd_ops, file, filter, trigger_data);
if (ret < 0)
goto out_free;
- out_reg:
/* Don't let event modules unload while probe registered */
- ret = try_module_get(event_enable_file->event_call->mod);
+ ret = trace_event_try_get_ref(event_enable_file->event_call);
if (!ret) {
ret = -EBUSY;
goto out_free;
@@ -1478,37 +1822,27 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
ret = trace_event_enable_disable(event_enable_file, 1, 1);
if (ret < 0)
goto out_put;
- ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
- /*
- * The above returns on success the # of functions enabled,
- * but if it didn't find any functions it returns zero.
- * Consider no functions a failure too.
- */
- if (!ret) {
- ret = -ENOENT;
- goto out_disable;
- } else if (ret < 0)
+
+ ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
+ if (ret)
goto out_disable;
- /* Just return zero, not the number of enabled functions */
- ret = 0;
- event_trigger_free(trigger_ops, trigger_data);
+
+ event_trigger_free(trigger_data);
out:
return ret;
-
out_disable:
trace_event_enable_disable(event_enable_file, 0, 1);
out_put:
- module_put(event_enable_file->event_call->mod);
+ trace_event_put_ref(event_enable_file->event_call);
out_free:
- if (cmd_ops->set_filter)
- cmd_ops->set_filter(NULL, trigger_data, NULL);
- event_trigger_free(trigger_ops, trigger_data);
+ event_trigger_reset_filter(cmd_ops, trigger_data);
+ event_trigger_free(trigger_data);
kfree(enable_data);
+
goto out;
}
int event_enable_register_trigger(char *glob,
- struct event_trigger_ops *ops,
struct event_trigger_data *data,
struct trace_event_file *file)
{
@@ -1531,43 +1865,40 @@ int event_enable_register_trigger(char *glob,
}
if (data->ops->init) {
- ret = data->ops->init(data->ops, data);
+ ret = data->ops->init(data);
if (ret < 0)
goto out;
}
list_add_rcu(&data->list, &file->triggers);
- ret++;
update_cond_flag(file);
- if (trace_event_trigger_enable_disable(file, 1) < 0) {
+ ret = trace_event_trigger_enable_disable(file, 1);
+ if (ret < 0) {
list_del_rcu(&data->list);
update_cond_flag(file);
- ret--;
}
out:
return ret;
}
void event_enable_unregister_trigger(char *glob,
- struct event_trigger_ops *ops,
struct event_trigger_data *test,
struct trace_event_file *file)
{
struct enable_trigger_data *test_enable_data = test->private_data;
+ struct event_trigger_data *data = NULL, *iter;
struct enable_trigger_data *enable_data;
- struct event_trigger_data *data;
- bool unregistered = false;
lockdep_assert_held(&event_mutex);
- list_for_each_entry(data, &file->triggers, list) {
- enable_data = data->private_data;
+ list_for_each_entry(iter, &file->triggers, list) {
+ enable_data = iter->private_data;
if (enable_data &&
- (data->cmd_ops->trigger_type ==
+ (iter->cmd_ops->trigger_type ==
test->cmd_ops->trigger_type) &&
(enable_data->file == test_enable_data->file)) {
- unregistered = true;
+ data = iter;
list_del_rcu(&data->list);
trace_event_trigger_enable_disable(file, 0);
update_cond_flag(file);
@@ -1575,8 +1906,8 @@ void event_enable_unregister_trigger(char *glob,
}
}
- if (unregistered && data->ops->free)
- data->ops->free(data->ops, data);
+ if (data && data->ops->free)
+ data->ops->free(data);
}
static struct event_trigger_ops *
@@ -1604,7 +1935,7 @@ event_enable_get_trigger_ops(char *cmd, char *param)
static struct event_command trigger_enable_cmd = {
.name = ENABLE_EVENT_STR,
.trigger_type = ETT_EVENT_ENABLE,
- .func = event_enable_trigger_func,
+ .parse = event_enable_trigger_parse,
.reg = event_enable_register_trigger,
.unreg = event_enable_unregister_trigger,
.get_trigger_ops = event_enable_get_trigger_ops,
@@ -1614,7 +1945,7 @@ static struct event_command trigger_enable_cmd = {
static struct event_command trigger_disable_cmd = {
.name = DISABLE_EVENT_STR,
.trigger_type = ETT_EVENT_ENABLE,
- .func = event_enable_trigger_func,
+ .parse = event_enable_trigger_parse,
.reg = event_enable_register_trigger,
.unreg = event_enable_unregister_trigger,
.get_trigger_ops = event_enable_get_trigger_ops,
diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c
new file mode 100644
index 000000000000..e76f5e1efdf2
--- /dev/null
+++ b/kernel/trace/trace_events_user.c
@@ -0,0 +1,2784 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, Microsoft Corporation.
+ *
+ * Authors:
+ * Beau Belgrave <beaub@linux.microsoft.com>
+ */
+
+#include <linux/bitmap.h>
+#include <linux/cdev.h>
+#include <linux/hashtable.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/uio.h>
+#include <linux/ioctl.h>
+#include <linux/jhash.h>
+#include <linux/refcount.h>
+#include <linux/trace_events.h>
+#include <linux/tracefs.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/highmem.h>
+#include <linux/init.h>
+#include <linux/user_events.h>
+#include "trace_dynevent.h"
+#include "trace_output.h"
+#include "trace.h"
+
+#define USER_EVENTS_PREFIX_LEN (sizeof(USER_EVENTS_PREFIX)-1)
+
+#define FIELD_DEPTH_TYPE 0
+#define FIELD_DEPTH_NAME 1
+#define FIELD_DEPTH_SIZE 2
+
+/* Limit how long an event name plus args can be within the subsystem. */
+#define MAX_EVENT_DESC 512
+#define EVENT_NAME(user_event) ((user_event)->tracepoint.name)
+#define MAX_FIELD_ARRAY_SIZE 1024
+
+/*
+ * Internal bits (kernel side only) to keep track of connected probes:
+ * These are used when status is requested in text form about an event. These
+ * bits are compared against an internal byte on the event to determine which
+ * probes to print out to the user.
+ *
+ * These do not reflect the mapped bytes between the user and kernel space.
+ */
+#define EVENT_STATUS_FTRACE BIT(0)
+#define EVENT_STATUS_PERF BIT(1)
+#define EVENT_STATUS_OTHER BIT(7)
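+
+/*
+ * Illustrative example: an event with both an ftrace probe and a perf
+ * probe attached reports a status byte of
+ * (EVENT_STATUS_FTRACE | EVENT_STATUS_PERF), while any other attached
+ * probe sets EVENT_STATUS_OTHER.
+ */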
+
+/*
+ * Stores the system name, tables, and locks for a group of events. This
+ * allows isolation for events by various means.
+ */
+struct user_event_group {
+ char *system_name;
+ struct hlist_node node;
+ struct mutex reg_mutex;
+ DECLARE_HASHTABLE(register_table, 8);
+};
+
+/* Group for init_user_ns mapping, top-most group */
+static struct user_event_group *init_group;
+
+/* Max allowed events for the whole system */
+static unsigned int max_user_events = 32768;
+
+/* Current number of events on the whole system */
+static unsigned int current_user_events;
+
+/*
+ * Stores per-event properties. As users register events
+ * within a file, a user_event might be created if it does not
+ * already exist. These are globally used and their lifetime
+ * is tied to the refcnt member. These cannot go away until the
+ * refcnt reaches one.
+ */
+struct user_event {
+ struct user_event_group *group;
+ struct tracepoint tracepoint;
+ struct trace_event_call call;
+ struct trace_event_class class;
+ struct dyn_event devent;
+ struct hlist_node node;
+ struct list_head fields;
+ struct list_head validators;
+ struct work_struct put_work;
+ refcount_t refcnt;
+ int min_size;
+ int reg_flags;
+ char status;
+};
+
+/*
+ * Stores per-mm/event properties that enable an address to be
+ * updated properly for each task. As tasks are forked, we use
+ * these to track enablement sites that are tied to an event.
+ */
+struct user_event_enabler {
+ struct list_head mm_enablers_link;
+ struct user_event *event;
+ unsigned long addr;
+
+ /* Track enable bit, flags, etc. Aligned for bitops. */
+ unsigned long values;
+};
+
+/* Bits 0-5 are for the bit to update upon enable/disable (0-63 allowed) */
+#define ENABLE_VAL_BIT_MASK 0x3F
+
+/* Bit 6 is for faulting status of enablement */
+#define ENABLE_VAL_FAULTING_BIT 6
+
+/* Bit 7 is for freeing status of enablement */
+#define ENABLE_VAL_FREEING_BIT 7
+
+/* Bit 8 is for marking 32-bit on 64-bit */
+#define ENABLE_VAL_32_ON_64_BIT 8
+
+#define ENABLE_VAL_COMPAT_MASK (1 << ENABLE_VAL_32_ON_64_BIT)
+
+/* Only duplicate the bit and compat values */
+#define ENABLE_VAL_DUP_MASK (ENABLE_VAL_BIT_MASK | ENABLE_VAL_COMPAT_MASK)
+
+#define ENABLE_BITOPS(e) (&(e)->values)
+
+#define ENABLE_BIT(e) ((int)((e)->values & ENABLE_VAL_BIT_MASK))
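+
+/*
+ * Illustrative example: values of 0x105 decode to an enable bit of 5
+ * (0x105 & ENABLE_VAL_BIT_MASK), the 32-on-64 compat flag set (bit 8),
+ * and neither the faulting (bit 6) nor the freeing (bit 7) state set.
+ * Only the bit and compat portion (0x13F, ENABLE_VAL_DUP_MASK) is
+ * duplicated into a child on fork.
+ */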
+
+/* Used for asynchronous faulting in of pages */
+struct user_event_enabler_fault {
+ struct work_struct work;
+ struct user_event_mm *mm;
+ struct user_event_enabler *enabler;
+ int attempt;
+};
+
+static struct kmem_cache *fault_cache;
+
+/* Global list of memory descriptors using user_events */
+static LIST_HEAD(user_event_mms);
+static DEFINE_SPINLOCK(user_event_mms_lock);
+
+/*
+ * Stores per-file event references. As users register events
+ * within a file, this structure is modified and freed via RCU.
+ * The lifetime of this struct is tied to the lifetime of the file.
+ * These are not shared and are only accessible by the file that created them.
+ */
+struct user_event_refs {
+ struct rcu_head rcu;
+ int count;
+ struct user_event *events[];
+};
+
+struct user_event_file_info {
+ struct user_event_group *group;
+ struct user_event_refs *refs;
+};
+
+#define VALIDATOR_ENSURE_NULL (1 << 0)
+#define VALIDATOR_REL (1 << 1)
+
+struct user_event_validator {
+ struct list_head user_event_link;
+ int offset;
+ int flags;
+};
+
+static inline void align_addr_bit(unsigned long *addr, int *bit,
+ unsigned long *flags)
+{
+ if (IS_ALIGNED(*addr, sizeof(long))) {
+#ifdef __BIG_ENDIAN
+ /* 32 bit on BE 64 bit requires a 32 bit offset when aligned. */
+ if (test_bit(ENABLE_VAL_32_ON_64_BIT, flags))
+ *bit += 32;
+#endif
+ return;
+ }
+
+ *addr = ALIGN_DOWN(*addr, sizeof(long));
+
+ /*
+ * We only support 32 and 64 bit values. The only time we need
+ * to adjust is for a 32 bit value on a 64 bit kernel: on LE the
+ * bit offset always moves up by 32, while on BE an unaligned
+ * value requires no change.
+ */
+#ifdef __LITTLE_ENDIAN
+ *bit += 32;
+#endif
+}
+
+typedef void (*user_event_func_t) (struct user_event *user, struct iov_iter *i,
+ void *tpdata, bool *faulted);
+
+static int user_event_parse(struct user_event_group *group, char *name,
+ char *args, char *flags,
+ struct user_event **newuser, int reg_flags);
+
+static struct user_event_mm *user_event_mm_get(struct user_event_mm *mm);
+static struct user_event_mm *user_event_mm_get_all(struct user_event *user);
+static void user_event_mm_put(struct user_event_mm *mm);
+static int destroy_user_event(struct user_event *user);
+
+static u32 user_event_key(char *name)
+{
+ return jhash(name, strlen(name), 0);
+}
+
+static bool user_event_capable(u16 reg_flags)
+{
+ /* Persistent events require CAP_PERFMON / CAP_SYS_ADMIN */
+ if (reg_flags & USER_EVENT_REG_PERSIST) {
+ if (!perfmon_capable())
+ return false;
+ }
+
+ return true;
+}
+
+static struct user_event *user_event_get(struct user_event *user)
+{
+ refcount_inc(&user->refcnt);
+
+ return user;
+}
+
+static void delayed_destroy_user_event(struct work_struct *work)
+{
+ struct user_event *user = container_of(
+ work, struct user_event, put_work);
+
+ mutex_lock(&event_mutex);
+
+ if (!refcount_dec_and_test(&user->refcnt))
+ goto out;
+
+ if (destroy_user_event(user)) {
+ /*
+ * The only reason this would fail here is if we cannot
+ * update the visibility of the event. In this case the
+ * event stays in the hashtable, waiting for someone to
+ * attempt to delete it later.
+ */
+ pr_warn("user_events: Unable to delete event\n");
+ refcount_set(&user->refcnt, 1);
+ }
+out:
+ mutex_unlock(&event_mutex);
+}
+
+static void user_event_put(struct user_event *user, bool locked)
+{
+ bool delete;
+
+ if (unlikely(!user))
+ return;
+
+ /*
+ * When the event is not enabled for auto-delete there will always
+ * be at least 1 reference to the event. During event creation we
+ * initially set the refcnt to 2 to achieve this. In those cases the
+ * caller must acquire event_mutex and, after decrementing, check if
+ * the refcnt is 1, meaning this is the last reference. When auto
+ * delete is enabled, there will only be 1 ref, i.e. the refcnt is
+ * only set to 1 during creation to allow the below checks to go
+ * through upon the last put. The last put must always be done with
+ * the event mutex held.
+ */
+ if (!locked) {
+ lockdep_assert_not_held(&event_mutex);
+ delete = refcount_dec_and_mutex_lock(&user->refcnt, &event_mutex);
+ } else {
+ lockdep_assert_held(&event_mutex);
+ delete = refcount_dec_and_test(&user->refcnt);
+ }
+
+ if (!delete)
+ return;
+
+ /*
+ * We now have the event_mutex in all cases, which ensures that
+ * no new references will be taken until event_mutex is released.
+ * New references come through find_user_event(), which requires
+ * the event_mutex to be held.
+ */
+
+ if (user->reg_flags & USER_EVENT_REG_PERSIST) {
+ /* We should not get here when persist flag is set */
+ pr_alert("BUG: Auto-delete engaged on persistent event\n");
+ goto out;
+ }
+
+ /*
+ * Unfortunately we have to attempt the actual destroy in a work
+ * queue. This is because not all cases handle a trace_event_call
+ * being removed within the class->reg() operation for unregister.
+ */
+ INIT_WORK(&user->put_work, delayed_destroy_user_event);
+
+ /*
+ * Since the event is still in the hashtable, we have to reset
+ * the ref count back to 1. This count will be decremented and checked
+ * in the work queue to ensure it's still the last ref. This is
+ * needed because a user process could register the same event in
+ * between the time of event_mutex release and the work queue
+ * running the delayed destroy. If we removed the item now from
+ * the hashtable, this would result in a timing window where a
+ * user process would fail a register because the trace_event_call
+ * register would fail in the tracing layers.
+ */
+ refcount_set(&user->refcnt, 1);
+
+ if (WARN_ON_ONCE(!schedule_work(&user->put_work))) {
+ /*
+ * If we fail we must wait for an admin to attempt delete or
+ * another register/close of the event, whichever is first.
+ */
+ pr_warn("user_events: Unable to queue delayed destroy\n");
+ }
+out:
+	/* If event_mutex wasn't held on entry, we acquired it above; unlock it */
+ if (!locked)
+ mutex_unlock(&event_mutex);
+}
+
+static void user_event_group_destroy(struct user_event_group *group)
+{
+ kfree(group->system_name);
+ kfree(group);
+}
+
+static char *user_event_group_system_name(void)
+{
+ char *system_name;
+ int len = sizeof(USER_EVENTS_SYSTEM) + 1;
+
+ system_name = kmalloc(len, GFP_KERNEL);
+
+ if (!system_name)
+ return NULL;
+
+ snprintf(system_name, len, "%s", USER_EVENTS_SYSTEM);
+
+ return system_name;
+}
+
+static struct user_event_group *current_user_event_group(void)
+{
+ return init_group;
+}
+
+static struct user_event_group *user_event_group_create(void)
+{
+ struct user_event_group *group;
+
+ group = kzalloc(sizeof(*group), GFP_KERNEL);
+
+ if (!group)
+ return NULL;
+
+ group->system_name = user_event_group_system_name();
+
+ if (!group->system_name)
+ goto error;
+
+ mutex_init(&group->reg_mutex);
+ hash_init(group->register_table);
+
+ return group;
+error:
+ if (group)
+ user_event_group_destroy(group);
+
+ return NULL;
+};
+
+static void user_event_enabler_destroy(struct user_event_enabler *enabler,
+ bool locked)
+{
+ list_del_rcu(&enabler->mm_enablers_link);
+
+ /* No longer tracking the event via the enabler */
+ user_event_put(enabler->event, locked);
+
+ kfree(enabler);
+}
+
+static int user_event_mm_fault_in(struct user_event_mm *mm, unsigned long uaddr,
+ int attempt)
+{
+ bool unlocked;
+ int ret;
+
+ /*
+	 * Normally the attempt count is low; ensure that it cannot be abused
+	 * by bad user processes to cause excessive looping.
+ */
+ if (attempt > 10)
+ return -EFAULT;
+
+ mmap_read_lock(mm->mm);
+
+ /* Ensure MM has tasks, cannot use after exit_mm() */
+ if (refcount_read(&mm->tasks) == 0) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ ret = fixup_user_fault(mm->mm, uaddr, FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE,
+ &unlocked);
+out:
+ mmap_read_unlock(mm->mm);
+
+ return ret;
+}
+
+static int user_event_enabler_write(struct user_event_mm *mm,
+ struct user_event_enabler *enabler,
+ bool fixup_fault, int *attempt);
+
+static void user_event_enabler_fault_fixup(struct work_struct *work)
+{
+ struct user_event_enabler_fault *fault = container_of(
+ work, struct user_event_enabler_fault, work);
+ struct user_event_enabler *enabler = fault->enabler;
+ struct user_event_mm *mm = fault->mm;
+ unsigned long uaddr = enabler->addr;
+ int attempt = fault->attempt;
+ int ret;
+
+ ret = user_event_mm_fault_in(mm, uaddr, attempt);
+
+ if (ret && ret != -ENOENT) {
+ struct user_event *user = enabler->event;
+
+ pr_warn("user_events: Fault for mm: 0x%pK @ 0x%llx event: %s\n",
+ mm->mm, (unsigned long long)uaddr, EVENT_NAME(user));
+ }
+
+ /* Prevent state changes from racing */
+ mutex_lock(&event_mutex);
+
+ /* User asked for enabler to be removed during fault */
+ if (test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler))) {
+ user_event_enabler_destroy(enabler, true);
+ goto out;
+ }
+
+ /*
+ * If we managed to get the page, re-issue the write. We do not
+ * want to get into a possible infinite loop, which is why we only
+ * attempt again directly if the page came in. If we couldn't get
+ * the page here, then we will try again the next time the event is
+ * enabled/disabled.
+ */
+ clear_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));
+
+ if (!ret) {
+ mmap_read_lock(mm->mm);
+ user_event_enabler_write(mm, enabler, true, &attempt);
+ mmap_read_unlock(mm->mm);
+ }
+out:
+ mutex_unlock(&event_mutex);
+
+ /* In all cases we no longer need the mm or fault */
+ user_event_mm_put(mm);
+ kmem_cache_free(fault_cache, fault);
+}
+
+static bool user_event_enabler_queue_fault(struct user_event_mm *mm,
+ struct user_event_enabler *enabler,
+ int attempt)
+{
+ struct user_event_enabler_fault *fault;
+
+ fault = kmem_cache_zalloc(fault_cache, GFP_NOWAIT | __GFP_NOWARN);
+
+ if (!fault)
+ return false;
+
+ INIT_WORK(&fault->work, user_event_enabler_fault_fixup);
+ fault->mm = user_event_mm_get(mm);
+ fault->enabler = enabler;
+ fault->attempt = attempt;
+
+	/* Don't try to queue again while we have a pending fault */
+ set_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));
+
+ if (!schedule_work(&fault->work)) {
+ /* Allow another attempt later */
+ clear_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));
+
+ user_event_mm_put(mm);
+ kmem_cache_free(fault_cache, fault);
+
+ return false;
+ }
+
+ return true;
+}
+
+static int user_event_enabler_write(struct user_event_mm *mm,
+ struct user_event_enabler *enabler,
+ bool fixup_fault, int *attempt)
+{
+ unsigned long uaddr = enabler->addr;
+ unsigned long *ptr;
+ struct page *page;
+ void *kaddr;
+ int bit = ENABLE_BIT(enabler);
+ int ret;
+
+ lockdep_assert_held(&event_mutex);
+ mmap_assert_locked(mm->mm);
+
+ *attempt += 1;
+
+ /* Ensure MM has tasks, cannot use after exit_mm() */
+ if (refcount_read(&mm->tasks) == 0)
+ return -ENOENT;
+
+ if (unlikely(test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler)) ||
+ test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler))))
+ return -EBUSY;
+
+ align_addr_bit(&uaddr, &bit, ENABLE_BITOPS(enabler));
+
+ ret = pin_user_pages_remote(mm->mm, uaddr, 1, FOLL_WRITE | FOLL_NOFAULT,
+ &page, NULL);
+
+ if (unlikely(ret <= 0)) {
+ if (!fixup_fault)
+ return -EFAULT;
+
+ if (!user_event_enabler_queue_fault(mm, enabler, *attempt))
+ pr_warn("user_events: Unable to queue fault handler\n");
+
+ return -EFAULT;
+ }
+
+ kaddr = kmap_local_page(page);
+ ptr = kaddr + (uaddr & ~PAGE_MASK);
+
+ /* Update bit atomically, user tracers must be atomic as well */
+ if (enabler->event && enabler->event->status)
+ set_bit(bit, ptr);
+ else
+ clear_bit(bit, ptr);
+
+ kunmap_local(kaddr);
+ unpin_user_pages_dirty_lock(&page, 1, true);
+
+ return 0;
+}
+
+static bool user_event_enabler_exists(struct user_event_mm *mm,
+ unsigned long uaddr, unsigned char bit)
+{
+ struct user_event_enabler *enabler;
+
+ list_for_each_entry(enabler, &mm->enablers, mm_enablers_link) {
+ if (enabler->addr == uaddr && ENABLE_BIT(enabler) == bit)
+ return true;
+ }
+
+ return false;
+}
+
+static void user_event_enabler_update(struct user_event *user)
+{
+ struct user_event_enabler *enabler;
+ struct user_event_mm *next;
+ struct user_event_mm *mm;
+ int attempt;
+
+ lockdep_assert_held(&event_mutex);
+
+ /*
+ * We need to build a one-shot list of all the mms that have an
+ * enabler for the user_event passed in. This list is only valid
+	 * while holding the event_mutex. This is needed because the global
+	 * mm list is RCU protected and we use methods which can wait
+	 * (mmap_read_lock and pin_user_pages_remote).
+ *
+ * NOTE: user_event_mm_get_all() increments the ref count of each
+ * mm that is added to the list to prevent removal timing windows.
+ * We must always put each mm after they are used, which may wait.
+ */
+ mm = user_event_mm_get_all(user);
+
+ while (mm) {
+ next = mm->next;
+ mmap_read_lock(mm->mm);
+
+ list_for_each_entry(enabler, &mm->enablers, mm_enablers_link) {
+ if (enabler->event == user) {
+ attempt = 0;
+ user_event_enabler_write(mm, enabler, true, &attempt);
+ }
+ }
+
+ mmap_read_unlock(mm->mm);
+ user_event_mm_put(mm);
+ mm = next;
+ }
+}
+
+static bool user_event_enabler_dup(struct user_event_enabler *orig,
+ struct user_event_mm *mm)
+{
+ struct user_event_enabler *enabler;
+
+ /* Skip pending frees */
+ if (unlikely(test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(orig))))
+ return true;
+
+ enabler = kzalloc(sizeof(*enabler), GFP_NOWAIT | __GFP_ACCOUNT);
+
+ if (!enabler)
+ return false;
+
+ enabler->event = user_event_get(orig->event);
+ enabler->addr = orig->addr;
+
+ /* Only dup part of value (ignore future flags, etc) */
+ enabler->values = orig->values & ENABLE_VAL_DUP_MASK;
+
+ /* Enablers not exposed yet, RCU not required */
+ list_add(&enabler->mm_enablers_link, &mm->enablers);
+
+ return true;
+}
+
+static struct user_event_mm *user_event_mm_get(struct user_event_mm *mm)
+{
+ refcount_inc(&mm->refcnt);
+
+ return mm;
+}
+
+static struct user_event_mm *user_event_mm_get_all(struct user_event *user)
+{
+ struct user_event_mm *found = NULL;
+ struct user_event_enabler *enabler;
+ struct user_event_mm *mm;
+
+ /*
+ * We use the mm->next field to build a one-shot list from the global
+ * RCU protected list. To build this list the event_mutex must be held.
+ * This lets us build a list without requiring allocs that could fail
+ * when user based events are most wanted for diagnostics.
+ */
+ lockdep_assert_held(&event_mutex);
+
+ /*
+	 * We do not want to block fork/exec while enablements are being
+	 * updated, so we use RCU to walk the current tasks that have used
+	 * the user_events ABI for 1 or more events. Each enabler found in
+	 * each task that matches the event being updated gets a write to
+	 * reflect the kernel state back into the process. Waits/faults must
+	 * not occur during this, so we scan the list under RCU for all the
+	 * mms that have the event within them. This is needed because
+	 * mmap_read_lock() can wait. Each user mm returned has its ref count
+	 * incremented to handle RCU removal races.
+ */
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(mm, &user_event_mms, mms_link) {
+ list_for_each_entry_rcu(enabler, &mm->enablers, mm_enablers_link) {
+ if (enabler->event == user) {
+ mm->next = found;
+ found = user_event_mm_get(mm);
+ break;
+ }
+ }
+ }
+
+ rcu_read_unlock();
+
+ return found;
+}
+
+static struct user_event_mm *user_event_mm_alloc(struct task_struct *t)
+{
+ struct user_event_mm *user_mm;
+
+ user_mm = kzalloc(sizeof(*user_mm), GFP_KERNEL_ACCOUNT);
+
+ if (!user_mm)
+ return NULL;
+
+ user_mm->mm = t->mm;
+ INIT_LIST_HEAD(&user_mm->enablers);
+ refcount_set(&user_mm->refcnt, 1);
+ refcount_set(&user_mm->tasks, 1);
+
+ /*
+ * The lifetime of the memory descriptor can slightly outlast
+ * the task lifetime if a ref to the user_event_mm is taken
+ * between list_del_rcu() and call_rcu(). Therefore we need
+ * to take a reference to it to ensure it can live this long
+ * under this corner case. This can also occur in clones that
+ * outlast the parent.
+ */
+ mmgrab(user_mm->mm);
+
+ return user_mm;
+}
+
+static void user_event_mm_attach(struct user_event_mm *user_mm, struct task_struct *t)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&user_event_mms_lock, flags);
+ list_add_rcu(&user_mm->mms_link, &user_event_mms);
+ spin_unlock_irqrestore(&user_event_mms_lock, flags);
+
+ t->user_event_mm = user_mm;
+}
+
+static struct user_event_mm *current_user_event_mm(void)
+{
+ struct user_event_mm *user_mm = current->user_event_mm;
+
+ if (user_mm)
+ goto inc;
+
+ user_mm = user_event_mm_alloc(current);
+
+ if (!user_mm)
+ goto error;
+
+ user_event_mm_attach(user_mm, current);
+inc:
+ refcount_inc(&user_mm->refcnt);
+error:
+ return user_mm;
+}
+
+static void user_event_mm_destroy(struct user_event_mm *mm)
+{
+ struct user_event_enabler *enabler, *next;
+
+ list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link)
+ user_event_enabler_destroy(enabler, false);
+
+ mmdrop(mm->mm);
+ kfree(mm);
+}
+
+static void user_event_mm_put(struct user_event_mm *mm)
+{
+ if (mm && refcount_dec_and_test(&mm->refcnt))
+ user_event_mm_destroy(mm);
+}
+
+static void delayed_user_event_mm_put(struct work_struct *work)
+{
+ struct user_event_mm *mm;
+
+ mm = container_of(to_rcu_work(work), struct user_event_mm, put_rwork);
+ user_event_mm_put(mm);
+}
+
+void user_event_mm_remove(struct task_struct *t)
+{
+ struct user_event_mm *mm;
+ unsigned long flags;
+
+ might_sleep();
+
+ mm = t->user_event_mm;
+ t->user_event_mm = NULL;
+
+ /* Clone will increment the tasks, only remove if last clone */
+ if (!refcount_dec_and_test(&mm->tasks))
+ return;
+
+ /* Remove the mm from the list, so it can no longer be enabled */
+ spin_lock_irqsave(&user_event_mms_lock, flags);
+ list_del_rcu(&mm->mms_link);
+ spin_unlock_irqrestore(&user_event_mms_lock, flags);
+
+ /*
+ * We need to wait for currently occurring writes to stop within
+ * the mm. This is required since exit_mm() snaps the current rss
+ * stats and clears them. On the final mmdrop(), check_mm() will
+ * report a bug if these increment.
+ *
+ * All writes/pins are done under mmap_read lock, take the write
+ * lock to ensure in-progress faults have completed. Faults that
+ * are pending but yet to run will check the task count and skip
+ * the fault since the mm is going away.
+ */
+ mmap_write_lock(mm->mm);
+ mmap_write_unlock(mm->mm);
+
+ /*
+ * Put for mm must be done after RCU delay to handle new refs in
+ * between the list_del_rcu() and now. This ensures any get refs
+ * during rcu_read_lock() are accounted for during list removal.
+ *
+ * CPU A | CPU B
+ * ---------------------------------------------------------------
+ * user_event_mm_remove() | rcu_read_lock();
+ * list_del_rcu() | list_for_each_entry_rcu();
+ * call_rcu() | refcount_inc();
+ * . | rcu_read_unlock();
+ * schedule_work() | .
+ * user_event_mm_put() | .
+ *
+ * mmdrop() cannot be called in the softirq context of call_rcu()
+ * so we use a work queue after call_rcu() to run within.
+ */
+ INIT_RCU_WORK(&mm->put_rwork, delayed_user_event_mm_put);
+ queue_rcu_work(system_wq, &mm->put_rwork);
+}
+
+void user_event_mm_dup(struct task_struct *t, struct user_event_mm *old_mm)
+{
+ struct user_event_mm *mm = user_event_mm_alloc(t);
+ struct user_event_enabler *enabler;
+
+ if (!mm)
+ return;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(enabler, &old_mm->enablers, mm_enablers_link) {
+ if (!user_event_enabler_dup(enabler, mm))
+ goto error;
+ }
+
+ rcu_read_unlock();
+
+ user_event_mm_attach(mm, t);
+ return;
+error:
+ rcu_read_unlock();
+ user_event_mm_destroy(mm);
+}
+
+static bool current_user_event_enabler_exists(unsigned long uaddr,
+ unsigned char bit)
+{
+ struct user_event_mm *user_mm = current_user_event_mm();
+ bool exists;
+
+ if (!user_mm)
+ return false;
+
+ exists = user_event_enabler_exists(user_mm, uaddr, bit);
+
+ user_event_mm_put(user_mm);
+
+ return exists;
+}
+
+static struct user_event_enabler
+*user_event_enabler_create(struct user_reg *reg, struct user_event *user,
+ int *write_result)
+{
+ struct user_event_enabler *enabler;
+ struct user_event_mm *user_mm;
+ unsigned long uaddr = (unsigned long)reg->enable_addr;
+ int attempt = 0;
+
+ user_mm = current_user_event_mm();
+
+ if (!user_mm)
+ return NULL;
+
+ enabler = kzalloc(sizeof(*enabler), GFP_KERNEL_ACCOUNT);
+
+ if (!enabler)
+ goto out;
+
+ enabler->event = user;
+ enabler->addr = uaddr;
+ enabler->values = reg->enable_bit;
+
+#if BITS_PER_LONG >= 64
+ if (reg->enable_size == 4)
+ set_bit(ENABLE_VAL_32_ON_64_BIT, ENABLE_BITOPS(enabler));
+#endif
+
+retry:
+ /* Prevents state changes from racing with new enablers */
+ mutex_lock(&event_mutex);
+
+ /* Attempt to reflect the current state within the process */
+ mmap_read_lock(user_mm->mm);
+ *write_result = user_event_enabler_write(user_mm, enabler, false,
+ &attempt);
+ mmap_read_unlock(user_mm->mm);
+
+ /*
+ * If the write works, then we will track the enabler. A ref to the
+ * underlying user_event is held by the enabler to prevent it going
+ * away while the enabler is still in use by a process. The ref is
+	 * removed when the enabler is destroyed. This means an event cannot
+ * be forcefully deleted from the system until all tasks using it
+ * exit or run exec(), which includes forks and clones.
+ */
+ if (!*write_result) {
+ user_event_get(user);
+ list_add_rcu(&enabler->mm_enablers_link, &user_mm->enablers);
+ }
+
+ mutex_unlock(&event_mutex);
+
+ if (*write_result) {
+ /* Attempt to fault-in and retry if it worked */
+ if (!user_event_mm_fault_in(user_mm, uaddr, attempt))
+ goto retry;
+
+ kfree(enabler);
+ enabler = NULL;
+ }
+out:
+ user_event_mm_put(user_mm);
+
+ return enabler;
+}
+
+static __always_inline __must_check
+bool user_event_last_ref(struct user_event *user)
+{
+ int last = 0;
+
+ if (user->reg_flags & USER_EVENT_REG_PERSIST)
+ last = 1;
+
+ return refcount_read(&user->refcnt) == last;
+}
+
+static __always_inline __must_check
+size_t copy_nofault(void *addr, size_t bytes, struct iov_iter *i)
+{
+ size_t ret;
+
+ pagefault_disable();
+
+ ret = copy_from_iter_nocache(addr, bytes, i);
+
+ pagefault_enable();
+
+ return ret;
+}
+
+static struct list_head *user_event_get_fields(struct trace_event_call *call)
+{
+ struct user_event *user = (struct user_event *)call->data;
+
+ return &user->fields;
+}
+
+/*
+ * Parses a register command for user_events
+ * Format: event_name[:FLAG1[,FLAG2...]] [field1[;field2...]]
+ *
+ * Example: an event named 'test' with a 20 char 'msg' field followed by an
+ * unsigned int 'id' field:
+ * test char[20] msg;unsigned int id
+ *
+ * NOTE: Offsets are from the user data perspective; they are not from the
+ * trace_entry/buffer perspective. We automatically add the common property
+ * sizes to the offset for the user.
+ *
+ * Upon success user_event has its ref count increased by 1.
+ */
+static int user_event_parse_cmd(struct user_event_group *group,
+ char *raw_command, struct user_event **newuser,
+ int reg_flags)
+{
+ char *name = raw_command;
+ char *args = strpbrk(name, " ");
+ char *flags;
+
+ if (args)
+ *args++ = '\0';
+
+ flags = strpbrk(name, ":");
+
+ if (flags)
+ *flags++ = '\0';
+
+ return user_event_parse(group, name, args, flags, newuser, reg_flags);
+}
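+
+/*
+ * Worked example (illustrative only): the raw command
+ *
+ * test char[20] msg;unsigned int id
+ *
+ * is split by user_event_parse_cmd() into name = "test", flags = NULL and
+ * args = "char[20] msg;unsigned int id". A command of "test:FLAG ..." would
+ * instead yield flags = "FLAG", which user_event_parse() currently rejects
+ * since no text based flags are supported.
+ */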
+
+static int user_field_array_size(const char *type)
+{
+ const char *start = strchr(type, '[');
+ char val[8];
+ char *bracket;
+ int size = 0;
+
+ if (start == NULL)
+ return -EINVAL;
+
+ if (strscpy(val, start + 1, sizeof(val)) <= 0)
+ return -EINVAL;
+
+ bracket = strchr(val, ']');
+
+ if (!bracket)
+ return -EINVAL;
+
+ *bracket = '\0';
+
+ if (kstrtouint(val, 0, &size))
+ return -EINVAL;
+
+ if (size > MAX_FIELD_ARRAY_SIZE)
+ return -EINVAL;
+
+ return size;
+}
+
+static int user_field_size(const char *type)
+{
+	/* long is not allowed from a user, since it's ambiguous in size */
+ if (strcmp(type, "s64") == 0)
+ return sizeof(s64);
+ if (strcmp(type, "u64") == 0)
+ return sizeof(u64);
+ if (strcmp(type, "s32") == 0)
+ return sizeof(s32);
+ if (strcmp(type, "u32") == 0)
+ return sizeof(u32);
+ if (strcmp(type, "int") == 0)
+ return sizeof(int);
+ if (strcmp(type, "unsigned int") == 0)
+ return sizeof(unsigned int);
+ if (strcmp(type, "s16") == 0)
+ return sizeof(s16);
+ if (strcmp(type, "u16") == 0)
+ return sizeof(u16);
+ if (strcmp(type, "short") == 0)
+ return sizeof(short);
+ if (strcmp(type, "unsigned short") == 0)
+ return sizeof(unsigned short);
+ if (strcmp(type, "s8") == 0)
+ return sizeof(s8);
+ if (strcmp(type, "u8") == 0)
+ return sizeof(u8);
+ if (strcmp(type, "char") == 0)
+ return sizeof(char);
+ if (strcmp(type, "unsigned char") == 0)
+ return sizeof(unsigned char);
+ if (str_has_prefix(type, "char["))
+ return user_field_array_size(type);
+ if (str_has_prefix(type, "unsigned char["))
+ return user_field_array_size(type);
+ if (str_has_prefix(type, "__data_loc "))
+ return sizeof(u32);
+ if (str_has_prefix(type, "__rel_loc "))
+ return sizeof(u32);
+
+	/* Unknown basic type, error */
+ return -EINVAL;
+}
+
+static void user_event_destroy_validators(struct user_event *user)
+{
+ struct user_event_validator *validator, *next;
+ struct list_head *head = &user->validators;
+
+ list_for_each_entry_safe(validator, next, head, user_event_link) {
+ list_del(&validator->user_event_link);
+ kfree(validator);
+ }
+}
+
+static void user_event_destroy_fields(struct user_event *user)
+{
+ struct ftrace_event_field *field, *next;
+ struct list_head *head = &user->fields;
+
+ list_for_each_entry_safe(field, next, head, link) {
+ list_del(&field->link);
+ kfree(field);
+ }
+}
+
+static int user_event_add_field(struct user_event *user, const char *type,
+ const char *name, int offset, int size,
+ int is_signed, int filter_type)
+{
+ struct user_event_validator *validator;
+ struct ftrace_event_field *field;
+ int validator_flags = 0;
+
+ field = kmalloc(sizeof(*field), GFP_KERNEL_ACCOUNT);
+
+ if (!field)
+ return -ENOMEM;
+
+ if (str_has_prefix(type, "__data_loc "))
+ goto add_validator;
+
+ if (str_has_prefix(type, "__rel_loc ")) {
+ validator_flags |= VALIDATOR_REL;
+ goto add_validator;
+ }
+
+ goto add_field;
+
+add_validator:
+ if (strstr(type, "char") != NULL)
+ validator_flags |= VALIDATOR_ENSURE_NULL;
+
+ validator = kmalloc(sizeof(*validator), GFP_KERNEL_ACCOUNT);
+
+ if (!validator) {
+ kfree(field);
+ return -ENOMEM;
+ }
+
+ validator->flags = validator_flags;
+ validator->offset = offset;
+
+ /* Want sequential access when validating */
+ list_add_tail(&validator->user_event_link, &user->validators);
+
+add_field:
+ field->type = type;
+ field->name = name;
+ field->offset = offset;
+ field->size = size;
+ field->is_signed = is_signed;
+ field->filter_type = filter_type;
+
+ if (filter_type == FILTER_OTHER)
+ field->filter_type = filter_assign_type(type);
+
+ list_add(&field->link, &user->fields);
+
+ /*
+	 * Min size from user writes that are required; this does not include
+ * the size of trace_entry (common fields).
+ */
+ user->min_size = (offset + size) - sizeof(struct trace_entry);
+
+ return 0;
+}
+
+/*
+ * Parses the values of a field within the description
+ * Format: type name [size]
+ */
+static int user_event_parse_field(char *field, struct user_event *user,
+ u32 *offset)
+{
+ char *part, *type, *name;
+ u32 depth = 0, saved_offset = *offset;
+ int len, size = -EINVAL;
+ bool is_struct = false;
+
+ field = skip_spaces(field);
+
+ if (*field == '\0')
+ return 0;
+
+ /* Handle types that have a space within */
+ len = str_has_prefix(field, "unsigned ");
+ if (len)
+ goto skip_next;
+
+ len = str_has_prefix(field, "struct ");
+ if (len) {
+ is_struct = true;
+ goto skip_next;
+ }
+
+ len = str_has_prefix(field, "__data_loc unsigned ");
+ if (len)
+ goto skip_next;
+
+ len = str_has_prefix(field, "__data_loc ");
+ if (len)
+ goto skip_next;
+
+ len = str_has_prefix(field, "__rel_loc unsigned ");
+ if (len)
+ goto skip_next;
+
+ len = str_has_prefix(field, "__rel_loc ");
+ if (len)
+ goto skip_next;
+
+ goto parse;
+skip_next:
+ type = field;
+ field = strpbrk(field + len, " ");
+
+ if (field == NULL)
+ return -EINVAL;
+
+ *field++ = '\0';
+ depth++;
+parse:
+ name = NULL;
+
+ while ((part = strsep(&field, " ")) != NULL) {
+ switch (depth++) {
+ case FIELD_DEPTH_TYPE:
+ type = part;
+ break;
+ case FIELD_DEPTH_NAME:
+ name = part;
+ break;
+ case FIELD_DEPTH_SIZE:
+ if (!is_struct)
+ return -EINVAL;
+
+ if (kstrtou32(part, 10, &size))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ if (depth < FIELD_DEPTH_SIZE || !name)
+ return -EINVAL;
+
+ if (depth == FIELD_DEPTH_SIZE)
+ size = user_field_size(type);
+
+ if (size == 0)
+ return -EINVAL;
+
+ if (size < 0)
+ return size;
+
+ *offset = saved_offset + size;
+
+ return user_event_add_field(user, type, name, saved_offset, size,
+ type[0] != 'u', FILTER_OTHER);
+}
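+
+/*
+ * Worked examples (illustrative only): "u32 id" parses as type "u32" and
+ * name "id", with the size taken from user_field_size(). "struct mystruct
+ * payload 32" keeps "struct mystruct" as the type, takes name "payload"
+ * and requires the explicit trailing size (32 bytes), since an opaque
+ * struct cannot be sized by user_field_size().
+ */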
+
+static int user_event_parse_fields(struct user_event *user, char *args)
+{
+ char *field;
+ u32 offset = sizeof(struct trace_entry);
+ int ret = -EINVAL;
+
+ if (args == NULL)
+ return 0;
+
+ while ((field = strsep(&args, ";")) != NULL) {
+ ret = user_event_parse_field(field, user, &offset);
+
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static struct trace_event_fields user_event_fields_array[1];
+
+static const char *user_field_format(const char *type)
+{
+ if (strcmp(type, "s64") == 0)
+ return "%lld";
+ if (strcmp(type, "u64") == 0)
+ return "%llu";
+ if (strcmp(type, "s32") == 0)
+ return "%d";
+ if (strcmp(type, "u32") == 0)
+ return "%u";
+ if (strcmp(type, "int") == 0)
+ return "%d";
+ if (strcmp(type, "unsigned int") == 0)
+ return "%u";
+ if (strcmp(type, "s16") == 0)
+ return "%d";
+ if (strcmp(type, "u16") == 0)
+ return "%u";
+ if (strcmp(type, "short") == 0)
+ return "%d";
+ if (strcmp(type, "unsigned short") == 0)
+ return "%u";
+ if (strcmp(type, "s8") == 0)
+ return "%d";
+ if (strcmp(type, "u8") == 0)
+ return "%u";
+ if (strcmp(type, "char") == 0)
+ return "%d";
+ if (strcmp(type, "unsigned char") == 0)
+ return "%u";
+ if (strstr(type, "char[") != NULL)
+ return "%s";
+
+	/* Unknown, likely a struct; allowed, treat as 64-bit */
+ return "%llu";
+}
+
+static bool user_field_is_dyn_string(const char *type, const char **str_func)
+{
+ if (str_has_prefix(type, "__data_loc ")) {
+ *str_func = "__get_str";
+ goto check;
+ }
+
+ if (str_has_prefix(type, "__rel_loc ")) {
+ *str_func = "__get_rel_str";
+ goto check;
+ }
+
+ return false;
+check:
+ return strstr(type, "char") != NULL;
+}
+
+#define LEN_OR_ZERO (len ? len - pos : 0)
+static int user_dyn_field_set_string(int argc, const char **argv, int *iout,
+ char *buf, int len, bool *colon)
+{
+ int pos = 0, i = *iout;
+
+ *colon = false;
+
+ for (; i < argc; ++i) {
+ if (i != *iout)
+ pos += snprintf(buf + pos, LEN_OR_ZERO, " ");
+
+ pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", argv[i]);
+
+ if (strchr(argv[i], ';')) {
+ ++i;
+ *colon = true;
+ break;
+ }
+ }
+
+ /* Actual set, advance i */
+ if (len != 0)
+ *iout = i;
+
+ return pos + 1;
+}
+
+static int user_field_set_string(struct ftrace_event_field *field,
+ char *buf, int len, bool colon)
+{
+ int pos = 0;
+
+ pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->type);
+ pos += snprintf(buf + pos, LEN_OR_ZERO, " ");
+ pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->name);
+
+ if (str_has_prefix(field->type, "struct "))
+ pos += snprintf(buf + pos, LEN_OR_ZERO, " %d", field->size);
+
+ if (colon)
+ pos += snprintf(buf + pos, LEN_OR_ZERO, ";");
+
+ return pos + 1;
+}
+
+static int user_event_set_print_fmt(struct user_event *user, char *buf, int len)
+{
+ struct ftrace_event_field *field;
+ struct list_head *head = &user->fields;
+ int pos = 0, depth = 0;
+ const char *str_func;
+
+ pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
+
+ list_for_each_entry_reverse(field, head, link) {
+ if (depth != 0)
+ pos += snprintf(buf + pos, LEN_OR_ZERO, " ");
+
+ pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s",
+ field->name, user_field_format(field->type));
+
+ depth++;
+ }
+
+ pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
+
+ list_for_each_entry_reverse(field, head, link) {
+ if (user_field_is_dyn_string(field->type, &str_func))
+ pos += snprintf(buf + pos, LEN_OR_ZERO,
+ ", %s(%s)", str_func, field->name);
+ else
+ pos += snprintf(buf + pos, LEN_OR_ZERO,
+ ", REC->%s", field->name);
+ }
+
+ return pos + 1;
+}
+#undef LEN_OR_ZERO
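+
+/*
+ * Worked example (illustrative only): for an event registered as
+ * "test char[20] msg;unsigned int id" the resulting print format is
+ *
+ * "msg=%s id=%u", REC->msg, REC->id
+ *
+ * while a "__data_loc char[] msg" field would be emitted through
+ * __get_str(msg) instead.
+ */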
+
+static int user_event_create_print_fmt(struct user_event *user)
+{
+ char *print_fmt;
+ int len;
+
+ len = user_event_set_print_fmt(user, NULL, 0);
+
+ print_fmt = kmalloc(len, GFP_KERNEL_ACCOUNT);
+
+ if (!print_fmt)
+ return -ENOMEM;
+
+ user_event_set_print_fmt(user, print_fmt, len);
+
+ user->call.print_fmt = print_fmt;
+
+ return 0;
+}
+
+static enum print_line_t user_event_print_trace(struct trace_iterator *iter,
+ int flags,
+ struct trace_event *event)
+{
+ return print_event_fields(iter, event);
+}
+
+static struct trace_event_functions user_event_funcs = {
+ .trace = user_event_print_trace,
+};
+
+static int user_event_set_call_visible(struct user_event *user, bool visible)
+{
+ int ret;
+ const struct cred *old_cred;
+ struct cred *cred;
+
+ cred = prepare_creds();
+
+ if (!cred)
+ return -ENOMEM;
+
+ /*
+ * While by default tracefs is locked down, systems can be configured
+	 * to allow user_event files to be less locked down. The extreme case
+	 * is "other" having read/write access to user_events_data/status.
+ *
+ * When not locked down, processes may not have permissions to
+ * add/remove calls themselves to tracefs. We need to temporarily
+ * switch to root file permission to allow for this scenario.
+ */
+ cred->fsuid = GLOBAL_ROOT_UID;
+
+ old_cred = override_creds(cred);
+
+ if (visible)
+ ret = trace_add_event_call(&user->call);
+ else
+ ret = trace_remove_event_call(&user->call);
+
+ revert_creds(old_cred);
+ put_cred(cred);
+
+ return ret;
+}
+
+static int destroy_user_event(struct user_event *user)
+{
+ int ret = 0;
+
+ lockdep_assert_held(&event_mutex);
+
+ /* Must destroy fields before call removal */
+ user_event_destroy_fields(user);
+
+ ret = user_event_set_call_visible(user, false);
+
+ if (ret)
+ return ret;
+
+ dyn_event_remove(&user->devent);
+ hash_del(&user->node);
+
+ user_event_destroy_validators(user);
+ kfree(user->call.print_fmt);
+ kfree(EVENT_NAME(user));
+ kfree(user);
+
+ if (current_user_events > 0)
+ current_user_events--;
+ else
+ pr_alert("BUG: Bad current_user_events\n");
+
+ return ret;
+}
+
+static struct user_event *find_user_event(struct user_event_group *group,
+ char *name, u32 *outkey)
+{
+ struct user_event *user;
+ u32 key = user_event_key(name);
+
+ *outkey = key;
+
+ hash_for_each_possible(group->register_table, user, node, key)
+ if (!strcmp(EVENT_NAME(user), name))
+ return user_event_get(user);
+
+ return NULL;
+}
+
+static int user_event_validate(struct user_event *user, void *data, int len)
+{
+ struct list_head *head = &user->validators;
+ struct user_event_validator *validator;
+ void *pos, *end = data + len;
+ u32 loc, offset, size;
+
+ list_for_each_entry(validator, head, user_event_link) {
+ pos = data + validator->offset;
+
+ /* Already done min_size check, no bounds check here */
+ loc = *(u32 *)pos;
+ offset = loc & 0xffff;
+ size = loc >> 16;
+
+ if (likely(validator->flags & VALIDATOR_REL))
+ pos += offset + sizeof(loc);
+ else
+ pos = data + offset;
+
+ pos += size;
+
+ if (unlikely(pos > end))
+ return -EFAULT;
+
+ if (likely(validator->flags & VALIDATOR_ENSURE_NULL))
+ if (unlikely(*(char *)(pos - 1) != '\0'))
+ return -EFAULT;
+ }
+
+ return 0;
+}
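+
+/*
+ * Worked example (illustrative only): for a "__data_loc char[]" field, a
+ * loc word of 0x00060010 decodes to offset 0x10 and size 6. The data must
+ * then end within the payload, and because char data sets
+ * VALIDATOR_ENSURE_NULL, the final byte (offset 0x10 + 6 - 1) must be '\0'.
+ */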
+
+/*
+ * Writes the user supplied payload out to a trace file.
+ */
+static void user_event_ftrace(struct user_event *user, struct iov_iter *i,
+ void *tpdata, bool *faulted)
+{
+ struct trace_event_file *file;
+ struct trace_entry *entry;
+ struct trace_event_buffer event_buffer;
+ size_t size = sizeof(*entry) + i->count;
+
+ file = (struct trace_event_file *)tpdata;
+
+ if (!file ||
+ !(file->flags & EVENT_FILE_FL_ENABLED) ||
+ trace_trigger_soft_disabled(file))
+ return;
+
+	/* Allocates and fills trace_entry; entry + 1 is the data payload */
+ entry = trace_event_buffer_reserve(&event_buffer, file, size);
+
+ if (unlikely(!entry))
+ return;
+
+ if (unlikely(i->count != 0 && !copy_nofault(entry + 1, i->count, i)))
+ goto discard;
+
+ if (!list_empty(&user->validators) &&
+ unlikely(user_event_validate(user, entry, size)))
+ goto discard;
+
+ trace_event_buffer_commit(&event_buffer);
+
+ return;
+discard:
+ *faulted = true;
+ __trace_event_discard_commit(event_buffer.buffer,
+ event_buffer.event);
+}
+
+#ifdef CONFIG_PERF_EVENTS
+/*
+ * Writes the user supplied payload out to perf ring buffer.
+ */
+static void user_event_perf(struct user_event *user, struct iov_iter *i,
+ void *tpdata, bool *faulted)
+{
+ struct hlist_head *perf_head;
+
+ perf_head = this_cpu_ptr(user->call.perf_events);
+
+ if (perf_head && !hlist_empty(perf_head)) {
+ struct trace_entry *perf_entry;
+ struct pt_regs *regs;
+ size_t size = sizeof(*perf_entry) + i->count;
+ int context;
+
+ perf_entry = perf_trace_buf_alloc(ALIGN(size, 8),
+ &regs, &context);
+
+ if (unlikely(!perf_entry))
+ return;
+
+ perf_fetch_caller_regs(regs);
+
+ if (unlikely(i->count != 0 && !copy_nofault(perf_entry + 1, i->count, i)))
+ goto discard;
+
+ if (!list_empty(&user->validators) &&
+ unlikely(user_event_validate(user, perf_entry, size)))
+ goto discard;
+
+ perf_trace_buf_submit(perf_entry, size, context,
+ user->call.event.type, 1, regs,
+ perf_head, NULL);
+
+ return;
+discard:
+ *faulted = true;
+ perf_swevent_put_recursion_context(context);
+ }
+}
+#endif
+
+/*
+ * Update the enabled bit among all user processes.
+ */
+static void update_enable_bit_for(struct user_event *user)
+{
+ struct tracepoint *tp = &user->tracepoint;
+ char status = 0;
+
+ if (atomic_read(&tp->key.enabled) > 0) {
+ struct tracepoint_func *probe_func_ptr;
+ user_event_func_t probe_func;
+
+ rcu_read_lock_sched();
+
+ probe_func_ptr = rcu_dereference_sched(tp->funcs);
+
+ if (probe_func_ptr) {
+ do {
+ probe_func = probe_func_ptr->func;
+
+ if (probe_func == user_event_ftrace)
+ status |= EVENT_STATUS_FTRACE;
+#ifdef CONFIG_PERF_EVENTS
+ else if (probe_func == user_event_perf)
+ status |= EVENT_STATUS_PERF;
+#endif
+ else
+ status |= EVENT_STATUS_OTHER;
+ } while ((++probe_func_ptr)->func);
+ }
+
+ rcu_read_unlock_sched();
+ }
+
+ user->status = status;
+
+ user_event_enabler_update(user);
+}
+
+/*
+ * Register callback for our events from tracing sub-systems.
+ */
+static int user_event_reg(struct trace_event_call *call,
+ enum trace_reg type,
+ void *data)
+{
+ struct user_event *user = (struct user_event *)call->data;
+ int ret = 0;
+
+ if (!user)
+ return -ENOENT;
+
+ switch (type) {
+ case TRACE_REG_REGISTER:
+ ret = tracepoint_probe_register(call->tp,
+ call->class->probe,
+ data);
+ if (!ret)
+ goto inc;
+ break;
+
+ case TRACE_REG_UNREGISTER:
+ tracepoint_probe_unregister(call->tp,
+ call->class->probe,
+ data);
+ goto dec;
+
+#ifdef CONFIG_PERF_EVENTS
+ case TRACE_REG_PERF_REGISTER:
+ ret = tracepoint_probe_register(call->tp,
+ call->class->perf_probe,
+ data);
+ if (!ret)
+ goto inc;
+ break;
+
+ case TRACE_REG_PERF_UNREGISTER:
+ tracepoint_probe_unregister(call->tp,
+ call->class->perf_probe,
+ data);
+ goto dec;
+
+ case TRACE_REG_PERF_OPEN:
+ case TRACE_REG_PERF_CLOSE:
+ case TRACE_REG_PERF_ADD:
+ case TRACE_REG_PERF_DEL:
+ break;
+#endif
+ }
+
+ return ret;
+inc:
+ user_event_get(user);
+ update_enable_bit_for(user);
+ return 0;
+dec:
+ update_enable_bit_for(user);
+ user_event_put(user, true);
+ return 0;
+}
+
+static int user_event_create(const char *raw_command)
+{
+ struct user_event_group *group;
+ struct user_event *user;
+ char *name;
+ int ret;
+
+ if (!str_has_prefix(raw_command, USER_EVENTS_PREFIX))
+ return -ECANCELED;
+
+ raw_command += USER_EVENTS_PREFIX_LEN;
+ raw_command = skip_spaces(raw_command);
+
+ name = kstrdup(raw_command, GFP_KERNEL_ACCOUNT);
+
+ if (!name)
+ return -ENOMEM;
+
+ group = current_user_event_group();
+
+ if (!group) {
+ kfree(name);
+ return -ENOENT;
+ }
+
+ mutex_lock(&group->reg_mutex);
+
+	/* Dyn events persist, otherwise they would be cleaned up immediately */
+ ret = user_event_parse_cmd(group, name, &user, USER_EVENT_REG_PERSIST);
+
+ if (!ret)
+ user_event_put(user, false);
+
+ mutex_unlock(&group->reg_mutex);
+
+ if (ret)
+ kfree(name);
+
+ return ret;
+}
+
+static int user_event_show(struct seq_file *m, struct dyn_event *ev)
+{
+ struct user_event *user = container_of(ev, struct user_event, devent);
+ struct ftrace_event_field *field;
+ struct list_head *head;
+ int depth = 0;
+
+ seq_printf(m, "%s%s", USER_EVENTS_PREFIX, EVENT_NAME(user));
+
+ head = trace_get_fields(&user->call);
+
+ list_for_each_entry_reverse(field, head, link) {
+ if (depth == 0)
+ seq_puts(m, " ");
+ else
+ seq_puts(m, "; ");
+
+ seq_printf(m, "%s %s", field->type, field->name);
+
+ if (str_has_prefix(field->type, "struct "))
+ seq_printf(m, " %d", field->size);
+
+ depth++;
+ }
+
+ seq_puts(m, "\n");
+
+ return 0;
+}
+
+static bool user_event_is_busy(struct dyn_event *ev)
+{
+ struct user_event *user = container_of(ev, struct user_event, devent);
+
+ return !user_event_last_ref(user);
+}
+
+static int user_event_free(struct dyn_event *ev)
+{
+ struct user_event *user = container_of(ev, struct user_event, devent);
+
+ if (!user_event_last_ref(user))
+ return -EBUSY;
+
+ if (!user_event_capable(user->reg_flags))
+ return -EPERM;
+
+ return destroy_user_event(user);
+}
+
+static bool user_field_match(struct ftrace_event_field *field, int argc,
+ const char **argv, int *iout)
+{
+ char *field_name = NULL, *dyn_field_name = NULL;
+ bool colon = false, match = false;
+ int dyn_len, len;
+
+ if (*iout >= argc)
+ return false;
+
+ dyn_len = user_dyn_field_set_string(argc, argv, iout, dyn_field_name,
+ 0, &colon);
+
+ len = user_field_set_string(field, field_name, 0, colon);
+
+ if (dyn_len != len)
+ return false;
+
+ dyn_field_name = kmalloc(dyn_len, GFP_KERNEL);
+ field_name = kmalloc(len, GFP_KERNEL);
+
+ if (!dyn_field_name || !field_name)
+ goto out;
+
+ user_dyn_field_set_string(argc, argv, iout, dyn_field_name,
+ dyn_len, &colon);
+
+ user_field_set_string(field, field_name, len, colon);
+
+ match = strcmp(dyn_field_name, field_name) == 0;
+out:
+ kfree(dyn_field_name);
+ kfree(field_name);
+
+ return match;
+}
+
+static bool user_fields_match(struct user_event *user, int argc,
+ const char **argv)
+{
+ struct ftrace_event_field *field;
+ struct list_head *head = &user->fields;
+ int i = 0;
+
+ list_for_each_entry_reverse(field, head, link) {
+ if (!user_field_match(field, argc, argv, &i))
+ return false;
+ }
+
+ if (i != argc)
+ return false;
+
+ return true;
+}
+
+static bool user_event_match(const char *system, const char *event,
+ int argc, const char **argv, struct dyn_event *ev)
+{
+ struct user_event *user = container_of(ev, struct user_event, devent);
+ bool match;
+
+ match = strcmp(EVENT_NAME(user), event) == 0 &&
+ (!system || strcmp(system, USER_EVENTS_SYSTEM) == 0);
+
+ if (match && argc > 0)
+ match = user_fields_match(user, argc, argv);
+ else if (match && argc == 0)
+ match = list_empty(&user->fields);
+
+ return match;
+}
+
+static struct dyn_event_operations user_event_dops = {
+ .create = user_event_create,
+ .show = user_event_show,
+ .is_busy = user_event_is_busy,
+ .free = user_event_free,
+ .match = user_event_match,
+};
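
Because user_event_dops wires these callbacks into the dynamic events interface, events can also be created by writing a prefixed command to the tracefs dynamic_events file, and such events persist (USER_EVENT_REG_PERSIST) until explicitly deleted. A hedged user-space sketch follows; the "u:" prefix is assumed to be USER_EVENTS_PREFIX, and the tracefs path and event description are illustrative, not taken from this patch.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Create a persistent user event via dyn_events rather than the ioctl path. */
static int add_user_event_cmd(void)
{
	/* "u:" is assumed to be USER_EVENTS_PREFIX; the description is illustrative. */
	static const char cmd[] = "u:mini_event u32 count\n";
	int fd, ret;

	fd = open("/sys/kernel/tracing/dynamic_events", O_WRONLY | O_APPEND);
	if (fd < 0)
		return -1;

	ret = write(fd, cmd, strlen(cmd)) < 0 ? -1 : 0;
	close(fd);
	return ret;
}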
+
+static int user_event_trace_register(struct user_event *user)
+{
+ int ret;
+
+ ret = register_trace_event(&user->call.event);
+
+ if (!ret)
+ return -ENODEV;
+
+ ret = user_event_set_call_visible(user, true);
+
+ if (ret)
+ unregister_trace_event(&user->call.event);
+
+ return ret;
+}
+
+/*
+ * Parses the event name, arguments and flags, then registers the event if
+ * successful. On success this function takes ownership of the name buffer;
+ * on failure the caller remains responsible for freeing it.
+ * Upon success the returned user_event has its ref count increased by 1.
+ */
+static int user_event_parse(struct user_event_group *group, char *name,
+ char *args, char *flags,
+ struct user_event **newuser, int reg_flags)
+{
+ int ret;
+ u32 key;
+ struct user_event *user;
+ int argc = 0;
+ char **argv;
+
+ /* Currently don't support any text based flags */
+ if (flags != NULL)
+ return -EINVAL;
+
+ if (!user_event_capable(reg_flags))
+ return -EPERM;
+
+ /* Prevent dyn_event from racing */
+ mutex_lock(&event_mutex);
+ user = find_user_event(group, name, &key);
+ mutex_unlock(&event_mutex);
+
+ if (user) {
+ if (args) {
+ argv = argv_split(GFP_KERNEL, args, &argc);
+ if (!argv) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ret = user_fields_match(user, argc, (const char **)argv);
+ argv_free(argv);
+
+ } else
+ ret = list_empty(&user->fields);
+
+ if (ret) {
+ *newuser = user;
+ /*
+ * The name was allocated by the caller; free it here since the
+ * event already exists. The caller only frees it on failure paths.
+ */
+ kfree(name);
+ } else {
+ ret = -EADDRINUSE;
+ goto error;
+ }
+
+ return 0;
+error:
+ user_event_put(user, false);
+ return ret;
+ }
+
+ user = kzalloc(sizeof(*user), GFP_KERNEL_ACCOUNT);
+
+ if (!user)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&user->class.fields);
+ INIT_LIST_HEAD(&user->fields);
+ INIT_LIST_HEAD(&user->validators);
+
+ user->group = group;
+ user->tracepoint.name = name;
+
+ ret = user_event_parse_fields(user, args);
+
+ if (ret)
+ goto put_user;
+
+ ret = user_event_create_print_fmt(user);
+
+ if (ret)
+ goto put_user;
+
+ user->call.data = user;
+ user->call.class = &user->class;
+ user->call.name = name;
+ user->call.flags = TRACE_EVENT_FL_TRACEPOINT;
+ user->call.tp = &user->tracepoint;
+ user->call.event.funcs = &user_event_funcs;
+ user->class.system = group->system_name;
+
+ user->class.fields_array = user_event_fields_array;
+ user->class.get_fields = user_event_get_fields;
+ user->class.reg = user_event_reg;
+ user->class.probe = user_event_ftrace;
+#ifdef CONFIG_PERF_EVENTS
+ user->class.perf_probe = user_event_perf;
+#endif
+
+ mutex_lock(&event_mutex);
+
+ if (current_user_events >= max_user_events) {
+ ret = -EMFILE;
+ goto put_user_lock;
+ }
+
+ ret = user_event_trace_register(user);
+
+ if (ret)
+ goto put_user_lock;
+
+ user->reg_flags = reg_flags;
+
+ if (user->reg_flags & USER_EVENT_REG_PERSIST) {
+ /* Ensure we track self ref and caller ref (2) */
+ refcount_set(&user->refcnt, 2);
+ } else {
+ /* Ensure we track only caller ref (1) */
+ refcount_set(&user->refcnt, 1);
+ }
+
+ dyn_event_init(&user->devent, &user_event_dops);
+ dyn_event_add(&user->devent, &user->call);
+ hash_add(group->register_table, &user->node, key);
+ current_user_events++;
+
+ mutex_unlock(&event_mutex);
+
+ *newuser = user;
+ return 0;
+put_user_lock:
+ mutex_unlock(&event_mutex);
+put_user:
+ user_event_destroy_fields(user);
+ user_event_destroy_validators(user);
+ kfree(user->call.print_fmt);
+ kfree(user);
+ return ret;
+}
+
+/*
+ * Deletes a previously created event if it is no longer being used.
+ */
+static int delete_user_event(struct user_event_group *group, char *name)
+{
+ u32 key;
+ struct user_event *user = find_user_event(group, name, &key);
+
+ if (!user)
+ return -ENOENT;
+
+ user_event_put(user, true);
+
+ if (!user_event_last_ref(user))
+ return -EBUSY;
+
+ if (!user_event_capable(user->reg_flags))
+ return -EPERM;
+
+ return destroy_user_event(user);
+}
+
+/*
+ * Validates the user payload and writes via iterator.
+ */
+static ssize_t user_events_write_core(struct file *file, struct iov_iter *i)
+{
+ struct user_event_file_info *info = file->private_data;
+ struct user_event_refs *refs;
+ struct user_event *user = NULL;
+ struct tracepoint *tp;
+ ssize_t ret = i->count;
+ int idx;
+
+ if (unlikely(copy_from_iter(&idx, sizeof(idx), i) != sizeof(idx)))
+ return -EFAULT;
+
+ if (idx < 0)
+ return -EINVAL;
+
+ rcu_read_lock_sched();
+
+ refs = rcu_dereference_sched(info->refs);
+
+ /*
+ * The refs->events array is protected by RCU, and new items may be
+ * added. But the user retrieved from indexing into the events array
+ * shall be immutable while the file is opened.
+ */
+ if (likely(refs && idx < refs->count))
+ user = refs->events[idx];
+
+ rcu_read_unlock_sched();
+
+ if (unlikely(user == NULL))
+ return -ENOENT;
+
+ if (unlikely(i->count < user->min_size))
+ return -EINVAL;
+
+ tp = &user->tracepoint;
+
+ /*
+ * It's possible that key.enabled becomes disabled after this check;
+ * we don't mind if a few events slip through in that window.
+ */
+ if (likely(atomic_read(&tp->key.enabled) > 0)) {
+ struct tracepoint_func *probe_func_ptr;
+ user_event_func_t probe_func;
+ struct iov_iter copy;
+ void *tpdata;
+ bool faulted;
+
+ if (unlikely(fault_in_iov_iter_readable(i, i->count)))
+ return -EFAULT;
+
+ faulted = false;
+
+ rcu_read_lock_sched();
+
+ probe_func_ptr = rcu_dereference_sched(tp->funcs);
+
+ if (probe_func_ptr) {
+ do {
+ copy = *i;
+ probe_func = probe_func_ptr->func;
+ tpdata = probe_func_ptr->data;
+ probe_func(user, &copy, tpdata, &faulted);
+ } while ((++probe_func_ptr)->func);
+ }
+
+ rcu_read_unlock_sched();
+
+ if (unlikely(faulted))
+ return -EFAULT;
+ } else
+ return -EBADF;
+
+ return ret;
+}
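
The write path above strips a leading int, the write index, from every payload before dispatching the remainder to the registered probes, and the remainder must be at least min_size bytes and match the registered field layout. Below is a minimal user-space sketch of such a payload; it assumes an event registered with the description "u32 count", and the struct and function names are hypothetical -- the only hard requirement taken from the code is that the first 4 bytes are the write_index returned at registration.

#include <unistd.h>

/* Payload layout for an event registered as "mini_event u32 count" (hypothetical). */
struct mini_payload {
	int write_index;        /* consumed first by user_events_write_core() */
	unsigned int count;     /* matches the registered "u32 count" field */
};

static int emit_mini_event(int data_fd, int write_index, unsigned int count)
{
	struct mini_payload p = { .write_index = write_index, .count = count };

	/* A plain write() is enough; writev() with the index in iov[0] also works. */
	return write(data_fd, &p, sizeof(p)) == (ssize_t)sizeof(p) ? 0 : -1;
}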
+
+static int user_events_open(struct inode *node, struct file *file)
+{
+ struct user_event_group *group;
+ struct user_event_file_info *info;
+
+ group = current_user_event_group();
+
+ if (!group)
+ return -ENOENT;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL_ACCOUNT);
+
+ if (!info)
+ return -ENOMEM;
+
+ info->group = group;
+
+ file->private_data = info;
+
+ return 0;
+}
+
+static ssize_t user_events_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct iov_iter i;
+
+ if (unlikely(*ppos != 0))
+ return -EFAULT;
+
+ if (unlikely(import_ubuf(ITER_SOURCE, (char __user *)ubuf, count, &i)))
+ return -EFAULT;
+
+ return user_events_write_core(file, &i);
+}
+
+static ssize_t user_events_write_iter(struct kiocb *kp, struct iov_iter *i)
+{
+ return user_events_write_core(kp->ki_filp, i);
+}
+
+static int user_events_ref_add(struct user_event_file_info *info,
+ struct user_event *user)
+{
+ struct user_event_group *group = info->group;
+ struct user_event_refs *refs, *new_refs;
+ int i, size, count = 0;
+
+ refs = rcu_dereference_protected(info->refs,
+ lockdep_is_held(&group->reg_mutex));
+
+ if (refs) {
+ count = refs->count;
+
+ for (i = 0; i < count; ++i)
+ if (refs->events[i] == user)
+ return i;
+ }
+
+ size = struct_size(refs, events, count + 1);
+
+ new_refs = kzalloc(size, GFP_KERNEL_ACCOUNT);
+
+ if (!new_refs)
+ return -ENOMEM;
+
+ new_refs->count = count + 1;
+
+ for (i = 0; i < count; ++i)
+ new_refs->events[i] = refs->events[i];
+
+ new_refs->events[i] = user_event_get(user);
+
+ rcu_assign_pointer(info->refs, new_refs);
+
+ if (refs)
+ kfree_rcu(refs, rcu);
+
+ return i;
+}
+
+static long user_reg_get(struct user_reg __user *ureg, struct user_reg *kreg)
+{
+ u32 size;
+ long ret;
+
+ ret = get_user(size, &ureg->size);
+
+ if (ret)
+ return ret;
+
+ if (size > PAGE_SIZE)
+ return -E2BIG;
+
+ if (size < offsetofend(struct user_reg, write_index))
+ return -EINVAL;
+
+ ret = copy_struct_from_user(kreg, sizeof(*kreg), ureg, size);
+
+ if (ret)
+ return ret;
+
+ /* Ensure only valid flags */
+ if (kreg->flags & ~(USER_EVENT_REG_MAX-1))
+ return -EINVAL;
+
+ /* Ensure supported size */
+ switch (kreg->enable_size) {
+ case 4:
+ /* 32-bit */
+ break;
+#if BITS_PER_LONG >= 64
+ case 8:
+ /* 64-bit */
+ break;
+#endif
+ default:
+ return -EINVAL;
+ }
+
+ /* Ensure natural alignment */
+ if (kreg->enable_addr % kreg->enable_size)
+ return -EINVAL;
+
+ /* Ensure bit range for size */
+ if (kreg->enable_bit > (kreg->enable_size * BITS_PER_BYTE) - 1)
+ return -EINVAL;
+
+ /* Ensure accessible */
+ if (!access_ok((const void __user *)(uintptr_t)kreg->enable_addr,
+ kreg->enable_size))
+ return -EFAULT;
+
+ kreg->size = size;
+
+ return 0;
+}
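
user_reg_get() fixes the registration ABI: the struct must cover at least through write_index, enable_size must be 4 (or 8 on 64-bit kernels), enable_addr must be naturally aligned and accessible, and enable_bit must fall inside the enable word. The user-space sketch below satisfies those checks; the <linux/user_events.h> header name, the event description and the chosen bit are assumptions for illustration, not taken from this patch.

#include <linux/user_events.h>   /* struct user_reg, DIAG_IOCSREG -- assumed uapi header */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

static __u32 enabled;   /* 4-byte enable word; static data is naturally aligned */

static int register_mini_event(int data_fd)
{
	struct user_reg reg;

	memset(&reg, 0, sizeof(reg));              /* unused fields and unknown flags must be zero */
	reg.size = sizeof(reg);                    /* >= offsetofend(struct user_reg, write_index) */
	reg.enable_bit = 31;                       /* must be < enable_size * BITS_PER_BYTE */
	reg.enable_size = sizeof(enabled);         /* only 4 (or 8 on 64-bit kernels) passes */
	reg.enable_addr = (__u64)(uintptr_t)&enabled;   /* naturally aligned and accessible */
	reg.name_args = (__u64)(uintptr_t)"mini_event u32 count";

	if (ioctl(data_fd, DIAG_IOCSREG, &reg) < 0)
		return -1;

	return (int)reg.write_index;               /* prepended to every later write */
}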
+
+/*
+ * Registers a user_event on behalf of a user process.
+ */
+static long user_events_ioctl_reg(struct user_event_file_info *info,
+ unsigned long uarg)
+{
+ struct user_reg __user *ureg = (struct user_reg __user *)uarg;
+ struct user_reg reg;
+ struct user_event *user;
+ struct user_event_enabler *enabler;
+ char *name;
+ long ret;
+ int write_result;
+
+ ret = user_reg_get(ureg, &reg);
+
+ if (ret)
+ return ret;
+
+ /*
+ * Prevent users from using the same address and bit multiple times
+ * within the same mm address space. This can cause unexpected behavior
+ * for user processes that is far easier to debug if this is explicitly
+ * an error upon registering.
+ */
+ if (current_user_event_enabler_exists((unsigned long)reg.enable_addr,
+ reg.enable_bit))
+ return -EADDRINUSE;
+
+ name = strndup_user((const char __user *)(uintptr_t)reg.name_args,
+ MAX_EVENT_DESC);
+
+ if (IS_ERR(name)) {
+ ret = PTR_ERR(name);
+ return ret;
+ }
+
+ ret = user_event_parse_cmd(info->group, name, &user, reg.flags);
+
+ if (ret) {
+ kfree(name);
+ return ret;
+ }
+
+ ret = user_events_ref_add(info, user);
+
+ /* No longer need parse ref, ref_add either worked or not */
+ user_event_put(user, false);
+
+ /* Positive number is index and valid */
+ if (ret < 0)
+ return ret;
+
+ /*
+ * user_events_ref_add succeeded:
+ * At this point we have a user_event; its lifetime is bound by the
+ * reference count, not this file. If anything fails, the user_event
+ * still has a reference until the file is released. During release
+ * any remaining references (from user_events_ref_add) are decremented.
+ *
+ * Attempt to create an enabler, whose lifetime is tied to the event
+ * in the same way. Once the task that caused the enabler to be
+ * created exits or issues exec() then the enablers it has created
+ * will be destroyed and the ref to the event will be decremented.
+ */
+ enabler = user_event_enabler_create(&reg, user, &write_result);
+
+ if (!enabler)
+ return -ENOMEM;
+
+ /* Write failed/faulted, give error back to caller */
+ if (write_result)
+ return write_result;
+
+ put_user((u32)ret, &ureg->write_index);
+
+ return 0;
+}
+
+/*
+ * Deletes a user_event on behalf of a user process.
+ */
+static long user_events_ioctl_del(struct user_event_file_info *info,
+ unsigned long uarg)
+{
+ void __user *ubuf = (void __user *)uarg;
+ char *name;
+ long ret;
+
+ name = strndup_user(ubuf, MAX_EVENT_DESC);
+
+ if (IS_ERR(name))
+ return PTR_ERR(name);
+
+ /* event_mutex prevents dyn_event from racing */
+ mutex_lock(&event_mutex);
+ ret = delete_user_event(info->group, name);
+ mutex_unlock(&event_mutex);
+
+ kfree(name);
+
+ return ret;
+}
+
+static long user_unreg_get(struct user_unreg __user *ureg,
+ struct user_unreg *kreg)
+{
+ u32 size;
+ long ret;
+
+ ret = get_user(size, &ureg->size);
+
+ if (ret)
+ return ret;
+
+ if (size > PAGE_SIZE)
+ return -E2BIG;
+
+ if (size < offsetofend(struct user_unreg, disable_addr))
+ return -EINVAL;
+
+ ret = copy_struct_from_user(kreg, sizeof(*kreg), ureg, size);
+
+ /* Ensure no reserved values, since we don't support any yet */
+ if (kreg->__reserved || kreg->__reserved2)
+ return -EINVAL;
+
+ return ret;
+}
+
+static int user_event_mm_clear_bit(struct user_event_mm *user_mm,
+ unsigned long uaddr, unsigned char bit,
+ unsigned long flags)
+{
+ struct user_event_enabler enabler;
+ int result;
+ int attempt = 0;
+
+ memset(&enabler, 0, sizeof(enabler));
+ enabler.addr = uaddr;
+ enabler.values = bit | flags;
+retry:
+ /* Prevents state changes from racing with new enablers */
+ mutex_lock(&event_mutex);
+
+ /* Force the bit to be cleared, since no event is attached */
+ mmap_read_lock(user_mm->mm);
+ result = user_event_enabler_write(user_mm, &enabler, false, &attempt);
+ mmap_read_unlock(user_mm->mm);
+
+ mutex_unlock(&event_mutex);
+
+ if (result) {
+ /* Attempt to fault-in and retry if it worked */
+ if (!user_event_mm_fault_in(user_mm, uaddr, attempt))
+ goto retry;
+ }
+
+ return result;
+}
+
+/*
+ * Unregisters an enablement address/bit within a task/user mm.
+ */
+static long user_events_ioctl_unreg(unsigned long uarg)
+{
+ struct user_unreg __user *ureg = (struct user_unreg __user *)uarg;
+ struct user_event_mm *mm = current->user_event_mm;
+ struct user_event_enabler *enabler, *next;
+ struct user_unreg reg;
+ unsigned long flags;
+ long ret;
+
+ ret = user_unreg_get(ureg, &reg);
+
+ if (ret)
+ return ret;
+
+ if (!mm)
+ return -ENOENT;
+
+ flags = 0;
+ ret = -ENOENT;
+
+ /*
+ * The freeing and faulting flags indicate whether the enabler is in
+ * use at all. When faulting is set, a page fault is being handled
+ * asynchronously; if freeing is set during that async fault, the
+ * enabler will be destroyed when the fault completes.
+ * If no async fault is happening, we can destroy it now since we hold
+ * the event_mutex during these checks.
+ */
+ mutex_lock(&event_mutex);
+
+ list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link) {
+ if (enabler->addr == reg.disable_addr &&
+ ENABLE_BIT(enabler) == reg.disable_bit) {
+ set_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler));
+
+ /* We must keep compat flags for the clear */
+ flags |= enabler->values & ENABLE_VAL_COMPAT_MASK;
+
+ if (!test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler)))
+ user_event_enabler_destroy(enabler, true);
+
+ /* Removed at least one */
+ ret = 0;
+ }
+ }
+
+ mutex_unlock(&event_mutex);
+
+ /* Ensure bit is now cleared for user, regardless of event status */
+ if (!ret)
+ ret = user_event_mm_clear_bit(mm, reg.disable_addr,
+ reg.disable_bit, flags);
+
+ return ret;
+}
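
The matching teardown goes through DIAG_IOCSUNREG with a struct user_unreg naming the address/bit pair to drop; the reserved fields must be zero, as user_unreg_get() above enforces. A sketch under the same assumptions as the registration example:

#include <linux/user_events.h>   /* struct user_unreg, DIAG_IOCSUNREG -- assumed uapi header */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

/* Drop the enabler installed for a given enable word/bit at registration time. */
static int unregister_mini_event(int data_fd, void *enable_addr, unsigned char bit)
{
	struct user_unreg unreg;

	memset(&unreg, 0, sizeof(unreg));          /* __reserved fields must stay zero */
	unreg.size = sizeof(unreg);
	unreg.disable_bit = bit;
	unreg.disable_addr = (__u64)(uintptr_t)enable_addr;

	return ioctl(data_fd, DIAG_IOCSUNREG, &unreg);
}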
+
+/*
+ * Handles the ioctl from user mode to register or alter operations.
+ */
+static long user_events_ioctl(struct file *file, unsigned int cmd,
+ unsigned long uarg)
+{
+ struct user_event_file_info *info = file->private_data;
+ struct user_event_group *group = info->group;
+ long ret = -ENOTTY;
+
+ switch (cmd) {
+ case DIAG_IOCSREG:
+ mutex_lock(&group->reg_mutex);
+ ret = user_events_ioctl_reg(info, uarg);
+ mutex_unlock(&group->reg_mutex);
+ break;
+
+ case DIAG_IOCSDEL:
+ mutex_lock(&group->reg_mutex);
+ ret = user_events_ioctl_del(info, uarg);
+ mutex_unlock(&group->reg_mutex);
+ break;
+
+ case DIAG_IOCSUNREG:
+ mutex_lock(&group->reg_mutex);
+ ret = user_events_ioctl_unreg(uarg);
+ mutex_unlock(&group->reg_mutex);
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Handles the final close of the file from user mode.
+ */
+static int user_events_release(struct inode *node, struct file *file)
+{
+ struct user_event_file_info *info = file->private_data;
+ struct user_event_group *group;
+ struct user_event_refs *refs;
+ int i;
+
+ if (!info)
+ return -EINVAL;
+
+ group = info->group;
+
+ /*
+ * Ensure refs cannot change under any situation by taking the
+ * register mutex during the final freeing of the references.
+ */
+ mutex_lock(&group->reg_mutex);
+
+ refs = info->refs;
+
+ if (!refs)
+ goto out;
+
+ /*
+ * The lifetime of refs has reached its end; it is tied to this file.
+ * The underlying user_events are ref counted, and cannot be freed.
+ * After this decrement, the user_events may be freed elsewhere.
+ */
+ for (i = 0; i < refs->count; ++i)
+ user_event_put(refs->events[i], false);
+
+out:
+ file->private_data = NULL;
+
+ mutex_unlock(&group->reg_mutex);
+
+ kfree(refs);
+ kfree(info);
+
+ return 0;
+}
+
+static const struct file_operations user_data_fops = {
+ .open = user_events_open,
+ .write = user_events_write,
+ .write_iter = user_events_write_iter,
+ .unlocked_ioctl = user_events_ioctl,
+ .release = user_events_release,
+};
+
+static void *user_seq_start(struct seq_file *m, loff_t *pos)
+{
+ if (*pos)
+ return NULL;
+
+ return (void *)1;
+}
+
+static void *user_seq_next(struct seq_file *m, void *p, loff_t *pos)
+{
+ ++*pos;
+ return NULL;
+}
+
+static void user_seq_stop(struct seq_file *m, void *p)
+{
+}
+
+static int user_seq_show(struct seq_file *m, void *p)
+{
+ struct user_event_group *group = m->private;
+ struct user_event *user;
+ char status;
+ int i, active = 0, busy = 0;
+
+ if (!group)
+ return -EINVAL;
+
+ mutex_lock(&group->reg_mutex);
+
+ hash_for_each(group->register_table, i, user, node) {
+ status = user->status;
+
+ seq_printf(m, "%s", EVENT_NAME(user));
+
+ if (status != 0)
+ seq_puts(m, " #");
+
+ if (status != 0) {
+ seq_puts(m, " Used by");
+ if (status & EVENT_STATUS_FTRACE)
+ seq_puts(m, " ftrace");
+ if (status & EVENT_STATUS_PERF)
+ seq_puts(m, " perf");
+ if (status & EVENT_STATUS_OTHER)
+ seq_puts(m, " other");
+ busy++;
+ }
+
+ seq_puts(m, "\n");
+ active++;
+ }
+
+ mutex_unlock(&group->reg_mutex);
+
+ seq_puts(m, "\n");
+ seq_printf(m, "Active: %d\n", active);
+ seq_printf(m, "Busy: %d\n", busy);
+
+ return 0;
+}
+
+static const struct seq_operations user_seq_ops = {
+ .start = user_seq_start,
+ .next = user_seq_next,
+ .stop = user_seq_stop,
+ .show = user_seq_show,
+};
+
+static int user_status_open(struct inode *node, struct file *file)
+{
+ struct user_event_group *group;
+ int ret;
+
+ group = current_user_event_group();
+
+ if (!group)
+ return -ENOENT;
+
+ ret = seq_open(file, &user_seq_ops);
+
+ if (!ret) {
+ /* Chain group to seq_file */
+ struct seq_file *m = file->private_data;
+
+ m->private = group;
+ }
+
+ return ret;
+}
+
+static const struct file_operations user_status_fops = {
+ .open = user_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/*
+ * Creates a set of tracefs files to allow user mode interactions.
+ */
+static int create_user_tracefs(void)
+{
+ struct dentry *edata, *emmap;
+
+ edata = tracefs_create_file("user_events_data", TRACE_MODE_WRITE,
+ NULL, NULL, &user_data_fops);
+
+ if (!edata) {
+ pr_warn("Could not create tracefs 'user_events_data' entry\n");
+ goto err;
+ }
+
+ emmap = tracefs_create_file("user_events_status", TRACE_MODE_READ,
+ NULL, NULL, &user_status_fops);
+
+ if (!emmap) {
+ tracefs_remove(edata);
+ pr_warn("Could not create tracefs 'user_events_mmap' entry\n");
+ goto err;
+ }
+
+ return 0;
+err:
+ return -ENODEV;
+}
+
+static int set_max_user_events_sysctl(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ int ret;
+
+ mutex_lock(&event_mutex);
+
+ ret = proc_douintvec(table, write, buffer, lenp, ppos);
+
+ mutex_unlock(&event_mutex);
+
+ return ret;
+}
+
+static struct ctl_table user_event_sysctls[] = {
+ {
+ .procname = "user_events_max",
+ .data = &max_user_events,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = set_max_user_events_sysctl,
+ },
+ {}
+};
+
+static int __init trace_events_user_init(void)
+{
+ int ret;
+
+ fault_cache = KMEM_CACHE(user_event_enabler_fault, 0);
+
+ if (!fault_cache)
+ return -ENOMEM;
+
+ init_group = user_event_group_create();
+
+ if (!init_group) {
+ kmem_cache_destroy(fault_cache);
+ return -ENOMEM;
+ }
+
+ ret = create_user_tracefs();
+
+ if (ret) {
+ pr_warn("user_events could not register with tracefs\n");
+ user_event_group_destroy(init_group);
+ kmem_cache_destroy(fault_cache);
+ init_group = NULL;
+ return ret;
+ }
+
+ if (dyn_event_register(&user_event_dops))
+ pr_warn("user_events could not register with dyn_events\n");
+
+ register_sysctl_init("kernel", user_event_sysctls);
+
+ return 0;
+}
+
+fs_initcall(trace_events_user_init);
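
Taken together, the enabler machinery in this file exists so that user space can gate event emission on a single bit that the kernel keeps current at enable_addr. The sketch below shows the expected fast path, reusing the hypothetical enable word and emit_mini_event() helper from the earlier snippets; the relaxed atomic load is this editor's assumption about a careful caller, not something the patch mandates.

/* Fast path: only pay for building and writing the payload when the kernel
 * has set our enable bit (bit 31 of the word registered via enable_addr).
 */
static void maybe_emit(int data_fd, int write_index,
		       const __u32 *enable_word, unsigned int count)
{
	if (!(__atomic_load_n(enable_word, __ATOMIC_RELAXED) & (1U << 31)))
		return;                 /* disabled: skip the write() entirely */

	emit_mini_event(data_fd, write_index, count);  /* helper from the earlier sketch */
}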
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 70d3d0a09053..1698fc22afa0 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -26,7 +26,7 @@ static int ftrace_event_register(struct trace_event_call *call,
/*
* The FTRACE_ENTRY_REG macro allows ftrace entry to define register
- * function and thus become accesible via perf.
+ * function and thus become accessible via perf.
*/
#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, regfn) \
@@ -51,6 +51,9 @@ static int ftrace_event_register(struct trace_event_call *call,
#undef __array
#define __array(type, item, size) type item[size];
+#undef __stack_array
+#define __stack_array(type, item, size, field) __array(type, item, size)
+
#undef __array_desc
#define __array_desc(type, container, item, size) type item[size];
@@ -111,7 +114,11 @@ static void __always_unused ____ftrace_check_##name(void) \
#define __array(_type, _item, _len) { \
.type = #_type"["__stringify(_len)"]", .name = #_item, \
.size = sizeof(_type[_len]), .align = __alignof__(_type), \
- is_signed_type(_type), .filter_type = FILTER_OTHER },
+ is_signed_type(_type), .filter_type = FILTER_OTHER, \
+ .len = _len },
+
+#undef __stack_array
+#define __stack_array(_type, _item, _len, _field) __array(_type, _item, _len)
#undef __array_desc
#define __array_desc(_type, _container, _item, _len) __array(_type, _item, _len)
@@ -148,6 +155,9 @@ static struct trace_event_fields ftrace_event_fields_##name[] = { \
#undef __array
#define __array(type, item, len)
+#undef __stack_array
+#define __stack_array(type, item, len, field)
+
#undef __array_desc
#define __array_desc(type, container, item, len)
@@ -176,7 +186,7 @@ struct trace_event_call __used event_##call = { \
.flags = TRACE_EVENT_FL_IGNORE_ENABLE, \
}; \
static struct trace_event_call __used \
-__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call;
+__section("_ftrace_events") *__event_##call = &event_##call;
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, etype, tstruct, print) \
diff --git a/kernel/trace/trace_fprobe.c b/kernel/trace/trace_fprobe.c
new file mode 100644
index 000000000000..7d2ddbcfa377
--- /dev/null
+++ b/kernel/trace/trace_fprobe.c
@@ -0,0 +1,1231 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Fprobe-based tracing events
+ * Copyright (C) 2022 Google LLC.
+ */
+#define pr_fmt(fmt) "trace_fprobe: " fmt
+
+#include <linux/fprobe.h>
+#include <linux/module.h>
+#include <linux/rculist.h>
+#include <linux/security.h>
+#include <linux/tracepoint.h>
+#include <linux/uaccess.h>
+
+#include "trace_dynevent.h"
+#include "trace_probe.h"
+#include "trace_probe_kernel.h"
+#include "trace_probe_tmpl.h"
+
+#define FPROBE_EVENT_SYSTEM "fprobes"
+#define TRACEPOINT_EVENT_SYSTEM "tracepoints"
+#define RETHOOK_MAXACTIVE_MAX 4096
+
+static int trace_fprobe_create(const char *raw_command);
+static int trace_fprobe_show(struct seq_file *m, struct dyn_event *ev);
+static int trace_fprobe_release(struct dyn_event *ev);
+static bool trace_fprobe_is_busy(struct dyn_event *ev);
+static bool trace_fprobe_match(const char *system, const char *event,
+ int argc, const char **argv, struct dyn_event *ev);
+
+static struct dyn_event_operations trace_fprobe_ops = {
+ .create = trace_fprobe_create,
+ .show = trace_fprobe_show,
+ .is_busy = trace_fprobe_is_busy,
+ .free = trace_fprobe_release,
+ .match = trace_fprobe_match,
+};
+
+/*
+ * Fprobe event core functions
+ */
+struct trace_fprobe {
+ struct dyn_event devent;
+ struct fprobe fp;
+ const char *symbol;
+ struct tracepoint *tpoint;
+ struct module *mod;
+ struct trace_probe tp;
+};
+
+static bool is_trace_fprobe(struct dyn_event *ev)
+{
+ return ev->ops == &trace_fprobe_ops;
+}
+
+static struct trace_fprobe *to_trace_fprobe(struct dyn_event *ev)
+{
+ return container_of(ev, struct trace_fprobe, devent);
+}
+
+/**
+ * for_each_trace_fprobe - iterate over the trace_fprobe list
+ * @pos: the struct trace_fprobe * for each entry
+ * @dpos: the struct dyn_event * to use as a loop cursor
+ */
+#define for_each_trace_fprobe(pos, dpos) \
+ for_each_dyn_event(dpos) \
+ if (is_trace_fprobe(dpos) && (pos = to_trace_fprobe(dpos)))
+
+static bool trace_fprobe_is_return(struct trace_fprobe *tf)
+{
+ return tf->fp.exit_handler != NULL;
+}
+
+static bool trace_fprobe_is_tracepoint(struct trace_fprobe *tf)
+{
+ return tf->tpoint != NULL;
+}
+
+static const char *trace_fprobe_symbol(struct trace_fprobe *tf)
+{
+ return tf->symbol ? tf->symbol : "unknown";
+}
+
+static bool trace_fprobe_is_busy(struct dyn_event *ev)
+{
+ struct trace_fprobe *tf = to_trace_fprobe(ev);
+
+ return trace_probe_is_enabled(&tf->tp);
+}
+
+static bool trace_fprobe_match_command_head(struct trace_fprobe *tf,
+ int argc, const char **argv)
+{
+ char buf[MAX_ARGSTR_LEN + 1];
+
+ if (!argc)
+ return true;
+
+ snprintf(buf, sizeof(buf), "%s", trace_fprobe_symbol(tf));
+ if (strcmp(buf, argv[0]))
+ return false;
+ argc--; argv++;
+
+ return trace_probe_match_command_args(&tf->tp, argc, argv);
+}
+
+static bool trace_fprobe_match(const char *system, const char *event,
+ int argc, const char **argv, struct dyn_event *ev)
+{
+ struct trace_fprobe *tf = to_trace_fprobe(ev);
+
+ if (event[0] != '\0' && strcmp(trace_probe_name(&tf->tp), event))
+ return false;
+
+ if (system && strcmp(trace_probe_group_name(&tf->tp), system))
+ return false;
+
+ return trace_fprobe_match_command_head(tf, argc, argv);
+}
+
+static bool trace_fprobe_is_registered(struct trace_fprobe *tf)
+{
+ return fprobe_is_registered(&tf->fp);
+}
+
+/*
+ * Note that we don't verify the fetch_insn code, since it does not come
+ * from user space.
+ */
+static int
+process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
+ void *base)
+{
+ struct pt_regs *regs = rec;
+ unsigned long val;
+ int ret;
+
+retry:
+ /* 1st stage: get value from context */
+ switch (code->op) {
+ case FETCH_OP_STACK:
+ val = regs_get_kernel_stack_nth(regs, code->param);
+ break;
+ case FETCH_OP_STACKP:
+ val = kernel_stack_pointer(regs);
+ break;
+ case FETCH_OP_RETVAL:
+ val = regs_return_value(regs);
+ break;
+#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
+ case FETCH_OP_ARG:
+ val = regs_get_kernel_argument(regs, code->param);
+ break;
+#endif
+ case FETCH_NOP_SYMBOL: /* Ignore a place holder */
+ code++;
+ goto retry;
+ default:
+ ret = process_common_fetch_insn(code, &val);
+ if (ret < 0)
+ return ret;
+ }
+ code++;
+
+ return process_fetch_insn_bottom(code, val, dest, base);
+}
+NOKPROBE_SYMBOL(process_fetch_insn)
+
+/* function entry handler */
+static nokprobe_inline void
+__fentry_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
+ struct pt_regs *regs,
+ struct trace_event_file *trace_file)
+{
+ struct fentry_trace_entry_head *entry;
+ struct trace_event_call *call = trace_probe_event_call(&tf->tp);
+ struct trace_event_buffer fbuffer;
+ int dsize;
+
+ if (WARN_ON_ONCE(call != trace_file->event_call))
+ return;
+
+ if (trace_trigger_soft_disabled(trace_file))
+ return;
+
+ dsize = __get_data_size(&tf->tp, regs);
+
+ entry = trace_event_buffer_reserve(&fbuffer, trace_file,
+ sizeof(*entry) + tf->tp.size + dsize);
+ if (!entry)
+ return;
+
+ fbuffer.regs = regs;
+ entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
+ entry->ip = entry_ip;
+ store_trace_args(&entry[1], &tf->tp, regs, sizeof(*entry), dsize);
+
+ trace_event_buffer_commit(&fbuffer);
+}
+
+static void
+fentry_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
+ struct pt_regs *regs)
+{
+ struct event_file_link *link;
+
+ trace_probe_for_each_link_rcu(link, &tf->tp)
+ __fentry_trace_func(tf, entry_ip, regs, link->file);
+}
+NOKPROBE_SYMBOL(fentry_trace_func);
+
+/* Function exit (fexit) handler */
+static nokprobe_inline void
+__fexit_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
+ unsigned long ret_ip, struct pt_regs *regs,
+ struct trace_event_file *trace_file)
+{
+ struct fexit_trace_entry_head *entry;
+ struct trace_event_buffer fbuffer;
+ struct trace_event_call *call = trace_probe_event_call(&tf->tp);
+ int dsize;
+
+ if (WARN_ON_ONCE(call != trace_file->event_call))
+ return;
+
+ if (trace_trigger_soft_disabled(trace_file))
+ return;
+
+ dsize = __get_data_size(&tf->tp, regs);
+
+ entry = trace_event_buffer_reserve(&fbuffer, trace_file,
+ sizeof(*entry) + tf->tp.size + dsize);
+ if (!entry)
+ return;
+
+ fbuffer.regs = regs;
+ entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
+ entry->func = entry_ip;
+ entry->ret_ip = ret_ip;
+ store_trace_args(&entry[1], &tf->tp, regs, sizeof(*entry), dsize);
+
+ trace_event_buffer_commit(&fbuffer);
+}
+
+static void
+fexit_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
+ unsigned long ret_ip, struct pt_regs *regs)
+{
+ struct event_file_link *link;
+
+ trace_probe_for_each_link_rcu(link, &tf->tp)
+ __fexit_trace_func(tf, entry_ip, ret_ip, regs, link->file);
+}
+NOKPROBE_SYMBOL(fexit_trace_func);
+
+#ifdef CONFIG_PERF_EVENTS
+
+static int fentry_perf_func(struct trace_fprobe *tf, unsigned long entry_ip,
+ struct pt_regs *regs)
+{
+ struct trace_event_call *call = trace_probe_event_call(&tf->tp);
+ struct fentry_trace_entry_head *entry;
+ struct hlist_head *head;
+ int size, __size, dsize;
+ int rctx;
+
+ head = this_cpu_ptr(call->perf_events);
+ if (hlist_empty(head))
+ return 0;
+
+ dsize = __get_data_size(&tf->tp, regs);
+ __size = sizeof(*entry) + tf->tp.size + dsize;
+ size = ALIGN(__size + sizeof(u32), sizeof(u64));
+ size -= sizeof(u32);
+
+ entry = perf_trace_buf_alloc(size, NULL, &rctx);
+ if (!entry)
+ return 0;
+
+ entry->ip = entry_ip;
+ memset(&entry[1], 0, dsize);
+ store_trace_args(&entry[1], &tf->tp, regs, sizeof(*entry), dsize);
+ perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
+ head, NULL);
+ return 0;
+}
+NOKPROBE_SYMBOL(fentry_perf_func);
+
+static void
+fexit_perf_func(struct trace_fprobe *tf, unsigned long entry_ip,
+ unsigned long ret_ip, struct pt_regs *regs)
+{
+ struct trace_event_call *call = trace_probe_event_call(&tf->tp);
+ struct fexit_trace_entry_head *entry;
+ struct hlist_head *head;
+ int size, __size, dsize;
+ int rctx;
+
+ head = this_cpu_ptr(call->perf_events);
+ if (hlist_empty(head))
+ return;
+
+ dsize = __get_data_size(&tf->tp, regs);
+ __size = sizeof(*entry) + tf->tp.size + dsize;
+ size = ALIGN(__size + sizeof(u32), sizeof(u64));
+ size -= sizeof(u32);
+
+ entry = perf_trace_buf_alloc(size, NULL, &rctx);
+ if (!entry)
+ return;
+
+ entry->func = entry_ip;
+ entry->ret_ip = ret_ip;
+ store_trace_args(&entry[1], &tf->tp, regs, sizeof(*entry), dsize);
+ perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
+ head, NULL);
+}
+NOKPROBE_SYMBOL(fexit_perf_func);
+#endif /* CONFIG_PERF_EVENTS */
+
+static int fentry_dispatcher(struct fprobe *fp, unsigned long entry_ip,
+ unsigned long ret_ip, struct pt_regs *regs,
+ void *entry_data)
+{
+ struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp);
+ int ret = 0;
+
+ if (trace_probe_test_flag(&tf->tp, TP_FLAG_TRACE))
+ fentry_trace_func(tf, entry_ip, regs);
+#ifdef CONFIG_PERF_EVENTS
+ if (trace_probe_test_flag(&tf->tp, TP_FLAG_PROFILE))
+ ret = fentry_perf_func(tf, entry_ip, regs);
+#endif
+ return ret;
+}
+NOKPROBE_SYMBOL(fentry_dispatcher);
+
+static void fexit_dispatcher(struct fprobe *fp, unsigned long entry_ip,
+ unsigned long ret_ip, struct pt_regs *regs,
+ void *entry_data)
+{
+ struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp);
+
+ if (trace_probe_test_flag(&tf->tp, TP_FLAG_TRACE))
+ fexit_trace_func(tf, entry_ip, ret_ip, regs);
+#ifdef CONFIG_PERF_EVENTS
+ if (trace_probe_test_flag(&tf->tp, TP_FLAG_PROFILE))
+ fexit_perf_func(tf, entry_ip, ret_ip, regs);
+#endif
+}
+NOKPROBE_SYMBOL(fexit_dispatcher);
+
+static void free_trace_fprobe(struct trace_fprobe *tf)
+{
+ if (tf) {
+ trace_probe_cleanup(&tf->tp);
+ kfree(tf->symbol);
+ kfree(tf);
+ }
+}
+
+/*
+ * Allocate a new trace_fprobe and initialize it (including the fprobe).
+ */
+static struct trace_fprobe *alloc_trace_fprobe(const char *group,
+ const char *event,
+ const char *symbol,
+ struct tracepoint *tpoint,
+ int maxactive,
+ int nargs, bool is_return)
+{
+ struct trace_fprobe *tf;
+ int ret = -ENOMEM;
+
+ tf = kzalloc(struct_size(tf, tp.args, nargs), GFP_KERNEL);
+ if (!tf)
+ return ERR_PTR(ret);
+
+ tf->symbol = kstrdup(symbol, GFP_KERNEL);
+ if (!tf->symbol)
+ goto error;
+
+ if (is_return)
+ tf->fp.exit_handler = fexit_dispatcher;
+ else
+ tf->fp.entry_handler = fentry_dispatcher;
+
+ tf->tpoint = tpoint;
+ tf->fp.nr_maxactive = maxactive;
+
+ ret = trace_probe_init(&tf->tp, event, group, false);
+ if (ret < 0)
+ goto error;
+
+ dyn_event_init(&tf->devent, &trace_fprobe_ops);
+ return tf;
+error:
+ free_trace_fprobe(tf);
+ return ERR_PTR(ret);
+}
+
+static struct trace_fprobe *find_trace_fprobe(const char *event,
+ const char *group)
+{
+ struct dyn_event *pos;
+ struct trace_fprobe *tf;
+
+ for_each_trace_fprobe(tf, pos)
+ if (strcmp(trace_probe_name(&tf->tp), event) == 0 &&
+ strcmp(trace_probe_group_name(&tf->tp), group) == 0)
+ return tf;
+ return NULL;
+}
+
+static inline int __enable_trace_fprobe(struct trace_fprobe *tf)
+{
+ if (trace_fprobe_is_registered(tf))
+ enable_fprobe(&tf->fp);
+
+ return 0;
+}
+
+static void __disable_trace_fprobe(struct trace_probe *tp)
+{
+ struct trace_fprobe *tf;
+
+ list_for_each_entry(tf, trace_probe_probe_list(tp), tp.list) {
+ if (!trace_fprobe_is_registered(tf))
+ continue;
+ disable_fprobe(&tf->fp);
+ }
+}
+
+/*
+ * Enable trace_probe.
+ * If the file is NULL, enable the "perf" handler; otherwise enable the "trace" handler.
+ */
+static int enable_trace_fprobe(struct trace_event_call *call,
+ struct trace_event_file *file)
+{
+ struct trace_probe *tp;
+ struct trace_fprobe *tf;
+ bool enabled;
+ int ret = 0;
+
+ tp = trace_probe_primary_from_call(call);
+ if (WARN_ON_ONCE(!tp))
+ return -ENODEV;
+ enabled = trace_probe_is_enabled(tp);
+
+ /* This also changes "enabled" state */
+ if (file) {
+ ret = trace_probe_add_file(tp, file);
+ if (ret)
+ return ret;
+ } else
+ trace_probe_set_flag(tp, TP_FLAG_PROFILE);
+
+ if (!enabled) {
+ list_for_each_entry(tf, trace_probe_probe_list(tp), tp.list) {
+ /* TODO: check the fprobe is gone */
+ __enable_trace_fprobe(tf);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Disable trace_probe.
+ * If the file is NULL, disable the "perf" handler; otherwise disable the "trace" handler.
+ */
+static int disable_trace_fprobe(struct trace_event_call *call,
+ struct trace_event_file *file)
+{
+ struct trace_probe *tp;
+
+ tp = trace_probe_primary_from_call(call);
+ if (WARN_ON_ONCE(!tp))
+ return -ENODEV;
+
+ if (file) {
+ if (!trace_probe_get_file_link(tp, file))
+ return -ENOENT;
+ if (!trace_probe_has_single_file(tp))
+ goto out;
+ trace_probe_clear_flag(tp, TP_FLAG_TRACE);
+ } else
+ trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
+
+ if (!trace_probe_is_enabled(tp))
+ __disable_trace_fprobe(tp);
+
+ out:
+ if (file)
+ /*
+ * Synchronization is done in the function below. For perf
+ * events, file == NULL and perf_trace_event_unreg() calls
+ * tracepoint_synchronize_unregister() to synchronize the
+ * event, so we don't need to handle it here.
+ */
+ trace_probe_remove_file(tp, file);
+
+ return 0;
+}
+
+/* Event entry printers */
+static enum print_line_t
+print_fentry_event(struct trace_iterator *iter, int flags,
+ struct trace_event *event)
+{
+ struct fentry_trace_entry_head *field;
+ struct trace_seq *s = &iter->seq;
+ struct trace_probe *tp;
+
+ field = (struct fentry_trace_entry_head *)iter->ent;
+ tp = trace_probe_primary_from_call(
+ container_of(event, struct trace_event_call, event));
+ if (WARN_ON_ONCE(!tp))
+ goto out;
+
+ trace_seq_printf(s, "%s: (", trace_probe_name(tp));
+
+ if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
+ goto out;
+
+ trace_seq_putc(s, ')');
+
+ if (trace_probe_print_args(s, tp->args, tp->nr_args,
+ (u8 *)&field[1], field) < 0)
+ goto out;
+
+ trace_seq_putc(s, '\n');
+ out:
+ return trace_handle_return(s);
+}
+
+static enum print_line_t
+print_fexit_event(struct trace_iterator *iter, int flags,
+ struct trace_event *event)
+{
+ struct fexit_trace_entry_head *field;
+ struct trace_seq *s = &iter->seq;
+ struct trace_probe *tp;
+
+ field = (struct fexit_trace_entry_head *)iter->ent;
+ tp = trace_probe_primary_from_call(
+ container_of(event, struct trace_event_call, event));
+ if (WARN_ON_ONCE(!tp))
+ goto out;
+
+ trace_seq_printf(s, "%s: (", trace_probe_name(tp));
+
+ if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
+ goto out;
+
+ trace_seq_puts(s, " <- ");
+
+ if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
+ goto out;
+
+ trace_seq_putc(s, ')');
+
+ if (trace_probe_print_args(s, tp->args, tp->nr_args,
+ (u8 *)&field[1], field) < 0)
+ goto out;
+
+ trace_seq_putc(s, '\n');
+
+ out:
+ return trace_handle_return(s);
+}
+
+static int fentry_event_define_fields(struct trace_event_call *event_call)
+{
+ int ret;
+ struct fentry_trace_entry_head field;
+ struct trace_probe *tp;
+
+ tp = trace_probe_primary_from_call(event_call);
+ if (WARN_ON_ONCE(!tp))
+ return -ENOENT;
+
+ DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
+
+ return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
+}
+
+static int fexit_event_define_fields(struct trace_event_call *event_call)
+{
+ int ret;
+ struct fexit_trace_entry_head field;
+ struct trace_probe *tp;
+
+ tp = trace_probe_primary_from_call(event_call);
+ if (WARN_ON_ONCE(!tp))
+ return -ENOENT;
+
+ DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
+ DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
+
+ return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
+}
+
+static struct trace_event_functions fentry_funcs = {
+ .trace = print_fentry_event
+};
+
+static struct trace_event_functions fexit_funcs = {
+ .trace = print_fexit_event
+};
+
+static struct trace_event_fields fentry_fields_array[] = {
+ { .type = TRACE_FUNCTION_TYPE,
+ .define_fields = fentry_event_define_fields },
+ {}
+};
+
+static struct trace_event_fields fexit_fields_array[] = {
+ { .type = TRACE_FUNCTION_TYPE,
+ .define_fields = fexit_event_define_fields },
+ {}
+};
+
+static int fprobe_register(struct trace_event_call *event,
+ enum trace_reg type, void *data);
+
+static inline void init_trace_event_call(struct trace_fprobe *tf)
+{
+ struct trace_event_call *call = trace_probe_event_call(&tf->tp);
+
+ if (trace_fprobe_is_return(tf)) {
+ call->event.funcs = &fexit_funcs;
+ call->class->fields_array = fexit_fields_array;
+ } else {
+ call->event.funcs = &fentry_funcs;
+ call->class->fields_array = fentry_fields_array;
+ }
+
+ call->flags = TRACE_EVENT_FL_FPROBE;
+ call->class->reg = fprobe_register;
+}
+
+static int register_fprobe_event(struct trace_fprobe *tf)
+{
+ init_trace_event_call(tf);
+
+ return trace_probe_register_event_call(&tf->tp);
+}
+
+static int unregister_fprobe_event(struct trace_fprobe *tf)
+{
+ return trace_probe_unregister_event_call(&tf->tp);
+}
+
+/* Internal register function - just handle fprobe and flags */
+static int __register_trace_fprobe(struct trace_fprobe *tf)
+{
+ int i, ret;
+
+ /* Should we need new LOCKDOWN flag for fprobe? */
+ ret = security_locked_down(LOCKDOWN_KPROBES);
+ if (ret)
+ return ret;
+
+ if (trace_fprobe_is_registered(tf))
+ return -EINVAL;
+
+ for (i = 0; i < tf->tp.nr_args; i++) {
+ ret = traceprobe_update_arg(&tf->tp.args[i]);
+ if (ret)
+ return ret;
+ }
+
+ /* Set/clear disabled flag according to tp->flag */
+ if (trace_probe_is_enabled(&tf->tp))
+ tf->fp.flags &= ~FPROBE_FL_DISABLED;
+ else
+ tf->fp.flags |= FPROBE_FL_DISABLED;
+
+ if (trace_fprobe_is_tracepoint(tf)) {
+ struct tracepoint *tpoint = tf->tpoint;
+ unsigned long ip = (unsigned long)tpoint->probestub;
+ /*
+ * Enabling an fprobe on a tracepoint takes two steps:
+ * first register the __probestub_##TP function on the tracepoint,
+ * then put an fprobe on that stub function.
+ */
+ ret = tracepoint_probe_register_prio_may_exist(tpoint,
+ tpoint->probestub, NULL, 0);
+ if (ret < 0)
+ return ret;
+ return register_fprobe_ips(&tf->fp, &ip, 1);
+ }
+
+ /* TODO: handle filter, nofilter or symbol list */
+ return register_fprobe(&tf->fp, tf->symbol, NULL);
+}
+
+/* Internal unregister function - just handle fprobe and flags */
+static void __unregister_trace_fprobe(struct trace_fprobe *tf)
+{
+ if (trace_fprobe_is_registered(tf)) {
+ unregister_fprobe(&tf->fp);
+ memset(&tf->fp, 0, sizeof(tf->fp));
+ if (trace_fprobe_is_tracepoint(tf)) {
+ tracepoint_probe_unregister(tf->tpoint,
+ tf->tpoint->probestub, NULL);
+ tf->tpoint = NULL;
+ tf->mod = NULL;
+ }
+ }
+}
+
+/* TODO: make this trace_*probe common function */
+/* Unregister a trace_probe and probe_event */
+static int unregister_trace_fprobe(struct trace_fprobe *tf)
+{
+ /* If other probes are on the event, just unregister fprobe */
+ if (trace_probe_has_sibling(&tf->tp))
+ goto unreg;
+
+ /* An enabled event cannot be unregistered */
+ if (trace_probe_is_enabled(&tf->tp))
+ return -EBUSY;
+
+ /* If there's a reference to the dynamic event */
+ if (trace_event_dyn_busy(trace_probe_event_call(&tf->tp)))
+ return -EBUSY;
+
+ /* Will fail if probe is being used by ftrace or perf */
+ if (unregister_fprobe_event(tf))
+ return -EBUSY;
+
+unreg:
+ __unregister_trace_fprobe(tf);
+ dyn_event_remove(&tf->devent);
+ trace_probe_unlink(&tf->tp);
+
+ return 0;
+}
+
+static bool trace_fprobe_has_same_fprobe(struct trace_fprobe *orig,
+ struct trace_fprobe *comp)
+{
+ struct trace_probe_event *tpe = orig->tp.event;
+ int i;
+
+ list_for_each_entry(orig, &tpe->probes, tp.list) {
+ if (strcmp(trace_fprobe_symbol(orig),
+ trace_fprobe_symbol(comp)))
+ continue;
+
+ /*
+ * trace_probe_compare_arg_type() ensured that nr_args and
+ * each argument name and type are the same. Let's compare comm.
+ */
+ for (i = 0; i < orig->tp.nr_args; i++) {
+ if (strcmp(orig->tp.args[i].comm,
+ comp->tp.args[i].comm))
+ break;
+ }
+
+ if (i == orig->tp.nr_args)
+ return true;
+ }
+
+ return false;
+}
+
+static int append_trace_fprobe(struct trace_fprobe *tf, struct trace_fprobe *to)
+{
+ int ret;
+
+ if (trace_fprobe_is_return(tf) != trace_fprobe_is_return(to) ||
+ trace_fprobe_is_tracepoint(tf) != trace_fprobe_is_tracepoint(to)) {
+ trace_probe_log_set_index(0);
+ trace_probe_log_err(0, DIFF_PROBE_TYPE);
+ return -EEXIST;
+ }
+ ret = trace_probe_compare_arg_type(&tf->tp, &to->tp);
+ if (ret) {
+ /* Note that arguments start at index 2 */
+ trace_probe_log_set_index(ret + 1);
+ trace_probe_log_err(0, DIFF_ARG_TYPE);
+ return -EEXIST;
+ }
+ if (trace_fprobe_has_same_fprobe(to, tf)) {
+ trace_probe_log_set_index(0);
+ trace_probe_log_err(0, SAME_PROBE);
+ return -EEXIST;
+ }
+
+ /* Append to existing event */
+ ret = trace_probe_append(&tf->tp, &to->tp);
+ if (ret)
+ return ret;
+
+ ret = __register_trace_fprobe(tf);
+ if (ret)
+ trace_probe_unlink(&tf->tp);
+ else
+ dyn_event_add(&tf->devent, trace_probe_event_call(&tf->tp));
+
+ return ret;
+}
+
+/* Register a trace_probe and probe_event */
+static int register_trace_fprobe(struct trace_fprobe *tf)
+{
+ struct trace_fprobe *old_tf;
+ int ret;
+
+ mutex_lock(&event_mutex);
+
+ old_tf = find_trace_fprobe(trace_probe_name(&tf->tp),
+ trace_probe_group_name(&tf->tp));
+ if (old_tf) {
+ ret = append_trace_fprobe(tf, old_tf);
+ goto end;
+ }
+
+ /* Register new event */
+ ret = register_fprobe_event(tf);
+ if (ret) {
+ if (ret == -EEXIST) {
+ trace_probe_log_set_index(0);
+ trace_probe_log_err(0, EVENT_EXIST);
+ } else
+ pr_warn("Failed to register probe event(%d)\n", ret);
+ goto end;
+ }
+
+ /* Register fprobe */
+ ret = __register_trace_fprobe(tf);
+ if (ret < 0)
+ unregister_fprobe_event(tf);
+ else
+ dyn_event_add(&tf->devent, trace_probe_event_call(&tf->tp));
+
+end:
+ mutex_unlock(&event_mutex);
+ return ret;
+}
+
+#ifdef CONFIG_MODULES
+static int __tracepoint_probe_module_cb(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ struct tp_module *tp_mod = data;
+ struct trace_fprobe *tf;
+ struct dyn_event *pos;
+
+ if (val != MODULE_STATE_GOING)
+ return NOTIFY_DONE;
+
+ mutex_lock(&event_mutex);
+ for_each_trace_fprobe(tf, pos) {
+ if (tp_mod->mod == tf->mod) {
+ tracepoint_probe_unregister(tf->tpoint,
+ tf->tpoint->probestub, NULL);
+ tf->tpoint = NULL;
+ tf->mod = NULL;
+ }
+ }
+ mutex_unlock(&event_mutex);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block tracepoint_module_nb = {
+ .notifier_call = __tracepoint_probe_module_cb,
+};
+#endif /* CONFIG_MODULES */
+
+struct __find_tracepoint_cb_data {
+ const char *tp_name;
+ struct tracepoint *tpoint;
+};
+
+static void __find_tracepoint_cb(struct tracepoint *tp, void *priv)
+{
+ struct __find_tracepoint_cb_data *data = priv;
+
+ if (!data->tpoint && !strcmp(data->tp_name, tp->name))
+ data->tpoint = tp;
+}
+
+static struct tracepoint *find_tracepoint(const char *tp_name)
+{
+ struct __find_tracepoint_cb_data data = {
+ .tp_name = tp_name,
+ };
+
+ for_each_kernel_tracepoint(__find_tracepoint_cb, &data);
+
+ return data.tpoint;
+}
+
+static int parse_symbol_and_return(int argc, const char *argv[],
+ char **symbol, bool *is_return,
+ bool is_tracepoint)
+{
+ char *tmp = strchr(argv[1], '%');
+ int i;
+
+ if (tmp) {
+ int len = tmp - argv[1];
+
+ if (!is_tracepoint && !strcmp(tmp, "%return")) {
+ *is_return = true;
+ } else {
+ trace_probe_log_err(len, BAD_ADDR_SUFFIX);
+ return -EINVAL;
+ }
+ *symbol = kmemdup_nul(argv[1], len, GFP_KERNEL);
+ } else
+ *symbol = kstrdup(argv[1], GFP_KERNEL);
+ if (!*symbol)
+ return -ENOMEM;
+
+ if (*is_return)
+ return 0;
+
+ /* If there is $retval, this should be a return fprobe. */
+ for (i = 2; i < argc; i++) {
+ tmp = strstr(argv[i], "$retval");
+ if (tmp && !isalnum(tmp[7]) && tmp[7] != '_') {
+ if (is_tracepoint) {
+ trace_probe_log_set_index(i);
+ trace_probe_log_err(tmp - argv[i], RETVAL_ON_PROBE);
+ return -EINVAL;
+ }
+ *is_return = true;
+ break;
+ }
+ }
+ return 0;
+}
+
+static int __trace_fprobe_create(int argc, const char *argv[])
+{
+ /*
+ * Argument syntax:
+ * - Add fentry probe:
+ * f[:[GRP/][EVENT]] [MOD:]KSYM [FETCHARGS]
+ * - Add fexit probe:
+ * f[N][:[GRP/][EVENT]] [MOD:]KSYM%return [FETCHARGS]
+ * - Add tracepoint probe:
+ * t[:[GRP/][EVENT]] TRACEPOINT [FETCHARGS]
+ *
+ * Fetch args:
+ * $retval : fetch return value
+ * $stack : fetch stack address
+ * $stackN : fetch Nth entry of stack (N:0-)
+ * $argN : fetch Nth argument (N:1-)
+ * $comm : fetch current task comm
+ * @ADDR : fetch memory at ADDR (ADDR should be in kernel)
+ * @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
+ * Dereferencing memory fetch:
+ * +|-offs(ARG) : fetch memory at ARG +|- offs address.
+ * Alias name of args:
+ * NAME=FETCHARG : set NAME as alias of FETCHARG.
+ * Type of args:
+ * FETCHARG:TYPE : use TYPE instead of unsigned long.
+ */
+ struct trace_fprobe *tf = NULL;
+ int i, len, new_argc = 0, ret = 0;
+ bool is_return = false;
+ char *symbol = NULL;
+ const char *event = NULL, *group = FPROBE_EVENT_SYSTEM;
+ const char **new_argv = NULL;
+ int maxactive = 0;
+ char buf[MAX_EVENT_NAME_LEN];
+ char gbuf[MAX_EVENT_NAME_LEN];
+ char sbuf[KSYM_NAME_LEN];
+ char abuf[MAX_BTF_ARGS_LEN];
+ bool is_tracepoint = false;
+ struct tracepoint *tpoint = NULL;
+ struct traceprobe_parse_context ctx = {
+ .flags = TPARG_FL_KERNEL | TPARG_FL_FPROBE,
+ };
+
+ if ((argv[0][0] != 'f' && argv[0][0] != 't') || argc < 2)
+ return -ECANCELED;
+
+ if (argv[0][0] == 't') {
+ is_tracepoint = true;
+ group = TRACEPOINT_EVENT_SYSTEM;
+ }
+
+ trace_probe_log_init("trace_fprobe", argc, argv);
+
+ event = strchr(&argv[0][1], ':');
+ if (event)
+ event++;
+
+ if (isdigit(argv[0][1])) {
+ if (event)
+ len = event - &argv[0][1] - 1;
+ else
+ len = strlen(&argv[0][1]);
+ if (len > MAX_EVENT_NAME_LEN - 1) {
+ trace_probe_log_err(1, BAD_MAXACT);
+ goto parse_error;
+ }
+ memcpy(buf, &argv[0][1], len);
+ buf[len] = '\0';
+ ret = kstrtouint(buf, 0, &maxactive);
+ if (ret || !maxactive) {
+ trace_probe_log_err(1, BAD_MAXACT);
+ goto parse_error;
+ }
+ /* fprobe rethook instances are iterated over via a list. The
+ * maximum should stay reasonable.
+ */
+ if (maxactive > RETHOOK_MAXACTIVE_MAX) {
+ trace_probe_log_err(1, MAXACT_TOO_BIG);
+ goto parse_error;
+ }
+ }
+
+ trace_probe_log_set_index(1);
+
+ /* a symbol (or tracepoint) must be specified */
+ ret = parse_symbol_and_return(argc, argv, &symbol, &is_return, is_tracepoint);
+ if (ret < 0)
+ goto parse_error;
+
+ if (!is_return && maxactive) {
+ trace_probe_log_set_index(0);
+ trace_probe_log_err(1, BAD_MAXACT_TYPE);
+ goto parse_error;
+ }
+
+ trace_probe_log_set_index(0);
+ if (event) {
+ ret = traceprobe_parse_event_name(&event, &group, gbuf,
+ event - argv[0]);
+ if (ret)
+ goto parse_error;
+ }
+
+ if (!event) {
+ /* Make a new event name */
+ if (is_tracepoint)
+ snprintf(buf, MAX_EVENT_NAME_LEN, "%s%s",
+ isdigit(*symbol) ? "_" : "", symbol);
+ else
+ snprintf(buf, MAX_EVENT_NAME_LEN, "%s__%s", symbol,
+ is_return ? "exit" : "entry");
+ sanitize_event_name(buf);
+ event = buf;
+ }
+
+ if (is_return)
+ ctx.flags |= TPARG_FL_RETURN;
+ else
+ ctx.flags |= TPARG_FL_FENTRY;
+
+ if (is_tracepoint) {
+ ctx.flags |= TPARG_FL_TPOINT;
+ tpoint = find_tracepoint(symbol);
+ if (!tpoint) {
+ trace_probe_log_set_index(1);
+ trace_probe_log_err(0, NO_TRACEPOINT);
+ goto parse_error;
+ }
+ ctx.funcname = kallsyms_lookup(
+ (unsigned long)tpoint->probestub,
+ NULL, NULL, NULL, sbuf);
+ } else
+ ctx.funcname = symbol;
+
+ argc -= 2; argv += 2;
+ new_argv = traceprobe_expand_meta_args(argc, argv, &new_argc,
+ abuf, MAX_BTF_ARGS_LEN, &ctx);
+ if (IS_ERR(new_argv)) {
+ ret = PTR_ERR(new_argv);
+ new_argv = NULL;
+ goto out;
+ }
+ if (new_argv) {
+ argc = new_argc;
+ argv = new_argv;
+ }
+
+ /* setup a probe */
+ tf = alloc_trace_fprobe(group, event, symbol, tpoint, maxactive,
+ argc, is_return);
+ if (IS_ERR(tf)) {
+ ret = PTR_ERR(tf);
+ /* This must return -ENOMEM, else there is a bug */
+ WARN_ON_ONCE(ret != -ENOMEM);
+ goto out; /* We know tf is not allocated */
+ }
+
+ if (is_tracepoint)
+ tf->mod = __module_text_address(
+ (unsigned long)tf->tpoint->probestub);
+
+ /* parse arguments */
+ for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
+ trace_probe_log_set_index(i + 2);
+ ctx.offset = 0;
+ ret = traceprobe_parse_probe_arg(&tf->tp, i, argv[i], &ctx);
+ if (ret)
+ goto error; /* This can be -ENOMEM */
+ }
+
+ ret = traceprobe_set_print_fmt(&tf->tp,
+ is_return ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL);
+ if (ret < 0)
+ goto error;
+
+ ret = register_trace_fprobe(tf);
+ if (ret) {
+ trace_probe_log_set_index(1);
+ if (ret == -EILSEQ)
+ trace_probe_log_err(0, BAD_INSN_BNDRY);
+ else if (ret == -ENOENT)
+ trace_probe_log_err(0, BAD_PROBE_ADDR);
+ else if (ret != -ENOMEM && ret != -EEXIST)
+ trace_probe_log_err(0, FAIL_REG_PROBE);
+ goto error;
+ }
+
+out:
+ traceprobe_finish_parse(&ctx);
+ trace_probe_log_clear();
+ kfree(new_argv);
+ kfree(symbol);
+ return ret;
+
+parse_error:
+ ret = -EINVAL;
+error:
+ free_trace_fprobe(tf);
+ goto out;
+}
+
+static int trace_fprobe_create(const char *raw_command)
+{
+ return trace_probe_create(raw_command, __trace_fprobe_create);
+}
+
+static int trace_fprobe_release(struct dyn_event *ev)
+{
+ struct trace_fprobe *tf = to_trace_fprobe(ev);
+ int ret = unregister_trace_fprobe(tf);
+
+ if (!ret)
+ free_trace_fprobe(tf);
+ return ret;
+}
+
+static int trace_fprobe_show(struct seq_file *m, struct dyn_event *ev)
+{
+ struct trace_fprobe *tf = to_trace_fprobe(ev);
+ int i;
+
+ if (trace_fprobe_is_tracepoint(tf))
+ seq_putc(m, 't');
+ else
+ seq_putc(m, 'f');
+ if (trace_fprobe_is_return(tf) && tf->fp.nr_maxactive)
+ seq_printf(m, "%d", tf->fp.nr_maxactive);
+ seq_printf(m, ":%s/%s", trace_probe_group_name(&tf->tp),
+ trace_probe_name(&tf->tp));
+
+ seq_printf(m, " %s%s", trace_fprobe_symbol(tf),
+ trace_fprobe_is_return(tf) ? "%return" : "");
+
+ for (i = 0; i < tf->tp.nr_args; i++)
+ seq_printf(m, " %s=%s", tf->tp.args[i].name, tf->tp.args[i].comm);
+ seq_putc(m, '\n');
+
+ return 0;
+}
+
+/*
+ * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
+ */
+static int fprobe_register(struct trace_event_call *event,
+ enum trace_reg type, void *data)
+{
+ struct trace_event_file *file = data;
+
+ switch (type) {
+ case TRACE_REG_REGISTER:
+ return enable_trace_fprobe(event, file);
+ case TRACE_REG_UNREGISTER:
+ return disable_trace_fprobe(event, file);
+
+#ifdef CONFIG_PERF_EVENTS
+ case TRACE_REG_PERF_REGISTER:
+ return enable_trace_fprobe(event, NULL);
+ case TRACE_REG_PERF_UNREGISTER:
+ return disable_trace_fprobe(event, NULL);
+ case TRACE_REG_PERF_OPEN:
+ case TRACE_REG_PERF_CLOSE:
+ case TRACE_REG_PERF_ADD:
+ case TRACE_REG_PERF_DEL:
+ return 0;
+#endif
+ }
+ return 0;
+}
+
+/*
+ * Register the dynevent ops at core_initcall. This allows the kernel to
+ * set up fprobe events at postcore_initcall without tracefs.
+ */
+static __init int init_fprobe_trace_early(void)
+{
+ int ret;
+
+ ret = dyn_event_register(&trace_fprobe_ops);
+ if (ret)
+ return ret;
+
+#ifdef CONFIG_MODULES
+ ret = register_tracepoint_module_notifier(&tracepoint_module_nb);
+ if (ret)
+ return ret;
+#endif
+
+ return 0;
+}
+core_initcall(init_fprobe_trace_early);
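
The command syntax documented in __trace_fprobe_create() is normally fed through the tracefs dynamic_events file. Below is a hedged sketch that creates one entry and one exit fprobe; the /sys/kernel/tracing mount point, the group/event names and the do_sys_openat2 symbol are illustrative, and $argN fetching depends on CONFIG_HAVE_FUNCTION_ARG_ACCESS_API as the fetch code earlier shows.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Create one fentry and one fexit fprobe event through dynamic_events. */
static int add_fprobe_events(void)
{
	static const char * const cmds[] = {
		"f:fprobes/myopen_entry do_sys_openat2 dfd=$arg1\n",          /* fentry + 1st arg */
		"f:fprobes/myopen_exit do_sys_openat2%return ret=$retval\n",  /* fexit + return value */
	};
	int fd, i, ret = 0;

	fd = open("/sys/kernel/tracing/dynamic_events", O_WRONLY | O_APPEND);
	if (fd < 0)
		return -1;

	for (i = 0; i < 2; i++)
		if (write(fd, cmds[i], strlen(cmds[i])) < 0)
			ret = -1;

	close(fd);
	return ret;
}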
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index dd4dff71d89a..9f1bfbe105e8 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -23,40 +23,63 @@ static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *op, struct pt_regs *pt_regs);
+ struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *op, struct pt_regs *pt_regs);
+ struct ftrace_ops *op, struct ftrace_regs *fregs);
+static void
+function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct ftrace_regs *fregs);
+static void
+function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op,
+ struct ftrace_regs *fregs);
static struct tracer_flags func_flags;
/* Our option */
enum {
- TRACE_FUNC_OPT_STACK = 0x1,
+
+ TRACE_FUNC_NO_OPTS = 0x0, /* No flags set. */
+ TRACE_FUNC_OPT_STACK = 0x1,
+ TRACE_FUNC_OPT_NO_REPEATS = 0x2,
+
+ /* Update this to next highest bit. */
+ TRACE_FUNC_OPT_HIGHEST_BIT = 0x4
};
-static int allocate_ftrace_ops(struct trace_array *tr)
+#define TRACE_FUNC_OPT_MASK (TRACE_FUNC_OPT_HIGHEST_BIT - 1)
+
+int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
struct ftrace_ops *ops;
+ /* The top level array uses the "global_ops" */
+ if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
+ return 0;
+
ops = kzalloc(sizeof(*ops), GFP_KERNEL);
if (!ops)
return -ENOMEM;
/* Currently only the non stack version is supported */
ops->func = function_trace_call;
- ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID;
+ ops->flags = FTRACE_OPS_FL_PID;
tr->ops = ops;
ops->private = tr;
+
return 0;
}
+void ftrace_free_ftrace_ops(struct trace_array *tr)
+{
+ kfree(tr->ops);
+ tr->ops = NULL;
+}
int ftrace_create_function_files(struct trace_array *tr,
struct dentry *parent)
{
- int ret;
-
/*
* The top level array uses the "global_ops", and the files are
* created on boot up.
@@ -64,9 +87,8 @@ int ftrace_create_function_files(struct trace_array *tr,
if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
return 0;
- ret = allocate_ftrace_ops(tr);
- if (ret)
- return ret;
+ if (!tr->ops)
+ return -EINVAL;
ftrace_create_filter_files(tr->ops, parent);
@@ -76,14 +98,40 @@ int ftrace_create_function_files(struct trace_array *tr,
void ftrace_destroy_function_files(struct trace_array *tr)
{
ftrace_destroy_filter_files(tr->ops);
- kfree(tr->ops);
- tr->ops = NULL;
+ ftrace_free_ftrace_ops(tr);
+}
+
+static ftrace_func_t select_trace_function(u32 flags_val)
+{
+ switch (flags_val & TRACE_FUNC_OPT_MASK) {
+ case TRACE_FUNC_NO_OPTS:
+ return function_trace_call;
+ case TRACE_FUNC_OPT_STACK:
+ return function_stack_trace_call;
+ case TRACE_FUNC_OPT_NO_REPEATS:
+ return function_no_repeats_trace_call;
+ case TRACE_FUNC_OPT_STACK | TRACE_FUNC_OPT_NO_REPEATS:
+ return function_stack_no_repeats_trace_call;
+ default:
+ return NULL;
+ }
+}
+
+static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
+{
+ if (!tr->last_func_repeats &&
+ (flags_val & TRACE_FUNC_OPT_NO_REPEATS)) {
+ tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
+ if (!tr->last_func_repeats)
+ return false;
+ }
+
+ return true;
}
static int function_trace_init(struct trace_array *tr)
{
ftrace_func_t func;
-
/*
* Instance trace_arrays get their ops allocated
* at instance creation. Unless it failed
@@ -92,17 +140,16 @@ static int function_trace_init(struct trace_array *tr)
if (!tr->ops)
return -ENOMEM;
- /* Currently only the global instance can do stack tracing */
- if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
- func_flags.val & TRACE_FUNC_OPT_STACK)
- func = function_stack_trace_call;
- else
- func = function_trace_call;
+ func = select_trace_function(func_flags.val);
+ if (!func)
+ return -EINVAL;
+
+ if (!handle_func_repeats(tr, func_flags.val))
+ return -ENOMEM;
ftrace_init_array_ops(tr, func);
- tr->array_buffer.cpu = get_cpu();
- put_cpu();
+ tr->array_buffer.cpu = raw_smp_processor_id();
tracing_start_cmdline_record();
tracing_start_function_trace(tr);
@@ -123,35 +170,29 @@ static void function_trace_start(struct trace_array *tr)
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *op, struct pt_regs *pt_regs)
+ struct ftrace_ops *op, struct ftrace_regs *fregs)
{
struct trace_array *tr = op->private;
struct trace_array_cpu *data;
- unsigned long flags;
+ unsigned int trace_ctx;
int bit;
int cpu;
- int pc;
if (unlikely(!tr->function_enabled))
return;
- pc = preempt_count();
- preempt_disable_notrace();
-
- bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
+ bit = ftrace_test_recursion_trylock(ip, parent_ip);
if (bit < 0)
- goto out;
+ return;
+
+ trace_ctx = tracing_gen_ctx();
cpu = smp_processor_id();
data = per_cpu_ptr(tr->array_buffer.data, cpu);
- if (!atomic_read(&data->disabled)) {
- local_save_flags(flags);
- trace_function(tr, ip, parent_ip, flags, pc);
- }
- trace_clear_recursion(bit);
+ if (!atomic_read(&data->disabled))
+ trace_function(tr, ip, parent_ip, trace_ctx);
- out:
- preempt_enable_notrace();
+ ftrace_test_recursion_unlock(bit);
}
#ifdef CONFIG_UNWINDER_ORC
@@ -174,14 +215,14 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *op, struct pt_regs *pt_regs)
+ struct ftrace_ops *op, struct ftrace_regs *fregs)
{
struct trace_array *tr = op->private;
struct trace_array_cpu *data;
unsigned long flags;
long disabled;
int cpu;
- int pc;
+ unsigned int trace_ctx;
if (unlikely(!tr->function_enabled))
return;
@@ -196,24 +237,143 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
- pc = preempt_count();
- trace_function(tr, ip, parent_ip, flags, pc);
- __trace_stack(tr, flags, STACK_SKIP, pc);
+ trace_ctx = tracing_gen_ctx_flags(flags);
+ trace_function(tr, ip, parent_ip, trace_ctx);
+ __trace_stack(tr, trace_ctx, STACK_SKIP);
}
atomic_dec(&data->disabled);
local_irq_restore(flags);
}
+static inline bool is_repeat_check(struct trace_array *tr,
+ struct trace_func_repeats *last_info,
+ unsigned long ip, unsigned long parent_ip)
+{
+ if (last_info->ip == ip &&
+ last_info->parent_ip == parent_ip &&
+ last_info->count < U16_MAX) {
+ last_info->ts_last_call =
+ ring_buffer_time_stamp(tr->array_buffer.buffer);
+ last_info->count++;
+ return true;
+ }
+
+ return false;
+}
+
+static inline void process_repeats(struct trace_array *tr,
+ unsigned long ip, unsigned long parent_ip,
+ struct trace_func_repeats *last_info,
+ unsigned int trace_ctx)
+{
+ if (last_info->count) {
+ trace_last_func_repeats(tr, last_info, trace_ctx);
+ last_info->count = 0;
+ }
+
+ last_info->ip = ip;
+ last_info->parent_ip = parent_ip;
+}
+
+static void
+function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op,
+ struct ftrace_regs *fregs)
+{
+ struct trace_func_repeats *last_info;
+ struct trace_array *tr = op->private;
+ struct trace_array_cpu *data;
+ unsigned int trace_ctx;
+ unsigned long flags;
+ int bit;
+ int cpu;
+
+ if (unlikely(!tr->function_enabled))
+ return;
+
+ bit = ftrace_test_recursion_trylock(ip, parent_ip);
+ if (bit < 0)
+ return;
+
+ cpu = smp_processor_id();
+ data = per_cpu_ptr(tr->array_buffer.data, cpu);
+ if (atomic_read(&data->disabled))
+ goto out;
+
+ /*
+ * An interrupt may happen at any place here. But as far as I can see,
+ * the only damage that this can cause is to mess up the repetition
+ * counter without valuable data being lost.
+ * TODO: think about a solution that is better than just hoping to be
+ * lucky.
+ */
+ last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
+ if (is_repeat_check(tr, last_info, ip, parent_ip))
+ goto out;
+
+ local_save_flags(flags);
+ trace_ctx = tracing_gen_ctx_flags(flags);
+ process_repeats(tr, ip, parent_ip, last_info, trace_ctx);
+
+ trace_function(tr, ip, parent_ip, trace_ctx);
+
+out:
+ ftrace_test_recursion_unlock(bit);
+}
+
+static void
+function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op,
+ struct ftrace_regs *fregs)
+{
+ struct trace_func_repeats *last_info;
+ struct trace_array *tr = op->private;
+ struct trace_array_cpu *data;
+ unsigned long flags;
+ long disabled;
+ int cpu;
+ unsigned int trace_ctx;
+
+ if (unlikely(!tr->function_enabled))
+ return;
+
+ /*
+ * Need to use raw, since this must be called before the
+ * recursive protection is performed.
+ */
+ local_irq_save(flags);
+ cpu = raw_smp_processor_id();
+ data = per_cpu_ptr(tr->array_buffer.data, cpu);
+ disabled = atomic_inc_return(&data->disabled);
+
+ if (likely(disabled == 1)) {
+ last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
+ if (is_repeat_check(tr, last_info, ip, parent_ip))
+ goto out;
+
+ trace_ctx = tracing_gen_ctx_flags(flags);
+ process_repeats(tr, ip, parent_ip, last_info, trace_ctx);
+
+ trace_function(tr, ip, parent_ip, trace_ctx);
+ __trace_stack(tr, trace_ctx, STACK_SKIP);
+ }
+
+ out:
+ atomic_dec(&data->disabled);
+ local_irq_restore(flags);
+}
+
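To make the folding above concrete, here is a small userspace sketch (not kernel code) of the idea implemented by is_repeat_check() and process_repeats(): consecutive calls with an identical (ip, parent_ip) pair are counted instead of being traced individually, and the accumulated count is flushed as a single "repeats" record when a different pair arrives.

#include <stdio.h>

struct last_info {
	unsigned long ip, parent_ip;
	unsigned int count;
};

/* Mirror of is_repeat_check()/process_repeats(): fold consecutive
 * identical (ip, parent_ip) pairs into a single repeat count. */
static void emit(unsigned long ip, unsigned long parent_ip, struct last_info *li)
{
	if (li->ip == ip && li->parent_ip == parent_ip && li->count < 0xffff) {
		li->count++;		/* fold the repeat */
		return;
	}
	if (li->count)
		printf("  repeats: %u\n", li->count);
	li->ip = ip;
	li->parent_ip = parent_ip;
	li->count = 0;
	printf("func %#lx <- %#lx\n", ip, parent_ip);
}

int main(void)
{
	struct last_info li = { 0 };
	unsigned long calls[][2] = {
		{ 0x10, 0x20 }, { 0x10, 0x20 }, { 0x10, 0x20 }, { 0x30, 0x40 },
	};

	for (unsigned int i = 0; i < sizeof(calls) / sizeof(calls[0]); i++)
		emit(calls[i][0], calls[i][1], &li);
	if (li.count)
		printf("  repeats: %u\n", li.count);
	return 0;	/* prints: func 0x10 <- 0x20, repeats: 2, func 0x30 <- 0x40 */
}
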
static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
+ { TRACER_OPT(func-no-repeats, TRACE_FUNC_OPT_NO_REPEATS) },
{ } /* Always set a last empty entry */
};
static struct tracer_flags func_flags = {
- .val = 0, /* By default: all flags disabled */
+ .val = TRACE_FUNC_NO_OPTS, /* By default: all flags disabled */
.opts = func_opts
};
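
Usage sketch (an assumption, not taken from the patch): tracer options declared with TRACER_OPT() normally appear under options/ while the tracer is active, so the new behavior could be exercised roughly as below; the tracefs path and option file name are assumptions based on the declarations above.

/* Hypothetical sketch: enable the function tracer and its func-no-repeats option. */
#include <stdio.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	if (write_str("/sys/kernel/tracing/current_tracer", "function\n"))
		return 1;
	return write_str("/sys/kernel/tracing/options/func-no-repeats", "1\n") ? 1 : 0;
}
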
@@ -235,30 +395,32 @@ static struct tracer function_trace;
static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
- switch (bit) {
- case TRACE_FUNC_OPT_STACK:
- /* do nothing if already set */
- if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
- break;
-
- /* We can change this flag when not running. */
- if (tr->current_trace != &function_trace)
- break;
+ ftrace_func_t func;
+ u32 new_flags;
- unregister_ftrace_function(tr->ops);
+ /* Do nothing if already set. */
+ if (!!set == !!(func_flags.val & bit))
+ return 0;
- if (set) {
- tr->ops->func = function_stack_trace_call;
- register_ftrace_function(tr->ops);
- } else {
- tr->ops->func = function_trace_call;
- register_ftrace_function(tr->ops);
- }
+ /* We can change this flag only when not running. */
+ if (tr->current_trace != &function_trace)
+ return 0;
- break;
- default:
+ new_flags = (func_flags.val & ~bit) | (set ? bit : 0);
+ func = select_trace_function(new_flags);
+ if (!func)
return -EINVAL;
- }
+
+ /* Check if there's anything to change. */
+ if (tr->ops->func == func)
+ return 0;
+
+ if (!handle_func_repeats(tr, new_flags))
+ return -ENOMEM;
+
+ unregister_ftrace_function(tr->ops);
+ tr->ops->func = func;
+ register_ftrace_function(tr->ops);
return 0;
}
@@ -401,13 +563,11 @@ ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
static __always_inline void trace_stack(struct trace_array *tr)
{
- unsigned long flags;
- int pc;
+ unsigned int trace_ctx;
- local_save_flags(flags);
- pc = preempt_count();
+ trace_ctx = tracing_gen_ctx();
- __trace_stack(tr, flags, FTRACE_STACK_SKIP, pc);
+ __trace_stack(tr, trace_ctx, FTRACE_STACK_SKIP);
}
static void
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 4a9c49c08ec9..c35fbaab2a47 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -58,6 +58,12 @@ static struct tracer_opt trace_opts[] = {
{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
/* Display function name after trailing } */
{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
+#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
+ /* Display function return value ? */
+ { TRACER_OPT(funcgraph-retval, TRACE_GRAPH_PRINT_RETVAL) },
+ /* Display function return value in hexadecimal format ? */
+ { TRACER_OPT(funcgraph-retval-hex, TRACE_GRAPH_PRINT_RETVAL_HEX) },
+#endif
/* Include sleep time (scheduled out) between entry and return */
{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
@@ -96,8 +102,7 @@ print_graph_duration(struct trace_array *tr, unsigned long long duration,
int __trace_graph_entry(struct trace_array *tr,
struct ftrace_graph_ent *trace,
- unsigned long flags,
- int pc)
+ unsigned int trace_ctx)
{
struct trace_event_call *call = &event_funcgraph_entry;
struct ring_buffer_event *event;
@@ -105,7 +110,7 @@ int __trace_graph_entry(struct trace_array *tr,
struct ftrace_graph_ent_entry *entry;
event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
- sizeof(*entry), flags, pc);
+ sizeof(*entry), trace_ctx);
if (!event)
return 0;
entry = ring_buffer_event_data(event);
@@ -121,7 +126,7 @@ static inline int ftrace_graph_ignore_irqs(void)
if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
return 0;
- return in_irq();
+ return in_hardirq();
}
int trace_graph_entry(struct ftrace_graph_ent *trace)
@@ -129,10 +134,10 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
struct trace_array *tr = graph_array;
struct trace_array_cpu *data;
unsigned long flags;
+ unsigned int trace_ctx;
long disabled;
int ret;
int cpu;
- int pc;
if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT))
return 0;
@@ -174,8 +179,8 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
data = per_cpu_ptr(tr->array_buffer.data, cpu);
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
- pc = preempt_count();
- ret = __trace_graph_entry(tr, trace, flags, pc);
+ trace_ctx = tracing_gen_ctx_flags(flags);
+ ret = __trace_graph_entry(tr, trace, trace_ctx);
} else {
ret = 0;
}
@@ -188,7 +193,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
static void
__trace_graph_function(struct trace_array *tr,
- unsigned long ip, unsigned long flags, int pc)
+ unsigned long ip, unsigned int trace_ctx)
{
u64 time = trace_clock_local();
struct ftrace_graph_ent ent = {
@@ -202,22 +207,21 @@ __trace_graph_function(struct trace_array *tr,
.rettime = time,
};
- __trace_graph_entry(tr, &ent, flags, pc);
- __trace_graph_return(tr, &ret, flags, pc);
+ __trace_graph_entry(tr, &ent, trace_ctx);
+ __trace_graph_return(tr, &ret, trace_ctx);
}
void
trace_graph_function(struct trace_array *tr,
unsigned long ip, unsigned long parent_ip,
- unsigned long flags, int pc)
+ unsigned int trace_ctx)
{
- __trace_graph_function(tr, ip, flags, pc);
+ __trace_graph_function(tr, ip, trace_ctx);
}
void __trace_graph_return(struct trace_array *tr,
struct ftrace_graph_ret *trace,
- unsigned long flags,
- int pc)
+ unsigned int trace_ctx)
{
struct trace_event_call *call = &event_funcgraph_exit;
struct ring_buffer_event *event;
@@ -225,7 +229,7 @@ void __trace_graph_return(struct trace_array *tr,
struct ftrace_graph_ret_entry *entry;
event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
- sizeof(*entry), flags, pc);
+ sizeof(*entry), trace_ctx);
if (!event)
return;
entry = ring_buffer_event_data(event);
@@ -239,9 +243,9 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
struct trace_array *tr = graph_array;
struct trace_array_cpu *data;
unsigned long flags;
+ unsigned int trace_ctx;
long disabled;
int cpu;
- int pc;
ftrace_graph_addr_finish(trace);
@@ -255,8 +259,8 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
data = per_cpu_ptr(tr->array_buffer.data, cpu);
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
- pc = preempt_count();
- __trace_graph_return(tr, trace, flags, pc);
+ trace_ctx = tracing_gen_ctx_flags(flags);
+ __trace_graph_return(tr, trace, trace_ctx);
}
atomic_dec(&data->disabled);
local_irq_restore(flags);
@@ -621,6 +625,56 @@ print_graph_duration(struct trace_array *tr, unsigned long long duration,
trace_seq_puts(s, "| ");
}
+#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
+
+#define __TRACE_GRAPH_PRINT_RETVAL TRACE_GRAPH_PRINT_RETVAL
+
+static void print_graph_retval(struct trace_seq *s, unsigned long retval,
+ bool leaf, void *func, bool hex_format)
+{
+ unsigned long err_code = 0;
+
+ if (retval == 0 || hex_format)
+ goto done;
+
+ /* Check if the return value matches the negative format */
+ if (IS_ENABLED(CONFIG_64BIT) && (retval & BIT(31)) &&
+ (((u64)retval) >> 32) == 0) {
+ /* sign extension */
+ err_code = (unsigned long)(s32)retval;
+ } else {
+ err_code = retval;
+ }
+
+ if (!IS_ERR_VALUE(err_code))
+ err_code = 0;
+
+done:
+ if (leaf) {
+ if (hex_format || (err_code == 0))
+ trace_seq_printf(s, "%ps(); /* = 0x%lx */\n",
+ func, retval);
+ else
+ trace_seq_printf(s, "%ps(); /* = %ld */\n",
+ func, err_code);
+ } else {
+ if (hex_format || (err_code == 0))
+ trace_seq_printf(s, "} /* %ps = 0x%lx */\n",
+ func, retval);
+ else
+ trace_seq_printf(s, "} /* %ps = %ld */\n",
+ func, err_code);
+ }
+}
+
+#else
+
+#define __TRACE_GRAPH_PRINT_RETVAL 0
+
+#define print_graph_retval(_seq, _retval, _leaf, _func, _format) do {} while (0)
+
+#endif
+
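A worked example of the sign-extension check above (userspace sketch, not part of the patch): on a 64-bit kernel a 32-bit negative errno such as -ENOENT may be stored in the unsigned long return slot as 0x00000000fffffffe; detecting bit 31 set with a clear upper half recovers the signed value so IS_ERR_VALUE() can classify it. The snippet assumes a 64-bit unsigned long, matching the CONFIG_64BIT branch.

#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
	/* 32-bit -2 (-ENOENT) widened into an unsigned long without sign */
	unsigned long retval = 0x00000000fffffffeUL;
	unsigned long err_code = retval;

	if ((retval & (1UL << 31)) && (retval >> 32) == 0)
		err_code = (unsigned long)(int32_t)retval;	/* sign extension */

	if (IS_ERR_VALUE(err_code))
		printf("decoded as error %ld\n", (long)err_code);	/* prints -2 */
	return 0;
}
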
/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
@@ -665,7 +719,15 @@ print_graph_entry_leaf(struct trace_iterator *iter,
for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
trace_seq_putc(s, ' ');
- trace_seq_printf(s, "%ps();\n", (void *)call->func);
+ /*
+ * Write out the function return value if the option funcgraph-retval is
+ * enabled.
+ */
+ if (flags & __TRACE_GRAPH_PRINT_RETVAL)
+ print_graph_retval(s, graph_ret->retval, true, (void *)call->func,
+ !!(flags & TRACE_GRAPH_PRINT_RETVAL_HEX));
+ else
+ trace_seq_printf(s, "%ps();\n", (void *)call->func);
print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
cpu, iter->ent->pid, flags);
@@ -766,7 +828,7 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
* - we are inside irq code
* - we just entered irq code
*
- * retunns 0 if
+ * returns 0 if
* - funcgraph-interrupts option is set
* - we are not inside irq code
*/
@@ -944,20 +1006,29 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
trace_seq_putc(s, ' ');
/*
- * If the return function does not have a matching entry,
- * then the entry was lost. Instead of just printing
- * the '}' and letting the user guess what function this
- * belongs to, write out the function name. Always do
- * that if the funcgraph-tail option is enabled.
+ * Always write out the function name and its return value if the
+ * funcgraph-retval option is enabled.
*/
- if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
- trace_seq_puts(s, "}\n");
- else
- trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
+ if (flags & __TRACE_GRAPH_PRINT_RETVAL) {
+ print_graph_retval(s, trace->retval, false, (void *)trace->func,
+ !!(flags & TRACE_GRAPH_PRINT_RETVAL_HEX));
+ } else {
+ /*
+ * If the return function does not have a matching entry,
+ * then the entry was lost. Instead of just printing
+ * the '}' and letting the user guess what function this
+ * belongs to, write out the function name. Always do
+ * that if the funcgraph-tail option is enabled.
+ */
+ if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
+ trace_seq_puts(s, "}\n");
+ else
+ trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
+ }
/* Overrun */
if (flags & TRACE_GRAPH_PRINT_OVERRUN)
- trace_seq_printf(s, " (Overruns: %lu)\n",
+ trace_seq_printf(s, " (Overruns: %u)\n",
trace->overrun);
print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
@@ -1336,13 +1407,13 @@ static const struct file_operations graph_depth_fops = {
static __init int init_graph_tracefs(void)
{
- struct dentry *d_tracer;
+ int ret;
- d_tracer = tracing_init_dentry();
- if (IS_ERR(d_tracer))
+ ret = tracing_init_dentry();
+ if (ret)
return 0;
- trace_create_file("max_graph_depth", 0644, d_tracer,
+ trace_create_file("max_graph_depth", TRACE_MODE_WRITE, NULL,
NULL, &graph_depth_fops);
return 0;
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index e2be7bb7ef7e..b791524a6536 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -34,7 +34,7 @@
* Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. <jcm@redhat.com>
* Copyright (C) 2013-2016 Steven Rostedt, Red Hat, Inc. <srostedt@redhat.com>
*
- * Includes useful feedback from Clark Williams <clark@redhat.com>
+ * Includes useful feedback from Clark Williams <williams@redhat.com>
*
*/
#include <linux/kthread.h>
@@ -54,20 +54,33 @@ static struct trace_array *hwlat_trace;
#define DEFAULT_SAMPLE_WIDTH 500000 /* 0.5s */
#define DEFAULT_LAT_THRESHOLD 10 /* 10us */
-/* sampling thread*/
-static struct task_struct *hwlat_kthread;
-
static struct dentry *hwlat_sample_width; /* sample width us */
static struct dentry *hwlat_sample_window; /* sample window us */
+static struct dentry *hwlat_thread_mode; /* hwlat thread mode */
+
+enum {
+ MODE_NONE = 0,
+ MODE_ROUND_ROBIN,
+ MODE_PER_CPU,
+ MODE_MAX
+};
+static char *thread_mode_str[] = { "none", "round-robin", "per-cpu" };
/* Save the previous tracing_thresh value */
static unsigned long save_tracing_thresh;
-/* NMI timestamp counters */
-static u64 nmi_ts_start;
-static u64 nmi_total_ts;
-static int nmi_count;
-static int nmi_cpu;
+/* runtime kthread data */
+struct hwlat_kthread_data {
+ struct task_struct *kthread;
+ /* NMI timestamp counters */
+ u64 nmi_ts_start;
+ u64 nmi_total_ts;
+ int nmi_count;
+ int nmi_cpu;
+};
+
+static struct hwlat_kthread_data hwlat_single_cpu_data;
+static DEFINE_PER_CPU(struct hwlat_kthread_data, hwlat_per_cpu_data);
/* Tells NMIs to call back to the hwlat tracer to record timestamps */
bool trace_hwlat_callback_enabled;
@@ -83,7 +96,7 @@ struct hwlat_sample {
u64 nmi_total_ts; /* Total time spent in NMIs */
struct timespec64 timestamp; /* wall time */
int nmi_count; /* # NMIs during this sample */
- int count; /* # of iteratons over threash */
+ int count; /* # of iterations over thresh */
};
/* keep the global state somewhere. */
@@ -96,11 +109,24 @@ static struct hwlat_data {
u64 sample_window; /* total sampling window (on+off) */
u64 sample_width; /* active sampling portion of window */
+ int thread_mode; /* thread mode */
+
} hwlat_data = {
.sample_window = DEFAULT_SAMPLE_WINDOW,
.sample_width = DEFAULT_SAMPLE_WIDTH,
+ .thread_mode = MODE_ROUND_ROBIN
};
+static struct hwlat_kthread_data *get_cpu_data(void)
+{
+ if (hwlat_data.thread_mode == MODE_PER_CPU)
+ return this_cpu_ptr(&hwlat_per_cpu_data);
+ else
+ return &hwlat_single_cpu_data;
+}
+
+static bool hwlat_busy;
+
static void trace_hwlat_sample(struct hwlat_sample *sample)
{
struct trace_array *tr = hwlat_trace;
@@ -108,14 +134,9 @@ static void trace_hwlat_sample(struct hwlat_sample *sample)
struct trace_buffer *buffer = tr->array_buffer.buffer;
struct ring_buffer_event *event;
struct hwlat_entry *entry;
- unsigned long flags;
- int pc;
-
- pc = preempt_count();
- local_save_flags(flags);
event = trace_buffer_lock_reserve(buffer, TRACE_HWLAT, sizeof(*entry),
- flags, pc);
+ tracing_gen_ctx());
if (!event)
return;
entry = ring_buffer_event_data(event);
@@ -141,7 +162,9 @@ static void trace_hwlat_sample(struct hwlat_sample *sample)
void trace_hwlat_callback(bool enter)
{
- if (smp_processor_id() != nmi_cpu)
+ struct hwlat_kthread_data *kdata = get_cpu_data();
+
+ if (!kdata->kthread)
return;
/*
@@ -150,15 +173,24 @@ void trace_hwlat_callback(bool enter)
*/
if (!IS_ENABLED(CONFIG_GENERIC_SCHED_CLOCK)) {
if (enter)
- nmi_ts_start = time_get();
+ kdata->nmi_ts_start = time_get();
else
- nmi_total_ts += time_get() - nmi_ts_start;
+ kdata->nmi_total_ts += time_get() - kdata->nmi_ts_start;
}
if (enter)
- nmi_count++;
+ kdata->nmi_count++;
}
+/*
+ * hwlat_err - report a hwlat error.
+ */
+#define hwlat_err(msg) ({ \
+ struct trace_array *tr = hwlat_trace; \
+ \
+ trace_array_printk_buf(tr->array_buffer.buffer, _THIS_IP_, msg); \
+})
+
/**
* get_sample - sample the CPU TSC and look for likely hardware latencies
*
@@ -168,6 +200,7 @@ void trace_hwlat_callback(bool enter)
*/
static int get_sample(void)
{
+ struct hwlat_kthread_data *kdata = get_cpu_data();
struct trace_array *tr = hwlat_trace;
struct hwlat_sample s;
time_type start, t1, t2, last_t2;
@@ -180,9 +213,8 @@ static int get_sample(void)
do_div(thresh, NSEC_PER_USEC); /* modifies interval value */
- nmi_cpu = smp_processor_id();
- nmi_total_ts = 0;
- nmi_count = 0;
+ kdata->nmi_total_ts = 0;
+ kdata->nmi_count = 0;
/* Make sure NMIs see this first */
barrier();
@@ -202,7 +234,7 @@ static int get_sample(void)
outer_diff = time_to_us(time_sub(t1, last_t2));
/* This shouldn't happen */
if (outer_diff < 0) {
- pr_err(BANNER "time running backwards\n");
+ hwlat_err(BANNER "time running backwards\n");
goto out;
}
if (outer_diff > outer_sample)
@@ -214,7 +246,7 @@ static int get_sample(void)
/* Check for possible overflows */
if (total < last_total) {
- pr_err("Time total overflowed\n");
+ hwlat_err("Time total overflowed\n");
break;
}
last_total = total;
@@ -230,7 +262,7 @@ static int get_sample(void)
/* This shouldn't happen */
if (diff < 0) {
- pr_err(BANNER "time running backwards\n");
+ hwlat_err(BANNER "time running backwards\n");
goto out;
}
@@ -252,15 +284,15 @@ static int get_sample(void)
ret = 1;
/* We read in microseconds */
- if (nmi_total_ts)
- do_div(nmi_total_ts, NSEC_PER_USEC);
+ if (kdata->nmi_total_ts)
+ do_div(kdata->nmi_total_ts, NSEC_PER_USEC);
hwlat_data.count++;
s.seqnum = hwlat_data.count;
s.duration = sample;
s.outer_duration = outer_sample;
- s.nmi_total_ts = nmi_total_ts;
- s.nmi_count = nmi_count;
+ s.nmi_total_ts = kdata->nmi_total_ts;
+ s.nmi_count = kdata->nmi_count;
s.count = count;
trace_hwlat_sample(&s);
@@ -278,42 +310,41 @@ out:
}
static struct cpumask save_cpumask;
-static bool disable_migrate;
static void move_to_next_cpu(void)
{
struct cpumask *current_mask = &save_cpumask;
+ struct trace_array *tr = hwlat_trace;
int next_cpu;
- if (disable_migrate)
- return;
/*
* If for some reason the user modifies the CPU affinity
* of this thread, then stop migrating for the duration
* of the current test.
*/
if (!cpumask_equal(current_mask, current->cpus_ptr))
- goto disable;
+ goto change_mode;
- get_online_cpus();
- cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
- next_cpu = cpumask_next(smp_processor_id(), current_mask);
- put_online_cpus();
+ cpus_read_lock();
+ cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
+ next_cpu = cpumask_next(raw_smp_processor_id(), current_mask);
+ cpus_read_unlock();
if (next_cpu >= nr_cpu_ids)
next_cpu = cpumask_first(current_mask);
if (next_cpu >= nr_cpu_ids) /* Shouldn't happen! */
- goto disable;
+ goto change_mode;
cpumask_clear(current_mask);
cpumask_set_cpu(next_cpu, current_mask);
- sched_setaffinity(0, current_mask);
+ set_cpus_allowed_ptr(current, current_mask);
return;
- disable:
- disable_migrate = true;
+ change_mode:
+ hwlat_data.thread_mode = MODE_NONE;
+ pr_info(BANNER "cpumask changed while in round-robin mode, switching to mode none\n");
}
/*
@@ -332,7 +363,8 @@ static int kthread_fn(void *data)
while (!kthread_should_stop()) {
- move_to_next_cpu();
+ if (hwlat_data.thread_mode == MODE_ROUND_ROBIN)
+ move_to_next_cpu();
local_irq_disable();
get_sample();
@@ -355,179 +387,377 @@ static int kthread_fn(void *data)
return 0;
}
-/**
- * start_kthread - Kick off the hardware latency sampling/detector kthread
+/*
+ * stop_single_kthread - Inform the hardware latency sampling/detector kthread to stop
+ *
+ * This kicks the running hardware latency sampling/detector kernel thread and
+ * tells it to stop sampling now. Use this on unload and at system shutdown.
+ */
+static void stop_single_kthread(void)
+{
+ struct hwlat_kthread_data *kdata = get_cpu_data();
+ struct task_struct *kthread;
+
+ cpus_read_lock();
+ kthread = kdata->kthread;
+
+ if (!kthread)
+ goto out_put_cpus;
+
+ kthread_stop(kthread);
+ kdata->kthread = NULL;
+
+out_put_cpus:
+ cpus_read_unlock();
+}
+
+
+/*
+ * start_single_kthread - Kick off the hardware latency sampling/detector kthread
*
* This starts the kernel thread that will sit and sample the CPU timestamp
* counter (TSC or similar) and look for potential hardware latencies.
*/
-static int start_kthread(struct trace_array *tr)
+static int start_single_kthread(struct trace_array *tr)
{
+ struct hwlat_kthread_data *kdata = get_cpu_data();
struct cpumask *current_mask = &save_cpumask;
struct task_struct *kthread;
int next_cpu;
- if (WARN_ON(hwlat_kthread))
- return 0;
-
- /* Just pick the first CPU on first iteration */
- current_mask = &save_cpumask;
- get_online_cpus();
- cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
- put_online_cpus();
- next_cpu = cpumask_first(current_mask);
+ cpus_read_lock();
+ if (kdata->kthread)
+ goto out_put_cpus;
kthread = kthread_create(kthread_fn, NULL, "hwlatd");
if (IS_ERR(kthread)) {
pr_err(BANNER "could not start sampling thread\n");
+ cpus_read_unlock();
return -ENOMEM;
}
- cpumask_clear(current_mask);
- cpumask_set_cpu(next_cpu, current_mask);
- sched_setaffinity(kthread->pid, current_mask);
+ /* Just pick the first CPU on first iteration */
+ cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
+
+ if (hwlat_data.thread_mode == MODE_ROUND_ROBIN) {
+ next_cpu = cpumask_first(current_mask);
+ cpumask_clear(current_mask);
+ cpumask_set_cpu(next_cpu, current_mask);
+
+ }
+
+ set_cpus_allowed_ptr(kthread, current_mask);
- hwlat_kthread = kthread;
+ kdata->kthread = kthread;
wake_up_process(kthread);
+out_put_cpus:
+ cpus_read_unlock();
return 0;
}
-/**
- * stop_kthread - Inform the hardware latency samping/detector kthread to stop
+/*
+ * stop_cpu_kthread - Stop a hwlat cpu kthread
+ */
+static void stop_cpu_kthread(unsigned int cpu)
+{
+ struct task_struct *kthread;
+
+ kthread = per_cpu(hwlat_per_cpu_data, cpu).kthread;
+ if (kthread)
+ kthread_stop(kthread);
+ per_cpu(hwlat_per_cpu_data, cpu).kthread = NULL;
+}
+
+/*
+ * stop_per_cpu_kthreads - Inform the hardware latency sampling/detector kthreads to stop
*
- * This kicks the running hardware latency sampling/detector kernel thread and
+ * This kicks the running hardware latency sampling/detector kernel threads and
* tells it to stop sampling now. Use this on unload and at system shutdown.
*/
-static void stop_kthread(void)
+static void stop_per_cpu_kthreads(void)
{
- if (!hwlat_kthread)
- return;
- kthread_stop(hwlat_kthread);
- hwlat_kthread = NULL;
+ unsigned int cpu;
+
+ cpus_read_lock();
+ for_each_online_cpu(cpu)
+ stop_cpu_kthread(cpu);
+ cpus_read_unlock();
}
/*
- * hwlat_read - Wrapper read function for reading both window and width
- * @filp: The active open file structure
- * @ubuf: The userspace provided buffer to read value into
- * @cnt: The maximum number of bytes to read
- * @ppos: The current "file" position
- *
- * This function provides a generic read implementation for the global state
- * "hwlat_data" structure filesystem entries.
+ * start_cpu_kthread - Start a hwlat cpu kthread
*/
-static ssize_t hwlat_read(struct file *filp, char __user *ubuf,
- size_t cnt, loff_t *ppos)
+static int start_cpu_kthread(unsigned int cpu)
{
- char buf[U64STR_SIZE];
- u64 *entry = filp->private_data;
- u64 val;
- int len;
+ struct task_struct *kthread;
- if (!entry)
- return -EFAULT;
+ /* Do not start a new hwlatd thread if it is already running */
+ if (per_cpu(hwlat_per_cpu_data, cpu).kthread)
+ return 0;
- if (cnt > sizeof(buf))
- cnt = sizeof(buf);
+ kthread = kthread_run_on_cpu(kthread_fn, NULL, cpu, "hwlatd/%u");
+ if (IS_ERR(kthread)) {
+ pr_err(BANNER "could not start sampling thread\n");
+ return -ENOMEM;
+ }
- val = *entry;
+ per_cpu(hwlat_per_cpu_data, cpu).kthread = kthread;
- len = snprintf(buf, sizeof(buf), "%llu\n", val);
+ return 0;
+}
- return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
+#ifdef CONFIG_HOTPLUG_CPU
+static void hwlat_hotplug_workfn(struct work_struct *dummy)
+{
+ struct trace_array *tr = hwlat_trace;
+ unsigned int cpu = smp_processor_id();
+
+ mutex_lock(&trace_types_lock);
+ mutex_lock(&hwlat_data.lock);
+ cpus_read_lock();
+
+ if (!hwlat_busy || hwlat_data.thread_mode != MODE_PER_CPU)
+ goto out_unlock;
+
+ if (!cpumask_test_cpu(cpu, tr->tracing_cpumask))
+ goto out_unlock;
+
+ start_cpu_kthread(cpu);
+
+out_unlock:
+ cpus_read_unlock();
+ mutex_unlock(&hwlat_data.lock);
+ mutex_unlock(&trace_types_lock);
}
-/**
- * hwlat_width_write - Write function for "width" entry
- * @filp: The active open file structure
- * @ubuf: The user buffer that contains the value to write
- * @cnt: The maximum number of bytes to write to "file"
- * @ppos: The current position in @file
+static DECLARE_WORK(hwlat_hotplug_work, hwlat_hotplug_workfn);
+
+/*
+ * hwlat_cpu_init - CPU hotplug online callback function
+ */
+static int hwlat_cpu_init(unsigned int cpu)
+{
+ schedule_work_on(cpu, &hwlat_hotplug_work);
+ return 0;
+}
+
+/*
+ * hwlat_cpu_die - CPU hotplug offline callback function
+ */
+static int hwlat_cpu_die(unsigned int cpu)
+{
+ stop_cpu_kthread(cpu);
+ return 0;
+}
+
+static void hwlat_init_hotplug_support(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "trace/hwlat:online",
+ hwlat_cpu_init, hwlat_cpu_die);
+ if (ret < 0)
+ pr_warn(BANNER "Error to init cpu hotplug support\n");
+
+ return;
+}
+#else /* CONFIG_HOTPLUG_CPU */
+static void hwlat_init_hotplug_support(void)
+{
+ return;
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/*
+ * start_per_cpu_kthreads - Kick off the hardware latency sampling/detector kthreads
*
- * This function provides a write implementation for the "width" interface
- * to the hardware latency detector. It can be used to configure
- * for how many us of the total window us we will actively sample for any
- * hardware-induced latency periods. Obviously, it is not possible to
- * sample constantly and have the system respond to a sample reader, or,
- * worse, without having the system appear to have gone out to lunch. It
- * is enforced that width is less that the total window size.
+ * This starts the kernel threads that will sit on potentially all cpus and
+ * sample the CPU timestamp counter (TSC or similar) and look for potential
+ * hardware latencies.
*/
-static ssize_t
-hwlat_width_write(struct file *filp, const char __user *ubuf,
- size_t cnt, loff_t *ppos)
+static int start_per_cpu_kthreads(struct trace_array *tr)
{
- u64 val;
- int err;
+ struct cpumask *current_mask = &save_cpumask;
+ unsigned int cpu;
+ int retval;
- err = kstrtoull_from_user(ubuf, cnt, 10, &val);
- if (err)
- return err;
+ cpus_read_lock();
+ /*
+ * Run only on CPUs in which hwlat is allowed to run.
+ */
+ cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
+
+ for_each_cpu(cpu, current_mask) {
+ retval = start_cpu_kthread(cpu);
+ if (retval)
+ goto out_error;
+ }
+ cpus_read_unlock();
+
+ return 0;
+
+out_error:
+ cpus_read_unlock();
+ stop_per_cpu_kthreads();
+ return retval;
+}
+
+static void *s_mode_start(struct seq_file *s, loff_t *pos)
+{
+ int mode = *pos;
mutex_lock(&hwlat_data.lock);
- if (val < hwlat_data.sample_window)
- hwlat_data.sample_width = val;
+
+ if (mode >= MODE_MAX)
+ return NULL;
+
+ return pos;
+}
+
+static void *s_mode_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ int mode = ++(*pos);
+
+ if (mode >= MODE_MAX)
+ return NULL;
+
+ return pos;
+}
+
+static int s_mode_show(struct seq_file *s, void *v)
+{
+ loff_t *pos = v;
+ int mode = *pos;
+
+ if (mode == hwlat_data.thread_mode)
+ seq_printf(s, "[%s]", thread_mode_str[mode]);
else
- err = -EINVAL;
- mutex_unlock(&hwlat_data.lock);
+ seq_printf(s, "%s", thread_mode_str[mode]);
- if (err)
- return err;
+ if (mode < MODE_MAX - 1) /* if mode is any but last */
+ seq_puts(s, " ");
- return cnt;
+ return 0;
}
+static void s_mode_stop(struct seq_file *s, void *v)
+{
+ seq_puts(s, "\n");
+ mutex_unlock(&hwlat_data.lock);
+}
+
+static const struct seq_operations thread_mode_seq_ops = {
+ .start = s_mode_start,
+ .next = s_mode_next,
+ .show = s_mode_show,
+ .stop = s_mode_stop
+};
+
+static int hwlat_mode_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &thread_mode_seq_ops);
+};
+
+static void hwlat_tracer_start(struct trace_array *tr);
+static void hwlat_tracer_stop(struct trace_array *tr);
+
/**
- * hwlat_window_write - Write function for "window" entry
+ * hwlat_mode_write - Write function for "mode" entry
* @filp: The active open file structure
* @ubuf: The user buffer that contains the value to write
* @cnt: The maximum number of bytes to write to "file"
* @ppos: The current position in @file
*
- * This function provides a write implementation for the "window" interface
- * to the hardware latency detetector. The window is the total time
- * in us that will be considered one sample period. Conceptually, windows
- * occur back-to-back and contain a sample width period during which
- * actual sampling occurs. Can be used to write a new total window size. It
- * is enfoced that any value written must be greater than the sample width
- * size, or an error results.
+ * This function provides a write implementation for the "mode" interface
+ * to the hardware latency detector. hwlatd has different operation modes.
+ * The "none" sets the allowed cpumask for a single hwlatd thread at the
+ * startup and lets the scheduler handle the migration. The default mode is
+ * the "round-robin" one, in which a single hwlatd thread runs, migrating
+ * among the allowed CPUs in a round-robin fashion. The "per-cpu" mode
+ * creates one hwlatd thread per allowed CPU.
*/
-static ssize_t
-hwlat_window_write(struct file *filp, const char __user *ubuf,
- size_t cnt, loff_t *ppos)
+static ssize_t hwlat_mode_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
{
- u64 val;
- int err;
+ struct trace_array *tr = hwlat_trace;
+ const char *mode;
+ char buf[64];
+ int ret, i;
- err = kstrtoull_from_user(ubuf, cnt, 10, &val);
- if (err)
- return err;
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, ubuf, cnt))
+ return -EFAULT;
+
+ buf[cnt] = 0;
+
+ mode = strstrip(buf);
+
+ ret = -EINVAL;
+
+ /*
+ * trace_types_lock is taken to avoid concurrency on start/stop
+ * and hwlat_busy.
+ */
+ mutex_lock(&trace_types_lock);
+ if (hwlat_busy)
+ hwlat_tracer_stop(tr);
mutex_lock(&hwlat_data.lock);
- if (hwlat_data.sample_width < val)
- hwlat_data.sample_window = val;
- else
- err = -EINVAL;
+
+ for (i = 0; i < MODE_MAX; i++) {
+ if (strcmp(mode, thread_mode_str[i]) == 0) {
+ hwlat_data.thread_mode = i;
+ ret = cnt;
+ }
+ }
+
mutex_unlock(&hwlat_data.lock);
- if (err)
- return err;
+ if (hwlat_busy)
+ hwlat_tracer_start(tr);
+ mutex_unlock(&trace_types_lock);
+
+ *ppos += cnt;
+
- return cnt;
+
+ return ret;
}
-static const struct file_operations width_fops = {
- .open = tracing_open_generic,
- .read = hwlat_read,
- .write = hwlat_width_write,
+/*
+ * The width parameter is read/write using the generic trace_min_max_param
+ * method. The *val is protected by the hwlat_data lock and is upper
+ * bounded by the window parameter.
+ */
+static struct trace_min_max_param hwlat_width = {
+ .lock = &hwlat_data.lock,
+ .val = &hwlat_data.sample_width,
+ .max = &hwlat_data.sample_window,
+ .min = NULL,
};
-static const struct file_operations window_fops = {
- .open = tracing_open_generic,
- .read = hwlat_read,
- .write = hwlat_window_write,
+/*
+ * The window parameter is read/write using the generic trace_min_max_param
+ * method. The *val is protected by the hwlat_data lock and is lower
+ * bounded by the width parameter.
+ */
+static struct trace_min_max_param hwlat_window = {
+ .lock = &hwlat_data.lock,
+ .val = &hwlat_data.sample_window,
+ .max = NULL,
+ .min = &hwlat_data.sample_width,
};
+static const struct file_operations thread_mode_fops = {
+ .open = hwlat_mode_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+ .write = hwlat_mode_write
+};
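
Putting the new interface together (hedged userspace sketch, not part of the patch): reading hwlat_detector/mode lists the modes with the active one in brackets, e.g. "none [round-robin] per-cpu" as produced by s_mode_show(), and writing a mode name switches modes, stopping and restarting the tracer if it was busy; the width file stays bounded by window through trace_min_max_param. The tracefs path below is an assumption.

/* Hypothetical sketch: query and set the hwlat thread mode. */
#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/sys/kernel/tracing/hwlat_detector/mode", "r");

	if (!f)
		return 1;
	if (fgets(line, sizeof(line), f))
		printf("modes: %s", line);	/* e.g. "none [round-robin] per-cpu" */
	fclose(f);

	f = fopen("/sys/kernel/tracing/hwlat_detector/mode", "w");
	if (!f)
		return 1;
	fputs("per-cpu\n", f);		/* one hwlatd/N thread per allowed CPU */
	return fclose(f) ? 1 : 0;
}
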
/**
* init_tracefs - A function to initialize the tracefs interface files
*
@@ -538,31 +768,38 @@ static const struct file_operations window_fops = {
*/
static int init_tracefs(void)
{
- struct dentry *d_tracer;
+ int ret;
struct dentry *top_dir;
- d_tracer = tracing_init_dentry();
- if (IS_ERR(d_tracer))
+ ret = tracing_init_dentry();
+ if (ret)
return -ENOMEM;
- top_dir = tracefs_create_dir("hwlat_detector", d_tracer);
+ top_dir = tracefs_create_dir("hwlat_detector", NULL);
if (!top_dir)
return -ENOMEM;
- hwlat_sample_window = tracefs_create_file("window", 0640,
+ hwlat_sample_window = tracefs_create_file("window", TRACE_MODE_WRITE,
top_dir,
- &hwlat_data.sample_window,
- &window_fops);
+ &hwlat_window,
+ &trace_min_max_fops);
if (!hwlat_sample_window)
goto err;
- hwlat_sample_width = tracefs_create_file("width", 0644,
+ hwlat_sample_width = tracefs_create_file("width", TRACE_MODE_WRITE,
top_dir,
- &hwlat_data.sample_width,
- &width_fops);
+ &hwlat_width,
+ &trace_min_max_fops);
if (!hwlat_sample_width)
goto err;
+ hwlat_thread_mode = trace_create_file("mode", TRACE_MODE_WRITE,
+ top_dir,
+ NULL,
+ &thread_mode_fops);
+ if (!hwlat_thread_mode)
+ goto err;
+
return 0;
err:
@@ -574,18 +811,22 @@ static void hwlat_tracer_start(struct trace_array *tr)
{
int err;
- err = start_kthread(tr);
+ if (hwlat_data.thread_mode == MODE_PER_CPU)
+ err = start_per_cpu_kthreads(tr);
+ else
+ err = start_single_kthread(tr);
if (err)
pr_err(BANNER "Cannot start hwlat kthread\n");
}
static void hwlat_tracer_stop(struct trace_array *tr)
{
- stop_kthread();
+ if (hwlat_data.thread_mode == MODE_PER_CPU)
+ stop_per_cpu_kthreads();
+ else
+ stop_single_kthread();
}
-static bool hwlat_busy;
-
static int hwlat_tracer_init(struct trace_array *tr)
{
/* Only allow one instance to enable this */
@@ -594,7 +835,6 @@ static int hwlat_tracer_init(struct trace_array *tr)
hwlat_trace = tr;
- disable_migrate = false;
hwlat_data.count = 0;
tr->max_latency = 0;
save_tracing_thresh = tracing_thresh;
@@ -613,7 +853,7 @@ static int hwlat_tracer_init(struct trace_array *tr)
static void hwlat_tracer_reset(struct trace_array *tr)
{
- stop_kthread();
+ hwlat_tracer_stop(tr);
/* the tracing threshold is static between runs */
last_tracing_thresh = tracing_thresh;
@@ -642,6 +882,8 @@ __init static int init_hwlat_tracer(void)
if (ret)
return ret;
+ hwlat_init_hotplug_support();
+
init_tracefs();
return 0;
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 10bbb0f381d5..ba37f768e2f2 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -138,16 +138,19 @@ static int func_prolog_dec(struct trace_array *tr,
*/
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *op, struct pt_regs *pt_regs)
+ struct ftrace_ops *op, struct ftrace_regs *fregs)
{
struct trace_array *tr = irqsoff_trace;
struct trace_array_cpu *data;
unsigned long flags;
+ unsigned int trace_ctx;
if (!func_prolog_dec(tr, &data, &flags))
return;
- trace_function(tr, ip, parent_ip, flags, preempt_count());
+ trace_ctx = tracing_gen_ctx_flags(flags);
+
+ trace_function(tr, ip, parent_ip, trace_ctx);
atomic_dec(&data->disabled);
}
@@ -177,8 +180,8 @@ static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
struct trace_array *tr = irqsoff_trace;
struct trace_array_cpu *data;
unsigned long flags;
+ unsigned int trace_ctx;
int ret;
- int pc;
if (ftrace_graph_ignore_func(trace))
return 0;
@@ -195,8 +198,8 @@ static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
if (!func_prolog_dec(tr, &data, &flags))
return 0;
- pc = preempt_count();
- ret = __trace_graph_entry(tr, trace, flags, pc);
+ trace_ctx = tracing_gen_ctx_flags(flags);
+ ret = __trace_graph_entry(tr, trace, trace_ctx);
atomic_dec(&data->disabled);
return ret;
@@ -207,15 +210,15 @@ static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
struct trace_array *tr = irqsoff_trace;
struct trace_array_cpu *data;
unsigned long flags;
- int pc;
+ unsigned int trace_ctx;
ftrace_graph_addr_finish(trace);
if (!func_prolog_dec(tr, &data, &flags))
return;
- pc = preempt_count();
- __trace_graph_return(tr, trace, flags, pc);
+ trace_ctx = tracing_gen_ctx_flags(flags);
+ __trace_graph_return(tr, trace, trace_ctx);
atomic_dec(&data->disabled);
}
@@ -228,7 +231,8 @@ static void irqsoff_trace_open(struct trace_iterator *iter)
{
if (is_graph(iter->tr))
graph_trace_open(iter);
-
+ else
+ iter->private = NULL;
}
static void irqsoff_trace_close(struct trace_iterator *iter)
@@ -267,12 +271,12 @@ static void irqsoff_print_header(struct seq_file *s)
static void
__trace_function(struct trace_array *tr,
unsigned long ip, unsigned long parent_ip,
- unsigned long flags, int pc)
+ unsigned int trace_ctx)
{
if (is_graph(tr))
- trace_graph_function(tr, ip, parent_ip, flags, pc);
+ trace_graph_function(tr, ip, parent_ip, trace_ctx);
else
- trace_function(tr, ip, parent_ip, flags, pc);
+ trace_function(tr, ip, parent_ip, trace_ctx);
}
#else
@@ -322,15 +326,13 @@ check_critical_timing(struct trace_array *tr,
{
u64 T0, T1, delta;
unsigned long flags;
- int pc;
+ unsigned int trace_ctx;
T0 = data->preempt_timestamp;
T1 = ftrace_now(cpu);
delta = T1-T0;
- local_save_flags(flags);
-
- pc = preempt_count();
+ trace_ctx = tracing_gen_ctx();
if (!report_latency(tr, delta))
goto out;
@@ -341,9 +343,9 @@ check_critical_timing(struct trace_array *tr,
if (!report_latency(tr, delta))
goto out_unlock;
- __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
+ __trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
/* Skip 5 functions to get to the irq/preempt enable function */
- __trace_stack(tr, flags, 5, pc);
+ __trace_stack(tr, trace_ctx, 5);
if (data->critical_sequence != max_sequence)
goto out_unlock;
@@ -363,16 +365,15 @@ out_unlock:
out:
data->critical_sequence = max_sequence;
data->preempt_timestamp = ftrace_now(cpu);
- __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
+ __trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
}
static nokprobe_inline void
-start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
+start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
int cpu;
struct trace_array *tr = irqsoff_trace;
struct trace_array_cpu *data;
- unsigned long flags;
if (!tracer_enabled || !tracing_is_enabled())
return;
@@ -393,9 +394,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
data->preempt_timestamp = ftrace_now(cpu);
data->critical_start = parent_ip ? : ip;
- local_save_flags(flags);
-
- __trace_function(tr, ip, parent_ip, flags, pc);
+ __trace_function(tr, ip, parent_ip, tracing_gen_ctx());
per_cpu(tracing_cpu, cpu) = 1;
@@ -403,12 +402,12 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
}
static nokprobe_inline void
-stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
+stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
int cpu;
struct trace_array *tr = irqsoff_trace;
struct trace_array_cpu *data;
- unsigned long flags;
+ unsigned int trace_ctx;
cpu = raw_smp_processor_id();
/* Always clear the tracing cpu on stopping the trace */
@@ -428,8 +427,8 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
atomic_inc(&data->disabled);
- local_save_flags(flags);
- __trace_function(tr, ip, parent_ip, flags, pc);
+ trace_ctx = tracing_gen_ctx();
+ __trace_function(tr, ip, parent_ip, trace_ctx);
check_critical_timing(tr, data, parent_ip ? : ip, cpu);
data->critical_start = 0;
atomic_dec(&data->disabled);
@@ -438,20 +437,16 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
/* start and stop critical timings used to for stoppage (in idle) */
void start_critical_timings(void)
{
- int pc = preempt_count();
-
- if (preempt_trace(pc) || irq_trace())
- start_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
+ if (preempt_trace(preempt_count()) || irq_trace())
+ start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);
NOKPROBE_SYMBOL(start_critical_timings);
void stop_critical_timings(void)
{
- int pc = preempt_count();
-
- if (preempt_trace(pc) || irq_trace())
- stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
+ if (preempt_trace(preempt_count()) || irq_trace())
+ stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);
NOKPROBE_SYMBOL(stop_critical_timings);
@@ -562,6 +557,8 @@ static int __irqsoff_tracer_init(struct trace_array *tr)
/* non overwrite screws up the latency tracers */
set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
+ /* without pause, we will produce garbage if another latency occurs */
+ set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, 1);
tr->max_latency = 0;
irqsoff_trace = tr;
@@ -583,11 +580,13 @@ static void __irqsoff_tracer_reset(struct trace_array *tr)
{
int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
+ int pause_flag = save_flags & TRACE_ITER_PAUSE_ON_TRACE;
stop_irqsoff_tracer(tr, is_graph(tr));
set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
+ set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, pause_flag);
ftrace_reset_array_ops(tr);
irqsoff_busy = false;
@@ -609,19 +608,15 @@ static void irqsoff_tracer_stop(struct trace_array *tr)
*/
void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
{
- unsigned int pc = preempt_count();
-
- if (!preempt_trace(pc) && irq_trace())
- stop_critical_timing(a0, a1, pc);
+ if (!preempt_trace(preempt_count()) && irq_trace())
+ stop_critical_timing(a0, a1);
}
NOKPROBE_SYMBOL(tracer_hardirqs_on);
void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
{
- unsigned int pc = preempt_count();
-
- if (!preempt_trace(pc) && irq_trace())
- start_critical_timing(a0, a1, pc);
+ if (!preempt_trace(preempt_count()) && irq_trace())
+ start_critical_timing(a0, a1);
}
NOKPROBE_SYMBOL(tracer_hardirqs_off);
@@ -661,18 +656,14 @@ static struct tracer irqsoff_tracer __read_mostly =
#ifdef CONFIG_PREEMPT_TRACER
void tracer_preempt_on(unsigned long a0, unsigned long a1)
{
- int pc = preempt_count();
-
- if (preempt_trace(pc) && !irq_trace())
- stop_critical_timing(a0, a1, pc);
+ if (preempt_trace(preempt_count()) && !irq_trace())
+ stop_critical_timing(a0, a1);
}
void tracer_preempt_off(unsigned long a0, unsigned long a1)
{
- int pc = preempt_count();
-
- if (preempt_trace(pc) && !irq_trace())
- start_critical_timing(a0, a1, pc);
+ if (preempt_trace(preempt_count()) && !irq_trace())
+ start_critical_timing(a0, a1);
}
static int preemptoff_tracer_init(struct trace_array *tr)
diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c
index 9da76104f7a2..59857a1ee44c 100644
--- a/kernel/trace/trace_kdb.c
+++ b/kernel/trace/trace_kdb.c
@@ -147,11 +147,17 @@ static int kdb_ftdump(int argc, const char **argv)
return 0;
}
+static kdbtab_t ftdump_cmd = {
+ .name = "ftdump",
+ .func = kdb_ftdump,
+ .usage = "[skip_#entries] [cpu]",
+ .help = "Dump ftrace log; -skip dumps last #entries",
+ .flags = KDB_ENABLE_ALWAYS_SAFE,
+};
+
static __init int kdb_ftrace_register(void)
{
- kdb_register_flags("ftdump", kdb_ftdump, "[skip_#entries] [cpu]",
- "Dump ftrace log; -skip dumps last #entries", 0,
- KDB_ENABLE_ALWAYS_SAFE);
+ kdb_register(&ftdump_cmd);
return 0;
}
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index aefb6065b508..c4c6e0e0068b 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -7,6 +7,7 @@
*/
#define pr_fmt(fmt) "trace_kprobe: " fmt
+#include <linux/bpf-cgroup.h>
#include <linux/security.h>
#include <linux/module.h>
#include <linux/uaccess.h>
@@ -19,22 +20,24 @@
#include "trace_kprobe_selftest.h"
#include "trace_probe.h"
#include "trace_probe_tmpl.h"
+#include "trace_probe_kernel.h"
#define KPROBE_EVENT_SYSTEM "kprobes"
#define KRETPROBE_MAXACTIVE_MAX 4096
/* Kprobe early definition from command line */
static char kprobe_boot_events_buf[COMMAND_LINE_SIZE] __initdata;
-static bool kprobe_boot_events_enabled __initdata;
static int __init set_kprobe_boot_events(char *str)
{
- strlcpy(kprobe_boot_events_buf, str, COMMAND_LINE_SIZE);
- return 0;
+ strscpy(kprobe_boot_events_buf, str, COMMAND_LINE_SIZE);
+ disable_tracing_selftest("running kprobe events");
+
+ return 1;
}
__setup("kprobe_event=", set_kprobe_boot_events);
-static int trace_kprobe_create(int argc, const char **argv);
+static int trace_kprobe_create(const char *raw_command);
static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_kprobe_release(struct dyn_event *ev);
static bool trace_kprobe_is_busy(struct dyn_event *ev);
@@ -79,10 +82,6 @@ static struct trace_kprobe *to_trace_kprobe(struct dyn_event *ev)
for_each_dyn_event(dpos) \
if (is_trace_kprobe(dpos) && (pos = to_trace_kprobe(dpos)))
-#define SIZEOF_TRACE_KPROBE(n) \
- (offsetof(struct trace_kprobe, tp.args) + \
- (sizeof(struct probe_arg) * (n)))
-
static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
{
return tk->rp.handler != NULL;
@@ -100,15 +99,16 @@ static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk
static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
{
- return !!(kprobe_gone(&tk->rp.kp));
+ return kprobe_gone(&tk->rp.kp);
}
static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
struct module *mod)
{
- int len = strlen(mod->name);
+ int len = strlen(module_name(mod));
const char *name = trace_kprobe_symbol(tk);
- return strncmp(mod->name, name, len) == 0 && name[len] == ':';
+
+ return strncmp(module_name(mod), name, len) == 0 && name[len] == ':';
}
static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
@@ -122,9 +122,9 @@ static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
if (!p)
return true;
*p = '\0';
- mutex_lock(&module_mutex);
+ rcu_read_lock_sched();
ret = !!find_module(tk->symbol);
- mutex_unlock(&module_mutex);
+ rcu_read_unlock_sched();
*p = ':';
return ret;
@@ -164,7 +164,8 @@ static bool trace_kprobe_match(const char *system, const char *event,
{
struct trace_kprobe *tk = to_trace_kprobe(ev);
- return strcmp(trace_probe_name(&tk->tp), event) == 0 &&
+ return (event[0] == '\0' ||
+ strcmp(trace_probe_name(&tk->tp), event) == 0) &&
(!system || strcmp(trace_probe_group_name(&tk->tp), system) == 0) &&
trace_kprobe_match_command_head(tk, argc, argv);
}
@@ -219,9 +220,9 @@ bool trace_kprobe_on_func_entry(struct trace_event_call *call)
{
struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
- return tk ? kprobe_on_func_entry(tk->rp.kp.addr,
+ return tk ? (kprobe_on_func_entry(tk->rp.kp.addr,
tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
- tk->rp.kp.addr ? 0 : tk->rp.kp.offset) : false;
+ tk->rp.kp.addr ? 0 : tk->rp.kp.offset) == 0) : false;
}
bool trace_kprobe_error_injectable(struct trace_event_call *call)
@@ -263,7 +264,7 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group,
struct trace_kprobe *tk;
int ret = -ENOMEM;
- tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
+ tk = kzalloc(struct_size(tk, tp.args, nargs), GFP_KERNEL);
if (!tk)
return ERR_PTR(ret);
@@ -329,11 +330,9 @@ static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
static void __disable_trace_kprobe(struct trace_probe *tp)
{
- struct trace_probe *pos;
struct trace_kprobe *tk;
- list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
- tk = container_of(pos, struct trace_kprobe, tp);
+ list_for_each_entry(tk, trace_probe_probe_list(tp), tp.list) {
if (!trace_kprobe_is_registered(tk))
continue;
if (trace_kprobe_is_return(tk))
@@ -350,7 +349,7 @@ static void __disable_trace_kprobe(struct trace_probe *tp)
static int enable_trace_kprobe(struct trace_event_call *call,
struct trace_event_file *file)
{
- struct trace_probe *pos, *tp;
+ struct trace_probe *tp;
struct trace_kprobe *tk;
bool enabled;
int ret = 0;
@@ -371,8 +370,7 @@ static int enable_trace_kprobe(struct trace_event_call *call,
if (enabled)
return 0;
- list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
- tk = container_of(pos, struct trace_kprobe, tp);
+ list_for_each_entry(tk, trace_probe_probe_list(tp), tp.list) {
if (trace_kprobe_has_gone(tk))
continue;
ret = __enable_trace_kprobe(tk);
@@ -432,7 +430,7 @@ static int disable_trace_kprobe(struct trace_event_call *call,
return 0;
}
-#if defined(CONFIG_KPROBES_ON_FTRACE) && \
+#if defined(CONFIG_DYNAMIC_FTRACE) && \
!defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
static bool __within_notrace_func(unsigned long addr)
{
@@ -489,8 +487,8 @@ static int __register_trace_kprobe(struct trace_kprobe *tk)
return -EINVAL;
if (within_notrace_func(tk)) {
- pr_warn("Could not probe notrace function %s\n",
- trace_kprobe_symbol(tk));
+ pr_warn("Could not probe notrace function %ps\n",
+ (void *)trace_kprobe_address(tk));
return -EINVAL;
}
@@ -541,6 +539,10 @@ static int unregister_trace_kprobe(struct trace_kprobe *tk)
if (trace_probe_is_enabled(&tk->tp))
return -EBUSY;
+ /* If there's a reference to the dynamic event */
+ if (trace_event_dyn_busy(trace_probe_event_call(&tk->tp)))
+ return -EBUSY;
+
/* Will fail if probe is being used by ftrace or perf */
if (unregister_kprobe_event(tk))
return -EBUSY;
@@ -557,11 +559,9 @@ static bool trace_kprobe_has_same_kprobe(struct trace_kprobe *orig,
struct trace_kprobe *comp)
{
struct trace_probe_event *tpe = orig->tp.event;
- struct trace_probe *pos;
int i;
- list_for_each_entry(pos, &tpe->probes, list) {
- orig = container_of(pos, struct trace_kprobe, tp);
+ list_for_each_entry(orig, &tpe->probes, tp.list) {
if (strcmp(trace_kprobe_symbol(orig),
trace_kprobe_symbol(comp)) ||
trace_kprobe_offset(orig) != trace_kprobe_offset(comp))
@@ -616,7 +616,7 @@ static int append_trace_kprobe(struct trace_kprobe *tk, struct trace_kprobe *to)
if (ret)
trace_probe_unlink(&tk->tp);
else
- dyn_event_add(&tk->devent);
+ dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp));
return ret;
}
@@ -645,7 +645,11 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
/* Register new event */
ret = register_kprobe_event(tk);
if (ret) {
- pr_warn("Failed to register probe event(%d)\n", ret);
+ if (ret == -EEXIST) {
+ trace_probe_log_set_index(0);
+ trace_probe_log_err(0, EVENT_EXIST);
+ } else
+ pr_warn("Failed to register probe event(%d)\n", ret);
goto end;
}
@@ -659,7 +663,7 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
if (ret < 0)
unregister_kprobe_event(tk);
else
- dyn_event_add(&tk->devent);
+ dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp));
end:
mutex_unlock(&event_mutex);
@@ -688,7 +692,7 @@ static int trace_kprobe_module_callback(struct notifier_block *nb,
if (ret)
pr_warn("Failed to re-register probe %s on %s: %d\n",
trace_probe_name(&tk->tp),
- mod->name, ret);
+ module_name(mod), ret);
}
}
mutex_unlock(&event_mutex);
@@ -701,22 +705,52 @@ static struct notifier_block trace_kprobe_module_nb = {
.priority = 1 /* Invoked after kprobe module callback */
};
-/* Convert certain expected symbols into '_' when generating event names */
-static inline void sanitize_event_name(char *name)
+static int count_symbols(void *data, unsigned long unused)
{
- while (*name++ != '\0')
- if (*name == ':' || *name == '.')
- *name = '_';
+ unsigned int *count = data;
+
+ (*count)++;
+
+ return 0;
}
-static int trace_kprobe_create(int argc, const char *argv[])
+struct sym_count_ctx {
+ unsigned int count;
+ const char *name;
+};
+
+static int count_mod_symbols(void *data, const char *name, unsigned long unused)
+{
+ struct sym_count_ctx *ctx = data;
+
+ if (strcmp(name, ctx->name) == 0)
+ ctx->count++;
+
+ return 0;
+}
+
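+/*
+ * number_of_same_symbols() counts matches in both vmlinux (via
+ * kallsyms_on_each_match_symbol()) and every loaded module (via
+ * module_kallsyms_on_each_symbol() with a NULL module filter), so a
+ * symbol defined once in the core kernel and once in a module is also
+ * treated as ambiguous by the callers below.
+ */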
+static unsigned int number_of_same_symbols(char *func_name)
+{
+ struct sym_count_ctx ctx = { .count = 0, .name = func_name };
+
+ kallsyms_on_each_match_symbol(count_symbols, func_name, &ctx.count);
+
+ module_kallsyms_on_each_symbol(NULL, count_mod_symbols, &ctx);
+
+ return ctx.count;
+}
+
+static int __trace_kprobe_create(int argc, const char *argv[])
{
/*
* Argument syntax:
* - Add kprobe:
- * p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
+ * p[:[GRP/][EVENT]] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
* - Add kretprobe:
- * r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
+ * r[MAXACTIVE][:[GRP/][EVENT]] [MOD:]KSYM[+0] [FETCHARGS]
+ * Or
+ * p[:[GRP/][EVENT]] [MOD:]KSYM[+0]%return [FETCHARGS]
+ *
* Fetch args:
* $retval : fetch return value
* $stack : fetch stack address
@@ -733,20 +767,23 @@ static int trace_kprobe_create(int argc, const char *argv[])
* FETCHARG:TYPE : use TYPE instead of unsigned long.
*/
struct trace_kprobe *tk = NULL;
- int i, len, ret = 0;
+ int i, len, new_argc = 0, ret = 0;
bool is_return = false;
char *symbol = NULL, *tmp = NULL;
+ const char **new_argv = NULL;
const char *event = NULL, *group = KPROBE_EVENT_SYSTEM;
+ enum probe_print_type ptype;
int maxactive = 0;
long offset = 0;
void *addr = NULL;
char buf[MAX_EVENT_NAME_LEN];
- unsigned int flags = TPARG_FL_KERNEL;
+ char gbuf[MAX_EVENT_NAME_LEN];
+ char abuf[MAX_BTF_ARGS_LEN];
+ struct traceprobe_parse_context ctx = { .flags = TPARG_FL_KERNEL };
switch (argv[0][0]) {
case 'r':
is_return = true;
- flags |= TPARG_FL_RETURN;
break;
case 'p':
break;
@@ -764,7 +801,7 @@ static int trace_kprobe_create(int argc, const char *argv[])
if (isdigit(argv[0][1])) {
if (!is_return) {
- trace_probe_log_err(1, MAXACT_NO_KPROBE);
+ trace_probe_log_err(1, BAD_MAXACT_TYPE);
goto parse_error;
}
if (event)
@@ -804,27 +841,70 @@ static int trace_kprobe_create(int argc, const char *argv[])
symbol = kstrdup(argv[1], GFP_KERNEL);
if (!symbol)
return -ENOMEM;
+
+ tmp = strchr(symbol, '%');
+ if (tmp) {
+ if (!strcmp(tmp, "%return")) {
+ *tmp = '\0';
+ is_return = true;
+ } else {
+ trace_probe_log_err(tmp - symbol, BAD_ADDR_SUFFIX);
+ goto parse_error;
+ }
+ }
+
/* TODO: support .init module functions */
ret = traceprobe_split_symbol_offset(symbol, &offset);
if (ret || offset < 0 || offset > UINT_MAX) {
trace_probe_log_err(0, BAD_PROBE_ADDR);
goto parse_error;
}
- if (kprobe_on_func_entry(NULL, symbol, offset))
- flags |= TPARG_FL_FENTRY;
- if (offset && is_return && !(flags & TPARG_FL_FENTRY)) {
+ if (is_return)
+ ctx.flags |= TPARG_FL_RETURN;
+ ret = kprobe_on_func_entry(NULL, symbol, offset);
+ if (ret == 0 && !is_return)
+ ctx.flags |= TPARG_FL_FENTRY;
+ /* Defer the ENOENT case until register kprobe */
+ if (ret == -EINVAL && is_return) {
trace_probe_log_err(0, BAD_RETPROBE);
goto parse_error;
}
}
+ if (symbol && !strchr(symbol, ':')) {
+ unsigned int count;
+
+ count = number_of_same_symbols(symbol);
+ if (count > 1) {
+ /*
+ * Users should use ADDR to remove the ambiguity of
+ * using KSYM only.
+ */
+ trace_probe_log_err(0, NON_UNIQ_SYMBOL);
+ ret = -EADDRNOTAVAIL;
+
+ goto error;
+ } else if (count == 0) {
+ /*
+ * We can return ENOENT earlier than when registering the
+ * kprobe.
+ */
+ trace_probe_log_err(0, BAD_PROBE_ADDR);
+ ret = -ENOENT;
+
+ goto error;
+ }
+ }
+
trace_probe_log_set_index(0);
if (event) {
- ret = traceprobe_parse_event_name(&event, &group, buf,
+ ret = traceprobe_parse_event_name(&event, &group, gbuf,
event - argv[0]);
if (ret)
goto parse_error;
- } else {
+ }
+
+ if (!event) {
/* Make a new event name */
if (symbol)
snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
@@ -836,33 +916,41 @@ static int trace_kprobe_create(int argc, const char *argv[])
event = buf;
}
+ argc -= 2; argv += 2;
+ ctx.funcname = symbol;
+ new_argv = traceprobe_expand_meta_args(argc, argv, &new_argc,
+ abuf, MAX_BTF_ARGS_LEN, &ctx);
+ if (IS_ERR(new_argv)) {
+ ret = PTR_ERR(new_argv);
+ new_argv = NULL;
+ goto out;
+ }
+ if (new_argv) {
+ argc = new_argc;
+ argv = new_argv;
+ }
+
/* setup a probe */
tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
- argc - 2, is_return);
+ argc, is_return);
if (IS_ERR(tk)) {
ret = PTR_ERR(tk);
/* This must return -ENOMEM, else there is a bug */
WARN_ON_ONCE(ret != -ENOMEM);
goto out; /* We know tk is not allocated */
}
- argc -= 2; argv += 2;
/* parse arguments */
for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
- tmp = kstrdup(argv[i], GFP_KERNEL);
- if (!tmp) {
- ret = -ENOMEM;
- goto error;
- }
-
trace_probe_log_set_index(i + 2);
- ret = traceprobe_parse_probe_arg(&tk->tp, i, tmp, flags);
- kfree(tmp);
+ ctx.offset = 0;
+ ret = traceprobe_parse_probe_arg(&tk->tp, i, argv[i], &ctx);
if (ret)
goto error; /* This can be -ENOMEM */
}
- ret = traceprobe_set_print_fmt(&tk->tp, is_return);
+ ptype = is_return ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
+ ret = traceprobe_set_print_fmt(&tk->tp, ptype);
if (ret < 0)
goto error;
@@ -879,7 +967,9 @@ static int trace_kprobe_create(int argc, const char *argv[])
}
out:
+ traceprobe_finish_parse(&ctx);
trace_probe_log_clear();
+ kfree(new_argv);
kfree(symbol);
return ret;
@@ -890,20 +980,25 @@ error:
goto out;
}
-static int create_or_delete_trace_kprobe(int argc, char **argv)
+static int trace_kprobe_create(const char *raw_command)
+{
+ return trace_probe_create(raw_command, __trace_kprobe_create);
+}
+
+static int create_or_delete_trace_kprobe(const char *raw_command)
{
int ret;
- if (argv[0][0] == '-')
- return dyn_event_release(argc, argv, &trace_kprobe_ops);
+ if (raw_command[0] == '-')
+ return dyn_event_release(raw_command, &trace_kprobe_ops);
- ret = trace_kprobe_create(argc, (const char **)argv);
+ ret = trace_kprobe_create(raw_command);
return ret == -ECANCELED ? -EINVAL : ret;
}
static int trace_kprobe_run_command(struct dynevent_cmd *cmd)
{
- return trace_run_command(cmd->seq.buffer, create_or_delete_trace_kprobe);
+ return create_or_delete_trace_kprobe(cmd->seq.buffer);
}
/**
@@ -925,10 +1020,10 @@ EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
/**
* __kprobe_event_gen_cmd_start - Generate a kprobe event command from arg list
* @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @kretprobe: Is this a return probe?
* @name: The name of the kprobe event
* @loc: The location of the kprobe event
- * @kretprobe: Is this a return probe?
- * @args: Variable number of arg (pairs), one pair for each field
+ * @...: Variable number of arg (pairs), one pair for each field
*
* NOTE: Users normally won't want to call this function directly, but
* rather use the kprobe_event_gen_cmd_start() wrapper, which automatically
@@ -1001,7 +1096,7 @@ EXPORT_SYMBOL_GPL(__kprobe_event_gen_cmd_start);
/**
* __kprobe_event_add_fields - Add probe fields to a kprobe command from arg list
* @cmd: A pointer to the dynevent_cmd struct representing the new event
- * @args: Variable number of arg (pairs), one pair for each field
+ * @...: Variable number of arg (pairs), one pair for each field
*
* NOTE: Users normally won't want to call this function directly, but
* rather use the kprobe_event_add_fields() wrapper, which
@@ -1064,7 +1159,7 @@ int kprobe_event_delete(const char *name)
snprintf(buf, MAX_EVENT_NAME_LEN, "-:%s", name);
- return trace_run_command(buf, create_or_delete_trace_kprobe);
+ return create_or_delete_trace_kprobe(buf);
}
EXPORT_SYMBOL_GPL(kprobe_event_delete);
@@ -1154,20 +1249,28 @@ static const struct file_operations kprobe_events_ops = {
.write = probes_write,
};
+static unsigned long trace_kprobe_missed(struct trace_kprobe *tk)
+{
+ return trace_kprobe_is_return(tk) ?
+ tk->rp.kp.nmissed + tk->rp.nmissed : tk->rp.kp.nmissed;
+}
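+/*
+ * Note: for a kretprobe the total above adds the misses of the underlying
+ * kprobe (kp.nmissed, e.g. due to recursion) to rp.nmissed, which counts
+ * hits dropped because no free kretprobe instance (maxactive) was available.
+ */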
+
/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
struct dyn_event *ev = v;
struct trace_kprobe *tk;
+ unsigned long nmissed;
if (!is_trace_kprobe(ev))
return 0;
tk = to_trace_kprobe(ev);
+ nmissed = trace_kprobe_missed(tk);
seq_printf(m, " %-44s %15lu %15lu\n",
trace_probe_name(&tk->tp),
trace_kprobe_nhit(tk),
- tk->rp.kp.nmissed);
+ nmissed);
return 0;
}
@@ -1198,117 +1301,14 @@ static const struct file_operations kprobe_profile_ops = {
.release = seq_release,
};
-/* Kprobe specific fetch functions */
-
-/* Return the length of string -- including null terminal byte */
-static nokprobe_inline int
-fetch_store_strlen_user(unsigned long addr)
-{
- const void __user *uaddr = (__force const void __user *)addr;
-
- return strnlen_user_nofault(uaddr, MAX_STRING_SIZE);
-}
-
-/* Return the length of string -- including null terminal byte */
-static nokprobe_inline int
-fetch_store_strlen(unsigned long addr)
-{
- int ret, len = 0;
- u8 c;
-
-#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
- if (addr < TASK_SIZE)
- return fetch_store_strlen_user(addr);
-#endif
-
- do {
- ret = copy_from_kernel_nofault(&c, (u8 *)addr + len, 1);
- len++;
- } while (c && ret == 0 && len < MAX_STRING_SIZE);
-
- return (ret < 0) ? ret : len;
-}
-
-/*
- * Fetch a null-terminated string from user. Caller MUST set *(u32 *)buf
- * with max length and relative data location.
- */
-static nokprobe_inline int
-fetch_store_string_user(unsigned long addr, void *dest, void *base)
-{
- const void __user *uaddr = (__force const void __user *)addr;
- int maxlen = get_loc_len(*(u32 *)dest);
- void *__dest;
- long ret;
-
- if (unlikely(!maxlen))
- return -ENOMEM;
-
- __dest = get_loc_data(dest, base);
-
- ret = strncpy_from_user_nofault(__dest, uaddr, maxlen);
- if (ret >= 0)
- *(u32 *)dest = make_data_loc(ret, __dest - base);
-
- return ret;
-}
-
-/*
- * Fetch a null-terminated string. Caller MUST set *(u32 *)buf with max
- * length and relative data location.
- */
-static nokprobe_inline int
-fetch_store_string(unsigned long addr, void *dest, void *base)
-{
- int maxlen = get_loc_len(*(u32 *)dest);
- void *__dest;
- long ret;
-
-#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
- if ((unsigned long)addr < TASK_SIZE)
- return fetch_store_string_user(addr, dest, base);
-#endif
-
- if (unlikely(!maxlen))
- return -ENOMEM;
-
- __dest = get_loc_data(dest, base);
-
- /*
- * Try to get string again, since the string can be changed while
- * probing.
- */
- ret = strncpy_from_kernel_nofault(__dest, (void *)addr, maxlen);
- if (ret >= 0)
- *(u32 *)dest = make_data_loc(ret, __dest - base);
-
- return ret;
-}
-
-static nokprobe_inline int
-probe_mem_read_user(void *dest, void *src, size_t size)
-{
- const void __user *uaddr = (__force const void __user *)src;
-
- return copy_from_user_nofault(dest, uaddr, size);
-}
-
-static nokprobe_inline int
-probe_mem_read(void *dest, void *src, size_t size)
-{
-#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
- if ((unsigned long)src < TASK_SIZE)
- return probe_mem_read_user(dest, src, size);
-#endif
- return copy_from_kernel_nofault(dest, src, size);
-}
-
/* Note that we don't verify it, since the code does not come from user space */
static int
-process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
+process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
void *base)
{
+ struct pt_regs *regs = rec;
unsigned long val;
+ int ret;
retry:
/* 1st stage: get value from context */
@@ -1325,15 +1325,6 @@ retry:
case FETCH_OP_RETVAL:
val = regs_return_value(regs);
break;
- case FETCH_OP_IMM:
- val = code->immediate;
- break;
- case FETCH_OP_COMM:
- val = (unsigned long)current->comm;
- break;
- case FETCH_OP_DATA:
- val = (unsigned long)code->data;
- break;
#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
case FETCH_OP_ARG:
val = regs_get_kernel_argument(regs, code->param);
@@ -1343,7 +1334,9 @@ retry:
code++;
goto retry;
default:
- return -EILSEQ;
+ ret = process_common_fetch_insn(code, &val);
+ if (ret < 0)
+ return ret;
}
code++;
@@ -1366,22 +1359,14 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
if (trace_trigger_soft_disabled(trace_file))
return;
- local_save_flags(fbuffer.flags);
- fbuffer.pc = preempt_count();
- fbuffer.trace_file = trace_file;
-
dsize = __get_data_size(&tk->tp, regs);
- fbuffer.event =
- trace_event_buffer_lock_reserve(&fbuffer.buffer, trace_file,
- call->event.type,
- sizeof(*entry) + tk->tp.size + dsize,
- fbuffer.flags, fbuffer.pc);
- if (!fbuffer.event)
+ entry = trace_event_buffer_reserve(&fbuffer, trace_file,
+ sizeof(*entry) + tk->tp.size + dsize);
+ if (!entry)
return;
fbuffer.regs = regs;
- entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
entry->ip = (unsigned long)tk->rp.kp.addr;
store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
@@ -1414,23 +1399,16 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
if (trace_trigger_soft_disabled(trace_file))
return;
- local_save_flags(fbuffer.flags);
- fbuffer.pc = preempt_count();
- fbuffer.trace_file = trace_file;
-
dsize = __get_data_size(&tk->tp, regs);
- fbuffer.event =
- trace_event_buffer_lock_reserve(&fbuffer.buffer, trace_file,
- call->event.type,
- sizeof(*entry) + tk->tp.size + dsize,
- fbuffer.flags, fbuffer.pc);
- if (!fbuffer.event)
+
+ entry = trace_event_buffer_reserve(&fbuffer, trace_file,
+ sizeof(*entry) + tk->tp.size + dsize);
+ if (!entry)
return;
fbuffer.regs = regs;
- entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
entry->func = (unsigned long)tk->rp.kp.addr;
- entry->ret_ip = (unsigned long)ri->ret_addr;
+ entry->ret_ip = get_kretprobe_retaddr(ri);
store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
trace_event_buffer_commit(&fbuffer);
@@ -1469,7 +1447,7 @@ print_kprobe_event(struct trace_iterator *iter, int flags,
trace_seq_putc(s, ')');
- if (print_probe_args(s, tp->args, tp->nr_args,
+ if (trace_probe_print_args(s, tp->args, tp->nr_args,
(u8 *)&field[1], field) < 0)
goto out;
@@ -1504,7 +1482,7 @@ print_kretprobe_event(struct trace_iterator *iter, int flags,
trace_seq_putc(s, ')');
- if (print_probe_args(s, tp->args, tp->nr_args,
+ if (trace_probe_print_args(s, tp->args, tp->nr_args,
(u8 *)&field[1], field) < 0)
goto out;
@@ -1625,7 +1603,7 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
return;
entry->func = (unsigned long)tk->rp.kp.addr;
- entry->ret_ip = (unsigned long)ri->ret_addr;
+ entry->ret_ip = get_kretprobe_retaddr(ri);
store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
head, NULL);
@@ -1634,7 +1612,8 @@ NOKPROBE_SYMBOL(kretprobe_perf_func);
int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
const char **symbol, u64 *probe_offset,
- u64 *probe_addr, bool perf_type_tracepoint)
+ u64 *probe_addr, unsigned long *missed,
+ bool perf_type_tracepoint)
{
const char *pevent = trace_event_name(event->tp_event);
const char *group = event->tp_event->class->system;
@@ -1649,15 +1628,12 @@ int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
*fd_type = trace_kprobe_is_return(tk) ? BPF_FD_TYPE_KRETPROBE
: BPF_FD_TYPE_KPROBE;
- if (tk->symbol) {
- *symbol = tk->symbol;
- *probe_offset = tk->rp.kp.offset;
- *probe_addr = 0;
- } else {
- *symbol = NULL;
- *probe_offset = 0;
- *probe_addr = (unsigned long)tk->rp.kp.addr;
- }
+ *probe_offset = tk->rp.kp.offset;
+ *probe_addr = kallsyms_show_value(current_cred()) ?
+ (unsigned long)tk->rp.kp.addr : 0;
+ *symbol = tk->symbol;
+ if (missed)
+ *missed = trace_kprobe_missed(tk);
return 0;
}
#endif /* CONFIG_PERF_EVENTS */
@@ -1714,8 +1690,18 @@ NOKPROBE_SYMBOL(kprobe_dispatcher);
static int
kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
- struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);
+ struct kretprobe *rp = get_kretprobe(ri);
+ struct trace_kprobe *tk;
+
+ /*
+ * There is a small chance that get_kretprobe(ri) returns NULL when
+ * the kretprobe is unregistered on another CPU between the kretprobe's
+ * trampoline_handler and this function.
+ */
+ if (unlikely(!rp))
+ return 0;
+ tk = container_of(rp, struct trace_kprobe, rp);
raw_cpu_inc(*tk->nhit);
if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
@@ -1724,7 +1710,7 @@ kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
kretprobe_perf_func(tk, ri, regs);
#endif
- return 0; /* We don't tweek kernel, so just return 0 */
+ return 0; /* We don't tweak the kernel, so just return 0 */
}
NOKPROBE_SYMBOL(kretprobe_dispatcher);
@@ -1777,15 +1763,35 @@ static int unregister_kprobe_event(struct trace_kprobe *tk)
}
#ifdef CONFIG_PERF_EVENTS
+
/* create a trace_kprobe, but don't add it to global lists */
struct trace_event_call *
create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
bool is_return)
{
+ enum probe_print_type ptype;
struct trace_kprobe *tk;
int ret;
char *event;
+ if (func) {
+ unsigned int count;
+
+ count = number_of_same_symbols(func);
+ if (count > 1)
+ /*
+ * Users should use addr to remove the ambiguity of
+ * using func only.
+ */
+ return ERR_PTR(-EADDRNOTAVAIL);
+ else if (count == 0)
+ /*
+ * We can return ENOENT earlier than when registering the
+ * kprobe.
+ */
+ return ERR_PTR(-ENOENT);
+ }
+
/*
* local trace_kprobes are not added to dyn_event, so they are never
* searched in find_trace_kprobe(). Therefore, there is no concern of
@@ -1805,7 +1811,9 @@ create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
init_trace_event_call(tk);
- if (traceprobe_set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) {
+ ptype = trace_kprobe_is_return(tk) ?
+ PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
+ if (traceprobe_set_print_fmt(&tk->tp, ptype) < 0) {
ret = -ENOMEM;
goto error;
}
@@ -1867,11 +1875,9 @@ static __init void setup_boot_kprobe_events(void)
if (p)
*p++ = '\0';
- ret = trace_run_command(cmd, create_or_delete_trace_kprobe);
+ ret = create_or_delete_trace_kprobe(cmd);
if (ret)
pr_warn("Failed to add event(%d): %s\n", ret, cmd);
- else
- kprobe_boot_events_enabled = true;
cmd = p;
}
@@ -1880,8 +1886,8 @@ static __init void setup_boot_kprobe_events(void)
}
/*
- * Register dynevent at subsys_initcall. This allows kernel to setup kprobe
- * events in fs_initcall without tracefs.
+ * Register dynevent at core_initcall. This allows kernel to setup kprobe
+ * events in postcore_initcall without tracefs.
*/
static __init int init_kprobe_trace_early(void)
{
@@ -1896,31 +1902,24 @@ static __init int init_kprobe_trace_early(void)
return 0;
}
-subsys_initcall(init_kprobe_trace_early);
+core_initcall(init_kprobe_trace_early);
/* Make a tracefs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
- struct dentry *d_tracer;
- struct dentry *entry;
+ int ret;
- d_tracer = tracing_init_dentry();
- if (IS_ERR(d_tracer))
+ ret = tracing_init_dentry();
+ if (ret)
return 0;
- entry = tracefs_create_file("kprobe_events", 0644, d_tracer,
- NULL, &kprobe_events_ops);
-
/* Event list interface */
- if (!entry)
- pr_warn("Could not create tracefs 'kprobe_events' entry\n");
+ trace_create_file("kprobe_events", TRACE_MODE_WRITE,
+ NULL, NULL, &kprobe_events_ops);
/* Profile interface */
- entry = tracefs_create_file("kprobe_profile", 0444, d_tracer,
- NULL, &kprobe_profile_ops);
-
- if (!entry)
- pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
+ trace_create_file("kprobe_profile", TRACE_MODE_READ,
+ NULL, NULL, &kprobe_profile_ops);
setup_boot_kprobe_events();
@@ -1956,17 +1955,14 @@ static __init int kprobe_trace_self_tests_init(void)
if (tracing_is_disabled())
return -ENODEV;
- if (kprobe_boot_events_enabled) {
- pr_info("Skipping kprobe tests due to kprobe_event on cmdline\n");
+ if (tracing_selftest_disabled)
return 0;
- }
target = kprobe_trace_selftest_target;
pr_info("Testing kprobe tracing: ");
- ret = trace_run_command("p:testprobe kprobe_trace_selftest_target $stack $stack0 +0($stack)",
- create_or_delete_trace_kprobe);
+ ret = create_or_delete_trace_kprobe("p:testprobe kprobe_trace_selftest_target $stack $stack0 +0($stack)");
if (WARN_ON_ONCE(ret)) {
pr_warn("error on probing function entry.\n");
warn++;
@@ -1987,8 +1983,7 @@ static __init int kprobe_trace_self_tests_init(void)
}
}
- ret = trace_run_command("r:testprobe2 kprobe_trace_selftest_target $retval",
- create_or_delete_trace_kprobe);
+ ret = create_or_delete_trace_kprobe("r:testprobe2 kprobe_trace_selftest_target $retval");
if (WARN_ON_ONCE(ret)) {
pr_warn("error on probing function return.\n");
warn++;
@@ -2061,13 +2056,13 @@ static __init int kprobe_trace_self_tests_init(void)
trace_probe_event_call(&tk->tp), file);
}
- ret = trace_run_command("-:testprobe", create_or_delete_trace_kprobe);
+ ret = create_or_delete_trace_kprobe("-:testprobe");
if (WARN_ON_ONCE(ret)) {
pr_warn("error on deleting a probe.\n");
warn++;
}
- ret = trace_run_command("-:testprobe2", create_or_delete_trace_kprobe);
+ ret = create_or_delete_trace_kprobe("-:testprobe2");
if (WARN_ON_ONCE(ret)) {
pr_warn("error on deleting a probe.\n");
warn++;
diff --git a/kernel/trace/trace_kprobe_selftest.c b/kernel/trace/trace_kprobe_selftest.c
index 16548ee4c8c6..3851cd1e6a62 100644
--- a/kernel/trace/trace_kprobe_selftest.c
+++ b/kernel/trace/trace_kprobe_selftest.c
@@ -1,4 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
+
+#include "trace_kprobe_selftest.h"
+
/*
* Function used during the kprobe self test. This function is in a separate
* compile unit so it can be compile with CC_FLAGS_FTRACE to ensure that it
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index 84582bf1ed5f..64e77b513697 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -5,8 +5,6 @@
* Copyright (C) 2008 Pekka Paalanen <pq@iki.fi>
*/
-#define DEBUG 1
-
#include <linux/kernel.h>
#include <linux/mmiotrace.h>
#include <linux/pci.h>
@@ -300,10 +298,11 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
struct trace_buffer *buffer = tr->array_buffer.buffer;
struct ring_buffer_event *event;
struct trace_mmiotrace_rw *entry;
- int pc = preempt_count();
+ unsigned int trace_ctx;
+ trace_ctx = tracing_gen_ctx_flags(0);
event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
- sizeof(*entry), 0, pc);
+ sizeof(*entry), trace_ctx);
if (!event) {
atomic_inc(&dropped_count);
return;
@@ -312,7 +311,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
entry->rw = *rw;
if (!call_filter_check_discard(call, entry, buffer, event))
- trace_buffer_unlock_commit(tr, buffer, event, 0, pc);
+ trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
}
void mmio_trace_rw(struct mmiotrace_rw *rw)
@@ -330,10 +329,11 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
struct trace_buffer *buffer = tr->array_buffer.buffer;
struct ring_buffer_event *event;
struct trace_mmiotrace_map *entry;
- int pc = preempt_count();
+ unsigned int trace_ctx;
+ trace_ctx = tracing_gen_ctx_flags(0);
event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
- sizeof(*entry), 0, pc);
+ sizeof(*entry), trace_ctx);
if (!event) {
atomic_inc(&dropped_count);
return;
@@ -342,7 +342,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
entry->map = *map;
if (!call_filter_check_discard(call, entry, buffer, event))
- trace_buffer_unlock_commit(tr, buffer, event, 0, pc);
+ trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
}
void mmio_trace_mapping(struct mmiotrace_map *map)
diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
new file mode 100644
index 000000000000..a8e28f9b9271
--- /dev/null
+++ b/kernel/trace/trace_osnoise.c
@@ -0,0 +1,3133 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * OS Noise Tracer: computes the OS Noise suffered by a running thread.
+ * Timerlat Tracer: measures the wakeup latency of a timer triggered IRQ and thread.
+ *
+ * Based on "hwlat_detector" tracer by:
+ * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. <jcm@redhat.com>
+ * Copyright (C) 2013-2016 Steven Rostedt, Red Hat, Inc. <srostedt@redhat.com>
+ * With feedback from Clark Williams <williams@redhat.com>
+ *
+ * And also based on the rtsl tracer presented on:
+ * DE OLIVEIRA, Daniel Bristot, et al. Demystifying the real-time linux
+ * scheduling latency. In: 32nd Euromicro Conference on Real-Time Systems
+ * (ECRTS 2020). Schloss Dagstuhl-Leibniz-Zentrum fur Informatik, 2020.
+ *
+ * Copyright (C) 2021 Daniel Bristot de Oliveira, Red Hat, Inc. <bristot@redhat.com>
+ */
+
+#include <linux/kthread.h>
+#include <linux/tracefs.h>
+#include <linux/uaccess.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/sched/clock.h>
+#include <uapi/linux/sched/types.h>
+#include <linux/sched.h>
+#include "trace.h"
+
+#ifdef CONFIG_X86_LOCAL_APIC
+#include <asm/trace/irq_vectors.h>
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#endif /* CONFIG_X86_LOCAL_APIC */
+
+#include <trace/events/irq.h>
+#include <trace/events/sched.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/osnoise.h>
+
+/*
+ * Default values.
+ */
+#define BANNER "osnoise: "
+#define DEFAULT_SAMPLE_PERIOD 1000000 /* 1s */
+#define DEFAULT_SAMPLE_RUNTIME 1000000 /* 1s */
+
+#define DEFAULT_TIMERLAT_PERIOD 1000 /* 1ms */
+#define DEFAULT_TIMERLAT_PRIO 95 /* FIFO 95 */
+
+/*
+ * osnoise/options entries.
+ */
+enum osnoise_options_index {
+ OSN_DEFAULTS = 0,
+ OSN_WORKLOAD,
+ OSN_PANIC_ON_STOP,
+ OSN_PREEMPT_DISABLE,
+ OSN_IRQ_DISABLE,
+ OSN_MAX
+};
+
+static const char * const osnoise_options_str[OSN_MAX] = {
+ "DEFAULTS",
+ "OSNOISE_WORKLOAD",
+ "PANIC_ON_STOP",
+ "OSNOISE_PREEMPT_DISABLE",
+ "OSNOISE_IRQ_DISABLE" };
+
+#define OSN_DEFAULT_OPTIONS 0x2
+static unsigned long osnoise_options = OSN_DEFAULT_OPTIONS;
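+/*
+ * OSN_DEFAULT_OPTIONS is 0x2, i.e. only the OSN_WORKLOAD bit (bit 1) is
+ * set by default; PANIC_ON_STOP and the preempt/IRQ disable options
+ * start cleared.
+ */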
+
+/*
+ * trace_array of the enabled osnoise/timerlat instances.
+ */
+struct osnoise_instance {
+ struct list_head list;
+ struct trace_array *tr;
+};
+
+static struct list_head osnoise_instances;
+
+static bool osnoise_has_registered_instances(void)
+{
+ return !!list_first_or_null_rcu(&osnoise_instances,
+ struct osnoise_instance,
+ list);
+}
+
+/*
+ * osnoise_instance_registered - check if a tr is already registered
+ */
+static int osnoise_instance_registered(struct trace_array *tr)
+{
+ struct osnoise_instance *inst;
+ int found = 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(inst, &osnoise_instances, list) {
+ if (inst->tr == tr)
+ found = 1;
+ }
+ rcu_read_unlock();
+
+ return found;
+}
+
+/*
+ * osnoise_register_instance - register a new trace instance
+ *
+ * Register a trace_array *tr in the list of instances running
+ * osnoise/timerlat tracers.
+ */
+static int osnoise_register_instance(struct trace_array *tr)
+{
+ struct osnoise_instance *inst;
+
+ /*
+ * register/unregister serialization is provided by trace's
+ * trace_types_lock.
+ */
+ lockdep_assert_held(&trace_types_lock);
+
+ inst = kmalloc(sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD_RCU(&inst->list);
+ inst->tr = tr;
+ list_add_tail_rcu(&inst->list, &osnoise_instances);
+
+ return 0;
+}
+
+/*
+ * osnoise_unregister_instance - unregister a registered trace instance
+ *
+ * Remove the trace_array *tr from the list of instances running
+ * osnoise/timerlat tracers.
+ */
+static void osnoise_unregister_instance(struct trace_array *tr)
+{
+ struct osnoise_instance *inst;
+ int found = 0;
+
+ /*
+ * register/unregister serialization is provided by trace's
+ * trace_types_lock.
+ */
+ list_for_each_entry_rcu(inst, &osnoise_instances, list,
+ lockdep_is_held(&trace_types_lock)) {
+ if (inst->tr == tr) {
+ list_del_rcu(&inst->list);
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found)
+ return;
+
+ kvfree_rcu_mightsleep(inst);
+}
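+/*
+ * The instance is freed with kvfree_rcu_mightsleep(), so readers that
+ * are still walking osnoise_instances under rcu_read_lock() can safely
+ * dereference it until a grace period elapses.
+ */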
+
+/*
+ * NMI runtime info.
+ */
+struct osn_nmi {
+ u64 count;
+ u64 delta_start;
+};
+
+/*
+ * IRQ runtime info.
+ */
+struct osn_irq {
+ u64 count;
+ u64 arrival_time;
+ u64 delta_start;
+};
+
+#define IRQ_CONTEXT 0
+#define THREAD_CONTEXT 1
+#define THREAD_URET 2
+/*
+ * softirq runtime info.
+ */
+struct osn_softirq {
+ u64 count;
+ u64 arrival_time;
+ u64 delta_start;
+};
+
+/*
+ * thread runtime info.
+ */
+struct osn_thread {
+ u64 count;
+ u64 arrival_time;
+ u64 delta_start;
+};
+
+/*
+ * Runtime information: this structure saves the runtime information used by
+ * one sampling thread.
+ */
+struct osnoise_variables {
+ struct task_struct *kthread;
+ bool sampling;
+ pid_t pid;
+ struct osn_nmi nmi;
+ struct osn_irq irq;
+ struct osn_softirq softirq;
+ struct osn_thread thread;
+ local_t int_counter;
+};
+
+/*
+ * Per-cpu runtime information.
+ */
+static DEFINE_PER_CPU(struct osnoise_variables, per_cpu_osnoise_var);
+
+/*
+ * this_cpu_osn_var - Return the per-cpu osnoise_variables on its relative CPU
+ */
+static inline struct osnoise_variables *this_cpu_osn_var(void)
+{
+ return this_cpu_ptr(&per_cpu_osnoise_var);
+}
+
+#ifdef CONFIG_TIMERLAT_TRACER
+/*
+ * Runtime information for the timer mode.
+ */
+struct timerlat_variables {
+ struct task_struct *kthread;
+ struct hrtimer timer;
+ u64 rel_period;
+ u64 abs_period;
+ bool tracing_thread;
+ u64 count;
+ bool uthread_migrate;
+};
+
+static DEFINE_PER_CPU(struct timerlat_variables, per_cpu_timerlat_var);
+
+/*
+ * this_cpu_tmr_var - Return the per-cpu timerlat_variables on its relative CPU
+ */
+static inline struct timerlat_variables *this_cpu_tmr_var(void)
+{
+ return this_cpu_ptr(&per_cpu_timerlat_var);
+}
+
+/*
+ * tlat_var_reset - Reset the values of the given timerlat_variables
+ */
+static inline void tlat_var_reset(void)
+{
+ struct timerlat_variables *tlat_var;
+ int cpu;
+ /*
+ * So far, all the values are initialized as 0, so
+ * zeroing the structure is perfect.
+ */
+ for_each_cpu(cpu, cpu_online_mask) {
+ tlat_var = per_cpu_ptr(&per_cpu_timerlat_var, cpu);
+ memset(tlat_var, 0, sizeof(*tlat_var));
+ }
+}
+#else /* CONFIG_TIMERLAT_TRACER */
+#define tlat_var_reset() do {} while (0)
+#endif /* CONFIG_TIMERLAT_TRACER */
+
+/*
+ * osn_var_reset - Reset the values of the given osnoise_variables
+ */
+static inline void osn_var_reset(void)
+{
+ struct osnoise_variables *osn_var;
+ int cpu;
+
+ /*
+ * So far, all the values are initialized as 0, so
+ * zeroing the structure is perfect.
+ */
+ for_each_cpu(cpu, cpu_online_mask) {
+ osn_var = per_cpu_ptr(&per_cpu_osnoise_var, cpu);
+ memset(osn_var, 0, sizeof(*osn_var));
+ }
+}
+
+/*
+ * osn_var_reset_all - Reset the value of all per-cpu osnoise_variables
+ */
+static inline void osn_var_reset_all(void)
+{
+ osn_var_reset();
+ tlat_var_reset();
+}
+
+/*
+ * Tells NMIs to call back to the osnoise tracer to record timestamps.
+ */
+bool trace_osnoise_callback_enabled;
+
+/*
+ * osnoise sample structure definition. Used to store the statistics of a
+ * sample run.
+ */
+struct osnoise_sample {
+ u64 runtime; /* runtime */
+ u64 noise; /* noise */
+ u64 max_sample; /* max single noise sample */
+ int hw_count; /* # HW (incl. hypervisor) interference */
+ int nmi_count; /* # NMIs during this sample */
+ int irq_count; /* # IRQs during this sample */
+ int softirq_count; /* # softirqs during this sample */
+ int thread_count; /* # threads during this sample */
+};
+
+#ifdef CONFIG_TIMERLAT_TRACER
+/*
+ * timerlat sample structure definition. Used to store the statistics of
+ * a sample run.
+ */
+struct timerlat_sample {
+ u64 timer_latency; /* timer_latency */
+ unsigned int seqnum; /* unique sequence */
+ int context; /* timer context */
+};
+#endif
+
+/*
+ * Protect the interface.
+ */
+static struct mutex interface_lock;
+
+/*
+ * Tracer data.
+ */
+static struct osnoise_data {
+ u64 sample_period; /* total sampling period */
+ u64 sample_runtime; /* active sampling portion of period */
+ u64 stop_tracing; /* stop trace in the internal operation (loop/irq) */
+ u64 stop_tracing_total; /* stop trace in the final operation (report/thread) */
+#ifdef CONFIG_TIMERLAT_TRACER
+ u64 timerlat_period; /* timerlat period */
+ u64 print_stack; /* print IRQ stack if total > */
+ int timerlat_tracer; /* timerlat tracer */
+#endif
+ bool tainted; /* inform users and developers about a problem */
+} osnoise_data = {
+ .sample_period = DEFAULT_SAMPLE_PERIOD,
+ .sample_runtime = DEFAULT_SAMPLE_RUNTIME,
+ .stop_tracing = 0,
+ .stop_tracing_total = 0,
+#ifdef CONFIG_TIMERLAT_TRACER
+ .print_stack = 0,
+ .timerlat_period = DEFAULT_TIMERLAT_PERIOD,
+ .timerlat_tracer = 0,
+#endif
+};
+
+#ifdef CONFIG_TIMERLAT_TRACER
+static inline bool timerlat_enabled(void)
+{
+ return osnoise_data.timerlat_tracer;
+}
+
+static inline int timerlat_softirq_exit(struct osnoise_variables *osn_var)
+{
+ struct timerlat_variables *tlat_var = this_cpu_tmr_var();
+ /*
+ * If the timerlat is enabled, but the irq handler did
+ * not run yet enabling timerlat_tracer, do not trace.
+ */
+ if (!tlat_var->tracing_thread) {
+ osn_var->softirq.arrival_time = 0;
+ osn_var->softirq.delta_start = 0;
+ return 0;
+ }
+ return 1;
+}
+
+static inline int timerlat_thread_exit(struct osnoise_variables *osn_var)
+{
+ struct timerlat_variables *tlat_var = this_cpu_tmr_var();
+ /*
+ * If the timerlat is enabled, but the irq handler did
+ * not run yet enabling timerlat_tracer, do not trace.
+ */
+ if (!tlat_var->tracing_thread) {
+ osn_var->thread.delta_start = 0;
+ osn_var->thread.arrival_time = 0;
+ return 0;
+ }
+ return 1;
+}
+#else /* CONFIG_TIMERLAT_TRACER */
+static inline bool timerlat_enabled(void)
+{
+ return false;
+}
+
+static inline int timerlat_softirq_exit(struct osnoise_variables *osn_var)
+{
+ return 1;
+}
+static inline int timerlat_thread_exit(struct osnoise_variables *osn_var)
+{
+ return 1;
+}
+#endif
+
+#ifdef CONFIG_PREEMPT_RT
+/*
+ * Print the osnoise header info.
+ */
+static void print_osnoise_headers(struct seq_file *s)
+{
+ if (osnoise_data.tainted)
+ seq_puts(s, "# osnoise is tainted!\n");
+
+ seq_puts(s, "# _-------=> irqs-off\n");
+ seq_puts(s, "# / _------=> need-resched\n");
+ seq_puts(s, "# | / _-----=> need-resched-lazy\n");
+ seq_puts(s, "# || / _----=> hardirq/softirq\n");
+ seq_puts(s, "# ||| / _---=> preempt-depth\n");
+ seq_puts(s, "# |||| / _--=> preempt-lazy-depth\n");
+ seq_puts(s, "# ||||| / _-=> migrate-disable\n");
+
+ seq_puts(s, "# |||||| / ");
+ seq_puts(s, " MAX\n");
+
+ seq_puts(s, "# ||||| / ");
+ seq_puts(s, " SINGLE Interference counters:\n");
+
+ seq_puts(s, "# ||||||| RUNTIME ");
+ seq_puts(s, " NOISE %% OF CPU NOISE +-----------------------------+\n");
+
+ seq_puts(s, "# TASK-PID CPU# ||||||| TIMESTAMP IN US ");
+ seq_puts(s, " IN US AVAILABLE IN US HW NMI IRQ SIRQ THREAD\n");
+
+ seq_puts(s, "# | | | ||||||| | | ");
+ seq_puts(s, " | | | | | | | |\n");
+}
+#else /* CONFIG_PREEMPT_RT */
+static void print_osnoise_headers(struct seq_file *s)
+{
+ if (osnoise_data.tainted)
+ seq_puts(s, "# osnoise is tainted!\n");
+
+ seq_puts(s, "# _-----=> irqs-off\n");
+ seq_puts(s, "# / _----=> need-resched\n");
+ seq_puts(s, "# | / _---=> hardirq/softirq\n");
+ seq_puts(s, "# || / _--=> preempt-depth\n");
+ seq_puts(s, "# ||| / _-=> migrate-disable ");
+ seq_puts(s, " MAX\n");
+ seq_puts(s, "# |||| / delay ");
+ seq_puts(s, " SINGLE Interference counters:\n");
+
+ seq_puts(s, "# ||||| RUNTIME ");
+ seq_puts(s, " NOISE %% OF CPU NOISE +-----------------------------+\n");
+
+ seq_puts(s, "# TASK-PID CPU# ||||| TIMESTAMP IN US ");
+ seq_puts(s, " IN US AVAILABLE IN US HW NMI IRQ SIRQ THREAD\n");
+
+ seq_puts(s, "# | | | ||||| | | ");
+ seq_puts(s, " | | | | | | | |\n");
+}
+#endif /* CONFIG_PREEMPT_RT */
+
+/*
+ * osnoise_taint - report an osnoise error.
+ */
+#define osnoise_taint(msg) ({ \
+ struct osnoise_instance *inst; \
+ struct trace_buffer *buffer; \
+ \
+ rcu_read_lock(); \
+ list_for_each_entry_rcu(inst, &osnoise_instances, list) { \
+ buffer = inst->tr->array_buffer.buffer; \
+ trace_array_printk_buf(buffer, _THIS_IP_, msg); \
+ } \
+ rcu_read_unlock(); \
+ osnoise_data.tainted = true; \
+})
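+/*
+ * osnoise_taint() is kept as a statement-expression macro so that
+ * _THIS_IP_ expands at the call site; the message is recorded in the
+ * buffer of every registered instance and the tracer is marked tainted.
+ */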
+
+/*
+ * Record an osnoise_sample into the tracer buffer.
+ */
+static void
+__trace_osnoise_sample(struct osnoise_sample *sample, struct trace_buffer *buffer)
+{
+ struct trace_event_call *call = &event_osnoise;
+ struct ring_buffer_event *event;
+ struct osnoise_entry *entry;
+
+ event = trace_buffer_lock_reserve(buffer, TRACE_OSNOISE, sizeof(*entry),
+ tracing_gen_ctx());
+ if (!event)
+ return;
+ entry = ring_buffer_event_data(event);
+ entry->runtime = sample->runtime;
+ entry->noise = sample->noise;
+ entry->max_sample = sample->max_sample;
+ entry->hw_count = sample->hw_count;
+ entry->nmi_count = sample->nmi_count;
+ entry->irq_count = sample->irq_count;
+ entry->softirq_count = sample->softirq_count;
+ entry->thread_count = sample->thread_count;
+
+ if (!call_filter_check_discard(call, entry, buffer, event))
+ trace_buffer_unlock_commit_nostack(buffer, event);
+}
+
+/*
+ * Record an osnoise_sample on all osnoise instances.
+ */
+static void trace_osnoise_sample(struct osnoise_sample *sample)
+{
+ struct osnoise_instance *inst;
+ struct trace_buffer *buffer;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(inst, &osnoise_instances, list) {
+ buffer = inst->tr->array_buffer.buffer;
+ __trace_osnoise_sample(sample, buffer);
+ }
+ rcu_read_unlock();
+}
+
+#ifdef CONFIG_TIMERLAT_TRACER
+/*
+ * Print the timerlat header info.
+ */
+#ifdef CONFIG_PREEMPT_RT
+static void print_timerlat_headers(struct seq_file *s)
+{
+ seq_puts(s, "# _-------=> irqs-off\n");
+ seq_puts(s, "# / _------=> need-resched\n");
+ seq_puts(s, "# | / _-----=> need-resched-lazy\n");
+ seq_puts(s, "# || / _----=> hardirq/softirq\n");
+ seq_puts(s, "# ||| / _---=> preempt-depth\n");
+ seq_puts(s, "# |||| / _--=> preempt-lazy-depth\n");
+ seq_puts(s, "# ||||| / _-=> migrate-disable\n");
+ seq_puts(s, "# |||||| /\n");
+ seq_puts(s, "# ||||||| ACTIVATION\n");
+ seq_puts(s, "# TASK-PID CPU# ||||||| TIMESTAMP ID ");
+ seq_puts(s, " CONTEXT LATENCY\n");
+ seq_puts(s, "# | | | ||||||| | | ");
+ seq_puts(s, " | |\n");
+}
+#else /* CONFIG_PREEMPT_RT */
+static void print_timerlat_headers(struct seq_file *s)
+{
+ seq_puts(s, "# _-----=> irqs-off\n");
+ seq_puts(s, "# / _----=> need-resched\n");
+ seq_puts(s, "# | / _---=> hardirq/softirq\n");
+ seq_puts(s, "# || / _--=> preempt-depth\n");
+ seq_puts(s, "# ||| / _-=> migrate-disable\n");
+ seq_puts(s, "# |||| / delay\n");
+ seq_puts(s, "# ||||| ACTIVATION\n");
+ seq_puts(s, "# TASK-PID CPU# ||||| TIMESTAMP ID ");
+ seq_puts(s, " CONTEXT LATENCY\n");
+ seq_puts(s, "# | | | ||||| | | ");
+ seq_puts(s, " | |\n");
+}
+#endif /* CONFIG_PREEMPT_RT */
+
+static void
+__trace_timerlat_sample(struct timerlat_sample *sample, struct trace_buffer *buffer)
+{
+ struct trace_event_call *call = &event_osnoise;
+ struct ring_buffer_event *event;
+ struct timerlat_entry *entry;
+
+ event = trace_buffer_lock_reserve(buffer, TRACE_TIMERLAT, sizeof(*entry),
+ tracing_gen_ctx());
+ if (!event)
+ return;
+ entry = ring_buffer_event_data(event);
+ entry->seqnum = sample->seqnum;
+ entry->context = sample->context;
+ entry->timer_latency = sample->timer_latency;
+
+ if (!call_filter_check_discard(call, entry, buffer, event))
+ trace_buffer_unlock_commit_nostack(buffer, event);
+}
+
+/*
+ * Record a timerlat_sample into the tracer buffer.
+ */
+static void trace_timerlat_sample(struct timerlat_sample *sample)
+{
+ struct osnoise_instance *inst;
+ struct trace_buffer *buffer;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(inst, &osnoise_instances, list) {
+ buffer = inst->tr->array_buffer.buffer;
+ __trace_timerlat_sample(sample, buffer);
+ }
+ rcu_read_unlock();
+}
+
+#ifdef CONFIG_STACKTRACE
+
+#define MAX_CALLS 256
+
+/*
+ * Stack trace will take place only at IRQ level, so there is no need
+ * to control nesting here.
+ */
+struct trace_stack {
+ int stack_size;
+ int nr_entries;
+ unsigned long calls[MAX_CALLS];
+};
+
+static DEFINE_PER_CPU(struct trace_stack, trace_stack);
+
+/*
+ * timerlat_save_stack - save a stack trace without printing
+ *
+ * Save the current stack trace without printing. The
+ * stack will be printed later, after the end of the measurement.
+ */
+static void timerlat_save_stack(int skip)
+{
+ unsigned int size, nr_entries;
+ struct trace_stack *fstack;
+
+ fstack = this_cpu_ptr(&trace_stack);
+
+ size = ARRAY_SIZE(fstack->calls);
+
+ nr_entries = stack_trace_save(fstack->calls, size, skip);
+
+ fstack->stack_size = nr_entries * sizeof(unsigned long);
+ fstack->nr_entries = nr_entries;
+
+ return;
+
+}
+
+static void
+__timerlat_dump_stack(struct trace_buffer *buffer, struct trace_stack *fstack, unsigned int size)
+{
+ struct trace_event_call *call = &event_osnoise;
+ struct ring_buffer_event *event;
+ struct stack_entry *entry;
+
+ event = trace_buffer_lock_reserve(buffer, TRACE_STACK, sizeof(*entry) + size,
+ tracing_gen_ctx());
+ if (!event)
+ return;
+
+ entry = ring_buffer_event_data(event);
+
+ memcpy(&entry->caller, fstack->calls, size);
+ entry->size = fstack->nr_entries;
+
+ if (!call_filter_check_discard(call, entry, buffer, event))
+ trace_buffer_unlock_commit_nostack(buffer, event);
+}
+
+/*
+ * timerlat_dump_stack - dump a stack trace previously saved
+ */
+static void timerlat_dump_stack(u64 latency)
+{
+ struct osnoise_instance *inst;
+ struct trace_buffer *buffer;
+ struct trace_stack *fstack;
+ unsigned int size;
+
+ /*
+ * trace only if latency > print_stack config, if enabled.
+ */
+ if (!osnoise_data.print_stack || osnoise_data.print_stack > latency)
+ return;
+
+ preempt_disable_notrace();
+ fstack = this_cpu_ptr(&trace_stack);
+ size = fstack->stack_size;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(inst, &osnoise_instances, list) {
+ buffer = inst->tr->array_buffer.buffer;
+ __timerlat_dump_stack(buffer, fstack, size);
+
+ }
+ rcu_read_unlock();
+ preempt_enable_notrace();
+}
+#else /* CONFIG_STACKTRACE */
+#define timerlat_dump_stack(u64 latency) do {} while (0)
+#define timerlat_save_stack(a) do {} while (0)
+#endif /* CONFIG_STACKTRACE */
+#endif /* CONFIG_TIMERLAT_TRACER */
+
+/*
+ * Macros to encapsulate the time capturing infrastructure.
+ */
+#define time_get() trace_clock_local()
+#define time_to_us(x) div_u64(x, 1000)
+#define time_sub(a, b) ((a) - (b))
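+/*
+ * time_get() is based on trace_clock_local(), which returns nanoseconds,
+ * hence the div_u64(x, 1000) in time_to_us().
+ */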
+
+/*
+ * cond_move_irq_delta_start - Forward the delta_start of a running IRQ
+ *
+ * If an IRQ is preempted by an NMI, its delta_start is pushed forward
+ * to discount the NMI interference.
+ *
+ * See get_int_safe_duration().
+ */
+static inline void
+cond_move_irq_delta_start(struct osnoise_variables *osn_var, u64 duration)
+{
+ if (osn_var->irq.delta_start)
+ osn_var->irq.delta_start += duration;
+}
+
+#ifndef CONFIG_PREEMPT_RT
+/*
+ * cond_move_softirq_delta_start - Forward the delta_start of a running softirq.
+ *
+ * If a softirq is preempted by an IRQ or NMI, its delta_start is pushed
+ * forward to discount the interference.
+ *
+ * See get_int_safe_duration().
+ */
+static inline void
+cond_move_softirq_delta_start(struct osnoise_variables *osn_var, u64 duration)
+{
+ if (osn_var->softirq.delta_start)
+ osn_var->softirq.delta_start += duration;
+}
+#else /* CONFIG_PREEMPT_RT */
+#define cond_move_softirq_delta_start(osn_var, duration) do {} while (0)
+#endif
+
+/*
+ * cond_move_thread_delta_start - Forward the delta_start of a running thread
+ *
+ * If a noisy thread is preempted by a softirq, IRQ or NMI, its delta_start
+ * is pushed forward to discount the interference.
+ *
+ * See get_int_safe_duration().
+ */
+static inline void
+cond_move_thread_delta_start(struct osnoise_variables *osn_var, u64 duration)
+{
+ if (osn_var->thread.delta_start)
+ osn_var->thread.delta_start += duration;
+}
+
+/*
+ * get_int_safe_duration - Get the duration of a window
+ *
+ * The irq, softirq and thread variables need to have their duration without
+ * the interference from higher priority interrupts. Instead of keeping a
+ * variable to discount the interrupt interference from these variables, the
+ * starting time of these variables are pushed forward with the interrupt's
+ * duration. In this way, a single variable is used to:
+ *
+ * - Know if a given window is being measured.
+ * - Account its duration.
+ * - Discount the interference.
+ *
+ * To avoid getting inconsistent values, e.g.,:
+ *
+ * now = time_get()
+ * ---> interrupt!
+ * delta_start -= int duration;
+ * <---
+ * duration = now - delta_start;
+ *
+ * result: negative duration if the variable duration before the
+ * interrupt was smaller than the interrupt execution.
+ *
+ * A counter of interrupts is used. If the counter increased, try
+ * to capture an interference safe duration.
+ */
+static inline s64
+get_int_safe_duration(struct osnoise_variables *osn_var, u64 *delta_start)
+{
+ u64 int_counter, now;
+ s64 duration;
+
+ do {
+ int_counter = local_read(&osn_var->int_counter);
+ /* synchronize with interrupts */
+ barrier();
+
+ now = time_get();
+ duration = (now - *delta_start);
+
+ /* synchronize with interrupts */
+ barrier();
+ } while (int_counter != local_read(&osn_var->int_counter));
+
+ /*
+ * This is evidence of a race condition that caused
+ * a value to be "discounted" too much.
+ */
+ if (duration < 0)
+ osnoise_taint("Negative duration!\n");
+
+ *delta_start = 0;
+
+ return duration;
+}
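+/*
+ * Illustrative note: if an interrupt fires between the time_get() and the
+ * second int_counter read, it may also have pushed *delta_start forward
+ * via a cond_move_*_delta_start() helper. The counter mismatch makes the
+ * loop retry, so the duration is recomputed against the updated
+ * *delta_start instead of mixing old and new values.
+ */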
+
+/*
+ *
+ * set_int_safe_time - Save the current time on *time, aware of interference
+ *
+ * Get the time, taking into consideration a possible interference from
+ * higher priority interrupts.
+ *
+ * See get_int_safe_duration() for an explanation.
+ */
+static u64
+set_int_safe_time(struct osnoise_variables *osn_var, u64 *time)
+{
+ u64 int_counter;
+
+ do {
+ int_counter = local_read(&osn_var->int_counter);
+ /* synchronize with interrupts */
+ barrier();
+
+ *time = time_get();
+
+ /* synchronize with interrupts */
+ barrier();
+ } while (int_counter != local_read(&osn_var->int_counter));
+
+ return int_counter;
+}
+
+#ifdef CONFIG_TIMERLAT_TRACER
+/*
+ * copy_int_safe_time - Copy *src into *dst, aware of interference
+ */
+static u64
+copy_int_safe_time(struct osnoise_variables *osn_var, u64 *dst, u64 *src)
+{
+ u64 int_counter;
+
+ do {
+ int_counter = local_read(&osn_var->int_counter);
+ /* synchronize with interrupts */
+ barrier();
+
+ *dst = *src;
+
+ /* synchronize with interrupts */
+ barrier();
+ } while (int_counter != local_read(&osn_var->int_counter));
+
+ return int_counter;
+}
+#endif /* CONFIG_TIMERLAT_TRACER */
+
+/*
+ * trace_osnoise_callback - NMI entry/exit callback
+ *
+ * This function is called at the NMI entry and exit code; the bool enter
+ * distinguishes between the two cases. It is used to note an NMI
+ * occurrence, compute the noise caused by the NMI, and to remove the noise
+ * it is potentially causing on other interference variables.
+ */
+void trace_osnoise_callback(bool enter)
+{
+ struct osnoise_variables *osn_var = this_cpu_osn_var();
+ u64 duration;
+
+ if (!osn_var->sampling)
+ return;
+
+ /*
+ * Currently trace_clock_local() calls sched_clock() and the
+ * generic version is not NMI safe.
+ */
+ if (!IS_ENABLED(CONFIG_GENERIC_SCHED_CLOCK)) {
+ if (enter) {
+ osn_var->nmi.delta_start = time_get();
+ local_inc(&osn_var->int_counter);
+ } else {
+ duration = time_get() - osn_var->nmi.delta_start;
+
+ trace_nmi_noise(osn_var->nmi.delta_start, duration);
+
+ cond_move_irq_delta_start(osn_var, duration);
+ cond_move_softirq_delta_start(osn_var, duration);
+ cond_move_thread_delta_start(osn_var, duration);
+ }
+ }
+
+ if (enter)
+ osn_var->nmi.count++;
+}
+
+/*
+ * osnoise_trace_irq_entry - Note the starting of an IRQ
+ *
+ * Save the starting time of an IRQ. As IRQs are non-preemptive to other IRQs,
+ * it is safe to use a single variable (osn_var->irq) to save the statistics.
+ * The arrival_time is used to report... the arrival time. The delta_start
+ * is used to compute the duration at the IRQ exit handler. See
+ * cond_move_irq_delta_start().
+ */
+void osnoise_trace_irq_entry(int id)
+{
+ struct osnoise_variables *osn_var = this_cpu_osn_var();
+
+ if (!osn_var->sampling)
+ return;
+ /*
+ * This value will be used in the report, but not to compute
+ * the execution time, so it is safe to read it without interference protection.
+ */
+ osn_var->irq.arrival_time = time_get();
+ set_int_safe_time(osn_var, &osn_var->irq.delta_start);
+ osn_var->irq.count++;
+
+ local_inc(&osn_var->int_counter);
+}
+
+/*
+ * osnoise_trace_irq_exit - Note the end of an IRQ, save data and trace
+ *
+ * Computes the duration of the IRQ noise and traces it. It also discounts the
+ * interference from other sources of noise that may currently be accounted.
+ */
+void osnoise_trace_irq_exit(int id, const char *desc)
+{
+ struct osnoise_variables *osn_var = this_cpu_osn_var();
+ s64 duration;
+
+ if (!osn_var->sampling)
+ return;
+
+ duration = get_int_safe_duration(osn_var, &osn_var->irq.delta_start);
+ trace_irq_noise(id, desc, osn_var->irq.arrival_time, duration);
+ osn_var->irq.arrival_time = 0;
+ cond_move_softirq_delta_start(osn_var, duration);
+ cond_move_thread_delta_start(osn_var, duration);
+}
+
+/*
+ * trace_irqentry_callback - Callback to the irq:irq_handler_entry tracepoint
+ *
+ * Used to note the start of an IRQ occurrence.
+ */
+static void trace_irqentry_callback(void *data, int irq,
+ struct irqaction *action)
+{
+ osnoise_trace_irq_entry(irq);
+}
+
+/*
+ * trace_irqexit_callback - Callback to the irq:irq_handler_exit tracepoint
+ *
+ * Used to note the end of an IRQ occurrence.
+ */
+static void trace_irqexit_callback(void *data, int irq,
+ struct irqaction *action, int ret)
+{
+ osnoise_trace_irq_exit(irq, action->name);
+}
+
+/*
+ * arch specific register function.
+ */
+int __weak osnoise_arch_register(void)
+{
+ return 0;
+}
+
+/*
+ * arch specific unregister function.
+ */
+void __weak osnoise_arch_unregister(void)
+{
+ return;
+}
+
+/*
+ * hook_irq_events - Hook IRQ handling events
+ *
+ * This function hooks the IRQ related callbacks to the respective trace
+ * events.
+ */
+static int hook_irq_events(void)
+{
+ int ret;
+
+ ret = register_trace_irq_handler_entry(trace_irqentry_callback, NULL);
+ if (ret)
+ goto out_err;
+
+ ret = register_trace_irq_handler_exit(trace_irqexit_callback, NULL);
+ if (ret)
+ goto out_unregister_entry;
+
+ ret = osnoise_arch_register();
+ if (ret)
+ goto out_irq_exit;
+
+ return 0;
+
+out_irq_exit:
+ unregister_trace_irq_handler_exit(trace_irqexit_callback, NULL);
+out_unregister_entry:
+ unregister_trace_irq_handler_entry(trace_irqentry_callback, NULL);
+out_err:
+ return -EINVAL;
+}
+
+/*
+ * unhook_irq_events - Unhook IRQ handling events
+ *
+ * This function unhooks the IRQ related callbacks from the respective trace
+ * events.
+ */
+static void unhook_irq_events(void)
+{
+ osnoise_arch_unregister();
+ unregister_trace_irq_handler_exit(trace_irqexit_callback, NULL);
+ unregister_trace_irq_handler_entry(trace_irqentry_callback, NULL);
+}
+
+#ifndef CONFIG_PREEMPT_RT
+/*
+ * trace_softirq_entry_callback - Note the starting of a softirq
+ *
+ * Save the starting time of a softirq. As softirqs are non-preemptive to
+ * other softirqs, it is safe to use a single variable (osn_var->softirq)
+ * to save the statistics. The arrival_time is used to report... the
+ * arrival time. The delta_start is used to compute the duration at the
+ * softirq exit handler. See cond_move_softirq_delta_start().
+ */
+static void trace_softirq_entry_callback(void *data, unsigned int vec_nr)
+{
+ struct osnoise_variables *osn_var = this_cpu_osn_var();
+
+ if (!osn_var->sampling)
+ return;
+ /*
+ * This value will be used in the report, but not to compute
+ * the execution time, so it is safe to read it without interference protection.
+ */
+ osn_var->softirq.arrival_time = time_get();
+ set_int_safe_time(osn_var, &osn_var->softirq.delta_start);
+ osn_var->softirq.count++;
+
+ local_inc(&osn_var->int_counter);
+}
+
+/*
+ * trace_softirq_exit_callback - Note the end of a softirq
+ *
+ * Computes the duration of the softirq noise and traces it. It also discounts the
+ * interference from other sources of noise that may currently be accounted.
+ */
+static void trace_softirq_exit_callback(void *data, unsigned int vec_nr)
+{
+ struct osnoise_variables *osn_var = this_cpu_osn_var();
+ s64 duration;
+
+ if (!osn_var->sampling)
+ return;
+
+ if (unlikely(timerlat_enabled()))
+ if (!timerlat_softirq_exit(osn_var))
+ return;
+
+ duration = get_int_safe_duration(osn_var, &osn_var->softirq.delta_start);
+ trace_softirq_noise(vec_nr, osn_var->softirq.arrival_time, duration);
+ cond_move_thread_delta_start(osn_var, duration);
+ osn_var->softirq.arrival_time = 0;
+}
+
+/*
+ * hook_softirq_events - Hook softirq handling events
+ *
+ * This function hooks the softirq related callbacks to the respective trace
+ * events.
+ */
+static int hook_softirq_events(void)
+{
+ int ret;
+
+ ret = register_trace_softirq_entry(trace_softirq_entry_callback, NULL);
+ if (ret)
+ goto out_err;
+
+ ret = register_trace_softirq_exit(trace_softirq_exit_callback, NULL);
+ if (ret)
+ goto out_unreg_entry;
+
+ return 0;
+
+out_unreg_entry:
+ unregister_trace_softirq_entry(trace_softirq_entry_callback, NULL);
+out_err:
+ return -EINVAL;
+}
+
+/*
+ * unhook_softirq_events - Unhook softirq handling events
+ *
+ * This function unhooks the softirq related callbacks from the respective
+ * trace events.
+ */
+static void unhook_softirq_events(void)
+{
+ unregister_trace_softirq_entry(trace_softirq_entry_callback, NULL);
+ unregister_trace_softirq_exit(trace_softirq_exit_callback, NULL);
+}
+#else /* CONFIG_PREEMPT_RT */
+/*
+ * softirqs are threads in PREEMPT_RT mode, so they are accounted as thread noise.
+ */
+static int hook_softirq_events(void)
+{
+ return 0;
+}
+static void unhook_softirq_events(void)
+{
+}
+#endif
+
+/*
+ * thread_entry - Record the starting of a thread noise window
+ *
+ * It saves the context switch time for a noisy thread, and increments
+ * the interference counters.
+ */
+static void
+thread_entry(struct osnoise_variables *osn_var, struct task_struct *t)
+{
+ if (!osn_var->sampling)
+ return;
+ /*
+ * The arrival time will be used in the report, but not to compute
+ * the execution time, so it is safe to read it without interference protection.
+ */
+ osn_var->thread.arrival_time = time_get();
+
+ set_int_safe_time(osn_var, &osn_var->thread.delta_start);
+
+ osn_var->thread.count++;
+ local_inc(&osn_var->int_counter);
+}
+
+/*
+ * thread_exit - Report the end of a thread noise window
+ *
+ * It computes the total noise from a thread, tracing if needed.
+ */
+static void
+thread_exit(struct osnoise_variables *osn_var, struct task_struct *t)
+{
+ s64 duration;
+
+ if (!osn_var->sampling)
+ return;
+
+ if (unlikely(timerlat_enabled()))
+ if (!timerlat_thread_exit(osn_var))
+ return;
+
+ duration = get_int_safe_duration(osn_var, &osn_var->thread.delta_start);
+
+ trace_thread_noise(t, osn_var->thread.arrival_time, duration);
+
+ osn_var->thread.arrival_time = 0;
+}
+
+#ifdef CONFIG_TIMERLAT_TRACER
+/*
+ * osnoise_stop_exception - Stop tracing and the tracer.
+ */
+static __always_inline void osnoise_stop_exception(char *msg, int cpu)
+{
+ struct osnoise_instance *inst;
+ struct trace_array *tr;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(inst, &osnoise_instances, list) {
+ tr = inst->tr;
+ trace_array_printk_buf(tr->array_buffer.buffer, _THIS_IP_,
+ "stop tracing hit on cpu %d due to exception: %s\n",
+ smp_processor_id(),
+ msg);
+
+ if (test_bit(OSN_PANIC_ON_STOP, &osnoise_options))
+ panic("tracer hit on cpu %d due to exception: %s\n",
+ smp_processor_id(),
+ msg);
+
+ tracer_tracing_off(tr);
+ }
+ rcu_read_unlock();
+}
+
+/*
+ * trace_sched_migrate_callback - sched:sched_migrate_task trace event handler
+ *
+ * This function is hooked to the sched:sched_migrate_task trace event, and it monitors
+ * timerlat user-space thread migration.
+ */
+static void trace_sched_migrate_callback(void *data, struct task_struct *p, int dest_cpu)
+{
+ struct osnoise_variables *osn_var;
+ long cpu = task_cpu(p);
+
+ osn_var = per_cpu_ptr(&per_cpu_osnoise_var, cpu);
+ if (osn_var->pid == p->pid && dest_cpu != cpu) {
+ per_cpu_ptr(&per_cpu_timerlat_var, cpu)->uthread_migrate = 1;
+ osnoise_taint("timerlat user-thread migrated\n");
+ osnoise_stop_exception("timerlat user-thread migrated", cpu);
+ }
+}
+
+static int register_migration_monitor(void)
+{
+ int ret = 0;
+
+ /*
+ * Timerlat thread migration check is only required when running timerlat in user-space.
+ * Thus, enable callback only if timerlat is set with no workload.
+ */
+ if (timerlat_enabled() && !test_bit(OSN_WORKLOAD, &osnoise_options))
+ ret = register_trace_sched_migrate_task(trace_sched_migrate_callback, NULL);
+
+ return ret;
+}
+
+static void unregister_migration_monitor(void)
+{
+ if (timerlat_enabled() && !test_bit(OSN_WORKLOAD, &osnoise_options))
+ unregister_trace_sched_migrate_task(trace_sched_migrate_callback, NULL);
+}
+#else
+static int register_migration_monitor(void)
+{
+ return 0;
+}
+static void unregister_migration_monitor(void) {}
+#endif
+
+/*
+ * trace_sched_switch_callback - sched:sched_switch trace event handler
+ *
+ * This function is hooked to the sched:sched_switch trace event, and it is
+ * used to record the beginning and to report the end of a thread noise window.
+ */
+static void
+trace_sched_switch_callback(void *data, bool preempt,
+ struct task_struct *p,
+ struct task_struct *n,
+ unsigned int prev_state)
+{
+ struct osnoise_variables *osn_var = this_cpu_osn_var();
+ int workload = test_bit(OSN_WORKLOAD, &osnoise_options);
+
+ if ((p->pid != osn_var->pid) || !workload)
+ thread_exit(osn_var, p);
+
+ if ((n->pid != osn_var->pid) || !workload)
+ thread_entry(osn_var, n);
+}
+
+/*
+ * hook_thread_events - Hook the instrumentation for thread noise
+ *
+ * Hook the osnoise tracer callbacks to handle the noise from other
+ * threads on the necessary kernel events.
+ */
+static int hook_thread_events(void)
+{
+ int ret;
+
+ ret = register_trace_sched_switch(trace_sched_switch_callback, NULL);
+ if (ret)
+ return -EINVAL;
+
+ ret = register_migration_monitor();
+ if (ret)
+ goto out_unreg;
+
+ return 0;
+
+out_unreg:
+ unregister_trace_sched_switch(trace_sched_switch_callback, NULL);
+ return -EINVAL;
+}
+
+/*
+ * unhook_thread_events - unhook the instrumentation for thread noise
+ *
+ * Unhook the osnoise tracer callbacks that handle the noise from other
+ * threads on the necessary kernel events.
+ */
+static void unhook_thread_events(void)
+{
+ unregister_trace_sched_switch(trace_sched_switch_callback, NULL);
+ unregister_migration_monitor();
+}
+
+/*
+ * save_osn_sample_stats - Save the osnoise_sample statistics
+ *
+ * Save the osnoise_sample statistics before the sampling phase. These
+ * values will be used later to compute the diff between the statistics
+ * before and after the osnoise sampling.
+ */
+static void
+save_osn_sample_stats(struct osnoise_variables *osn_var, struct osnoise_sample *s)
+{
+ s->nmi_count = osn_var->nmi.count;
+ s->irq_count = osn_var->irq.count;
+ s->softirq_count = osn_var->softirq.count;
+ s->thread_count = osn_var->thread.count;
+}
+
+/*
+ * diff_osn_sample_stats - Compute the osnoise_sample statistics
+ *
+ * After a sample period, compute the difference on the osnoise_sample
+ * statistics. The struct osnoise_sample *s contains the statistics saved via
+ * save_osn_sample_stats() before the osnoise sampling.
+ */
+static void
+diff_osn_sample_stats(struct osnoise_variables *osn_var, struct osnoise_sample *s)
+{
+ s->nmi_count = osn_var->nmi.count - s->nmi_count;
+ s->irq_count = osn_var->irq.count - s->irq_count;
+ s->softirq_count = osn_var->softirq.count - s->softirq_count;
+ s->thread_count = osn_var->thread.count - s->thread_count;
+}
+
+/*
+ * osnoise_stop_tracing - Stop tracing and the tracer.
+ */
+static __always_inline void osnoise_stop_tracing(void)
+{
+ struct osnoise_instance *inst;
+ struct trace_array *tr;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(inst, &osnoise_instances, list) {
+ tr = inst->tr;
+ trace_array_printk_buf(tr->array_buffer.buffer, _THIS_IP_,
+ "stop tracing hit on cpu %d\n", smp_processor_id());
+
+ if (test_bit(OSN_PANIC_ON_STOP, &osnoise_options))
+ panic("tracer hit stop condition on CPU %d\n", smp_processor_id());
+
+ tracer_tracing_off(tr);
+ }
+ rcu_read_unlock();
+}
+
+/*
+ * osnoise_has_tracing_on - Check if there is at least one instance on
+ */
+static __always_inline int osnoise_has_tracing_on(void)
+{
+ struct osnoise_instance *inst;
+ int trace_is_on = 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(inst, &osnoise_instances, list)
+ trace_is_on += tracer_tracing_is_on(inst->tr);
+ rcu_read_unlock();
+
+ return trace_is_on;
+}
+
+/*
+ * notify_new_max_latency - Notify a new max latency via fsnotify interface.
+ */
+static void notify_new_max_latency(u64 latency)
+{
+ struct osnoise_instance *inst;
+ struct trace_array *tr;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(inst, &osnoise_instances, list) {
+ tr = inst->tr;
+ if (tracer_tracing_is_on(tr) && tr->max_latency < latency) {
+ tr->max_latency = latency;
+ latency_fsnotify(tr);
+ }
+ }
+ rcu_read_unlock();
+}
+
+/*
+ * run_osnoise - Sample the time and look for osnoise
+ *
+ * Used to capture the time, looking for potential osnoise latency repeatedly.
+ * Different from hwlat_detector, it is called with preemption and interrupts
+ * enabled. This allows irqs, softirqs and threads to run, interfering with the
+ * osnoise sampling thread, as they would do with a regular thread.
+ */
+static int run_osnoise(void)
+{
+ bool disable_irq = test_bit(OSN_IRQ_DISABLE, &osnoise_options);
+ struct osnoise_variables *osn_var = this_cpu_osn_var();
+ u64 start, sample, last_sample;
+ u64 last_int_count, int_count;
+ s64 noise = 0, max_noise = 0;
+ s64 total, last_total = 0;
+ struct osnoise_sample s;
+ bool disable_preemption;
+ unsigned int threshold;
+ u64 runtime, stop_in;
+ u64 sum_noise = 0;
+ int hw_count = 0;
+ int ret = -1;
+
+ /*
+ * Disabling preemption is only required if IRQs are enabled,
+ * and the option is set.
+ */
+ disable_preemption = !disable_irq && test_bit(OSN_PREEMPT_DISABLE, &osnoise_options);
+
+ /*
+ * Considers the current thread as the workload.
+ */
+ osn_var->pid = current->pid;
+
+ /*
+ * Save the current stats for the diff
+ */
+ save_osn_sample_stats(osn_var, &s);
+
+ /*
+ * if threshold is 0, use the default value of 5 us.
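+	 * For example, writing 1 to the tracing_thresh file (which takes
+	 * microseconds) results in a 1000 ns threshold here, as
+	 * tracing_thresh is kept in nanoseconds.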
+ */
+ threshold = tracing_thresh ? : 5000;
+
+ /*
+ * Apply PREEMPT and IRQ disabled options.
+ */
+ if (disable_irq)
+ local_irq_disable();
+
+ if (disable_preemption)
+ preempt_disable();
+
+ /*
+ * Make sure NMIs see sampling first
+ */
+ osn_var->sampling = true;
+ barrier();
+
+ /*
+ * Transform the *_us config to nanoseconds to avoid the
+ * division on the main loop.
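+	 *
+	 * For example, writing 100 to osnoise/stop_tracing_us makes stop_in
+	 * 100000 ns, so a single noise sample above 100 us stops the tracing
+	 * instances below.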
+ */
+ runtime = osnoise_data.sample_runtime * NSEC_PER_USEC;
+ stop_in = osnoise_data.stop_tracing * NSEC_PER_USEC;
+
+ /*
+ * Start timestamp
+ */
+ start = time_get();
+
+ /*
+ * "previous" loop.
+ */
+ last_int_count = set_int_safe_time(osn_var, &last_sample);
+
+ do {
+ /*
+ * Get sample!
+ */
+ int_count = set_int_safe_time(osn_var, &sample);
+
+ noise = time_sub(sample, last_sample);
+
+ /*
+ * This shouldn't happen.
+ */
+ if (noise < 0) {
+ osnoise_taint("negative noise!");
+ goto out;
+ }
+
+ /*
+ * Sample runtime.
+ */
+ total = time_sub(sample, start);
+
+ /*
+ * Check for possible overflows.
+ */
+ if (total < last_total) {
+ osnoise_taint("total overflow!");
+ break;
+ }
+
+ last_total = total;
+
+ if (noise >= threshold) {
+ int interference = int_count - last_int_count;
+
+ if (noise > max_noise)
+ max_noise = noise;
+
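+			/*
+			 * A noise sample above the threshold with no
+			 * interference counted in between (no NMI, IRQ,
+			 * softirq or thread noted) can only come from the
+			 * hardware, e.g. an SMI: account it as hw noise.
+			 */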
+ if (!interference)
+ hw_count++;
+
+ sum_noise += noise;
+
+ trace_sample_threshold(last_sample, noise, interference);
+
+ if (osnoise_data.stop_tracing)
+ if (noise > stop_in)
+ osnoise_stop_tracing();
+ }
+
+ /*
+ * In some cases, notably when running on a nohz_full CPU with
+ * a stopped tick, PREEMPT_RCU has no way to account for QSs.
+ * This will eventually cause unwarranted noise as PREEMPT_RCU
+ * will force preemption as the means of ending the current
+ * grace period. We avoid this problem by calling
+ * rcu_momentary_dyntick_idle(), which performs a zero duration
+ * EQS allowing PREEMPT_RCU to end the current grace period.
+ * This call shouldn't be wrapped inside an RCU critical
+ * section.
+ *
+ * Note that in non PREEMPT_RCU kernels QSs are handled through
+ * cond_resched()
+ */
+ if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
+ if (!disable_irq)
+ local_irq_disable();
+
+ rcu_momentary_dyntick_idle();
+
+ if (!disable_irq)
+ local_irq_enable();
+ }
+
+ /*
+ * For the non-preemptive kernel config: let threads run, if
+ * they so wish, unless set not to do so.
+ */
+ if (!disable_irq && !disable_preemption)
+ cond_resched();
+
+ last_sample = sample;
+ last_int_count = int_count;
+
+ } while (total < runtime && !kthread_should_stop());
+
+ /*
+ * Make sure the sampling loop above is finished from the interrupts' point of view.
+ */
+ barrier();
+
+ osn_var->sampling = false;
+
+ /*
+ * Make sure sampling data is no longer updated.
+ */
+ barrier();
+
+ /*
+ * Return to the preemptive state.
+ */
+ if (disable_preemption)
+ preempt_enable();
+
+ if (disable_irq)
+ local_irq_enable();
+
+ /*
+ * Save noise info.
+ */
+ s.noise = time_to_us(sum_noise);
+ s.runtime = time_to_us(total);
+ s.max_sample = time_to_us(max_noise);
+ s.hw_count = hw_count;
+
+ /* Save interference stats info */
+ diff_osn_sample_stats(osn_var, &s);
+
+ trace_osnoise_sample(&s);
+
+ notify_new_max_latency(max_noise);
+
+ if (osnoise_data.stop_tracing_total)
+ if (s.noise > osnoise_data.stop_tracing_total)
+ osnoise_stop_tracing();
+
+ return 0;
+out:
+ return ret;
+}
+
+static struct cpumask osnoise_cpumask;
+static struct cpumask save_cpumask;
+
+/*
+ * osnoise_sleep - sleep until the next period
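+ *
+ * For example, with period_us = 1000000 and runtime_us = 500000, the
+ * sampling thread runs for ~0.5 s and then sleeps for the remaining
+ * ~0.5 s of the period (or for a full period when skip_period is set).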
+ */
+static void osnoise_sleep(bool skip_period)
+{
+ u64 interval;
+ ktime_t wake_time;
+
+ mutex_lock(&interface_lock);
+ if (skip_period)
+ interval = osnoise_data.sample_period;
+ else
+ interval = osnoise_data.sample_period - osnoise_data.sample_runtime;
+ mutex_unlock(&interface_lock);
+
+ /*
+ * Unlike the hwlat_detector, the osnoise tracer can run
+ * without a pause because preemption is on.
+ */
+ if (!interval) {
+ /* Let synchronize_rcu_tasks() make progress */
+ cond_resched_tasks_rcu_qs();
+ return;
+ }
+
+ wake_time = ktime_add_us(ktime_get(), interval);
+ __set_current_state(TASK_INTERRUPTIBLE);
+
+ while (schedule_hrtimeout(&wake_time, HRTIMER_MODE_ABS)) {
+ if (kthread_should_stop())
+ break;
+ }
+}
+
+/*
+ * osnoise_migration_pending - checks if the task needs to migrate
+ *
+ * osnoise/timerlat threads are per-cpu. If there is a pending request to
+ * migrate the thread away from the current CPU, something bad has happened.
+ * Play the good citizen and leave.
+ *
+ * Returns 0 if it is safe to continue, 1 otherwise.
+ */
+static inline int osnoise_migration_pending(void)
+{
+ if (!current->migration_pending)
+ return 0;
+
+ /*
+ * If migration is pending, there is a task waiting for the
+ * tracer to enable migration. The tracer does not allow migration,
+ * thus: taint and leave to unblock the blocked thread.
+ */
+ osnoise_taint("migration requested to osnoise threads, leaving.");
+
+ /*
+ * Unset this thread from the threads managed by the interface.
+ * The tracers are responsible for cleaning their env before
+ * exiting.
+ */
+ mutex_lock(&interface_lock);
+ this_cpu_osn_var()->kthread = NULL;
+ mutex_unlock(&interface_lock);
+
+ return 1;
+}
+
+/*
+ * osnoise_main - The osnoise detection kernel thread
+ *
+ * Calls run_osnoise() function to measure the osnoise for the configured runtime,
+ * every period.
+ */
+static int osnoise_main(void *data)
+{
+ unsigned long flags;
+
+ /*
+ * This thread was created pinned to the CPU using PF_NO_SETAFFINITY.
+ * The problem is that cgroup does not allow PF_NO_SETAFFINITY thread.
+ *
+ * To work around this limitation, disable migration and remove the
+ * flag.
+ */
+ migrate_disable();
+ raw_spin_lock_irqsave(&current->pi_lock, flags);
+ current->flags &= ~(PF_NO_SETAFFINITY);
+ raw_spin_unlock_irqrestore(&current->pi_lock, flags);
+
+ while (!kthread_should_stop()) {
+ if (osnoise_migration_pending())
+ break;
+
+ /* skip a period if tracing is off on all instances */
+ if (!osnoise_has_tracing_on()) {
+ osnoise_sleep(true);
+ continue;
+ }
+
+ run_osnoise();
+ osnoise_sleep(false);
+ }
+
+ migrate_enable();
+ return 0;
+}
+
+#ifdef CONFIG_TIMERLAT_TRACER
+/*
+ * timerlat_irq - hrtimer handler for timerlat.
+ */
+static enum hrtimer_restart timerlat_irq(struct hrtimer *timer)
+{
+ struct osnoise_variables *osn_var = this_cpu_osn_var();
+ struct timerlat_variables *tlat;
+ struct timerlat_sample s;
+ u64 now;
+ u64 diff;
+
+ /*
+ * I am not sure if the timer was armed for this CPU. So, get
+ * the timerlat struct from the timer itself, not from this
+ * CPU.
+ */
+ tlat = container_of(timer, struct timerlat_variables, timer);
+
+ now = ktime_to_ns(hrtimer_cb_get_time(&tlat->timer));
+
+ /*
+ * Enable the osnoise: events for thread and softirq.
+ */
+ tlat->tracing_thread = true;
+
+ osn_var->thread.arrival_time = time_get();
+
+ /*
+ * A hardirq is running: the timer IRQ. It is for sure preempting
+ * a thread, and potentially preempting a softirq.
+ *
+ * At this point, it is not interesting to know the duration of the
+ * preempted thread (and maybe softirq), but how much time they will
+ * delay the beginning of the execution of the timer thread.
+ *
+ * To get the correct (net) delay added by the softirq, its delta_start
+ * is set as the IRQ one. In this way, at the return of the IRQ, the delta
+ * start of the softirq will be zeroed, accounting then only the time
+ * after that.
+ *
+ * The thread follows the same principle. However, if a softirq is
+ * running, the thread needs to receive the softirq delta_start. The
+ * reason is that the softirq will be the last to be unfolded,
+ * resetting the thread delay to zero.
+ *
+ * The PREEMPT_RT is a special case, though. As softirqs run as threads
+ * on RT, moving the thread is enough.
+ */
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT) && osn_var->softirq.delta_start) {
+ copy_int_safe_time(osn_var, &osn_var->thread.delta_start,
+ &osn_var->softirq.delta_start);
+
+ copy_int_safe_time(osn_var, &osn_var->softirq.delta_start,
+ &osn_var->irq.delta_start);
+ } else {
+ copy_int_safe_time(osn_var, &osn_var->thread.delta_start,
+ &osn_var->irq.delta_start);
+ }
+
+ /*
+ * Compare the current time with the expected time.
+ */
+ diff = now - tlat->abs_period;
+
+ tlat->count++;
+ s.seqnum = tlat->count;
+ s.timer_latency = diff;
+ s.context = IRQ_CONTEXT;
+
+ trace_timerlat_sample(&s);
+
+ if (osnoise_data.stop_tracing) {
+ if (time_to_us(diff) >= osnoise_data.stop_tracing) {
+
+ /*
+ * At this point, if stop_tracing is set and <= print_stack,
+ * print_stack is also set, and the stack would normally be
+ * printed from the thread handler.
+ *
+ * Thus, print the stack trace here, as it is helpful to find the
+ * root cause of an IRQ latency.
+ */
+ if (osnoise_data.stop_tracing <= osnoise_data.print_stack) {
+ timerlat_save_stack(0);
+ timerlat_dump_stack(time_to_us(diff));
+ }
+
+ osnoise_stop_tracing();
+ notify_new_max_latency(diff);
+
+ wake_up_process(tlat->kthread);
+
+ return HRTIMER_NORESTART;
+ }
+ }
+
+ wake_up_process(tlat->kthread);
+
+ if (osnoise_data.print_stack)
+ timerlat_save_stack(0);
+
+ return HRTIMER_NORESTART;
+}
+
+/*
+ * wait_next_period - Wait for the next period for timerlat
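+ *
+ * The hrtimer is armed for the next absolute period, timerlat_period_us
+ * after the previous one (e.g. every 1 ms with timerlat_period_us=1000),
+ * and the thread sleeps until timerlat_irq() wakes it up.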
+ */
+static int wait_next_period(struct timerlat_variables *tlat)
+{
+ ktime_t next_abs_period, now;
+ u64 rel_period = osnoise_data.timerlat_period * 1000;
+
+ now = hrtimer_cb_get_time(&tlat->timer);
+ next_abs_period = ns_to_ktime(tlat->abs_period + rel_period);
+
+ /*
+ * Save the next abs_period.
+ */
+ tlat->abs_period = (u64) ktime_to_ns(next_abs_period);
+
+ /*
+ * If the new abs_period is in the past, skip the activation.
+ */
+ while (ktime_compare(now, next_abs_period) > 0) {
+ next_abs_period = ns_to_ktime(tlat->abs_period + rel_period);
+ tlat->abs_period = (u64) ktime_to_ns(next_abs_period);
+ }
+
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ hrtimer_start(&tlat->timer, next_abs_period, HRTIMER_MODE_ABS_PINNED_HARD);
+ schedule();
+ return 1;
+}
+
+/*
+ * timerlat_main - Timerlat main
+ */
+static int timerlat_main(void *data)
+{
+ struct osnoise_variables *osn_var = this_cpu_osn_var();
+ struct timerlat_variables *tlat = this_cpu_tmr_var();
+ struct timerlat_sample s;
+ struct sched_param sp;
+ unsigned long flags;
+ u64 now, diff;
+
+ /*
+ * Make the thread RT, that is how cyclictest is usually used.
+ */
+ sp.sched_priority = DEFAULT_TIMERLAT_PRIO;
+ sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
+
+ /*
+ * This thread was created pinned to the CPU using PF_NO_SETAFFINITY.
+ * The problem is that cgroup does not allow PF_NO_SETAFFINITY thread.
+ *
+ * To work around this limitation, disable migration and remove the
+ * flag.
+ */
+ migrate_disable();
+ raw_spin_lock_irqsave(&current->pi_lock, flags);
+ current->flags &= ~(PF_NO_SETAFFINITY);
+ raw_spin_unlock_irqrestore(&current->pi_lock, flags);
+
+ tlat->count = 0;
+ tlat->tracing_thread = false;
+
+ hrtimer_init(&tlat->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
+ tlat->timer.function = timerlat_irq;
+ tlat->kthread = current;
+ osn_var->pid = current->pid;
+ /*
+ * Annotate the arrival time.
+ */
+ tlat->abs_period = hrtimer_cb_get_time(&tlat->timer);
+
+ wait_next_period(tlat);
+
+ osn_var->sampling = 1;
+
+ while (!kthread_should_stop()) {
+
+ now = ktime_to_ns(hrtimer_cb_get_time(&tlat->timer));
+ diff = now - tlat->abs_period;
+
+ s.seqnum = tlat->count;
+ s.timer_latency = diff;
+ s.context = THREAD_CONTEXT;
+
+ trace_timerlat_sample(&s);
+
+ notify_new_max_latency(diff);
+
+ timerlat_dump_stack(time_to_us(diff));
+
+ tlat->tracing_thread = false;
+ if (osnoise_data.stop_tracing_total)
+ if (time_to_us(diff) >= osnoise_data.stop_tracing_total)
+ osnoise_stop_tracing();
+
+ if (osnoise_migration_pending())
+ break;
+
+ wait_next_period(tlat);
+ }
+
+ hrtimer_cancel(&tlat->timer);
+ migrate_enable();
+ return 0;
+}
+#else /* CONFIG_TIMERLAT_TRACER */
+static int timerlat_main(void *data)
+{
+ return 0;
+}
+#endif /* CONFIG_TIMERLAT_TRACER */
+
+/*
+ * stop_kthread - stop a workload thread
+ */
+static void stop_kthread(unsigned int cpu)
+{
+ struct task_struct *kthread;
+
+ kthread = per_cpu(per_cpu_osnoise_var, cpu).kthread;
+ if (kthread) {
+ if (test_bit(OSN_WORKLOAD, &osnoise_options)) {
+ kthread_stop(kthread);
+ } else {
+ /*
+ * This is a user thread waiting on the timerlat_fd. We need
+ * to close all users, and the best way to guarantee this is
+ * by killing the thread. NOTE: this is a purpose specific file.
+ */
+ kill_pid(kthread->thread_pid, SIGKILL, 1);
+ put_task_struct(kthread);
+ }
+ per_cpu(per_cpu_osnoise_var, cpu).kthread = NULL;
+ } else {
+ /* if no workload, just return */
+ if (!test_bit(OSN_WORKLOAD, &osnoise_options)) {
+ /*
+ * This is set in the osnoise tracer case.
+ */
+ per_cpu(per_cpu_osnoise_var, cpu).sampling = false;
+ barrier();
+ return;
+ }
+ }
+}
+
+/*
+ * stop_per_cpu_kthreads - Stop per-cpu threads
+ *
+ * Stop the osnoise sampling threads. Use this on unload and at system
+ * shutdown.
+ */
+static void stop_per_cpu_kthreads(void)
+{
+ int cpu;
+
+ cpus_read_lock();
+
+ for_each_online_cpu(cpu)
+ stop_kthread(cpu);
+
+ cpus_read_unlock();
+}
+
+/*
+ * start_kthread - Start a workload thread
+ */
+static int start_kthread(unsigned int cpu)
+{
+ struct task_struct *kthread;
+ void *main = osnoise_main;
+ char comm[24];
+
+ if (timerlat_enabled()) {
+ snprintf(comm, 24, "timerlat/%d", cpu);
+ main = timerlat_main;
+ } else {
+ /* if no workload, just return */
+ if (!test_bit(OSN_WORKLOAD, &osnoise_options)) {
+ per_cpu(per_cpu_osnoise_var, cpu).sampling = true;
+ barrier();
+ return 0;
+ }
+ snprintf(comm, 24, "osnoise/%d", cpu);
+ }
+
+ kthread = kthread_run_on_cpu(main, NULL, cpu, comm);
+
+ if (IS_ERR(kthread)) {
+ pr_err(BANNER "could not start sampling thread\n");
+ stop_per_cpu_kthreads();
+ return -ENOMEM;
+ }
+
+ per_cpu(per_cpu_osnoise_var, cpu).kthread = kthread;
+
+ return 0;
+}
+
+/*
+ * start_per_cpu_kthreads - Kick off per-cpu osnoise sampling kthreads
+ *
+ * This starts the kernel threads that will look for osnoise on the
+ * allowed cpus.
+ */
+static int start_per_cpu_kthreads(void)
+{
+ struct cpumask *current_mask = &save_cpumask;
+ int retval = 0;
+ int cpu;
+
+ if (!test_bit(OSN_WORKLOAD, &osnoise_options)) {
+ if (timerlat_enabled())
+ return 0;
+ }
+
+ cpus_read_lock();
+ /*
+ * Run only on online CPUs in which osnoise is allowed to run.
+ */
+ cpumask_and(current_mask, cpu_online_mask, &osnoise_cpumask);
+
+ for_each_possible_cpu(cpu)
+ per_cpu(per_cpu_osnoise_var, cpu).kthread = NULL;
+
+ for_each_cpu(cpu, current_mask) {
+ retval = start_kthread(cpu);
+ if (retval) {
+ cpus_read_unlock();
+ stop_per_cpu_kthreads();
+ return retval;
+ }
+ }
+
+ cpus_read_unlock();
+
+ return retval;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void osnoise_hotplug_workfn(struct work_struct *dummy)
+{
+ unsigned int cpu = smp_processor_id();
+
+ mutex_lock(&trace_types_lock);
+
+ if (!osnoise_has_registered_instances())
+ goto out_unlock_trace;
+
+ mutex_lock(&interface_lock);
+ cpus_read_lock();
+
+ if (!cpumask_test_cpu(cpu, &osnoise_cpumask))
+ goto out_unlock;
+
+ start_kthread(cpu);
+
+out_unlock:
+ cpus_read_unlock();
+ mutex_unlock(&interface_lock);
+out_unlock_trace:
+ mutex_unlock(&trace_types_lock);
+}
+
+static DECLARE_WORK(osnoise_hotplug_work, osnoise_hotplug_workfn);
+
+/*
+ * osnoise_cpu_init - CPU hotplug online callback function
+ */
+static int osnoise_cpu_init(unsigned int cpu)
+{
+ schedule_work_on(cpu, &osnoise_hotplug_work);
+ return 0;
+}
+
+/*
+ * osnoise_cpu_die - CPU hotplug offline callback function
+ */
+static int osnoise_cpu_die(unsigned int cpu)
+{
+ stop_kthread(cpu);
+ return 0;
+}
+
+static void osnoise_init_hotplug_support(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "trace/osnoise:online",
+ osnoise_cpu_init, osnoise_cpu_die);
+ if (ret < 0)
+ pr_warn(BANNER "Error to init cpu hotplug support\n");
+
+ return;
+}
+#else /* CONFIG_HOTPLUG_CPU */
+static void osnoise_init_hotplug_support(void)
+{
+ return;
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/*
+ * seq file functions for the osnoise/options file.
+ */
+static void *s_options_start(struct seq_file *s, loff_t *pos)
+{
+ int option = *pos;
+
+ mutex_lock(&interface_lock);
+
+ if (option >= OSN_MAX)
+ return NULL;
+
+ return pos;
+}
+
+static void *s_options_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ int option = ++(*pos);
+
+ if (option >= OSN_MAX)
+ return NULL;
+
+ return pos;
+}
+
+static int s_options_show(struct seq_file *s, void *v)
+{
+ loff_t *pos = v;
+ int option = *pos;
+
+ if (option == OSN_DEFAULTS) {
+ if (osnoise_options == OSN_DEFAULT_OPTIONS)
+ seq_printf(s, "%s", osnoise_options_str[option]);
+ else
+ seq_printf(s, "NO_%s", osnoise_options_str[option]);
+ goto out;
+ }
+
+ if (test_bit(option, &osnoise_options))
+ seq_printf(s, "%s", osnoise_options_str[option]);
+ else
+ seq_printf(s, "NO_%s", osnoise_options_str[option]);
+
+out:
+ if (option != OSN_MAX)
+ seq_puts(s, " ");
+
+ return 0;
+}
+
+static void s_options_stop(struct seq_file *s, void *v)
+{
+ seq_puts(s, "\n");
+ mutex_unlock(&interface_lock);
+}
+
+static const struct seq_operations osnoise_options_seq_ops = {
+ .start = s_options_start,
+ .next = s_options_next,
+ .show = s_options_show,
+ .stop = s_options_stop
+};
+
+static int osnoise_options_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &osnoise_options_seq_ops);
+};
+
+/**
+ * osnoise_options_write - Write function for "options" entry
+ * @filp: The active open file structure
+ * @ubuf: The user buffer that contains the value to write
+ * @cnt: The maximum number of bytes to write to "file"
+ * @ppos: The current position in @file
+ *
+ * Writing the option name sets the option, writing the "NO_"
+ * prefix in front of the option name disables it.
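+ *
+ * For example, assuming tracefs is mounted at /sys/kernel/tracing:
+ *   echo NO_OSNOISE_WORKLOAD > /sys/kernel/tracing/osnoise/options
+ * disables the in-kernel workload, and writing OSNOISE_WORKLOAD back
+ * re-enables it.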
+ *
+ * Writing "DEFAULTS" resets the option values to the default ones.
+ */
+static ssize_t osnoise_options_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ int running, option, enable, retval;
+ char buf[256], *option_str;
+
+ if (cnt >= 256)
+ return -EINVAL;
+
+ if (copy_from_user(buf, ubuf, cnt))
+ return -EFAULT;
+
+ buf[cnt] = 0;
+
+ if (strncmp(buf, "NO_", 3)) {
+ option_str = strstrip(buf);
+ enable = true;
+ } else {
+ option_str = strstrip(&buf[3]);
+ enable = false;
+ }
+
+ option = match_string(osnoise_options_str, OSN_MAX, option_str);
+ if (option < 0)
+ return -EINVAL;
+
+ /*
+ * trace_types_lock is taken to avoid concurrency on start/stop.
+ */
+ mutex_lock(&trace_types_lock);
+ running = osnoise_has_registered_instances();
+ if (running)
+ stop_per_cpu_kthreads();
+
+ mutex_lock(&interface_lock);
+ /*
+ * avoid CPU hotplug operations that might read options.
+ */
+ cpus_read_lock();
+
+ retval = cnt;
+
+ if (enable) {
+ if (option == OSN_DEFAULTS)
+ osnoise_options = OSN_DEFAULT_OPTIONS;
+ else
+ set_bit(option, &osnoise_options);
+ } else {
+ if (option == OSN_DEFAULTS)
+ retval = -EINVAL;
+ else
+ clear_bit(option, &osnoise_options);
+ }
+
+ cpus_read_unlock();
+ mutex_unlock(&interface_lock);
+
+ if (running)
+ start_per_cpu_kthreads();
+ mutex_unlock(&trace_types_lock);
+
+ return retval;
+}
+
+/*
+ * osnoise_cpus_read - Read function for reading the "cpus" file
+ * @filp: The active open file structure
+ * @ubuf: The userspace provided buffer to read value into
+ * @cnt: The maximum number of bytes to read
+ * @ppos: The current "file" position
+ *
+ * Prints the "cpus" output into the user-provided buffer.
+ */
+static ssize_t
+osnoise_cpus_read(struct file *filp, char __user *ubuf, size_t count,
+ loff_t *ppos)
+{
+ char *mask_str;
+ int len;
+
+ mutex_lock(&interface_lock);
+
+ len = snprintf(NULL, 0, "%*pbl\n", cpumask_pr_args(&osnoise_cpumask)) + 1;
+ mask_str = kmalloc(len, GFP_KERNEL);
+ if (!mask_str) {
+ count = -ENOMEM;
+ goto out_unlock;
+ }
+
+ len = snprintf(mask_str, len, "%*pbl\n", cpumask_pr_args(&osnoise_cpumask));
+ if (len >= count) {
+ count = -EINVAL;
+ goto out_free;
+ }
+
+ count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
+
+out_free:
+ kfree(mask_str);
+out_unlock:
+ mutex_unlock(&interface_lock);
+
+ return count;
+}
+
+/*
+ * osnoise_cpus_write - Write function for "cpus" entry
+ * @filp: The active open file structure
+ * @ubuf: The user buffer that contains the value to write
+ * @cnt: The maximum number of bytes to write to "file"
+ * @ppos: The current position in @file
+ *
+ * This function provides a write implementation for the "cpus"
+ * interface of the osnoise tracer. By default, it lists all CPUs,
+ * allowing osnoise threads to run on any online CPU of the system.
+ * Writing a CPU list restricts the execution of osnoise to that
+ * set of CPUs. Why not use "tracing_cpumask"?
+ * Because the user might be interested in tracing what is running on
+ * other CPUs. For instance, one might run osnoise in one HT CPU
+ * while observing what is running on the sibling HT CPU.
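+ *
+ * For example, assuming tracefs is mounted at /sys/kernel/tracing:
+ *   echo 0-3 > /sys/kernel/tracing/osnoise/cpus
+ * restricts the osnoise/timerlat threads to CPUs 0 to 3.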
+ */
+static ssize_t
+osnoise_cpus_write(struct file *filp, const char __user *ubuf, size_t count,
+ loff_t *ppos)
+{
+ cpumask_var_t osnoise_cpumask_new;
+ int running, err;
+ char buf[256];
+
+ if (count >= 256)
+ return -EINVAL;
+
+ if (copy_from_user(buf, ubuf, count))
+ return -EFAULT;
+
+ if (!zalloc_cpumask_var(&osnoise_cpumask_new, GFP_KERNEL))
+ return -ENOMEM;
+
+ err = cpulist_parse(buf, osnoise_cpumask_new);
+ if (err)
+ goto err_free;
+
+ /*
+ * trace_types_lock is taken to avoid concurrency on start/stop.
+ */
+ mutex_lock(&trace_types_lock);
+ running = osnoise_has_registered_instances();
+ if (running)
+ stop_per_cpu_kthreads();
+
+ mutex_lock(&interface_lock);
+ /*
+ * osnoise_cpumask is read by CPU hotplug operations.
+ */
+ cpus_read_lock();
+
+ cpumask_copy(&osnoise_cpumask, osnoise_cpumask_new);
+
+ cpus_read_unlock();
+ mutex_unlock(&interface_lock);
+
+ if (running)
+ start_per_cpu_kthreads();
+ mutex_unlock(&trace_types_lock);
+
+ free_cpumask_var(osnoise_cpumask_new);
+ return count;
+
+err_free:
+ free_cpumask_var(osnoise_cpumask_new);
+
+ return err;
+}
+
+#ifdef CONFIG_TIMERLAT_TRACER
+static int timerlat_fd_open(struct inode *inode, struct file *file)
+{
+ struct osnoise_variables *osn_var;
+ struct timerlat_variables *tlat;
+ long cpu = (long) inode->i_cdev;
+
+ mutex_lock(&interface_lock);
+
+ /*
+ * This file is accessible only if timerlat is enabled, and
+ * NO_OSNOISE_WORKLOAD is set.
+ */
+ if (!timerlat_enabled() || test_bit(OSN_WORKLOAD, &osnoise_options)) {
+ mutex_unlock(&interface_lock);
+ return -EINVAL;
+ }
+
+ migrate_disable();
+
+ osn_var = this_cpu_osn_var();
+
+ /*
+ * The osn_var->pid holds the single access to this file.
+ */
+ if (osn_var->pid) {
+ mutex_unlock(&interface_lock);
+ migrate_enable();
+ return -EBUSY;
+ }
+
+ /*
+ * timerlat tracer is a per-cpu tracer. Check if the user-space task
+ * is also pinned to a single CPU. The tracer later monitors if the
+ * task migrates and then disables the tracer if it does. However, it
+ * is worth doing this basic acceptance test to avoid an obviously
+ * wrong setup.
+ */
+ if (current->nr_cpus_allowed > 1 || cpu != smp_processor_id()) {
+ mutex_unlock(&interface_lock);
+ migrate_enable();
+ return -EPERM;
+ }
+
+ /*
+ * From now on, it is good to go.
+ */
+ file->private_data = inode->i_cdev;
+
+ get_task_struct(current);
+
+ osn_var->kthread = current;
+ osn_var->pid = current->pid;
+
+ /*
+ * Setup is done.
+ */
+ mutex_unlock(&interface_lock);
+
+ tlat = this_cpu_tmr_var();
+ tlat->count = 0;
+
+ hrtimer_init(&tlat->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
+ tlat->timer.function = timerlat_irq;
+
+ migrate_enable();
+ return 0;
+};
+
+/*
+ * timerlat_fd_read - Read function for "timerlat_fd" file
+ * @file: The active open file structure
+ * @ubuf: The userspace provided buffer to read value into
+ * @cnt: The maximum number of bytes to read
+ * @ppos: The current "file" position
+ *
+ * Prints 1 on timerlat, the number of interferences on osnoise, -1 on error.
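+ *
+ * A user-space workload is expected to pin itself to a single CPU, open
+ * the matching osnoise/per_cpu/cpu$N/timerlat_fd file, and call read()
+ * in a loop: each read() returns after the next timerlat IRQ, playing
+ * the role of the in-kernel timerlat thread (e.g., rtla timerlat can
+ * use this interface for its user-space threads).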
+ */
+static ssize_t
+timerlat_fd_read(struct file *file, char __user *ubuf, size_t count,
+ loff_t *ppos)
+{
+ long cpu = (long) file->private_data;
+ struct osnoise_variables *osn_var;
+ struct timerlat_variables *tlat;
+ struct timerlat_sample s;
+ s64 diff;
+ u64 now;
+
+ migrate_disable();
+
+ tlat = this_cpu_tmr_var();
+
+ /*
+ * While in user-space, the thread is migratable. There is nothing
+ * we can do about it.
+ * So, if the thread is running on another CPU, stop the machinery.
+ */
+ if (cpu == smp_processor_id()) {
+ if (tlat->uthread_migrate) {
+ migrate_enable();
+ return -EINVAL;
+ }
+ } else {
+ per_cpu_ptr(&per_cpu_timerlat_var, cpu)->uthread_migrate = 1;
+ osnoise_taint("timerlat user thread migrate\n");
+ osnoise_stop_tracing();
+ migrate_enable();
+ return -EINVAL;
+ }
+
+ osn_var = this_cpu_osn_var();
+
+ /*
+ * The timerlat in user-space runs in a different order:
+ * the read() starts from the execution of the previous occurrence,
+ * sleeping for the next occurrence.
+ *
+ * So, skip if we enter read() before the first wakeup from the
+ * timerlat IRQ:
+ */
+ if (likely(osn_var->sampling)) {
+ now = ktime_to_ns(hrtimer_cb_get_time(&tlat->timer));
+ diff = now - tlat->abs_period;
+
+ /*
+ * it was not a timer firing, but some other signal?
+ */
+ if (diff < 0)
+ goto out;
+
+ s.seqnum = tlat->count;
+ s.timer_latency = diff;
+ s.context = THREAD_URET;
+
+ trace_timerlat_sample(&s);
+
+ notify_new_max_latency(diff);
+
+ tlat->tracing_thread = false;
+ if (osnoise_data.stop_tracing_total)
+ if (time_to_us(diff) >= osnoise_data.stop_tracing_total)
+ osnoise_stop_tracing();
+ } else {
+ tlat->tracing_thread = false;
+ tlat->kthread = current;
+
+ /* Annotate now to drift new period */
+ tlat->abs_period = hrtimer_cb_get_time(&tlat->timer);
+
+ osn_var->sampling = 1;
+ }
+
+ /* wait for the next period */
+ wait_next_period(tlat);
+
+ /* This is the wakeup from this cycle */
+ now = ktime_to_ns(hrtimer_cb_get_time(&tlat->timer));
+ diff = now - tlat->abs_period;
+
+ /*
+ * it was not a timer firing, but some other signal?
+ */
+ if (diff < 0)
+ goto out;
+
+ s.seqnum = tlat->count;
+ s.timer_latency = diff;
+ s.context = THREAD_CONTEXT;
+
+ trace_timerlat_sample(&s);
+
+ if (osnoise_data.stop_tracing_total) {
+ if (time_to_us(diff) >= osnoise_data.stop_tracing_total) {
+ timerlat_dump_stack(time_to_us(diff));
+ notify_new_max_latency(diff);
+ osnoise_stop_tracing();
+ }
+ }
+
+out:
+ migrate_enable();
+ return 0;
+}
+
+static int timerlat_fd_release(struct inode *inode, struct file *file)
+{
+ struct osnoise_variables *osn_var;
+ struct timerlat_variables *tlat_var;
+ long cpu = (long) file->private_data;
+
+ migrate_disable();
+ mutex_lock(&interface_lock);
+
+ osn_var = per_cpu_ptr(&per_cpu_osnoise_var, cpu);
+ tlat_var = per_cpu_ptr(&per_cpu_timerlat_var, cpu);
+
+ hrtimer_cancel(&tlat_var->timer);
+ memset(tlat_var, 0, sizeof(*tlat_var));
+
+ osn_var->sampling = 0;
+ osn_var->pid = 0;
+
+ /*
+ * We are leaving, not being stopped... see stop_kthread();
+ */
+ if (osn_var->kthread) {
+ put_task_struct(osn_var->kthread);
+ osn_var->kthread = NULL;
+ }
+
+ mutex_unlock(&interface_lock);
+ migrate_enable();
+ return 0;
+}
+#endif
+
+/*
+ * osnoise/runtime_us: cannot be greater than the period.
+ */
+static struct trace_min_max_param osnoise_runtime = {
+ .lock = &interface_lock,
+ .val = &osnoise_data.sample_runtime,
+ .max = &osnoise_data.sample_period,
+ .min = NULL,
+};
+
+/*
+ * osnoise/period_us: cannot be smaller than the runtime.
+ */
+static struct trace_min_max_param osnoise_period = {
+ .lock = &interface_lock,
+ .val = &osnoise_data.sample_period,
+ .max = NULL,
+ .min = &osnoise_data.sample_runtime,
+};
+
+/*
+ * osnoise/stop_tracing_us: no limit.
+ */
+static struct trace_min_max_param osnoise_stop_tracing_in = {
+ .lock = &interface_lock,
+ .val = &osnoise_data.stop_tracing,
+ .max = NULL,
+ .min = NULL,
+};
+
+/*
+ * osnoise/stop_tracing_total_us: no limit.
+ */
+static struct trace_min_max_param osnoise_stop_tracing_total = {
+ .lock = &interface_lock,
+ .val = &osnoise_data.stop_tracing_total,
+ .max = NULL,
+ .min = NULL,
+};
+
+#ifdef CONFIG_TIMERLAT_TRACER
+/*
+ * osnoise/print_stack: print the stacktrace of the IRQ handler if the total
+ * latency is higher than val.
+ */
+static struct trace_min_max_param osnoise_print_stack = {
+ .lock = &interface_lock,
+ .val = &osnoise_data.print_stack,
+ .max = NULL,
+ .min = NULL,
+};
+
+/*
+ * osnoise/timerlat_period: min 100 us, max 1 s
+ */
+static u64 timerlat_min_period = 100;
+static u64 timerlat_max_period = 1000000;
+static struct trace_min_max_param timerlat_period = {
+ .lock = &interface_lock,
+ .val = &osnoise_data.timerlat_period,
+ .max = &timerlat_max_period,
+ .min = &timerlat_min_period,
+};
+
+static const struct file_operations timerlat_fd_fops = {
+ .open = timerlat_fd_open,
+ .read = timerlat_fd_read,
+ .release = timerlat_fd_release,
+ .llseek = generic_file_llseek,
+};
+#endif
+
+static const struct file_operations cpus_fops = {
+ .open = tracing_open_generic,
+ .read = osnoise_cpus_read,
+ .write = osnoise_cpus_write,
+ .llseek = generic_file_llseek,
+};
+
+static const struct file_operations osnoise_options_fops = {
+ .open = osnoise_options_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+ .write = osnoise_options_write
+};
+
+#ifdef CONFIG_TIMERLAT_TRACER
+#ifdef CONFIG_STACKTRACE
+static int init_timerlat_stack_tracefs(struct dentry *top_dir)
+{
+ struct dentry *tmp;
+
+ tmp = tracefs_create_file("print_stack", TRACE_MODE_WRITE, top_dir,
+ &osnoise_print_stack, &trace_min_max_fops);
+ if (!tmp)
+ return -ENOMEM;
+
+ return 0;
+}
+#else /* CONFIG_STACKTRACE */
+static int init_timerlat_stack_tracefs(struct dentry *top_dir)
+{
+ return 0;
+}
+#endif /* CONFIG_STACKTRACE */
+
+static int osnoise_create_cpu_timerlat_fd(struct dentry *top_dir)
+{
+ struct dentry *timerlat_fd;
+ struct dentry *per_cpu;
+ struct dentry *cpu_dir;
+ char cpu_str[30]; /* see trace.c: tracing_init_tracefs_percpu() */
+ long cpu;
+
+ /*
+ * Why not use the tracing instance per_cpu/ dir?
+ *
+ * Because osnoise/timerlat have a single workload, having
+ * multiple files like these would be a waste of memory.
+ */
+ per_cpu = tracefs_create_dir("per_cpu", top_dir);
+ if (!per_cpu)
+ return -ENOMEM;
+
+ for_each_possible_cpu(cpu) {
+ snprintf(cpu_str, 30, "cpu%ld", cpu);
+ cpu_dir = tracefs_create_dir(cpu_str, per_cpu);
+ if (!cpu_dir)
+ goto out_clean;
+
+ timerlat_fd = trace_create_file("timerlat_fd", TRACE_MODE_READ,
+ cpu_dir, NULL, &timerlat_fd_fops);
+ if (!timerlat_fd)
+ goto out_clean;
+
+ /* Record the CPU */
+ d_inode(timerlat_fd)->i_cdev = (void *)(cpu);
+ }
+
+ return 0;
+
+out_clean:
+ tracefs_remove(per_cpu);
+ return -ENOMEM;
+}
+
+/*
+ * init_timerlat_tracefs - A function to initialize the timerlat interface files
+ */
+static int init_timerlat_tracefs(struct dentry *top_dir)
+{
+ struct dentry *tmp;
+ int retval;
+
+ tmp = tracefs_create_file("timerlat_period_us", TRACE_MODE_WRITE, top_dir,
+ &timerlat_period, &trace_min_max_fops);
+ if (!tmp)
+ return -ENOMEM;
+
+ retval = osnoise_create_cpu_timerlat_fd(top_dir);
+ if (retval)
+ return retval;
+
+ return init_timerlat_stack_tracefs(top_dir);
+}
+#else /* CONFIG_TIMERLAT_TRACER */
+static int init_timerlat_tracefs(struct dentry *top_dir)
+{
+ return 0;
+}
+#endif /* CONFIG_TIMERLAT_TRACER */
+
+/*
+ * init_tracefs - A function to initialize the tracefs interface files
+ *
+ * This function creates entries in tracefs for "osnoise" and "timerlat".
+ * It creates these directories in the tracing directory, and within that
+ * directory the user can change and view the configs.
+ */
+static int init_tracefs(void)
+{
+ struct dentry *top_dir;
+ struct dentry *tmp;
+ int ret;
+
+ ret = tracing_init_dentry();
+ if (ret)
+ return -ENOMEM;
+
+ top_dir = tracefs_create_dir("osnoise", NULL);
+ if (!top_dir)
+ return 0;
+
+ tmp = tracefs_create_file("period_us", TRACE_MODE_WRITE, top_dir,
+ &osnoise_period, &trace_min_max_fops);
+ if (!tmp)
+ goto err;
+
+ tmp = tracefs_create_file("runtime_us", TRACE_MODE_WRITE, top_dir,
+ &osnoise_runtime, &trace_min_max_fops);
+ if (!tmp)
+ goto err;
+
+ tmp = tracefs_create_file("stop_tracing_us", TRACE_MODE_WRITE, top_dir,
+ &osnoise_stop_tracing_in, &trace_min_max_fops);
+ if (!tmp)
+ goto err;
+
+ tmp = tracefs_create_file("stop_tracing_total_us", TRACE_MODE_WRITE, top_dir,
+ &osnoise_stop_tracing_total, &trace_min_max_fops);
+ if (!tmp)
+ goto err;
+
+ tmp = trace_create_file("cpus", TRACE_MODE_WRITE, top_dir, NULL, &cpus_fops);
+ if (!tmp)
+ goto err;
+
+ tmp = trace_create_file("options", TRACE_MODE_WRITE, top_dir, NULL,
+ &osnoise_options_fops);
+ if (!tmp)
+ goto err;
+
+ ret = init_timerlat_tracefs(top_dir);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ tracefs_remove(top_dir);
+ return -ENOMEM;
+}
+
+static int osnoise_hook_events(void)
+{
+ int retval;
+
+ /*
+ * Trace is already hooked, we are re-enabling from
+ * a stop_tracing_*.
+ */
+ if (trace_osnoise_callback_enabled)
+ return 0;
+
+ retval = hook_irq_events();
+ if (retval)
+ return -EINVAL;
+
+ retval = hook_softirq_events();
+ if (retval)
+ goto out_unhook_irq;
+
+ retval = hook_thread_events();
+ /*
+ * All fine!
+ */
+ if (!retval)
+ return 0;
+
+ unhook_softirq_events();
+out_unhook_irq:
+ unhook_irq_events();
+ return -EINVAL;
+}
+
+static void osnoise_unhook_events(void)
+{
+ unhook_thread_events();
+ unhook_softirq_events();
+ unhook_irq_events();
+}
+
+/*
+ * osnoise_workload_start - start the workload and hook to events
+ */
+static int osnoise_workload_start(void)
+{
+ int retval;
+
+ /*
+ * Instances need to be registered after calling workload
+ * start. Hence, if there is already an instance, the
+ * workload was already registered. Otherwise, this
+ * code is on the way to register the first instance,
+ * and the workload will start.
+ */
+ if (osnoise_has_registered_instances())
+ return 0;
+
+ osn_var_reset_all();
+
+ retval = osnoise_hook_events();
+ if (retval)
+ return retval;
+
+ /*
+ * Make sure that ftrace_nmi_enter/exit() see reset values
+ * before enabling trace_osnoise_callback_enabled.
+ */
+ barrier();
+ trace_osnoise_callback_enabled = true;
+
+ retval = start_per_cpu_kthreads();
+ if (retval) {
+ trace_osnoise_callback_enabled = false;
+ /*
+ * Make sure that ftrace_nmi_enter/exit() see
+ * trace_osnoise_callback_enabled as false before continuing.
+ */
+ barrier();
+
+ osnoise_unhook_events();
+ return retval;
+ }
+
+ return 0;
+}
+
+/*
+ * osnoise_workload_stop - stop the workload and unhook the events
+ */
+static void osnoise_workload_stop(void)
+{
+ /*
+ * Instances need to be unregistered before calling
+ * stop. Hence, if there is a registered instance, more
+ * than one instance is running, and the workload will not
+ * yet stop. Otherwise, this code is on the way to disable
+ * the last instance, and the workload can stop.
+ */
+ if (osnoise_has_registered_instances())
+ return;
+
+ /*
+ * If callbacks were already disabled in a previous stop
+ * call, there is no need to disable then again.
+ *
+ * For instance, this happens when tracing is stopped via:
+ * echo 0 > tracing_on
+ * echo nop > current_tracer.
+ */
+ if (!trace_osnoise_callback_enabled)
+ return;
+
+ trace_osnoise_callback_enabled = false;
+ /*
+ * Make sure that ftrace_nmi_enter/exit() see
+ * trace_osnoise_callback_enabled as false before continuing.
+ */
+ barrier();
+
+ stop_per_cpu_kthreads();
+
+ osnoise_unhook_events();
+}
+
+static void osnoise_tracer_start(struct trace_array *tr)
+{
+ int retval;
+
+ /*
+ * If the instance is already registered, there is no need to
+ * register it again.
+ */
+ if (osnoise_instance_registered(tr))
+ return;
+
+ retval = osnoise_workload_start();
+ if (retval)
+ pr_err(BANNER "Error starting osnoise tracer\n");
+
+ osnoise_register_instance(tr);
+}
+
+static void osnoise_tracer_stop(struct trace_array *tr)
+{
+ osnoise_unregister_instance(tr);
+ osnoise_workload_stop();
+}
+
+static int osnoise_tracer_init(struct trace_array *tr)
+{
+ /*
+ * Only allow osnoise tracer if timerlat tracer is not running
+ * already.
+ */
+ if (timerlat_enabled())
+ return -EBUSY;
+
+ tr->max_latency = 0;
+
+ osnoise_tracer_start(tr);
+ return 0;
+}
+
+static void osnoise_tracer_reset(struct trace_array *tr)
+{
+ osnoise_tracer_stop(tr);
+}
+
+static struct tracer osnoise_tracer __read_mostly = {
+ .name = "osnoise",
+ .init = osnoise_tracer_init,
+ .reset = osnoise_tracer_reset,
+ .start = osnoise_tracer_start,
+ .stop = osnoise_tracer_stop,
+ .print_header = print_osnoise_headers,
+ .allow_instances = true,
+};
+
+#ifdef CONFIG_TIMERLAT_TRACER
+static void timerlat_tracer_start(struct trace_array *tr)
+{
+ int retval;
+
+ /*
+ * If the instance is already registered, there is no need to
+ * register it again.
+ */
+ if (osnoise_instance_registered(tr))
+ return;
+
+ retval = osnoise_workload_start();
+ if (retval)
+ pr_err(BANNER "Error starting timerlat tracer\n");
+
+ osnoise_register_instance(tr);
+
+ return;
+}
+
+static void timerlat_tracer_stop(struct trace_array *tr)
+{
+ int cpu;
+
+ osnoise_unregister_instance(tr);
+
+ /*
+ * Instruct the threads to stop only if this is the last instance.
+ */
+ if (!osnoise_has_registered_instances()) {
+ for_each_online_cpu(cpu)
+ per_cpu(per_cpu_osnoise_var, cpu).sampling = 0;
+ }
+
+ osnoise_workload_stop();
+}
+
+static int timerlat_tracer_init(struct trace_array *tr)
+{
+ /*
+ * Only allow timerlat tracer if osnoise tracer is not running already.
+ */
+ if (osnoise_has_registered_instances() && !osnoise_data.timerlat_tracer)
+ return -EBUSY;
+
+ /*
+ * If this is the first instance, set timerlat_tracer to block
+ * osnoise tracer start.
+ */
+ if (!osnoise_has_registered_instances())
+ osnoise_data.timerlat_tracer = 1;
+
+ tr->max_latency = 0;
+ timerlat_tracer_start(tr);
+
+ return 0;
+}
+
+static void timerlat_tracer_reset(struct trace_array *tr)
+{
+ timerlat_tracer_stop(tr);
+
+ /*
+ * If this is the last instance, reset timerlat_tracer allowing
+ * osnoise to be started.
+ */
+ if (!osnoise_has_registered_instances())
+ osnoise_data.timerlat_tracer = 0;
+}
+
+static struct tracer timerlat_tracer __read_mostly = {
+ .name = "timerlat",
+ .init = timerlat_tracer_init,
+ .reset = timerlat_tracer_reset,
+ .start = timerlat_tracer_start,
+ .stop = timerlat_tracer_stop,
+ .print_header = print_timerlat_headers,
+ .allow_instances = true,
+};
+
+__init static int init_timerlat_tracer(void)
+{
+ return register_tracer(&timerlat_tracer);
+}
+#else /* CONFIG_TIMERLAT_TRACER */
+__init static int init_timerlat_tracer(void)
+{
+ return 0;
+}
+#endif /* CONFIG_TIMERLAT_TRACER */
+
+__init static int init_osnoise_tracer(void)
+{
+ int ret;
+
+ mutex_init(&interface_lock);
+
+ cpumask_copy(&osnoise_cpumask, cpu_all_mask);
+
+ ret = register_tracer(&osnoise_tracer);
+ if (ret) {
+ pr_err(BANNER "Error registering osnoise!\n");
+ return ret;
+ }
+
+ ret = init_timerlat_tracer();
+ if (ret) {
+ pr_err(BANNER "Error registering timerlat!\n");
+ return ret;
+ }
+
+ osnoise_init_hotplug_support();
+
+ INIT_LIST_HEAD_RCU(&osnoise_instances);
+
+ init_tracefs();
+
+ return 0;
+}
+late_initcall(init_osnoise_tracer);
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 73976de7f8cc..d8b302d01083 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -8,8 +8,10 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>
+#include <linux/kprobes.h>
#include <linux/sched/clock.h>
#include <linux/sched/mm.h>
+#include <linux/idr.h>
#include "trace_output.h"
@@ -20,8 +22,6 @@ DECLARE_RWSEM(trace_event_sem);
static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
-static int next_event_type = __TRACE_LAST_TYPE + 1;
-
enum print_line_t trace_print_bputs_msg_only(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
@@ -221,8 +221,11 @@ trace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len,
const char *ret = trace_seq_buffer_ptr(p);
const char *fmt = concatenate ? "%*phN" : "%*ph";
- for (i = 0; i < buf_len; i += 16)
+ for (i = 0; i < buf_len; i += 16) {
+ if (!concatenate && i != 0)
+ trace_seq_putc(p, ' ');
trace_seq_printf(p, fmt, min(buf_len - i, 16), &buf[i]);
+ }
trace_seq_putc(p, 0);
return ret;
@@ -312,13 +315,24 @@ int trace_raw_output_prep(struct trace_iterator *iter,
}
EXPORT_SYMBOL(trace_raw_output_prep);
-static int trace_output_raw(struct trace_iterator *iter, char *name,
- char *fmt, va_list ap)
+void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ trace_check_vprintf(iter, trace_event_format(iter, fmt), ap);
+ va_end(ap);
+}
+EXPORT_SYMBOL(trace_event_printf);
+
+static __printf(3, 0)
+int trace_output_raw(struct trace_iterator *iter, char *name,
+ char *fmt, va_list ap)
{
struct trace_seq *s = &iter->seq;
trace_seq_printf(s, "%s: ", name);
- trace_seq_vprintf(s, fmt, ap);
+ trace_seq_vprintf(s, trace_event_format(iter, fmt), ap);
return trace_handle_return(s);
}
@@ -336,25 +350,15 @@ int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
}
EXPORT_SYMBOL_GPL(trace_output_call);
-#ifdef CONFIG_KRETPROBES
-static inline const char *kretprobed(const char *name)
+static inline const char *kretprobed(const char *name, unsigned long addr)
{
- static const char tramp_name[] = "kretprobe_trampoline";
- int size = sizeof(tramp_name);
-
- if (strncmp(tramp_name, name, size) == 0)
+ if (is_kretprobe_trampoline(addr))
return "[unknown/kretprobe'd]";
return name;
}
-#else
-static inline const char *kretprobed(const char *name)
-{
- return name;
-}
-#endif /* CONFIG_KRETPROBES */
-static void
-seq_print_sym(struct trace_seq *s, unsigned long address, bool offset)
+void
+trace_seq_print_sym(struct trace_seq *s, unsigned long address, bool offset)
{
#ifdef CONFIG_KALLSYMS
char str[KSYM_SYMBOL_LEN];
@@ -364,7 +368,7 @@ seq_print_sym(struct trace_seq *s, unsigned long address, bool offset)
sprint_symbol(str, address);
else
kallsyms_lookup(address, NULL, NULL, NULL, str);
- name = kretprobed(str);
+ name = kretprobed(str, address);
if (name && strlen(name)) {
trace_seq_puts(s, name);
@@ -400,7 +404,7 @@ static int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
vmstart = vma->vm_start;
}
if (file) {
- ret = trace_seq_path(s, &file->f_path);
+ ret = trace_seq_path(s, file_user_path(file));
if (ret)
trace_seq_printf(s, "[+0x%lx]",
ip - vmstart);
@@ -420,7 +424,7 @@ seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
goto out;
}
- seq_print_sym(s, ip, sym_flags & TRACE_ITER_SYM_OFFSET);
+ trace_seq_print_sym(s, ip, sym_flags & TRACE_ITER_SYM_OFFSET);
if (sym_flags & TRACE_ITER_SYM_ADDR)
trace_seq_printf(s, " <" IP_FMT ">", ip);
@@ -444,14 +448,18 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
char irqs_off;
int hardirq;
int softirq;
+ int bh_off;
int nmi;
nmi = entry->flags & TRACE_FLAG_NMI;
hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
+ bh_off = entry->flags & TRACE_FLAG_BH_OFF;
irqs_off =
+ (entry->flags & TRACE_FLAG_IRQS_OFF && bh_off) ? 'D' :
(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
+ bh_off ? 'b' :
(entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' :
'.';
@@ -482,8 +490,13 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
trace_seq_printf(s, "%c%c%c",
irqs_off, need_resched, hardsoft_irq);
- if (entry->preempt_count)
- trace_seq_printf(s, "%x", entry->preempt_count);
+ if (entry->preempt_count & 0xf)
+ trace_seq_printf(s, "%x", entry->preempt_count & 0xf);
+ else
+ trace_seq_putc(s, '.');
+
+ if (entry->preempt_count & 0xf0)
+ trace_seq_printf(s, "%x", entry->preempt_count >> 4);
else
trace_seq_putc(s, '.');
@@ -497,7 +510,7 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
trace_find_cmdline(entry->pid, comm);
- trace_seq_printf(s, "%8.8s-%-5d %3d",
+ trace_seq_printf(s, "%8.8s-%-7d %3d",
comm, entry->pid, cpu);
return trace_print_lat_fmt(s, entry);
@@ -577,26 +590,39 @@ lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
return !trace_seq_has_overflowed(s);
}
+static void trace_print_time(struct trace_seq *s, struct trace_iterator *iter,
+ unsigned long long ts)
+{
+ unsigned long secs, usec_rem;
+ unsigned long long t;
+
+ if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) {
+ t = ns2usecs(ts);
+ usec_rem = do_div(t, USEC_PER_SEC);
+ secs = (unsigned long)t;
+ trace_seq_printf(s, " %5lu.%06lu", secs, usec_rem);
+ } else
+ trace_seq_printf(s, " %12llu", ts);
+}
+
int trace_print_context(struct trace_iterator *iter)
{
struct trace_array *tr = iter->tr;
struct trace_seq *s = &iter->seq;
struct trace_entry *entry = iter->ent;
- unsigned long long t;
- unsigned long secs, usec_rem;
char comm[TASK_COMM_LEN];
trace_find_cmdline(entry->pid, comm);
- trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
+ trace_seq_printf(s, "%16s-%-7d ", comm, entry->pid);
if (tr->trace_flags & TRACE_ITER_RECORD_TGID) {
unsigned int tgid = trace_find_tgid(entry->pid);
if (!tgid)
- trace_seq_printf(s, "(-----) ");
+ trace_seq_printf(s, "(-------) ");
else
- trace_seq_printf(s, "(%5d) ", tgid);
+ trace_seq_printf(s, "(%7d) ", tgid);
}
trace_seq_printf(s, "[%03d] ", iter->cpu);
@@ -604,13 +630,8 @@ int trace_print_context(struct trace_iterator *iter)
if (tr->trace_flags & TRACE_ITER_IRQ_INFO)
trace_print_lat_fmt(s, entry);
- if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) {
- t = ns2usecs(iter->ts);
- usec_rem = do_div(t, USEC_PER_SEC);
- secs = (unsigned long)t;
- trace_seq_printf(s, " %5lu.%06lu: ", secs, usec_rem);
- } else
- trace_seq_printf(s, " %12llu: ", iter->ts);
+ trace_print_time(s, iter, iter->ts);
+ trace_seq_puts(s, ": ");
return !trace_seq_has_overflowed(s);
}
@@ -636,9 +657,9 @@ int trace_print_lat_context(struct trace_iterator *iter)
trace_find_cmdline(entry->pid, comm);
trace_seq_printf(
- s, "%16s %5d %3d %d %08x %08lx ",
+ s, "%16s %7d %3d %d %08x %08lx ",
comm, entry->pid, iter->cpu, entry->flags,
- entry->preempt_count, iter->idx);
+ entry->preempt_count & 0xf, iter->idx);
} else {
lat_print_generic(s, entry, iter->cpu);
}
@@ -670,34 +691,24 @@ struct trace_event *ftrace_find_event(int type)
return NULL;
}
-static LIST_HEAD(ftrace_event_list);
+static DEFINE_IDA(trace_event_ida);
-static int trace_search_list(struct list_head **list)
+static void free_trace_event_type(int type)
{
- struct trace_event *e;
- int last = __TRACE_LAST_TYPE;
-
- if (list_empty(&ftrace_event_list)) {
- *list = &ftrace_event_list;
- return last + 1;
- }
+ if (type >= __TRACE_LAST_TYPE)
+ ida_free(&trace_event_ida, type);
+}
- /*
- * We used up all possible max events,
- * lets see if somebody freed one.
- */
- list_for_each_entry(e, &ftrace_event_list, list) {
- if (e->type != last + 1)
- break;
- last++;
- }
+static int alloc_trace_event_type(void)
+{
+ int next;
- /* Did we used up all 65 thousand events??? */
- if ((last + 1) > TRACE_EVENT_TYPE_MAX)
+ /* Skip static defined type numbers */
+ next = ida_alloc_range(&trace_event_ida, __TRACE_LAST_TYPE,
+ TRACE_EVENT_TYPE_MAX, GFP_KERNEL);
+ if (next < 0)
return 0;
-
- *list = &e->list;
- return last + 1;
+ return next;
}
void trace_event_read_lock(void)
@@ -738,31 +749,12 @@ int register_trace_event(struct trace_event *event)
if (WARN_ON(!event->funcs))
goto out;
- INIT_LIST_HEAD(&event->list);
-
if (!event->type) {
- struct list_head *list = NULL;
-
- if (next_event_type > TRACE_EVENT_TYPE_MAX) {
-
- event->type = trace_search_list(&list);
- if (!event->type)
- goto out;
-
- } else {
-
- event->type = next_event_type++;
- list = &ftrace_event_list;
- }
-
- if (WARN_ON(ftrace_find_event(event->type)))
+ event->type = alloc_trace_event_type();
+ if (!event->type)
goto out;
-
- list_add_tail(&event->list, list);
-
- } else if (event->type > __TRACE_LAST_TYPE) {
- printk(KERN_WARNING "Need to add type to trace.h\n");
- WARN_ON(1);
+ } else if (WARN(event->type > __TRACE_LAST_TYPE,
+ "Need to add type to trace.h")) {
goto out;
} else {
/* Is this event already used */
@@ -797,7 +789,7 @@ EXPORT_SYMBOL_GPL(register_trace_event);
int __unregister_trace_event(struct trace_event *event)
{
hlist_del(&event->node);
- list_del(&event->list);
+ free_trace_event_type(event->type);
return 0;
}
@@ -819,6 +811,176 @@ EXPORT_SYMBOL_GPL(unregister_trace_event);
* Standard events
*/
+static void print_array(struct trace_iterator *iter, void *pos,
+ struct ftrace_event_field *field)
+{
+ int offset;
+ int len;
+ int i;
+
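+	/*
+	 * A dynamic array is described by a 32-bit word holding the data
+	 * offset in the low 16 bits and its length in the high 16 bits.
+	 */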
+ offset = *(int *)pos & 0xffff;
+ len = *(int *)pos >> 16;
+
+ if (field)
+ offset += field->offset + sizeof(int);
+
+ if (offset + len > iter->ent_size) {
+ trace_seq_puts(&iter->seq, "<OVERFLOW>");
+ return;
+ }
+
+ pos = (void *)iter->ent + offset;
+
+ for (i = 0; i < len; i++, pos++) {
+ if (i)
+ trace_seq_putc(&iter->seq, ',');
+ trace_seq_printf(&iter->seq, "%02x", *(unsigned char *)pos);
+ }
+}
+
+static void print_fields(struct trace_iterator *iter, struct trace_event_call *call,
+ struct list_head *head)
+{
+ struct ftrace_event_field *field;
+ int offset;
+ int len;
+ int ret;
+ void *pos;
+
+ list_for_each_entry_reverse(field, head, link) {
+ trace_seq_printf(&iter->seq, " %s=", field->name);
+ if (field->offset + field->size > iter->ent_size) {
+ trace_seq_puts(&iter->seq, "<OVERFLOW>");
+ continue;
+ }
+ pos = (void *)iter->ent + field->offset;
+
+ switch (field->filter_type) {
+ case FILTER_COMM:
+ case FILTER_STATIC_STRING:
+ trace_seq_printf(&iter->seq, "%.*s", field->size, (char *)pos);
+ break;
+ case FILTER_RDYN_STRING:
+ case FILTER_DYN_STRING:
+ offset = *(int *)pos & 0xffff;
+ len = *(int *)pos >> 16;
+
+ if (field->filter_type == FILTER_RDYN_STRING)
+ offset += field->offset + sizeof(int);
+
+ if (offset + len > iter->ent_size) {
+ trace_seq_puts(&iter->seq, "<OVERFLOW>");
+ break;
+ }
+ pos = (void *)iter->ent + offset;
+ trace_seq_printf(&iter->seq, "%.*s", len, (char *)pos);
+ break;
+ case FILTER_PTR_STRING:
+ if (!iter->fmt_size)
+ trace_iter_expand_format(iter);
+ pos = *(void **)pos;
+ ret = strncpy_from_kernel_nofault(iter->fmt, pos,
+ iter->fmt_size);
+ if (ret < 0)
+ trace_seq_printf(&iter->seq, "(0x%px)", pos);
+ else
+ trace_seq_printf(&iter->seq, "(0x%px:%s)",
+ pos, iter->fmt);
+ break;
+ case FILTER_TRACE_FN:
+ pos = *(void **)pos;
+ trace_seq_printf(&iter->seq, "%pS", pos);
+ break;
+ case FILTER_CPU:
+ case FILTER_OTHER:
+ switch (field->size) {
+ case 1:
+ if (isprint(*(char *)pos)) {
+ trace_seq_printf(&iter->seq, "'%c'",
+ *(unsigned char *)pos);
+ }
+ trace_seq_printf(&iter->seq, "(%d)",
+ *(unsigned char *)pos);
+ break;
+ case 2:
+ trace_seq_printf(&iter->seq, "0x%x (%d)",
+ *(unsigned short *)pos,
+ *(unsigned short *)pos);
+ break;
+ case 4:
+ /* dynamic array info is 4 bytes */
+ if (strstr(field->type, "__data_loc")) {
+ print_array(iter, pos, NULL);
+ break;
+ }
+
+ if (strstr(field->type, "__rel_loc")) {
+ print_array(iter, pos, field);
+ break;
+ }
+
+ trace_seq_printf(&iter->seq, "0x%x (%d)",
+ *(unsigned int *)pos,
+ *(unsigned int *)pos);
+ break;
+ case 8:
+ trace_seq_printf(&iter->seq, "0x%llx (%lld)",
+ *(unsigned long long *)pos,
+ *(unsigned long long *)pos);
+ break;
+ default:
+ trace_seq_puts(&iter->seq, "<INVALID-SIZE>");
+ break;
+ }
+ break;
+ default:
+ trace_seq_puts(&iter->seq, "<INVALID-TYPE>");
+ }
+ }
+ trace_seq_putc(&iter->seq, '\n');
+}
+
+enum print_line_t print_event_fields(struct trace_iterator *iter,
+ struct trace_event *event)
+{
+ struct trace_event_call *call;
+ struct list_head *head;
+
+ /* ftrace defined events have separate call structures */
+ if (event->type <= __TRACE_LAST_TYPE) {
+ bool found = false;
+
+ down_read(&trace_event_sem);
+ list_for_each_entry(call, &ftrace_events, list) {
+ if (call->event.type == event->type) {
+ found = true;
+ break;
+ }
+ /* No need to search all events */
+ if (call->event.type > __TRACE_LAST_TYPE)
+ break;
+ }
+ up_read(&trace_event_sem);
+ if (!found) {
+ trace_seq_printf(&iter->seq, "UNKNOWN TYPE %d\n", event->type);
+ goto out;
+ }
+ } else {
+ call = container_of(event, struct trace_event_call, event);
+ }
+ head = trace_get_fields(call);
+
+ trace_seq_printf(&iter->seq, "%s:", trace_event_name(call));
+
+ if (head && !list_empty(head))
+ print_fields(iter, call, head);
+ else
+ trace_seq_puts(&iter->seq, "No fields found\n");
+
+ out:
+ return trace_handle_return(&iter->seq);
+}
+
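The helpers above decode dynamic-array fields by unpacking a single 32-bit __data_loc / __rel_loc word. A minimal user-space sketch of that decoding, illustrative only and mirroring print_array() and the dynamic-string cases in print_fields():

#include <stdio.h>

/*
 * Low 16 bits: offset of the data inside the record.
 * High 16 bits: length of the data.
 * For __rel_loc the offset is additionally relative to the end of the
 * field itself (hence the "field->offset + sizeof(int)" adjustment above).
 */
static void decode_data_loc(unsigned int loc)
{
	unsigned int offset = loc & 0xffff;
	unsigned int len = loc >> 16;

	printf("offset=%u len=%u\n", offset, len);
}

int main(void)
{
	decode_data_loc(0x00100020);	/* prints "offset=32 len=16" */
	return 0;
}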
enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
struct trace_event *event)
{
@@ -827,6 +989,17 @@ enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
return trace_handle_return(&iter->seq);
}
+static void print_fn_trace(struct trace_seq *s, unsigned long ip,
+ unsigned long parent_ip, int flags)
+{
+ seq_print_ip_sym(s, ip, flags);
+
+ if ((flags & TRACE_ITER_PRINT_PARENT) && parent_ip) {
+ trace_seq_puts(s, " <-");
+ seq_print_ip_sym(s, parent_ip, flags);
+ }
+}
+
/* TRACE_FN */
static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
struct trace_event *event)
@@ -836,13 +1009,7 @@ static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
trace_assign_type(field, iter->ent);
- seq_print_ip_sym(s, field->ip, flags);
-
- if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
- trace_seq_puts(s, " <-");
- seq_print_ip_sym(s, field->parent_ip, flags);
- }
-
+ print_fn_trace(s, field->ip, field->parent_ip, flags);
trace_seq_putc(s, '\n');
return trace_handle_return(s);
@@ -917,7 +1084,7 @@ static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
S = task_index_to_char(field->prev_state);
trace_find_cmdline(field->next_pid, comm);
trace_seq_printf(&iter->seq,
- " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
+ " %7d:%3d:%c %s [%03d] %7d:%3d:%c %s\n",
field->prev_pid,
field->prev_prio,
S, delim,
@@ -1179,7 +1346,6 @@ trace_hwlat_print(struct trace_iterator *iter, int flags,
return trace_handle_return(s);
}
-
static enum print_line_t
trace_hwlat_raw(struct trace_iterator *iter, int flags,
struct trace_event *event)
@@ -1209,6 +1375,124 @@ static struct trace_event trace_hwlat_event = {
.funcs = &trace_hwlat_funcs,
};
+/* TRACE_OSNOISE */
+static enum print_line_t
+trace_osnoise_print(struct trace_iterator *iter, int flags,
+ struct trace_event *event)
+{
+ struct trace_entry *entry = iter->ent;
+ struct trace_seq *s = &iter->seq;
+ struct osnoise_entry *field;
+ u64 ratio, ratio_dec;
+ u64 net_runtime;
+
+ trace_assign_type(field, entry);
+
+ /*
+ * compute the available % of cpu time.
+ */
+ net_runtime = field->runtime - field->noise;
+ ratio = net_runtime * 10000000;
+ do_div(ratio, field->runtime);
+ ratio_dec = do_div(ratio, 100000);
+
+ trace_seq_printf(s, "%llu %10llu %3llu.%05llu %7llu",
+ field->runtime,
+ field->noise,
+ ratio, ratio_dec,
+ field->max_sample);
+
+ trace_seq_printf(s, " %6u", field->hw_count);
+ trace_seq_printf(s, " %6u", field->nmi_count);
+ trace_seq_printf(s, " %6u", field->irq_count);
+ trace_seq_printf(s, " %6u", field->softirq_count);
+ trace_seq_printf(s, " %6u", field->thread_count);
+
+ trace_seq_putc(s, '\n');
+
+ return trace_handle_return(s);
+}
+
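For reference, a hedged user-space sketch of the ratio arithmetic in trace_osnoise_print() above, with do_div() modelled by plain division and modulo. With runtime = 1,000,000,000 ns and noise = 5,000,000 ns the available CPU is printed as 99.50000:

#include <stdio.h>

int main(void)
{
	unsigned long long runtime = 1000000000ULL;
	unsigned long long noise = 5000000ULL;
	unsigned long long ratio = (runtime - noise) * 10000000ULL / runtime;
	unsigned long long ratio_dec = ratio % 100000;	/* do_div() remainder */

	ratio /= 100000;				/* do_div() quotient */
	printf("%llu.%05llu\n", ratio, ratio_dec);	/* prints "99.50000" */
	return 0;
}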
+static enum print_line_t
+trace_osnoise_raw(struct trace_iterator *iter, int flags,
+ struct trace_event *event)
+{
+ struct osnoise_entry *field;
+ struct trace_seq *s = &iter->seq;
+
+ trace_assign_type(field, iter->ent);
+
+ trace_seq_printf(s, "%lld %llu %llu %u %u %u %u %u\n",
+ field->runtime,
+ field->noise,
+ field->max_sample,
+ field->hw_count,
+ field->nmi_count,
+ field->irq_count,
+ field->softirq_count,
+ field->thread_count);
+
+ return trace_handle_return(s);
+}
+
+static struct trace_event_functions trace_osnoise_funcs = {
+ .trace = trace_osnoise_print,
+ .raw = trace_osnoise_raw,
+};
+
+static struct trace_event trace_osnoise_event = {
+ .type = TRACE_OSNOISE,
+ .funcs = &trace_osnoise_funcs,
+};
+
+/* TRACE_TIMERLAT */
+
+static char *timerlat_lat_context[] = {"irq", "thread", "user-ret"};
+static enum print_line_t
+trace_timerlat_print(struct trace_iterator *iter, int flags,
+ struct trace_event *event)
+{
+ struct trace_entry *entry = iter->ent;
+ struct trace_seq *s = &iter->seq;
+ struct timerlat_entry *field;
+
+ trace_assign_type(field, entry);
+
+ trace_seq_printf(s, "#%-5u context %6s timer_latency %9llu ns\n",
+ field->seqnum,
+ timerlat_lat_context[field->context],
+ field->timer_latency);
+
+ return trace_handle_return(s);
+}
+
+static enum print_line_t
+trace_timerlat_raw(struct trace_iterator *iter, int flags,
+ struct trace_event *event)
+{
+ struct timerlat_entry *field;
+ struct trace_seq *s = &iter->seq;
+
+ trace_assign_type(field, iter->ent);
+
+ trace_seq_printf(s, "%u %d %llu\n",
+ field->seqnum,
+ field->context,
+ field->timer_latency);
+
+ return trace_handle_return(s);
+}
+
+static struct trace_event_functions trace_timerlat_funcs = {
+ .trace = trace_timerlat_print,
+ .raw = trace_timerlat_raw,
+};
+
+static struct trace_event trace_timerlat_event = {
+ .type = TRACE_TIMERLAT,
+ .funcs = &trace_timerlat_funcs,
+};
+
/* TRACE_BPUTS */
static enum print_line_t
trace_bputs_print(struct trace_iterator *iter, int flags,
@@ -1363,6 +1647,51 @@ static struct trace_event trace_raw_data_event = {
.funcs = &trace_raw_data_funcs,
};
+static enum print_line_t
+trace_func_repeats_raw(struct trace_iterator *iter, int flags,
+ struct trace_event *event)
+{
+ struct func_repeats_entry *field;
+ struct trace_seq *s = &iter->seq;
+
+ trace_assign_type(field, iter->ent);
+
+ trace_seq_printf(s, "%lu %lu %u %llu\n",
+ field->ip,
+ field->parent_ip,
+ field->count,
+ FUNC_REPEATS_GET_DELTA_TS(field));
+
+ return trace_handle_return(s);
+}
+
+static enum print_line_t
+trace_func_repeats_print(struct trace_iterator *iter, int flags,
+ struct trace_event *event)
+{
+ struct func_repeats_entry *field;
+ struct trace_seq *s = &iter->seq;
+
+ trace_assign_type(field, iter->ent);
+
+ print_fn_trace(s, field->ip, field->parent_ip, flags);
+ trace_seq_printf(s, " (repeats: %u, last_ts:", field->count);
+ trace_print_time(s, iter,
+ iter->ts - FUNC_REPEATS_GET_DELTA_TS(field));
+ trace_seq_puts(s, ")\n");
+
+ return trace_handle_return(s);
+}
+
+static struct trace_event_functions trace_func_repeats_funcs = {
+ .trace = trace_func_repeats_print,
+ .raw = trace_func_repeats_raw,
+};
+
+static struct trace_event trace_func_repeats_event = {
+ .type = TRACE_FUNC_REPEATS,
+ .funcs = &trace_func_repeats_funcs,
+};
static struct trace_event *events[] __initdata = {
&trace_fn_event,
@@ -1374,26 +1703,23 @@ static struct trace_event *events[] __initdata = {
&trace_bprint_event,
&trace_print_event,
&trace_hwlat_event,
+ &trace_osnoise_event,
+ &trace_timerlat_event,
&trace_raw_data_event,
+ &trace_func_repeats_event,
NULL
};
-__init static int init_events(void)
+__init int init_events(void)
{
struct trace_event *event;
int i, ret;
for (i = 0; events[i]; i++) {
event = events[i];
-
ret = register_trace_event(event);
- if (!ret) {
- printk(KERN_WARNING "event %d failed to register\n",
- event->type);
- WARN_ON_ONCE(1);
- }
+ WARN_ONCE(!ret, "event %d failed to register", event->type);
}
return 0;
}
-early_initcall(init_events);
diff --git a/kernel/trace/trace_output.h b/kernel/trace/trace_output.h
index 2f742b74e7e6..dca40f1f1da4 100644
--- a/kernel/trace/trace_output.h
+++ b/kernel/trace/trace_output.h
@@ -16,8 +16,11 @@ extern int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
unsigned long sym_flags);
+extern void trace_seq_print_sym(struct trace_seq *s, unsigned long address, bool offset);
extern int trace_print_context(struct trace_iterator *iter);
extern int trace_print_lat_context(struct trace_iterator *iter);
+extern enum print_line_t print_event_fields(struct trace_iterator *iter,
+ struct trace_event *event);
extern void trace_event_read_lock(void);
extern void trace_event_read_unlock(void);
diff --git a/kernel/trace/trace_preemptirq.c b/kernel/trace/trace_preemptirq.c
index f10073e62603..e37446f7916e 100644
--- a/kernel/trace/trace_preemptirq.c
+++ b/kernel/trace/trace_preemptirq.c
@@ -15,6 +15,20 @@
#define CREATE_TRACE_POINTS
#include <trace/events/preemptirq.h>
+/*
+ * Use regular trace points on architectures that implement noinstr
+ * tooling: these calls will only happen with RCU enabled, which can
+ * use a regular tracepoint.
+ *
+ * On older architectures, use the rcuidle tracing methods (which
+ * aren't NMI-safe - so exclude NMI contexts):
+ */
+#ifdef CONFIG_ARCH_WANTS_NO_INSTR
+#define trace(point) trace_##point
+#else
+#define trace(point) if (!in_nmi()) trace_##point##_rcuidle
+#endif
+
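An illustrative expansion of the trace() helper macro above (not part of the patch):

/*
 * With CONFIG_ARCH_WANTS_NO_INSTR:
 *
 *	trace(irq_enable)(CALLER_ADDR0, CALLER_ADDR1);
 * becomes
 *	trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);
 *
 * Without it (older architectures):
 *
 *	if (!in_nmi())
 *		trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
 */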
#ifdef CONFIG_TRACE_IRQFLAGS
/* Per-cpu variable to prevent redundant calls when IRQs already off */
static DEFINE_PER_CPU(int, tracing_irq_cpu);
@@ -28,8 +42,7 @@ static DEFINE_PER_CPU(int, tracing_irq_cpu);
void trace_hardirqs_on_prepare(void)
{
if (this_cpu_read(tracing_irq_cpu)) {
- if (!in_nmi())
- trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);
+ trace(irq_enable)(CALLER_ADDR0, CALLER_ADDR1);
tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
this_cpu_write(tracing_irq_cpu, 0);
}
@@ -40,13 +53,12 @@ NOKPROBE_SYMBOL(trace_hardirqs_on_prepare);
void trace_hardirqs_on(void)
{
if (this_cpu_read(tracing_irq_cpu)) {
- if (!in_nmi())
- trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+ trace(irq_enable)(CALLER_ADDR0, CALLER_ADDR1);
tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
this_cpu_write(tracing_irq_cpu, 0);
}
- lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+ lockdep_hardirqs_on_prepare();
lockdep_hardirqs_on(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on);
@@ -63,8 +75,7 @@ void trace_hardirqs_off_finish(void)
if (!this_cpu_read(tracing_irq_cpu)) {
this_cpu_write(tracing_irq_cpu, 1);
tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
- if (!in_nmi())
- trace_irq_disable(CALLER_ADDR0, CALLER_ADDR1);
+ trace(irq_disable)(CALLER_ADDR0, CALLER_ADDR1);
}
}
@@ -78,56 +89,24 @@ void trace_hardirqs_off(void)
if (!this_cpu_read(tracing_irq_cpu)) {
this_cpu_write(tracing_irq_cpu, 1);
tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
- if (!in_nmi())
- trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+ trace(irq_disable)(CALLER_ADDR0, CALLER_ADDR1);
}
}
EXPORT_SYMBOL(trace_hardirqs_off);
NOKPROBE_SYMBOL(trace_hardirqs_off);
-
-__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
-{
- if (this_cpu_read(tracing_irq_cpu)) {
- if (!in_nmi())
- trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
- tracer_hardirqs_on(CALLER_ADDR0, caller_addr);
- this_cpu_write(tracing_irq_cpu, 0);
- }
-
- lockdep_hardirqs_on_prepare(CALLER_ADDR0);
- lockdep_hardirqs_on(CALLER_ADDR0);
-}
-EXPORT_SYMBOL(trace_hardirqs_on_caller);
-NOKPROBE_SYMBOL(trace_hardirqs_on_caller);
-
-__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
-{
- if (!this_cpu_read(tracing_irq_cpu)) {
- this_cpu_write(tracing_irq_cpu, 1);
- tracer_hardirqs_off(CALLER_ADDR0, caller_addr);
- if (!in_nmi())
- trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
- }
-
- lockdep_hardirqs_off(CALLER_ADDR0);
-}
-EXPORT_SYMBOL(trace_hardirqs_off_caller);
-NOKPROBE_SYMBOL(trace_hardirqs_off_caller);
#endif /* CONFIG_TRACE_IRQFLAGS */
#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
- if (!in_nmi())
- trace_preempt_enable_rcuidle(a0, a1);
+ trace(preempt_enable)(a0, a1);
tracer_preempt_on(a0, a1);
}
void trace_preempt_off(unsigned long a0, unsigned long a1)
{
- if (!in_nmi())
- trace_preempt_disable_rcuidle(a0, a1);
+ trace(preempt_disable)(a0, a1);
tracer_preempt_off(a0, a1);
}
#endif
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index d4e31e969206..29f6e95439b6 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -96,7 +96,7 @@ static int module_trace_bprintk_format_notify(struct notifier_block *self,
if (val == MODULE_STATE_COMING)
hold_module_trace_bprintk_format(start, end);
}
- return 0;
+ return NOTIFY_OK;
}
/*
@@ -174,7 +174,7 @@ __init static int
module_trace_bprintk_format_notify(struct notifier_block *self,
unsigned long val, void *data)
{
- return 0;
+ return NOTIFY_OK;
}
static inline const char **
find_next_mod_format(int start_index, void *v, const char **fmt, loff_t *pos)
@@ -251,6 +251,17 @@ int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap)
}
EXPORT_SYMBOL_GPL(__ftrace_vprintk);
+bool trace_is_tracepoint_string(const char *str)
+{
+ const char **ptr = __start___tracepoint_str;
+
+ for (ptr = __start___tracepoint_str; ptr < __stop___tracepoint_str; ptr++) {
+ if (str == *ptr)
+ return true;
+ }
+ return false;
+}
+
static const char **find_next(void *v, loff_t *pos)
{
const char **fmt = v;
@@ -367,13 +378,13 @@ static const struct file_operations ftrace_formats_fops = {
static __init int init_trace_printk_function_export(void)
{
- struct dentry *d_tracer;
+ int ret;
- d_tracer = tracing_init_dentry();
- if (IS_ERR(d_tracer))
+ ret = tracing_init_dentry();
+ if (ret)
return 0;
- trace_create_file("printk_formats", 0444, d_tracer,
+ trace_create_file("printk_formats", TRACE_MODE_READ, NULL,
NULL, &ftrace_formats_fops);
return 0;
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index d2867ccc6aca..34289f9c6707 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -11,6 +11,9 @@
*/
#define pr_fmt(fmt) "trace_probe: " fmt
+#include <linux/bpf.h>
+#include "trace_btf.h"
+
#include "trace_probe.h"
#undef C
@@ -50,6 +53,7 @@ DEFINE_BASIC_PRINT_TYPE_FUNC(x8, u8, "0x%x")
DEFINE_BASIC_PRINT_TYPE_FUNC(x16, u16, "0x%x")
DEFINE_BASIC_PRINT_TYPE_FUNC(x32, u32, "0x%x")
DEFINE_BASIC_PRINT_TYPE_FUNC(x64, u64, "0x%Lx")
+DEFINE_BASIC_PRINT_TYPE_FUNC(char, u8, "'%c'")
int PRINT_TYPE_FUNC_NAME(symbol)(struct trace_seq *s, void *data, void *ent)
{
@@ -64,7 +68,7 @@ int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s, void *data, void *ent)
int len = *(u32 *)data >> 16;
if (!len)
- trace_seq_puts(s, "(fault)");
+ trace_seq_puts(s, FAULT_STRING);
else
trace_seq_printf(s, "\"%s\"",
(const char *)get_loc_data(data, ent));
@@ -76,9 +80,11 @@ const char PRINT_TYPE_FMT_NAME(string)[] = "\\\"%s\\\"";
/* Fetch type information table */
static const struct fetch_type probe_fetch_types[] = {
/* Special types */
- __ASSIGN_FETCH_TYPE("string", string, string, sizeof(u32), 1,
+ __ASSIGN_FETCH_TYPE("string", string, string, sizeof(u32), 1, 1,
+ "__data_loc char[]"),
+ __ASSIGN_FETCH_TYPE("ustring", string, string, sizeof(u32), 1, 1,
"__data_loc char[]"),
- __ASSIGN_FETCH_TYPE("ustring", string, string, sizeof(u32), 1,
+ __ASSIGN_FETCH_TYPE("symstr", string, string, sizeof(u32), 1, 1,
"__data_loc char[]"),
/* Basic types */
ASSIGN_FETCH_TYPE(u8, u8, 0),
@@ -93,15 +99,21 @@ static const struct fetch_type probe_fetch_types[] = {
ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0),
ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0),
ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0),
+ ASSIGN_FETCH_TYPE_ALIAS(char, u8, u8, 0),
ASSIGN_FETCH_TYPE_ALIAS(symbol, ADDR_FETCH_TYPE, ADDR_FETCH_TYPE, 0),
ASSIGN_FETCH_TYPE_END
};
-static const struct fetch_type *find_fetch_type(const char *type)
+static const struct fetch_type *find_fetch_type(const char *type, unsigned long flags)
{
int i;
+ /* Reject the symbol/symstr for uprobes */
+ if (type && (flags & TPARG_FL_USER) &&
+ (!strcmp(type, "symbol") || !strcmp(type, "symstr")))
+ return NULL;
+
if (!type)
type = DEFAULT_FETCH_TYPE_STR;
@@ -119,13 +131,13 @@ static const struct fetch_type *find_fetch_type(const char *type)
switch (bs) {
case 8:
- return find_fetch_type("u8");
+ return find_fetch_type("u8", flags);
case 16:
- return find_fetch_type("u16");
+ return find_fetch_type("u16", flags);
case 32:
- return find_fetch_type("u32");
+ return find_fetch_type("u32", flags);
case 64:
- return find_fetch_type("u64");
+ return find_fetch_type("u64", flags);
default:
goto fail;
}
@@ -168,7 +180,7 @@ void __trace_probe_log_err(int offset, int err_type)
if (!trace_probe_log.argv)
return;
- /* Recalcurate the length and allocate buffer */
+ /* Recalculate the length and allocate buffer */
for (i = 0; i < trace_probe_log.argc; i++) {
if (i == trace_probe_log.index)
pos = len;
@@ -182,7 +194,7 @@ void __trace_probe_log_err(int offset, int err_type)
/**
* Set the error position is next to the last arg + space.
* Note that len includes the terminal null and the cursor
- * appaers at pos + 1.
+ * appears at pos + 1.
*/
pos = len;
offset = 0;
@@ -233,6 +245,9 @@ int traceprobe_parse_event_name(const char **pevent, const char **pgroup,
int len;
slash = strchr(event, '/');
+ if (!slash)
+ slash = strchr(event, '.');
+
if (slash) {
if (slash == event) {
trace_probe_log_err(offset, NO_GROUP_NAME);
@@ -242,8 +257,8 @@ int traceprobe_parse_event_name(const char **pevent, const char **pgroup,
trace_probe_log_err(offset, GROUP_TOO_LONG);
return -EINVAL;
}
- strlcpy(buf, event, slash - event + 1);
- if (!is_good_name(buf)) {
+ strscpy(buf, event, slash - event + 1);
+ if (!is_good_system_name(buf)) {
trace_probe_log_err(offset, BAD_GROUP_NAME);
return -EINVAL;
}
@@ -254,6 +269,10 @@ int traceprobe_parse_event_name(const char **pevent, const char **pgroup,
}
len = strlen(event);
if (len == 0) {
+ if (slash) {
+ *pevent = NULL;
+ return 0;
+ }
trace_probe_log_err(offset, NO_EVENT_NAME);
return -EINVAL;
} else if (len > MAX_EVENT_NAME_LEN) {
@@ -267,62 +286,574 @@ int traceprobe_parse_event_name(const char **pevent, const char **pgroup,
return 0;
}
+static int parse_trace_event_arg(char *arg, struct fetch_insn *code,
+ struct traceprobe_parse_context *ctx)
+{
+ struct ftrace_event_field *field;
+ struct list_head *head;
+
+ head = trace_get_fields(ctx->event);
+ list_for_each_entry(field, head, link) {
+ if (!strcmp(arg, field->name)) {
+ code->op = FETCH_OP_TP_ARG;
+ code->data = field;
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+#ifdef CONFIG_PROBE_EVENTS_BTF_ARGS
+
+static u32 btf_type_int(const struct btf_type *t)
+{
+ return *(u32 *)(t + 1);
+}
+
+static bool btf_type_is_char_ptr(struct btf *btf, const struct btf_type *type)
+{
+ const struct btf_type *real_type;
+ u32 intdata;
+ s32 tid;
+
+ real_type = btf_type_skip_modifiers(btf, type->type, &tid);
+ if (!real_type)
+ return false;
+
+ if (BTF_INFO_KIND(real_type->info) != BTF_KIND_INT)
+ return false;
+
+ intdata = btf_type_int(real_type);
+ return !(BTF_INT_ENCODING(intdata) & BTF_INT_SIGNED)
+ && BTF_INT_BITS(intdata) == 8;
+}
+
+static bool btf_type_is_char_array(struct btf *btf, const struct btf_type *type)
+{
+ const struct btf_type *real_type;
+ const struct btf_array *array;
+ u32 intdata;
+ s32 tid;
+
+ if (BTF_INFO_KIND(type->info) != BTF_KIND_ARRAY)
+ return false;
+
+ array = (const struct btf_array *)(type + 1);
+
+ real_type = btf_type_skip_modifiers(btf, array->type, &tid);
+
+ intdata = btf_type_int(real_type);
+ return !(BTF_INT_ENCODING(intdata) & BTF_INT_SIGNED)
+ && BTF_INT_BITS(intdata) == 8;
+}
+
+static int check_prepare_btf_string_fetch(char *typename,
+ struct fetch_insn **pcode,
+ struct traceprobe_parse_context *ctx)
+{
+ struct btf *btf = ctx->btf;
+
+ if (!btf || !ctx->last_type)
+ return 0;
+
+ /* char [] does not need any change. */
+ if (btf_type_is_char_array(btf, ctx->last_type))
+ return 0;
+
+	/* char * requires dereferencing the pointer. */
+ if (btf_type_is_char_ptr(btf, ctx->last_type)) {
+ struct fetch_insn *code = *pcode + 1;
+
+ if (code->op == FETCH_OP_END) {
+ trace_probe_log_err(ctx->offset, TOO_MANY_OPS);
+ return -E2BIG;
+ }
+ if (typename[0] == 'u')
+ code->op = FETCH_OP_UDEREF;
+ else
+ code->op = FETCH_OP_DEREF;
+ code->offset = 0;
+ *pcode = code;
+ return 0;
+ }
+ /* Other types are not available for string */
+ trace_probe_log_err(ctx->offset, BAD_TYPE4STR);
+ return -EINVAL;
+}
+
+static const char *fetch_type_from_btf_type(struct btf *btf,
+ const struct btf_type *type,
+ struct traceprobe_parse_context *ctx)
+{
+ u32 intdata;
+
+	/* TODO: const char * could be converted to a string */
+ switch (BTF_INFO_KIND(type->info)) {
+ case BTF_KIND_ENUM:
+ /* enum is "int", so convert to "s32" */
+ return "s32";
+ case BTF_KIND_ENUM64:
+ return "s64";
+ case BTF_KIND_PTR:
+ /* pointer will be converted to "x??" */
+ if (IS_ENABLED(CONFIG_64BIT))
+ return "x64";
+ else
+ return "x32";
+ case BTF_KIND_INT:
+ intdata = btf_type_int(type);
+ if (BTF_INT_ENCODING(intdata) & BTF_INT_SIGNED) {
+ switch (BTF_INT_BITS(intdata)) {
+ case 8:
+ return "s8";
+ case 16:
+ return "s16";
+ case 32:
+ return "s32";
+ case 64:
+ return "s64";
+ }
+ } else { /* unsigned */
+ switch (BTF_INT_BITS(intdata)) {
+ case 8:
+ return "u8";
+ case 16:
+ return "u16";
+ case 32:
+ return "u32";
+ case 64:
+ return "u64";
+ }
+ /* bitfield, size is encoded in the type */
+ ctx->last_bitsize = BTF_INT_BITS(intdata);
+ ctx->last_bitoffs += BTF_INT_OFFSET(intdata);
+ return "u64";
+ }
+ }
+ /* TODO: support other types */
+
+ return NULL;
+}
+
+static int query_btf_context(struct traceprobe_parse_context *ctx)
+{
+ const struct btf_param *param;
+ const struct btf_type *type;
+ struct btf *btf;
+ s32 nr;
+
+ if (ctx->btf)
+ return 0;
+
+ if (!ctx->funcname)
+ return -EINVAL;
+
+ type = btf_find_func_proto(ctx->funcname, &btf);
+ if (!type)
+ return -ENOENT;
+
+ ctx->btf = btf;
+ ctx->proto = type;
+
+ /* ctx->params is optional, since func(void) will not have params. */
+ nr = 0;
+ param = btf_get_func_param(type, &nr);
+ if (!IS_ERR_OR_NULL(param)) {
+ /* Hide the first 'data' argument of tracepoint */
+ if (ctx->flags & TPARG_FL_TPOINT) {
+ nr--;
+ param++;
+ }
+ }
+
+ if (nr > 0) {
+ ctx->nr_params = nr;
+ ctx->params = param;
+ } else {
+ ctx->nr_params = 0;
+ ctx->params = NULL;
+ }
+
+ return 0;
+}
+
+static void clear_btf_context(struct traceprobe_parse_context *ctx)
+{
+ if (ctx->btf) {
+ btf_put(ctx->btf);
+ ctx->btf = NULL;
+ ctx->proto = NULL;
+ ctx->params = NULL;
+ ctx->nr_params = 0;
+ }
+}
+
+/* Return 1 if the field separator is the arrow operator ('->') */
+static int split_next_field(char *varname, char **next_field,
+ struct traceprobe_parse_context *ctx)
+{
+ char *field;
+ int ret = 0;
+
+ field = strpbrk(varname, ".-");
+ if (field) {
+ if (field[0] == '-' && field[1] == '>') {
+ field[0] = '\0';
+ field += 2;
+ ret = 1;
+ } else if (field[0] == '.') {
+ field[0] = '\0';
+ field += 1;
+ } else {
+ trace_probe_log_err(ctx->offset + field - varname, BAD_HYPHEN);
+ return -EINVAL;
+ }
+ *next_field = field;
+ }
+
+ return ret;
+}
+
+/*
+ * Parse a field of a data structure. The @type must be a pointer type
+ * pointing to the target data structure type.
+ */
+static int parse_btf_field(char *fieldname, const struct btf_type *type,
+ struct fetch_insn **pcode, struct fetch_insn *end,
+ struct traceprobe_parse_context *ctx)
+{
+ struct fetch_insn *code = *pcode;
+ const struct btf_member *field;
+ u32 bitoffs, anon_offs;
+ char *next;
+ int is_ptr;
+ s32 tid;
+
+ do {
+ /* Outer loop for solving arrow operator ('->') */
+ if (BTF_INFO_KIND(type->info) != BTF_KIND_PTR) {
+ trace_probe_log_err(ctx->offset, NO_PTR_STRCT);
+ return -EINVAL;
+ }
+ /* Convert a struct pointer type to a struct type */
+ type = btf_type_skip_modifiers(ctx->btf, type->type, &tid);
+ if (!type) {
+ trace_probe_log_err(ctx->offset, BAD_BTF_TID);
+ return -EINVAL;
+ }
+
+ bitoffs = 0;
+ do {
+ /* Inner loop for solving dot operator ('.') */
+ next = NULL;
+ is_ptr = split_next_field(fieldname, &next, ctx);
+ if (is_ptr < 0)
+ return is_ptr;
+
+ anon_offs = 0;
+ field = btf_find_struct_member(ctx->btf, type, fieldname,
+ &anon_offs);
+ if (!field) {
+ trace_probe_log_err(ctx->offset, NO_BTF_FIELD);
+ return -ENOENT;
+ }
+ /* Add anonymous structure/union offset */
+ bitoffs += anon_offs;
+
+ /* Accumulate the bit-offsets of the dot-connected fields */
+ if (btf_type_kflag(type)) {
+ bitoffs += BTF_MEMBER_BIT_OFFSET(field->offset);
+ ctx->last_bitsize = BTF_MEMBER_BITFIELD_SIZE(field->offset);
+ } else {
+ bitoffs += field->offset;
+ ctx->last_bitsize = 0;
+ }
+
+ type = btf_type_skip_modifiers(ctx->btf, field->type, &tid);
+ if (!type) {
+ trace_probe_log_err(ctx->offset, BAD_BTF_TID);
+ return -EINVAL;
+ }
+
+ ctx->offset += next - fieldname;
+ fieldname = next;
+ } while (!is_ptr && fieldname);
+
+ if (++code == end) {
+ trace_probe_log_err(ctx->offset, TOO_MANY_OPS);
+ return -EINVAL;
+ }
+ code->op = FETCH_OP_DEREF; /* TODO: user deref support */
+ code->offset = bitoffs / 8;
+ *pcode = code;
+
+ ctx->last_bitoffs = bitoffs % 8;
+ ctx->last_type = type;
+ } while (fieldname);
+
+ return 0;
+}
+
+static int parse_btf_arg(char *varname,
+ struct fetch_insn **pcode, struct fetch_insn *end,
+ struct traceprobe_parse_context *ctx)
+{
+ struct fetch_insn *code = *pcode;
+ const struct btf_param *params;
+ const struct btf_type *type;
+ char *field = NULL;
+ int i, is_ptr, ret;
+ u32 tid;
+
+ if (WARN_ON_ONCE(!ctx->funcname))
+ return -EINVAL;
+
+ is_ptr = split_next_field(varname, &field, ctx);
+ if (is_ptr < 0)
+ return is_ptr;
+ if (!is_ptr && field) {
+ /* dot-connected field on an argument is not supported. */
+ trace_probe_log_err(ctx->offset + field - varname,
+ NOSUP_DAT_ARG);
+ return -EOPNOTSUPP;
+ }
+
+ if (ctx->flags & TPARG_FL_RETURN) {
+ if (strcmp(varname, "$retval") != 0) {
+ trace_probe_log_err(ctx->offset, NO_BTFARG);
+ return -ENOENT;
+ }
+ code->op = FETCH_OP_RETVAL;
+ /* Check whether the function return type is not void */
+ if (query_btf_context(ctx) == 0) {
+ if (ctx->proto->type == 0) {
+ trace_probe_log_err(ctx->offset, NO_RETVAL);
+ return -ENOENT;
+ }
+ tid = ctx->proto->type;
+ goto found;
+ }
+ if (field) {
+ trace_probe_log_err(ctx->offset + field - varname,
+ NO_BTF_ENTRY);
+ return -ENOENT;
+ }
+ return 0;
+ }
+
+ if (!ctx->btf) {
+ ret = query_btf_context(ctx);
+ if (ret < 0 || ctx->nr_params == 0) {
+ trace_probe_log_err(ctx->offset, NO_BTF_ENTRY);
+			return -ENOENT;
+ }
+ }
+ params = ctx->params;
+
+ for (i = 0; i < ctx->nr_params; i++) {
+ const char *name = btf_name_by_offset(ctx->btf, params[i].name_off);
+
+ if (name && !strcmp(name, varname)) {
+ code->op = FETCH_OP_ARG;
+ if (ctx->flags & TPARG_FL_TPOINT)
+ code->param = i + 1;
+ else
+ code->param = i;
+ tid = params[i].type;
+ goto found;
+ }
+ }
+ trace_probe_log_err(ctx->offset, NO_BTFARG);
+ return -ENOENT;
+
+found:
+ type = btf_type_skip_modifiers(ctx->btf, tid, &tid);
+ if (!type) {
+ trace_probe_log_err(ctx->offset, BAD_BTF_TID);
+ return -EINVAL;
+ }
+ /* Initialize the last type information */
+ ctx->last_type = type;
+ ctx->last_bitoffs = 0;
+ ctx->last_bitsize = 0;
+ if (field) {
+ ctx->offset += field - varname;
+ return parse_btf_field(field, type, pcode, end, ctx);
+ }
+ return 0;
+}
+
+static const struct fetch_type *find_fetch_type_from_btf_type(
+ struct traceprobe_parse_context *ctx)
+{
+ struct btf *btf = ctx->btf;
+ const char *typestr = NULL;
+
+ if (btf && ctx->last_type)
+ typestr = fetch_type_from_btf_type(btf, ctx->last_type, ctx);
+
+ return find_fetch_type(typestr, ctx->flags);
+}
+
+static int parse_btf_bitfield(struct fetch_insn **pcode,
+ struct traceprobe_parse_context *ctx)
+{
+ struct fetch_insn *code = *pcode;
+
+ if ((ctx->last_bitsize % 8 == 0) && ctx->last_bitoffs == 0)
+ return 0;
+
+ code++;
+ if (code->op != FETCH_OP_NOP) {
+ trace_probe_log_err(ctx->offset, TOO_MANY_OPS);
+ return -EINVAL;
+ }
+ *pcode = code;
+
+ code->op = FETCH_OP_MOD_BF;
+ code->lshift = 64 - (ctx->last_bitsize + ctx->last_bitoffs);
+ code->rshift = 64 - ctx->last_bitsize;
+ code->basesize = 64 / 8;
+ return 0;
+}
+
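A hedged user-space sketch of the shift arithmetic chosen by parse_btf_bitfield() above; endianness and the exact BTF offset convention are glossed over, this only illustrates how lshift/rshift isolate the field:

#include <stdint.h>
#include <stdio.h>

static uint64_t extract_bitfield(uint64_t raw, unsigned int bitsize,
				 unsigned int bitoffs)
{
	unsigned int lshift = 64 - (bitsize + bitoffs);
	unsigned int rshift = 64 - bitsize;

	return (raw << lshift) >> rshift;	/* drop high bits, then low bits */
}

int main(void)
{
	/* a 3-bit field at bit offset 5: bits 5..7 of 0xE0 -> value 7 */
	printf("%llu\n", (unsigned long long)extract_bitfield(0xE0, 3, 5));
	return 0;
}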
+#else
+static void clear_btf_context(struct traceprobe_parse_context *ctx)
+{
+ ctx->btf = NULL;
+}
+
+static int query_btf_context(struct traceprobe_parse_context *ctx)
+{
+ return -EOPNOTSUPP;
+}
+
+static int parse_btf_arg(char *varname,
+ struct fetch_insn **pcode, struct fetch_insn *end,
+ struct traceprobe_parse_context *ctx)
+{
+ trace_probe_log_err(ctx->offset, NOSUP_BTFARG);
+ return -EOPNOTSUPP;
+}
+
+static int parse_btf_bitfield(struct fetch_insn **pcode,
+ struct traceprobe_parse_context *ctx)
+{
+ trace_probe_log_err(ctx->offset, NOSUP_BTFARG);
+ return -EOPNOTSUPP;
+}
+
+#define find_fetch_type_from_btf_type(ctx) \
+ find_fetch_type(NULL, ctx->flags)
+
+static int check_prepare_btf_string_fetch(char *typename,
+ struct fetch_insn **pcode,
+ struct traceprobe_parse_context *ctx)
+{
+ return 0;
+}
+
+#endif
+
#define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long))
-static int parse_probe_vars(char *arg, const struct fetch_type *t,
- struct fetch_insn *code, unsigned int flags, int offs)
+/* Parse $vars. @orig_arg points to '$', which syncs to @ctx->offset */
+static int parse_probe_vars(char *orig_arg, const struct fetch_type *t,
+ struct fetch_insn **pcode,
+ struct fetch_insn *end,
+ struct traceprobe_parse_context *ctx)
{
+ struct fetch_insn *code = *pcode;
+ int err = TP_ERR_BAD_VAR;
+ char *arg = orig_arg + 1;
unsigned long param;
int ret = 0;
int len;
- if (strcmp(arg, "retval") == 0) {
- if (flags & TPARG_FL_RETURN) {
+ if (ctx->flags & TPARG_FL_TEVENT) {
+ if (code->data)
+ return -EFAULT;
+ ret = parse_trace_event_arg(arg, code, ctx);
+ if (!ret)
+ return 0;
+ if (strcmp(arg, "comm") == 0 || strcmp(arg, "COMM") == 0) {
+ code->op = FETCH_OP_COMM;
+ return 0;
+ }
+ /* backward compatibility */
+ ctx->offset = 0;
+ goto inval;
+ }
+
+ if (str_has_prefix(arg, "retval")) {
+ if (!(ctx->flags & TPARG_FL_RETURN)) {
+ err = TP_ERR_RETVAL_ON_PROBE;
+ goto inval;
+ }
+ if (!(ctx->flags & TPARG_FL_KERNEL) ||
+ !IS_ENABLED(CONFIG_PROBE_EVENTS_BTF_ARGS)) {
code->op = FETCH_OP_RETVAL;
- } else {
- trace_probe_log_err(offs, RETVAL_ON_PROBE);
- ret = -EINVAL;
+ return 0;
}
- } else if ((len = str_has_prefix(arg, "stack"))) {
+ return parse_btf_arg(orig_arg, pcode, end, ctx);
+ }
+
+ len = str_has_prefix(arg, "stack");
+ if (len) {
+
if (arg[len] == '\0') {
code->op = FETCH_OP_STACKP;
- } else if (isdigit(arg[len])) {
+ return 0;
+ }
+
+ if (isdigit(arg[len])) {
ret = kstrtoul(arg + len, 10, &param);
- if (ret) {
- goto inval_var;
- } else if ((flags & TPARG_FL_KERNEL) &&
- param > PARAM_MAX_STACK) {
- trace_probe_log_err(offs, BAD_STACK_NUM);
- ret = -EINVAL;
- } else {
- code->op = FETCH_OP_STACK;
- code->param = (unsigned int)param;
+ if (ret)
+ goto inval;
+
+ if ((ctx->flags & TPARG_FL_KERNEL) &&
+ param > PARAM_MAX_STACK) {
+ err = TP_ERR_BAD_STACK_NUM;
+ goto inval;
}
- } else
- goto inval_var;
- } else if (strcmp(arg, "comm") == 0) {
+ code->op = FETCH_OP_STACK;
+ code->param = (unsigned int)param;
+ return 0;
+ }
+ goto inval;
+ }
+
+ if (strcmp(arg, "comm") == 0 || strcmp(arg, "COMM") == 0) {
code->op = FETCH_OP_COMM;
+ return 0;
+ }
+
#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
- } else if (((flags & TPARG_FL_MASK) ==
- (TPARG_FL_KERNEL | TPARG_FL_FENTRY)) &&
- (len = str_has_prefix(arg, "arg"))) {
+ len = str_has_prefix(arg, "arg");
+ if (len && tparg_is_function_entry(ctx->flags)) {
ret = kstrtoul(arg + len, 10, &param);
- if (ret) {
- goto inval_var;
- } else if (!param || param > PARAM_MAX_STACK) {
- trace_probe_log_err(offs, BAD_ARG_NUM);
- return -EINVAL;
+ if (ret)
+ goto inval;
+
+ if (!param || param > PARAM_MAX_STACK) {
+ err = TP_ERR_BAD_ARG_NUM;
+ goto inval;
}
+
code->op = FETCH_OP_ARG;
code->param = (unsigned int)param - 1;
+ /*
+ * The tracepoint probe will probe a stub function, and the
+ * first parameter of the stub is a dummy and should be ignored.
+ */
+ if (ctx->flags & TPARG_FL_TPOINT)
+ code->param++;
+ return 0;
+ }
#endif
- } else
- goto inval_var;
- return ret;
-
-inval_var:
- trace_probe_log_err(offs, BAD_VAR);
+inval:
+ __trace_probe_log_err(ctx->offset, err);
return -EINVAL;
}
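A short, hedged summary of the $-variables accepted by parse_probe_vars() above, derived only from the code in this hunk:

/*
 * $retval       - return value (return probes only; typed via BTF when available)
 * $stack        - stack address
 * $stackN       - N-th entry on the stack (N <= PARAM_MAX_STACK for kernel probes)
 * $comm, $COMM  - current task comm
 * $argN         - N-th function argument (function-entry probes, requires
 *                 CONFIG_HAVE_FUNCTION_ARG_ACCESS_API)
 * <field name>  - for trace-event probes (TPARG_FL_TEVENT), a field of the
 *                 underlying trace event
 */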
@@ -346,6 +877,8 @@ static int __parse_imm_string(char *str, char **pbuf, int offs)
return -EINVAL;
}
*pbuf = kstrndup(str, len - 1, GFP_KERNEL);
+ if (!*pbuf)
+ return -ENOMEM;
return 0;
}
@@ -353,7 +886,7 @@ static int __parse_imm_string(char *str, char **pbuf, int offs)
static int
parse_probe_arg(char *arg, const struct fetch_type *type,
struct fetch_insn **pcode, struct fetch_insn *end,
- unsigned int flags, int offs)
+ struct traceprobe_parse_context *ctx)
{
struct fetch_insn *code = *pcode;
unsigned long param;
@@ -364,24 +897,29 @@ parse_probe_arg(char *arg, const struct fetch_type *type,
switch (arg[0]) {
case '$':
- ret = parse_probe_vars(arg + 1, type, code, flags, offs);
+ ret = parse_probe_vars(arg, type, pcode, end, ctx);
break;
case '%': /* named register */
+ if (ctx->flags & (TPARG_FL_TEVENT | TPARG_FL_FPROBE)) {
+ /* eprobe and fprobe do not handle registers */
+ trace_probe_log_err(ctx->offset, BAD_VAR);
+ break;
+ }
ret = regs_query_register_offset(arg + 1);
if (ret >= 0) {
code->op = FETCH_OP_REG;
code->param = (unsigned int)ret;
ret = 0;
} else
- trace_probe_log_err(offs, BAD_REG_NAME);
+ trace_probe_log_err(ctx->offset, BAD_REG_NAME);
break;
case '@': /* memory, file-offset or symbol */
if (isdigit(arg[1])) {
ret = kstrtoul(arg + 1, 0, &param);
if (ret) {
- trace_probe_log_err(offs, BAD_MEM_ADDR);
+ trace_probe_log_err(ctx->offset, BAD_MEM_ADDR);
break;
}
/* load address */
@@ -389,13 +927,13 @@ parse_probe_arg(char *arg, const struct fetch_type *type,
code->immediate = param;
} else if (arg[1] == '+') {
/* kprobes don't support file offsets */
- if (flags & TPARG_FL_KERNEL) {
- trace_probe_log_err(offs, FILE_ON_KPROBE);
+ if (ctx->flags & TPARG_FL_KERNEL) {
+ trace_probe_log_err(ctx->offset, FILE_ON_KPROBE);
return -EINVAL;
}
ret = kstrtol(arg + 2, 0, &offset);
if (ret) {
- trace_probe_log_err(offs, BAD_FILE_OFFS);
+ trace_probe_log_err(ctx->offset, BAD_FILE_OFFS);
break;
}
@@ -403,8 +941,8 @@ parse_probe_arg(char *arg, const struct fetch_type *type,
code->immediate = (unsigned long)offset; // imm64?
} else {
/* uprobes don't support symbols */
- if (!(flags & TPARG_FL_KERNEL)) {
- trace_probe_log_err(offs, SYM_ON_UPROBE);
+ if (!(ctx->flags & TPARG_FL_KERNEL)) {
+ trace_probe_log_err(ctx->offset, SYM_ON_UPROBE);
return -EINVAL;
}
/* Preserve symbol for updating */
@@ -413,7 +951,7 @@ parse_probe_arg(char *arg, const struct fetch_type *type,
if (!code->data)
return -ENOMEM;
if (++code == end) {
- trace_probe_log_err(offs, TOO_MANY_OPS);
+ trace_probe_log_err(ctx->offset, TOO_MANY_OPS);
return -EINVAL;
}
code->op = FETCH_OP_IMM;
@@ -421,7 +959,7 @@ parse_probe_arg(char *arg, const struct fetch_type *type,
}
/* These are fetching from memory */
if (++code == end) {
- trace_probe_log_err(offs, TOO_MANY_OPS);
+ trace_probe_log_err(ctx->offset, TOO_MANY_OPS);
return -EINVAL;
}
*pcode = code;
@@ -440,47 +978,51 @@ parse_probe_arg(char *arg, const struct fetch_type *type,
arg++; /* Skip '+', because kstrtol() rejects it. */
tmp = strchr(arg, '(');
if (!tmp) {
- trace_probe_log_err(offs, DEREF_NEED_BRACE);
+ trace_probe_log_err(ctx->offset, DEREF_NEED_BRACE);
return -EINVAL;
}
*tmp = '\0';
ret = kstrtol(arg, 0, &offset);
if (ret) {
- trace_probe_log_err(offs, BAD_DEREF_OFFS);
+ trace_probe_log_err(ctx->offset, BAD_DEREF_OFFS);
break;
}
- offs += (tmp + 1 - arg) + (arg[0] != '-' ? 1 : 0);
+ ctx->offset += (tmp + 1 - arg) + (arg[0] != '-' ? 1 : 0);
arg = tmp + 1;
tmp = strrchr(arg, ')');
if (!tmp) {
- trace_probe_log_err(offs + strlen(arg),
+ trace_probe_log_err(ctx->offset + strlen(arg),
DEREF_OPEN_BRACE);
return -EINVAL;
} else {
- const struct fetch_type *t2 = find_fetch_type(NULL);
+ const struct fetch_type *t2 = find_fetch_type(NULL, ctx->flags);
+ int cur_offs = ctx->offset;
*tmp = '\0';
- ret = parse_probe_arg(arg, t2, &code, end, flags, offs);
+ ret = parse_probe_arg(arg, t2, &code, end, ctx);
if (ret)
break;
+ ctx->offset = cur_offs;
if (code->op == FETCH_OP_COMM ||
code->op == FETCH_OP_DATA) {
- trace_probe_log_err(offs, COMM_CANT_DEREF);
+ trace_probe_log_err(ctx->offset, COMM_CANT_DEREF);
return -EINVAL;
}
if (++code == end) {
- trace_probe_log_err(offs, TOO_MANY_OPS);
+ trace_probe_log_err(ctx->offset, TOO_MANY_OPS);
return -EINVAL;
}
*pcode = code;
code->op = deref;
code->offset = offset;
+ /* Reset the last type if used */
+ ctx->last_type = NULL;
}
break;
case '\\': /* Immediate value */
if (arg[1] == '"') { /* Immediate string */
- ret = __parse_imm_string(arg + 2, &tmp, offs + 2);
+ ret = __parse_imm_string(arg + 2, &tmp, ctx->offset + 2);
if (ret)
break;
code->op = FETCH_OP_DATA;
@@ -488,15 +1030,24 @@ parse_probe_arg(char *arg, const struct fetch_type *type,
} else {
ret = str_to_immediate(arg + 1, &code->immediate);
if (ret)
- trace_probe_log_err(offs + 1, BAD_IMM);
+ trace_probe_log_err(ctx->offset + 1, BAD_IMM);
else
code->op = FETCH_OP_IMM;
}
break;
+ default:
+ if (isalpha(arg[0]) || arg[0] == '_') { /* BTF variable */
+ if (!tparg_is_function_entry(ctx->flags)) {
+ trace_probe_log_err(ctx->offset, NOSUP_BTFARG);
+ return -EINVAL;
+ }
+ ret = parse_btf_arg(arg, pcode, end, ctx);
+ break;
+ }
}
if (!ret && code->op == FETCH_OP_NOP) {
/* Parsed, but do not find fetch method */
- trace_probe_log_err(offs, BAD_FETCH_ARG);
+ trace_probe_log_err(ctx->offset, BAD_FETCH_ARG);
ret = -EINVAL;
}
return ret;
@@ -540,26 +1091,35 @@ static int __parse_bitfield_probe_arg(const char *bf,
}
/* String length checking wrapper */
-static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size,
- struct probe_arg *parg, unsigned int flags, int offset)
+static int traceprobe_parse_probe_arg_body(const char *argv, ssize_t *size,
+ struct probe_arg *parg,
+ struct traceprobe_parse_context *ctx)
{
struct fetch_insn *code, *scode, *tmp = NULL;
char *t, *t2, *t3;
int ret, len;
+ char *arg;
+ arg = kstrdup(argv, GFP_KERNEL);
+ if (!arg)
+ return -ENOMEM;
+
+ ret = -EINVAL;
len = strlen(arg);
if (len > MAX_ARGSTR_LEN) {
- trace_probe_log_err(offset, ARG_TOO_LONG);
- return -EINVAL;
+ trace_probe_log_err(ctx->offset, ARG_TOO_LONG);
+ goto out;
} else if (len == 0) {
- trace_probe_log_err(offset, NO_ARG_BODY);
- return -EINVAL;
+ trace_probe_log_err(ctx->offset, NO_ARG_BODY);
+ goto out;
}
+ ret = -ENOMEM;
parg->comm = kstrdup(arg, GFP_KERNEL);
if (!parg->comm)
- return -ENOMEM;
+ goto out;
+ ret = -EINVAL;
t = strchr(arg, ':');
if (t) {
*t = '\0';
@@ -568,43 +1128,72 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size,
*t2++ = '\0';
t3 = strchr(t2, ']');
if (!t3) {
- offset += t2 + strlen(t2) - arg;
- trace_probe_log_err(offset,
+ int offs = t2 + strlen(t2) - arg;
+
+ trace_probe_log_err(ctx->offset + offs,
ARRAY_NO_CLOSE);
- return -EINVAL;
+ goto out;
} else if (t3[1] != '\0') {
- trace_probe_log_err(offset + t3 + 1 - arg,
+ trace_probe_log_err(ctx->offset + t3 + 1 - arg,
BAD_ARRAY_SUFFIX);
- return -EINVAL;
+ goto out;
}
*t3 = '\0';
if (kstrtouint(t2, 0, &parg->count) || !parg->count) {
- trace_probe_log_err(offset + t2 - arg,
+ trace_probe_log_err(ctx->offset + t2 - arg,
BAD_ARRAY_NUM);
- return -EINVAL;
+ goto out;
}
if (parg->count > MAX_ARRAY_LEN) {
- trace_probe_log_err(offset + t2 - arg,
+ trace_probe_log_err(ctx->offset + t2 - arg,
ARRAY_TOO_BIG);
- return -EINVAL;
+ goto out;
}
}
}
/*
- * Since $comm and immediate string can not be dereferred,
- * we can find those by strcmp.
+ * Since $comm and immediate string can not be dereferenced,
+ * we can find those by strcmp. But ignore for eprobes.
*/
- if (strcmp(arg, "$comm") == 0 || strncmp(arg, "\\\"", 2) == 0) {
- /* The type of $comm must be "string", and not an array. */
- if (parg->count || (t && strcmp(t, "string")))
- return -EINVAL;
- parg->type = find_fetch_type("string");
+ if (!(ctx->flags & TPARG_FL_TEVENT) &&
+ (strcmp(arg, "$comm") == 0 || strcmp(arg, "$COMM") == 0 ||
+ strncmp(arg, "\\\"", 2) == 0)) {
+ /* The type of $comm must be "string", and not an array type. */
+ if (parg->count || (t && strcmp(t, "string"))) {
+ trace_probe_log_err(ctx->offset + (t ? (t - arg) : 0),
+ NEED_STRING_TYPE);
+ goto out;
+ }
+ parg->type = find_fetch_type("string", ctx->flags);
} else
- parg->type = find_fetch_type(t);
+ parg->type = find_fetch_type(t, ctx->flags);
if (!parg->type) {
- trace_probe_log_err(offset + (t ? (t - arg) : 0), BAD_TYPE);
- return -EINVAL;
+ trace_probe_log_err(ctx->offset + (t ? (t - arg) : 0), BAD_TYPE);
+ goto out;
+ }
+
+ code = tmp = kcalloc(FETCH_INSN_MAX, sizeof(*code), GFP_KERNEL);
+ if (!code)
+ goto out;
+ code[FETCH_INSN_MAX - 1].op = FETCH_OP_END;
+
+ ctx->last_type = NULL;
+ ret = parse_probe_arg(arg, parg->type, &code, &code[FETCH_INSN_MAX - 1],
+ ctx);
+ if (ret)
+ goto fail;
+
+ /* Update storing type if BTF is available */
+ if (IS_ENABLED(CONFIG_PROBE_EVENTS_BTF_ARGS) &&
+ ctx->last_type) {
+ if (!t) {
+ parg->type = find_fetch_type_from_btf_type(ctx);
+ } else if (strstr(t, "string")) {
+ ret = check_prepare_btf_string_fetch(t, &code, ctx);
+ if (ret)
+ goto fail;
+ }
}
parg->offset = *size;
*size += parg->type->size * (parg->count ?: 1);
@@ -612,45 +1201,49 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size,
if (parg->count) {
len = strlen(parg->type->fmttype) + 6;
parg->fmt = kmalloc(len, GFP_KERNEL);
- if (!parg->fmt)
- return -ENOMEM;
+ if (!parg->fmt) {
+ ret = -ENOMEM;
+ goto out;
+ }
snprintf(parg->fmt, len, "%s[%d]", parg->type->fmttype,
parg->count);
}
- code = tmp = kcalloc(FETCH_INSN_MAX, sizeof(*code), GFP_KERNEL);
- if (!code)
- return -ENOMEM;
- code[FETCH_INSN_MAX - 1].op = FETCH_OP_END;
-
- ret = parse_probe_arg(arg, parg->type, &code, &code[FETCH_INSN_MAX - 1],
- flags, offset);
- if (ret)
- goto fail;
-
+ ret = -EINVAL;
/* Store operation */
- if (!strcmp(parg->type->name, "string") ||
- !strcmp(parg->type->name, "ustring")) {
- if (code->op != FETCH_OP_DEREF && code->op != FETCH_OP_UDEREF &&
- code->op != FETCH_OP_IMM && code->op != FETCH_OP_COMM &&
- code->op != FETCH_OP_DATA) {
- trace_probe_log_err(offset + (t ? (t - arg) : 0),
- BAD_STRING);
- ret = -EINVAL;
- goto fail;
+ if (parg->type->is_string) {
+ if (!strcmp(parg->type->name, "symstr")) {
+ if (code->op != FETCH_OP_REG && code->op != FETCH_OP_STACK &&
+ code->op != FETCH_OP_RETVAL && code->op != FETCH_OP_ARG &&
+ code->op != FETCH_OP_DEREF && code->op != FETCH_OP_TP_ARG) {
+ trace_probe_log_err(ctx->offset + (t ? (t - arg) : 0),
+ BAD_SYMSTRING);
+ goto fail;
+ }
+ } else {
+ if (code->op != FETCH_OP_DEREF && code->op != FETCH_OP_UDEREF &&
+ code->op != FETCH_OP_IMM && code->op != FETCH_OP_COMM &&
+ code->op != FETCH_OP_DATA && code->op != FETCH_OP_TP_ARG) {
+ trace_probe_log_err(ctx->offset + (t ? (t - arg) : 0),
+ BAD_STRING);
+ goto fail;
+ }
}
- if ((code->op == FETCH_OP_IMM || code->op == FETCH_OP_COMM ||
- code->op == FETCH_OP_DATA) || parg->count) {
+ if (!strcmp(parg->type->name, "symstr") ||
+ (code->op == FETCH_OP_IMM || code->op == FETCH_OP_COMM ||
+ code->op == FETCH_OP_DATA) || code->op == FETCH_OP_TP_ARG ||
+ parg->count) {
/*
* IMM, DATA and COMM is pointing actual address, those
* must be kept, and if parg->count != 0, this is an
* array of string pointers instead of string address
* itself.
+			 * For symstr, no dereference is needed, thus
+			 * it just gets the value.
*/
code++;
if (code->op != FETCH_OP_NOP) {
- trace_probe_log_err(offset, TOO_MANY_OPS);
- ret = -EINVAL;
+ trace_probe_log_err(ctx->offset, TOO_MANY_OPS);
goto fail;
}
}
@@ -658,6 +1251,8 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size,
if (!strcmp(parg->type->name, "ustring") ||
code->op == FETCH_OP_UDEREF)
code->op = FETCH_OP_ST_USTRING;
+ else if (!strcmp(parg->type->name, "symstr"))
+ code->op = FETCH_OP_ST_SYMSTR;
else
code->op = FETCH_OP_ST_STRING;
code->size = parg->type->size;
@@ -671,8 +1266,7 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size,
} else {
code++;
if (code->op != FETCH_OP_NOP) {
- trace_probe_log_err(offset, TOO_MANY_OPS);
- ret = -EINVAL;
+ trace_probe_log_err(ctx->offset, TOO_MANY_OPS);
goto fail;
}
code->op = FETCH_OP_ST_RAW;
@@ -683,24 +1277,28 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size,
if (t != NULL) {
ret = __parse_bitfield_probe_arg(t, parg->type, &code);
if (ret) {
- trace_probe_log_err(offset + t - arg, BAD_BITFIELD);
+ trace_probe_log_err(ctx->offset + t - arg, BAD_BITFIELD);
goto fail;
}
+ } else if (IS_ENABLED(CONFIG_PROBE_EVENTS_BTF_ARGS) &&
+ ctx->last_type) {
+ ret = parse_btf_bitfield(&code, ctx);
+ if (ret)
+ goto fail;
}
+ ret = -EINVAL;
/* Loop(Array) operation */
if (parg->count) {
if (scode->op != FETCH_OP_ST_MEM &&
scode->op != FETCH_OP_ST_STRING &&
scode->op != FETCH_OP_ST_USTRING) {
- trace_probe_log_err(offset + (t ? (t - arg) : 0),
+ trace_probe_log_err(ctx->offset + (t ? (t - arg) : 0),
BAD_STRING);
- ret = -EINVAL;
goto fail;
}
code++;
if (code->op != FETCH_OP_NOP) {
- trace_probe_log_err(offset, TOO_MANY_OPS);
- ret = -EINVAL;
+ trace_probe_log_err(ctx->offset, TOO_MANY_OPS);
goto fail;
}
code->op = FETCH_OP_LP_ARRAY;
@@ -709,6 +1307,7 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size,
code++;
code->op = FETCH_OP_END;
+ ret = 0;
/* Shrink down the code buffer */
parg->code = kcalloc(code - tmp + 1, sizeof(*code), GFP_KERNEL);
if (!parg->code)
@@ -724,6 +1323,8 @@ fail:
kfree(code->data);
}
kfree(tmp);
+out:
+ kfree(arg);
return ret;
}
@@ -745,11 +1346,38 @@ static int traceprobe_conflict_field_name(const char *name,
return 0;
}
-int traceprobe_parse_probe_arg(struct trace_probe *tp, int i, char *arg,
- unsigned int flags)
+static char *generate_probe_arg_name(const char *arg, int idx)
+{
+ char *name = NULL;
+ const char *end;
+
+ /*
+ * If argument name is omitted, try arg as a name (BTF variable)
+ * or "argN".
+ */
+ if (IS_ENABLED(CONFIG_PROBE_EVENTS_BTF_ARGS)) {
+ end = strchr(arg, ':');
+ if (!end)
+ end = arg + strlen(arg);
+
+ name = kmemdup_nul(arg, end - arg, GFP_KERNEL);
+ if (!name || !is_good_name(name)) {
+ kfree(name);
+ name = NULL;
+ }
+ }
+
+ if (!name)
+ name = kasprintf(GFP_KERNEL, "arg%d", idx + 1);
+
+ return name;
+}
+
+int traceprobe_parse_probe_arg(struct trace_probe *tp, int i, const char *arg,
+ struct traceprobe_parse_context *ctx)
{
struct probe_arg *parg = &tp->args[i];
- char *body;
+ const char *body;
/* Increment count for freeing args in error case */
tp->nr_args++;
@@ -766,8 +1394,7 @@ int traceprobe_parse_probe_arg(struct trace_probe *tp, int i, char *arg,
parg->name = kmemdup_nul(arg, body - arg, GFP_KERNEL);
body++;
} else {
- /* If argument name is omitted, set "argN" */
- parg->name = kasprintf(GFP_KERNEL, "arg%d", i + 1);
+ parg->name = generate_probe_arg_name(arg, i);
body = arg;
}
if (!parg->name)
@@ -781,9 +1408,9 @@ int traceprobe_parse_probe_arg(struct trace_probe *tp, int i, char *arg,
trace_probe_log_err(0, USED_ARG_NAME);
return -EINVAL;
}
+ ctx->offset = body - arg;
/* Parse fetch argument */
- return traceprobe_parse_probe_arg_body(body, &tp->size, parg, flags,
- body - arg);
+ return traceprobe_parse_probe_arg_body(body, &tp->size, parg, ctx);
}
void traceprobe_free_probe_arg(struct probe_arg *arg)
@@ -802,6 +1429,151 @@ void traceprobe_free_probe_arg(struct probe_arg *arg)
kfree(arg->fmt);
}
+static int argv_has_var_arg(int argc, const char *argv[], int *args_idx,
+ struct traceprobe_parse_context *ctx)
+{
+ int i, found = 0;
+
+ for (i = 0; i < argc; i++)
+ if (str_has_prefix(argv[i], "$arg")) {
+ trace_probe_log_set_index(i + 2);
+
+ if (!tparg_is_function_entry(ctx->flags)) {
+ trace_probe_log_err(0, NOFENTRY_ARGS);
+ return -EINVAL;
+ }
+
+ if (isdigit(argv[i][4])) {
+ found = 1;
+ continue;
+ }
+
+ if (argv[i][4] != '*') {
+ trace_probe_log_err(0, BAD_VAR);
+ return -EINVAL;
+ }
+
+ if (*args_idx >= 0 && *args_idx < argc) {
+ trace_probe_log_err(0, DOUBLE_ARGS);
+ return -EINVAL;
+ }
+ found = 1;
+ *args_idx = i;
+ }
+
+ return found;
+}
+
+static int sprint_nth_btf_arg(int idx, const char *type,
+ char *buf, int bufsize,
+ struct traceprobe_parse_context *ctx)
+{
+ const char *name;
+ int ret;
+
+ if (idx >= ctx->nr_params) {
+ trace_probe_log_err(0, NO_BTFARG);
+ return -ENOENT;
+ }
+ name = btf_name_by_offset(ctx->btf, ctx->params[idx].name_off);
+ if (!name) {
+ trace_probe_log_err(0, NO_BTF_ENTRY);
+ return -ENOENT;
+ }
+ ret = snprintf(buf, bufsize, "%s%s", name, type);
+ if (ret >= bufsize) {
+ trace_probe_log_err(0, ARGS_2LONG);
+ return -E2BIG;
+ }
+ return ret;
+}
+
+/* Return new_argv which must be freed after use */
+const char **traceprobe_expand_meta_args(int argc, const char *argv[],
+ int *new_argc, char *buf, int bufsize,
+ struct traceprobe_parse_context *ctx)
+{
+ const struct btf_param *params = NULL;
+ int i, j, n, used, ret, args_idx = -1;
+ const char **new_argv = NULL;
+
+ ret = argv_has_var_arg(argc, argv, &args_idx, ctx);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ if (!ret) {
+ *new_argc = argc;
+ return NULL;
+ }
+
+ ret = query_btf_context(ctx);
+ if (ret < 0 || ctx->nr_params == 0) {
+ if (args_idx != -1) {
+ /* $arg* requires BTF info */
+ trace_probe_log_err(0, NOSUP_BTFARG);
+ return (const char **)params;
+ }
+ *new_argc = argc;
+ return NULL;
+ }
+
+ if (args_idx >= 0)
+ *new_argc = argc + ctx->nr_params - 1;
+ else
+ *new_argc = argc;
+
+ new_argv = kcalloc(*new_argc, sizeof(char *), GFP_KERNEL);
+ if (!new_argv)
+ return ERR_PTR(-ENOMEM);
+
+ used = 0;
+ for (i = 0, j = 0; i < argc; i++) {
+ trace_probe_log_set_index(i + 2);
+ if (i == args_idx) {
+ for (n = 0; n < ctx->nr_params; n++) {
+ ret = sprint_nth_btf_arg(n, "", buf + used,
+ bufsize - used, ctx);
+ if (ret < 0)
+ goto error;
+
+ new_argv[j++] = buf + used;
+ used += ret + 1;
+ }
+ continue;
+ }
+
+ if (str_has_prefix(argv[i], "$arg")) {
+ char *type = NULL;
+
+ n = simple_strtoul(argv[i] + 4, &type, 10);
+ if (type && !(*type == ':' || *type == '\0')) {
+ trace_probe_log_err(0, BAD_VAR);
+ ret = -ENOENT;
+ goto error;
+ }
+ /* Note: $argN starts from $arg1 */
+ ret = sprint_nth_btf_arg(n - 1, type, buf + used,
+ bufsize - used, ctx);
+ if (ret < 0)
+ goto error;
+ new_argv[j++] = buf + used;
+ used += ret + 1;
+ } else
+ new_argv[j++] = argv[i];
+ }
+
+ return new_argv;
+
+error:
+ kfree(new_argv);
+ return ERR_PTR(ret);
+}
+
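A hedged usage sketch of the expansion performed above; vfs_read and its parameter names are illustrative assumptions here, since the real names come from the kernel's BTF:

/*
 *	p:myprobe vfs_read $arg*
 *
 * is rewritten by traceprobe_expand_meta_args() into the equivalent of
 *
 *	p:myprobe vfs_read file buf count pos
 *
 * (one entry per BTF parameter), while "$arg2:u64" would expand to
 * "buf:u64", keeping the explicit type suffix.
 */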
+void traceprobe_finish_parse(struct traceprobe_parse_context *ctx)
+{
+ clear_btf_context(ctx);
+}
+
int traceprobe_update_arg(struct probe_arg *arg)
{
struct fetch_insn *code = arg->code;
@@ -839,19 +1611,29 @@ int traceprobe_update_arg(struct probe_arg *arg)
/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)
static int __set_print_fmt(struct trace_probe *tp, char *buf, int len,
- bool is_return)
+ enum probe_print_type ptype)
{
struct probe_arg *parg;
int i, j;
int pos = 0;
const char *fmt, *arg;
- if (!is_return) {
+ switch (ptype) {
+ case PROBE_PRINT_NORMAL:
fmt = "(%lx)";
- arg = "REC->" FIELD_STRING_IP;
- } else {
+ arg = ", REC->" FIELD_STRING_IP;
+ break;
+ case PROBE_PRINT_RETURN:
fmt = "(%lx <- %lx)";
- arg = "REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP;
+ arg = ", REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP;
+ break;
+ case PROBE_PRINT_EVENT:
+ fmt = "";
+ arg = "";
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
}
pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt);
@@ -871,13 +1653,12 @@ static int __set_print_fmt(struct trace_probe *tp, char *buf, int len,
parg->type->fmt);
}
- pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg);
+ pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", arg);
for (i = 0; i < tp->nr_args; i++) {
parg = tp->args + i;
if (parg->count) {
- if ((strcmp(parg->type->name, "string") == 0) ||
- (strcmp(parg->type->name, "ustring") == 0))
+ if (parg->type->is_string)
fmt = ", __get_str(%s[%d])";
else
fmt = ", REC->%s[%d]";
@@ -885,8 +1666,7 @@ static int __set_print_fmt(struct trace_probe *tp, char *buf, int len,
pos += snprintf(buf + pos, LEN_OR_ZERO,
fmt, parg->name, j);
} else {
- if ((strcmp(parg->type->name, "string") == 0) ||
- (strcmp(parg->type->name, "ustring") == 0))
+ if (parg->type->is_string)
fmt = ", __get_str(%s)";
else
fmt = ", REC->%s";
@@ -900,20 +1680,20 @@ static int __set_print_fmt(struct trace_probe *tp, char *buf, int len,
}
#undef LEN_OR_ZERO
-int traceprobe_set_print_fmt(struct trace_probe *tp, bool is_return)
+int traceprobe_set_print_fmt(struct trace_probe *tp, enum probe_print_type ptype)
{
struct trace_event_call *call = trace_probe_event_call(tp);
int len;
char *print_fmt;
/* First: called with 0 length to calculate the needed length */
- len = __set_print_fmt(tp, NULL, 0, is_return);
+ len = __set_print_fmt(tp, NULL, 0, ptype);
print_fmt = kmalloc(len + 1, GFP_KERNEL);
if (!print_fmt)
return -ENOMEM;
/* Second: actually write the @print_fmt */
- __set_print_fmt(tp, print_fmt, len + 1, is_return);
+ __set_print_fmt(tp, print_fmt, len + 1, ptype);
call->print_fmt = print_fmt;
return 0;
@@ -1029,11 +1809,36 @@ error:
return ret;
}
+static struct trace_event_call *
+find_trace_event_call(const char *system, const char *event_name)
+{
+ struct trace_event_call *tp_event;
+ const char *name;
+
+ list_for_each_entry(tp_event, &ftrace_events, list) {
+ if (!tp_event->class->system ||
+ strcmp(system, tp_event->class->system))
+ continue;
+ name = trace_event_name(tp_event);
+ if (!name || strcmp(event_name, name))
+ continue;
+ return tp_event;
+ }
+
+ return NULL;
+}
+
int trace_probe_register_event_call(struct trace_probe *tp)
{
struct trace_event_call *call = trace_probe_event_call(tp);
int ret;
+ lockdep_assert_held(&event_mutex);
+
+ if (find_trace_event_call(trace_probe_group_name(tp),
+ trace_probe_name(tp)))
+ return -EEXIST;
+
ret = register_trace_event(&call->event);
if (!ret)
return -ENODEV;
@@ -1083,8 +1888,7 @@ int trace_probe_remove_file(struct trace_probe *tp,
return -ENOENT;
list_del_rcu(&link->list);
- synchronize_rcu();
- kfree(link);
+ kvfree_rcu_mightsleep(link);
if (list_empty(&tp->event->files))
trace_probe_clear_flag(tp, TP_FLAG_TRACE);
@@ -1134,3 +1938,47 @@ bool trace_probe_match_command_args(struct trace_probe *tp,
}
return true;
}
+
+int trace_probe_create(const char *raw_command, int (*createfn)(int, const char **))
+{
+ int argc = 0, ret = 0;
+ char **argv;
+
+ argv = argv_split(GFP_KERNEL, raw_command, &argc);
+ if (!argv)
+ return -ENOMEM;
+
+ if (argc)
+ ret = createfn(argc, (const char **)argv);
+
+ argv_free(argv);
+
+ return ret;
+}
+
+int trace_probe_print_args(struct trace_seq *s, struct probe_arg *args, int nr_args,
+ u8 *data, void *field)
+{
+ void *p;
+ int i, j;
+
+ for (i = 0; i < nr_args; i++) {
+ struct probe_arg *a = args + i;
+
+ trace_seq_printf(s, " %s=", a->name);
+ if (likely(!a->count)) {
+ if (!a->type->print(s, data + a->offset, field))
+ return -ENOMEM;
+ continue;
+ }
+ trace_seq_putc(s, '{');
+ p = data + a->offset;
+ for (j = 0; j < a->count; j++) {
+ if (!a->type->print(s, p, field))
+ return -ENOMEM;
+ trace_seq_putc(s, j == a->count - 1 ? '}' : ',');
+ p += a->type->size;
+ }
+ }
+ return 0;
+}
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index a22b62813f8c..c1877d018269 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -16,7 +16,6 @@
#include <linux/tracefs.h>
#include <linux/types.h>
#include <linux/string.h>
-#include <linux/ctype.h>
#include <linux/ptrace.h>
#include <linux/perf_event.h>
#include <linux/kprobes.h>
@@ -24,6 +23,7 @@
#include <linux/limits.h>
#include <linux/uaccess.h>
#include <linux/bitops.h>
+#include <linux/btf.h>
#include <asm/bitsperlong.h>
#include "trace.h"
@@ -33,7 +33,9 @@
#define MAX_ARGSTR_LEN 63
#define MAX_ARRAY_LEN 64
#define MAX_ARG_NAME_LEN 32
+#define MAX_BTF_ARGS_LEN 128
#define MAX_STRING_SIZE PATH_MAX
+#define MAX_ARG_BUF_LEN (MAX_TRACE_ARGS * MAX_ARG_NAME_LEN)
/* Reserved field names */
#define FIELD_STRING_IP "__probe_ip"
@@ -99,10 +101,12 @@ enum fetch_op {
FETCH_OP_ST_UMEM, /* Mem: .offset, .size */
FETCH_OP_ST_STRING, /* String: .offset, .size */
FETCH_OP_ST_USTRING, /* User String: .offset, .size */
+ FETCH_OP_ST_SYMSTR, /* Kernel Symbol String: .offset, .size */
// Stage 4 (modify) op
FETCH_OP_MOD_BF, /* Bitfield: .basesize, .lshift, .rshift */
// Stage 5 (loop) op
FETCH_OP_LP_ARRAY, /* Array: .param = loop count */
+ FETCH_OP_TP_ARG, /* Trace Point argument */
FETCH_OP_END,
FETCH_NOP_SYMBOL, /* Unresolved Symbol holder */
};
@@ -133,9 +137,10 @@ struct fetch_insn {
struct fetch_type {
const char *name; /* Name of type */
size_t size; /* Byte size of type */
- int is_signed; /* Signed flag */
+ bool is_signed; /* Signed flag */
+ bool is_string; /* String flag */
print_type_func_t print; /* Print functions */
- const char *fmt; /* Fromat string */
+ const char *fmt; /* Format string */
const char *fmttype; /* Name in format file */
};
@@ -164,6 +169,7 @@ DECLARE_BASIC_PRINT_TYPE_FUNC(x16);
DECLARE_BASIC_PRINT_TYPE_FUNC(x32);
DECLARE_BASIC_PRINT_TYPE_FUNC(x64);
+DECLARE_BASIC_PRINT_TYPE_FUNC(char);
DECLARE_BASIC_PRINT_TYPE_FUNC(string);
DECLARE_BASIC_PRINT_TYPE_FUNC(symbol);
@@ -177,16 +183,19 @@ DECLARE_BASIC_PRINT_TYPE_FUNC(symbol);
#define _ADDR_FETCH_TYPE(t) __ADDR_FETCH_TYPE(t)
#define ADDR_FETCH_TYPE _ADDR_FETCH_TYPE(BITS_PER_LONG)
-#define __ASSIGN_FETCH_TYPE(_name, ptype, ftype, _size, sign, _fmttype) \
- {.name = _name, \
+#define __ASSIGN_FETCH_TYPE(_name, ptype, ftype, _size, sign, str, _fmttype) \
+ {.name = _name, \
.size = _size, \
- .is_signed = sign, \
+ .is_signed = (bool)sign, \
+ .is_string = (bool)str, \
.print = PRINT_TYPE_FUNC_NAME(ptype), \
.fmt = PRINT_TYPE_FMT_NAME(ptype), \
.fmttype = _fmttype, \
}
+
+/* Non string types can use these macros */
#define _ASSIGN_FETCH_TYPE(_name, ptype, ftype, _size, sign, _fmttype) \
- __ASSIGN_FETCH_TYPE(_name, ptype, ftype, _size, sign, #_fmttype)
+ __ASSIGN_FETCH_TYPE(_name, ptype, ftype, _size, sign, 0, #_fmttype)
#define ASSIGN_FETCH_TYPE(ptype, ftype, sign) \
_ASSIGN_FETCH_TYPE(#ptype, ptype, ftype, sizeof(ftype), sign, ptype)
@@ -302,7 +311,7 @@ trace_probe_primary_from_call(struct trace_event_call *call)
{
struct trace_probe_event *tpe = trace_probe_event_from_call(call);
- return list_first_entry(&tpe->probes, struct trace_probe, list);
+ return list_first_entry_or_null(&tpe->probes, struct trace_probe, list);
}
static inline struct list_head *trace_probe_probe_list(struct trace_probe *tp)
@@ -342,40 +351,78 @@ struct event_file_link *trace_probe_get_file_link(struct trace_probe *tp,
int trace_probe_compare_arg_type(struct trace_probe *a, struct trace_probe *b);
bool trace_probe_match_command_args(struct trace_probe *tp,
int argc, const char **argv);
+int trace_probe_create(const char *raw_command, int (*createfn)(int, const char **));
+int trace_probe_print_args(struct trace_seq *s, struct probe_arg *args, int nr_args,
+ u8 *data, void *field);
#define trace_probe_for_each_link(pos, tp) \
list_for_each_entry(pos, &(tp)->event->files, list)
#define trace_probe_for_each_link_rcu(pos, tp) \
list_for_each_entry_rcu(pos, &(tp)->event->files, list)
-/* Check the name is good for event/group/fields */
-static inline bool is_good_name(const char *name)
-{
- if (!isalpha(*name) && *name != '_')
- return false;
- while (*++name != '\0') {
- if (!isalpha(*name) && !isdigit(*name) && *name != '_')
- return false;
- }
- return true;
-}
-
+/*
+ * The flags used for parsing trace_probe arguments.
+ * TPARG_FL_RETURN, TPARG_FL_FENTRY and TPARG_FL_TEVENT are mutually exclusive.
+ * TPARG_FL_KERNEL and TPARG_FL_USER are also mutually exclusive.
+ * TPARG_FL_FPROBE and TPARG_FL_TPOINT are optional, but they must be used
+ * together with TPARG_FL_KERNEL.
+ */
#define TPARG_FL_RETURN BIT(0)
#define TPARG_FL_KERNEL BIT(1)
#define TPARG_FL_FENTRY BIT(2)
-#define TPARG_FL_MASK GENMASK(2, 0)
+#define TPARG_FL_TEVENT BIT(3)
+#define TPARG_FL_USER BIT(4)
+#define TPARG_FL_FPROBE BIT(5)
+#define TPARG_FL_TPOINT BIT(6)
+#define TPARG_FL_LOC_MASK GENMASK(4, 0)
+
+static inline bool tparg_is_function_entry(unsigned int flags)
+{
+ return (flags & TPARG_FL_LOC_MASK) == (TPARG_FL_KERNEL | TPARG_FL_FENTRY);
+}
+
+struct traceprobe_parse_context {
+ struct trace_event_call *event;
+ /* BTF related parameters */
+ const char *funcname; /* Function name in BTF */
+ const struct btf_type *proto; /* Prototype of the function */
+ const struct btf_param *params; /* Parameter of the function */
+ s32 nr_params; /* The number of the parameters */
+ struct btf *btf; /* The BTF to be used */
+ const struct btf_type *last_type; /* Saved type */
+ u32 last_bitoffs; /* Saved bitoffs */
+ u32 last_bitsize; /* Saved bitsize */
+ unsigned int flags;
+ int offset;
+};
extern int traceprobe_parse_probe_arg(struct trace_probe *tp, int i,
- char *arg, unsigned int flags);
+ const char *argv,
+ struct traceprobe_parse_context *ctx);
+const char **traceprobe_expand_meta_args(int argc, const char *argv[],
+ int *new_argc, char *buf, int bufsize,
+ struct traceprobe_parse_context *ctx);
extern int traceprobe_update_arg(struct probe_arg *arg);
extern void traceprobe_free_probe_arg(struct probe_arg *arg);
+/*
+ * If either traceprobe_parse_probe_arg() or traceprobe_expand_meta_args() is called,
+ * this MUST be called to clean up the context and release its resources.
+ */
+void traceprobe_finish_parse(struct traceprobe_parse_context *ctx);
+
extern int traceprobe_split_symbol_offset(char *symbol, long *offset);
int traceprobe_parse_event_name(const char **pevent, const char **pgroup,
char *buf, int offset);
-extern int traceprobe_set_print_fmt(struct trace_probe *tp, bool is_return);
+enum probe_print_type {
+ PROBE_PRINT_NORMAL,
+ PROBE_PRINT_RETURN,
+ PROBE_PRINT_EVENT,
+};
+
+extern int traceprobe_set_print_fmt(struct trace_probe *tp, enum probe_print_type ptype);
#ifdef CONFIG_PERF_EVENTS
extern struct trace_event_call *
@@ -399,18 +446,23 @@ extern int traceprobe_define_arg_fields(struct trace_event_call *event_call,
C(REFCNT_OPEN_BRACE, "Reference counter brace is not closed"), \
C(BAD_REFCNT_SUFFIX, "Reference counter has wrong suffix"), \
C(BAD_UPROBE_OFFS, "Invalid uprobe offset"), \
- C(MAXACT_NO_KPROBE, "Maxactive is not for kprobe"), \
+ C(BAD_MAXACT_TYPE, "Maxactive is only for function exit"), \
C(BAD_MAXACT, "Invalid maxactive number"), \
C(MAXACT_TOO_BIG, "Maxactive is too big"), \
C(BAD_PROBE_ADDR, "Invalid probed address or symbol"), \
+ C(NON_UNIQ_SYMBOL, "The symbol is not unique"), \
C(BAD_RETPROBE, "Retprobe address must be a function entry"), \
+ C(NO_TRACEPOINT, "Tracepoint is not found"), \
+ C(BAD_ADDR_SUFFIX, "Invalid probed address suffix"), \
C(NO_GROUP_NAME, "Group name is not specified"), \
C(GROUP_TOO_LONG, "Group name is too long"), \
C(BAD_GROUP_NAME, "Group name must follow the same rules as C identifiers"), \
C(NO_EVENT_NAME, "Event name is not specified"), \
C(EVENT_TOO_LONG, "Event name is too long"), \
C(BAD_EVENT_NAME, "Event name must follow the same rules as C identifiers"), \
+ C(EVENT_EXIST, "Given group/event name is already used by another event"), \
C(RETVAL_ON_PROBE, "$retval is not available on probe"), \
+ C(NO_RETVAL, "This function returns 'void' type"), \
C(BAD_STACK_NUM, "Invalid stack number"), \
C(BAD_ARG_NUM, "Invalid argument number"), \
C(BAD_VAR, "Invalid $-variable specified"), \
@@ -433,6 +485,7 @@ extern int traceprobe_define_arg_fields(struct trace_event_call *event_call,
C(ARRAY_TOO_BIG, "Array number is too big"), \
C(BAD_TYPE, "Unknown type is specified"), \
C(BAD_STRING, "String accepts only memory argument"), \
+ C(BAD_SYMSTRING, "Symbol String doesn't accept data/userdata"), \
C(BAD_BITFIELD, "Invalid bitfield"), \
C(ARG_NAME_TOO_LONG, "Argument name is too long"), \
C(NO_ARG_NAME, "Argument name is not specified"), \
@@ -444,7 +497,26 @@ extern int traceprobe_define_arg_fields(struct trace_event_call *event_call,
C(FAIL_REG_PROBE, "Failed to register probe event"),\
C(DIFF_PROBE_TYPE, "Probe type is different from existing probe"),\
C(DIFF_ARG_TYPE, "Argument type or name is different from existing probe"),\
- C(SAME_PROBE, "There is already the exact same probe event"),
+ C(SAME_PROBE, "There is already the exact same probe event"),\
+ C(NO_EVENT_INFO, "This requires both group and event name to attach"),\
+ C(BAD_ATTACH_EVENT, "Attached event does not exist"),\
+ C(BAD_ATTACH_ARG, "Attached event does not have this field"),\
+ C(NO_EP_FILTER, "No filter rule after 'if'"), \
+ C(NOSUP_BTFARG, "BTF is not available or not supported"), \
+ C(NO_BTFARG, "This variable is not found at this probe point"),\
+ C(NO_BTF_ENTRY, "No BTF entry for this probe point"), \
+ C(BAD_VAR_ARGS, "$arg* must be an independent parameter without name etc."),\
+ C(NOFENTRY_ARGS, "$arg* can be used only on function entry"), \
+ C(DOUBLE_ARGS, "$arg* can be used only once in the parameters"), \
+ C(ARGS_2LONG, "$arg* failed because the argument list is too long"), \
+ C(ARGIDX_2BIG, "$argN index is too big"), \
+ C(NO_PTR_STRCT, "This is not a pointer to union/structure."), \
+ C(NOSUP_DAT_ARG, "Non pointer structure/union argument is not supported."),\
+ C(BAD_HYPHEN, "Failed to parse single hyphen. Forgot '>'?"), \
+ C(NO_BTF_FIELD, "This field is not found."), \
+ C(BAD_BTF_TID, "Failed to get BTF type info."),\
+ C(BAD_TYPE4STR, "This type does not fit for string."),\
+ C(NEED_STRING_TYPE, "$comm and immediate-string only accepts string type"),
#undef C
#define C(a, b) TP_ERR_##a
@@ -468,3 +540,8 @@ void __trace_probe_log_err(int offset, int err);
#define trace_probe_log_err(offs, err) \
__trace_probe_log_err(offs, TP_ERR_##err)
+
+struct uprobe_dispatch_data {
+ struct trace_uprobe *tu;
+ unsigned long bp_addr;
+};
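A minimal sketch of the parse-context contract introduced above, mirroring the trace_uprobe conversion later in this diff; parse_one_user_arg() is a hypothetical wrapper, not code from the patch:

static int parse_one_user_arg(struct trace_probe *tp, int i,
			      const char *arg, bool is_return)
{
	struct traceprobe_parse_context ctx = {
		.flags = (is_return ? TPARG_FL_RETURN : 0) | TPARG_FL_USER,
	};
	int ret;

	ret = traceprobe_parse_probe_arg(tp, i, arg, &ctx);
	/* Required whether parsing succeeded or not: releases whatever the
	 * context still holds (for kernel probes, the BTF reference). */
	traceprobe_finish_parse(&ctx);
	return ret;
}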
diff --git a/kernel/trace/trace_probe_kernel.h b/kernel/trace/trace_probe_kernel.h
new file mode 100644
index 000000000000..bb723eefd7b7
--- /dev/null
+++ b/kernel/trace/trace_probe_kernel.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __TRACE_PROBE_KERNEL_H_
+#define __TRACE_PROBE_KERNEL_H_
+
+/*
+ * This depends on trace_probe.h, but cannot include it due to
+ * the way trace_probe_tmpl.h is used by trace_kprobe.c and trace_eprobe.c.
+ * This means that any other user must include trace_probe.h before including
+ * this file.
+ */
+/* Return the length of the string -- including the terminating null byte */
+static nokprobe_inline int
+fetch_store_strlen_user(unsigned long addr)
+{
+ const void __user *uaddr = (__force const void __user *)addr;
+
+ return strnlen_user_nofault(uaddr, MAX_STRING_SIZE);
+}
+
+/* Return the length of the string -- including the terminating null byte */
+static nokprobe_inline int
+fetch_store_strlen(unsigned long addr)
+{
+ int ret, len = 0;
+ u8 c;
+
+#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+ if (addr < TASK_SIZE)
+ return fetch_store_strlen_user(addr);
+#endif
+
+ do {
+ ret = copy_from_kernel_nofault(&c, (u8 *)addr + len, 1);
+ len++;
+ } while (c && ret == 0 && len < MAX_STRING_SIZE);
+
+ return (ret < 0) ? ret : len;
+}
+
+static nokprobe_inline void set_data_loc(int ret, void *dest, void *__dest, void *base)
+{
+ if (ret < 0)
+ ret = 0;
+ *(u32 *)dest = make_data_loc(ret, __dest - base);
+}
+
+/*
+ * Fetch a null-terminated string from user. Caller MUST set *(u32 *)buf
+ * with max length and relative data location.
+ */
+static nokprobe_inline int
+fetch_store_string_user(unsigned long addr, void *dest, void *base)
+{
+ const void __user *uaddr = (__force const void __user *)addr;
+ int maxlen = get_loc_len(*(u32 *)dest);
+ void *__dest;
+ long ret;
+
+ if (unlikely(!maxlen))
+ return -ENOMEM;
+
+ __dest = get_loc_data(dest, base);
+
+ ret = strncpy_from_user_nofault(__dest, uaddr, maxlen);
+ set_data_loc(ret, dest, __dest, base);
+
+ return ret;
+}
+
+/*
+ * Fetch a null-terminated string. Caller MUST set *(u32 *)buf with max
+ * length and relative data location.
+ */
+static nokprobe_inline int
+fetch_store_string(unsigned long addr, void *dest, void *base)
+{
+ int maxlen = get_loc_len(*(u32 *)dest);
+ void *__dest;
+ long ret;
+
+#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+ if ((unsigned long)addr < TASK_SIZE)
+ return fetch_store_string_user(addr, dest, base);
+#endif
+
+ if (unlikely(!maxlen))
+ return -ENOMEM;
+
+ __dest = get_loc_data(dest, base);
+
+ /*
+ * Try to get string again, since the string can be changed while
+ * probing.
+ */
+ ret = strncpy_from_kernel_nofault(__dest, (void *)addr, maxlen);
+ set_data_loc(ret, dest, __dest, base);
+
+ return ret;
+}
+
+static nokprobe_inline int
+probe_mem_read_user(void *dest, void *src, size_t size)
+{
+ const void __user *uaddr = (__force const void __user *)src;
+
+ return copy_from_user_nofault(dest, uaddr, size);
+}
+
+static nokprobe_inline int
+probe_mem_read(void *dest, void *src, size_t size)
+{
+#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+ if ((unsigned long)src < TASK_SIZE)
+ return probe_mem_read_user(dest, src, size);
+#endif
+ return copy_from_kernel_nofault(dest, src, size);
+}
+
+#endif /* __TRACE_PROBE_KERNEL_H_ */
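The string fetchers above rely on the caller seeding the 4-byte data_loc slot before the call; a condensed sketch of that hand-off (store_one_string() and its arguments are illustrative only — the real loop is store_trace_args() in trace_probe_tmpl.h):

static int store_one_string(unsigned long addr, void *entry,
			    void *dyndata, u32 *dl, int maxlen)
{
	/* Seed the slot: upper 16 bits hold the remaining space, lower
	 * 16 bits the offset of the dynamic area within the entry. */
	*dl = make_data_loc(maxlen, dyndata - entry);

	/* fetch_store_string() copies the string and rewrites *dl via
	 * set_data_loc() with the copied length (stored as 0 on error). */
	return fetch_store_string(addr, dl, entry);
}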
diff --git a/kernel/trace/trace_probe_tmpl.h b/kernel/trace/trace_probe_tmpl.h
index e5282828f4a6..3935b347f874 100644
--- a/kernel/trace/trace_probe_tmpl.h
+++ b/kernel/trace/trace_probe_tmpl.h
@@ -54,7 +54,7 @@ fetch_apply_bitfield(struct fetch_insn *code, void *buf)
* If dest is NULL, don't store result and return required dynamic data size.
*/
static int
-process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs,
+process_fetch_insn(struct fetch_insn *code, void *rec,
void *dest, void *base);
static nokprobe_inline int fetch_store_strlen(unsigned long addr);
static nokprobe_inline int
@@ -67,6 +67,57 @@ probe_mem_read(void *dest, void *src, size_t size);
static nokprobe_inline int
probe_mem_read_user(void *dest, void *src, size_t size);
+static nokprobe_inline int
+fetch_store_symstrlen(unsigned long addr)
+{
+ char namebuf[KSYM_SYMBOL_LEN];
+ int ret;
+
+ ret = sprint_symbol(namebuf, addr);
+ if (ret < 0)
+ return 0;
+
+ return ret + 1;
+}
+
+/*
+ * Fetch a null-terminated symbol string + offset. Caller MUST set *(u32 *)buf
+ * with max length and relative data location.
+ */
+static nokprobe_inline int
+fetch_store_symstring(unsigned long addr, void *dest, void *base)
+{
+ int maxlen = get_loc_len(*(u32 *)dest);
+ void *__dest;
+
+ if (unlikely(!maxlen))
+ return -ENOMEM;
+
+ __dest = get_loc_data(dest, base);
+
+ return sprint_symbol(__dest, addr);
+}
+
+/* common part of process_fetch_insn */
+static nokprobe_inline int
+process_common_fetch_insn(struct fetch_insn *code, unsigned long *val)
+{
+ switch (code->op) {
+ case FETCH_OP_IMM:
+ *val = code->immediate;
+ break;
+ case FETCH_OP_COMM:
+ *val = (unsigned long)current->comm;
+ break;
+ case FETCH_OP_DATA:
+ *val = (unsigned long)code->data;
+ break;
+ default:
+ return -EILSEQ;
+ }
+ return 0;
+}
+
/* From the 2nd stage, routine is same */
static nokprobe_inline int
process_fetch_insn_bottom(struct fetch_insn *code, unsigned long val,
@@ -99,16 +150,22 @@ stage2:
stage3:
/* 3rd stage: store value to buffer */
if (unlikely(!dest)) {
- if (code->op == FETCH_OP_ST_STRING) {
+ switch (code->op) {
+ case FETCH_OP_ST_STRING:
ret = fetch_store_strlen(val + code->offset);
code++;
goto array;
- } else if (code->op == FETCH_OP_ST_USTRING) {
- ret += fetch_store_strlen_user(val + code->offset);
+ case FETCH_OP_ST_USTRING:
+ ret = fetch_store_strlen_user(val + code->offset);
code++;
goto array;
- } else
+ case FETCH_OP_ST_SYMSTR:
+ ret = fetch_store_symstrlen(val + code->offset);
+ code++;
+ goto array;
+ default:
return -EILSEQ;
+ }
}
switch (code->op) {
@@ -129,6 +186,10 @@ stage3:
loc = *(u32 *)dest;
ret = fetch_store_string_user(val + code->offset, dest, base);
break;
+ case FETCH_OP_ST_SYMSTR:
+ loc = *(u32 *)dest;
+ ret = fetch_store_symstring(val + code->offset, dest, base);
+ break;
default:
return -EILSEQ;
}
@@ -143,6 +204,8 @@ stage3:
array:
/* the last stage: Loop on array */
if (code->op == FETCH_OP_LP_ARRAY) {
+ if (ret < 0)
+ ret = 0;
total += ret;
if (++i < code->param) {
code = s3;
@@ -167,7 +230,7 @@ array:
return code->op == FETCH_OP_END ? ret : -EILSEQ;
}
-/* Sum up total data length for dynamic arraies (strings) */
+/* Sum up total data length for dynamic arrays (strings) */
static nokprobe_inline int
__get_data_size(struct trace_probe *tp, struct pt_regs *regs)
{
@@ -188,7 +251,7 @@ __get_data_size(struct trace_probe *tp, struct pt_regs *regs)
/* Store the value of each argument */
static nokprobe_inline void
-store_trace_args(void *data, struct trace_probe *tp, struct pt_regs *regs,
+store_trace_args(void *data, struct trace_probe *tp, void *rec,
int header_size, int maxlen)
{
struct probe_arg *arg;
@@ -203,40 +266,10 @@ store_trace_args(void *data, struct trace_probe *tp, struct pt_regs *regs,
/* Point the dynamic data area if needed */
if (unlikely(arg->dynamic))
*dl = make_data_loc(maxlen, dyndata - base);
- ret = process_fetch_insn(arg->code, regs, dl, base);
- if (unlikely(ret < 0 && arg->dynamic)) {
- *dl = make_data_loc(0, dyndata - base);
- } else {
+ ret = process_fetch_insn(arg->code, rec, dl, base);
+ if (arg->dynamic && likely(ret > 0)) {
dyndata += ret;
maxlen -= ret;
}
}
}
-
-static inline int
-print_probe_args(struct trace_seq *s, struct probe_arg *args, int nr_args,
- u8 *data, void *field)
-{
- void *p;
- int i, j;
-
- for (i = 0; i < nr_args; i++) {
- struct probe_arg *a = args + i;
-
- trace_seq_printf(s, " %s=", a->name);
- if (likely(!a->count)) {
- if (!a->type->print(s, data + a->offset, field))
- return -ENOMEM;
- continue;
- }
- trace_seq_putc(s, '{');
- p = data + a->offset;
- for (j = 0; j < a->count; j++) {
- if (!a->type->print(s, p, field))
- return -ENOMEM;
- trace_seq_putc(s, j == a->count - 1 ? '}' : ',');
- p += a->type->size;
- }
- }
- return 0;
-}
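A condensed sketch of how a backend's process_fetch_insn() is meant to use the new process_common_fetch_insn() helper for the ops shared by every backend (immediate, $comm and stored data); example_process_fetch_insn() is illustrative and mirrors the trace_uprobe conversion later in this diff:

static int example_process_fetch_insn(struct fetch_insn *code, void *rec,
				      void *dest, void *base)
{
	struct pt_regs *regs = rec;	/* this backend passes pt_regs as @rec */
	unsigned long val;
	int ret;

	/* Stage 1: backend-specific ops first, shared ops in the default. */
	switch (code->op) {
	case FETCH_OP_RETVAL:
		val = regs_return_value(regs);
		break;
	default:
		ret = process_common_fetch_insn(code, &val);
		if (ret < 0)
			return ret;
	}
	code++;

	/* Stages 2-5 (deref, store, modify, loop) are common to everyone. */
	return process_fetch_insn_bottom(code, val, dest, base);
}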
diff --git a/kernel/trace/trace_recursion_record.c b/kernel/trace/trace_recursion_record.c
new file mode 100644
index 000000000000..a520b11afb0d
--- /dev/null
+++ b/kernel/trace/trace_recursion_record.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/seq_file.h>
+#include <linux/kallsyms.h>
+#include <linux/module.h>
+#include <linux/ftrace.h>
+#include <linux/fs.h>
+
+#include "trace_output.h"
+
+struct recursed_functions {
+ unsigned long ip;
+ unsigned long parent_ip;
+};
+
+static struct recursed_functions recursed_functions[CONFIG_FTRACE_RECORD_RECURSION_SIZE];
+static atomic_t nr_records;
+
+/*
+ * Cache the last found function. Yes, updates to this are racy, but
+ * so is memory cache ;-)
+ */
+static unsigned long cached_function;
+
+void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip)
+{
+ int index = 0;
+ int i;
+ unsigned long old;
+
+ again:
+ /* First check the last one recorded */
+ if (ip == cached_function)
+ return;
+
+ i = atomic_read(&nr_records);
+ /* nr_records is -1 when clearing records */
+ smp_mb__after_atomic();
+ if (i < 0)
+ return;
+
+ /*
+	 * If there are two writers and this writer comes in second,
+ * the cmpxchg() below to update the ip will fail. Then this
+ * writer will try again. It is possible that index will now
+ * be greater than nr_records. This is because the writer
+ * that succeeded has not updated the nr_records yet.
+ * This writer could keep trying again until the other writer
+ * updates nr_records. But if the other writer takes an
+ * interrupt, and that interrupt locks up that CPU, we do
+ * not want this CPU to lock up due to the recursion protection,
+ * and have a bug report showing this CPU as the cause of
+ * locking up the computer. To not lose this record, this
+ * writer will simply use the next position to update the
+ * recursed_functions, and it will update the nr_records
+ * accordingly.
+ */
+ if (index < i)
+ index = i;
+ if (index >= CONFIG_FTRACE_RECORD_RECURSION_SIZE)
+ return;
+
+ for (i = index - 1; i >= 0; i--) {
+ if (recursed_functions[i].ip == ip) {
+ cached_function = ip;
+ return;
+ }
+ }
+
+ cached_function = ip;
+
+ /*
+ * We only want to add a function if it hasn't been added before.
+ * Add to the current location before incrementing the count.
+ * If it fails to add, then increment the index (save in i)
+ * and try again.
+ */
+ old = cmpxchg(&recursed_functions[index].ip, 0, ip);
+ if (old != 0) {
+		/* Did something else already add this for us? */
+ if (old == ip)
+ return;
+ /* Try the next location (use i for the next index) */
+ index++;
+ goto again;
+ }
+
+ recursed_functions[index].parent_ip = parent_ip;
+
+ /*
+ * It's still possible that we could race with the clearing
+ * CPU0 CPU1
+ * ---- ----
+ * ip = func
+ * nr_records = -1;
+ * recursed_functions[0] = 0;
+ * i = -1
+ * if (i < 0)
+ * nr_records = 0;
+ * (new recursion detected)
+ * recursed_functions[0] = func
+ * cmpxchg(recursed_functions[0],
+ * func, 0)
+ *
+	 * But the worst that could happen is that we get a zero in
+ * the recursed_functions array, and it's likely that "func" will
+ * be recorded again.
+ */
+ i = atomic_read(&nr_records);
+ smp_mb__after_atomic();
+ if (i < 0)
+ cmpxchg(&recursed_functions[index].ip, ip, 0);
+ else if (i <= index)
+ atomic_cmpxchg(&nr_records, i, index + 1);
+}
+EXPORT_SYMBOL_GPL(ftrace_record_recursion);
+
+static DEFINE_MUTEX(recursed_function_lock);
+static struct trace_seq *tseq;
+
+static void *recursed_function_seq_start(struct seq_file *m, loff_t *pos)
+{
+ void *ret = NULL;
+ int index;
+
+ mutex_lock(&recursed_function_lock);
+ index = atomic_read(&nr_records);
+ if (*pos < index) {
+ ret = &recursed_functions[*pos];
+ }
+
+ tseq = kzalloc(sizeof(*tseq), GFP_KERNEL);
+ if (!tseq)
+ return ERR_PTR(-ENOMEM);
+
+ trace_seq_init(tseq);
+
+ return ret;
+}
+
+static void *recursed_function_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ int index;
+ int p;
+
+ index = atomic_read(&nr_records);
+ p = ++(*pos);
+
+ return p < index ? &recursed_functions[p] : NULL;
+}
+
+static void recursed_function_seq_stop(struct seq_file *m, void *v)
+{
+ kfree(tseq);
+ mutex_unlock(&recursed_function_lock);
+}
+
+static int recursed_function_seq_show(struct seq_file *m, void *v)
+{
+ struct recursed_functions *record = v;
+ int ret = 0;
+
+ if (record) {
+ trace_seq_print_sym(tseq, record->parent_ip, true);
+ trace_seq_puts(tseq, ":\t");
+ trace_seq_print_sym(tseq, record->ip, true);
+ trace_seq_putc(tseq, '\n');
+ ret = trace_print_seq(m, tseq);
+ }
+
+ return ret;
+}
+
+static const struct seq_operations recursed_function_seq_ops = {
+ .start = recursed_function_seq_start,
+ .next = recursed_function_seq_next,
+ .stop = recursed_function_seq_stop,
+ .show = recursed_function_seq_show
+};
+
+static int recursed_function_open(struct inode *inode, struct file *file)
+{
+ int ret = 0;
+
+ mutex_lock(&recursed_function_lock);
+ /* If this file was opened for write, then erase contents */
+ if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
+ /* disable updating records */
+ atomic_set(&nr_records, -1);
+ smp_mb__after_atomic();
+ memset(recursed_functions, 0, sizeof(recursed_functions));
+ smp_wmb();
+ /* enable them again */
+ atomic_set(&nr_records, 0);
+ }
+ if (file->f_mode & FMODE_READ)
+ ret = seq_open(file, &recursed_function_seq_ops);
+ mutex_unlock(&recursed_function_lock);
+
+ return ret;
+}
+
+static ssize_t recursed_function_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return count;
+}
+
+static int recursed_function_release(struct inode *inode, struct file *file)
+{
+ if (file->f_mode & FMODE_READ)
+ seq_release(inode, file);
+ return 0;
+}
+
+static const struct file_operations recursed_functions_fops = {
+ .open = recursed_function_open,
+ .write = recursed_function_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = recursed_function_release,
+};
+
+__init static int create_recursed_functions(void)
+{
+
+ trace_create_file("recursed_functions", TRACE_MODE_WRITE,
+ NULL, NULL, &recursed_functions_fops);
+ return 0;
+}
+
+fs_initcall(create_recursed_functions);
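For illustration only, the slot-claiming step of ftrace_record_recursion() reduces to the pattern below; claim_slot() is a toy helper written against the same recursed_functions table, not code from the file above:

/* Try to claim the first free slot at or after @index by flipping its
 * ip from 0 to @ip with cmpxchg(); if the race is lost to a different
 * ip, move on to the next slot rather than retrying the same one. */
static bool claim_slot(struct recursed_functions *tbl, int nr_slots,
		       int index, unsigned long ip)
{
	unsigned long old;

	for (; index < nr_slots; index++) {
		old = cmpxchg(&tbl[index].ip, 0, ip);
		if (old == 0 || old == ip)
			return true;	/* claimed, or already recorded */
	}
	return false;			/* table is full */
}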
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index e304196d7c28..c9ffdcfe622e 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -22,7 +22,8 @@ static DEFINE_MUTEX(sched_register_mutex);
static void
probe_sched_switch(void *ignore, bool preempt,
- struct task_struct *prev, struct task_struct *next)
+ struct task_struct *prev, struct task_struct *next,
+ unsigned int prev_state)
{
int flags;
@@ -44,7 +45,7 @@ probe_sched_wakeup(void *ignore, struct task_struct *wakee)
if (!flags)
return;
- tracing_record_taskinfo(current, flags);
+ tracing_record_taskinfo_sched_switch(current, wakee, flags);
}
static int tracing_sched_register(void)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 97b10bb31a1f..0469a04a355f 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -26,9 +26,9 @@ static struct task_struct *wakeup_task;
static int wakeup_cpu;
static int wakeup_current_cpu;
static unsigned wakeup_prio = -1;
-static int wakeup_rt;
-static int wakeup_dl;
-static int tracing_dl = 0;
+static bool wakeup_rt;
+static bool wakeup_dl;
+static bool tracing_dl;
static arch_spinlock_t wakeup_lock =
(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
@@ -67,7 +67,7 @@ static bool function_enabled;
static int
func_prolog_preempt_disable(struct trace_array *tr,
struct trace_array_cpu **data,
- int *pc)
+ unsigned int *trace_ctx)
{
long disabled;
int cpu;
@@ -75,7 +75,7 @@ func_prolog_preempt_disable(struct trace_array *tr,
if (likely(!wakeup_task))
return 0;
- *pc = preempt_count();
+ *trace_ctx = tracing_gen_ctx();
preempt_disable_notrace();
cpu = raw_smp_processor_id();
@@ -116,8 +116,8 @@ static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
struct trace_array *tr = wakeup_trace;
struct trace_array_cpu *data;
- unsigned long flags;
- int pc, ret = 0;
+ unsigned int trace_ctx;
+ int ret = 0;
if (ftrace_graph_ignore_func(trace))
return 0;
@@ -131,11 +131,10 @@ static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
if (ftrace_graph_notrace_addr(trace->func))
return 1;
- if (!func_prolog_preempt_disable(tr, &data, &pc))
+ if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
return 0;
- local_save_flags(flags);
- ret = __trace_graph_entry(tr, trace, flags, pc);
+ ret = __trace_graph_entry(tr, trace, trace_ctx);
atomic_dec(&data->disabled);
preempt_enable_notrace();
@@ -146,16 +145,14 @@ static void wakeup_graph_return(struct ftrace_graph_ret *trace)
{
struct trace_array *tr = wakeup_trace;
struct trace_array_cpu *data;
- unsigned long flags;
- int pc;
+ unsigned int trace_ctx;
ftrace_graph_addr_finish(trace);
- if (!func_prolog_preempt_disable(tr, &data, &pc))
+ if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
return;
- local_save_flags(flags);
- __trace_graph_return(tr, trace, flags, pc);
+ __trace_graph_return(tr, trace, trace_ctx);
atomic_dec(&data->disabled);
preempt_enable_notrace();
@@ -171,6 +168,8 @@ static void wakeup_trace_open(struct trace_iterator *iter)
{
if (is_graph(iter->tr))
graph_trace_open(iter);
+ else
+ iter->private = NULL;
}
static void wakeup_trace_close(struct trace_iterator *iter)
@@ -212,18 +211,18 @@ static void wakeup_print_header(struct seq_file *s)
*/
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *op, struct pt_regs *pt_regs)
+ struct ftrace_ops *op, struct ftrace_regs *fregs)
{
struct trace_array *tr = wakeup_trace;
struct trace_array_cpu *data;
unsigned long flags;
- int pc;
+ unsigned int trace_ctx;
- if (!func_prolog_preempt_disable(tr, &data, &pc))
+ if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
return;
local_irq_save(flags);
- trace_function(tr, ip, parent_ip, flags, pc);
+ trace_function(tr, ip, parent_ip, trace_ctx);
local_irq_restore(flags);
atomic_dec(&data->disabled);
@@ -303,12 +302,12 @@ static void wakeup_print_header(struct seq_file *s)
static void
__trace_function(struct trace_array *tr,
unsigned long ip, unsigned long parent_ip,
- unsigned long flags, int pc)
+ unsigned int trace_ctx)
{
if (is_graph(tr))
- trace_graph_function(tr, ip, parent_ip, flags, pc);
+ trace_graph_function(tr, ip, parent_ip, trace_ctx);
else
- trace_function(tr, ip, parent_ip, flags, pc);
+ trace_function(tr, ip, parent_ip, trace_ctx);
}
static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
@@ -375,7 +374,7 @@ static void
tracing_sched_switch_trace(struct trace_array *tr,
struct task_struct *prev,
struct task_struct *next,
- unsigned long flags, int pc)
+ unsigned int trace_ctx)
{
struct trace_event_call *call = &event_context_switch;
struct trace_buffer *buffer = tr->array_buffer.buffer;
@@ -383,7 +382,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
struct ctx_switch_entry *entry;
event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
- sizeof(*entry), flags, pc);
+ sizeof(*entry), trace_ctx);
if (!event)
return;
entry = ring_buffer_event_data(event);
@@ -396,14 +395,14 @@ tracing_sched_switch_trace(struct trace_array *tr,
entry->next_cpu = task_cpu(next);
if (!call_filter_check_discard(call, entry, buffer, event))
- trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
+ trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
}
static void
tracing_sched_wakeup_trace(struct trace_array *tr,
struct task_struct *wakee,
struct task_struct *curr,
- unsigned long flags, int pc)
+ unsigned int trace_ctx)
{
struct trace_event_call *call = &event_wakeup;
struct ring_buffer_event *event;
@@ -411,7 +410,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
struct trace_buffer *buffer = tr->array_buffer.buffer;
event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
- sizeof(*entry), flags, pc);
+ sizeof(*entry), trace_ctx);
if (!event)
return;
entry = ring_buffer_event_data(event);
@@ -424,19 +423,20 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
entry->next_cpu = task_cpu(wakee);
if (!call_filter_check_discard(call, entry, buffer, event))
- trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
+ trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
}
static void notrace
probe_wakeup_sched_switch(void *ignore, bool preempt,
- struct task_struct *prev, struct task_struct *next)
+ struct task_struct *prev, struct task_struct *next,
+ unsigned int prev_state)
{
struct trace_array_cpu *data;
u64 T0, T1, delta;
unsigned long flags;
long disabled;
int cpu;
- int pc;
+ unsigned int trace_ctx;
tracing_record_cmdline(prev);
@@ -455,8 +455,6 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
if (next != wakeup_task)
return;
- pc = preempt_count();
-
/* disable local data, not wakeup_cpu data */
cpu = raw_smp_processor_id();
disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
@@ -464,6 +462,8 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
goto out;
local_irq_save(flags);
+ trace_ctx = tracing_gen_ctx_flags(flags);
+
arch_spin_lock(&wakeup_lock);
/* We could race with grabbing wakeup_lock */
@@ -473,9 +473,9 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
/* The task we are waiting for is waking up */
data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
- __trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
- tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
- __trace_stack(wakeup_trace, flags, 0, pc);
+ __trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, trace_ctx);
+ tracing_sched_switch_trace(wakeup_trace, prev, next, trace_ctx);
+ __trace_stack(wakeup_trace, trace_ctx, 0);
T0 = data->preempt_timestamp;
T1 = ftrace_now(cpu);
@@ -501,7 +501,7 @@ static void __wakeup_reset(struct trace_array *tr)
{
wakeup_cpu = -1;
wakeup_prio = -1;
- tracing_dl = 0;
+ tracing_dl = false;
if (wakeup_task)
put_task_struct(wakeup_task);
@@ -527,9 +527,8 @@ probe_wakeup(void *ignore, struct task_struct *p)
{
struct trace_array_cpu *data;
int cpu = smp_processor_id();
- unsigned long flags;
long disabled;
- int pc;
+ unsigned int trace_ctx;
if (likely(!tracer_enabled))
return;
@@ -550,11 +549,12 @@ probe_wakeup(void *ignore, struct task_struct *p)
(!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
return;
- pc = preempt_count();
disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
if (unlikely(disabled != 1))
goto out;
+ trace_ctx = tracing_gen_ctx();
+
/* interrupts should be off from try_to_wake_up */
arch_spin_lock(&wakeup_lock);
@@ -575,25 +575,23 @@ probe_wakeup(void *ignore, struct task_struct *p)
* another task until the first one wakes up.
*/
if (dl_task(p))
- tracing_dl = 1;
+ tracing_dl = true;
else
- tracing_dl = 0;
+ tracing_dl = false;
wakeup_task = get_task_struct(p);
- local_save_flags(flags);
-
data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
data->preempt_timestamp = ftrace_now(cpu);
- tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
- __trace_stack(wakeup_trace, flags, 0, pc);
+ tracing_sched_wakeup_trace(wakeup_trace, p, current, trace_ctx);
+ __trace_stack(wakeup_trace, trace_ctx, 0);
/*
* We must be careful in using CALLER_ADDR2. But since wake_up
* is not called by an assembly function (where as schedule is)
* it should be safe to use it here.
*/
- __trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
+ __trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, trace_ctx);
out_locked:
arch_spin_unlock(&wakeup_lock);
@@ -690,8 +688,8 @@ static int wakeup_tracer_init(struct trace_array *tr)
if (wakeup_busy)
return -EBUSY;
- wakeup_dl = 0;
- wakeup_rt = 0;
+ wakeup_dl = false;
+ wakeup_rt = false;
return __wakeup_tracer_init(tr);
}
@@ -700,8 +698,8 @@ static int wakeup_rt_tracer_init(struct trace_array *tr)
if (wakeup_busy)
return -EBUSY;
- wakeup_dl = 0;
- wakeup_rt = 1;
+ wakeup_dl = false;
+ wakeup_rt = true;
return __wakeup_tracer_init(tr);
}
@@ -710,8 +708,8 @@ static int wakeup_dl_tracer_init(struct trace_array *tr)
if (wakeup_busy)
return -EBUSY;
- wakeup_dl = 1;
- wakeup_rt = 0;
+ wakeup_dl = true;
+ wakeup_rt = false;
return __wakeup_tracer_init(tr);
}
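Throughout these hunks the old (irq flags, preempt count) pair is folded into one packed trace_ctx word; a brief sketch of the two generators involved (my_record() is a hypothetical caller):

static void my_record(struct trace_array *tr, unsigned long ip,
		      unsigned long parent_ip)
{
	unsigned long flags;
	unsigned int trace_ctx;

	/* No saved flags in hand: sample the current irq/preempt state. */
	trace_ctx = tracing_gen_ctx();
	trace_function(tr, ip, parent_ip, trace_ctx);

	/* Flags already saved around a critical section: derive the
	 * context word from them instead of re-reading the state. */
	local_irq_save(flags);
	trace_ctx = tracing_gen_ctx_flags(flags);
	trace_function(tr, ip, parent_ip, trace_ctx);
	local_irq_restore(flags);
}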
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index b5e3496cf803..529590499b1f 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -107,7 +107,7 @@ static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
unsigned long pip,
struct ftrace_ops *op,
- struct pt_regs *pt_regs)
+ struct ftrace_regs *fregs)
{
trace_selftest_test_probe1_cnt++;
}
@@ -116,7 +116,7 @@ static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
unsigned long pip,
struct ftrace_ops *op,
- struct pt_regs *pt_regs)
+ struct ftrace_regs *fregs)
{
trace_selftest_test_probe2_cnt++;
}
@@ -125,7 +125,7 @@ static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
unsigned long pip,
struct ftrace_ops *op,
- struct pt_regs *pt_regs)
+ struct ftrace_regs *fregs)
{
trace_selftest_test_probe3_cnt++;
}
@@ -134,7 +134,7 @@ static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
unsigned long pip,
struct ftrace_ops *op,
- struct pt_regs *pt_regs)
+ struct ftrace_regs *fregs)
{
trace_selftest_test_global_cnt++;
}
@@ -143,24 +143,21 @@ static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
unsigned long pip,
struct ftrace_ops *op,
- struct pt_regs *pt_regs)
+ struct ftrace_regs *fregs)
{
trace_selftest_test_dyn_cnt++;
}
static struct ftrace_ops test_probe1 = {
.func = trace_selftest_test_probe1_func,
- .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
static struct ftrace_ops test_probe2 = {
.func = trace_selftest_test_probe2_func,
- .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
static struct ftrace_ops test_probe3 = {
.func = trace_selftest_test_probe3_func,
- .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
static void print_counts(void)
@@ -290,6 +287,40 @@ static int trace_selftest_ops(struct trace_array *tr, int cnt)
if (trace_selftest_test_probe3_cnt != 4)
goto out_free;
+ /* Remove trace function from probe 3 */
+ func1_name = "!" __stringify(DYN_FTRACE_TEST_NAME);
+ len1 = strlen(func1_name);
+
+ ftrace_set_filter(&test_probe3, func1_name, len1, 0);
+
+ DYN_FTRACE_TEST_NAME();
+
+ print_counts();
+
+ if (trace_selftest_test_probe1_cnt != 3)
+ goto out_free;
+ if (trace_selftest_test_probe2_cnt != 2)
+ goto out_free;
+ if (trace_selftest_test_probe3_cnt != 4)
+ goto out_free;
+ if (cnt > 1) {
+ if (trace_selftest_test_global_cnt == 0)
+ goto out_free;
+ }
+ if (trace_selftest_test_dyn_cnt == 0)
+ goto out_free;
+
+ DYN_FTRACE_TEST_NAME2();
+
+ print_counts();
+
+ if (trace_selftest_test_probe1_cnt != 3)
+ goto out_free;
+ if (trace_selftest_test_probe2_cnt != 3)
+ goto out_free;
+ if (trace_selftest_test_probe3_cnt != 5)
+ goto out_free;
+
ret = 0;
out_free:
unregister_ftrace_function(dyn_ops);
@@ -417,7 +448,7 @@ static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
unsigned long pip,
struct ftrace_ops *op,
- struct pt_regs *pt_regs)
+ struct ftrace_regs *fregs)
{
/*
* This function is registered without the recursion safe flag.
@@ -432,7 +463,7 @@ static void trace_selftest_test_recursion_func(unsigned long ip,
static void trace_selftest_test_recursion_safe_func(unsigned long ip,
unsigned long pip,
struct ftrace_ops *op,
- struct pt_regs *pt_regs)
+ struct ftrace_regs *fregs)
{
/*
* We said we would provide our own recursion. By calling
@@ -448,11 +479,11 @@ static void trace_selftest_test_recursion_safe_func(unsigned long ip,
static struct ftrace_ops test_rec_probe = {
.func = trace_selftest_test_recursion_func,
+ .flags = FTRACE_OPS_FL_RECURSION,
};
static struct ftrace_ops test_recsafe_probe = {
.func = trace_selftest_test_recursion_safe_func,
- .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
static int
@@ -492,8 +523,13 @@ trace_selftest_function_recursion(void)
unregister_ftrace_function(&test_rec_probe);
ret = -1;
- if (trace_selftest_recursion_cnt != 1) {
- pr_cont("*callback not called once (%d)* ",
+ /*
+	 * Recursion allows for transitions between contexts,
+ * and may call the callback twice.
+ */
+ if (trace_selftest_recursion_cnt != 1 &&
+ trace_selftest_recursion_cnt != 2) {
+ pr_cont("*callback not called once (or twice) (%d)* ",
trace_selftest_recursion_cnt);
goto out;
}
@@ -546,9 +582,11 @@ static enum {
static void trace_selftest_test_regs_func(unsigned long ip,
unsigned long pip,
struct ftrace_ops *op,
- struct pt_regs *pt_regs)
+ struct ftrace_regs *fregs)
{
- if (pt_regs)
+ struct pt_regs *regs = ftrace_get_regs(fregs);
+
+ if (regs)
trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
else
trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
@@ -556,7 +594,7 @@ static void trace_selftest_test_regs_func(unsigned long ip,
static struct ftrace_ops test_regs_probe = {
.func = trace_selftest_test_regs_func,
- .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
+ .flags = FTRACE_OPS_FL_SAVE_REGS,
};
static int
@@ -746,6 +784,10 @@ static struct fgraph_ops fgraph_ops __initdata = {
.retfunc = &trace_graph_return,
};
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+static struct ftrace_ops direct;
+#endif
+
/*
 * Pretty much the same as for the function tracer from which the selftest
* has been borrowed.
@@ -756,6 +798,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
{
int ret;
unsigned long count;
+ char *func_name __maybe_unused;
#ifdef CONFIG_DYNAMIC_FTRACE
if (ftrace_filter_param) {
@@ -782,7 +825,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
/* Have we just recovered from a hang? */
if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
- tracing_selftest_disabled = true;
+ disable_tracing_selftest("recovering from a hang");
ret = -1;
goto out;
}
@@ -804,8 +847,72 @@ trace_selftest_startup_function_graph(struct tracer *trace,
goto out;
}
- /* Don't test dynamic tracing, the function tracer already did */
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+ /*
+ * These tests can take some time to run. Make sure on non PREEMPT
+ * kernels, we do not trigger the softlockup detector.
+ */
+ cond_resched();
+
+ tracing_reset_online_cpus(&tr->array_buffer);
+ set_graph_array(tr);
+ /*
+ * Some archs *cough*PowerPC*cough* add characters to the
+ * start of the function names. We simply put a '*' to
+ * accommodate them.
+ */
+ func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
+ ftrace_set_global_filter(func_name, strlen(func_name), 1);
+
+ /*
+ * Register direct function together with graph tracer
+ * and make sure we get graph trace.
+ */
+ ftrace_set_filter_ip(&direct, (unsigned long)DYN_FTRACE_TEST_NAME, 0, 0);
+ ret = register_ftrace_direct(&direct,
+ (unsigned long)ftrace_stub_direct_tramp);
+ if (ret)
+ goto out;
+
+ cond_resched();
+
+ ret = register_ftrace_graph(&fgraph_ops);
+ if (ret) {
+ warn_failed_init_tracer(trace, ret);
+ goto out;
+ }
+
+ DYN_FTRACE_TEST_NAME();
+
+ count = 0;
+
+ tracing_stop();
+ /* check the trace buffer */
+ ret = trace_test_buffer(&tr->array_buffer, &count);
+
+ unregister_ftrace_graph(&fgraph_ops);
+
+ ret = unregister_ftrace_direct(&direct,
+ (unsigned long)ftrace_stub_direct_tramp,
+ true);
+ if (ret)
+ goto out;
+
+ cond_resched();
+
+ tracing_start();
+
+ if (!ret && !count) {
+ ret = -1;
+ goto out;
+ }
+
+ /* Enable tracing on all functions again */
+ ftrace_set_global_filter(NULL, 0, 1);
+#endif
+
+ /* Don't test dynamic tracing, the function tracer already did */
out:
/* Stop it if we failed */
if (ret)
@@ -874,7 +981,7 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
int ret;
/*
- * Now that the big kernel lock is no longer preemptable,
+ * Now that the big kernel lock is no longer preemptible,
* and this is called with the BKL held, it will always
* fail. If preemption is already disabled, simply
* pass the test. When the BKL is removed, or becomes
@@ -936,7 +1043,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
int ret;
/*
- * Now that the big kernel lock is no longer preemptable,
+ * Now that the big kernel lock is no longer preemptible,
* and this is called with the BKL held, it will always
* fail. If preemption is already disabled, simply
* pass the test. When the BKL is removed, or becomes
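The selftest hunks above track the ftrace callback signature change from struct pt_regs to struct ftrace_regs; a minimal sketch of a callback under the new signature (my_callback() and my_ops are illustrative names):

#include <linux/ftrace.h>
#include <linux/printk.h>

static void my_callback(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/* Full registers are only guaranteed when the ops set
	 * FTRACE_OPS_FL_SAVE_REGS; otherwise this may return NULL. */
	struct pt_regs *regs = ftrace_get_regs(fregs);

	if (regs)
		pr_debug("traced %ps with full pt_regs\n", (void *)ip);
}

static struct ftrace_ops my_ops = {
	.func	= my_callback,
	.flags	= FTRACE_OPS_FL_SAVE_REGS,
};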
diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c
index 1d84fcc78e3e..c158d65a8a88 100644
--- a/kernel/trace/trace_seq.c
+++ b/kernel/trace/trace_seq.c
@@ -13,10 +13,7 @@
* trace_seq_init() more than once to reset the trace_seq to start
* from scratch.
*
- * The buffer size is currently PAGE_SIZE, although it may become dynamic
- * in the future.
- *
- * A write to the buffer will either succed or fail. That is, unlike
+ * A write to the buffer will either succeed or fail. That is, unlike
* sprintf() there will not be a partial write (well it may write into
* the buffer but it wont update the pointers). This allows users to
* try to write something into the trace_seq buffer and if it fails
@@ -73,7 +70,7 @@ int trace_print_seq(struct seq_file *m, struct trace_seq *s)
* @fmt: printf format string
*
* The tracer may use either sequence operations or its own
- * copy to user routines. To simplify formating of a trace
+ * copy to user routines. To simplify formatting of a trace
* trace_seq_printf() is used to store strings into a special
* buffer (@s). Then the output may be either used by
* the sequencer or pulled into another buffer.
@@ -131,9 +128,10 @@ EXPORT_SYMBOL_GPL(trace_seq_bitmask);
* trace_seq_vprintf - sequence printing of trace information
* @s: trace sequence descriptor
* @fmt: printf format string
+ * @args: Arguments for the format string
*
* The tracer may use either sequence operations or its own
- * copy to user routines. To simplify formating of a trace
+ * copy to user routines. To simplify formatting of a trace
* trace_seq_printf is used to store strings into a special
* buffer (@s). Then the output may be either used by
* the sequencer or pulled into another buffer.
@@ -226,7 +224,7 @@ EXPORT_SYMBOL_GPL(trace_seq_puts);
* @c: simple character to record
*
* The tracer may use either the sequence operations or its own
- * copy to user routines. This function records a simple charater
+ * copy to user routines. This function records a simple character
* into a special buffer (@s) for later retrieval by a sequencer
* or other mechanism.
*/
@@ -348,7 +346,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
EXPORT_SYMBOL_GPL(trace_seq_path);
/**
- * trace_seq_to_user - copy the squence buffer to user space
+ * trace_seq_to_user - copy the sequence buffer to user space
* @s: trace sequence descriptor
* @ubuf: The userspace memory location to copy to
* @cnt: The amount to copy
@@ -363,14 +361,18 @@ EXPORT_SYMBOL_GPL(trace_seq_path);
*
* On failure it returns -EBUSY if all of the content in the
* sequence has been already read, which includes nothing in the
- * sequenc (@s->len == @s->readpos).
+ * sequence (@s->len == @s->readpos).
*
* Returns -EFAULT if the copy to userspace fails.
*/
int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, int cnt)
{
+ int ret;
__trace_seq_init(s);
- return seq_buf_to_user(&s->seq, ubuf, cnt);
+ ret = seq_buf_to_user(&s->seq, ubuf, s->readpos, cnt);
+ if (ret > 0)
+ s->readpos += ret;
+ return ret;
}
EXPORT_SYMBOL_GPL(trace_seq_to_user);
@@ -403,3 +405,26 @@ int trace_seq_hex_dump(struct trace_seq *s, const char *prefix_str,
return 1;
}
EXPORT_SYMBOL(trace_seq_hex_dump);
+
+/*
+ * trace_seq_acquire - acquire seq buffer with size @len
+ * @s: trace sequence descriptor
+ * @len: size of buffer to be acquired
+ *
+ * Acquire a buffer of @len bytes from the trace_seq for output use;
+ * the caller can then fill that buffer with a string.
+ *
+ * Returns the start address of the acquired buffer.
+ *
+ * It allows multiple acquisitions in one trace output function call.
+ */
+char *trace_seq_acquire(struct trace_seq *s, unsigned int len)
+{
+ char *ret = trace_seq_buffer_ptr(s);
+
+ if (!WARN_ON_ONCE(seq_buf_buffer_left(&s->seq) < len))
+ seq_buf_commit(&s->seq, len);
+
+ return ret;
+}
+EXPORT_SYMBOL(trace_seq_acquire);
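A short sketch of the intended calling pattern for trace_seq_acquire(); copy_preformatted() is a hypothetical caller. Because the space is committed up front, the caller checks availability and writes exactly @len bytes:

static void copy_preformatted(struct trace_seq *s, const char *src,
			      unsigned int len)
{
	char *p;

	/* trace_seq_acquire() WARNs and skips the commit when fewer than
	 * @len bytes remain, so bail out early in that case. */
	if (seq_buf_buffer_left(&s->seq) < len)
		return;

	p = trace_seq_acquire(s, len);	/* commits @len bytes immediately */
	memcpy(p, src, len);		/* fill the reserved region */
}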
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 98bba4764c52..5a48dba912ea 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -290,7 +290,7 @@ static void check_stack(unsigned long ip, unsigned long *stack)
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *op, struct pt_regs *pt_regs)
+ struct ftrace_ops *op, struct ftrace_regs *fregs)
{
unsigned long stack;
@@ -318,7 +318,6 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
static struct ftrace_ops trace_ops __read_mostly =
{
.func = stack_trace_call,
- .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
static ssize_t
@@ -554,20 +553,20 @@ __setup("stacktrace", enable_stacktrace);
static __init int stack_trace_init(void)
{
- struct dentry *d_tracer;
+ int ret;
- d_tracer = tracing_init_dentry();
- if (IS_ERR(d_tracer))
+ ret = tracing_init_dentry();
+ if (ret)
return 0;
- trace_create_file("stack_max_size", 0644, d_tracer,
+ trace_create_file("stack_max_size", TRACE_MODE_WRITE, NULL,
&stack_trace_max_size, &stack_max_size_fops);
- trace_create_file("stack_trace", 0444, d_tracer,
+ trace_create_file("stack_trace", TRACE_MODE_READ, NULL,
NULL, &stack_trace_fops);
#ifdef CONFIG_DYNAMIC_FTRACE
- trace_create_file("stack_trace_filter", 0644, d_tracer,
+ trace_create_file("stack_trace_filter", TRACE_MODE_WRITE, NULL,
&trace_ops, &stack_trace_filter_fops);
#endif
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index d1fa19773cc8..bb247beec447 100644
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -276,13 +276,13 @@ static const struct file_operations tracing_stat_fops = {
static int tracing_stat_init(void)
{
- struct dentry *d_tracing;
+ int ret;
- d_tracing = tracing_init_dentry();
- if (IS_ERR(d_tracing))
+ ret = tracing_init_dentry();
+ if (ret)
return -ENODEV;
- stat_dir = tracefs_create_dir("trace_stat", d_tracing);
+ stat_dir = tracefs_create_dir("trace_stat", NULL);
if (!stat_dir) {
pr_warn("Could not create tracefs 'trace_stat' entry\n");
return -ENOMEM;
@@ -297,9 +297,9 @@ static int init_stat_file(struct stat_session *session)
if (!stat_dir && (ret = tracing_stat_init()))
return ret;
- session->file = tracefs_create_file(session->ts->name, 0644,
- stat_dir,
- session, &tracing_stat_fops);
+ session->file = tracefs_create_file(session->ts->name, TRACE_MODE_WRITE,
+ stat_dir, session,
+ &tracing_stat_fops);
if (!session->file)
return -ENOMEM;
return 0;
diff --git a/kernel/trace/trace_synth.h b/kernel/trace/trace_synth.h
index ac35c45207c4..43f6fb6078db 100644
--- a/kernel/trace/trace_synth.h
+++ b/kernel/trace/trace_synth.h
@@ -5,17 +5,20 @@
#include "trace_dynevent.h"
#define SYNTH_SYSTEM "synthetic"
-#define SYNTH_FIELDS_MAX 32
+#define SYNTH_FIELDS_MAX 64
-#define STR_VAR_LEN_MAX 32 /* must be multiple of sizeof(u64) */
+#define STR_VAR_LEN_MAX MAX_FILTER_STR_VAL /* must be multiple of sizeof(u64) */
struct synth_field {
char *type;
char *name;
size_t size;
unsigned int offset;
+ unsigned int field_pos;
bool is_signed;
bool is_string;
+ bool is_dynamic;
+ bool is_stack;
};
struct synth_event {
@@ -24,6 +27,8 @@ struct synth_event {
char *name;
struct synth_field **fields;
unsigned int n_fields;
+ struct synth_field **dynamic_fields;
+ unsigned int n_dynamic_fields;
unsigned int n_u64;
struct trace_event_class class;
struct trace_event_call call;
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index d85a2f0f316b..9c581d6da843 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -154,7 +154,7 @@ print_syscall_enter(struct trace_iterator *iter, int flags,
goto end;
/* parameter types */
- if (tr->trace_flags & TRACE_ITER_VERBOSE)
+ if (tr && tr->trace_flags & TRACE_ITER_VERBOSE)
trace_seq_printf(s, "%s ", entry->types[i]);
/* parameter values */
@@ -201,8 +201,6 @@ print_syscall_exit(struct trace_iterator *iter, int flags,
return trace_handle_return(s);
}
-extern char *__bad_type_size(void);
-
#define SYSCALL_FIELD(_type, _name) { \
.type = #_type, .name = #_name, \
.size = sizeof(_type), .align = __alignof__(_type), \
@@ -296,11 +294,8 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
struct trace_event_file *trace_file;
struct syscall_trace_enter *entry;
struct syscall_metadata *sys_data;
- struct ring_buffer_event *event;
- struct trace_buffer *buffer;
- unsigned long irq_flags;
+ struct trace_event_buffer fbuffer;
unsigned long args[6];
- int pc;
int syscall_nr;
int size;
@@ -322,22 +317,16 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;
- local_save_flags(irq_flags);
- pc = preempt_count();
-
- buffer = tr->array_buffer.buffer;
- event = trace_buffer_lock_reserve(buffer,
- sys_data->enter_event->event.type, size, irq_flags, pc);
- if (!event)
+ entry = trace_event_buffer_reserve(&fbuffer, trace_file, size);
+ if (!entry)
return;
- entry = ring_buffer_event_data(event);
+ entry = ring_buffer_event_data(fbuffer.event);
entry->nr = syscall_nr;
syscall_get_arguments(current, regs, args);
memcpy(entry->args, args, sizeof(unsigned long) * sys_data->nb_args);
- event_trigger_unlock_commit(trace_file, buffer, event, entry,
- irq_flags, pc);
+ trace_event_buffer_commit(&fbuffer);
}
static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
@@ -346,10 +335,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
struct trace_event_file *trace_file;
struct syscall_trace_exit *entry;
struct syscall_metadata *sys_data;
- struct ring_buffer_event *event;
- struct trace_buffer *buffer;
- unsigned long irq_flags;
- int pc;
+ struct trace_event_buffer fbuffer;
int syscall_nr;
syscall_nr = trace_get_syscall_nr(current, regs);
@@ -368,22 +354,15 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
if (!sys_data)
return;
- local_save_flags(irq_flags);
- pc = preempt_count();
-
- buffer = tr->array_buffer.buffer;
- event = trace_buffer_lock_reserve(buffer,
- sys_data->exit_event->event.type, sizeof(*entry),
- irq_flags, pc);
- if (!event)
+ entry = trace_event_buffer_reserve(&fbuffer, trace_file, sizeof(*entry));
+ if (!entry)
return;
- entry = ring_buffer_event_data(event);
+ entry = ring_buffer_event_data(fbuffer.event);
entry->nr = syscall_nr;
entry->ret = syscall_get_return_value(current, regs);
- event_trigger_unlock_commit(trace_file, buffer, event, entry,
- irq_flags, pc);
+ trace_event_buffer_commit(&fbuffer);
}
static int reg_event_syscall_enter(struct trace_event_file *file,
@@ -576,12 +555,15 @@ static int perf_call_bpf_enter(struct trace_event_call *call, struct pt_regs *re
struct syscall_trace_enter *rec)
{
struct syscall_tp_t {
- unsigned long long regs;
- unsigned long syscall_nr;
+ struct trace_entry ent;
+ int syscall_nr;
unsigned long args[SYSCALL_DEFINE_MAXARGS];
- } param;
+ } __aligned(8) param;
int i;
+ BUILD_BUG_ON(sizeof(param.ent) < sizeof(void *));
+
+ /* bpf prog requires 'regs' to be the first member in the ctx (a.k.a. &param) */
*(struct pt_regs **)&param = regs;
param.syscall_nr = rec->nr;
for (i = 0; i < sys_data->nb_args; i++)
@@ -678,11 +660,12 @@ static int perf_call_bpf_exit(struct trace_event_call *call, struct pt_regs *reg
struct syscall_trace_exit *rec)
{
struct syscall_tp_t {
- unsigned long long regs;
- unsigned long syscall_nr;
+ struct trace_entry ent;
+ int syscall_nr;
unsigned long ret;
- } param;
+ } __aligned(8) param;
+ /* bpf prog requires 'regs' to be the first member in the ctx (a.k.a. &param) */
*(struct pt_regs **)&param = regs;
param.syscall_nr = rec->nr;
param.ret = rec->ret;
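The two syscall hunks above switch to the common trace_event_buffer_reserve()/trace_event_buffer_commit() pair; a condensed sketch of that pattern (record_exit_example() is illustrative only):

static void record_exit_example(struct trace_event_file *trace_file,
				int syscall_nr, long retval)
{
	struct trace_event_buffer fbuffer;
	struct syscall_trace_exit *entry;

	/* Reserves ring-buffer space and fills the common entry header;
	 * returns NULL when the event is soft-disabled or out of space. */
	entry = trace_event_buffer_reserve(&fbuffer, trace_file, sizeof(*entry));
	if (!entry)
		return;

	entry->nr = syscall_nr;
	entry->ret = retval;

	/* Applies filters/triggers, then commits or discards the event. */
	trace_event_buffer_commit(&fbuffer);
}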
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index fdd47f99b18f..a84b85d8aac1 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -7,6 +7,7 @@
*/
#define pr_fmt(fmt) "trace_uprobe: " fmt
+#include <linux/bpf-cgroup.h>
#include <linux/security.h>
#include <linux/ctype.h>
#include <linux/module.h>
@@ -15,6 +16,7 @@
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/rculist.h>
+#include <linux/filter.h>
#include "trace_dynevent.h"
#include "trace_probe.h"
@@ -34,7 +36,7 @@ struct uprobe_trace_entry_head {
#define DATAOF_TRACE_ENTRY(entry, is_return) \
((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
-static int trace_uprobe_create(int argc, const char **argv);
+static int trace_uprobe_create(const char *raw_command);
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_uprobe_release(struct dyn_event *ev);
static bool trace_uprobe_is_busy(struct dyn_event *ev);
@@ -83,18 +85,9 @@ static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
for_each_dyn_event(dpos) \
if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))
-#define SIZEOF_TRACE_UPROBE(n) \
- (offsetof(struct trace_uprobe, tp.args) + \
- (sizeof(struct probe_arg) * (n)))
-
static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);
-struct uprobe_dispatch_data {
- struct trace_uprobe *tu;
- unsigned long bp_addr;
-};
-
static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
unsigned long func, struct pt_regs *regs);
@@ -158,7 +151,7 @@ fetch_store_string(unsigned long addr, void *dest, void *base)
return -ENOMEM;
if (addr == FETCH_TOKEN_COMM)
- ret = strlcpy(dst, current->comm, maxlen);
+ ret = strscpy(dst, current->comm, maxlen);
else
ret = strncpy_from_user(dst, src, maxlen);
if (ret >= 0) {
@@ -172,7 +165,8 @@ fetch_store_string(unsigned long addr, void *dest, void *base)
*/
ret++;
*(u32 *)dest = make_data_loc(ret, (void *)dst - base);
- }
+ } else
+ *(u32 *)dest = make_data_loc(0, (void *)dst - base);
return ret;
}
@@ -217,10 +211,12 @@ static unsigned long translate_user_vaddr(unsigned long file_offset)
/* Note that we don't verify it, since the code does not come from user space */
static int
-process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
+process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
void *base)
{
+ struct pt_regs *regs = rec;
unsigned long val;
+ int ret;
/* 1st stage: get value from context */
switch (code->op) {
@@ -236,20 +232,16 @@ process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
case FETCH_OP_RETVAL:
val = regs_return_value(regs);
break;
- case FETCH_OP_IMM:
- val = code->immediate;
- break;
case FETCH_OP_COMM:
val = FETCH_TOKEN_COMM;
break;
- case FETCH_OP_DATA:
- val = (unsigned long)code->data;
- break;
case FETCH_OP_FOFFS:
val = translate_user_vaddr(code->immediate);
break;
default:
- return -EILSEQ;
+ ret = process_common_fetch_insn(code, &val);
+ if (ret < 0)
+ return ret;
}
code++;
@@ -314,7 +306,8 @@ static bool trace_uprobe_match(const char *system, const char *event,
{
struct trace_uprobe *tu = to_trace_uprobe(ev);
- return strcmp(trace_probe_name(&tu->tp), event) == 0 &&
+ return (event[0] == '\0' ||
+ strcmp(trace_probe_name(&tu->tp), event) == 0) &&
(!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0) &&
trace_uprobe_match_command_head(tu, argc, argv);
}
@@ -340,7 +333,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
struct trace_uprobe *tu;
int ret;
- tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
+ tu = kzalloc(struct_size(tu, tp.args, nargs), GFP_KERNEL);
if (!tu)
return ERR_PTR(-ENOMEM);
@@ -393,6 +386,10 @@ static int unregister_trace_uprobe(struct trace_uprobe *tu)
if (trace_probe_has_sibling(&tu->tp))
goto unreg;
+ /* If there's a reference to the dynamic event */
+ if (trace_event_dyn_busy(trace_probe_event_call(&tu->tp)))
+ return -EBUSY;
+
ret = unregister_uprobe_event(tu);
if (ret)
return ret;
@@ -408,12 +405,10 @@ static bool trace_uprobe_has_same_uprobe(struct trace_uprobe *orig,
struct trace_uprobe *comp)
{
struct trace_probe_event *tpe = orig->tp.event;
- struct trace_probe *pos;
struct inode *comp_inode = d_real_inode(comp->path.dentry);
int i;
- list_for_each_entry(pos, &tpe->probes, list) {
- orig = container_of(pos, struct trace_uprobe, tp);
+ list_for_each_entry(orig, &tpe->probes, tp.list) {
if (comp_inode != d_real_inode(orig->path.dentry) ||
comp->offset != orig->offset)
continue;
@@ -455,7 +450,7 @@ static int append_trace_uprobe(struct trace_uprobe *tu, struct trace_uprobe *to)
/* Append to existing event */
ret = trace_probe_append(&tu->tp, &to->tp);
if (!ret)
- dyn_event_add(&tu->devent);
+ dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp));
return ret;
}
@@ -514,11 +509,15 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
ret = register_uprobe_event(tu);
if (ret) {
- pr_warn("Failed to register probe event(%d)\n", ret);
+ if (ret == -EEXIST) {
+ trace_probe_log_set_index(0);
+ trace_probe_log_err(0, EVENT_EXIST);
+ } else
+ pr_warn("Failed to register probe event(%d)\n", ret);
goto end;
}
- dyn_event_add(&tu->devent);
+ dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp));
end:
mutex_unlock(&event_mutex);
@@ -528,20 +527,21 @@ end:
/*
* Argument syntax:
- * - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
+ * - Add uprobe: p|r[:[GRP/][EVENT]] PATH:OFFSET[%return][(REF)] [FETCHARGS]
*/
-static int trace_uprobe_create(int argc, const char **argv)
+static int __trace_uprobe_create(int argc, const char **argv)
{
struct trace_uprobe *tu;
const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
char *arg, *filename, *rctr, *rctr_end, *tmp;
char buf[MAX_EVENT_NAME_LEN];
+ char gbuf[MAX_EVENT_NAME_LEN];
+ enum probe_print_type ptype;
struct path path;
unsigned long offset, ref_ctr_offset;
bool is_return = false;
int i, ret;
- ret = 0;
ref_ctr_offset = 0;
switch (argv[0][0]) {
@@ -617,6 +617,19 @@ static int trace_uprobe_create(int argc, const char **argv)
}
}
+ /* Check if there is %return suffix */
+ tmp = strchr(arg, '%');
+ if (tmp) {
+ if (!strcmp(tmp, "%return")) {
+ *tmp = '\0';
+ is_return = true;
+ } else {
+ trace_probe_log_err(tmp - filename, BAD_ADDR_SUFFIX);
+ ret = -EINVAL;
+ goto fail_address_parse;
+ }
+ }
+
/* Parse uprobe offset. */
ret = kstrtoul(arg, 0, &offset);
if (ret) {
@@ -627,11 +640,13 @@ static int trace_uprobe_create(int argc, const char **argv)
/* setup a probe */
trace_probe_log_set_index(0);
if (event) {
- ret = traceprobe_parse_event_name(&event, &group, buf,
+ ret = traceprobe_parse_event_name(&event, &group, gbuf,
event - argv[0]);
if (ret)
goto fail_address_parse;
- } else {
+ }
+
+ if (!event) {
char *tail;
char *ptr;
@@ -667,21 +682,19 @@ static int trace_uprobe_create(int argc, const char **argv)
/* parse arguments */
for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
- tmp = kstrdup(argv[i], GFP_KERNEL);
- if (!tmp) {
- ret = -ENOMEM;
- goto error;
- }
+ struct traceprobe_parse_context ctx = {
+ .flags = (is_return ? TPARG_FL_RETURN : 0) | TPARG_FL_USER,
+ };
trace_probe_log_set_index(i + 2);
- ret = traceprobe_parse_probe_arg(&tu->tp, i, tmp,
- is_return ? TPARG_FL_RETURN : 0);
- kfree(tmp);
+ ret = traceprobe_parse_probe_arg(&tu->tp, i, argv[i], &ctx);
+ traceprobe_finish_parse(&ctx);
if (ret)
goto error;
}
- ret = traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu));
+ ptype = is_ret_probe(tu) ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
+ ret = traceprobe_set_print_fmt(&tu->tp, ptype);
if (ret < 0)
goto error;
@@ -703,14 +716,19 @@ fail_address_parse:
return ret;
}
-static int create_or_delete_trace_uprobe(int argc, char **argv)
+int trace_uprobe_create(const char *raw_command)
+{
+ return trace_probe_create(raw_command, __trace_uprobe_create);
+}
+
+static int create_or_delete_trace_uprobe(const char *raw_command)
{
int ret;
- if (argv[0][0] == '-')
- return dyn_event_release(argc, argv, &trace_uprobe_ops);
+ if (raw_command[0] == '-')
+ return dyn_event_release(raw_command, &trace_uprobe_ops);
- ret = trace_uprobe_create(argc, (const char **)argv);
+ ret = trace_uprobe_create(raw_command);
return ret == -ECANCELED ? -EINVAL : ret;
}
@@ -931,8 +949,7 @@ static void __uprobe_trace_func(struct trace_uprobe *tu,
struct trace_event_file *trace_file)
{
struct uprobe_trace_entry_head *entry;
- struct trace_buffer *buffer;
- struct ring_buffer_event *event;
+ struct trace_event_buffer fbuffer;
void *data;
int size, esize;
struct trace_event_call *call = trace_probe_event_call(&tu->tp);
@@ -947,12 +964,10 @@ static void __uprobe_trace_func(struct trace_uprobe *tu,
esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
size = esize + tu->tp.size + dsize;
- event = trace_event_buffer_lock_reserve(&buffer, trace_file,
- call->event.type, size, 0, 0);
- if (!event)
+ entry = trace_event_buffer_reserve(&fbuffer, trace_file, size);
+ if (!entry)
return;
- entry = ring_buffer_event_data(event);
if (is_ret_probe(tu)) {
entry->vaddr[0] = func;
entry->vaddr[1] = instruction_pointer(regs);
@@ -964,7 +979,7 @@ static void __uprobe_trace_func(struct trace_uprobe *tu,
memcpy(data, ucb->buf, tu->tp.size + dsize);
- event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
+ trace_event_buffer_commit(&fbuffer);
}
/* uprobe handler */
@@ -1023,7 +1038,7 @@ print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *e
data = DATAOF_TRACE_ENTRY(entry, false);
}
- if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
+ if (trace_probe_print_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
goto out;
trace_seq_putc(s, '\n');
@@ -1057,14 +1072,12 @@ static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
static void __probe_event_disable(struct trace_probe *tp)
{
- struct trace_probe *pos;
struct trace_uprobe *tu;
tu = container_of(tp, struct trace_uprobe, tp);
WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
- list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
- tu = container_of(pos, struct trace_uprobe, tp);
+ list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
if (!tu->inode)
continue;
@@ -1076,7 +1089,7 @@ static void __probe_event_disable(struct trace_probe *tp)
static int probe_event_enable(struct trace_event_call *call,
struct trace_event_file *file, filter_func_t filter)
{
- struct trace_probe *pos, *tp;
+ struct trace_probe *tp;
struct trace_uprobe *tu;
bool enabled;
int ret;
@@ -1111,8 +1124,7 @@ static int probe_event_enable(struct trace_event_call *call,
if (ret)
goto err_flags;
- list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
- tu = container_of(pos, struct trace_uprobe, tp);
+ list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
ret = trace_uprobe_enable(tu, filter);
if (ret) {
__probe_event_disable(tp);
@@ -1257,7 +1269,7 @@ static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
static int uprobe_perf_close(struct trace_event_call *call,
struct perf_event *event)
{
- struct trace_probe *pos, *tp;
+ struct trace_probe *tp;
struct trace_uprobe *tu;
int ret = 0;
@@ -1269,8 +1281,7 @@ static int uprobe_perf_close(struct trace_event_call *call,
if (trace_uprobe_filter_remove(tu->tp.event->filter, event))
return 0;
- list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
- tu = container_of(pos, struct trace_uprobe, tp);
+ list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
ret = uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
if (ret)
break;
@@ -1282,7 +1293,7 @@ static int uprobe_perf_close(struct trace_event_call *call,
static int uprobe_perf_open(struct trace_event_call *call,
struct perf_event *event)
{
- struct trace_probe *pos, *tp;
+ struct trace_probe *tp;
struct trace_uprobe *tu;
int err = 0;
@@ -1294,7 +1305,7 @@ static int uprobe_perf_open(struct trace_event_call *call,
if (trace_uprobe_filter_add(tu->tp.event->filter, event))
return 0;
- list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
+ list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
if (err) {
uprobe_perf_close(call, event);
@@ -1333,15 +1344,15 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
int size, esize;
int rctx;
+#ifdef CONFIG_BPF_EVENTS
if (bpf_prog_array_valid(call)) {
u32 ret;
- preempt_disable();
- ret = trace_call_bpf(call, regs);
- preempt_enable();
+ ret = bpf_prog_run_array_uprobe(call->prog_array, regs, bpf_prog_run);
if (!ret)
return;
}
+#endif /* CONFIG_BPF_EVENTS */
esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
@@ -1403,7 +1414,7 @@ static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
const char **filename, u64 *probe_offset,
- bool perf_type_tracepoint)
+ u64 *probe_addr, bool perf_type_tracepoint)
{
const char *pevent = trace_event_name(event->tp_event);
const char *group = event->tp_event->class->system;
@@ -1420,6 +1431,7 @@ int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
: BPF_FD_TYPE_UPROBE;
*filename = tu->filename;
*probe_offset = tu->offset;
+ *probe_addr = 0;
return 0;
}
#endif /* CONFIG_PERF_EVENTS */
@@ -1456,7 +1468,6 @@ trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
default:
return 0;
}
- return 0;
}
static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
@@ -1568,6 +1579,7 @@ struct trace_event_call *
create_local_trace_uprobe(char *name, unsigned long offs,
unsigned long ref_ctr_offset, bool is_return)
{
+ enum probe_print_type ptype;
struct trace_uprobe *tu;
struct path path;
int ret;
@@ -1600,9 +1612,15 @@ create_local_trace_uprobe(char *name, unsigned long offs,
tu->path = path;
tu->ref_ctr_offset = ref_ctr_offset;
tu->filename = kstrdup(name, GFP_KERNEL);
+ if (!tu->filename) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
init_trace_event_call(tu);
- if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
+ ptype = is_ret_probe(tu) ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
+ if (traceprobe_set_print_fmt(&tu->tp, ptype) < 0) {
ret = -ENOMEM;
goto error;
}
@@ -1623,24 +1641,23 @@ void destroy_local_trace_uprobe(struct trace_event_call *event_call)
}
#endif /* CONFIG_PERF_EVENTS */
-/* Make a trace interface for controling probe points */
+/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
- struct dentry *d_tracer;
int ret;
ret = dyn_event_register(&trace_uprobe_ops);
if (ret)
return ret;
- d_tracer = tracing_init_dentry();
- if (IS_ERR(d_tracer))
+ ret = tracing_init_dentry();
+ if (ret)
return 0;
- trace_create_file("uprobe_events", 0644, d_tracer,
+ trace_create_file("uprobe_events", TRACE_MODE_WRITE, NULL,
NULL, &uprobe_events_ops);
/* Profile interface */
- trace_create_file("uprobe_profile", 0444, d_tracer,
+ trace_create_file("uprobe_profile", TRACE_MODE_READ, NULL,
NULL, &uprobe_profile_ops);
return 0;
}
diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c
index 74738c9856f1..a4dcf0f24352 100644
--- a/kernel/trace/tracing_map.c
+++ b/kernel/trace/tracing_map.c
@@ -15,6 +15,7 @@
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/sort.h>
+#include <linux/kmemleak.h>
#include "tracing_map.h"
#include "trace.h"
@@ -260,7 +261,7 @@ int tracing_map_add_var(struct tracing_map *map)
* to use cmp_fn.
*
* A key can be a subset of a compound key; for that purpose, the
- * offset param is used to describe where within the the compound key
+ * offset param is used to describe where within the compound key
* the key referenced by this key field resides.
*
* Return: The index identifying the field in the map and associated
@@ -307,6 +308,7 @@ static void tracing_map_array_free(struct tracing_map_array *a)
for (i = 0; i < a->n_pages; i++) {
if (!a->pages[i])
break;
+ kmemleak_free(a->pages[i]);
free_page((unsigned long)a->pages[i]);
}
@@ -342,6 +344,7 @@ static struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts,
a->pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
if (!a->pages[i])
goto free;
+ kmemleak_alloc(a->pages[i], PAGE_SIZE, 1, GFP_KERNEL);
}
out:
return a;
@@ -571,7 +574,12 @@ __tracing_map_insert(struct tracing_map *map, void *key, bool lookup_only)
}
memcpy(elt->key, key, map->key_size);
- entry->val = elt;
+ /*
+ * Ensure the initialization is visible and
+ * publish the elt.
+ */
+ smp_wmb();
+ WRITE_ONCE(entry->val, elt);
atomic64_inc(&map->hits);
return entry->val;
@@ -609,7 +617,7 @@ __tracing_map_insert(struct tracing_map *map, void *key, bool lookup_only)
* signal that state. There are two user-visible tracing_map
* variables, 'hits' and 'drops', which are updated by this function.
* Every time an element is either successfully inserted or retrieved,
- * the 'hits' value is incrememented. Every time an element insertion
+ * the 'hits' value is incremented. Every time an element insertion
* fails, the 'drops' value is incremented.
*
* This is a lock-free tracing map insertion function implementing a
@@ -642,9 +650,9 @@ struct tracing_map_elt *tracing_map_insert(struct tracing_map *map, void *key)
* tracing_map_elt. This is a lock-free lookup; see
* tracing_map_insert() for details on tracing_map and how it works.
* Every time an element is retrieved, the 'hits' value is
- * incrememented. There is one user-visible tracing_map variable,
+ * incremented. There is one user-visible tracing_map variable,
* 'hits', which is updated by this function. Every time an element
- * is successfully retrieved, the 'hits' value is incrememented. The
+ * is successfully retrieved, the 'hits' value is incremented. The
* 'drops' value is never updated by this function.
*
* Return: the tracing_map_elt pointer val associated with the key.
@@ -834,29 +842,35 @@ int tracing_map_init(struct tracing_map *map)
return err;
}
-static int cmp_entries_dup(const struct tracing_map_sort_entry **a,
- const struct tracing_map_sort_entry **b)
+static int cmp_entries_dup(const void *A, const void *B)
{
+ const struct tracing_map_sort_entry *a, *b;
int ret = 0;
- if (memcmp((*a)->key, (*b)->key, (*a)->elt->map->key_size))
+ a = *(const struct tracing_map_sort_entry **)A;
+ b = *(const struct tracing_map_sort_entry **)B;
+
+ if (memcmp(a->key, b->key, a->elt->map->key_size))
ret = 1;
return ret;
}
-static int cmp_entries_sum(const struct tracing_map_sort_entry **a,
- const struct tracing_map_sort_entry **b)
+static int cmp_entries_sum(const void *A, const void *B)
{
const struct tracing_map_elt *elt_a, *elt_b;
+ const struct tracing_map_sort_entry *a, *b;
struct tracing_map_sort_key *sort_key;
struct tracing_map_field *field;
tracing_map_cmp_fn_t cmp_fn;
void *val_a, *val_b;
int ret = 0;
- elt_a = (*a)->elt;
- elt_b = (*b)->elt;
+ a = *(const struct tracing_map_sort_entry **)A;
+ b = *(const struct tracing_map_sort_entry **)B;
+
+ elt_a = a->elt;
+ elt_b = b->elt;
sort_key = &elt_a->map->sort_key;
@@ -873,18 +887,21 @@ static int cmp_entries_sum(const struct tracing_map_sort_entry **a,
return ret;
}
-static int cmp_entries_key(const struct tracing_map_sort_entry **a,
- const struct tracing_map_sort_entry **b)
+static int cmp_entries_key(const void *A, const void *B)
{
const struct tracing_map_elt *elt_a, *elt_b;
+ const struct tracing_map_sort_entry *a, *b;
struct tracing_map_sort_key *sort_key;
struct tracing_map_field *field;
tracing_map_cmp_fn_t cmp_fn;
void *val_a, *val_b;
int ret = 0;
- elt_a = (*a)->elt;
- elt_b = (*b)->elt;
+ a = *(const struct tracing_map_sort_entry **)A;
+ b = *(const struct tracing_map_sort_entry **)B;
+
+ elt_a = a->elt;
+ elt_b = b->elt;
sort_key = &elt_a->map->sort_key;
@@ -949,7 +966,7 @@ create_sort_entry(void *key, struct tracing_map_elt *elt)
static void detect_dups(struct tracing_map_sort_entry **sort_entries,
int n_entries, unsigned int key_size)
{
- unsigned int dups = 0, total_dups = 0;
+ unsigned int total_dups = 0;
int i;
void *key;
@@ -962,11 +979,10 @@ static void detect_dups(struct tracing_map_sort_entry **sort_entries,
key = sort_entries[0]->key;
for (i = 1; i < n_entries; i++) {
if (!memcmp(sort_entries[i]->key, key, key_size)) {
- dups++; total_dups++;
+ total_dups++;
continue;
}
key = sort_entries[i]->key;
- dups = 0;
}
WARN_ONCE(total_dups > 0,
@@ -989,10 +1005,8 @@ static void sort_secondary(struct tracing_map *map,
struct tracing_map_sort_key *primary_key,
struct tracing_map_sort_key *secondary_key)
{
- int (*primary_fn)(const struct tracing_map_sort_entry **,
- const struct tracing_map_sort_entry **);
- int (*secondary_fn)(const struct tracing_map_sort_entry **,
- const struct tracing_map_sort_entry **);
+ int (*primary_fn)(const void *, const void *);
+ int (*secondary_fn)(const void *, const void *);
unsigned i, start = 0, n_sub = 1;
if (is_key(map, primary_key->field_idx))
@@ -1035,7 +1049,8 @@ static void sort_secondary(struct tracing_map *map,
/**
* tracing_map_sort_entries - Sort the current set of tracing_map_elts in a map
* @map: The tracing_map
- * @sort_key: The sort key to use for sorting
+ * @sort_keys: The sort key to use for sorting
+ * @n_sort_keys: hitcount, always have at least one
* @sort_entries: outval: pointer to allocated and sorted array of entries
*
* tracing_map_sort_entries() sorts the current set of entries in the
@@ -1061,8 +1076,7 @@ int tracing_map_sort_entries(struct tracing_map *map,
unsigned int n_sort_keys,
struct tracing_map_sort_entry ***sort_entries)
{
- int (*cmp_entries_fn)(const struct tracing_map_sort_entry **,
- const struct tracing_map_sort_entry **);
+ int (*cmp_entries_fn)(const void *, const void *);
struct tracing_map_sort_entry *sort_entry, **entries;
int i, n_entries, ret;
diff --git a/kernel/trace/tracing_map.h b/kernel/trace/tracing_map.h
index a6de61fc22de..99c37eeebc16 100644
--- a/kernel/trace/tracing_map.h
+++ b/kernel/trace/tracing_map.h
@@ -50,7 +50,7 @@ typedef int (*tracing_map_cmp_fn_t) (void *val_a, void *val_b);
* an instance of tracing_map_elt, where 'elt' in the latter part of
* that variable name is short for 'element'. The purpose of a
* tracing_map_elt is to hold values specific to the particular
- * 32-bit hashed key it's assocated with. Things such as the unique
+ * 32-bit hashed key it's associated with. Things such as the unique
* set of aggregated sums associated with the 32-bit hashed key, along
* with a copy of the full key associated with the entry, and which
* was used to produce the 32-bit hashed key.
@@ -272,10 +272,6 @@ extern u64 tracing_map_read_sum(struct tracing_map_elt *elt, unsigned int i);
extern u64 tracing_map_read_var(struct tracing_map_elt *elt, unsigned int i);
extern u64 tracing_map_read_var_once(struct tracing_map_elt *elt, unsigned int i);
-extern void tracing_map_set_field_descr(struct tracing_map *map,
- unsigned int i,
- unsigned int key_offset,
- tracing_map_cmp_fn_t cmp_fn);
extern int
tracing_map_sort_entries(struct tracing_map *map,
struct tracing_map_sort_key *sort_keys,