author    Steven Rostedt (VMware) <rostedt@goodmis.org>    2018-12-04 13:35:45 -0500
committer Steven Rostedt (VMware) <rostedt@goodmis.org>    2018-12-10 12:22:10 -0500
commit    7e1413edd6194a9807aa5f3ac0378b9b4b9da879 (patch)
tree      8357aadd8a15de615bf2fbb39cbd412a9697d165 /kernel/trace/trace_events.c
parent    0e2b81f7b52a1c1a8c46986f9ca01eb7b3c421f8 (diff)
tracing: Consolidate trace_add/remove_event_call back to the nolock functions
The trace_add/remove_event_call_nolock() functions were added to allow the
trace_add/remove_event_call() code to be called when the event_mutex lock
was already taken. Now that all callers are done within the event_mutex,
there's no reason to have two different interfaces. Remove the current
wrapper trace_add/remove_event_call()s and rename the _nolock versions back
to the original names.

Link: http://lkml.kernel.org/r/154140866955.17322.2081425494660638846.stgit@devbox

Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
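For context, the pattern this commit removes is a lock-taking wrapper around a
_nolock worker. Below is a minimal userspace sketch of that pattern, assuming
pthreads; the names event_mutex, register_call(), and add_event_call*() are
illustrative stand-ins, not the kernel's actual code.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t event_mutex = PTHREAD_MUTEX_INITIALIZER;

static int register_call(const char *name)
{
	printf("registered %s\n", name);
	return 0;
}

static int add_event_call_nolock(const char *name)
{
	/* Worker: assumes the caller already holds event_mutex. */
	return register_call(name);
}

static int add_event_call(const char *name)
{
	/*
	 * Wrapper: takes the lock on behalf of callers that do not
	 * hold it. Once every caller already holds event_mutex, this
	 * wrapper is dead weight; the commit deletes it and renames
	 * the _nolock worker back to the plain name.
	 */
	int ret;

	pthread_mutex_lock(&event_mutex);
	ret = add_event_call_nolock(name);
	pthread_mutex_unlock(&event_mutex);
	return ret;
}

int main(void)
{
	return add_event_call("sample_event");
}

With all callers inside event_mutex, the wrapper buys nothing but a second
exported name and a redundant lock/unlock round trip, which is why the two
interfaces are collapsed into one.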
Diffstat (limited to 'kernel/trace/trace_events.c')
-rw-r--r--    kernel/trace/trace_events.c    30
1 file changed, 4 insertions(+), 26 deletions(-)
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index a3b157f689ee..bd0162c0467c 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -2305,7 +2305,8 @@ __trace_early_add_new_event(struct trace_event_call *call,
struct ftrace_module_file_ops;
static void __add_event_to_tracers(struct trace_event_call *call);
-int trace_add_event_call_nolock(struct trace_event_call *call)
+/* Add an additional event_call dynamically */
+int trace_add_event_call(struct trace_event_call *call)
{
int ret;
lockdep_assert_held(&event_mutex);
@@ -2320,17 +2321,6 @@ int trace_add_event_call_nolock(struct trace_event_call *call)
return ret;
}
-/* Add an additional event_call dynamically */
-int trace_add_event_call(struct trace_event_call *call)
-{
- int ret;
-
- mutex_lock(&event_mutex);
- ret = trace_add_event_call_nolock(call);
- mutex_unlock(&event_mutex);
- return ret;
-}
-
/*
* Must be called under locking of trace_types_lock, event_mutex and
* trace_event_sem.
@@ -2376,8 +2366,8 @@ static int probe_remove_event_call(struct trace_event_call *call)
return 0;
}
-/* no event_mutex version */
-int trace_remove_event_call_nolock(struct trace_event_call *call)
+/* Remove an event_call */
+int trace_remove_event_call(struct trace_event_call *call)
{
int ret;
@@ -2392,18 +2382,6 @@ int trace_remove_event_call_nolock(struct trace_event_call *call)
return ret;
}
-/* Remove an event_call */
-int trace_remove_event_call(struct trace_event_call *call)
-{
- int ret;
-
- mutex_lock(&event_mutex);
- ret = trace_remove_event_call_nolock(call);
- mutex_unlock(&event_mutex);
-
- return ret;
-}
-
#define for_each_event(event, start, end) \
for (event = start; \
(unsigned long)event < (unsigned long)end; \
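As the hunks above show, the consolidated functions now assert, rather than
take, the lock via lockdep_assert_held(&event_mutex), so the locking contract
moves to the caller. Below is a minimal userspace analog of that assertion,
assuming pthreads; the owner-tracking helpers are an illustrative stand-in
for the kernel's lockdep, not its implementation.

#include <assert.h>
#include <pthread.h>

static pthread_mutex_t event_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_t event_mutex_owner;
static int event_mutex_held;

static void event_mutex_lock(void)
{
	pthread_mutex_lock(&event_mutex);
	event_mutex_owner = pthread_self();
	event_mutex_held = 1;
}

static void event_mutex_unlock(void)
{
	event_mutex_held = 0;
	pthread_mutex_unlock(&event_mutex);
}

/* Analog of lockdep_assert_held(): the caller must already hold the lock. */
static void assert_event_mutex_held(void)
{
	assert(event_mutex_held &&
	       pthread_equal(event_mutex_owner, pthread_self()));
}

static int add_event_call(void)
{
	assert_event_mutex_held();	/* trips for callers that forgot the lock */
	return 0;
}

int main(void)
{
	int ret;

	event_mutex_lock();
	ret = add_event_call();
	event_mutex_unlock();
	return ret;
}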