Diffstat (limited to 'kernel/trace/trace_clock.c')
-rw-r--r--  kernel/trace/trace_clock.c | 29 +++++++++++++++++++++++++----
1 file changed, 25 insertions(+), 4 deletions(-)
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 685a67d55db0..26dc348332b7 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -21,8 +21,6 @@
#include <linux/ktime.h>
#include <linux/trace_clock.h>
-#include "trace.h"
-
/*
* trace_clock_local(): the simplest and least coherent tracing clock.
*
@@ -44,9 +42,10 @@ u64 notrace trace_clock_local(void)
return clock;
}
+EXPORT_SYMBOL_GPL(trace_clock_local);
/*
- * trace_clock(): 'inbetween' trace clock. Not completely serialized,
+ * trace_clock(): 'between' trace clock. Not completely serialized,
* but not completely incorrect when crossing CPUs either.
*
* This is based on cpu_clock(), which will allow at most ~1 jiffy of
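
With trace_clock_local() now exported to GPL modules, code outside the core
tracer can timestamp events with the cheapest clock available. A minimal
sketch under that assumption; the module name and everything below other
than trace_clock_local() itself are hypothetical, not part of this patch:

#include <linux/module.h>
#include <linux/trace_clock.h>

static int __init clocksample_init(void)
{
	/* Two back-to-back local timestamps, in nanoseconds. Per the
	 * comment above, deltas are only meaningful on a single CPU. */
	u64 t0 = trace_clock_local();
	u64 t1 = trace_clock_local();

	pr_info("trace_clock_local() delta: %llu ns\n",
		(unsigned long long)(t1 - t0));
	return 0;
}

static void __exit clocksample_exit(void)
{
}

module_init(clocksample_init);
module_exit(clocksample_exit);
MODULE_LICENSE("GPL");	/* required to bind a _GPL export */
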
@@ -58,6 +57,16 @@ u64 notrace trace_clock(void)
return local_clock();
}
+/*
+ * trace_clock_jiffies(): Simply use jiffies as a clock counter.
+ */
+u64 notrace trace_clock_jiffies(void)
+{
+ u64 jiffy = jiffies - INITIAL_JIFFIES;
+
+ /* Return nsecs */
+ return (u64)jiffies_to_usecs(jiffy) * 1000ULL;
+}
/*
* trace_clock_global(): special globally coherent trace clock
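
The conversion in trace_clock_jiffies() goes jiffies -> microseconds ->
nanoseconds. A standalone sketch of the same arithmetic, assuming HZ=250
(one possible config value; in-kernel, jiffies_to_usecs() reduces to this
multiplication whenever HZ divides USEC_PER_SEC evenly):

#include <stdio.h>
#include <stdint.h>

#define HZ		250		/* assumed, a common config choice */
#define USEC_PER_SEC	1000000ULL

static uint64_t jiffies_to_nsecs(uint64_t jiffy)
{
	/* usecs per jiffy, then *1000 to nsecs, mirroring the patch */
	return jiffy * (USEC_PER_SEC / HZ) * 1000ULL;
}

int main(void)
{
	/* HZ jiffies is one second: prints 1000000000 */
	printf("%llu\n", (unsigned long long)jiffies_to_nsecs(HZ));
	return 0;
}
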
@@ -86,7 +95,7 @@ u64 notrace trace_clock_global(void)
local_irq_save(flags);
this_cpu = raw_smp_processor_id();
- now = cpu_clock(this_cpu);
+ now = sched_clock_cpu(this_cpu);
/*
 * If in an NMI context then don't risk lockups and return the
* cpu_clock() time:
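
The cpu_clock() -> sched_clock_cpu() switch removes redundant interrupt
toggling: local_irq_save() has already run a few lines up, and in kernels
of this vintage cpu_clock() was essentially the following wrapper (a
paraphrase of the scheduler clock code, shown for context only):

u64 cpu_clock(int cpu)
{
	u64 clock;
	unsigned long flags;

	local_irq_save(flags);
	clock = sched_clock_cpu(cpu);
	local_irq_restore(flags);

	return clock;
}

Calling sched_clock_cpu() directly inside the already-irq-disabled region
yields the same timestamp without the second save/restore pair.
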
@@ -113,3 +122,15 @@ u64 notrace trace_clock_global(void)
return now;
}
+
+static atomic64_t trace_counter;
+
+/*
+ * trace_clock_counter(): simply an atomic counter.
+ * Use the trace_counter "counter" for cases where you do not care
+ * about timings, but are interested in strict ordering.
+ */
+u64 notrace trace_clock_counter(void)
+{
+ return atomic64_add_return(1, &trace_counter);
+}
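
trace_clock_counter() gives up any notion of time entirely: each caller
gets a unique, strictly increasing value, which is what you want when only
the order of events matters. A userspace analogue with C11 atomics (the
names mirror the patch, but nothing here is kernel code):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t trace_counter;

static uint64_t trace_clock_counter(void)
{
	/* fetch-add returns the old value; the +1 reproduces the
	 * add-then-return semantics of atomic64_add_return(1, ...) */
	return atomic_fetch_add(&trace_counter, 1) + 1;
}

int main(void)
{
	uint64_t a = trace_clock_counter();	/* 1 */
	uint64_t b = trace_clock_counter();	/* 2 */

	printf("%llu %llu\n",
	       (unsigned long long)a, (unsigned long long)b);
	return 0;
}
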