author     Thomas Gleixner <tglx@linutronix.de>  2016-12-21 20:32:01 +0100
committer  Thomas Gleixner <tglx@linutronix.de>  2016-12-25 11:04:12 +0100
commit     a5a1d1c2914b5316924c7893eb683a5420ebd3be (patch)
tree       9078b8a179031e7e8b320e1c69f182cc285e7b5d /kernel
parent     7c0f6ba682b9c7632072ffbedf8d328c8f3c42ba (diff)
clocksource: Use a plain u64 instead of cycle_t
There is no point in having an extra type for extra confusion. u64 is unambiguous.

Conversion was done with the following coccinelle script:

@rem@
@@
-typedef u64 cycle_t;

@fix@
typedef cycle_t;
@@
-cycle_t
+u64

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: John Stultz <john.stultz@linaro.org>
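To make the mechanics of the conversion concrete, here is a minimal, self-contained C sketch of the before/after shapes the script produces. The helper names and the uint64_t stand-in for the kernel's u64 are illustrative only, not from the patch; the point is that cycle_t was a plain typedef of u64 (as the @rem@ rule shows), so rewriting each use is a purely mechanical, behavior-preserving change.

#include <stdint.h>

typedef uint64_t u64;   /* stand-in for the kernel's u64 in this sketch */
typedef u64 cycle_t;    /* the alias the @rem@ rule deletes */

/* Before the patch: a reader returning the aliased type
 * (hypothetical helper, modeled on jiffies_read() below). */
static cycle_t read_counter_old(const unsigned long *counter)
{
	return (cycle_t)*counter;
}

/* After the patch: the @fix@ rule has rewritten cycle_t to u64;
 * the generated code is identical because the two types are the same. */
static u64 read_counter_new(const unsigned long *counter)
{
	return (u64)*counter;
}

Both spellings are shown side by side here only for illustration; in the tree each use is rewritten in place, as the hunks below show.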
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/time/clocksource.c             2
-rw-r--r--   kernel/time/jiffies.c                 4
-rw-r--r--   kernel/time/timecounter.c             6
-rw-r--r--   kernel/time/timekeeping.c            57
-rw-r--r--   kernel/time/timekeeping_internal.h    6
-rw-r--r--   kernel/trace/ftrace.c                 4
-rw-r--r--   kernel/trace/trace.c                  6
-rw-r--r--   kernel/trace/trace.h                  8
-rw-r--r--   kernel/trace/trace_irqsoff.c          4
-rw-r--r--   kernel/trace/trace_sched_wakeup.c     4
10 files changed, 49 insertions(+), 52 deletions(-)
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 150242ccfcd2..665985b0a89a 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -170,7 +170,7 @@ void clocksource_mark_unstable(struct clocksource *cs)
static void clocksource_watchdog(unsigned long data)
{
struct clocksource *cs;
- cycle_t csnow, wdnow, cslast, wdlast, delta;
+ u64 csnow, wdnow, cslast, wdlast, delta;
int64_t wd_nsec, cs_nsec;
int next_cpu, reset_pending;
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 555e21f7b966..a4a0e478e44d 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -59,9 +59,9 @@
#define JIFFIES_SHIFT 8
#endif
-static cycle_t jiffies_read(struct clocksource *cs)
+static u64 jiffies_read(struct clocksource *cs)
{
- return (cycle_t) jiffies;
+ return (u64) jiffies;
}
static struct clocksource clocksource_jiffies = {
diff --git a/kernel/time/timecounter.c b/kernel/time/timecounter.c
index 4687b3104bae..8afd78932bdf 100644
--- a/kernel/time/timecounter.c
+++ b/kernel/time/timecounter.c
@@ -43,7 +43,7 @@ EXPORT_SYMBOL_GPL(timecounter_init);
*/
static u64 timecounter_read_delta(struct timecounter *tc)
{
- cycle_t cycle_now, cycle_delta;
+ u64 cycle_now, cycle_delta;
u64 ns_offset;
/* read cycle counter: */
@@ -80,7 +80,7 @@ EXPORT_SYMBOL_GPL(timecounter_read);
* time previous to the time stored in the cycle counter.
*/
static u64 cc_cyc2ns_backwards(const struct cyclecounter *cc,
- cycle_t cycles, u64 mask, u64 frac)
+ u64 cycles, u64 mask, u64 frac)
{
u64 ns = (u64) cycles;
@@ -90,7 +90,7 @@ static u64 cc_cyc2ns_backwards(const struct cyclecounter *cc,
}
u64 timecounter_cyc2time(struct timecounter *tc,
- cycle_t cycle_tstamp)
+ u64 cycle_tstamp)
{
u64 delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
u64 nsec = tc->nsec, frac = tc->frac;
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index da233cdf89b0..f4152a69277f 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -119,10 +119,10 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
#ifdef CONFIG_DEBUG_TIMEKEEPING
#define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */
-static void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
+static void timekeeping_check_update(struct timekeeper *tk, u64 offset)
{
- cycle_t max_cycles = tk->tkr_mono.clock->max_cycles;
+ u64 max_cycles = tk->tkr_mono.clock->max_cycles;
const char *name = tk->tkr_mono.clock->name;
if (offset > max_cycles) {
@@ -158,10 +158,10 @@ static void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
}
}
-static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
+static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
{
struct timekeeper *tk = &tk_core.timekeeper;
- cycle_t now, last, mask, max, delta;
+ u64 now, last, mask, max, delta;
unsigned int seq;
/*
@@ -199,12 +199,12 @@ static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
return delta;
}
#else
-static inline void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
+static inline void timekeeping_check_update(struct timekeeper *tk, u64 offset)
{
}
-static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
+static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
{
- cycle_t cycle_now, delta;
+ u64 cycle_now, delta;
/* read clocksource */
cycle_now = tkr->read(tkr->clock);
@@ -229,7 +229,7 @@ static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
*/
static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
{
- cycle_t interval;
+ u64 interval;
u64 tmp, ntpinterval;
struct clocksource *old_clock;
@@ -254,7 +254,7 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
if (tmp == 0)
tmp = 1;
- interval = (cycle_t) tmp;
+ interval = (u64) tmp;
tk->cycle_interval = interval;
/* Go back from cycles -> shifted ns */
@@ -298,8 +298,7 @@ u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
static inline u32 arch_gettimeoffset(void) { return 0; }
#endif
-static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr,
- cycle_t delta)
+static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr, u64 delta)
{
u64 nsec;
@@ -312,16 +311,15 @@ static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr,
static inline u64 timekeeping_get_ns(struct tk_read_base *tkr)
{
- cycle_t delta;
+ u64 delta;
delta = timekeeping_get_delta(tkr);
return timekeeping_delta_to_ns(tkr, delta);
}
-static inline u64 timekeeping_cycles_to_ns(struct tk_read_base *tkr,
- cycle_t cycles)
+static inline u64 timekeeping_cycles_to_ns(struct tk_read_base *tkr, u64 cycles)
{
- cycle_t delta;
+ u64 delta;
/* calculate the delta since the last update_wall_time */
delta = clocksource_delta(cycles, tkr->cycle_last, tkr->mask);
@@ -454,9 +452,9 @@ u64 notrace ktime_get_boot_fast_ns(void)
EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);
/* Suspend-time cycles value for halted fast timekeeper. */
-static cycle_t cycles_at_suspend;
+static u64 cycles_at_suspend;
-static cycle_t dummy_clock_read(struct clocksource *cs)
+static u64 dummy_clock_read(struct clocksource *cs)
{
return cycles_at_suspend;
}
@@ -650,7 +648,7 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
static void timekeeping_forward_now(struct timekeeper *tk)
{
struct clocksource *clock = tk->tkr_mono.clock;
- cycle_t cycle_now, delta;
+ u64 cycle_now, delta;
u64 nsec;
cycle_now = tk->tkr_mono.read(clock);
@@ -923,7 +921,7 @@ void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
ktime_t base_real;
u64 nsec_raw;
u64 nsec_real;
- cycle_t now;
+ u64 now;
WARN_ON_ONCE(timekeeping_suspended);
@@ -982,8 +980,8 @@ static int scale64_check_overflow(u64 mult, u64 div, u64 *base)
* interval is partial_history_cycles.
*/
static int adjust_historical_crosststamp(struct system_time_snapshot *history,
- cycle_t partial_history_cycles,
- cycle_t total_history_cycles,
+ u64 partial_history_cycles,
+ u64 total_history_cycles,
bool discontinuity,
struct system_device_crosststamp *ts)
{
@@ -1047,7 +1045,7 @@ static int adjust_historical_crosststamp(struct system_time_snapshot *history,
/*
* cycle_between - true if test occurs chronologically between before and after
*/
-static bool cycle_between(cycle_t before, cycle_t test, cycle_t after)
+static bool cycle_between(u64 before, u64 test, u64 after)
{
if (test > before && test < after)
return true;
@@ -1077,7 +1075,7 @@ int get_device_system_crosststamp(int (*get_time_fn)
{
struct system_counterval_t system_counterval;
struct timekeeper *tk = &tk_core.timekeeper;
- cycle_t cycles, now, interval_start;
+ u64 cycles, now, interval_start;
unsigned int clock_was_set_seq = 0;
ktime_t base_real, base_raw;
u64 nsec_real, nsec_raw;
@@ -1138,7 +1136,7 @@ int get_device_system_crosststamp(int (*get_time_fn)
* current interval
*/
if (do_interp) {
- cycle_t partial_history_cycles, total_history_cycles;
+ u64 partial_history_cycles, total_history_cycles;
bool discontinuity;
/*
@@ -1644,7 +1642,7 @@ void timekeeping_resume(void)
struct clocksource *clock = tk->tkr_mono.clock;
unsigned long flags;
struct timespec64 ts_new, ts_delta;
- cycle_t cycle_now;
+ u64 cycle_now;
sleeptime_injected = false;
read_persistent_clock64(&ts_new);
@@ -2010,11 +2008,10 @@ static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
*
* Returns the unconsumed cycles.
*/
-static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
- u32 shift,
- unsigned int *clock_set)
+static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
+ u32 shift, unsigned int *clock_set)
{
- cycle_t interval = tk->cycle_interval << shift;
+ u64 interval = tk->cycle_interval << shift;
u64 raw_nsecs;
/* If the offset is smaller than a shifted interval, do nothing */
@@ -2055,7 +2052,7 @@ void update_wall_time(void)
{
struct timekeeper *real_tk = &tk_core.timekeeper;
struct timekeeper *tk = &shadow_timekeeper;
- cycle_t offset;
+ u64 offset;
int shift = 0, maxshift;
unsigned int clock_set = 0;
unsigned long flags;
diff --git a/kernel/time/timekeeping_internal.h b/kernel/time/timekeeping_internal.h
index 5be76270ec4a..9a18f121f399 100644
--- a/kernel/time/timekeeping_internal.h
+++ b/kernel/time/timekeeping_internal.h
@@ -13,9 +13,9 @@ extern void tk_debug_account_sleep_time(struct timespec64 *t);
#endif
#ifdef CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE
-static inline cycle_t clocksource_delta(cycle_t now, cycle_t last, cycle_t mask)
+static inline u64 clocksource_delta(u64 now, u64 last, u64 mask)
{
- cycle_t ret = (now - last) & mask;
+ u64 ret = (now - last) & mask;
/*
* Prevent time going backwards by checking the MSB of mask in
@@ -24,7 +24,7 @@ static inline cycle_t clocksource_delta(cycle_t now, cycle_t last, cycle_t mask)
return ret & ~(mask >> 1) ? 0 : ret;
}
#else
-static inline cycle_t clocksource_delta(cycle_t now, cycle_t last, cycle_t mask)
+static inline u64 clocksource_delta(u64 now, u64 last, u64 mask)
{
return (now - last) & mask;
}
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 1f0f547c54da..eb230f06ba41 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2847,7 +2847,7 @@ static void ftrace_shutdown_sysctl(void)
}
}
-static cycle_t ftrace_update_time;
+static u64 ftrace_update_time;
unsigned long ftrace_update_tot_cnt;
static inline int ops_traces_mod(struct ftrace_ops *ops)
@@ -2894,7 +2894,7 @@ static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
{
struct ftrace_page *pg;
struct dyn_ftrace *p;
- cycle_t start, stop;
+ u64 start, stop;
unsigned long update_cnt = 0;
unsigned long rec_flags = 0;
int i;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 66f829c47bec..d7449783987a 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -236,7 +236,7 @@ static int __init set_tracepoint_printk(char *str)
}
__setup("tp_printk", set_tracepoint_printk);
-unsigned long long ns2usecs(cycle_t nsec)
+unsigned long long ns2usecs(u64 nsec)
{
nsec += 500;
do_div(nsec, 1000);
@@ -573,7 +573,7 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
return read;
}
-static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
+static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
u64 ts;
@@ -587,7 +587,7 @@ static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
return ts;
}
-cycle_t ftrace_now(int cpu)
+u64 ftrace_now(int cpu)
{
return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index c2234494f40c..1ea51ab53edf 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -159,7 +159,7 @@ struct trace_array_cpu {
unsigned long policy;
unsigned long rt_priority;
unsigned long skipped_entries;
- cycle_t preempt_timestamp;
+ u64 preempt_timestamp;
pid_t pid;
kuid_t uid;
char comm[TASK_COMM_LEN];
@@ -177,7 +177,7 @@ struct trace_buffer {
struct trace_array *tr;
struct ring_buffer *buffer;
struct trace_array_cpu __percpu *data;
- cycle_t time_start;
+ u64 time_start;
int cpu;
};
@@ -689,7 +689,7 @@ static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
}
#endif /* CONFIG_STACKTRACE */
-extern cycle_t ftrace_now(int cpu);
+extern u64 ftrace_now(int cpu);
extern void trace_find_cmdline(int pid, char comm[]);
extern void trace_event_follow_fork(struct trace_array *tr, bool enable);
@@ -736,7 +736,7 @@ extern int trace_selftest_startup_branch(struct tracer *trace,
#endif /* CONFIG_FTRACE_STARTUP_TEST */
extern void *head_page(struct trace_array_cpu *data);
-extern unsigned long long ns2usecs(cycle_t nsec);
+extern unsigned long long ns2usecs(u64 nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 86654d7e1afe..7758bc0617cb 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -298,7 +298,7 @@ static void irqsoff_print_header(struct seq_file *s)
/*
* Should this new latency be reported/recorded?
*/
-static bool report_latency(struct trace_array *tr, cycle_t delta)
+static bool report_latency(struct trace_array *tr, u64 delta)
{
if (tracing_thresh) {
if (delta < tracing_thresh)
@@ -316,7 +316,7 @@ check_critical_timing(struct trace_array *tr,
unsigned long parent_ip,
int cpu)
{
- cycle_t T0, T1, delta;
+ u64 T0, T1, delta;
unsigned long flags;
int pc;
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 5d0bb025bb21..ddec53b67646 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -358,7 +358,7 @@ static void wakeup_print_header(struct seq_file *s)
/*
* Should this new latency be reported/recorded?
*/
-static bool report_latency(struct trace_array *tr, cycle_t delta)
+static bool report_latency(struct trace_array *tr, u64 delta)
{
if (tracing_thresh) {
if (delta < tracing_thresh)
@@ -440,7 +440,7 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
struct task_struct *prev, struct task_struct *next)
{
struct trace_array_cpu *data;
- cycle_t T0, T1, delta;
+ u64 T0, T1, delta;
unsigned long flags;
long disabled;
int cpu;