path: root/kernel/time/sched_clock.c
author    Stephen Boyd <sboyd@codeaurora.org>    2013-07-18 16:21:16 -0700
committer John Stultz <john.stultz@linaro.org>   2013-07-30 11:24:20 -0700
commit    a08ca5d1089da03724f96fa0870c64968e66765b (patch)
tree      763e988f9f4d412681f2859bc12370edbfe565de /kernel/time/sched_clock.c
parent    85c3d2dd15be4d577a37ffb8bbbd019fc8e3280a (diff)
sched_clock: Use an hrtimer instead of timer
In the next patch we're going to increase the number of bits that the generic sched_clock can handle to be greater than 32. With more than 32 bits, the wraparound time can be larger than what fits into the units that msecs_to_jiffies() takes (unsigned int). Luckily, the wraparound is initially calculated in nanoseconds, which we can easily use with hrtimers, so switch to using an hrtimer.

Cc: Russell King <linux@arm.linux.org.uk>
Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
[jstultz: Fixup hrtimer initialization order issue]
Signed-off-by: John Stultz <john.stultz@linaro.org>
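The overflow the changelog describes can be made concrete with a small standalone userspace sketch (not part of the patch; the 48-bit width and 1 MHz rate are assumed example values). It computes the counter's wraparound period in nanoseconds and shows that, expressed in milliseconds, it no longer fits in the unsigned int that msecs_to_jiffies() takes, while the nanosecond value still fits comfortably in the 64 bits an hrtimer expiry uses:

/*
 * Standalone illustration, not kernel code: why a >32-bit counter's
 * wraparound no longer fits in the unsigned int milliseconds that
 * msecs_to_jiffies() accepts. Bit width and rate are assumed values.
 */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned int bits = 48;		/* assumed counter width */
	const uint64_t rate = 1000000;		/* assumed 1 MHz clock */

	/* nanoseconds until the counter wraps (1e9 / rate ns per cycle) */
	uint64_t wrap_ns = ((1ULL << bits) - 1) * (1000000000ULL / rate);
	uint64_t wrap_ms = wrap_ns / 1000000ULL;

	printf("wraps every %llu ns (%llu ms)\n",
	       (unsigned long long)wrap_ns, (unsigned long long)wrap_ms);

	/* UINT_MAX ms is roughly 49.7 days; a wide counter easily exceeds it. */
	if (wrap_ms > UINT_MAX)
		printf("too big for msecs_to_jiffies(unsigned int); "
		       "a ktime_t/hrtimer handles it directly\n");

	return 0;
}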
Diffstat (limited to 'kernel/time/sched_clock.c')
-rw-r--r--    kernel/time/sched_clock.c    38
1 file changed, 21 insertions(+), 17 deletions(-)
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index 396f7b9dccc9..c018ffc59937 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -8,15 +8,17 @@
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
+#include <linux/ktime.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
-#include <linux/timer.h>
+#include <linux/hrtimer.h>
#include <linux/sched_clock.h>
#include <linux/seqlock.h>
struct clock_data {
+ ktime_t wrap_kt;
u64 epoch_ns;
u32 epoch_cyc;
seqcount_t seq;
@@ -26,8 +28,7 @@ struct clock_data {
bool suspended;
};
-static void sched_clock_poll(unsigned long wrap_ticks);
-static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0);
+static struct hrtimer sched_clock_timer;
static int irqtime = -1;
core_param(irqtime, irqtime, int, 0400);
@@ -93,15 +94,16 @@ static void notrace update_sched_clock(void)
raw_local_irq_restore(flags);
}
-static void sched_clock_poll(unsigned long wrap_ticks)
+static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
{
- mod_timer(&sched_clock_timer, round_jiffies(jiffies + wrap_ticks));
update_sched_clock();
+ hrtimer_forward_now(hrt, cd.wrap_kt);
+ return HRTIMER_RESTART;
}
void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
{
- unsigned long r, w;
+ unsigned long r;
u64 res, wrap;
char r_unit;
@@ -129,19 +131,13 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
/* calculate how many ns until we wrap */
wrap = cyc_to_ns((1ULL << bits) - 1, cd.mult, cd.shift);
- do_div(wrap, NSEC_PER_MSEC);
- w = wrap;
+ cd.wrap_kt = ns_to_ktime(wrap - (wrap >> 3));
/* calculate the ns resolution of this counter */
res = cyc_to_ns(1ULL, cd.mult, cd.shift);
- pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n",
- bits, r, r_unit, res, w);
+ pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
+ bits, r, r_unit, res, wrap);
- /*
- * Start the timer to keep sched_clock() properly updated and
- * sets the initial epoch.
- */
- sched_clock_timer.data = msecs_to_jiffies(w - (w / 10));
update_sched_clock();
/*
@@ -172,12 +168,20 @@ void __init sched_clock_postinit(void)
if (read_sched_clock == jiffy_sched_clock_read)
setup_sched_clock(jiffy_sched_clock_read, 32, HZ);
- sched_clock_poll(sched_clock_timer.data);
+ update_sched_clock();
+
+ /*
+ * Start the timer to keep sched_clock() properly updated and
+ * sets the initial epoch.
+ */
+ hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ sched_clock_timer.function = sched_clock_poll;
+ hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
}
static int sched_clock_suspend(void)
{
- sched_clock_poll(sched_clock_timer.data);
+ sched_clock_poll(&sched_clock_timer);
cd.suspended = true;
return 0;
}
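For reference, the periodic-poll pattern the patch switches to looks roughly like this when distilled into a standalone module sketch. The names here (poll_timer, poll_fn, poll_interval) and the one-second period are made up for illustration; the patch itself folds the same init/start/forward_now/restart sequence into setup_sched_clock() and sched_clock_postinit(), with cd.wrap_kt as the period and update_sched_clock() as the periodic work:

/*
 * Minimal sketch of the periodic hrtimer pattern, as a standalone module.
 * Names and the 1 s period are illustrative, not taken from the patch.
 */
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/ktime.h>
#include <linux/module.h>

static struct hrtimer poll_timer;
static ktime_t poll_interval;

static enum hrtimer_restart poll_fn(struct hrtimer *hrt)
{
	/* Periodic work goes here (update_sched_clock() in the patch). */

	/* Push the expiry forward by one period and keep the timer running. */
	hrtimer_forward_now(hrt, poll_interval);
	return HRTIMER_RESTART;
}

static int __init poll_example_init(void)
{
	poll_interval = ktime_set(1, 0);	/* assumed 1 s period */

	hrtimer_init(&poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	poll_timer.function = poll_fn;
	hrtimer_start(&poll_timer, poll_interval, HRTIMER_MODE_REL);
	return 0;
}

static void __exit poll_example_exit(void)
{
	hrtimer_cancel(&poll_timer);
}

module_init(poll_example_init);
module_exit(poll_example_exit);
MODULE_LICENSE("GPL");

Unlike the old timer wheel version, the expiry is carried as a ktime_t in nanoseconds end to end, so nothing ever has to be squeezed through msecs_to_jiffies().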