author     Thomas Gleixner <tglx@linutronix.de>   2016-12-21 20:32:01 +0100
committer  Thomas Gleixner <tglx@linutronix.de>   2016-12-25 11:04:12 +0100
commit     a5a1d1c2914b5316924c7893eb683a5420ebd3be (patch)
tree       9078b8a179031e7e8b320e1c69f182cc285e7b5d /arch/x86/platform
parent     7c0f6ba682b9c7632072ffbedf8d328c8f3c42ba (diff)
clocksource: Use a plain u64 instead of cycle_t
There is no point in having an extra type for extra confusion. u64 is
unambiguous.

Conversion was done with the following coccinelle script:

@rem@
@@
-typedef u64 cycle_t;

@fix@
typedef cycle_t;
@@
-cycle_t
+u64

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: John Stultz <john.stultz@linaro.org>
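As background (not part of this patch): cycle_t was only a typedef for u64, so the conversion changes spelling, not layout or semantics. A minimal sketch of what a clocksource read hook looks like after the change, using hypothetical names (demo_read, demo_clocksource) and an arbitrary 1 MHz frequency:

    #include <linux/clocksource.h>
    #include <linux/init.h>
    #include <linux/types.h>

    /* Hypothetical read hook: after this patch the callback returns a plain
     * u64 rather than cycle_t (itself merely a typedef for u64). */
    static u64 demo_read(struct clocksource *cs)
    {
    	return 0;	/* a real driver would read its hardware counter here */
    }

    static struct clocksource demo_clocksource = {
    	.name	= "demo",
    	.rating	= 100,
    	.read	= demo_read,
    	.mask	= CLOCKSOURCE_MASK(64),
    	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
    };

    static int __init demo_clocksource_init(void)
    {
    	/* 1 MHz is an arbitrary placeholder frequency for this sketch. */
    	return clocksource_register_hz(&demo_clocksource, 1000000);
    }
    device_initcall(demo_clocksource_init);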
Diffstat (limited to 'arch/x86/platform')
-rw-r--r--  arch/x86/platform/uv/uv_time.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
index b333fc45f9ec..2ee7632d4916 100644
--- a/arch/x86/platform/uv/uv_time.c
+++ b/arch/x86/platform/uv/uv_time.c
@@ -30,7 +30,7 @@
 
 #define RTC_NAME		"sgi_rtc"
 
-static cycle_t uv_read_rtc(struct clocksource *cs);
+static u64 uv_read_rtc(struct clocksource *cs);
 static int uv_rtc_next_event(unsigned long, struct clock_event_device *);
 static int uv_rtc_shutdown(struct clock_event_device *evt);
 
@@ -38,7 +38,7 @@ static struct clocksource clocksource_uv = {
 	.name		= RTC_NAME,
 	.rating		= 299,
 	.read		= uv_read_rtc,
-	.mask		= (cycle_t)UVH_RTC_REAL_TIME_CLOCK_MASK,
+	.mask		= (u64)UVH_RTC_REAL_TIME_CLOCK_MASK,
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
@@ -296,7 +296,7 @@ static int uv_rtc_unset_timer(int cpu, int force)
  * cachelines of it's own page. This allows faster simultaneous reads
  * from a given socket.
  */
-static cycle_t uv_read_rtc(struct clocksource *cs)
+static u64 uv_read_rtc(struct clocksource *cs)
 {
 	unsigned long offset;
 
@@ -305,7 +305,7 @@ static cycle_t uv_read_rtc(struct clocksource *cs)
 	else
 		offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;
 
-	return (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
+	return (u64)uv_read_local_mmr(UVH_RTC | offset);
 }
 
 /*
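The context lines in the last two hunks explain the per-CPU offset seen above: the UV RTC register is replicated across the cachelines of its own page, so each CPU on a socket reads from a different cacheline and simultaneous reads do not contend. A standalone sketch of that indexing arithmetic, with hypothetical stand-ins for L1_CACHE_BYTES and PAGE_SIZE (ordinary userspace C, for illustration only):

    #include <stdio.h>

    /* Hypothetical stand-ins for the kernel's L1_CACHE_BYTES and PAGE_SIZE. */
    #define DEMO_CACHE_BYTES	64UL
    #define DEMO_PAGE_SIZE	4096UL

    /* Each reader indexes its own cacheline within the replicated page,
     * wrapping once the page is exhausted (mirrors the % PAGE_SIZE above). */
    static unsigned long demo_offset(unsigned int cpu_on_blade)
    {
    	return (cpu_on_blade * DEMO_CACHE_BYTES) % DEMO_PAGE_SIZE;
    }

    int main(void)
    {
    	for (unsigned int cpu = 0; cpu < 4; cpu++)
    		printf("cpu %u reads at page offset %lu\n", cpu, demo_offset(cpu));
    	return 0;
    }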