author     Andy Lutomirski <luto@kernel.org>    2015-06-25 18:43:58 +0200
committer  Ingo Molnar <mingo@kernel.org>       2015-07-06 15:23:26 +0200
commit     87be28aaf1458445d5f648688c2eec0f13b8f3b9 (patch)
tree       c040f44d77fe24680f16cc2ab5ddab05fd780431 /arch
parent     9261e050b686c9fe229cd9918d997b3caaf20e34 (diff)
x86/asm/tsc: Replace rdtscll() with native_read_tsc()
Now that the ->read_tsc() paravirt hook is gone, rdtscll() is just a
wrapper around native_read_tsc(). Unwrap it.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Huang Rui <ray.huang@amd.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Len Brown <lenb@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: kvm ML <kvm@vger.kernel.org>
Link: http://lkml.kernel.org/r/d2449ae62c1b1fb90195bcfb19ef4a35883a04dc.1434501121.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
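For reference, a minimal sketch of the mechanical conversion this patch performs, based on the rdtscll() definition deleted from arch/x86/include/asm/msr.h in the hunk below:

    /* Removed macro: rdtscll() was only an assignment wrapper
     * around native_read_tsc().
     */
    #define rdtscll(val) ((val) = native_read_tsc())

    u64 tsc;
    rdtscll(tsc);              /* before this patch */
    tsc = native_read_tsc();   /* after this patch: equivalent direct call */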
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/boot/compressed/aslr.c    2
-rw-r--r--  arch/x86/include/asm/msr.h         3
-rw-r--r--  arch/x86/include/asm/tsc.h         5
-rw-r--r--  arch/x86/kernel/apb_timer.c        4
-rw-r--r--  arch/x86/kernel/apic/apic.c        8
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c   4
-rw-r--r--  arch/x86/kernel/espfix_64.c        2
-rw-r--r--  arch/x86/kernel/hpet.c             4
-rw-r--r--  arch/x86/kernel/trace_clock.c      2
-rw-r--r--  arch/x86/kernel/tsc.c              4
-rw-r--r--  arch/x86/kvm/vmx.c                 2
-rw-r--r--  arch/x86/lib/delay.c               2
12 files changed, 18 insertions(+), 24 deletions(-)
diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c
index d7b1f655b3ef..ea33236190b1 100644
--- a/arch/x86/boot/compressed/aslr.c
+++ b/arch/x86/boot/compressed/aslr.c
@@ -82,7 +82,7 @@ static unsigned long get_random_long(void)
if (has_cpuflag(X86_FEATURE_TSC)) {
debug_putstr(" RDTSC");
- rdtscll(raw);
+ raw = native_read_tsc();
random ^= raw;
use_i8254 = false;
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index d1afac7df484..7273b74e0f99 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -192,9 +192,6 @@ do { \
#define rdtscl(low) \
((low) = (u32)native_read_tsc())
-#define rdtscll(val) \
- ((val) = native_read_tsc())
-
#define rdtscp(low, high, aux) \
do { \
unsigned long long _val = native_read_tscp(&(aux)); \
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 3da1cc1218ac..b4883902948b 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -21,15 +21,12 @@ extern void disable_TSC(void);
static inline cycles_t get_cycles(void)
{
- unsigned long long ret = 0;
-
#ifndef CONFIG_X86_TSC
if (!cpu_has_tsc)
return 0;
#endif
- rdtscll(ret);
- return ret;
+ return native_read_tsc();
}
extern void tsc_init(void);
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 9fe111cc50f8..25efa534c4e4 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -263,7 +263,7 @@ static int apbt_clocksource_register(void)
/* Verify whether apbt counter works */
t1 = dw_apb_clocksource_read(clocksource_apbt);
- rdtscll(start);
+ start = native_read_tsc();
/*
* We don't know the TSC frequency yet, but waiting for
@@ -273,7 +273,7 @@ static int apbt_clocksource_register(void)
*/
do {
rep_nop();
- rdtscll(now);
+ now = native_read_tsc();
} while ((now - start) < 200000UL);
/* APBT is the only always on clocksource, it has to work! */
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index dcb52850a28f..51af1ed1ae2e 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -457,7 +457,7 @@ static int lapic_next_deadline(unsigned long delta,
{
u64 tsc;
- rdtscll(tsc);
+ tsc = native_read_tsc();
wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
return 0;
}
@@ -592,7 +592,7 @@ static void __init lapic_cal_handler(struct clock_event_device *dev)
unsigned long pm = acpi_pm_read_early();
if (cpu_has_tsc)
- rdtscll(tsc);
+ tsc = native_read_tsc();
switch (lapic_cal_loops++) {
case 0:
@@ -1209,7 +1209,7 @@ void setup_local_APIC(void)
long long max_loops = cpu_khz ? cpu_khz : 1000000;
if (cpu_has_tsc)
- rdtscll(tsc);
+ tsc = native_read_tsc();
if (disable_apic) {
disable_ioapic_support();
@@ -1293,7 +1293,7 @@ void setup_local_APIC(void)
}
if (queued) {
if (cpu_has_tsc && cpu_khz) {
- rdtscll(ntsc);
+ ntsc = native_read_tsc();
max_loops = (cpu_khz << 10) - (ntsc - tsc);
} else
max_loops--;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index df919ff103c3..a5283d2d0094 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -125,7 +125,7 @@ void mce_setup(struct mce *m)
{
memset(m, 0, sizeof(struct mce));
m->cpu = m->extcpu = smp_processor_id();
- rdtscll(m->tsc);
+ m->tsc = native_read_tsc();
/* We hope get_seconds stays lockless */
m->time = get_seconds();
m->cpuvendor = boot_cpu_data.x86_vendor;
@@ -1784,7 +1784,7 @@ static void collect_tscs(void *data)
{
unsigned long *cpu_tsc = (unsigned long *)data;
- rdtscll(cpu_tsc[smp_processor_id()]);
+ cpu_tsc[smp_processor_id()] = native_read_tsc();
}
static int mce_apei_read_done;
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index f5d0730e7b08..334a2a9c034d 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -110,7 +110,7 @@ static void init_espfix_random(void)
*/
if (!arch_get_random_long(&rand)) {
/* The constant is an arbitrary large prime */
- rdtscll(rand);
+ rand = native_read_tsc();
rand *= 0xc345c6b72fd16123UL;
}
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 10757d0a3fcf..cc390fe69b71 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -735,7 +735,7 @@ static int hpet_clocksource_register(void)
/* Verify whether hpet counter works */
t1 = hpet_readl(HPET_COUNTER);
- rdtscll(start);
+ start = native_read_tsc();
/*
* We don't know the TSC frequency yet, but waiting for
@@ -745,7 +745,7 @@ static int hpet_clocksource_register(void)
*/
do {
rep_nop();
- rdtscll(now);
+ now = native_read_tsc();
} while ((now - start) < 200000UL);
if (t1 == hpet_readl(HPET_COUNTER)) {
diff --git a/arch/x86/kernel/trace_clock.c b/arch/x86/kernel/trace_clock.c
index 25b993729f9b..bd8f4d41bd56 100644
--- a/arch/x86/kernel/trace_clock.c
+++ b/arch/x86/kernel/trace_clock.c
@@ -15,7 +15,7 @@ u64 notrace trace_clock_x86_tsc(void)
u64 ret;
rdtsc_barrier();
- rdtscll(ret);
+ ret = native_read_tsc();
return ret;
}
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index e7710cd7ba00..e66f5dcaeb63 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -248,7 +248,7 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
data = cyc2ns_write_begin(cpu);
- rdtscll(tsc_now);
+ tsc_now = native_read_tsc();
ns_now = cycles_2_ns(tsc_now);
/*
@@ -290,7 +290,7 @@ u64 native_sched_clock(void)
}
/* read the Time Stamp Counter: */
- rdtscll(tsc_now);
+ tsc_now = native_read_tsc();
/* return the value in ns */
return cycles_2_ns(tsc_now);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e856dd566f4c..4fa1ccad7beb 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2236,7 +2236,7 @@ static u64 guest_read_tsc(void)
{
u64 host_tsc, tsc_offset;
- rdtscll(host_tsc);
+ host_tsc = native_read_tsc();
tsc_offset = vmcs_read64(TSC_OFFSET);
return host_tsc + tsc_offset;
}
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index 39d6a3db0b96..9a52ad0c0758 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -100,7 +100,7 @@ void use_tsc_delay(void)
int read_current_timer(unsigned long *timer_val)
{
if (delay_fn == delay_tsc) {
- rdtscll(*timer_val);
+ *timer_val = native_read_tsc();
return 0;
}
return -1;