From e76b027e6408f5570dc940b731ec9ae870c6188a Mon Sep 17 00:00:00 2001
From: Andy Lutomirski
Date: Thu, 30 Oct 2014 14:58:01 -0700
Subject: x86,vdso: Use LSL unconditionally for vgetcpu

LSL is faster than RDTSCP and works everywhere; there's no need to
switch between them depending on CPU.

Signed-off-by: Andy Lutomirski
Cc: Andi Kleen
Link: http://lkml.kernel.org/r/72f73d5ec4514e02bba345b9759177ef03742efb.1414706021.git.luto@amacapital.net
Signed-off-by: Thomas Gleixner
---
 arch/x86/include/asm/vsyscall.h | 29 -----------------------------
 1 file changed, 29 deletions(-)

(limited to 'arch/x86/include/asm/vsyscall.h')

diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
index 2a46ca720afc..34f7d8857542 100644
--- a/arch/x86/include/asm/vsyscall.h
+++ b/arch/x86/include/asm/vsyscall.h
@@ -4,15 +4,6 @@
 #include <linux/seqlock.h>
 #include <uapi/asm/vsyscall.h>
 
-#define VGETCPU_RDTSCP 1
-#define VGETCPU_LSL 2
-
-/* kernel space (writeable) */
-extern int vgetcpu_mode;
-extern struct timezone sys_tz;
-
-#include <asm/vvar.h>
-
 extern void map_vsyscall(void);
 
 /*
@@ -21,24 +12,4 @@ extern void map_vsyscall(void);
  */
 extern bool emulate_vsyscall(struct pt_regs *regs, unsigned long address);
 
-#ifdef CONFIG_X86_64
-
-#define VGETCPU_CPU_MASK 0xfff
-
-static inline unsigned int __getcpu(void)
-{
-        unsigned int p;
-
-        if (VVAR(vgetcpu_mode) == VGETCPU_RDTSCP) {
-                /* Load per CPU data from RDTSCP */
-                native_read_tscp(&p);
-        } else {
-                /* Load per CPU data from GDT */
-                asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
-        }
-
-        return p;
-}
-#endif /* CONFIG_X86_64 */
-
 #endif /* _ASM_X86_VSYSCALL_H */
-- cgit
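
The cgit view above is limited to arch/x86/include/asm/vsyscall.h, so only the
removal side of the change is visible here. For illustration, a minimal sketch
of the unconditional-LSL helper is shown below, built from pieces of the
removed code (__PER_CPU_SEG, the "lsl" asm, and the 0xfff CPU mask); the
function name, where it lives after this patch, and the asm/segment.h include
are assumptions on my part, not taken from this diff:

#ifdef CONFIG_X86_64

#include <asm/segment.h>        /* __PER_CPU_SEG (assumed include; not in this diff) */

#define VGETCPU_CPU_MASK 0xfff  /* CPU number in the low 12 bits of the result */

static inline unsigned int __getcpu(void)
{
        unsigned int p;

        /*
         * Read the CPU/node encoding from the limit of the per-CPU GDT
         * segment.  LSL is unprivileged and available on every x86_64
         * CPU, so no RDTSCP fallback or vgetcpu_mode switch is needed.
         */
        asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));

        return p;
}

#endif /* CONFIG_X86_64 */

A getcpu() caller would then extract the CPU number as p & VGETCPU_CPU_MASK;
on x86_64 the node number sits in the bits above that field, so it comes out
as p >> 12. That encoding is implied by the VGETCPU_CPU_MASK definition the
patch removes, and it is what makes the single LSL path sufficient.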