author     Chang S. Bae <chang.seok.bae@intel.com>    2018-09-18 16:08:58 -0700
committer  Ingo Molnar <mingo@kernel.org>             2018-10-08 10:41:10 +0200
commit     ffebbaedc8616cffe648202e364dce6a045d65a2 (patch)
tree       db77803dfcd20c32ec9703f8d382943479941559 /arch/x86
parent     c4755613a1339ea77dbb15de75c9f74217209265 (diff)
x86/vdso: Introduce helper functions for CPU and node number
Clean up the CPU/node number related code a bit, to make it more
apparent how we are encoding/extracting the CPU and node fields from
the segment limit.

No change in functionality intended.

[ mingo: Wrote new changelog. ]

Suggested-by: Andy Lutomirski <luto@kernel.org>
Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Chang S. Bae <chang.seok.bae@intel.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Markus T Metzger <markus.t.metzger@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Shankar <ravi.v.shankar@intel.com>
Cc: Rik van Riel <riel@surriel.com>
Link: http://lkml.kernel.org/r/1537312139-5580-8-git-send-email-chang.seok.bae@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
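For reference, the packing scheme this patch centralizes is easy to model outside the kernel. A minimal userspace sketch follows; encode_cpu_node()/decode_cpu_node() are illustrative stand-ins for the patch's vdso_encode_cpu_node()/vdso_read_cpu_node() helpers, and the constants mirror the VDSO_CPU_SIZE/VDSO_CPU_MASK definitions added in segment.h below:

#include <assert.h>
#include <stdio.h>

#define VDSO_CPU_SIZE 12      /* CPU number occupies the low 12 bits */
#define VDSO_CPU_MASK 0xfff

static unsigned long encode_cpu_node(unsigned cpu, unsigned long node)
{
    return (node << VDSO_CPU_SIZE) | cpu;
}

static void decode_cpu_node(unsigned long p, unsigned *cpu, unsigned *node)
{
    *cpu  = p & VDSO_CPU_MASK;
    *node = p >> VDSO_CPU_SIZE;
}

int main(void)
{
    unsigned cpu, node;

    decode_cpu_node(encode_cpu_node(5, 1), &cpu, &node);
    assert(cpu == 5 && node == 1);  /* packed value is 0x1005 */
    printf("cpu=%u node=%u\n", cpu, node);
    return 0;
}

The low 12 bits carry the CPU number and everything above carries the node, so one value can be handed to both TSC_AUX and the segment limit.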
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/entry/vdso/vgetcpu.c   |  9
-rw-r--r--  arch/x86/entry/vdso/vma.c       | 19
-rw-r--r--  arch/x86/include/asm/segment.h  | 41
-rw-r--r--  arch/x86/include/asm/vgtod.h    | 26
4 files changed, 49 insertions, 46 deletions
diff --git a/arch/x86/entry/vdso/vgetcpu.c b/arch/x86/entry/vdso/vgetcpu.c
index 8ec3d1f4ce9a..de78fc9cd963 100644
--- a/arch/x86/entry/vdso/vgetcpu.c
+++ b/arch/x86/entry/vdso/vgetcpu.c
@@ -13,14 +13,7 @@
notrace long
__vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
{
- unsigned int p;
-
- p = __getcpu();
-
- if (cpu)
- *cpu = p & VGETCPU_CPU_MASK;
- if (node)
- *node = p >> 12;
+ vdso_read_cpu_node(cpu, node);
return 0;
}
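With the helper in place, __vdso_getcpu() stays a thin wrapper. To exercise this path from userspace, a recent glibc (2.29+, an assumption about the toolchain) exposes a getcpu() wrapper, which is normally dispatched through the vDSO on x86_64:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
    unsigned int cpu, node;

    /* glibc routes this through the vDSO when one is available */
    if (getcpu(&cpu, &node) == 0)
        printf("cpu=%u node=%u\n", cpu, node);
    return 0;
}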
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 0b114aafcedc..39b5584c5808 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -339,20 +339,15 @@ static void vgetcpu_cpu_init(void *arg)
{
int cpu = smp_processor_id();
struct desc_struct d = { };
- unsigned long node = 0;
-#ifdef CONFIG_NUMA
- node = cpu_to_node(cpu);
-#endif
+ unsigned long cpudata = vdso_encode_cpu_node(cpu, cpu_to_node(cpu));
+
if (static_cpu_has(X86_FEATURE_RDTSCP))
- write_rdtscp_aux((node << 12) | cpu);
+ write_rdtscp_aux(cpudata);
+
+ /* Store CPU and node number in limit */
+ d.limit0 = cpudata;
+ d.limit1 = cpudata >> 16;
- /*
- * Store cpu number in limit so that it can be loaded
- * quickly in user space in vgetcpu. (12 bits for the CPU
- * and 8 bits for the node)
- */
- d.limit0 = cpu | ((node & 0xf) << 12);
- d.limit1 = node >> 4;
d.type = 5; /* RO data, expand down, accessed */
d.dpl = 3; /* Visible to user code */
d.s = 1; /* Not a system segment */
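The descriptor split works because the segment limit is 20 bits wide: limit0 holds the low 16 bits of cpudata and the 4-bit limit1 field holds the rest, which is why the node number is effectively capped at 8 bits (20 - 12). A small worked example, plain C rather than kernel code:

#include <stdio.h>

int main(void)
{
    unsigned long cpudata = (1UL << 12) | 5;       /* node 1, CPU 5 -> 0x1005 */
    unsigned short limit0 = cpudata & 0xffff;      /* low 16 bits  -> 0x1005 */
    unsigned char  limit1 = (cpudata >> 16) & 0xf; /* top 4 bits   -> 0x0 */

    printf("limit0=%#x limit1=%#x\n", limit0, limit1);
    return 0;
}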
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index e3e788ea52e5..4d1f6cc62e13 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -224,6 +224,47 @@
#define GDT_ENTRY_TLS_ENTRIES 3
#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES* 8)
+#ifdef CONFIG_X86_64
+
+/* Bit size and mask of CPU number stored in the per CPU data (and TSC_AUX) */
+#define VDSO_CPU_SIZE 12
+#define VDSO_CPU_MASK 0xfff
+
+#ifndef __ASSEMBLY__
+
+/* Helper functions to store/load CPU and node numbers */
+
+static inline unsigned long vdso_encode_cpu_node(int cpu, unsigned long node)
+{
+ return ((node << VDSO_CPU_SIZE) | cpu);
+}
+
+static inline void vdso_read_cpu_node(unsigned *cpu, unsigned *node)
+{
+ unsigned int p;
+
+ /*
+ * Load CPU and node number from GDT. LSL is faster than RDTSCP
+ * and works on all CPUs. This is volatile so that it orders
+ * correctly with respect to barrier() and to keep GCC from cleverly
+ * hoisting it out of the calling function.
+ *
+ * If RDPID is available, use it.
+ */
+ alternative_io ("lsl %[seg],%[p]",
+ ".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
+ X86_FEATURE_RDPID,
+ [p] "=a" (p), [seg] "r" (__CPU_NUMBER_SEG));
+
+ if (cpu)
+ *cpu = (p & VDSO_CPU_MASK);
+ if (node)
+ *node = (p >> VDSO_CPU_SIZE);
+}
+
+#endif /* !__ASSEMBLY__ */
+#endif /* CONFIG_X86_64 */
+
#ifdef __KERNEL__
/*
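Since the descriptor is created with DPL 3, the LSL trick is usable from user mode as well. A userspace sketch, assuming the x86_64 Linux GDT layout where the per-CPU segment sits at entry 15; the hardcoded selector is for illustration only, and real code should go through the vDSO instead:

#include <stdio.h>

#define CPU_NUMBER_SEG ((15 << 3) | 3)  /* GDT entry 15, RPL 3: assumed layout */

int main(void)
{
    unsigned int p;

    /* LSL loads the segment limit, i.e. (node << 12) | cpu */
    asm volatile("lsl %1, %0" : "=r" (p) : "r" ((unsigned int)CPU_NUMBER_SEG));
    printf("cpu=%u node=%u\n", p & 0xfff, p >> 12);
    return 0;
}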
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index 4e81ea920722..056a61c8c5c7 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -77,30 +77,4 @@ static inline void gtod_write_end(struct vsyscall_gtod_data *s)
++s->seq;
}
-#ifdef CONFIG_X86_64
-
-#define VGETCPU_CPU_MASK 0xfff
-
-static inline unsigned int __getcpu(void)
-{
- unsigned int p;
-
- /*
- * Load CPU (and node) number from GDT. LSL is faster than RDTSCP
- * and works on all CPUs. This is volatile so that it orders
- * correctly with respect to barrier() and to keep GCC from cleverly
- * hoisting it out of the calling function.
- *
- * If RDPID is available, use it.
- */
- alternative_io ("lsl %[seg],%[p]",
- ".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
- X86_FEATURE_RDPID,
- [p] "=a" (p), [seg] "r" (__CPU_NUMBER_SEG));
-
- return p;
-}
-
-#endif /* CONFIG_X86_64 */
-
#endif /* _ASM_X86_VGTOD_H */