path: root/arch/x86/include/asm/sync_core.h
blob: ab7382f92aff27405127748074a11341cbda474d
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SYNC_CORE_H
#define _ASM_X86_SYNC_CORE_H

#include <linux/preempt.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>

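/*
 * IRET-to-self: execute an IRET whose target is simply the next
 * instruction.  IRET is architecturally serializing, so this discards
 * the prefetched instruction stream without changing privilege level
 * and without exiting to a hypervisor.
 */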
#ifdef CONFIG_X86_32
static inline void iret_to_self(void)
{
	asm volatile (
		"pushfl\n\t"			/* EFLAGS */
		"pushl %%cs\n\t"		/* CS */
		"pushl $1f\n\t"			/* EIP = the label below */
		"iret\n\t"			/* serializing return to label 1 */
		"1:"
		: ASM_CALL_CONSTRAINT : : "memory");
}
#else
static inline void iret_to_self(void)
{
	unsigned int tmp;

	asm volatile (
		"mov %%ss, %0\n\t"		/* current SS */
		"pushq %q0\n\t"			/* SS slot of the IRET frame */
		"pushq %%rsp\n\t"		/* RSP at this point... */
		"addq $8, (%%rsp)\n\t"		/* ...fixed up to its pre-frame value */
		"pushfq\n\t"			/* RFLAGS */
		"mov %%cs, %0\n\t"		/* current CS */
		"pushq %q0\n\t"			/* CS slot */
		"pushq $1f\n\t"			/* RIP = the label below */
		"iretq\n\t"			/* serializing return to label 1 */
		"1:"
		: "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
}
#endif /* CONFIG_X86_32 */

/*
 * This function forces the icache and prefetched instruction stream to
 * catch up with reality in two very specific cases:
 *
 *  a) Text was modified using one virtual address and is about to be executed
 *     from the same physical page at a different virtual address.
 *
 *  b) Text was modified on a different CPU, may subsequently be
 *     executed on this CPU, and you want to make sure the new version
 *     gets executed.  This generally means you're calling this in an IPI.
 *
 * If you're calling this for a different reason, you're probably doing
 * it wrong.
 *
 * Like all of Linux's memory ordering operations, this is a
 * compiler barrier as well.
 */
static inline void sync_core(void)
{
	/*
	 * The SERIALIZE instruction is the most straightforward way to
	 * do this, but it is not universally available.
	 */
	if (static_cpu_has(X86_FEATURE_SERIALIZE)) {
		serialize();
		return;
	}

	/*
	 * For all other processors, there are quite a few ways to do this.
	 * IRET-to-self is nice because it works on every CPU, at any CPL
	 * (so it's compatible with paravirtualization), and it never exits
	 * to a hypervisor.  The only downsides are that it's a bit slow
	 * (it seems to be a bit more than 2x slower than the fastest
	 * options) and that it unmasks NMIs.  The "push %cs" is needed
	 * because, in paravirtual environments, __KERNEL_CS may not be a
	 * valid CS value when we do IRET directly.
	 *
	 * In case NMI unmasking or performance ever becomes a problem,
	 * the next best option appears to be MOV-to-CR2 and an
	 * unconditional jump.  That sequence also works on all CPUs,
	 * but it will fault at CPL3, i.e. when the kernel itself runs at
	 * CPL3, as a Xen PV guest kernel does.
	 *
	 * CPUID is the conventional way, but it's nasty: it doesn't
	 * exist on some 486-like CPUs, and it usually exits to a
	 * hypervisor.
	 */
	iret_to_self();
}
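
/*
 * Illustrative sketch of case (b) above -- not part of this header, and
 * the IPI callback name is made up for the example.  After kernel text
 * has been cross-modified, every other CPU gets an IPI that serializes
 * its instruction stream before the new code may run there:
 *
 *	static void sync_core_ipi(void *ignored)
 *	{
 *		sync_core();
 *	}
 *
 *	... patch the kernel text, then ...
 *	on_each_cpu(sync_core_ipi, NULL, 1);
 */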

/*
 * Ensure that a core serializing instruction is issued before returning
 * to user-mode. x86 implements return to user-space through sysexit,
 * sysretl, and sysretq, which are not core serializing.
 */
static inline void sync_core_before_usermode(void)
{
	/* With PTI, we unconditionally serialize before running user code. */
	if (static_cpu_has(X86_FEATURE_PTI))
		return;

	/*
	 * Even if we're in an interrupt, we might reschedule before returning,
	 * in which case we could switch to a different thread in the same mm
	 * and return using SYSRET or SYSEXIT.  Instead of trying to keep
	 * track of our need to sync the core, just sync right away.
	 */
	sync_core();
}
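
/*
 * Illustrative sketch, not part of this header: a membarrier(2)-style
 * "expedited sync core" request for the current mm would be honored on
 * the way back to user space roughly as follows (the predicate name is
 * hypothetical):
 *
 *	if (mm_needs_sync_core(current->mm))
 *		sync_core_before_usermode();
 */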

#endif /* _ASM_X86_SYNC_CORE_H */