// SPDX-License-Identifier: GPL-2.0
#include <asm/paravirt.h>
#include <asm/asm-offsets.h>
#include <linux/stringify.h>

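/*
 * Native instruction templates for 64-bit paravirt patching.
 *
 * DEF_NATIVE(ops, name, code), defined in asm/paravirt_types.h, assembles
 * the native instruction sequence "code" into the kernel image between a
 * start_<ops>_<name> and an end_<ops>_<name> label, roughly:
 *
 *	extern const char start_##ops##_##name[], end_##ops##_##name[];
 *	asm("start_" #ops "_" #name ":\n\t" code "\n\t"
 *	    "end_" #ops "_" #name ":");
 *
 * native_patch() below copies these snippets over the corresponding
 * paravirt call sites at boot, so a kernel running on bare metal pays
 * (almost) nothing for the paravirt indirection on these hot paths.
 */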
#ifdef CONFIG_PARAVIRT_XXL
DEF_NATIVE(irq, irq_disable, "cli");
DEF_NATIVE(irq, irq_enable, "sti");
DEF_NATIVE(irq, restore_fl, "pushq %rdi; popfq");
DEF_NATIVE(irq, save_fl, "pushfq; popq %rax");
DEF_NATIVE(mmu, read_cr2, "movq %cr2, %rax");
DEF_NATIVE(mmu, read_cr3, "movq %cr3, %rax");
DEF_NATIVE(mmu, write_cr3, "movq %rdi, %cr3");
DEF_NATIVE(cpu, wbinvd, "wbinvd");

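/*
 * Native 64-bit syscall return: restore the user GS base with swapgs,
 * then sysretq back to user mode.
 */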
DEF_NATIVE(cpu, usergs_sysret64, "swapgs; sysretq");
DEF_NATIVE(cpu, swapgs, "swapgs");
DEF_NATIVE(, mov64, "mov %rdi, %rax");

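/*
 * Any op whose native implementation is the 64-bit identity function
 * (_paravirt_ident_64, e.g. the native pte_val()-style value
 * conversions) is patched with the "mov %rdi, %rax" template above;
 * see paravirt_patch_default().
 */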
unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
{
	return paravirt_patch_insns(insnbuf, len,
				    start__mov64, end__mov64);
}
#endif

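/*
 * Spinlock fast paths: the native queued-spinlock unlock is a single
 * byte store, and the native vcpu_is_preempted() is constant false
 * ("xor %eax, %eax").  These templates may only be patched in when no
 * hypervisor has replaced the lock ops; native_patch() checks this via
 * pv_is_native_spin_unlock()/pv_is_native_vcpu_is_preempted() below.
 */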
#if defined(CONFIG_PARAVIRT_SPINLOCKS)
DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%rdi)");
DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax");
#endif

extern bool pv_is_native_spin_unlock(void);
extern bool pv_is_native_vcpu_is_preempted(void);

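/*
 * Patch one paravirt call site with its native template.  @type is the
 * PARAVIRT_PATCH() index of the op, @ibuf the buffer to write into,
 * @addr the call site's address and @len the space available there.
 * Returns the number of bytes emitted.  Ops with no native template
 * here fall back to paravirt_patch_default(), which emits a nop, an
 * identity mov or a direct call to the pv_ops function as appropriate.
 */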
unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len)
{
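/* Expand to a case that copies the matching native template into ibuf. */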
#define PATCH_SITE(ops, x)					\
	case PARAVIRT_PATCH(ops.x):				\
		return paravirt_patch_insns(ibuf, len, start_##ops##_##x, end_##ops##_##x)

	switch (type) {
#ifdef CONFIG_PARAVIRT_XXL
		PATCH_SITE(irq, restore_fl);
		PATCH_SITE(irq, save_fl);
		PATCH_SITE(irq, irq_enable);
		PATCH_SITE(irq, irq_disable);
		PATCH_SITE(cpu, usergs_sysret64);
		PATCH_SITE(cpu, swapgs);
		PATCH_SITE(cpu, wbinvd);
		PATCH_SITE(mmu, read_cr2);
		PATCH_SITE(mmu, read_cr3);
		PATCH_SITE(mmu, write_cr3);
#endif
#if defined(CONFIG_PARAVIRT_SPINLOCKS)
	case PARAVIRT_PATCH(lock.queued_spin_unlock):
		if (pv_is_native_spin_unlock())
			return paravirt_patch_insns(ibuf, len,
						    start_lock_queued_spin_unlock,
						    end_lock_queued_spin_unlock);
		break;

	case PARAVIRT_PATCH(lock.vcpu_is_preempted):
		if (pv_is_native_vcpu_is_preempted())
			return paravirt_patch_insns(ibuf, len,
						    start_lock_vcpu_is_preempted,
						    end_lock_vcpu_is_preempted);
		break;
#endif

	default:
		break;
	}
#undef PATCH_SITE
	return paravirt_patch_default(type, ibuf, addr, len);
}