arch/x86/include/asm/cpu_entry_area.h
// SPDX-License-Identifier: GPL-2.0

#ifndef _ASM_X86_CPU_ENTRY_AREA_H
#define _ASM_X86_CPU_ENTRY_AREA_H

#include <linux/percpu-defs.h>
#include <asm/processor.h>
#include <asm/intel_ds.h>

#ifdef CONFIG_X86_64

/* Macro to enforce the same ordering and stack sizes */
#define ESTACKS_MEMBERS(guardsize, db2_holesize)\
	char	DF_stack_guard[guardsize];	\
	char	DF_stack[EXCEPTION_STKSZ];	\
	char	NMI_stack_guard[guardsize];	\
	char	NMI_stack[EXCEPTION_STKSZ];	\
	char	DB2_stack_guard[guardsize];	\
	char	DB2_stack[db2_holesize];	\
	char	DB1_stack_guard[guardsize];	\
	char	DB1_stack[EXCEPTION_STKSZ];	\
	char	DB_stack_guard[guardsize];	\
	char	DB_stack[EXCEPTION_STKSZ];	\
	char	MCE_stack_guard[guardsize];	\
	char	MCE_stack[EXCEPTION_STKSZ];	\
	char	IST_top_guard[guardsize];	\

/* The exception stacks' physical storage. No guard pages required */
struct exception_stacks {
	ESTACKS_MEMBERS(0, 0)
};

/* The effective cpu entry area mapping with guard pages. */
struct cea_exception_stacks {
	ESTACKS_MEMBERS(PAGE_SIZE, EXCEPTION_STKSZ)
};
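
/*
 * Illustrative layout (not part of the original header): expanding
 * ESTACKS_MEMBERS(PAGE_SIZE, EXCEPTION_STKSZ) gives, from low to high
 * addresses, an alternation of unmapped guard pages and stacks:
 *
 *	DF_stack_guard		PAGE_SIZE, left unmapped
 *	DF_stack		EXCEPTION_STKSZ
 *	NMI_stack_guard		PAGE_SIZE, left unmapped
 *	NMI_stack		EXCEPTION_STKSZ
 *	...
 *	MCE_stack		EXCEPTION_STKSZ
 *	IST_top_guard		PAGE_SIZE, left unmapped
 *
 * An overflow off the bottom of any stack then faults on the unmapped
 * guard page instead of silently corrupting the adjacent stack.
 * DB2_stack has no physical backing at all (db2_holesize is 0 in
 * exception_stacks); it is a hole in the effective mapping.
 */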

/*
 * The exception stack ordering in [cea_]exception_stacks
 */
enum exception_stack_ordering {
	ESTACK_DF,
	ESTACK_NMI,
	ESTACK_DB2,
	ESTACK_DB1,
	ESTACK_DB,
	ESTACK_MCE,
	N_EXCEPTION_STACKS
};

#define CEA_ESTACK_SIZE(st)					\
	sizeof(((struct cea_exception_stacks *)0)->st## _stack)

#define CEA_ESTACK_BOT(ceastp, st)				\
	((unsigned long)&(ceastp)->st## _stack)

#define CEA_ESTACK_TOP(ceastp, st)				\
	(CEA_ESTACK_BOT(ceastp, st) + CEA_ESTACK_SIZE(st))

#define CEA_ESTACK_OFFS(st)					\
	offsetof(struct cea_exception_stacks, st## _stack)

#define CEA_ESTACK_PAGES					\
	(sizeof(struct cea_exception_stacks) / PAGE_SIZE)
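
/*
 * Usage sketch (illustrative; 'sp' is a hypothetical stack pointer to
 * test): given a per CPU cea_exception_stacks pointer, the accessors
 * above resolve the bounds of one IST stack, e.g. for the NMI stack:
 *
 *	struct cea_exception_stacks *s = __this_cpu_read(cea_exception_stacks);
 *	unsigned long bot = CEA_ESTACK_BOT(s, NMI);	// &s->NMI_stack
 *	unsigned long top = CEA_ESTACK_TOP(s, NMI);	// bot + sizeof(NMI_stack)
 *	bool on_stack = sp >= bot && sp < top;
 */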

#endif /* CONFIG_X86_64 */

/*
 * cpu_entry_area is a percpu region that contains things needed by the CPU
 * and early entry/exit code.  Real types aren't used for all fields here
 * to avoid circular header dependencies.
 *
 * Every field is a virtual alias of some other allocated backing store.
 * There is no direct allocation of a struct cpu_entry_area.
 */
struct cpu_entry_area {
	char gdt[PAGE_SIZE];

	/*
	 * The GDT is just below entry_stack and thus serves (on x86_64) as
	 * a read-only guard page.
	 */
	struct entry_stack_page entry_stack_page;

	/*
	 * On x86_64, the TSS is mapped RO.  On x86_32, it's mapped RW because
	 * we need task switches to work, and task switches write to the TSS.
	 */
	struct tss_struct tss;

#ifdef CONFIG_X86_64
	/*
	 * Exception stacks used for IST entries with guard pages.
	 */
	struct cea_exception_stacks estacks;
#endif
#ifdef CONFIG_CPU_SUP_INTEL
	/*
	 * Per CPU debug store for Intel performance monitoring. Wastes a
	 * full page at the moment.
	 */
	struct debug_store cpu_debug_store;
	/*
	 * The actual PEBS/BTS buffers must be mapped to user space.
	 * Reserve enough fixmap PTEs.
	 */
	struct debug_store_buffers cpu_debug_buffers;
#endif
};

#define CPU_ENTRY_AREA_SIZE	(sizeof(struct cpu_entry_area))
#define CPU_ENTRY_AREA_TOT_SIZE	(CPU_ENTRY_AREA_SIZE * NR_CPUS)

DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);

extern void setup_cpu_entry_areas(void);
extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);
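
/*
 * Mapping sketch (illustrative; the real mapping logic lives in
 * arch/x86/mm/cpu_entry_area.c): each field of struct cpu_entry_area is
 * populated by pointing a PTE at separately allocated backing store,
 * page by page, roughly:
 *
 *	for (; pages; pages--, cea += PAGE_SIZE, ptr += PAGE_SIZE)
 *		cea_set_pte(cea, per_cpu_ptr_to_phys(ptr), prot);
 *
 * with e.g. prot = PAGE_KERNEL_RO for the GDT alias. A struct
 * cpu_entry_area itself is never allocated directly.
 */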

#define CPU_ENTRY_AREA_RO_IDT		CPU_ENTRY_AREA_BASE
#define CPU_ENTRY_AREA_PER_CPU		(CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE)

#define CPU_ENTRY_AREA_RO_IDT_VADDR	((void *)CPU_ENTRY_AREA_RO_IDT)

#define CPU_ENTRY_AREA_MAP_SIZE			\
	(CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_TOT_SIZE - CPU_ENTRY_AREA_BASE)
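
/*
 * Layout arithmetic (illustrative): the RO IDT occupies the first page
 * at CPU_ENTRY_AREA_BASE and the per CPU areas are stacked contiguously
 * above it, so CPU n's area sits at
 *
 *	CPU_ENTRY_AREA_PER_CPU + n * CPU_ENTRY_AREA_SIZE
 *
 * and CPU_ENTRY_AREA_MAP_SIZE covers the IDT page plus all NR_CPUS
 * areas.
 */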

extern struct cpu_entry_area *get_cpu_entry_area(int cpu);

static inline struct entry_stack *cpu_entry_stack(int cpu)
{
	return &get_cpu_entry_area(cpu)->entry_stack_page.stack;
}

#define __this_cpu_ist_top_va(name)					\
	CEA_ESTACK_TOP(__this_cpu_read(cea_exception_stacks), name)
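
/*
 * Usage sketch (illustrative): entry code can find the top of the
 * current CPU's #DF IST stack without knowing its CPU number:
 *
 *	unsigned long top = __this_cpu_ist_top_va(DF);
 *
 * which expands to CEA_ESTACK_TOP(__this_cpu_read(cea_exception_stacks), DF),
 * i.e. the first byte above this CPU's DF_stack mapping.
 */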

#endif /* _ASM_X86_CPU_ENTRY_AREA_H */