summaryrefslogtreecommitdiff
path: root/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S
blob: cd29266ed7d73303700ed70d5f775ba85503286f (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <context.h>

	.globl	workaround_bpiall_vbar0_runtime_exceptions

/*
 * Hand-assembled AArch32 (A32) instruction encodings for the workaround
 * stub that runs at Secure EL1.  They are emitted as .word data because
 * this file is assembled as AArch64:
 *   EMIT_BPIALL        = mcr p15, 0, r0, c7, c5, 6   (BPIALL)
 *   EMIT_MOV_R0_IMM(v) = mov r0, #v                  (v is a single hex digit)
 *   EMIT_SMC           = smc #0
 */
#define EMIT_BPIALL		0xee070fd5
#define EMIT_MOV_R0_IMM(v)	0xe3a0000##v
#define EMIT_SMC		0xe1600070

	/*
	 * Enter the AArch32 workaround stub described by \_stub_name.
	 *
	 * \_stub_name points to a 4-quadword context block laid out as:
	 *   [#0]  SPSR_EL3 value (AArch32 Supervisor mode, A/I/F masked)
	 *   [#8]  VBAR_EL3 value (vbar1 table that catches the SMC back)
	 *   [#16] SCTLR_EL1 value (MMU off, I-cache on)
	 *   [#24] ELR_EL3 value (address of the AArch32 stub instructions)
	 *
	 * The incoming GP registers and the live EL3/EL1 system-register
	 * state are saved into the context on SP_EL3 so that the vbar1 SMC
	 * handler can restore them once the stub has executed BPIALL.
	 */
	.macro	enter_workaround _stub_name
	/* Save GP regs */
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]

	/* x4 = base of the stub's 4-quadword context block */
	adr	x4, \_stub_name

	/*
	 * Load SPSR_EL3 and VBAR_EL3.  SPSR_EL3 is set up to have
	 * all interrupts masked in preparation to running the workaround
	 * stub in S-EL1.  VBAR_EL3 points to the vector table that
	 * will handle the SMC back from the workaround stub.
	 */
	ldp	x0, x1, [x4, #0]

	/*
	 * Load SCTLR_EL1 and ELR_EL3.  SCTLR_EL1 is configured to disable
	 * the MMU in S-EL1.  ELR_EL3 points to the appropriate stub in S-EL1.
	 */
	ldp	x2, x3, [x4, #16]

	/* Snapshot the state that entering the stub is about to clobber */
	mrs	x4, scr_el3
	mrs	x5, spsr_el3
	mrs	x6, elr_el3
	mrs	x7, sctlr_el1
	mrs	x8, esr_el3

	/*
	 * Preserve system registers in the workaround context:
	 * QUAD0 = {SCR_EL3, SPSR_EL3}, QUAD2 = {ELR_EL3, SCTLR_EL1},
	 * QUAD4 = {ESR_EL3, x30} (x30 is live and has no GP-reg slot left).
	 */
	stp	x4, x5, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD0]
	stp	x6, x7, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD2]
	stp	x8, x30, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD4]

	/*
	 * Setting SCR_EL3 to all zeroes means that the NS, RW
	 * and SMD bits are configured as expected.
	 */
	msr	scr_el3, xzr

	/*
	 * Reload system registers with the crafted values
	 * in preparation for entry in S-EL1.
	 */
	msr	spsr_el3, x0
	msr	vbar_el3, x1
	msr	sctlr_el1, x2
	msr	elr_el3, x3

	/* Enter the AArch32 stub at S-EL1 */
	eret
	.endm

	/* ---------------------------------------------------------------------
	 * This vector table is used at runtime to enter the workaround at
	 * AArch32 S-EL1 for Sync/IRQ/FIQ/SError exceptions.  If the workaround
	 * is not enabled, the existing runtime exception vector table is used.
	 * ---------------------------------------------------------------------
	 */
vector_base workaround_bpiall_vbar0_runtime_exceptions

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpiall_vbar0_sync_exception_sp_el0
	b	sync_exception_sp_el0
	/*
	 * Since each vector table entry is 128 bytes, we can store the
	 * stub context in the unused space to minimize memory footprint.
	 */
aarch32_stub_smc:
	/* AArch32 stub: BPIALL; mov r0, #1 (tags original exception as Sync); smc #0 */
	.word	EMIT_BPIALL
	.word	EMIT_MOV_R0_IMM(1)
	.word	EMIT_SMC
aarch32_stub_ctx_smc:
	/* Mask all interrupts and set AArch32 Supervisor mode */
	.quad	(SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
	         SPSR_M_AARCH32 << SPSR_M_SHIFT | \
	         MODE32_svc << MODE32_SHIFT)

	/*
	 * VBAR_EL3 points to vbar1 which is the vector table
	 * used while the workaround is executing.
	 */
	.quad	workaround_bpiall_vbar1_runtime_exceptions

	/* Setup SCTLR_EL1 with MMU off and I$ on */
	.quad	SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT

	/* ELR_EL3 is setup to point to the sync exception stub in AArch32 */
	.quad	aarch32_stub_smc
	check_vector_size workaround_bpiall_vbar0_sync_exception_sp_el0

vector_entry workaround_bpiall_vbar0_irq_sp_el0
	b	irq_sp_el0
	/* AArch32 stub: BPIALL; mov r0, #2 (tags original exception as IRQ); smc #0 */
aarch32_stub_irq:
	.word	EMIT_BPIALL
	.word	EMIT_MOV_R0_IMM(2)
	.word	EMIT_SMC
aarch32_stub_ctx_irq:
	/* Stub context: SPSR_EL3, VBAR_EL3 (vbar1), SCTLR_EL1, ELR_EL3 */
	.quad	(SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
	         SPSR_M_AARCH32 << SPSR_M_SHIFT | \
	         MODE32_svc << MODE32_SHIFT)
	.quad	workaround_bpiall_vbar1_runtime_exceptions
	.quad	SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
	.quad	aarch32_stub_irq
	check_vector_size workaround_bpiall_vbar0_irq_sp_el0

vector_entry workaround_bpiall_vbar0_fiq_sp_el0
	b	fiq_sp_el0
	/* AArch32 stub: BPIALL; mov r0, #4 (tags original exception as FIQ); smc #0 */
aarch32_stub_fiq:
	.word	EMIT_BPIALL
	.word	EMIT_MOV_R0_IMM(4)
	.word	EMIT_SMC
aarch32_stub_ctx_fiq:
	/* Stub context: SPSR_EL3, VBAR_EL3 (vbar1), SCTLR_EL1, ELR_EL3 */
	.quad	(SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
	         SPSR_M_AARCH32 << SPSR_M_SHIFT | \
	         MODE32_svc << MODE32_SHIFT)
	.quad	workaround_bpiall_vbar1_runtime_exceptions
	.quad	SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
	.quad	aarch32_stub_fiq
	check_vector_size workaround_bpiall_vbar0_fiq_sp_el0

vector_entry workaround_bpiall_vbar0_serror_sp_el0
	b	serror_sp_el0
	/* AArch32 stub: BPIALL; mov r0, #8 (tags original exception as SError); smc #0 */
aarch32_stub_serror:
	.word	EMIT_BPIALL
	.word	EMIT_MOV_R0_IMM(8)
	.word	EMIT_SMC
aarch32_stub_ctx_serror:
	/* Stub context: SPSR_EL3, VBAR_EL3 (vbar1), SCTLR_EL1, ELR_EL3 */
	.quad	(SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
	         SPSR_M_AARCH32 << SPSR_M_SHIFT | \
	         MODE32_svc << MODE32_SHIFT)
	.quad	workaround_bpiall_vbar1_runtime_exceptions
	.quad	SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
	.quad	aarch32_stub_serror
	check_vector_size workaround_bpiall_vbar0_serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * These vectors branch straight to the normal runtime handlers;
	 * no workaround stub is invoked on this path.
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpiall_vbar0_sync_exception_sp_elx
	b	sync_exception_sp_elx
	check_vector_size workaround_bpiall_vbar0_sync_exception_sp_elx

vector_entry workaround_bpiall_vbar0_irq_sp_elx
	b	irq_sp_elx
	check_vector_size workaround_bpiall_vbar0_irq_sp_elx

vector_entry workaround_bpiall_vbar0_fiq_sp_elx
	b	fiq_sp_elx
	check_vector_size workaround_bpiall_vbar0_fiq_sp_elx

vector_entry workaround_bpiall_vbar0_serror_sp_elx
	b	serror_sp_elx
	check_vector_size workaround_bpiall_vbar0_serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * Each entry enters the AArch32 workaround stub whose context block
	 * corresponds to the exception type being taken.
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpiall_vbar0_sync_exception_aarch64
	enter_workaround aarch32_stub_ctx_smc
	check_vector_size workaround_bpiall_vbar0_sync_exception_aarch64

vector_entry workaround_bpiall_vbar0_irq_aarch64
	enter_workaround aarch32_stub_ctx_irq
	check_vector_size workaround_bpiall_vbar0_irq_aarch64

vector_entry workaround_bpiall_vbar0_fiq_aarch64
	enter_workaround aarch32_stub_ctx_fiq
	check_vector_size workaround_bpiall_vbar0_fiq_aarch64

vector_entry workaround_bpiall_vbar0_serror_aarch64
	enter_workaround aarch32_stub_ctx_serror
	check_vector_size workaround_bpiall_vbar0_serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * Same stub contexts as the AArch64 lower-EL entries above.
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpiall_vbar0_sync_exception_aarch32
	enter_workaround aarch32_stub_ctx_smc
	check_vector_size workaround_bpiall_vbar0_sync_exception_aarch32

vector_entry workaround_bpiall_vbar0_irq_aarch32
	enter_workaround aarch32_stub_ctx_irq
	check_vector_size workaround_bpiall_vbar0_irq_aarch32

vector_entry workaround_bpiall_vbar0_fiq_aarch32
	enter_workaround aarch32_stub_ctx_fiq
	check_vector_size workaround_bpiall_vbar0_fiq_aarch32

vector_entry workaround_bpiall_vbar0_serror_aarch32
	enter_workaround aarch32_stub_ctx_serror
	check_vector_size workaround_bpiall_vbar0_serror_aarch32

	/* ---------------------------------------------------------------------
	 * This vector table is used while the workaround is executing.  It
	 * installs a simple SMC handler to allow the Sync/IRQ/FIQ/SError
	 * workaround stubs to enter EL3 from S-EL1.  It restores the previous
	 * EL3 state before proceeding with the normal runtime exception vector.
	 *
	 * While vbar1 is installed, interrupts are masked by the stub's SPSR
	 * and the only expected entry is the stub's SMC (a Sync exception
	 * from lower EL using AArch32); every other vector reports an
	 * unhandled exception/interrupt.
	 * ---------------------------------------------------------------------
	 */
vector_base workaround_bpiall_vbar1_runtime_exceptions

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200 (UNUSED)
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpiall_vbar1_sync_exception_sp_el0
	b	report_unhandled_exception
	check_vector_size workaround_bpiall_vbar1_sync_exception_sp_el0

vector_entry workaround_bpiall_vbar1_irq_sp_el0
	b	report_unhandled_interrupt
	check_vector_size workaround_bpiall_vbar1_irq_sp_el0

vector_entry workaround_bpiall_vbar1_fiq_sp_el0
	b	report_unhandled_interrupt
	check_vector_size workaround_bpiall_vbar1_fiq_sp_el0

vector_entry workaround_bpiall_vbar1_serror_sp_el0
	b	report_unhandled_exception
	check_vector_size workaround_bpiall_vbar1_serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400 (UNUSED)
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpiall_vbar1_sync_exception_sp_elx
	b	report_unhandled_exception
	check_vector_size workaround_bpiall_vbar1_sync_exception_sp_elx

vector_entry workaround_bpiall_vbar1_irq_sp_elx
	b	report_unhandled_interrupt
	check_vector_size workaround_bpiall_vbar1_irq_sp_elx

vector_entry workaround_bpiall_vbar1_fiq_sp_elx
	b	report_unhandled_interrupt
	check_vector_size workaround_bpiall_vbar1_fiq_sp_elx

vector_entry workaround_bpiall_vbar1_serror_sp_elx
	b	report_unhandled_exception
	check_vector_size workaround_bpiall_vbar1_serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600 (UNUSED)
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpiall_vbar1_sync_exception_aarch64
	b	report_unhandled_exception
	check_vector_size workaround_bpiall_vbar1_sync_exception_aarch64

vector_entry workaround_bpiall_vbar1_irq_aarch64
	b	report_unhandled_interrupt
	check_vector_size workaround_bpiall_vbar1_irq_aarch64

vector_entry workaround_bpiall_vbar1_fiq_aarch64
	b	report_unhandled_interrupt
	check_vector_size workaround_bpiall_vbar1_fiq_aarch64

vector_entry workaround_bpiall_vbar1_serror_aarch64
	b	report_unhandled_exception
	check_vector_size workaround_bpiall_vbar1_serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * This is where the stub's SMC lands: an SMC issued from the AArch32
	 * S-EL1 stub is a Sync exception from a lower EL using AArch32.
	 * Undo the state swap performed by enter_workaround and dispatch to
	 * the handler for the original exception type.
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpiall_vbar1_sync_exception_aarch32
	/*
	 * Restore register state from the workaround context:
	 * QUAD0 = {SCR_EL3, SPSR_EL3}, QUAD2 = {ELR_EL3, SCTLR_EL1},
	 * QUAD4 = {ESR_EL3, x30} — mirrors the save in enter_workaround.
	 */
	ldp	x2, x3, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD0]
	ldp	x4, x5, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD2]
	ldp	x6, x30, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD4]

	/* Apply the restored system register state */
	msr	scr_el3, x2
	msr	spsr_el3, x3
	msr	elr_el3, x4
	msr	sctlr_el1, x5
	msr	esr_el3, x6

	/*
	 * Workaround is complete, so swap VBAR_EL3 to point
	 * to workaround entry table in preparation for subsequent
	 * Sync/IRQ/FIQ/SError exceptions.
	 */
	adr	x2, workaround_bpiall_vbar0_runtime_exceptions
	msr	vbar_el3, x2

	/*
	 * Restore all GP regs except x0 and x1.  The value in x0
	 * indicates the type of the original exception.
	 */
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	ldp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	ldp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	ldp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	ldp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	ldp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	ldp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	ldp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	ldp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	ldp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	ldp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	ldp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]

	/*
	 * Each of these handlers will first restore x0 and x1 from
	 * the context and then branch to the common implementation for
	 * each of the exception types.
	 *
	 * x0 still holds the stub's "mov r0, #imm" tag:
	 * 1 = Sync, 2 = IRQ, 4 = FIQ, 8 = SError, tested via bits 1-3.
	 */
	tbnz	x0, #1, workaround_bpiall_vbar1_irq
	tbnz	x0, #2, workaround_bpiall_vbar1_fiq
	tbnz	x0, #3, workaround_bpiall_vbar1_serror

	/* Fallthrough case for Sync exception */
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	b	sync_exception_aarch64
	check_vector_size workaround_bpiall_vbar1_sync_exception_aarch32

vector_entry workaround_bpiall_vbar1_irq_aarch32
	/* A direct IRQ while vbar1 is installed is unexpected */
	b	report_unhandled_interrupt
	/*
	 * Branch target from the vbar1 Sync handler when the original
	 * exception was an IRQ (bit 1 set in x0).
	 */
workaround_bpiall_vbar1_irq:
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	b	irq_aarch64
	check_vector_size workaround_bpiall_vbar1_irq_aarch32

vector_entry workaround_bpiall_vbar1_fiq_aarch32
	/* A direct FIQ while vbar1 is installed is unexpected */
	b	report_unhandled_interrupt
	/*
	 * Branch target from the vbar1 Sync handler when the original
	 * exception was an FIQ (bit 2 set in x0).
	 */
workaround_bpiall_vbar1_fiq:
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	b	fiq_aarch64
	check_vector_size workaround_bpiall_vbar1_fiq_aarch32

vector_entry workaround_bpiall_vbar1_serror_aarch32
	/* A direct SError while vbar1 is installed is unexpected */
	b	report_unhandled_exception
	/*
	 * Branch target from the vbar1 Sync handler when the original
	 * exception was an SError (bit 3 set in x0).
	 */
workaround_bpiall_vbar1_serror:
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	b	serror_aarch64
	check_vector_size workaround_bpiall_vbar1_serror_aarch32