path: root/arch/s390/mm/kasan_init.c
// SPDX-License-Identifier: GPL-2.0
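/*
 * Early setup of the KASAN shadow memory for s390: the shadow page tables
 * are built with simple bump allocators, before the normal page allocator
 * exists and before instrumentation reports are enabled.
 */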
#include <linux/kasan.h>
#include <linux/sched/task.h>
#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/kasan.h>
#include <asm/mem_detect.h>
#include <asm/processor.h>
#include <asm/sclp.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/uv.h>

static unsigned long segment_pos __initdata;
static unsigned long segment_low __initdata;
static bool has_edat __initdata;
static bool has_nx __initdata;

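/*
 * Address of the shadow byte that tracks a given kernel address. With
 * generic KASAN one shadow byte covers 8 bytes of memory:
 *   shadow(addr) = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
 */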
#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))

static void __init kasan_early_panic(const char *reason)
{
	sclp_early_printk("The Linux kernel failed to boot with the KernelAddressSanitizer:\n");
	sclp_early_printk(reason);
	disabled_wait();
}

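/*
 * segment_pos/segment_low and pgalloc_pos/pgalloc_low are downward-growing
 * bump allocation cursors (the pgalloc_* pair is set up earlier during
 * boot). An allocation simply moves the cursor down; crossing the low
 * watermark means we ran out of memory for the shadow mapping.
 */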
static void * __init kasan_early_alloc_segment(void)
{
	segment_pos -= _SEGMENT_SIZE;

	if (segment_pos < segment_low)
		kasan_early_panic("out of memory during initialisation\n");

	return __va(segment_pos);
}

static void * __init kasan_early_alloc_pages(unsigned int order)
{
	pgalloc_pos -= (PAGE_SIZE << order);

	if (pgalloc_pos < pgalloc_low)
		kasan_early_panic("out of memory during initialisation\n");

	return __va(pgalloc_pos);
}

static void * __init kasan_early_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = kasan_early_alloc_pages(CRST_ALLOC_ORDER);
	if (table)
		crst_table_init(table, val);
	return table;
}

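/*
 * An s390 page table (_PAGE_TABLE_SIZE, 2K) is half of a 4K page, so two
 * pte tables are carved out of every allocated page; the unused half is
 * kept in pte_leftover and handed out by the next call.
 */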
static pte_t * __init kasan_early_pte_alloc(void)
{
	static void *pte_leftover;
	pte_t *pte;

	BUILD_BUG_ON(_PAGE_TABLE_SIZE * 2 != PAGE_SIZE);

	if (!pte_leftover) {
		pte_leftover = kasan_early_alloc_pages(0);
		pte = pte_leftover + _PAGE_TABLE_SIZE;
	} else {
		pte = pte_leftover;
		pte_leftover = NULL;
	}
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}

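/*
 * POPULATE_MAP:         allocate and map fresh shadow memory (read-write)
 * POPULATE_ZERO_SHADOW: map the range to the shared read-only zero shadow
 *                       page/tables; such memory is never tracked
 * POPULATE_SHALLOW:     populate only the top (pgd) level; lower levels are
 *                       filled in on demand later (KASAN_VMALLOC)
 */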
enum populate_mode {
	POPULATE_MAP,
	POPULATE_ZERO_SHADOW,
	POPULATE_SHALLOW
};

static inline pgprot_t pgprot_clear_bit(pgprot_t pgprot, unsigned long bit)
{
	return __pgprot(pgprot_val(pgprot) & ~bit);
}

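/*
 * Walk [address, end) from pgd down to pte. In POPULATE_ZERO_SHADOW mode
 * large, aligned ranges are short-circuited to the shared zero shadow
 * tables; in POPULATE_MAP mode fresh memory is mapped, using 1M segments
 * when EDAT is available and 4K pages otherwise.
 */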
static void __init kasan_early_pgtable_populate(unsigned long address,
						unsigned long end,
						enum populate_mode mode)
{
	pgprot_t pgt_prot_zero = PAGE_KERNEL_RO;
	pgprot_t pgt_prot = PAGE_KERNEL;
	pgprot_t sgt_prot = SEGMENT_KERNEL;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pmd_t pmd;
	pte_t pte;

	if (!has_nx) {
		pgt_prot_zero = pgprot_clear_bit(pgt_prot_zero, _PAGE_NOEXEC);
		pgt_prot = pgprot_clear_bit(pgt_prot, _PAGE_NOEXEC);
		sgt_prot = pgprot_clear_bit(sgt_prot, _SEGMENT_ENTRY_NOEXEC);
	}

	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PGDIR_SIZE) &&
			    end - address >= PGDIR_SIZE) {
				pgd_populate(&init_mm, pg_dir,
						kasan_early_shadow_p4d);
				address = (address + PGDIR_SIZE) & PGDIR_MASK;
				continue;
			}
			p4_dir = kasan_early_crst_alloc(_REGION2_ENTRY_EMPTY);
			pgd_populate(&init_mm, pg_dir, p4_dir);
		}

		if (mode == POPULATE_SHALLOW) {
			address = (address + P4D_SIZE) & P4D_MASK;
			continue;
		}

		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, P4D_SIZE) &&
			    end - address >= P4D_SIZE) {
				p4d_populate(&init_mm, p4_dir,
						kasan_early_shadow_pud);
				address = (address + P4D_SIZE) & P4D_MASK;
				continue;
			}
			pu_dir = kasan_early_crst_alloc(_REGION3_ENTRY_EMPTY);
			p4d_populate(&init_mm, p4_dir, pu_dir);
		}

		pu_dir = pud_offset(p4_dir, address);
		if (pud_none(*pu_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PUD_SIZE) &&
			    end - address >= PUD_SIZE) {
				pud_populate(&init_mm, pu_dir,
						kasan_early_shadow_pmd);
				address = (address + PUD_SIZE) & PUD_MASK;
				continue;
			}
			pm_dir = kasan_early_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			if (IS_ALIGNED(address, PMD_SIZE) &&
			    end - address >= PMD_SIZE) {
				if (mode == POPULATE_ZERO_SHADOW) {
					pmd_populate(&init_mm, pm_dir, kasan_early_shadow_pte);
					address = (address + PMD_SIZE) & PMD_MASK;
					continue;
				} else if (has_edat) {
					void *page = kasan_early_alloc_segment();

					memset(page, 0, _SEGMENT_SIZE);
					pmd = __pmd(__pa(page));
					pmd = set_pmd_bit(pmd, sgt_prot);
					set_pmd(pm_dir, pmd);
					address = (address + PMD_SIZE) & PMD_MASK;
					continue;
				}
			}
			pt_dir = kasan_early_pte_alloc();
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *page;

			switch (mode) {
			case POPULATE_MAP:
				page = kasan_early_alloc_pages(0);
				memset(page, 0, PAGE_SIZE);
				pte = __pte(__pa(page));
				pte = set_pte_bit(pte, pgt_prot);
				set_pte(pt_dir, pte);
				break;
			case POPULATE_ZERO_SHADOW:
				page = kasan_early_shadow_page;
				pte = __pte(__pa(page));
				pte = set_pte_bit(pte, pgt_prot_zero);
				set_pte(pt_dir, pte);
				break;
			case POPULATE_SHALLOW:
				/* should never happen */
				break;
			}
		}
		address += PAGE_SIZE;
	}
}

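/*
 * Facility 8 is EDAT-1 (allows 1M segment mappings), facility 130 is the
 * instruction-execution-protection facility behind _PAGE_NOEXEC; the
 * control register 0 bits enable the corresponding hardware features.
 */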
static void __init kasan_early_detect_facilities(void)
{
	if (test_facility(8)) {
		has_edat = true;
		__ctl_set_bit(0, 23);
	}
	if (!noexec_disabled && test_facility(130)) {
		has_nx = true;
		__ctl_set_bit(0, 20);
	}
}

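/*
 * Build the complete early shadow mapping and then set
 * init_task.kasan_depth to 0, which turns on KASAN reporting.
 */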
void __init kasan_early_init(void)
{
	pte_t pte_z = __pte(__pa(kasan_early_shadow_page) | pgprot_val(PAGE_KERNEL_RO));
	pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
	pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
	p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);
	unsigned long untracked_end = MODULES_VADDR;
	unsigned long shadow_alloc_size;
	unsigned long memsize;

	kasan_early_detect_facilities();
	if (!has_nx)
		pte_z = clear_pte_bit(pte_z, __pgprot(_PAGE_NOEXEC));

	memsize = get_mem_detect_end();
	if (!memsize)
		kasan_early_panic("cannot detect physical memory size\n");
	/*
	 * Kasan currently supports standby memory, but only if it follows
	 * online memory (the default allocation), i.e. there are no memory
	 * holes.
	 * - memsize represents the end of online memory
	 * - ident_map_size represents online + standby memory, with any
	 *   memory limits taken into account
	 * Kasan maps "memsize" right away:
	 * [__sha(0), __sha(memsize)]	- shadow memory for identity mapping
	 * The rest, [memsize, ident_map_size] if memsize < ident_map_size,
	 * can be mapped/unmapped dynamically later during memory hotplug.
	 */
	memsize = min(memsize, ident_map_size);

	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));

	/* init kasan zero shadow */
	crst_table_init((unsigned long *)kasan_early_shadow_p4d, p4d_val(p4d_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pud, pud_val(pud_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pmd, pmd_val(pmd_z));
	memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);

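	/* one shadow byte covers 8 bytes of memory (KASAN_SHADOW_SCALE_SHIFT == 3) */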
	shadow_alloc_size = memsize >> KASAN_SHADOW_SCALE_SHIFT;

	if (pgalloc_low + shadow_alloc_size > memsize)
		kasan_early_panic("out of memory during initialisation\n");

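	/*
	 * With EDAT, carve a segment-aligned region out of the page allocator
	 * so that the bulk of the shadow can be mapped with 1M segments.
	 */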
	if (has_edat) {
		segment_pos = round_down(pgalloc_pos, _SEGMENT_SIZE);
		segment_low = segment_pos - shadow_alloc_size;
		segment_low = round_down(segment_low, _SEGMENT_SIZE);
		pgalloc_pos = segment_low;
	}
	/*
	 * Current memory layout:
	 * +- 0 -------------+	       +- shadow start -+
	 * |1:1 ident mapping|	      /|1/8 of ident map|
	 * |		     |	     / |		|
	 * +-end of ident map+	    /  +----------------+
	 * | ... gap ...     |	   /   |    kasan	|
	 * |		     |	  /    |  zero page	|
	 * +- vmalloc area  -+	 /     |   mapping	|
	 * | vmalloc_size    |	/      | (untracked)	|
	 * +- modules vaddr -+ /       +----------------+
	 * | 2Gb	     |/        |    unmapped	| allocated per module
	 * +- shadow start  -+	       +----------------+
	 * | 1/8 addr space  |	       | zero pg mapping| (untracked)
	 * +- shadow end ----+---------+- shadow end ---+
	 *
	 * Current memory layout (KASAN_VMALLOC):
	 * +- 0 -------------+	       +- shadow start -+
	 * |1:1 ident mapping|	      /|1/8 of ident map|
	 * |		     |	     / |		|
	 * +-end of ident map+	    /  +----------------+
	 * | ... gap ...     |	   /   | kasan zero page| (untracked)
	 * |		     |	  /    | mapping	|
	 * +- vmalloc area  -+	 /     +----------------+
	 * | vmalloc_size    |	/      |shallow populate|
	 * +- modules vaddr -+ /       +----------------+
	 * | 2Gb	     |/        |shallow populate|
	 * +- shadow start  -+	       +----------------+
	 * | 1/8 addr space  |	       | zero pg mapping| (untracked)
	 * +- shadow end ----+---------+- shadow end ---+
	 */
	/* populate kasan shadow (for identity mapping and zero page mapping) */
	kasan_early_pgtable_populate(__sha(0), __sha(memsize), POPULATE_MAP);
	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
		untracked_end = VMALLOC_START;
		/* shallowly populate kasan shadow for vmalloc and modules */
		kasan_early_pgtable_populate(__sha(VMALLOC_START), __sha(MODULES_END),
					     POPULATE_SHALLOW);
	}
	/* populate kasan shadow for untracked memory */
	kasan_early_pgtable_populate(__sha(ident_map_size), __sha(untracked_end),
				     POPULATE_ZERO_SHADOW);
	kasan_early_pgtable_populate(__sha(MODULES_END), __sha(_REGION1_SIZE),
				     POPULATE_ZERO_SHADOW);
	/* enable kasan */
	init_task.kasan_depth = 0;
	sclp_early_printk("KernelAddressSanitizer initialized\n");
}