// SPDX-License-Identifier: GPL-2.0
/*
 * KMSAN shadow implementation.
 *
 * Copyright (C) 2017-2022 Google LLC
 * Author: Alexander Potapenko <glider@google.com>
 *
 */

#include <asm/kmsan.h>
#include <asm/tlbflush.h>
#include <linux/cacheflush.h>
#include <linux/memblock.h>
#include <linux/mm_types.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/stddef.h>

#include "../internal.h"
#include "kmsan.h"

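/*
 * Metadata pages are tracked in struct page itself: ->kmsan_shadow points to
 * the shadow page and ->kmsan_origin to the origin page.
 */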
#define shadow_page_for(page) ((page)->kmsan_shadow)

#define origin_page_for(page) ((page)->kmsan_origin)

static void *shadow_ptr_for(struct page *page)
{
	return page_address(shadow_page_for(page));
}

static void *origin_ptr_for(struct page *page)
{
	return page_address(origin_page_for(page));
}

static bool page_has_metadata(struct page *page)
{
	return shadow_page_for(page) && origin_page_for(page);
}

static void set_no_shadow_origin_page(struct page *page)
{
	shadow_page_for(page) = NULL;
	origin_page_for(page) = NULL;
}

/*
 * Dummy load and store pages to be used when the real metadata is unavailable.
 * There are separate pages for loads and stores, so that every load returns
 * zero and no store can affect what subsequent loads observe.
 */
static char dummy_load_page[PAGE_SIZE] __aligned(PAGE_SIZE);
static char dummy_store_page[PAGE_SIZE] __aligned(PAGE_SIZE);

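/*
 * Return the shadow or origin metadata address for a vmalloc or module
 * address. Both regions have their metadata mapped at a fixed offset, so the
 * lookup is pure arithmetic: the offset of @addr within its region is added
 * to the start of the matching metadata region. Returns 0 for addresses
 * belonging to neither region.
 */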
static unsigned long vmalloc_meta(void *addr, bool is_origin)
{
	unsigned long addr64 = (unsigned long)addr, off;

	KMSAN_WARN_ON(is_origin && !IS_ALIGNED(addr64, KMSAN_ORIGIN_SIZE));
	if (kmsan_internal_is_vmalloc_addr(addr)) {
		off = addr64 - VMALLOC_START;
		return off + (is_origin ? KMSAN_VMALLOC_ORIGIN_START :
					  KMSAN_VMALLOC_SHADOW_START);
	}
	if (kmsan_internal_is_module_addr(addr)) {
		off = addr64 - MODULES_VADDR;
		return off + (is_origin ? KMSAN_MODULES_ORIGIN_START :
					  KMSAN_MODULES_SHADOW_START);
	}
	return 0;
}

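/* Return the struct page for a kernel virtual address, or NULL if invalid. */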
static struct page *virt_to_page_or_null(void *vaddr)
{
	if (kmsan_virt_addr_valid(vaddr))
		return virt_to_page(vaddr);
	else
		return NULL;
}

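/*
 * Return pointers to the shadow and origin for a @size-byte access at
 * @address. When KMSAN is disabled or no metadata exists, the dummy pages
 * are returned instead, so instrumented code always has something valid to
 * dereference.
 */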
struct shadow_origin_ptr kmsan_get_shadow_origin_ptr(void *address, u64 size,
						     bool store)
{
	struct shadow_origin_ptr ret;
	void *shadow;

	/*
	 * Even if we redirect this memory access to the dummy page, it will
	 * go out of bounds.
	 */
	KMSAN_WARN_ON(size > PAGE_SIZE);

	if (!kmsan_enabled)
		goto return_dummy;

	KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(address, size));
	shadow = kmsan_get_metadata(address, KMSAN_META_SHADOW);
	if (!shadow)
		goto return_dummy;

	ret.shadow = shadow;
	ret.origin = kmsan_get_metadata(address, KMSAN_META_ORIGIN);
	return ret;

return_dummy:
	if (store) {
		/* Ignore this store. */
		ret.shadow = dummy_store_page;
		ret.origin = dummy_store_page;
	} else {
		/* This load will return zero. */
		ret.shadow = dummy_load_page;
		ret.origin = dummy_load_page;
	}
	return ret;
}

/*
 * Obtain the shadow or origin pointer for the given address, or NULL if there
 * is none. Callers that need valid metadata must check the result against
 * NULL. The return value must not depend on whether we are inside the KMSAN
 * runtime.
 */
void *kmsan_get_metadata(void *address, bool is_origin)
{
	u64 addr = (u64)address, pad, off;
	struct page *page;
	void *ret;

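	/*
	 * Origins are tracked at KMSAN_ORIGIN_SIZE granularity, so round the
	 * address down to the start of its origin slot.
	 */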
	if (is_origin && !IS_ALIGNED(addr, KMSAN_ORIGIN_SIZE)) {
		pad = addr % KMSAN_ORIGIN_SIZE;
		addr -= pad;
	}
	address = (void *)addr;
	if (kmsan_internal_is_vmalloc_addr(address) ||
	    kmsan_internal_is_module_addr(address))
		return (void *)vmalloc_meta(address, is_origin);

	ret = arch_kmsan_get_meta_or_null(address, is_origin);
	if (ret)
		return ret;

	page = virt_to_page_or_null(address);
	if (!page)
		return NULL;
	if (!page_has_metadata(page))
		return NULL;
	off = offset_in_page(addr);

	return (is_origin ? origin_ptr_for(page) : shadow_ptr_for(page)) + off;
}

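/*
 * Copy the metadata of page @src to page @dst. If @src has no metadata,
 * @dst is marked as fully initialized instead.
 */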
void kmsan_copy_page_meta(struct page *dst, struct page *src)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	if (!dst || !page_has_metadata(dst))
		return;
	if (!src || !page_has_metadata(src)) {
		kmsan_internal_unpoison_memory(page_address(dst), PAGE_SIZE,
					       /*checked*/ false);
		return;
	}

	kmsan_enter_runtime();
	__memcpy(shadow_ptr_for(dst), shadow_ptr_for(src), PAGE_SIZE);
	__memcpy(origin_ptr_for(dst), origin_ptr_for(src), PAGE_SIZE);
	kmsan_leave_runtime();
}
EXPORT_SYMBOL(kmsan_copy_page_meta);

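/*
 * Set up metadata for a freshly allocated page: zeroed for __GFP_ZERO
 * allocations, otherwise marked fully uninitialized (shadow bytes set to
 * 0xff), with every origin slot pointing at the allocation stack trace.
 */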
void kmsan_alloc_page(struct page *page, unsigned int order, gfp_t flags)
{
	bool initialized = (flags & __GFP_ZERO) || !kmsan_enabled;
	struct page *shadow, *origin;
	depot_stack_handle_t handle;
	int pages = 1 << order;

	if (!page)
		return;

	shadow = shadow_page_for(page);
	origin = origin_page_for(page);

	if (initialized) {
		__memset(page_address(shadow), 0, PAGE_SIZE * pages);
		__memset(page_address(origin), 0, PAGE_SIZE * pages);
		return;
	}

	/* Zero pages allocated by the runtime should also be initialized. */
	if (kmsan_in_runtime())
		return;

	__memset(page_address(shadow), -1, PAGE_SIZE * pages);
	kmsan_enter_runtime();
	handle = kmsan_save_stack_with_flags(flags, /*extra_bits*/ 0);
	kmsan_leave_runtime();
	/*
	 * Addresses are page-aligned, pages are contiguous, so it's ok
	 * to just fill the origin pages with @handle.
	 */
	for (int i = 0; i < PAGE_SIZE * pages / sizeof(handle); i++)
		((depot_stack_handle_t *)page_address(origin))[i] = handle;
}

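/*
 * Poison a page that is being freed, so that subsequent reads of its former
 * contents are reported as uses of uninitialized memory.
 */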
void kmsan_free_page(struct page *page, unsigned int order)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	kmsan_enter_runtime();
	kmsan_internal_poison_memory(page_address(page),
				     page_size(page),
				     GFP_KERNEL,
				     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
	kmsan_leave_runtime();
}

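/*
 * Create the metadata mappings that mirror a new vmalloc mapping of @pages
 * at [start, end): the shadow and origin pages of each data page are mapped
 * at the corresponding offsets in the metadata regions.
 */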
int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages,
				   unsigned int page_shift)
{
	unsigned long shadow_start, origin_start, shadow_end, origin_end;
	struct page **s_pages, **o_pages;
	int nr, mapped, err = 0;

	if (!kmsan_enabled)
		return 0;

	shadow_start = vmalloc_meta((void *)start, KMSAN_META_SHADOW);
	shadow_end = vmalloc_meta((void *)end, KMSAN_META_SHADOW);
	if (!shadow_start)
		return 0;

	nr = (end - start) / PAGE_SIZE;
	s_pages = kcalloc(nr, sizeof(*s_pages), GFP_KERNEL);
	o_pages = kcalloc(nr, sizeof(*o_pages), GFP_KERNEL);
	if (!s_pages || !o_pages) {
		err = -ENOMEM;
		goto ret;
	}
	for (int i = 0; i < nr; i++) {
		s_pages[i] = shadow_page_for(pages[i]);
		o_pages[i] = origin_page_for(pages[i]);
	}
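	/*
	 * Note: metadata is always mapped with PAGE_KERNEL, so the requested
	 * @prot (including the _PAGE_NX adjustment below) is effectively
	 * ignored.
	 */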
	prot = __pgprot(pgprot_val(prot) | _PAGE_NX);
	prot = PAGE_KERNEL;

	origin_start = vmalloc_meta((void *)start, KMSAN_META_ORIGIN);
	origin_end = vmalloc_meta((void *)end, KMSAN_META_ORIGIN);
	kmsan_enter_runtime();
	mapped = __vmap_pages_range_noflush(shadow_start, shadow_end, prot,
					    s_pages, page_shift);
	if (mapped) {
		err = mapped;
		goto ret;
	}
	mapped = __vmap_pages_range_noflush(origin_start, origin_end, prot,
					    o_pages, page_shift);
	if (mapped) {
		err = mapped;
		goto ret;
	}
	kmsan_leave_runtime();
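	/*
	 * Despite the _noflush suffix (the caller flushes the data range
	 * itself), the metadata ranges mapped above are flushed here.
	 */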
	flush_tlb_kernel_range(shadow_start, shadow_end);
	flush_tlb_kernel_range(origin_start, origin_end);
	flush_cache_vmap(shadow_start, shadow_end);
	flush_cache_vmap(origin_start, origin_end);

ret:
	kfree(s_pages);
	kfree(o_pages);
	return err;
}

/* Allocate metadata for pages allocated at boot time. */
void __init kmsan_init_alloc_meta_for_range(void *start, void *end)
{
	struct page *shadow_p, *origin_p;
	void *shadow, *origin;
	struct page *page;
	u64 size;

	start = (void *)PAGE_ALIGN_DOWN((u64)start);
	size = PAGE_ALIGN((u64)end - (u64)start);
	shadow = memblock_alloc(size, PAGE_SIZE);
	origin = memblock_alloc(size, PAGE_SIZE);

	if (!shadow || !origin)
		panic("%s: Failed to allocate metadata memory for early boot range of size %llu",
		      __func__, size);

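	/*
	 * Hand out one shadow page and one origin page per data page; the
	 * metadata pages themselves get no metadata of their own.
	 */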
	for (u64 addr = 0; addr < size; addr += PAGE_SIZE) {
		page = virt_to_page_or_null((char *)start + addr);
		shadow_p = virt_to_page((char *)shadow + addr);
		set_no_shadow_origin_page(shadow_p);
		shadow_page_for(page) = shadow_p;
		origin_p = virt_to_page((char *)origin + addr);
		set_no_shadow_origin_page(origin_p);
		origin_page_for(page) = origin_p;
	}
}

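/*
 * Attach pre-allocated @shadow and @origin pages to the 2^order data pages
 * starting at @page, clearing the metadata pointers of the metadata pages
 * themselves.
 */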
void kmsan_setup_meta(struct page *page, struct page *shadow,
		      struct page *origin, int order)
{
	for (int i = 0; i < (1 << order); i++) {
		set_no_shadow_origin_page(&shadow[i]);
		set_no_shadow_origin_page(&origin[i]);
		shadow_page_for(&page[i]) = &shadow[i];
		origin_page_for(&page[i]) = &origin[i];
	}
}