Diffstat (limited to 'mm/kmsan'):
 mm/kmsan/core.c            | 175
 mm/kmsan/hooks.c           | 108
 mm/kmsan/init.c            |  16
 mm/kmsan/instrumentation.c |  19
 mm/kmsan/kmsan.h           |  40
 mm/kmsan/kmsan_test.c      | 175
 mm/kmsan/report.c          |  12
 mm/kmsan/shadow.c          |  33
 8 files changed, 302 insertions(+), 276 deletions(-)
diff --git a/mm/kmsan/core.c b/mm/kmsan/core.c
index 3adb4c1d3b19..90f427b95a21 100644
--- a/mm/kmsan/core.c
+++ b/mm/kmsan/core.c
@@ -33,7 +33,7 @@ bool kmsan_enabled __read_mostly;
/*
* Per-CPU KMSAN context to be used in interrupts, where current->kmsan is
- * unavaliable.
+ * unavailable.
*/
DEFINE_PER_CPU(struct kmsan_ctx, kmsan_percpu_ctx);
@@ -43,7 +43,6 @@ void kmsan_internal_task_create(struct task_struct *task)
struct thread_info *info = current_thread_info();
__memset(ctx, 0, sizeof(*ctx));
- ctx->allow_reporting = true;
kmsan_internal_unpoison_memory(info, sizeof(*info), false);
}
@@ -73,141 +72,73 @@ depot_stack_handle_t kmsan_save_stack_with_flags(gfp_t flags,
nr_entries = stack_trace_save(entries, KMSAN_STACK_DEPTH, 0);
- /* Don't sleep. */
- flags &= ~(__GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM);
-
- handle = __stack_depot_save(entries, nr_entries, flags, true);
+ handle = stack_depot_save(entries, nr_entries, flags);
return stack_depot_set_extra_bits(handle, extra);
}
/* Copy the metadata following the memmove() behavior. */
void kmsan_internal_memmove_metadata(void *dst, void *src, size_t n)
{
+ depot_stack_handle_t prev_old_origin = 0, prev_new_origin = 0;
+ int i, iter, step, src_off, dst_off, oiter_src, oiter_dst;
depot_stack_handle_t old_origin = 0, new_origin = 0;
- int src_slots, dst_slots, i, iter, step, skip_bits;
depot_stack_handle_t *origin_src, *origin_dst;
- void *shadow_src, *shadow_dst;
- u32 *align_shadow_src, shadow;
+ u8 *shadow_src, *shadow_dst;
+ u32 *align_shadow_dst;
bool backwards;
shadow_dst = kmsan_get_metadata(dst, KMSAN_META_SHADOW);
if (!shadow_dst)
return;
KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(dst, n));
+ align_shadow_dst =
+ (u32 *)ALIGN_DOWN((u64)shadow_dst, KMSAN_ORIGIN_SIZE);
shadow_src = kmsan_get_metadata(src, KMSAN_META_SHADOW);
if (!shadow_src) {
- /*
- * @src is untracked: zero out destination shadow, ignore the
- * origins, we're done.
- */
- __memset(shadow_dst, 0, n);
+ /* @src is untracked: mark @dst as initialized. */
+ kmsan_internal_unpoison_memory(dst, n, /*checked*/ false);
return;
}
KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(src, n));
- __memmove(shadow_dst, shadow_src, n);
-
origin_dst = kmsan_get_metadata(dst, KMSAN_META_ORIGIN);
origin_src = kmsan_get_metadata(src, KMSAN_META_ORIGIN);
KMSAN_WARN_ON(!origin_dst || !origin_src);
- src_slots = (ALIGN((u64)src + n, KMSAN_ORIGIN_SIZE) -
- ALIGN_DOWN((u64)src, KMSAN_ORIGIN_SIZE)) /
- KMSAN_ORIGIN_SIZE;
- dst_slots = (ALIGN((u64)dst + n, KMSAN_ORIGIN_SIZE) -
- ALIGN_DOWN((u64)dst, KMSAN_ORIGIN_SIZE)) /
- KMSAN_ORIGIN_SIZE;
- KMSAN_WARN_ON((src_slots < 1) || (dst_slots < 1));
- KMSAN_WARN_ON((src_slots - dst_slots > 1) ||
- (dst_slots - src_slots < -1));
backwards = dst > src;
- i = backwards ? min(src_slots, dst_slots) - 1 : 0;
- iter = backwards ? -1 : 1;
-
- align_shadow_src =
- (u32 *)ALIGN_DOWN((u64)shadow_src, KMSAN_ORIGIN_SIZE);
- for (step = 0; step < min(src_slots, dst_slots); step++, i += iter) {
- KMSAN_WARN_ON(i < 0);
- shadow = align_shadow_src[i];
- if (i == 0) {
- /*
- * If @src isn't aligned on KMSAN_ORIGIN_SIZE, don't
- * look at the first @src % KMSAN_ORIGIN_SIZE bytes
- * of the first shadow slot.
- */
- skip_bits = ((u64)src % KMSAN_ORIGIN_SIZE) * 8;
- shadow = (shadow >> skip_bits) << skip_bits;
- }
- if (i == src_slots - 1) {
- /*
- * If @src + n isn't aligned on
- * KMSAN_ORIGIN_SIZE, don't look at the last
- * (@src + n) % KMSAN_ORIGIN_SIZE bytes of the
- * last shadow slot.
- */
- skip_bits = (((u64)src + n) % KMSAN_ORIGIN_SIZE) * 8;
- shadow = (shadow << skip_bits) >> skip_bits;
+ step = backwards ? -1 : 1;
+ iter = backwards ? n - 1 : 0;
+ src_off = (u64)src % KMSAN_ORIGIN_SIZE;
+ dst_off = (u64)dst % KMSAN_ORIGIN_SIZE;
+
+ /* Copy shadow bytes one by one, updating the origins if necessary. */
+ for (i = 0; i < n; i++, iter += step) {
+ oiter_src = (iter + src_off) / KMSAN_ORIGIN_SIZE;
+ oiter_dst = (iter + dst_off) / KMSAN_ORIGIN_SIZE;
+ if (!shadow_src[iter]) {
+ shadow_dst[iter] = 0;
+ if (!align_shadow_dst[oiter_dst])
+ origin_dst[oiter_dst] = 0;
+ continue;
}
- /*
- * Overwrite the origin only if the corresponding
- * shadow is nonempty.
- */
- if (origin_src[i] && (origin_src[i] != old_origin) && shadow) {
- old_origin = origin_src[i];
- new_origin = kmsan_internal_chain_origin(old_origin);
+ shadow_dst[iter] = shadow_src[iter];
+ old_origin = origin_src[oiter_src];
+ if (old_origin == prev_old_origin)
+ new_origin = prev_new_origin;
+ else {
/*
* kmsan_internal_chain_origin() may return
* NULL, but we don't want to lose the previous
* origin value.
*/
+ new_origin = kmsan_internal_chain_origin(old_origin);
if (!new_origin)
new_origin = old_origin;
}
- if (shadow)
- origin_dst[i] = new_origin;
- else
- origin_dst[i] = 0;
- }
- /*
- * If dst_slots is greater than src_slots (i.e.
- * dst_slots == src_slots + 1), there is an extra origin slot at the
- * beginning or end of the destination buffer, for which we take the
- * origin from the previous slot.
- * This is only done if the part of the source shadow corresponding to
- * slot is non-zero.
- *
- * E.g. if we copy 8 aligned bytes that are marked as uninitialized
- * and have origins o111 and o222, to an unaligned buffer with offset 1,
- * these two origins are copied to three origin slots, so one of them
- * needs to be duplicated, depending on the copy direction (@backwards)
- *
- * src shadow: |uuuu|uuuu|....|
- * src origin: |o111|o222|....|
- *
- * backwards = 0:
- * dst shadow: |.uuu|uuuu|u...|
- * dst origin: |....|o111|o222| - fill the empty slot with o111
- * backwards = 1:
- * dst shadow: |.uuu|uuuu|u...|
- * dst origin: |o111|o222|....| - fill the empty slot with o222
- */
- if (src_slots < dst_slots) {
- if (backwards) {
- shadow = align_shadow_src[src_slots - 1];
- skip_bits = (((u64)dst + n) % KMSAN_ORIGIN_SIZE) * 8;
- shadow = (shadow << skip_bits) >> skip_bits;
- if (shadow)
- /* src_slots > 0, therefore dst_slots is at least 2 */
- origin_dst[dst_slots - 1] =
- origin_dst[dst_slots - 2];
- } else {
- shadow = align_shadow_src[0];
- skip_bits = ((u64)dst % KMSAN_ORIGIN_SIZE) * 8;
- shadow = (shadow >> skip_bits) << skip_bits;
- if (shadow)
- origin_dst[0] = origin_dst[1];
- }
+ origin_dst[oiter_dst] = new_origin;
+ prev_new_origin = new_origin;
+ prev_old_origin = old_origin;
}
}
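
For illustration, a minimal userspace model of the per-byte walk introduced above (all names are local to this sketch, not kernel API; or_dst/or_src are assumed to point at the origin-slot arrays covering dst/src after aligning down to KMSAN_ORIGIN_SIZE, with dst_off/src_off being the offsets of dst/src inside their first slot):

#include <stddef.h>
#include <stdint.h>

#define ORIGIN_SIZE 4   /* stands in for KMSAN_ORIGIN_SIZE */

/*
 * Copy shadow byte by byte in the memmove() direction; each poisoned byte
 * stores an origin into the slot covering it, while a clean byte only clears
 * the destination origin slot once the whole aligned slot is initialized.
 */
static void copy_meta(uint8_t *sh_dst, uint32_t *or_dst,
                      const uint32_t *aligned_sh_dst, size_t dst_off,
                      const uint8_t *sh_src, const uint32_t *or_src,
                      size_t src_off, size_t n, int backwards)
{
    long step = backwards ? -1 : 1;
    long iter = backwards ? (long)n - 1 : 0;

    for (size_t i = 0; i < n; i++, iter += step) {
        size_t os = (iter + src_off) / ORIGIN_SIZE;
        size_t od = (iter + dst_off) / ORIGIN_SIZE;

        if (!sh_src[iter]) {
            sh_dst[iter] = 0;
            if (!aligned_sh_dst[od])    /* slot now fully initialized */
                or_dst[od] = 0;
            continue;
        }
        sh_dst[iter] = sh_src[iter];
        or_dst[od] = or_src[os];        /* the kernel chains the origin here */
    }
}
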
@@ -225,8 +156,8 @@ depot_stack_handle_t kmsan_internal_chain_origin(depot_stack_handle_t id)
* Make sure we have enough spare bits in @id to hold the UAF bit and
* the chain depth.
*/
- BUILD_BUG_ON(
- (1 << STACK_DEPOT_EXTRA_BITS) <= (KMSAN_MAX_ORIGIN_DEPTH << 1));
+ BUILD_BUG_ON((1 << STACK_DEPOT_EXTRA_BITS) <=
+ (KMSAN_MAX_ORIGIN_DEPTH << 1));
extra_bits = stack_depot_get_extra_bits(id);
depth = kmsan_depth_from_eb(extra_bits);
@@ -250,11 +181,10 @@ depot_stack_handle_t kmsan_internal_chain_origin(depot_stack_handle_t id)
/*
* @entries is a local var in non-instrumented code, so KMSAN does not
* know it is initialized. Explicitly unpoison it to avoid false
- * positives when __stack_depot_save() passes it to instrumented code.
+ * positives when stack_depot_save() passes it to instrumented code.
*/
kmsan_internal_unpoison_memory(entries, sizeof(entries), false);
- handle = __stack_depot_save(entries, ARRAY_SIZE(entries), __GFP_HIGH,
- true);
+ handle = stack_depot_save(entries, ARRAY_SIZE(entries), __GFP_HIGH);
return stack_depot_set_extra_bits(handle, extra_bits);
}
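
The BUILD_BUG_ON() above states that the stack depot's extra bits must hold both the use-after-free flag and the chain depth. A sketch of the layout this assumes (bit 0 for the UAF flag, the remaining bits for the depth, consistent with kmsan_depth_from_eb() used above):

/* Assumed extra-bits layout: | chain depth ... | UAF bit | */
static inline unsigned int eb_encode(unsigned int depth, int uaf)
{
    return (depth << 1) | !!uaf;
}

static inline unsigned int eb_depth(unsigned int extra_bits)
{
    return extra_bits >> 1;     /* cf. kmsan_depth_from_eb() */
}

/*
 * The largest encoded value, eb_encode(KMSAN_MAX_ORIGIN_DEPTH, 1), must fit
 * into STACK_DEPOT_EXTRA_BITS bits, hence the requirement
 * (1 << STACK_DEPOT_EXTRA_BITS) > (KMSAN_MAX_ORIGIN_DEPTH << 1).
 */
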
@@ -263,7 +193,7 @@ void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,
{
u64 address = (u64)addr;
void *shadow_start;
- u32 *origin_start;
+ u32 *aligned_shadow, *origin_start;
size_t pad = 0;
KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(addr, size));
@@ -282,17 +212,28 @@ void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,
}
__memset(shadow_start, b, size);
- if (!IS_ALIGNED(address, KMSAN_ORIGIN_SIZE)) {
+ if (IS_ALIGNED(address, KMSAN_ORIGIN_SIZE)) {
+ aligned_shadow = shadow_start;
+ } else {
pad = address % KMSAN_ORIGIN_SIZE;
address -= pad;
+ aligned_shadow = shadow_start - pad;
size += pad;
}
size = ALIGN(size, KMSAN_ORIGIN_SIZE);
origin_start =
(u32 *)kmsan_get_metadata((void *)address, KMSAN_META_ORIGIN);
- for (int i = 0; i < size / KMSAN_ORIGIN_SIZE; i++)
- origin_start[i] = origin;
+ /*
+ * If the new origin is non-zero, assume that the shadow byte is also non-zero,
+ * and unconditionally overwrite the old origin slot.
+ * If the new origin is zero, overwrite the old origin slot iff the
+ * corresponding shadow slot is zero.
+ */
+ for (int i = 0; i < size / KMSAN_ORIGIN_SIZE; i++) {
+ if (origin || !aligned_shadow[i])
+ origin_start[i] = origin;
+ }
}
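
Restating the per-slot rule applied by the loop above as a standalone helper (uint32_t instead of the kernel's u32, otherwise the same logic as the if (origin || !aligned_shadow[i]) check):

#include <stdint.h>

/* What to keep in an origin slot after the shadow has been memset() to b. */
static inline uint32_t pick_origin(uint32_t new_origin,
                                   uint32_t aligned_shadow_word,
                                   uint32_t old_origin)
{
    if (new_origin)             /* poisoning: always record the new origin */
        return new_origin;
    if (!aligned_shadow_word)   /* slot fully initialized: safe to clear */
        return 0;
    return old_origin;          /* keep the origin of still-poisoned bytes */
}
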
struct page *kmsan_vmalloc_to_page_or_null(void *vaddr)
@@ -309,8 +250,8 @@ struct page *kmsan_vmalloc_to_page_or_null(void *vaddr)
return NULL;
}
-void kmsan_internal_check_memory(void *addr, size_t size, const void *user_addr,
- int reason)
+void kmsan_internal_check_memory(void *addr, size_t size,
+ const void __user *user_addr, int reason)
{
depot_stack_handle_t cur_origin = 0, new_origin = 0;
unsigned long addr64 = (unsigned long)addr;
@@ -334,11 +275,9 @@ void kmsan_internal_check_memory(void *addr, size_t size, const void *user_addr,
* bytes before, report them.
*/
if (cur_origin) {
- kmsan_enter_runtime();
kmsan_report(cur_origin, addr, size,
cur_off_start, pos - 1, user_addr,
reason);
- kmsan_leave_runtime();
}
cur_origin = 0;
cur_off_start = -1;
@@ -352,11 +291,9 @@ void kmsan_internal_check_memory(void *addr, size_t size, const void *user_addr,
* poisoned bytes before, report them.
*/
if (cur_origin) {
- kmsan_enter_runtime();
kmsan_report(cur_origin, addr, size,
cur_off_start, pos + i - 1,
user_addr, reason);
- kmsan_leave_runtime();
}
cur_origin = 0;
cur_off_start = -1;
@@ -372,11 +309,9 @@ void kmsan_internal_check_memory(void *addr, size_t size, const void *user_addr,
*/
if (cur_origin != new_origin) {
if (cur_origin) {
- kmsan_enter_runtime();
kmsan_report(cur_origin, addr, size,
cur_off_start, pos + i - 1,
user_addr, reason);
- kmsan_leave_runtime();
}
cur_origin = new_origin;
cur_off_start = pos + i;
@@ -386,10 +321,8 @@ void kmsan_internal_check_memory(void *addr, size_t size, const void *user_addr,
}
KMSAN_WARN_ON(pos != size);
if (cur_origin) {
- kmsan_enter_runtime();
kmsan_report(cur_origin, addr, size, cur_off_start, pos - 1,
user_addr, reason);
- kmsan_leave_runtime();
}
}
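
For context, kmsan_internal_check_memory() coalesces consecutive poisoned bytes sharing an origin into a single report; the hunks above merely drop the kmsan_enter_runtime()/kmsan_leave_runtime() pairs, since kmsan_report() now manages the runtime state itself (see report.c below). A simplified model of the coalescing walk, with origin_at() and report() as toy stand-ins for the metadata lookup and kmsan_report():

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t origin_at(size_t pos)
{
    return pos < 4 ? 0x111 : 0; /* toy data: only the first 4 bytes are poisoned */
}

static void report(uint32_t origin, size_t first, size_t last)
{
    printf("origin %x covers bytes %zu..%zu\n", (unsigned int)origin, first, last);
}

/* Emit one report per maximal run of poisoned bytes sharing an origin. */
static void check_range_model(size_t size)
{
    uint32_t cur_origin = 0;
    size_t cur_off_start = 0;

    for (size_t pos = 0; pos < size; pos++) {
        uint32_t new_origin = origin_at(pos);   /* 0 means initialized */

        if (new_origin == cur_origin)
            continue;
        if (cur_origin)
            report(cur_origin, cur_off_start, pos - 1);
        cur_origin = new_origin;
        cur_off_start = pos;
    }
    if (cur_origin)
        report(cur_origin, cur_off_start, size - 1);
}
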
diff --git a/mm/kmsan/hooks.c b/mm/kmsan/hooks.c
index 5d6e2dee5692..8f22d1f22981 100644
--- a/mm/kmsan/hooks.c
+++ b/mm/kmsan/hooks.c
@@ -39,12 +39,10 @@ void kmsan_task_create(struct task_struct *task)
void kmsan_task_exit(struct task_struct *task)
{
- struct kmsan_ctx *ctx = &task->kmsan_ctx;
-
if (!kmsan_enabled || kmsan_in_runtime())
return;
- ctx->allow_reporting = false;
+ kmsan_disable_current();
}
void kmsan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags)
@@ -76,7 +74,7 @@ void kmsan_slab_free(struct kmem_cache *s, void *object)
return;
/* RCU slabs could be legally used after free within the RCU period */
- if (unlikely(s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)))
+ if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU))
return;
/*
* If there's a constructor, freed memory must remain in the same state
@@ -86,7 +84,8 @@ void kmsan_slab_free(struct kmem_cache *s, void *object)
if (s->ctor)
return;
kmsan_enter_runtime();
- kmsan_internal_poison_memory(object, s->object_size, GFP_KERNEL,
+ kmsan_internal_poison_memory(object, s->object_size,
+ GFP_KERNEL & ~(__GFP_RECLAIM),
KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
kmsan_leave_runtime();
}
@@ -116,9 +115,8 @@ void kmsan_kfree_large(const void *ptr)
kmsan_enter_runtime();
page = virt_to_head_page((void *)ptr);
KMSAN_WARN_ON(ptr != page_address(page));
- kmsan_internal_poison_memory((void *)ptr,
- page_size(page),
- GFP_KERNEL,
+ kmsan_internal_poison_memory((void *)ptr, page_size(page),
+ GFP_KERNEL & ~(__GFP_RECLAIM),
KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
kmsan_leave_runtime();
}
@@ -267,7 +265,8 @@ void kmsan_copy_to_user(void __user *to, const void *from, size_t to_copy,
return;
ua_flags = user_access_save();
- if ((u64)to < TASK_SIZE) {
+ if (!IS_ENABLED(CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE) ||
+ (u64)to < TASK_SIZE) {
/* This is a user memory access, check it. */
kmsan_internal_check_memory((void *)from, to_copy - left, to,
REASON_COPY_TO_USER);
@@ -278,13 +277,26 @@ void kmsan_copy_to_user(void __user *to, const void *from, size_t to_copy,
* Don't check anything, just copy the shadow of the copied
* bytes.
*/
+ kmsan_enter_runtime();
kmsan_internal_memmove_metadata((void *)to, (void *)from,
to_copy - left);
+ kmsan_leave_runtime();
}
user_access_restore(ua_flags);
}
EXPORT_SYMBOL(kmsan_copy_to_user);
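
The TASK_SIZE check above only distinguishes user from kernel destinations when the two address ranges cannot overlap; otherwise every destination is conservatively treated as user memory, so the source gets checked instead of having its metadata copied. The equivalent predicate, spelled out as a sketch in kernel context using the same macros as the hunk above:

static bool dest_is_user(const void __user *to)
{
    /* Overlapping address spaces: cannot tell, assume a user destination. */
    if (!IS_ENABLED(CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE))
        return true;
    return (u64)to < TASK_SIZE;
}
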
+void kmsan_memmove(void *to, const void *from, size_t size)
+{
+ if (!kmsan_enabled || kmsan_in_runtime())
+ return;
+
+ kmsan_enter_runtime();
+ kmsan_internal_memmove_metadata(to, (void *)from, size);
+ kmsan_leave_runtime();
+}
+EXPORT_SYMBOL(kmsan_memmove);
+
/* Helper function to check an URB. */
void kmsan_handle_urb(const struct urb *urb, bool is_out)
{
@@ -293,7 +305,8 @@ void kmsan_handle_urb(const struct urb *urb, bool is_out)
if (is_out)
kmsan_internal_check_memory(urb->transfer_buffer,
urb->transfer_buffer_length,
- /*user_addr*/ 0, REASON_SUBMIT_URB);
+ /*user_addr*/ NULL,
+ REASON_SUBMIT_URB);
else
kmsan_internal_unpoison_memory(urb->transfer_buffer,
urb->transfer_buffer_length,
@@ -306,14 +319,14 @@ static void kmsan_handle_dma_page(const void *addr, size_t size,
{
switch (dir) {
case DMA_BIDIRECTIONAL:
- kmsan_internal_check_memory((void *)addr, size, /*user_addr*/ 0,
- REASON_ANY);
+ kmsan_internal_check_memory((void *)addr, size,
+ /*user_addr*/ NULL, REASON_ANY);
kmsan_internal_unpoison_memory((void *)addr, size,
/*checked*/ false);
break;
case DMA_TO_DEVICE:
- kmsan_internal_check_memory((void *)addr, size, /*user_addr*/ 0,
- REASON_ANY);
+ kmsan_internal_check_memory((void *)addr, size,
+ /*user_addr*/ NULL, REASON_ANY);
break;
case DMA_FROM_DEVICE:
kmsan_internal_unpoison_memory((void *)addr, size,
@@ -325,14 +338,15 @@ static void kmsan_handle_dma_page(const void *addr, size_t size,
}
/* Helper function to handle DMA data transfers. */
-void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
+void kmsan_handle_dma(phys_addr_t phys, size_t size,
enum dma_data_direction dir)
{
- u64 page_offset, to_go, addr;
+ u64 page_offset, to_go;
+ void *addr;
- if (PageHighMem(page))
+ if (PhysHighMem(phys))
return;
- addr = (u64)page_address(page) + offset;
+ addr = phys_to_virt(phys);
/*
* The kernel may occasionally give us adjacent DMA pages not belonging
* to the same allocation. Process them separately to avoid triggering
@@ -346,6 +360,7 @@ void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
size -= to_go;
}
}
+EXPORT_SYMBOL_GPL(kmsan_handle_dma);
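
A small userspace model of the page-by-page walk performed by kmsan_handle_dma() (PAGE_SZ and handle_page() are stand-ins for PAGE_SIZE and kmsan_handle_dma_page()):

#include <stddef.h>
#include <stdint.h>

#define PAGE_SZ 4096

static void handle_page(uintptr_t addr, size_t len)
{
    /* kmsan_handle_dma_page() would check or unpoison [addr, addr + len). */
    (void)addr;
    (void)len;
}

/* Split the range at page boundaries so adjacent pages from different
 * allocations are never covered by a single metadata operation. */
static void handle_dma_range(uintptr_t addr, size_t size)
{
    while (size > 0) {
        size_t page_off = addr % PAGE_SZ;
        size_t to_go = size < PAGE_SZ - page_off ? size : PAGE_SZ - page_off;

        handle_page(addr, to_go);
        addr += to_go;
        size -= to_go;
    }
}
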
void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
enum dma_data_direction dir)
@@ -354,11 +369,16 @@ void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
int i;
for_each_sg(sg, item, nents, i)
- kmsan_handle_dma(sg_page(item), item->offset, item->length,
- dir);
+ kmsan_handle_dma(sg_phys(item), item->length, dir);
}
/* Functions from kmsan-checks.h follow. */
+
+/*
+ * To create an origin, kmsan_poison_memory() unwinds the stacks and stores it
+ * into the stack depot. This may cause deadlocks if done from within KMSAN
+ * runtime, therefore we bail out if kmsan_in_runtime().
+ */
void kmsan_poison_memory(const void *address, size_t size, gfp_t flags)
{
if (!kmsan_enabled || kmsan_in_runtime())
@@ -371,54 +391,52 @@ void kmsan_poison_memory(const void *address, size_t size, gfp_t flags)
}
EXPORT_SYMBOL(kmsan_poison_memory);
+/*
+ * Unlike kmsan_poison_memory(), this function can be used from within KMSAN
+ * runtime, because it does not trigger allocations or call instrumented code.
+ */
void kmsan_unpoison_memory(const void *address, size_t size)
{
unsigned long ua_flags;
- if (!kmsan_enabled || kmsan_in_runtime())
+ if (!kmsan_enabled)
return;
ua_flags = user_access_save();
- kmsan_enter_runtime();
/* The users may want to poison/unpoison random memory. */
kmsan_internal_unpoison_memory((void *)address, size,
KMSAN_POISON_NOCHECK);
- kmsan_leave_runtime();
user_access_restore(ua_flags);
}
EXPORT_SYMBOL(kmsan_unpoison_memory);
/*
- * Version of kmsan_unpoison_memory() that can be called from within the KMSAN
- * runtime.
- *
- * Non-instrumented IRQ entry functions receive struct pt_regs from assembly
- * code. Those regs need to be unpoisoned, otherwise using them will result in
- * false positives.
- * Using kmsan_unpoison_memory() is not an option in entry code, because the
- * return value of in_task() is inconsistent - as a result, certain calls to
- * kmsan_unpoison_memory() are ignored. kmsan_unpoison_entry_regs() ensures that
- * the registers are unpoisoned even if kmsan_in_runtime() is true in the early
- * entry code.
+ * Version of kmsan_unpoison_memory() called from IRQ entry functions.
*/
void kmsan_unpoison_entry_regs(const struct pt_regs *regs)
{
- unsigned long ua_flags;
-
- if (!kmsan_enabled)
- return;
-
- ua_flags = user_access_save();
- kmsan_internal_unpoison_memory((void *)regs, sizeof(*regs),
- KMSAN_POISON_NOCHECK);
- user_access_restore(ua_flags);
+ kmsan_unpoison_memory((void *)regs, sizeof(*regs));
}
void kmsan_check_memory(const void *addr, size_t size)
{
if (!kmsan_enabled)
return;
- return kmsan_internal_check_memory((void *)addr, size, /*user_addr*/ 0,
- REASON_ANY);
+ return kmsan_internal_check_memory((void *)addr, size,
+ /*user_addr*/ NULL, REASON_ANY);
}
EXPORT_SYMBOL(kmsan_check_memory);
+
+void kmsan_enable_current(void)
+{
+ KMSAN_WARN_ON(current->kmsan_ctx.depth == 0);
+ current->kmsan_ctx.depth--;
+}
+EXPORT_SYMBOL(kmsan_enable_current);
+
+void kmsan_disable_current(void)
+{
+ current->kmsan_ctx.depth++;
+ KMSAN_WARN_ON(current->kmsan_ctx.depth == 0);
+}
+EXPORT_SYMBOL(kmsan_disable_current);
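
kmsan_enable_current()/kmsan_disable_current() replace the old allow_reporting boolean with a per-task depth counter, so sections that must not produce reports can nest; kmsan_report() bails out while current->kmsan_ctx.depth is nonzero (see report.c below). A usage sketch, with the callees being hypothetical examples rather than real kernel functions:

    kmsan_disable_current();        /* depth 0 -> 1: reports suppressed */
    call_known_false_positive();    /* hypothetical noisy callee */
    kmsan_disable_current();        /* depth 1 -> 2: nesting is fine */
    another_noisy_helper();         /* hypothetical */
    kmsan_enable_current();         /* depth 2 -> 1: still suppressed */
    kmsan_enable_current();         /* depth 1 -> 0: reporting allowed again */
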
diff --git a/mm/kmsan/init.c b/mm/kmsan/init.c
index ffedf4dbc49d..b14ce3417e65 100644
--- a/mm/kmsan/init.c
+++ b/mm/kmsan/init.c
@@ -33,7 +33,9 @@ static void __init kmsan_record_future_shadow_range(void *start, void *end)
bool merged = false;
KMSAN_WARN_ON(future_index == NUM_FUTURE_RANGES);
- KMSAN_WARN_ON((nstart >= nend) || !nstart || !nend);
+ KMSAN_WARN_ON((nstart >= nend) ||
+ /* Virtual address 0 is valid on s390. */
+ (!IS_ENABLED(CONFIG_S390) && !nstart) || !nend);
nstart = ALIGN_DOWN(nstart, PAGE_SIZE);
nend = ALIGN(nend, PAGE_SIZE);
@@ -72,7 +74,7 @@ static void __init kmsan_record_future_shadow_range(void *start, void *end)
*/
void __init kmsan_init_shadow(void)
{
- const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
+ const size_t nd_size = sizeof(pg_data_t);
phys_addr_t p_start, p_end;
u64 loop;
int nid;
@@ -96,7 +98,7 @@ void __init kmsan_init_shadow(void)
struct metadata_page_pair {
struct page *shadow, *origin;
};
-static struct metadata_page_pair held_back[MAX_ORDER + 1] __initdata;
+static struct metadata_page_pair held_back[NR_PAGE_ORDERS] __initdata;
/*
* Eager metadata allocation. When the memblock allocator is freeing pages to
@@ -141,7 +143,7 @@ struct smallstack {
static struct smallstack collect = {
.index = 0,
- .order = MAX_ORDER,
+ .order = MAX_PAGE_ORDER,
};
static void smallstack_push(struct smallstack *stack, struct page *pages)
@@ -172,7 +174,7 @@ static void do_collection(void)
shadow = smallstack_pop(&collect);
origin = smallstack_pop(&collect);
kmsan_setup_meta(page, shadow, origin, collect.order);
- __free_pages_core(page, collect.order);
+ __free_pages_core(page, collect.order, MEMINIT_EARLY);
}
}
@@ -211,8 +213,8 @@ static void kmsan_memblock_discard(void)
* order=N-1,
* - repeat.
*/
- collect.order = MAX_ORDER;
- for (int i = MAX_ORDER; i >= 0; i--) {
+ collect.order = MAX_PAGE_ORDER;
+ for (int i = MAX_PAGE_ORDER; i >= 0; i--) {
if (held_back[i].shadow)
smallstack_push(&collect, held_back[i].shadow);
if (held_back[i].origin)
diff --git a/mm/kmsan/instrumentation.c b/mm/kmsan/instrumentation.c
index cc3907a9c33a..69f0a57a401c 100644
--- a/mm/kmsan/instrumentation.c
+++ b/mm/kmsan/instrumentation.c
@@ -14,13 +14,15 @@
#include "kmsan.h"
#include <linux/gfp.h>
+#include <linux/kmsan.h>
#include <linux/kmsan_string.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
static inline bool is_bad_asm_addr(void *addr, uintptr_t size, bool is_store)
{
- if ((u64)addr < TASK_SIZE)
+ if (IS_ENABLED(CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE) &&
+ (u64)addr < TASK_SIZE)
return true;
if (!kmsan_get_metadata(addr, KMSAN_META_SHADOW))
return true;
@@ -110,11 +112,10 @@ void __msan_instrument_asm_store(void *addr, uintptr_t size)
ua_flags = user_access_save();
/*
- * Most of the accesses are below 32 bytes. The two exceptions so far
- * are clwb() (64 bytes) and FPU state (512 bytes).
- * It's unlikely that the assembly will touch more than 512 bytes.
+ * Most of the accesses are below 32 bytes. The exceptions so far are
+ * clwb() (64 bytes), FPU state (512 bytes) and chsc() (4096 bytes).
*/
- if (size > 512) {
+ if (size > 4096) {
WARN_ONCE(1, "assembly store size too big: %ld\n", size);
size = 8;
}
@@ -311,13 +312,9 @@ EXPORT_SYMBOL(__msan_unpoison_alloca);
void __msan_warning(u32 origin);
void __msan_warning(u32 origin)
{
- if (!kmsan_enabled || kmsan_in_runtime())
- return;
- kmsan_enter_runtime();
- kmsan_report(origin, /*address*/ 0, /*size*/ 0,
- /*off_first*/ 0, /*off_last*/ 0, /*user_addr*/ 0,
+ kmsan_report(origin, /*address*/ NULL, /*size*/ 0,
+ /*off_first*/ 0, /*off_last*/ 0, /*user_addr*/ NULL,
REASON_ANY);
- kmsan_leave_runtime();
}
EXPORT_SYMBOL(__msan_warning);
diff --git a/mm/kmsan/kmsan.h b/mm/kmsan/kmsan.h
index a14744205435..bc3d1810f352 100644
--- a/mm/kmsan/kmsan.h
+++ b/mm/kmsan/kmsan.h
@@ -10,14 +10,15 @@
#ifndef __MM_KMSAN_KMSAN_H
#define __MM_KMSAN_KMSAN_H
-#include <asm/pgtable_64_types.h>
#include <linux/irqflags.h>
+#include <linux/kmsan.h>
+#include <linux/mm.h>
+#include <linux/nmi.h>
+#include <linux/pgtable.h>
+#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
-#include <linux/nmi.h>
-#include <linux/mm.h>
-#include <linux/printk.h>
#define KMSAN_ALLOCA_MAGIC_ORIGIN 0xabcd0100
#define KMSAN_CHAIN_MAGIC_ORIGIN 0xabcd0200
@@ -34,29 +35,6 @@
#define KMSAN_META_SHADOW (false)
#define KMSAN_META_ORIGIN (true)
-extern bool kmsan_enabled;
-extern int panic_on_kmsan;
-
-/*
- * KMSAN performs a lot of consistency checks that are currently enabled by
- * default. BUG_ON is normally discouraged in the kernel, unless used for
- * debugging, but KMSAN itself is a debugging tool, so it makes little sense to
- * recover if something goes wrong.
- */
-#define KMSAN_WARN_ON(cond) \
- ({ \
- const bool __cond = WARN_ON(cond); \
- if (unlikely(__cond)) { \
- WRITE_ONCE(kmsan_enabled, false); \
- if (panic_on_kmsan) { \
- /* Can't call panic() here because */ \
- /* of uaccess checks. */ \
- BUG(); \
- } \
- } \
- __cond; \
- })
-
/*
* A pair of metadata pointers to be returned by the instrumentation functions.
*/
@@ -66,7 +44,6 @@ struct shadow_origin_ptr {
struct shadow_origin_ptr kmsan_get_shadow_origin_ptr(void *addr, u64 size,
bool store);
-void *kmsan_get_metadata(void *addr, bool is_origin);
void __init kmsan_init_alloc_meta_for_range(void *start, void *end);
enum kmsan_bug_reason {
@@ -96,7 +73,7 @@ void kmsan_print_origin(depot_stack_handle_t origin);
* @off_last corresponding to different @origin values.
*/
void kmsan_report(depot_stack_handle_t origin, void *address, int size,
- int off_first, int off_last, const void *user_addr,
+ int off_first, int off_last, const void __user *user_addr,
enum kmsan_bug_reason reason);
DECLARE_PER_CPU(struct kmsan_ctx, kmsan_percpu_ctx);
@@ -144,7 +121,6 @@ static __always_inline void kmsan_leave_runtime(void)
KMSAN_WARN_ON(--ctx->kmsan_in_runtime);
}
-depot_stack_handle_t kmsan_save_stack(void);
depot_stack_handle_t kmsan_save_stack_with_flags(gfp_t flags,
unsigned int extra_bits);
@@ -186,8 +162,8 @@ depot_stack_handle_t kmsan_internal_chain_origin(depot_stack_handle_t id);
void kmsan_internal_task_create(struct task_struct *task);
bool kmsan_metadata_is_contiguous(void *addr, size_t size);
-void kmsan_internal_check_memory(void *addr, size_t size, const void *user_addr,
- int reason);
+void kmsan_internal_check_memory(void *addr, size_t size,
+ const void __user *user_addr, int reason);
struct page *kmsan_vmalloc_to_page_or_null(void *vaddr);
void kmsan_setup_meta(struct page *page, struct page *shadow,
diff --git a/mm/kmsan/kmsan_test.c b/mm/kmsan/kmsan_test.c
index 312989aa2865..902ec48b1e3e 100644
--- a/mm/kmsan/kmsan_test.c
+++ b/mm/kmsan/kmsan_test.c
@@ -67,6 +67,17 @@ static bool report_available(void)
return READ_ONCE(observed.available);
}
+/* Reset observed.available, so that the test can trigger another report. */
+static void report_reset(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&observed.lock, flags);
+ WRITE_ONCE(observed.available, false);
+ observed.ignore = false;
+ spin_unlock_irqrestore(&observed.lock, flags);
+}
+
/* Information we expect in a report. */
struct expect_report {
const char *error_type; /* Error type. */
@@ -407,33 +418,25 @@ static void test_printk(struct kunit *test)
KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
-/*
- * Prevent the compiler from optimizing @var away. Without this, Clang may
- * notice that @var is uninitialized and drop memcpy() calls that use it.
- *
- * There is OPTIMIZER_HIDE_VAR() in linux/compiler.h that we cannot use here,
- * because it is implemented as inline assembly receiving @var as a parameter
- * and will enforce a KMSAN check. Same is true for e.g. barrier_data(var).
- */
-#define DO_NOT_OPTIMIZE(var) barrier()
+/* Prevent the compiler from inlining a memcpy() call. */
+static noinline void *memcpy_noinline(volatile void *dst,
+ const volatile void *src, size_t size)
+{
+ return memcpy((void *)dst, (const void *)src, size);
+}
-/*
- * Test case: ensure that memcpy() correctly copies initialized values.
- * Also serves as a regression test to ensure DO_NOT_OPTIMIZE() does not cause
- * extra checks.
- */
+/* Test case: ensure that memcpy() correctly copies initialized values. */
static void test_init_memcpy(struct kunit *test)
{
EXPECTATION_NO_REPORT(expect);
- volatile int src;
- volatile int dst = 0;
+ volatile long long src;
+ volatile long long dst = 0;
- DO_NOT_OPTIMIZE(src);
src = 1;
kunit_info(
test,
"memcpy()ing aligned initialized src to aligned dst (no reports)\n");
- memcpy((void *)&dst, (void *)&src, sizeof(src));
+ memcpy_noinline((void *)&dst, (void *)&src, sizeof(src));
kmsan_check_memory((void *)&dst, sizeof(dst));
KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
@@ -451,8 +454,7 @@ static void test_memcpy_aligned_to_aligned(struct kunit *test)
kunit_info(
test,
"memcpy()ing aligned uninit src to aligned dst (UMR report)\n");
- DO_NOT_OPTIMIZE(uninit_src);
- memcpy((void *)&dst, (void *)&uninit_src, sizeof(uninit_src));
+ memcpy_noinline((void *)&dst, (void *)&uninit_src, sizeof(uninit_src));
kmsan_check_memory((void *)&dst, sizeof(dst));
KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
@@ -463,7 +465,7 @@ static void test_memcpy_aligned_to_aligned(struct kunit *test)
*
* Copying aligned 4-byte value to an unaligned one leads to touching two
* aligned 4-byte values. This test case checks that KMSAN correctly reports an
- * error on the first of the two values.
+ * error on the mentioned two values.
*/
static void test_memcpy_aligned_to_unaligned(struct kunit *test)
{
@@ -474,33 +476,65 @@ static void test_memcpy_aligned_to_unaligned(struct kunit *test)
kunit_info(
test,
"memcpy()ing aligned uninit src to unaligned dst (UMR report)\n");
- DO_NOT_OPTIMIZE(uninit_src);
- memcpy((void *)&dst[1], (void *)&uninit_src, sizeof(uninit_src));
+ kmsan_check_memory((void *)&uninit_src, sizeof(uninit_src));
+ memcpy_noinline((void *)&dst[1], (void *)&uninit_src,
+ sizeof(uninit_src));
kmsan_check_memory((void *)dst, 4);
KUNIT_EXPECT_TRUE(test, report_matches(&expect));
+ report_reset();
+ kmsan_check_memory((void *)&dst[4], sizeof(uninit_src));
+ KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
/*
- * Test case: ensure that memcpy() correctly copies uninitialized values between
- * aligned `src` and unaligned `dst`.
+ * Test case: ensure that origin slots do not accidentally get overwritten with
+ * zeroes during memcpy().
*
- * Copying aligned 4-byte value to an unaligned one leads to touching two
- * aligned 4-byte values. This test case checks that KMSAN correctly reports an
- * error on the second of the two values.
+ * Previously, when copying memory from an aligned buffer to an unaligned one,
+ * if there were zero origins corresponding to zero shadow values in the source
+ * buffer, they could have ended up being copied to nonzero shadow values in the
+ * destination buffer:
+ *
+ * memcpy(0xffff888080a00000, 0xffff888080900002, 8)
+ *
+ * src (0xffff888080900002): ..xx .... xx..
+ * src origins: o111 0000 o222
+ * dst (0xffff888080a00000): xx.. ..xx
+ * dst origins: o111 0000
+ * (or 0000 o222)
+ *
+ * (here . stands for an initialized byte, and x for an uninitialized one.)
+ *
+ * Ensure that this does not happen anymore, and for both destination bytes
+ * the origin is nonzero (i.e. KMSAN reports an error).
*/
-static void test_memcpy_aligned_to_unaligned2(struct kunit *test)
+static void test_memcpy_initialized_gap(struct kunit *test)
{
- EXPECTATION_UNINIT_VALUE_FN(expect,
- "test_memcpy_aligned_to_unaligned2");
- volatile int uninit_src;
+ EXPECTATION_UNINIT_VALUE_FN(expect, "test_memcpy_initialized_gap");
+ volatile char uninit_src[12];
volatile char dst[8] = { 0 };
kunit_info(
test,
- "memcpy()ing aligned uninit src to unaligned dst - part 2 (UMR report)\n");
- DO_NOT_OPTIMIZE(uninit_src);
- memcpy((void *)&dst[1], (void *)&uninit_src, sizeof(uninit_src));
- kmsan_check_memory((void *)&dst[4], sizeof(uninit_src));
+ "unaligned 4-byte initialized value gets a nonzero origin after memcpy() - (2 UMR reports)\n");
+
+ uninit_src[0] = 42;
+ uninit_src[1] = 42;
+ uninit_src[4] = 42;
+ uninit_src[5] = 42;
+ uninit_src[6] = 42;
+ uninit_src[7] = 42;
+ uninit_src[10] = 42;
+ uninit_src[11] = 42;
+ memcpy_noinline((void *)&dst[0], (void *)&uninit_src[2], 8);
+
+ kmsan_check_memory((void *)&dst[0], 4);
+ KUNIT_EXPECT_TRUE(test, report_matches(&expect));
+ report_reset();
+ kmsan_check_memory((void *)&dst[2], 4);
+ KUNIT_EXPECT_FALSE(test, report_matches(&expect));
+ report_reset();
+ kmsan_check_memory((void *)&dst[4], 4);
KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
@@ -513,7 +547,6 @@ static void test_memcpy_aligned_to_unaligned2(struct kunit *test)
\
kunit_info(test, \
"memset" #size "() should initialize memory\n"); \
- DO_NOT_OPTIMIZE(uninit); \
memset##size((uint##size##_t *)&uninit, 0, 1); \
kmsan_check_memory((void *)&uninit, sizeof(uninit)); \
KUNIT_EXPECT_TRUE(test, report_matches(&expect)); \
@@ -523,6 +556,21 @@ DEFINE_TEST_MEMSETXX(16)
DEFINE_TEST_MEMSETXX(32)
DEFINE_TEST_MEMSETXX(64)
+/* Test case: ensure that KMSAN does not access shadow memory out of bounds. */
+static void test_memset_on_guarded_buffer(struct kunit *test)
+{
+ void *buf = vmalloc(PAGE_SIZE);
+
+ kunit_info(test,
+ "memset() on ends of guarded buffer should not crash\n");
+
+ for (size_t size = 0; size <= 128; size++) {
+ memset(buf, 0xff, size);
+ memset(buf + PAGE_SIZE - size, 0xff, size);
+ }
+ vfree(buf);
+}
+
static noinline void fibonacci(int *array, int size, int start)
{
if (start < 2 || (start == size))
@@ -581,6 +629,48 @@ static void test_stackdepot_roundtrip(struct kunit *test)
KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
+/*
+ * Test case: ensure that kmsan_unpoison_memory() and the instrumentation work
+ * the same.
+ */
+static void test_unpoison_memory(struct kunit *test)
+{
+ EXPECTATION_UNINIT_VALUE_FN(expect, "test_unpoison_memory");
+ volatile char a[4], b[4];
+
+ kunit_info(
+ test,
+ "unpoisoning via the instrumentation vs. kmsan_unpoison_memory() (2 UMR reports)\n");
+
+ /* Initialize a[0] and check a[1]--a[3]. */
+ a[0] = 0;
+ kmsan_check_memory((char *)&a[1], 3);
+ KUNIT_EXPECT_TRUE(test, report_matches(&expect));
+
+ report_reset();
+
+ /* Initialize b[0] and check b[1]--b[3]. */
+ kmsan_unpoison_memory((char *)&b[0], 1);
+ kmsan_check_memory((char *)&b[1], 3);
+ KUNIT_EXPECT_TRUE(test, report_matches(&expect));
+}
+
+static void test_copy_from_kernel_nofault(struct kunit *test)
+{
+ long ret;
+ char buf[4], src[4];
+ size_t size = sizeof(buf);
+
+ EXPECTATION_UNINIT_VALUE_FN(expect, "copy_from_kernel_nofault");
+ kunit_info(
+ test,
+ "testing copy_from_kernel_nofault with uninitialized memory\n");
+
+ ret = copy_from_kernel_nofault((char *)&buf[0], (char *)&src[0], size);
+ USE(ret);
+ KUNIT_EXPECT_TRUE(test, report_matches(&expect));
+}
+
static struct kunit_case kmsan_test_cases[] = {
KUNIT_CASE(test_uninit_kmalloc),
KUNIT_CASE(test_init_kmalloc),
@@ -598,12 +688,15 @@ static struct kunit_case kmsan_test_cases[] = {
KUNIT_CASE(test_init_memcpy),
KUNIT_CASE(test_memcpy_aligned_to_aligned),
KUNIT_CASE(test_memcpy_aligned_to_unaligned),
- KUNIT_CASE(test_memcpy_aligned_to_unaligned2),
+ KUNIT_CASE(test_memcpy_initialized_gap),
KUNIT_CASE(test_memset16),
KUNIT_CASE(test_memset32),
KUNIT_CASE(test_memset64),
+ KUNIT_CASE(test_memset_on_guarded_buffer),
KUNIT_CASE(test_long_origin_chain),
KUNIT_CASE(test_stackdepot_roundtrip),
+ KUNIT_CASE(test_unpoison_memory),
+ KUNIT_CASE(test_copy_from_kernel_nofault),
{},
};
@@ -626,9 +719,13 @@ static void test_exit(struct kunit *test)
{
}
+static int orig_panic_on_kmsan;
+
static int kmsan_suite_init(struct kunit_suite *suite)
{
register_trace_console(probe_console, NULL);
+ orig_panic_on_kmsan = panic_on_kmsan;
+ panic_on_kmsan = 0;
return 0;
}
@@ -636,6 +733,7 @@ static void kmsan_suite_exit(struct kunit_suite *suite)
{
unregister_trace_console(probe_console, NULL);
tracepoint_synchronize_unregister();
+ panic_on_kmsan = orig_panic_on_kmsan;
}
static struct kunit_suite kmsan_test_suite = {
@@ -650,3 +748,4 @@ kunit_test_suites(&kmsan_test_suite);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexander Potapenko <glider@google.com>");
+MODULE_DESCRIPTION("Test cases for KMSAN");
diff --git a/mm/kmsan/report.c b/mm/kmsan/report.c
index 02736ec757f2..d6853ce08954 100644
--- a/mm/kmsan/report.c
+++ b/mm/kmsan/report.c
@@ -8,6 +8,7 @@
*/
#include <linux/console.h>
+#include <linux/kmsan.h>
#include <linux/moduleparam.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
@@ -20,6 +21,7 @@ static DEFINE_RAW_SPINLOCK(kmsan_report_lock);
/* Protected by kmsan_report_lock */
static char report_local_descr[DESCR_SIZE];
int panic_on_kmsan __read_mostly;
+EXPORT_SYMBOL_GPL(panic_on_kmsan);
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
@@ -146,7 +148,7 @@ void kmsan_print_origin(depot_stack_handle_t origin)
}
void kmsan_report(depot_stack_handle_t origin, void *address, int size,
- int off_first, int off_last, const void *user_addr,
+ int off_first, int off_last, const void __user *user_addr,
enum kmsan_bug_reason reason)
{
unsigned long stack_entries[KMSAN_STACK_DEPTH];
@@ -155,14 +157,14 @@ void kmsan_report(depot_stack_handle_t origin, void *address, int size,
unsigned long ua_flags;
bool is_uaf;
- if (!kmsan_enabled)
+ if (!kmsan_enabled || kmsan_in_runtime())
return;
- if (!current->kmsan_ctx.allow_reporting)
+ if (current->kmsan_ctx.depth)
return;
if (!origin)
return;
- current->kmsan_ctx.allow_reporting = false;
+ kmsan_enter_runtime();
ua_flags = user_access_save();
raw_spin_lock(&kmsan_report_lock);
pr_err("=====================================================\n");
@@ -215,5 +217,5 @@ void kmsan_report(depot_stack_handle_t origin, void *address, int size,
if (panic_on_kmsan)
panic("kmsan.panic set ...\n");
user_access_restore(ua_flags);
- current->kmsan_ctx.allow_reporting = true;
+ kmsan_leave_runtime();
}
diff --git a/mm/kmsan/shadow.c b/mm/kmsan/shadow.c
index 87318f9170f1..e7f554a31bb4 100644
--- a/mm/kmsan/shadow.c
+++ b/mm/kmsan/shadow.c
@@ -123,14 +123,12 @@ return_dummy:
*/
void *kmsan_get_metadata(void *address, bool is_origin)
{
- u64 addr = (u64)address, pad, off;
+ u64 addr = (u64)address, off;
struct page *page;
void *ret;
- if (is_origin && !IS_ALIGNED(addr, KMSAN_ORIGIN_SIZE)) {
- pad = addr % KMSAN_ORIGIN_SIZE;
- addr -= pad;
- }
+ if (is_origin)
+ addr = ALIGN_DOWN(addr, KMSAN_ORIGIN_SIZE);
address = (void *)addr;
if (kmsan_internal_is_vmalloc_addr(address) ||
kmsan_internal_is_module_addr(address))
@@ -209,16 +207,15 @@ void kmsan_free_page(struct page *page, unsigned int order)
if (!kmsan_enabled || kmsan_in_runtime())
return;
kmsan_enter_runtime();
- kmsan_internal_poison_memory(page_address(page),
- page_size(page),
- GFP_KERNEL,
+ kmsan_internal_poison_memory(page_address(page), page_size(page),
+ GFP_KERNEL & ~(__GFP_RECLAIM),
KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
kmsan_leave_runtime();
}
int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
pgprot_t prot, struct page **pages,
- unsigned int page_shift)
+ unsigned int page_shift, gfp_t gfp_mask)
{
unsigned long shadow_start, origin_start, shadow_end, origin_end;
struct page **s_pages, **o_pages;
@@ -233,8 +230,8 @@ int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
return 0;
nr = (end - start) / PAGE_SIZE;
- s_pages = kcalloc(nr, sizeof(*s_pages), GFP_KERNEL);
- o_pages = kcalloc(nr, sizeof(*o_pages), GFP_KERNEL);
+ s_pages = kcalloc(nr, sizeof(*s_pages), gfp_mask);
+ o_pages = kcalloc(nr, sizeof(*o_pages), gfp_mask);
if (!s_pages || !o_pages) {
err = -ENOMEM;
goto ret;
@@ -243,7 +240,6 @@ int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
s_pages[i] = shadow_page_for(pages[i]);
o_pages[i] = origin_page_for(pages[i]);
}
- prot = __pgprot(pgprot_val(prot) | _PAGE_NX);
prot = PAGE_KERNEL;
origin_start = vmalloc_meta((void *)start, KMSAN_META_ORIGIN);
@@ -251,17 +247,19 @@ int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
kmsan_enter_runtime();
mapped = __vmap_pages_range_noflush(shadow_start, shadow_end, prot,
s_pages, page_shift);
+ kmsan_leave_runtime();
if (mapped) {
err = mapped;
goto ret;
}
+ kmsan_enter_runtime();
mapped = __vmap_pages_range_noflush(origin_start, origin_end, prot,
o_pages, page_shift);
+ kmsan_leave_runtime();
if (mapped) {
err = mapped;
goto ret;
}
- kmsan_leave_runtime();
flush_tlb_kernel_range(shadow_start, shadow_end);
flush_tlb_kernel_range(origin_start, origin_end);
flush_cache_vmap(shadow_start, shadow_end);
@@ -283,14 +281,15 @@ void __init kmsan_init_alloc_meta_for_range(void *start, void *end)
start = (void *)PAGE_ALIGN_DOWN((u64)start);
size = PAGE_ALIGN((u64)end - (u64)start);
- shadow = memblock_alloc(size, PAGE_SIZE);
- origin = memblock_alloc(size, PAGE_SIZE);
+ shadow = memblock_alloc_or_panic(size, PAGE_SIZE);
+ origin = memblock_alloc_or_panic(size, PAGE_SIZE);
+
for (u64 addr = 0; addr < size; addr += PAGE_SIZE) {
page = virt_to_page_or_null((char *)start + addr);
- shadow_p = virt_to_page_or_null((char *)shadow + addr);
+ shadow_p = virt_to_page((char *)shadow + addr);
set_no_shadow_origin_page(shadow_p);
shadow_page_for(page) = shadow_p;
- origin_p = virt_to_page_or_null((char *)origin + addr);
+ origin_p = virt_to_page((char *)origin + addr);
set_no_shadow_origin_page(origin_p);
origin_page_for(page) = origin_p;
}