Diffstat (limited to 'mm/kasan/quarantine.c')
| -rw-r--r-- | mm/kasan/quarantine.c | 174 |
1 file changed, 130 insertions, 44 deletions
diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c
index 3a8ddf8baf7d..6958aa713c67 100644
--- a/mm/kasan/quarantine.c
+++ b/mm/kasan/quarantine.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * KASAN quarantine.
  *
@@ -5,18 +6,10 @@
  * Copyright (C) 2016 Google, Inc.
  *
  * Based on code by Dmitry Chernenkov.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
  */
 
+#define pr_fmt(fmt) "kasan: " fmt
+
 #include <linux/gfp.h>
 #include <linux/hash.h>
 #include <linux/kernel.h>
@@ -28,6 +21,7 @@
 #include <linux/srcu.h>
 #include <linux/string.h>
 #include <linux/types.h>
+#include <linux/cpuhotplug.h>
 
 #include "../slab.h"
 #include "kasan.h"
@@ -35,13 +29,14 @@
 /* Data structure and operations for quarantine queues. */
 
 /*
- * Each queue is a signle-linked list, which also stores the total size of
+ * Each queue is a single-linked list, which also stores the total size of
  * objects inside of it.
  */
 struct qlist_head {
 	struct qlist_node *head;
 	struct qlist_node *tail;
 	size_t bytes;
+	bool offline;
 };
 
 #define QLIST_INIT { NULL, NULL, 0 }
@@ -103,9 +98,18 @@
 static int quarantine_head;
 static int quarantine_tail;
 /* Total size of all objects in global_quarantine across all batches. */
 static unsigned long quarantine_size;
-static DEFINE_SPINLOCK(quarantine_lock);
+static DEFINE_RAW_SPINLOCK(quarantine_lock);
 DEFINE_STATIC_SRCU(remove_cache_srcu);
 
+struct cpu_shrink_qlist {
+	raw_spinlock_t lock;
+	struct qlist_head qlist;
+};
+
+static DEFINE_PER_CPU(struct cpu_shrink_qlist, shrink_qlist) = {
+	.lock = __RAW_SPIN_LOCK_UNLOCKED(shrink_qlist.lock),
+};
+
 /* Maximum size of the global queue. */
 static unsigned long quarantine_max_size;
@@ -124,7 +128,7 @@ static unsigned long quarantine_batch_size;
 
 static struct kmem_cache *qlink_to_cache(struct qlist_node *qlink)
 {
-	return virt_to_head_page(qlink)->slab_cache;
+	return virt_to_slab(qlink)->slab_cache;
 }
 
 static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)
@@ -139,15 +143,24 @@ static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)
 static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
 {
 	void *object = qlink_to_object(qlink, cache);
-	unsigned long flags;
+	struct kasan_free_meta *free_meta = kasan_get_free_meta(cache, object);
 
-	if (IS_ENABLED(CONFIG_SLAB))
-		local_irq_save(flags);
+	/*
+	 * Note: Keep per-object metadata to allow KASAN print stack traces for
+	 * use-after-free-before-realloc bugs.
+	 */
 
-	___cache_free(cache, object, _THIS_IP_);
+	/*
+	 * If init_on_free is enabled and KASAN's free metadata is stored in
+	 * the object, zero the metadata. Otherwise, the object's memory will
+	 * not be properly zeroed, as KASAN saves the metadata after the slab
+	 * allocator zeroes the object.
+	 */
+	if (slab_want_init_on_free(cache) &&
+	    cache->kasan_info.free_meta_offset == 0)
+		memzero_explicit(free_meta, sizeof(*free_meta));
 
-	if (IS_ENABLED(CONFIG_SLAB))
-		local_irq_restore(flags);
+	___cache_free(cache, object, _THIS_IP_);
 }
 
 static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)
@@ -169,28 +182,40 @@ static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)
 	qlist_init(q);
 }
 
-void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
+bool kasan_quarantine_put(struct kmem_cache *cache, void *object)
 {
 	unsigned long flags;
 	struct qlist_head *q;
 	struct qlist_head temp = QLIST_INIT;
+	struct kasan_free_meta *meta = kasan_get_free_meta(cache, object);
+
+	/*
+	 * If there's no metadata for this object, don't put it into
+	 * quarantine.
+	 */
+	if (!meta)
+		return false;
 
 	/*
 	 * Note: irq must be disabled until after we move the batch to the
-	 * global quarantine. Otherwise quarantine_remove_cache() can miss
-	 * some objects belonging to the cache if they are in our local temp
-	 * list. quarantine_remove_cache() executes on_each_cpu() at the
-	 * beginning which ensures that it either sees the objects in per-cpu
-	 * lists or in the global quarantine.
+	 * global quarantine. Otherwise kasan_quarantine_remove_cache() can
+	 * miss some objects belonging to the cache if they are in our local
+	 * temp list. kasan_quarantine_remove_cache() executes on_each_cpu()
+	 * at the beginning which ensures that it either sees the objects in
+	 * per-cpu lists or in the global quarantine.
	 */
 	local_irq_save(flags);
 
 	q = this_cpu_ptr(&cpu_quarantine);
-	qlist_put(q, &info->quarantine_link, cache->size);
+	if (q->offline) {
+		local_irq_restore(flags);
+		return false;
+	}
+	qlist_put(q, &meta->quarantine_link, cache->size);
 	if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) {
 		qlist_move_all(q, &temp);
 
-		spin_lock(&quarantine_lock);
+		raw_spin_lock(&quarantine_lock);
 		WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes);
 		qlist_move_all(&temp, &global_quarantine[quarantine_tail]);
 		if (global_quarantine[quarantine_tail].bytes >=
@@ -203,13 +228,15 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
 			if (new_tail != quarantine_head)
 				quarantine_tail = new_tail;
 		}
-		spin_unlock(&quarantine_lock);
+		raw_spin_unlock(&quarantine_lock);
 	}
 
 	local_irq_restore(flags);
+
+	return true;
 }
 
-void quarantine_reduce(void)
+void kasan_quarantine_reduce(void)
 {
 	size_t total_size, new_quarantine_size, percpu_quarantines;
 	unsigned long flags;
@@ -221,7 +248,7 @@ void quarantine_reduce(void)
 		return;
 
 	/*
-	 * srcu critical section ensures that quarantine_remove_cache()
+	 * srcu critical section ensures that kasan_quarantine_remove_cache()
 	 * will not miss objects belonging to the cache while they are in our
 	 * local to_free list. srcu is chosen because (1) it gives us private
 	 * grace period domain that does not interfere with anything else,
@@ -230,13 +257,13 @@ void quarantine_reduce(void)
 	 * expected case).
 	 */
 	srcu_idx = srcu_read_lock(&remove_cache_srcu);
-	spin_lock_irqsave(&quarantine_lock, flags);
+	raw_spin_lock_irqsave(&quarantine_lock, flags);
 
 	/*
 	 * Update quarantine size in case of hotplug. Allocate a fraction of
 	 * the installed memory to quarantine minus per-cpu queue limits.
 	 */
-	total_size = (READ_ONCE(totalram_pages) << PAGE_SHIFT) /
+	total_size = (totalram_pages() << PAGE_SHIFT) /
 		QUARANTINE_FRACTION;
 	percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus();
 	new_quarantine_size = (total_size < percpu_quarantines) ?
@@ -254,7 +281,7 @@ void quarantine_reduce(void)
 		quarantine_head = 0;
 	}
 
-	spin_unlock_irqrestore(&quarantine_lock, flags);
+	raw_spin_unlock_irqrestore(&quarantine_lock, flags);
 
 	qlist_free_all(&to_free, NULL);
 	srcu_read_unlock(&remove_cache_srcu, srcu_idx);
@@ -284,45 +311,104 @@ static void qlist_move_cache(struct qlist_head *from,
 	}
 }
 
-static void per_cpu_remove_cache(void *arg)
+static void __per_cpu_remove_cache(struct qlist_head *q, void *arg)
 {
 	struct kmem_cache *cache = arg;
-	struct qlist_head to_free = QLIST_INIT;
+	unsigned long flags;
+	struct cpu_shrink_qlist *sq;
+
+	sq = this_cpu_ptr(&shrink_qlist);
+	raw_spin_lock_irqsave(&sq->lock, flags);
+	qlist_move_cache(q, &sq->qlist, cache);
+	raw_spin_unlock_irqrestore(&sq->lock, flags);
+}
+
+static void per_cpu_remove_cache(void *arg)
+{
 	struct qlist_head *q;
 
 	q = this_cpu_ptr(&cpu_quarantine);
-	qlist_move_cache(q, &to_free, cache);
-	qlist_free_all(&to_free, cache);
+	/*
+	 * Ensure the ordering between the writing to q->offline and
+	 * per_cpu_remove_cache. Prevent cpu_quarantine from being corrupted
+	 * by interrupt.
+	 */
+	if (READ_ONCE(q->offline))
+		return;
+	__per_cpu_remove_cache(q, arg);
 }
 
 /* Free all quarantined objects belonging to cache. */
-void quarantine_remove_cache(struct kmem_cache *cache)
+void kasan_quarantine_remove_cache(struct kmem_cache *cache)
 {
 	unsigned long flags, i;
 	struct qlist_head to_free = QLIST_INIT;
+	int cpu;
+	struct cpu_shrink_qlist *sq;
 
 	/*
 	 * Must be careful to not miss any objects that are being moved from
-	 * per-cpu list to the global quarantine in quarantine_put(),
-	 * nor objects being freed in quarantine_reduce(). on_each_cpu()
+	 * per-cpu list to the global quarantine in kasan_quarantine_put(),
+	 * nor objects being freed in kasan_quarantine_reduce(). on_each_cpu()
 	 * achieves the first goal, while synchronize_srcu() achieves the
 	 * second.
 	 */
 	on_each_cpu(per_cpu_remove_cache, cache, 1);
 
-	spin_lock_irqsave(&quarantine_lock, flags);
+	for_each_online_cpu(cpu) {
+		sq = per_cpu_ptr(&shrink_qlist, cpu);
+		raw_spin_lock_irqsave(&sq->lock, flags);
+		qlist_move_cache(&sq->qlist, &to_free, cache);
+		raw_spin_unlock_irqrestore(&sq->lock, flags);
+	}
+	qlist_free_all(&to_free, cache);
+
+	raw_spin_lock_irqsave(&quarantine_lock, flags);
 	for (i = 0; i < QUARANTINE_BATCHES; i++) {
 		if (qlist_empty(&global_quarantine[i]))
 			continue;
 		qlist_move_cache(&global_quarantine[i], &to_free, cache);
 		/* Scanning whole quarantine can take a while. */
-		spin_unlock_irqrestore(&quarantine_lock, flags);
+		raw_spin_unlock_irqrestore(&quarantine_lock, flags);
 		cond_resched();
-		spin_lock_irqsave(&quarantine_lock, flags);
+		raw_spin_lock_irqsave(&quarantine_lock, flags);
 	}
-	spin_unlock_irqrestore(&quarantine_lock, flags);
+	raw_spin_unlock_irqrestore(&quarantine_lock, flags);
 
 	qlist_free_all(&to_free, cache);
 
 	synchronize_srcu(&remove_cache_srcu);
 }
+
+static int kasan_cpu_online(unsigned int cpu)
+{
+	this_cpu_ptr(&cpu_quarantine)->offline = false;
+	return 0;
+}
+
+static int kasan_cpu_offline(unsigned int cpu)
+{
+	struct qlist_head *q;
+
+	q = this_cpu_ptr(&cpu_quarantine);
+	/* Ensure the ordering between the writing to q->offline and
+	 * qlist_free_all. Otherwise, cpu_quarantine may be corrupted
+	 * by interrupt.
+	 */
+	WRITE_ONCE(q->offline, true);
+	barrier();
+	qlist_free_all(q, NULL);
+	return 0;
+}
+
+static int __init kasan_cpu_quarantine_init(void)
+{
+	int ret = 0;
+
+	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/kasan:online",
+				kasan_cpu_online, kasan_cpu_offline);
+	if (ret < 0)
+		pr_err("cpu quarantine register failed [%d]\n", ret);
+	return ret;
+}
+late_initcall(kasan_cpu_quarantine_init);
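A note on the qlist helpers: qlist_put(), qlist_move_all() and friends are defined earlier in quarantine.c and appear in these hunks only as callers. Each queue is a singly-linked list whose head also tracks the queued byte total, so kasan_quarantine_put() can append an object in O(1) and splice a full per-CPU batch into the global quarantine in O(1). The following is a minimal userspace model of that batching, not the kernel code; PERCPU_LIMIT and the 16-byte object size are invented stand-ins for QUARANTINE_PERCPU_SIZE and cache->size.

/* Userspace model of the quarantine batching; illustration only. */
#include <stdio.h>
#include <stdlib.h>

struct qlist_node { struct qlist_node *next; };

struct qlist_head {
        struct qlist_node *head;
        struct qlist_node *tail;
        size_t bytes;
};

#define QLIST_INIT      { NULL, NULL, 0 }
#define PERCPU_LIMIT    64      /* stand-in for QUARANTINE_PERCPU_SIZE */

/* Append a node in O(1); the head also tracks the queued byte total. */
static void qlist_put(struct qlist_head *q, struct qlist_node *qlink,
                      size_t size)
{
        if (!q->head)
                q->head = qlink;
        else
                q->tail->next = qlink;
        q->tail = qlink;
        qlink->next = NULL;
        q->bytes += size;
}

/* Splice one list onto another in O(1) and empty the source. */
static void qlist_move_all(struct qlist_head *from, struct qlist_head *to)
{
        if (!from->head)
                return;
        if (!to->head)
                to->head = from->head;
        else
                to->tail->next = from->head;
        to->tail = from->tail;
        to->bytes += from->bytes;
        from->head = from->tail = NULL;
        from->bytes = 0;
}

int main(void)
{
        struct qlist_head percpu = QLIST_INIT, global = QLIST_INIT;

        /* "Free" ten 16-byte objects; once the per-CPU batch exceeds the
         * limit, splice it into the global list, as kasan_quarantine_put()
         * does under the quarantine lock. */
        for (int i = 0; i < 10; i++) {
                qlist_put(&percpu, calloc(1, sizeof(struct qlist_node)), 16);
                if (percpu.bytes > PERCPU_LIMIT)
                        qlist_move_all(&percpu, &global);
        }
        printf("per-cpu: %zu bytes, global: %zu bytes\n",
               percpu.bytes, global.bytes);
        return 0;
}

Built with any C compiler, this prints "per-cpu: 0 bytes, global: 160 bytes": the ten frees crossed the threshold twice, so two batches of five nodes each were spliced out.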
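Cache removal (kasan_quarantine_remove_cache() and __per_cpu_remove_cache() above) builds on qlist_move_cache(), whose body sits just before the @@ -284,45 hunk and is visible here only as context. The idea: reinitialize the source list, then walk the old chain once, re-appending each node either back to the source or onto the destination depending on which kmem_cache it belongs to, preserving order in both lists. A sketch under the same userspace model; qlist_move_tag() and the int tag field are invented stand-ins for qlist_move_cache() and the kernel's per-node qlink_to_cache() lookup.

/* Userspace sketch of the selective move behind quarantine shrinking. */
#include <stdio.h>
#include <stdlib.h>

struct qlist_node {
        struct qlist_node *next;
        int tag;                /* stand-in for the owning kmem_cache */
};

struct qlist_head {
        struct qlist_node *head, *tail;
        size_t bytes;
};

static void qlist_put(struct qlist_head *q, struct qlist_node *n)
{
        if (!q->head)
                q->head = n;
        else
                q->tail->next = n;
        q->tail = n;
        n->next = NULL;
        q->bytes += sizeof(*n);
}

/*
 * Walk the old chain once, re-appending every node either back to 'from'
 * or onto 'to' depending on the tag, so both lists keep their order.
 */
static void qlist_move_tag(struct qlist_head *from, struct qlist_head *to,
                           int tag)
{
        struct qlist_node *curr = from->head;

        from->head = from->tail = NULL;
        from->bytes = 0;
        while (curr) {
                struct qlist_node *next = curr->next;

                qlist_put(curr->tag == tag ? to : from, curr);
                curr = next;
        }
}

int main(void)
{
        struct qlist_head q = { 0 }, victim = { 0 };

        for (int i = 0; i < 6; i++) {
                struct qlist_node *n = calloc(1, sizeof(*n));

                n->tag = i % 2; /* two fake caches: 0 and 1 */
                qlist_put(&q, n);
        }
        qlist_move_tag(&q, &victim, 1); /* "shrink" cache 1 */
        printf("kept %zu bytes, moved %zu bytes\n", q.bytes, victim.bytes);
        return 0;
}

The kernel version additionally sizes each node by its cache's object size and bails out early on an empty list; the single-pass, order-preserving walk is the same.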
