Diffstat (limited to 'mm/kfence/core.c')
 mm/kfence/core.c | 48 +++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 39 insertions(+), 9 deletions(-)
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 67fc321db79b..577a1699c553 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -21,10 +21,12 @@
#include <linux/log2.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
+#include <linux/nodemask.h>
#include <linux/notifier.h>
#include <linux/panic_notifier.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
+#include <linux/reboot.h>
#include <linux/sched/clock.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -593,32 +595,33 @@ static void rcu_guarded_free(struct rcu_head *h)
*/
static unsigned long kfence_init_pool(void)
{
- unsigned long addr;
- struct page *pages;
+ unsigned long addr, start_pfn;
int i;
if (!arch_kfence_init_pool())
return (unsigned long)__kfence_pool;
addr = (unsigned long)__kfence_pool;
- pages = virt_to_page(__kfence_pool);
+ start_pfn = PHYS_PFN(virt_to_phys(__kfence_pool));
/*
- * Set up object pages: they must have PG_slab set, to avoid freeing
- * these as real pages.
+ * Set up object pages: they must have PGTY_slab set to avoid freeing
+ * them as real pages.
*
* We also want to avoid inserting kfence_free() in the kfree()
* fast-path in SLUB, and therefore need to ensure kfree() correctly
* enters __slab_free() slow-path.
*/
for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
- struct slab *slab = page_slab(nth_page(pages, i));
+ struct page *page;
if (!i || (i % 2))
continue;
- __folio_set_slab(slab_folio(slab));
+ page = pfn_to_page(start_pfn + i);
+ __SetPageSlab(page);
#ifdef CONFIG_MEMCG
+ struct slab *slab = page_slab(page);
slab->obj_exts = (unsigned long)&kfence_metadata_init[i / 2 - 1].obj_exts |
MEMCG_DATA_OBJEXTS;
#endif
@@ -664,14 +667,17 @@ static unsigned long kfence_init_pool(void)
reset_slab:
for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
- struct slab *slab = page_slab(nth_page(pages, i));
+ struct page *page;
if (!i || (i % 2))
continue;
+
+ page = pfn_to_page(start_pfn + i);
#ifdef CONFIG_MEMCG
+ struct slab *slab = page_slab(page);
slab->obj_exts = 0;
#endif
- __folio_clear_slab(slab_folio(slab));
+ __ClearPageSlab(page);
}
return addr;
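
[Note] The two hunks above replace the virt_to_page()/nth_page() walk over the pool with plain PFN arithmetic. A minimal sketch of the resulting iteration pattern, assuming the usual KFENCE pool layout (page 0 and all odd-index pages are guard pages, the remaining even-index pages back objects); the helper name below is hypothetical and not part of the patch:

static void mark_pool_object_pages(char *pool, unsigned long pool_size)
{
	unsigned long start_pfn = PHYS_PFN(virt_to_phys(pool));
	unsigned long i;

	for (i = 0; i < pool_size / PAGE_SIZE; i++) {
		/* Skip page 0 and every odd-index (guard) page. */
		if (!i || (i % 2))
			continue;

		/* Give the object page the slab page type, as the patch does. */
		__SetPageSlab(pfn_to_page(start_pfn + i));
	}
}

Because the pool is physically contiguous, PFN arithmetic stays valid even where the memmap is not virtually contiguous, which is what the nth_page() calls were there to paper over.
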
@@ -817,6 +823,25 @@ static struct notifier_block kfence_check_canary_notifier = {
static struct delayed_work kfence_timer;
#ifdef CONFIG_KFENCE_STATIC_KEYS
+static int kfence_reboot_callback(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ /*
+ * Disable kfence to avoid static keys IPI synchronization during
+ * late shutdown/kexec
+ */
+ WRITE_ONCE(kfence_enabled, false);
+ /* Cancel any pending timer work */
+ cancel_delayed_work_sync(&kfence_timer);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block kfence_reboot_notifier = {
+ .notifier_call = kfence_reboot_callback,
+ .priority = INT_MAX, /* Run early to stop timers ASAP */
+};
+
/* Wait queue to wake up allocation-gate timer task. */
static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);
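
[Note] The hunk above uses the stock reboot-notifier interface from <linux/reboot.h>. A generic sketch, with hypothetical names, of a subsystem quiescing itself on shutdown the same way kfence_reboot_callback() does:

static bool example_enabled = true;		/* hypothetical on/off flag */
static struct delayed_work example_work;	/* hypothetical deferred work */

static int example_reboot_cb(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	/* Stop new activity, then wait for anything already queued. */
	WRITE_ONCE(example_enabled, false);
	cancel_delayed_work_sync(&example_work);
	return NOTIFY_OK;
}

static struct notifier_block example_reboot_nb = {
	.notifier_call	= example_reboot_cb,
	.priority	= INT_MAX,	/* run before lower-priority callbacks */
};

/* From the subsystem's init path: */
/*	register_reboot_notifier(&example_reboot_nb); */

The INT_MAX priority mirrors the patch's intent of stopping the allocation-gate timer as early as possible in the reboot sequence, before the late shutdown/kexec path where static-key toggling and its IPI synchronization are unwelcome.
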
@@ -898,6 +923,10 @@ static void kfence_init_enable(void)
if (kfence_check_on_panic)
atomic_notifier_chain_register(&panic_notifier_list, &kfence_check_canary_notifier);
+#ifdef CONFIG_KFENCE_STATIC_KEYS
+ register_reboot_notifier(&kfence_reboot_notifier);
+#endif
+
WRITE_ONCE(kfence_enabled, true);
queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
@@ -1084,6 +1113,7 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
* properties (e.g. reside in DMAable memory).
*/
if ((flags & GFP_ZONEMASK) ||
+ ((flags & __GFP_THISNODE) && num_online_nodes() > 1) ||
(s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) {
atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
return NULL;
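
[Note] The new __GFP_THISNODE clause extends the existing incompatible-GFP filtering: the KFENCE pool is a single region on one node, so on a multi-node system it generally cannot satisfy an allocation pinned to the caller's node. A condensed sketch of the combined check as a standalone predicate (the helper name is hypothetical):

static bool gfp_is_kfence_compatible(struct kmem_cache *s, gfp_t flags)
{
	/* Caller requires a specific zone (e.g. DMAable memory). */
	if (flags & GFP_ZONEMASK)
		return false;

	/* Caller requires memory from its own node; the pool may be elsewhere. */
	if ((flags & __GFP_THISNODE) && num_online_nodes() > 1)
		return false;

	/* The cache itself requires DMA/DMA32 memory. */
	if (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))
		return false;

	return true;
}

With a single online node the new clause never triggers, so UMA systems keep covering __GFP_THISNODE allocations with KFENCE.
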