author    Linus Torvalds <torvalds@linux-foundation.org>    2022-04-01 12:08:34 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2022-04-01 12:08:34 -0700
commit    b012b3235cb9d05e4ccaff8327bfbed6faf014aa (patch)
tree      4db2d153b219e67c8f914f0ad3b00de10ab269ac /mm/kfence/core.c
parent    d0d642a5d365b5e2295950fd184d5d1f630896dd (diff)
parent    78049e94a171837e5a882814ca5bc4f14f265603 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge still more updates from Andrew Morton:
 "16 patches.

  Subsystems affected by this patch series: ocfs2, nilfs2, mailmap, and
  mm (madvise, mlock, kfence, memory-failure, kasan, debug, kmemleak,
  and damon)"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm/damon: prevent activated scheme from sleeping by deactivated schemes
  mm/kmemleak: reset tag when compare object pointer
  doc/vm/page_owner.rst: remove content related to -c option
  tools/vm/page_owner_sort.c: remove -c option
  mm, kasan: fix __GFP_BITS_SHIFT definition breaking LOCKDEP
  mm,hwpoison: unmap poisoned page before invalidation
  mailmap: update Kirill's email
  mm: kfence: fix objcgs vector allocation
  mm/munlock: protect the per-CPU pagevec by a local_lock_t
  mm/munlock: update Documentation/vm/unevictable-lru.rst
  mm/munlock: add lru_add_drain() to fix memcg_stat_test
  nilfs2: get rid of nilfs_mapping_init()
  nilfs2: fix lockdep warnings during disk space reclamation
  nilfs2: fix lockdep warnings in page operations for btree nodes
  ocfs2: fix crash when mount with quota enabled
  Revert "mm: madvise: skip unmapped vma holes passed to process_madvise"
Diffstat (limited to 'mm/kfence/core.c')
-rw-r--r--  mm/kfence/core.c  11
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 2f9fdfde1941..a203747ad2c0 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -566,6 +566,8 @@ static unsigned long kfence_init_pool(void)
 	 * enters __slab_free() slow-path.
 	 */
 	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
+		struct slab *slab = page_slab(&pages[i]);
+
 		if (!i || (i % 2))
 			continue;
 
@@ -573,7 +575,11 @@ static unsigned long kfence_init_pool(void)
 		if (WARN_ON(compound_head(&pages[i]) != &pages[i]))
 			return addr;
 
-		__SetPageSlab(&pages[i]);
+		__folio_set_slab(slab_folio(slab));
+#ifdef CONFIG_MEMCG
+		slab->memcg_data = (unsigned long)&kfence_metadata[i / 2 - 1].objcg |
+				   MEMCG_DATA_OBJCGS;
+#endif
 	}
 
 	/*
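The two hunks above embed a one-element objcgs vector directly in the KFENCE metadata: page 0 and every odd-indexed page of the pool are guard pages, so even page i backs the object tracked by kfence_metadata[i / 2 - 1], and slab->memcg_data is pointed at that entry's objcg field with the MEMCG_DATA_OBJCGS flag OR'd into the low bits. Below is a standalone, user-space sketch of that tagged-pointer encoding and the slab_objcgs()-style decode; it is not kernel code, and the 2-bit flag-mask width is an assumption made for the demo.

	/* Standalone sketch (not kernel code) of the memcg_data tagging used
	 * above: the low bits carry type flags, and masking them off recovers
	 * the objcgs-vector pointer. Flag-mask width is assumed here. */
	#include <assert.h>
	#include <stdint.h>

	#define MEMCG_DATA_OBJCGS	(1UL << 0)	/* "points to an objcgs vector" */
	#define MEMCG_DATA_FLAGS_MASK	((1UL << 2) - 1)

	struct obj_cgroup { int dummy; };

	int main(void)
	{
		/* One objcg slot per KFENCE object, as in kfence_metadata. */
		static struct obj_cgroup *vec[1];
		uintptr_t memcg_data = (uintptr_t)vec | MEMCG_DATA_OBJCGS;

		/* Decode: check the flag, strip the flag bits, cast back. */
		assert(memcg_data & MEMCG_DATA_OBJCGS);
		struct obj_cgroup **objcgs =
			(struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
		assert(objcgs == vec);
		return 0;
	}

Because the vector lives inside the statically allocated metadata array, no dynamic allocation is needed for it, so a KFENCE pool slot can no longer be pinned forever by its own accounting vector.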
@@ -1033,6 +1039,9 @@ void __kfence_free(void *addr)
 {
 	struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
 
+#ifdef CONFIG_MEMCG
+	KFENCE_WARN_ON(meta->objcg);
+#endif
 	/*
 	 * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing
 	 * the object, as the object page may be recycled for other-typed
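The new KFENCE_WARN_ON(meta->objcg) records the expected free-path ordering: since slab->memcg_data points at &meta->objcg, the memcg slab free hook uncharges and NULLs that slot before the object reaches __kfence_free(), so a non-NULL objcg here means the uncharge step was skipped. A minimal standalone model of that invariant follows; the types and helper names are hypothetical stand-ins, not the kernel's.

	/* Standalone sketch (not kernel code) of the ordering the warning
	 * checks: the free hook must clear the objcg slot first. */
	#include <assert.h>
	#include <stddef.h>

	struct obj_cgroup { int refcnt; };
	struct kfence_metadata_sketch { struct obj_cgroup *objcg; };

	/* Stand-in for the memcg slab free hook: the one-element vector is
	 * &meta->objcg, so clearing objcgs[0] clears meta->objcg itself. */
	static void memcg_free_hook(struct obj_cgroup **objcgs)
	{
		if (objcgs[0]) {
			objcgs[0]->refcnt--;	/* uncharge / drop reference */
			objcgs[0] = NULL;
		}
	}

	static void kfence_free_sketch(struct kfence_metadata_sketch *meta)
	{
		/* Mirrors KFENCE_WARN_ON(meta->objcg): hook already ran. */
		assert(meta->objcg == NULL);
	}

	int main(void)
	{
		struct obj_cgroup cg = { .refcnt = 1 };
		struct kfence_metadata_sketch meta = { .objcg = &cg };

		memcg_free_hook(&meta.objcg);	/* one-element vector */
		kfence_free_sketch(&meta);
		return 0;
	}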