Diffstat (limited to 'lib')
 lib/Kconfig.kasan     |   12
 lib/alloc_tag.c       |   30
 lib/maple_tree.c      |   12
 lib/rhashtable.c      |    4
 lib/test_hmm.c        |    2
 lib/test_kho.c        |   52
 lib/test_maple_tree.c |    2
 lib/xarray.c          |    2
 8 files changed, 79 insertions(+), 37 deletions(-)
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index f82889a830fa..a4bb610a7a6f 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -19,6 +19,18 @@ config ARCH_DISABLE_KASAN_INLINE
Disables both inline and stack instrumentation. Selected by
architectures that do not support these instrumentation types.
+config ARCH_NEEDS_DEFER_KASAN
+ bool
+
+config ARCH_DEFER_KASAN
+ def_bool y
+ depends on KASAN && ARCH_NEEDS_DEFER_KASAN
+ help
+ Architectures should select this if they need to defer KASAN
+ initialization until shadow memory is properly set up. This
+ enables runtime control via static keys. Otherwise, KASAN uses
+ compile-time constants for better performance.
+
config CC_HAS_KASAN_GENERIC
def_bool $(cc-option, -fsanitize=kernel-address)
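The new help text describes the same static-key pattern KASAN's HW_TAGS mode already uses at runtime. A minimal sketch of the two regimes the help text contrasts (names are illustrative, not necessarily the final mm/kasan interface):

	#include <linux/jump_label.h>

	#ifdef CONFIG_ARCH_DEFER_KASAN
	/* Flipped by the architecture once shadow memory is mapped. */
	DEFINE_STATIC_KEY_FALSE(kasan_flag_enabled);

	static __always_inline bool kasan_enabled(void)
	{
		return static_branch_likely(&kasan_flag_enabled);
	}
	#else
	/* No deferral needed: collapses to a compile-time constant. */
	static __always_inline bool kasan_enabled(void)
	{
		return IS_ENABLED(CONFIG_KASAN);
	}
	#endif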
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index e9b33848700a..3ef702e6b69a 100644
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -80,7 +80,7 @@ static void allocinfo_stop(struct seq_file *m, void *arg)
static void print_allocinfo_header(struct seq_buf *buf)
{
/* Output format version, so we can change it. */
- seq_buf_printf(buf, "allocinfo - version: 1.0\n");
+ seq_buf_printf(buf, "allocinfo - version: 2.0\n");
seq_buf_printf(buf, "# <size> <calls> <tag info>\n");
}
@@ -92,6 +92,8 @@ static void alloc_tag_to_text(struct seq_buf *out, struct codetag *ct)
seq_buf_printf(out, "%12lli %8llu ", bytes, counter.calls);
codetag_to_text(out, ct);
+ if (unlikely(alloc_tag_is_inaccurate(tag)))
+ seq_buf_printf(out, " accurate:no");
seq_buf_putc(out, ' ');
seq_buf_putc(out, '\n');
}
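With the format version bumped to 2.0, consumers of /proc/allocinfo should tolerate the optional trailing field. An entry whose counters may have raced with teardown would look roughly like this (path and function name invented for illustration; columns follow the "# <size> <calls> <tag info>" header):

	        4096        1 lib/example.c:42 func:alloc_buf accurate:no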
@@ -438,9 +440,10 @@ static int vm_module_tags_populate(void)
if (nr < more_pages ||
vmap_pages_range(phys_end, phys_end + (nr << PAGE_SHIFT), PAGE_KERNEL,
next_page, PAGE_SHIFT) < 0) {
+ release_pages_arg arg = { .pages = next_page };
+
/* Clean up and error out */
- for (int i = 0; i < nr; i++)
- __free_page(next_page[i]);
+ release_pages(arg, nr);
return -ENOMEM;
}
@@ -682,11 +685,10 @@ static int __init alloc_mod_tags_mem(void)
static void __init free_mod_tags_mem(void)
{
- int i;
+ release_pages_arg arg = { .pages = vm_module_tags->pages };
module_tags.start_addr = 0;
- for (i = 0; i < vm_module_tags->nr_pages; i++)
- __free_page(vm_module_tags->pages[i]);
+ release_pages(arg, vm_module_tags->nr_pages);
kfree(vm_module_tags->pages);
free_vm_area(vm_module_tags);
}
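Both conversions rely on release_pages() taking a transparent union, so the struct page ** arrays can be passed without casts; include/linux/mm.h declares it roughly as:

	typedef union {
		struct page **pages;
		struct folio **folios;
		struct encoded_page **encoded_pages;
	} release_pages_arg __attribute__ ((__transparent_union__));

	void release_pages(release_pages_arg, int nr);

release_pages() batches the refcount drops, which is cheaper than issuing one __free_page() call per page.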
@@ -766,6 +768,20 @@ struct page_ext_operations page_alloc_tagging_ops = {
EXPORT_SYMBOL(page_alloc_tagging_ops);
#ifdef CONFIG_SYSCTL
+/*
+ * Not using proc_do_static_key() directly to prevent enabling profiling
+ * after it was shut down.
+ */
+static int proc_mem_profiling_handler(const struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ if (!mem_profiling_support && write)
+ return -EINVAL;
+
+ return proc_do_static_key(table, write, buffer, lenp, ppos);
+}
+
static struct ctl_table memory_allocation_profiling_sysctls[] = {
{
.procname = "mem_profiling",
@@ -775,7 +791,7 @@ static struct ctl_table memory_allocation_profiling_sysctls[] = {
#else
.mode = 0644,
#endif
- .proc_handler = proc_do_static_key,
+ .proc_handler = proc_mem_profiling_handler,
},
};
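The net effect: once mem_profiling_support has been cleared (profiling shut down), any write to the sysctl, normally vm.mem_profiling, returns -EINVAL instead of flipping a torn-down static key, while reads still go through proc_do_static_key() unchanged.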
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index ab4c6c21a625..39bb779cb311 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -400,11 +400,11 @@ static __always_inline bool mt_is_alloc(struct maple_tree *mt)
* a reuse of the last bit in the node type. This is possible by using bit 1 to
* indicate if bit 2 is part of the type or the slot.
*
- * Note types:
- * 0x??1 = Root
- * 0x?00 = 16 bit nodes
- * 0x010 = 32 bit nodes
- * 0x110 = 64 bit nodes
+ * Node types:
+ * 0b??1 = Root
+ * 0b?00 = 16 bit nodes
+ * 0b010 = 32 bit nodes
+ * 0b110 = 64 bit nodes
*
* Slot size and alignment
* 0b??1 : Root
@@ -422,7 +422,7 @@ static __always_inline bool mt_is_alloc(struct maple_tree *mt)
#define MAPLE_PARENT_16B_SLOT_MASK 0xFC
#define MAPLE_PARENT_RANGE64 0x06
-#define MAPLE_PARENT_RANGE32 0x04
+#define MAPLE_PARENT_RANGE32 0x02
#define MAPLE_PARENT_NOT_RANGE16 0x02
/*
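A sketch of how the corrected constant lines up with the bit layout documented above (illustrative decode, not the actual mas_parent_type() code):

	unsigned long bits = (unsigned long)parent & 0x07;

	if (bits & 0x01)                                /* 0b??1 */
		; /* root */
	else if (!(bits & MAPLE_PARENT_NOT_RANGE16))    /* 0b?00 */
		; /* 16 bit node, slot borrows bit 2 */
	else if (bits == MAPLE_PARENT_RANGE32)          /* 0b010 == 0x02 */
		; /* 32 bit node */
	else                                            /* 0b110 == 0x06 */
		; /* MAPLE_PARENT_RANGE64 */

The old value 0x04 is 0b100, which this decode would classify as a 16 bit node (bit 1 clear), so it could never match the documented 0b010 type.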
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 3e555d012ed6..fde0f0e556f8 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -184,8 +184,8 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
static struct lock_class_key __key;
tbl = alloc_hooks_tag(ht->alloc_tag,
- kvmalloc_node_noprof(struct_size(tbl, buckets, nbuckets),
- gfp|__GFP_ZERO, NUMA_NO_NODE));
+ kvmalloc_node_align_noprof(struct_size(tbl, buckets, nbuckets),
+ 1, gfp|__GFP_ZERO, NUMA_NO_NODE));
size = nbuckets;
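This hunk presumes the kvmalloc API gained an explicit alignment parameter; judging from the call site, the assumed signature is along the lines of:

	/* align == 1: no alignment requirement beyond the allocator default */
	void *kvmalloc_node_align_noprof(size_t size, unsigned long align,
					 gfp_t flags, int node);

Passing 1 keeps bucket_table_alloc() behaviourally identical to the previous kvmalloc_node_noprof() call.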
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index 761725bc713c..83e3d8208a54 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -140,7 +140,7 @@ static int dmirror_bounce_init(struct dmirror_bounce *bounce,
static bool dmirror_is_private_zone(struct dmirror_device *mdevice)
{
return (mdevice->zone_device_type ==
- HMM_DMIRROR_MEMORY_DEVICE_PRIVATE) ? true : false;
+ HMM_DMIRROR_MEMORY_DEVICE_PRIVATE);
}
static enum migrate_vma_direction
diff --git a/lib/test_kho.c b/lib/test_kho.c
index c2eb899c3b45..fe8504e3407b 100644
--- a/lib/test_kho.c
+++ b/lib/test_kho.c
@@ -67,13 +67,20 @@ static struct notifier_block kho_test_nb = {
static int kho_test_save_data(struct kho_test_state *state, void *fdt)
{
- phys_addr_t *folios_info __free(kvfree) = NULL;
+ phys_addr_t *folios_info;
int err = 0;
- folios_info = kvmalloc_array(state->nr_folios, sizeof(*folios_info),
- GFP_KERNEL);
- if (!folios_info)
- return -ENOMEM;
+ err |= fdt_begin_node(fdt, "data");
+ err |= fdt_property(fdt, "nr_folios", &state->nr_folios,
+ sizeof(state->nr_folios));
+ err |= fdt_property_placeholder(fdt, "folios_info",
+ state->nr_folios * sizeof(*folios_info),
+ (void **)&folios_info);
+ err |= fdt_property(fdt, "csum", &state->csum, sizeof(state->csum));
+ err |= fdt_end_node(fdt);
+
+ if (err)
+ return err;
for (int i = 0; i < state->nr_folios; i++) {
struct folio *folio = state->folios[i];
@@ -83,17 +90,9 @@ static int kho_test_save_data(struct kho_test_state *state, void *fdt)
err = kho_preserve_folio(folio);
if (err)
- return err;
+ break;
}
- err |= fdt_begin_node(fdt, "data");
- err |= fdt_property(fdt, "nr_folios", &state->nr_folios,
- sizeof(state->nr_folios));
- err |= fdt_property(fdt, "folios_info", folios_info,
- state->nr_folios * sizeof(*folios_info));
- err |= fdt_property(fdt, "csum", &state->csum, sizeof(state->csum));
- err |= fdt_end_node(fdt);
-
return err;
}
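The reshuffle works because fdt_property_placeholder() (standard libfdt) reserves space for the property inside the blob and hands back a pointer into it, so folios_info is now filled in place rather than staged in a kvmalloc'ed buffer. A compressed sketch, where folio_info_for() stands in for however each entry is actually computed:

	phys_addr_t *folios_info;
	int err;

	/* reserve the property's bytes inside the FDT blob ... */
	err = fdt_property_placeholder(fdt, "folios_info",
				       nr_folios * sizeof(*folios_info),
				       (void **)&folios_info);
	/* ... then write each entry straight into the blob */
	for (int i = 0; !err && i < nr_folios; i++)
		folios_info[i] = folio_info_for(i);	/* hypothetical helper */

It also means every fdt_*() call completes, and is error-checked, before any folio is preserved.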
@@ -140,7 +139,10 @@ static int kho_test_generate_data(struct kho_test_state *state)
unsigned int size;
void *addr;
- /* cap allocation so that we won't exceed max_mem */
+ /*
+ * Since get_order() rounds up, make sure the actual
+ * allocation is smaller so that we won't exceed max_mem
+ */
if (alloc_size + (PAGE_SIZE << order) > max_mem) {
order = get_order(max_mem - alloc_size);
if (order)
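A quick worked example of the rounding the new comment warns about, assuming 4 KiB pages:

	/* 12288 bytes == 3 pages, but orders are powers of two */
	BUILD_BUG_ON(get_order(12288) != 2);	/* 1 << 2 == 4 pages == 16 KiB */

so an allocation at the returned order can overshoot the requested size by nearly 2x, hence the re-check against max_mem before allocating.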
@@ -165,13 +167,14 @@ static int kho_test_generate_data(struct kho_test_state *state)
err_free_folios:
for (int i = 0; i < state->nr_folios; i++)
folio_put(state->folios[i]);
+ state->nr_folios = 0;
return -ENOMEM;
}
static int kho_test_save(void)
{
struct kho_test_state *state = &kho_test_state;
- struct folio **folios __free(kvfree) = NULL;
+ struct folio **folios;
unsigned long max_nr;
int err;
@@ -185,13 +188,23 @@ static int kho_test_save(void)
err = kho_test_generate_data(state);
if (err)
- return err;
+ goto err_free_folios;
err = kho_test_prepare_fdt(state);
if (err)
- return err;
+ goto err_free_folios;
- return register_kho_notifier(&kho_test_nb);
+ err = register_kho_notifier(&kho_test_nb);
+ if (err)
+ goto err_free_fdt;
+
+ return 0;
+
+err_free_fdt:
+ folio_put(state->fdt);
+err_free_folios:
+ kvfree(folios);
+ return err;
}
static int kho_test_restore_data(const void *fdt, int node)
@@ -291,6 +304,7 @@ static void kho_test_cleanup(void)
folio_put(kho_test_state.folios[i]);
kvfree(kho_test_state.folios);
+ folio_put(kho_test_state.fdt);
}
static void __exit kho_test_exit(void)
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index 14fbbee32046..a182e48b5f5e 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -3429,7 +3429,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, mas.last != 0x1500);
MT_BUG_ON(mt, !mas_is_active(&mas));
- /* find: start ->active on value */;
+ /* find: start ->active on value */
mas_set(&mas, 1200);
entry = mas_find(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != ptr);
diff --git a/lib/xarray.c b/lib/xarray.c
index ae3d80f4b4ee..9a8b4916540c 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -370,7 +370,7 @@ static void *xas_alloc(struct xa_state *xas, unsigned int shift)
if (node) {
xas->xa_alloc = NULL;
} else {
- gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN;
+ gfp_t gfp = GFP_NOWAIT;
if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
gfp |= __GFP_ACCOUNT;
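The dropped flag is redundant rather than a behaviour change: GFP_NOWAIT already includes __GFP_NOWARN in current kernels, per its definition in include/linux/gfp_types.h:

	#define GFP_NOWAIT	(__GFP_KSWAPD_RECLAIM | __GFP_NOWARN)

so xas_alloc() still won't warn when the nowait allocation fails.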