Diffstat (limited to 'lib/stackdepot.c')
 lib/stackdepot.c | 85 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 73 insertions(+), 12 deletions(-)
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 5ed34cc963fc..de0b0025af2b 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -36,11 +36,11 @@
#include <linux/memblock.h>
#include <linux/kasan-enabled.h>
-#define DEPOT_POOLS_CAP 8192
-/* The pool_index is offset by 1 so the first record does not have a 0 handle. */
-#define DEPOT_MAX_POOLS \
- (((1LL << (DEPOT_POOL_INDEX_BITS)) - 1 < DEPOT_POOLS_CAP) ? \
- (1LL << (DEPOT_POOL_INDEX_BITS)) - 1 : DEPOT_POOLS_CAP)
+/*
+ * The pool_index is offset by 1 so the first record does not have a 0 handle.
+ */
+static unsigned int stack_max_pools __read_mostly =
+ MIN((1LL << DEPOT_POOL_INDEX_BITS) - 1, 8192);
static bool stack_depot_disabled;
static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
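For intuition, here is a small standalone C demo of the clamp above (the value 16 for DEPOT_POOL_INDEX_BITS is a hypothetical stand-in; the real constant is derived from the handle bit layout in stackdepot.c):

    #include <stdio.h>

    #define DEPOT_POOL_INDEX_BITS 16          /* hypothetical stand-in */
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
            /* Index 0 is reserved (pool_index is offset by 1), so only
             * 2^16 - 1 = 65535 pools are addressable with 16 bits. */
            long long addressable = (1LL << DEPOT_POOL_INDEX_BITS) - 1;
            unsigned int def = MIN(addressable, 8192);

            printf("addressable=%lld default=%u\n", addressable, def);
            return 0;
    }

The change turns the former compile-time DEPOT_MAX_POOLS constant into a runtime default that later code reads from stack_max_pools.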
@@ -62,7 +62,7 @@ static unsigned int stack_bucket_number_order;
static unsigned int stack_hash_mask;
/* Array of memory regions that store stack records. */
-static void *stack_pools[DEPOT_MAX_POOLS];
+static void **stack_pools;
/* Newly allocated pool that is not yet added to stack_pools. */
static void *new_pool;
/* Number of pools in stack_pools. */
@@ -101,6 +101,34 @@ static int __init disable_stack_depot(char *str)
}
early_param("stack_depot_disable", disable_stack_depot);
+static int __init parse_max_pools(char *str)
+{
+ const long long limit = (1LL << (DEPOT_POOL_INDEX_BITS)) - 1;
+ unsigned int max_pools;
+ int rv;
+
+ rv = kstrtouint(str, 0, &max_pools);
+ if (rv)
+ return rv;
+
+ if (max_pools < 1024) {
+ pr_err("stack_depot_max_pools below 1024, using default of %u\n",
+ stack_max_pools);
+ goto out;
+ }
+
+ if (max_pools > limit) {
+ pr_err("stack_depot_max_pools exceeds %lld, using default of %u\n",
+ limit, stack_max_pools);
+ goto out;
+ }
+
+ stack_max_pools = max_pools;
+out:
+ return 0;
+}
+early_param("stack_depot_max_pools", parse_max_pools);
+
void __init stack_depot_request_early_init(void)
{
/* Too late to request early init now. */
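With the handler registered via early_param(), the cap becomes a kernel command-line knob. As a usage sketch (the value is illustrative; it must lie between 1024 and the handle-encodable limit):

    stack_depot_max_pools=16384

Out-of-range values are reported via pr_err() and replaced by the default; note that the handler still returns 0 in that case, so boot continues with the default cap rather than failing.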
@@ -182,6 +210,17 @@ int __init stack_depot_early_init(void)
}
init_stack_table(entries);
+ pr_info("allocating space for %u stack pools via memblock\n",
+ stack_max_pools);
+ stack_pools =
+ memblock_alloc(stack_max_pools * sizeof(void *), PAGE_SIZE);
+ if (!stack_pools) {
+ pr_err("stack pools allocation failed, disabling\n");
+ memblock_free(stack_table, entries * sizeof(struct list_head));
+ stack_depot_disabled = true;
+ return -ENOMEM;
+ }
+
return 0;
}
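The early-init path runs during early boot, before the buddy allocator is ready, so the pointer array has to come from memblock. A minimal sketch of the same allocate-or-disable pattern (function and variable names are illustrative, not from the patch):

    #include <linux/memblock.h>

    static void **pool_ptrs;

    static int __init early_alloc_pool_ptrs(unsigned int nr)
    {
            /* one pointer slot per potential pool, PAGE_SIZE-aligned */
            pool_ptrs = memblock_alloc(nr * sizeof(void *), PAGE_SIZE);
            if (!pool_ptrs)
                    return -ENOMEM;   /* caller disables stack depot */
            return 0;
    }

Note that the failure path in the patch also unwinds the already-allocated hash table with memblock_free(), since a half-initialized depot would be worse than a cleanly disabled one.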
@@ -231,6 +270,16 @@ int stack_depot_init(void)
stack_hash_mask = entries - 1;
init_stack_table(entries);
+ pr_info("allocating space for %u stack pools via kvcalloc\n",
+ stack_max_pools);
+ stack_pools = kvcalloc(stack_max_pools, sizeof(void *), GFP_KERNEL);
+ if (!stack_pools) {
+ pr_err("stack pools allocation failed, disabling\n");
+ kvfree(stack_table);
+ stack_depot_disabled = true;
+ ret = -ENOMEM;
+ }
+
out_unlock:
mutex_unlock(&stack_depot_init_mutex);
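The late-init path can use the regular allocators instead. Even at the default cap the array is 8192 * sizeof(void *) = 64 KiB on a 64-bit kernel, and a boot-time override can push it much higher, hence kvcalloc(), which attempts kmalloc and falls back to vmalloc for large sizes. A sketch with illustrative names:

    #include <linux/slab.h>

    static void **pool_ptrs;

    static int late_alloc_pool_ptrs(unsigned int nr)
    {
            /* zeroed slots mark pools that have not been allocated yet */
            pool_ptrs = kvcalloc(nr, sizeof(void *), GFP_KERNEL);
            return pool_ptrs ? 0 : -ENOMEM;
    }

On failure the hash table is released with kvfree() and the depot is disabled, mirroring the early path.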
@@ -245,9 +294,9 @@ static bool depot_init_pool(void **prealloc)
{
lockdep_assert_held(&pool_lock);
- if (unlikely(pools_num >= DEPOT_MAX_POOLS)) {
+ if (unlikely(pools_num >= stack_max_pools)) {
/* Bail out if we reached the pool limit. */
- WARN_ON_ONCE(pools_num > DEPOT_MAX_POOLS); /* should never happen */
+ WARN_ON_ONCE(pools_num > stack_max_pools); /* should never happen */
WARN_ON_ONCE(!new_pool); /* to avoid unnecessary pre-allocation */
WARN_ONCE(1, "Stack depot reached limit capacity");
return false;
@@ -273,7 +322,7 @@ static bool depot_init_pool(void **prealloc)
* NULL; do not reset to NULL if we have reached the maximum number of
* pools.
*/
- if (pools_num < DEPOT_MAX_POOLS)
+ if (pools_num < stack_max_pools)
WRITE_ONCE(new_pool, NULL);
else
WRITE_ONCE(new_pool, STACK_DEPOT_POISON);
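The STACK_DEPOT_POISON assignment is a sentinel-pointer idiom: NULL means "refill the preallocation", the poison value means "capacity reached, stop preallocating", and any accidental dereference of the sentinel faults loudly instead of corrupting memory. A generic sketch of the idiom, with illustrative names:

    #include <linux/compiler.h>   /* WRITE_ONCE */

    #define DEMO_POISON ((void *)0xCAFEBAB0)   /* never a valid pointer */

    static void *prealloc_slot;

    static void retire_prealloc(bool at_capacity)
    {
            /* NULL: refill wanted; poison: permanently full, do not refill */
            WRITE_ONCE(prealloc_slot, at_capacity ? DEMO_POISON : NULL);
    }

Readers can then cheaply distinguish "needs refill" from "permanently full" without a separate flag.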
@@ -591,7 +640,8 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
depot_stack_handle_t handle = 0;
struct page *page = NULL;
void *prealloc = NULL;
- bool can_alloc = depot_flags & STACK_DEPOT_FLAG_CAN_ALLOC;
+ bool allow_spin = gfpflags_allow_spinning(alloc_flags);
+ bool can_alloc = (depot_flags & STACK_DEPOT_FLAG_CAN_ALLOC) && allow_spin;
unsigned long flags;
u32 hash;
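gfpflags_allow_spinning() comes from the trylock-friendly page-allocation work; the assumption here is that a gfp mask carrying no reclaim bits signals a caller (e.g., a BPF or tracing path) that may hold raw locks and therefore must not spin. A hedged caller-side sketch (the call site and gfp choice are illustrative, not from the patch):

    #include <linux/stackdepot.h>

    depot_stack_handle_t handle;

    handle = stack_depot_save_flags(entries, nr_entries,
                                    0 /* gfp: no reclaim bits -> no spinning */,
                                    STACK_DEPOT_FLAG_CAN_ALLOC);
    /* CAN_ALLOC is neutralized internally because allow_spin is false */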
@@ -630,7 +680,15 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
prealloc = page_address(page);
}
- raw_spin_lock_irqsave(&pool_lock, flags);
+ if (in_nmi() || !allow_spin) {
+ /* We can never allocate in NMI context. */
+ WARN_ON_ONCE(can_alloc);
+ /* Best effort; bail if we fail to take the lock. */
+ if (!raw_spin_trylock_irqsave(&pool_lock, flags))
+ goto exit;
+ } else {
+ raw_spin_lock_irqsave(&pool_lock, flags);
+ }
printk_deferred_enter();
/* Try to find again, to avoid concurrently inserting duplicates. */
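The trylock dance is the standard pattern for NMI-safe acquisition of a lock that is also taken from ordinary contexts: an NMI may have interrupted the lock holder on the same CPU, so blocking there would deadlock. A minimal sketch of the pattern (names are illustrative):

    #include <linux/spinlock.h>
    #include <linux/hardirq.h>   /* in_nmi() */

    static DEFINE_RAW_SPINLOCK(demo_lock);

    static bool demo_record(void)
    {
            unsigned long flags;

            if (in_nmi()) {
                    /* best effort: bail out rather than deadlock */
                    if (!raw_spin_trylock_irqsave(&demo_lock, flags))
                            return false;
            } else {
                    raw_spin_lock_irqsave(&demo_lock, flags);
            }
            /* ... critical section ... */
            raw_spin_unlock_irqrestore(&demo_lock, flags);
            return true;
    }

The patch applies the same best-effort rule to non-spinning callers, not just NMI context.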
@@ -663,7 +721,10 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
exit:
if (prealloc) {
/* Stack depot didn't use this memory, free it. */
- free_pages((unsigned long)prealloc, DEPOT_POOL_ORDER);
+ if (!allow_spin)
+ free_pages_nolock(virt_to_page(prealloc), DEPOT_POOL_ORDER);
+ else
+ free_pages((unsigned long)prealloc, DEPOT_POOL_ORDER);
}
if (found)
handle = found->handle.handle;
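free_pages_nolock() is the recently added non-spinning counterpart of free_pages(). Note the interface asymmetry: it takes a struct page, while free_pages() takes the virtual address, hence the virt_to_page() conversion on prealloc (which was produced by page_address() earlier in the function). A minimal sketch of the pairing (the wrapper function is illustrative):

    #include <linux/gfp.h>
    #include <linux/mm.h>   /* virt_to_page */

    static void demo_free_prealloc(void *prealloc, bool allow_spin)
    {
            if (!allow_spin)
                    /* lock-free variant, safe where spinning is forbidden */
                    free_pages_nolock(virt_to_page(prealloc), DEPOT_POOL_ORDER);
            else
                    /* normal variant, takes the virtual address */
                    free_pages((unsigned long)prealloc, DEPOT_POOL_ORDER);
    }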