Diffstat (limited to 'lib/stackdepot.c')
-rw-r--r--  lib/stackdepot.c | 29
1 file changed, 17 insertions(+), 12 deletions(-)
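
In short, the patch below lets stack_depot_save_flags() degrade gracefully in contexts that must not spin: preallocation is skipped when the gfp flags forbid spinning, the open-coded gfp masking is replaced by gfp_nested_mask(), pool_lock is taken with a trylock (bailing out on contention) in NMI or non-spinning contexts, and an unused preallocation is returned via free_pages_nolock() rather than free_pages(). A hedged userspace sketch of the trylock pattern follows the diff.
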
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index cd8f23455285..73d7b50924ef 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -591,7 +591,8 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
 	depot_stack_handle_t handle = 0;
 	struct page *page = NULL;
 	void *prealloc = NULL;
-	bool can_alloc = depot_flags & STACK_DEPOT_FLAG_CAN_ALLOC;
+	bool allow_spin = gfpflags_allow_spinning(alloc_flags);
+	bool can_alloc = (depot_flags & STACK_DEPOT_FLAG_CAN_ALLOC) && allow_spin;
 	unsigned long flags;
 	u32 hash;
 
@@ -624,20 +625,21 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
 	 * we won't be able to do that under the lock.
 	 */
 	if (unlikely(can_alloc && !READ_ONCE(new_pool))) {
-		/*
-		 * Zero out zone modifiers, as we don't have specific zone
-		 * requirements. Keep the flags related to allocation in atomic
-		 * contexts, I/O, nolockdep.
-		 */
-		alloc_flags &= ~GFP_ZONEMASK;
-		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL | __GFP_NOLOCKDEP);
-		alloc_flags |= __GFP_NOWARN;
-		page = alloc_pages(alloc_flags, DEPOT_POOL_ORDER);
+		page = alloc_pages(gfp_nested_mask(alloc_flags),
+				   DEPOT_POOL_ORDER);
 		if (page)
 			prealloc = page_address(page);
 	}
 
-	raw_spin_lock_irqsave(&pool_lock, flags);
+	if (in_nmi() || !allow_spin) {
+		/* We can never allocate in NMI context. */
+		WARN_ON_ONCE(can_alloc);
+		/* Best effort; bail if we fail to take the lock. */
+		if (!raw_spin_trylock_irqsave(&pool_lock, flags))
+			goto exit;
+	} else {
+		raw_spin_lock_irqsave(&pool_lock, flags);
+	}
 	printk_deferred_enter();
 
 	/* Try to find again, to avoid concurrently inserting duplicates. */
@@ -670,7 +672,10 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
 exit:
 	if (prealloc) {
 		/* Stack depot didn't use this memory, free it. */
-		free_pages((unsigned long)prealloc, DEPOT_POOL_ORDER);
+		if (!allow_spin)
+			free_pages_nolock(virt_to_page(prealloc), DEPOT_POOL_ORDER);
+		else
+			free_pages((unsigned long)prealloc, DEPOT_POOL_ORDER);
 	}
 	if (found)
 		handle = found->handle.handle;
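
To make the "best effort" locking concrete, here is a minimal userspace C sketch of the pattern the second hunk introduces. It is an illustration only: pthread_spinlock_t stands in for the kernel's raw pool_lock, record_event() and the events counter are hypothetical stand-ins for the depot insertion, and the allow_spin argument mirrors what gfpflags_allow_spinning() reports for the caller's gfp flags.

/* Sketch of "best effort" locking: callers that must not spin try the
 * lock once and bail on contention instead of waiting for it.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_spinlock_t pool_lock;	/* stand-in for stackdepot's pool_lock */
static unsigned long events;		/* stand-in for the depot state */

static bool record_event(bool allow_spin)
{
	if (allow_spin) {
		/* Normal context: it is fine to spin until we own the lock. */
		pthread_spin_lock(&pool_lock);
	} else if (pthread_spin_trylock(&pool_lock) != 0) {
		/* Best effort; bail if we fail to take the lock. */
		return false;
	}
	events++;			/* critical section: the insertion */
	pthread_spin_unlock(&pool_lock);
	return true;
}

int main(void)
{
	pthread_spin_init(&pool_lock, PTHREAD_PROCESS_PRIVATE);
	printf("spinning caller stored: %s\n",
	       record_event(true) ? "yes" : "no");
	printf("non-spinning caller stored: %s\n",
	       record_event(false) ? "yes" : "no");
	pthread_spin_destroy(&pool_lock);
	return 0;
}

The same asymmetry explains the exit path in the last hunk: a caller that was not allowed to spin on pool_lock must not take the spinning free path either, so its unused preallocation goes back through free_pages_nolock() instead of free_pages().
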