author	Oscar Salvador <osalvador@suse.de>	2024-03-15 23:26:10 +0100
committer	Andrew Morton <akpm@linux-foundation.org>	2024-03-26 11:07:20 -0700
commit	7844c01472119f55bd9a107a4578a6d26be04c46 (patch)
tree	171a46b83e3dcc7aae5e787cf17170edeea49317 /mm
parent	329003246617dc52064a2dd9be7496c7a186bdac (diff)
mm,page_owner: fix recursion
Prior to 217b2119b9e2 ("mm,page_owner: implement the tracking of the stacks count"), the only place where page_owner could potentially go into recursion due to its need to allocate more memory was save_stack(), which ends up calling into stackdepot code with the possibility of allocating memory. We made sure to guard against that by signaling that the current task was already in page_owner code, so if a recursion attempt was made, we could catch it and return dummy_handle.

After the above commit, a new place was introduced in page_owner code where we could allocate memory, meaning we could go into recursion should we take that path. Make sure to signal that we are in page_owner in that codepath as well. Move the guard code into two helpers, {un}set_current_in_page_owner(), and use them prior to calling into the two functions that might allocate memory.

Link: https://lkml.kernel.org/r/20240315222610.6870-1-osalvador@suse.de
Signed-off-by: Oscar Salvador <osalvador@suse.de>
Fixes: 217b2119b9e2 ("mm,page_owner: implement the tracking of the stacks count")
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Marco Elver <elver@google.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
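As a side note, the guard described above boils down to a per-task re-entrancy flag. The sketch below is a minimal userspace rendition of the same pattern, not the kernel code itself: a thread-local variable stands in for task_struct's ->in_page_owner, and names such as save_stack_like() and allocate_tracked_memory() are made up for illustration. It builds with a plain C compiler (gcc/clang, which provide __thread).

#include <stdio.h>

#define DUMMY_HANDLE	0
#define REAL_HANDLE	42

/* Stand-in for task_struct->in_page_owner: one flag per task/thread. */
static __thread int in_page_owner;

static void set_current_in_page_owner(void)
{
	in_page_owner = 1;
}

static void unset_current_in_page_owner(void)
{
	in_page_owner = 0;
}

static int save_stack_like(void);

/*
 * Stand-in for an allocation that is itself tracked, the way a stack depot
 * or kmalloc() allocation can re-enter page_owner in the kernel.
 */
static void allocate_tracked_memory(void)
{
	save_stack_like();	/* hits the guard, returns DUMMY_HANDLE */
}

static int save_stack_like(void)
{
	int handle;

	if (in_page_owner)	/* re-entered from our own allocation */
		return DUMMY_HANDLE;

	set_current_in_page_owner();
	allocate_tracked_memory();	/* would recurse forever without the flag */
	handle = REAL_HANDLE;
	unset_current_in_page_owner();

	return handle;
}

int main(void)
{
	printf("handle = %d\n", save_stack_like());	/* prints: handle = 42 */
	return 0;
}

The actual commit applies the same bracketing around the stack depot call in save_stack() and the kmalloc() call in add_stack_record_to_list(), as the diff below shows.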
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_owner.c	33
1 file changed, 23 insertions, 10 deletions
diff --git a/mm/page_owner.c b/mm/page_owner.c
index e7139952ffd9..d17d1351ec84 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -54,6 +54,22 @@ static depot_stack_handle_t early_handle;
 
 static void init_early_allocated_pages(void);
 
+static inline void set_current_in_page_owner(void)
+{
+	/*
+	 * Avoid recursion.
+	 *
+	 * We might need to allocate more memory from page_owner code, so make
+	 * sure to signal it in order to avoid recursion.
+	 */
+	current->in_page_owner = 1;
+}
+
+static inline void unset_current_in_page_owner(void)
+{
+	current->in_page_owner = 0;
+}
+
 static int __init early_page_owner_param(char *buf)
 {
 	int ret = kstrtobool(buf, &page_owner_enabled);
@@ -133,23 +149,16 @@ static noinline depot_stack_handle_t save_stack(gfp_t flags)
 	depot_stack_handle_t handle;
 	unsigned int nr_entries;
 
-	/*
-	 * Avoid recursion.
-	 *
-	 * Sometimes page metadata allocation tracking requires more
-	 * memory to be allocated:
-	 * - when new stack trace is saved to stack depot
-	 */
 	if (current->in_page_owner)
 		return dummy_handle;
-	current->in_page_owner = 1;
+	set_current_in_page_owner();
 
 	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
 	handle = stack_depot_save(entries, nr_entries, flags);
 	if (!handle)
 		handle = failure_handle;
+	unset_current_in_page_owner();
 
-	current->in_page_owner = 0;
 	return handle;
 }
 
@@ -164,9 +173,13 @@ static void add_stack_record_to_list(struct stack_record *stack_record,
 	gfp_mask &= (GFP_ATOMIC | GFP_KERNEL);
 	gfp_mask |= __GFP_NOWARN;
 
+	set_current_in_page_owner();
 	stack = kmalloc(sizeof(*stack), gfp_mask);
-	if (!stack)
+	if (!stack) {
+		unset_current_in_page_owner();
 		return;
+	}
+	unset_current_in_page_owner();
 
 	stack->stack_record = stack_record;
 	stack->next = NULL;