Diffstat (limited to 'mm/percpu-vm.c')
-rw-r--r--  mm/percpu-vm.c  49
1 file changed, 40 insertions(+), 9 deletions(-)
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index d8078de912de..4f5937090590 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -1,14 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* mm/percpu-vm.c - vmalloc area based chunk allocation
*
* Copyright (C) 2010 SUSE Linux Products GmbH
* Copyright (C) 2010 Tejun Heo <tj@kernel.org>
*
- * This file is released under the GPLv2.
- *
* Chunks are mapped into vmalloc areas and populated page by page.
* This is the default chunk allocator.
*/
+#include "internal.h"
static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
unsigned int cpu, int page_idx)
@@ -134,7 +134,7 @@ static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
{
- unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);
+ vunmap_range_noflush(addr, addr + (nr_pages << PAGE_SHIFT));
}
/**
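[Note] Both this hunk and the next one replace the old size-based vmalloc helpers with their range-based successors: unmap_kernel_range_noflush() took a start address and a length in bytes, while vunmap_range_noflush() takes a start and an exclusive end address, hence the addr + (nr_pages << PAGE_SHIFT) arithmetic. A minimal sketch of that conversion, using a hypothetical helper name that is not part of the patch:

/* Hypothetical helper, illustration only: the exclusive end address of a
 * run of nr_pages base pages starting at addr, as expected by the
 * range-based *_noflush interfaces used in these hunks. */
static inline unsigned long example_range_end(unsigned long addr, int nr_pages)
{
        return addr + ((unsigned long)nr_pages << PAGE_SHIFT);
}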
@@ -193,8 +193,8 @@ static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
static int __pcpu_map_pages(unsigned long addr, struct page **pages,
int nr_pages)
{
- return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
- PAGE_KERNEL, pages);
+ return vmap_pages_range_noflush(addr, addr + (nr_pages << PAGE_SHIFT),
+ PAGE_KERNEL, pages, PAGE_SHIFT);
}
/**
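[Note] On the mapping side, vmap_pages_range_noflush() also takes a page_shift argument; passing PAGE_SHIFT, as done above, maps the pages array as individual order-0 pages rather than as larger mappings. A hedged usage sketch with hypothetical names (unit_addr, unit_pages); the _noflush suffix means the caller stays responsible for cache maintenance, which pcpu_map_pages() handles afterwards via pcpu_post_map_flush():

/* Illustration only: map nr base pages at unit_addr with kernel
 * permissions; no cache or TLB maintenance is done here. */
static int example_map_unit(unsigned long unit_addr, struct page **unit_pages,
                            int nr)
{
        return vmap_pages_range_noflush(unit_addr,
                                        unit_addr + ((unsigned long)nr << PAGE_SHIFT),
                                        PAGE_KERNEL, unit_pages, PAGE_SHIFT);
}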
@@ -231,10 +231,10 @@ static int pcpu_map_pages(struct pcpu_chunk *chunk,
return 0;
err:
for_each_possible_cpu(tcpu) {
- if (tcpu == cpu)
- break;
__pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
page_end - page_start);
+ if (tcpu == cpu)
+ break;
}
pcpu_post_unmap_tlb_flush(chunk, page_start, page_end);
return err;
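[Note] The reordering above makes the cleanup loop unmap the failing CPU's range as well, presumably because a failed __pcpu_map_pages() can leave that range partially mapped. An annotated copy of the corrected pattern, with explanatory comments added here:

for_each_possible_cpu(tcpu) {
        /* undo the mapping for every CPU up to and including the failing one */
        __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
                           page_end - page_start);
        if (tcpu == cpu)
                break;  /* CPUs after the failing one were never mapped */
}
/* the unmap above is a _noflush variant, so flush stale TLB entries now */
pcpu_post_unmap_tlb_flush(chunk, page_start, page_end);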
@@ -303,6 +303,9 @@ static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
* For each cpu, depopulate and unmap pages [@page_start,@page_end)
* from @chunk.
*
+ * The caller is required to call pcpu_post_unmap_tlb_flush() if it does not
+ * return the region back to vmalloc(), which would otherwise flush the TLB
+ * lazily.
+ *
* CONTEXT:
* pcpu_alloc_mutex.
*/
@@ -324,8 +327,6 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
pcpu_unmap_pages(chunk, pages, page_start, page_end);
- /* no need to flush tlb, vmalloc will handle it lazily */
-
pcpu_free_pages(chunk, pages, page_start, page_end);
}
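[Note] The new requirement documented above is easiest to see from the caller's side: when the depopulated pages are kept by percpu rather than handed back to vmalloc(), no lazy vmalloc flush will ever cover them, so the caller must flush explicitly. A hedged sketch of that calling pattern; example_reclaim_region() is a hypothetical name, not a function added by this patch:

/* Hypothetical caller, illustration only: depopulate a page range but keep
 * the pages, so the TLB has to be flushed here instead of relying on
 * vmalloc()'s lazy flushing. */
static void example_reclaim_region(struct pcpu_chunk *chunk,
                                   int page_start, int page_end)
{
        pcpu_depopulate_chunk(chunk, page_start, page_end);
        pcpu_post_unmap_tlb_flush(chunk, page_start, page_end);
}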
@@ -377,3 +378,33 @@ static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai)
/* no extra restriction */
return 0;
}
+
+/**
+ * pcpu_should_reclaim_chunk - determine if a chunk should go into reclaim
+ * @chunk: chunk of interest
+ *
+ * This is the entry point for percpu reclaim. If a chunk qualifies, it is then
+ * isolated and managed in separate lists at the back of pcpu_slot: sidelined
+ * and to_depopulate respectively. The to_depopulate list holds chunks slated
+ * for depopulation. They no longer contribute to pcpu_nr_empty_pop_pages once
+ * they are on this list. Once depopulated, they are moved onto the sidelined
+ * list, which enables them to be pulled back in for allocation if no other
+ * chunk can satisfy the allocation.
+ */
+static bool pcpu_should_reclaim_chunk(struct pcpu_chunk *chunk)
+{
+ /* do not reclaim either the first chunk or reserved chunk */
+ if (chunk == pcpu_first_chunk || chunk == pcpu_reserved_chunk)
+ return false;
+
+ /*
+ * If it is isolated, it may be on the sidelined list so move it back to
+ * the to_depopulate list. If at least 1/4 of the chunk's pages are empty AND
+ * there is no system-wide shortage of empty pages aside from this
+ * chunk, move it to the to_depopulate list.
+ */
+ return ((chunk->isolated && chunk->nr_empty_pop_pages) ||
+ (pcpu_nr_empty_pop_pages >
+ (PCPU_EMPTY_POP_PAGES_HIGH + chunk->nr_empty_pop_pages) &&
+ chunk->nr_empty_pop_pages >= chunk->nr_pages / 4));
+}
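
[Note] A standalone sketch of the heuristic above, runnable in userspace. It assumes PCPU_EMPTY_POP_PAGES_HIGH is 4 (its value in mm/percpu.c at the time) and uses a simplified stand-in struct with made-up numbers; the first-chunk/reserved-chunk check is omitted:

#include <stdbool.h>
#include <stdio.h>

#define PCPU_EMPTY_POP_PAGES_HIGH 4     /* assumed kernel value */

struct chunk {                          /* simplified stand-in for pcpu_chunk */
        bool isolated;
        int nr_empty_pop_pages;
        int nr_pages;
};

static int nr_empty_pop_pages = 12;     /* assumed system-wide count */

static bool should_reclaim(const struct chunk *c)
{
        /* isolated chunks with empty pages go straight back to to_depopulate */
        if (c->isolated && c->nr_empty_pop_pages)
                return true;
        /* otherwise: no system-wide shortage AND >= 1/4 of the chunk empty */
        return nr_empty_pop_pages >
                       PCPU_EMPTY_POP_PAGES_HIGH + c->nr_empty_pop_pages &&
               c->nr_empty_pop_pages >= c->nr_pages / 4;
}

int main(void)
{
        struct chunk c = { .isolated = false, .nr_empty_pop_pages = 4,
                           .nr_pages = 16 };

        /* 12 > 4 + 4 and 4 >= 16 / 4, so the chunk is slated for depopulation */
        printf("reclaim: %s\n", should_reclaim(&c) ? "yes" : "no");
        return 0;
}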