author     Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>  2020-06-08 12:39:01 +0530
committer  Michael Ellerman <mpe@ellerman.id.au>  2020-09-15 22:13:21 +1000
commit     2a32abac8860aa1c3a1fc99973ce67179575b36c (patch)
tree       a80e57da000b20d44bc584dc125cb5f2d6a37135 /arch/powerpc/kernel/setup_64.c
parent     ac234524056da4e0c081f682da3ea25cdaab737a (diff)
powerpc/percpu: Update percpu bootmem allocator
This updates the ppc64 version to be closer to x86/sparc.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200608070904.387440-1-aneesh.kumar@linux.ibm.com
Diffstat (limited to 'arch/powerpc/kernel/setup_64.c')
-rw-r--r--  arch/powerpc/kernel/setup_64.c  45
1 file changed, 37 insertions(+), 8 deletions(-)
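In short, the new allocator prefers memory on the CPU's home node and falls back to memory from any node when that node is offline or has no memory attached, matching the behaviour x86 and sparc have used for years. The snippet below is a minimal userspace sketch of that decision pattern only; node_has_memory(), alloc_on_node() and alloc_anywhere() are hypothetical stand-ins introduced for illustration, while the real code in the diff below uses node_online(), NODE_DATA() and the memblock allocator.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the kernel's node checks and memblock calls. */
static bool node_has_memory(int node) { return node >= 0; }

static void *alloc_on_node(size_t size, size_t align, int node)
{
	(void)node;			/* no real NUMA placement here */
	return aligned_alloc(align, size);
}

static void *alloc_anywhere(size_t size, size_t align)
{
	return aligned_alloc(align, size);
}

/*
 * Prefer the CPU's home node, fall back to any node if it is unusable,
 * and report which path was taken: the same shape as pcpu_alloc_bootmem()
 * in the diff below.
 */
static void *percpu_alloc_sketch(int cpu, int node, size_t size, size_t align)
{
	void *ptr;

	if (!node_has_memory(node)) {
		ptr = alloc_anywhere(size, align);
		printf("cpu %d has no usable node %d, allocated anywhere\n",
		       cpu, node);
	} else {
		ptr = alloc_on_node(size, align, node);
		printf("cpu %d: %zu bytes on node %d\n", cpu, size, node);
	}
	return ptr;
}

int main(void)
{
	/* cpu 0 on a node with memory, cpu 1 whose node is unusable */
	free(percpu_alloc_sketch(0, 0, 4096, 64));
	free(percpu_alloc_sketch(1, -1, 4096, 64));
	return 0;
}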
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 6be430107c6f..55bbbf89ea82 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -756,17 +756,46 @@ void __init emergency_stack_init(void)
 }
 
 #ifdef CONFIG_SMP
-#define PCPU_DYN_SIZE ()
-
-static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
+/**
+ * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
+ * @cpu: cpu to allocate for
+ * @size: size allocation in bytes
+ * @align: alignment
+ *
+ * Allocate @size bytes aligned at @align for cpu @cpu. This wrapper
+ * does the right thing for NUMA regardless of the current
+ * configuration.
+ *
+ * RETURNS:
+ * Pointer to the allocated area on success, NULL on failure.
+ */
+static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
+					size_t align)
 {
-	return memblock_alloc_try_nid(size, align, __pa(MAX_DMA_ADDRESS),
-				      MEMBLOCK_ALLOC_ACCESSIBLE,
-				      early_cpu_to_node(cpu));
+	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+	int node = early_cpu_to_node(cpu);
+	void *ptr;
+	if (!node_online(node) || !NODE_DATA(node)) {
+		ptr = memblock_alloc_from(size, align, goal);
+		pr_info("cpu %d has no node %d or node-local memory\n",
+			cpu, node);
+		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
+			 cpu, size, __pa(ptr));
+	} else {
+		ptr = memblock_alloc_try_nid(size, align, goal,
+					     MEMBLOCK_ALLOC_ACCESSIBLE, node);
+		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
+			 "%016lx\n", cpu, size, node, __pa(ptr));
+	}
+	return ptr;
+#else
+	return memblock_alloc_from(size, align, goal);
+#endif
 }
 
-static void __init pcpu_fc_free(void *ptr, size_t size)
+static void __init pcpu_free_bootmem(void *ptr, size_t size)
 {
 	memblock_free(__pa(ptr), size);
 }
 
@@ -801,7 +830,7 @@ void __init setup_per_cpu_areas(void)
 		atom_size = 1 << 20;
 
 	rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
-				    pcpu_fc_alloc, pcpu_fc_free);
+				    pcpu_alloc_bootmem, pcpu_free_bootmem);
 	if (rc < 0)
 		panic("cannot initialize percpu area (err=%d)", rc);
 