Diffstat (limited to 'mm')
-rw-r--r--  mm/memblock.c     8
-rw-r--r--  mm/page_alloc.c   6
-rw-r--r--  mm/percpu.c      38
-rw-r--r--  mm/sparse.c       3
4 files changed, 26 insertions(+), 29 deletions(-)
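Before this series, memblock's allocators quietly substituted SMP_CACHE_BYTES
whenever a caller passed align == 0; this commit drops that implicit fallback
and makes every caller spell the alignment out. A minimal before/after sketch
of the calling convention (hypothetical caller, not taken from the tree):

	/* before: 0 meant "use the default cache-line alignment" */
	ptr = memblock_alloc(size, 0);

	/* after: the caller states the alignment explicitly */
	ptr = memblock_alloc(size, SMP_CACHE_BYTES);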
diff --git a/mm/memblock.c b/mm/memblock.c
index c655342569f8..839531133816 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1247,9 +1247,6 @@ static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
{
phys_addr_t found;

- if (!align)
- align = SMP_CACHE_BYTES;
-
found = memblock_find_in_range_node(size, align, start, end, nid,
flags);
if (found && !memblock_reserve(found, size)) {
@@ -1343,8 +1340,6 @@ phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t ali
* The allocation is performed from memory region limited by
* memblock.current_limit if @max_addr == %MEMBLOCK_ALLOC_ACCESSIBLE.
*
- * The memory block is aligned on %SMP_CACHE_BYTES if @align == 0.
- *
* The phys address of allocated boot memory block is converted to virtual and
* allocated memory is reset to 0.
*
@@ -1374,9 +1369,6 @@ static void * __init memblock_alloc_internal(
if (WARN_ON_ONCE(slab_is_available()))
return kzalloc_node(size, GFP_NOWAIT, nid);

- if (!align)
- align = SMP_CACHE_BYTES;
-
if (max_addr > memblock.current_limit)
max_addr = memblock.current_limit;
again:
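With the fallback gone, an align of 0 would reach the range search unchanged,
and the kernel's rounding macros do not tolerate a zero alignment. As
round_down() was defined in include/linux/kernel.h at the time:

	/* round_down(x, y) expands to x & ~((typeof(x))((y) - 1)) */
	/* with y == 0 the mask inverts to 0, so round_down(addr, 0) */
	/* collapses to 0 for every addr -- hence the explicit alignment */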
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ef289fadec0e..a919ba5cb3c8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -7710,9 +7710,11 @@ void *__init alloc_large_system_hash(const char *tablename,
size = bucketsize << log2qty;
if (flags & HASH_EARLY) {
if (flags & HASH_ZERO)
- table = memblock_alloc_nopanic(size, 0);
+ table = memblock_alloc_nopanic(size,
+ SMP_CACHE_BYTES);
else
- table = memblock_alloc_raw(size, 0);
+ table = memblock_alloc_raw(size,
+ SMP_CACHE_BYTES);
} else if (hashdist) {
table = __vmalloc(size, gfp_flags, PAGE_KERNEL);
} else {
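alloc_large_system_hash() only takes the memblock path for boot-time tables
flagged HASH_EARLY, with HASH_ZERO additionally selecting the zeroing variant.
A hedged sketch of such an early caller (table name and sizes are made up):

	static unsigned int ex_shift, ex_mask;	/* assumed names */

	/* hypothetical early-boot hash table: 2^17 zeroed buckets */
	table = alloc_large_system_hash("example-hash",
					sizeof(struct hlist_head),
					1 << 17, 0,
					HASH_EARLY | HASH_ZERO,
					&ex_shift, &ex_mask, 0, 0);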
diff --git a/mm/percpu.c b/mm/percpu.c
index 61cdbb3b3736..a6b74c6fe0be 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1102,8 +1102,8 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
/* allocate chunk */
chunk = memblock_alloc(sizeof(struct pcpu_chunk) +
- BITS_TO_LONGS(region_size >> PAGE_SHIFT),
- 0);
+ BITS_TO_LONGS(region_size >> PAGE_SHIFT),
+ SMP_CACHE_BYTES);

INIT_LIST_HEAD(&chunk->list);
@@ -1114,12 +1114,12 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
chunk->nr_pages = region_size >> PAGE_SHIFT;
region_bits = pcpu_chunk_map_bits(chunk);

- chunk->alloc_map = memblock_alloc(BITS_TO_LONGS(region_bits) *
- sizeof(chunk->alloc_map[0]), 0);
- chunk->bound_map = memblock_alloc(BITS_TO_LONGS(region_bits + 1) *
- sizeof(chunk->bound_map[0]), 0);
- chunk->md_blocks = memblock_alloc(pcpu_chunk_nr_blocks(chunk) *
- sizeof(chunk->md_blocks[0]), 0);
+ chunk->alloc_map = memblock_alloc(BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]),
+ SMP_CACHE_BYTES);
+ chunk->bound_map = memblock_alloc(BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]),
+ SMP_CACHE_BYTES);
+ chunk->md_blocks = memblock_alloc(pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]),
+ SMP_CACHE_BYTES);
pcpu_init_md_blocks(chunk);

/* manage populated page bitmap */
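The three bitmaps above are sized in longs; BITS_TO_LONGS(n) is just
DIV_ROUND_UP(n, BITS_PER_LONG). A worked example with assumed values:

	/* 64-bit kernel: BITS_PER_LONG == 64 */
	/* region_bits == 100  ->  BITS_TO_LONGS(100) == 2 longs */
	/* so alloc_map needs 2 * sizeof(unsigned long) == 16 bytes, */
	/* now requested with an explicit SMP_CACHE_BYTES alignment */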
@@ -2075,12 +2075,14 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);

/* process group information and build config tables accordingly */
- group_offsets = memblock_alloc(ai->nr_groups *
- sizeof(group_offsets[0]), 0);
- group_sizes = memblock_alloc(ai->nr_groups *
- sizeof(group_sizes[0]), 0);
- unit_map = memblock_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0);
- unit_off = memblock_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0);
+ group_offsets = memblock_alloc(ai->nr_groups * sizeof(group_offsets[0]),
+ SMP_CACHE_BYTES);
+ group_sizes = memblock_alloc(ai->nr_groups * sizeof(group_sizes[0]),
+ SMP_CACHE_BYTES);
+ unit_map = memblock_alloc(nr_cpu_ids * sizeof(unit_map[0]),
+ SMP_CACHE_BYTES);
+ unit_off = memblock_alloc(nr_cpu_ids * sizeof(unit_off[0]),
+ SMP_CACHE_BYTES);

for (cpu = 0; cpu < nr_cpu_ids; cpu++)
unit_map[cpu] = UINT_MAX;
@@ -2144,8 +2146,8 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
* empty chunks.
*/
pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
- pcpu_slot = memblock_alloc(
- pcpu_nr_slots * sizeof(pcpu_slot[0]), 0);
+ pcpu_slot = memblock_alloc(pcpu_nr_slots * sizeof(pcpu_slot[0]),
+ SMP_CACHE_BYTES);
for (i = 0; i < pcpu_nr_slots; i++)
INIT_LIST_HEAD(&pcpu_slot[i]);
@@ -2458,7 +2460,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));

- areas = memblock_alloc_nopanic(areas_size, 0);
+ areas = memblock_alloc_nopanic(areas_size, SMP_CACHE_BYTES);
if (!areas) {
rc = -ENOMEM;
goto out_free;
@@ -2599,7 +2601,7 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
/* unaligned allocations can't be freed, round up to page size */
pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
sizeof(pages[0]));
- pages = memblock_alloc(pages_size, 0);
+ pages = memblock_alloc(pages_size, SMP_CACHE_BYTES);

/* allocate pages */
j = 0;
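As the comment in the hunk above notes, a memblock allocation that may later
be freed has to be page-granular, so pages_size is rounded with PFN_ALIGN(),
which rounds up to the next page boundary. For example, with 4 KiB pages:

	/* PFN_ALIGN(x) == ((x) + PAGE_SIZE - 1) & PAGE_MASK */
	/* PFN_ALIGN(1)    == 4096 */
	/* PFN_ALIGN(4096) == 4096 */
	/* PFN_ALIGN(4097) == 8192 */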
diff --git a/mm/sparse.c b/mm/sparse.c
index ab2ac45e0440..33307fc05c4d 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -68,7 +68,8 @@ static noinline struct mem_section __ref *sparse_index_alloc(int nid)
if (slab_is_available())
section = kzalloc_node(array_size, GFP_KERNEL, nid);
else
- section = memblock_alloc_node(array_size, 0, nid);
+ section = memblock_alloc_node(array_size, SMP_CACHE_BYTES,
+ nid);

return section;
}
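sparse_index_alloc() illustrates the early/late split this series has to
respect: once the slab allocator is up, kzalloc_node() serves the request;
before that, only memblock can. A minimal sketch of the same pattern as a
hypothetical helper (not from the tree):

	static void * __init early_or_slab_alloc(size_t size, int nid)
	{
		if (slab_is_available())
			return kzalloc_node(size, GFP_KERNEL, nid);
		/* boot-time path: the alignment is now always explicit */
		return memblock_alloc_node(size, SMP_CACHE_BYTES, nid);
	}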