author	Mike Rapoport <rppt@linux.vnet.ibm.com>	2018-07-23 08:56:55 +0300
committer	Tony Luck <tony.luck@intel.com>	2018-07-23 10:29:55 -0700
commit	1ecd64abb5e462cfbb675d71a6d33fa1463945ea (patch)
tree	90661d29e8d30561f0fb297b4ef9a678252c7cc6 /arch/ia64/mm/contig.c
parent	d72e90f33aa4709ebecc5005562f52335e106a60 (diff)
ia64: contig/paging_init: reduce code duplication
The FLATMEM version of paging_init() has a call to free_area_init_nodes() at the end of every branch of its 'if' and 'ifdef' statements. Let's call this function once, outside the 'ifdef' and 'if' statements, instead.

Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
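For readers skimming the change, here is a condensed sketch of how the tail of the FLATMEM paging_init() ends up looking after this patch. It is derived from the diff below; the body of the 'else' branch (virtual mem_map allocation) is elided and only the control flow around the hoisted call is shown:

#ifdef CONFIG_VIRTUAL_MEM_MAP
        efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
        if (max_gap < LARGE_GAP) {
                vmem_map = (struct page *) 0;
        } else {
                /* ... allocate and set up the virtual mem_map ... */
                printk("Virtual mem_map starts at 0x%p\n", mem_map);
        }
#else /* !CONFIG_VIRTUAL_MEM_MAP */
        memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);
#endif /* !CONFIG_VIRTUAL_MEM_MAP */
        free_area_init_nodes(max_zone_pfns);    /* now called exactly once */
        zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));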
Diffstat (limited to 'arch/ia64/mm/contig.c')
-rw-r--r--	arch/ia64/mm/contig.c	4
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 7d64b30913d1..1835144268ec 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -248,7 +248,6 @@ paging_init (void)
         efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
         if (max_gap < LARGE_GAP) {
                 vmem_map = (struct page *) 0;
-                free_area_init_nodes(max_zone_pfns);
         } else {
                 unsigned long map_size;
 
@@ -266,13 +265,12 @@ paging_init (void)
                  */
                 NODE_DATA(0)->node_mem_map = vmem_map +
                         find_min_pfn_with_active_regions();
-                free_area_init_nodes(max_zone_pfns);
 
                 printk("Virtual mem_map starts at 0x%p\n", mem_map);
         }
 #else /* !CONFIG_VIRTUAL_MEM_MAP */
         memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);
-        free_area_init_nodes(max_zone_pfns);
 #endif /* !CONFIG_VIRTUAL_MEM_MAP */
+        free_area_init_nodes(max_zone_pfns);
         zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
 }