Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 17 +++++++++++++++--
 1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8d7b4424b645..8c6b4ba62ac1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1219,7 +1219,8 @@ static void __free_pages_ok(struct page *page, unsigned int order,
 	__count_vm_events(PGFREE, 1 << order);
 }
 
-void __free_pages_core(struct page *page, unsigned int order)
+void __free_pages_core(struct page *page, unsigned int order,
+		enum meminit_context context)
 {
 	unsigned int nr_pages = 1 << order;
 	struct page *p = page;
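
Note: with the new parameter, every caller of __free_pages_core() now has to state which init context it is freeing pages from. Below is a minimal sketch of the two call-site shapes, assuming the usual callers (memblock_free_pages() for boot-time init, generic_online_page() for memory onlining) with their bodies trimmed to the relevant call; the real call-site updates live elsewhere in this commit and are not shown on this page.

/* From include/linux/mmzone.h: */
enum meminit_context {
	MEMINIT_EARLY,		/* boot-time memmap init (memblock) */
	MEMINIT_HOTPLUG,	/* memory added at runtime */
};

/* Boot path (sketch): memblock accounts totalram_pages() itself. */
void __init memblock_free_pages(struct page *page, unsigned long pfn,
				unsigned int order)
{
	__free_pages_core(page, order, MEMINIT_EARLY);
}

/* Hotplug path (sketch): accounting now happens inside
 * __free_pages_core() via adjust_managed_page_count().
 */
static void generic_online_page(struct page *page, unsigned int order)
{
	__free_pages_core(page, order, MEMINIT_HOTPLUG);
}
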
@@ -1239,7 +1240,19 @@ void __free_pages_core(struct page *page, unsigned int order)
 	__ClearPageReserved(p);
 	set_page_count(p, 0);
 
-	atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
+	if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG) &&
+	    unlikely(context == MEMINIT_HOTPLUG)) {
+		/*
+		 * Freeing the page with debug_pagealloc enabled will try to
+		 * unmap it; some archs don't like double-unmappings, so
+		 * map it first.
+		 */
+		debug_pagealloc_map_pages(page, nr_pages);
+		adjust_managed_page_count(page, nr_pages);
+	} else {
+		/* memblock adjusts totalram_pages() manually. */
+		atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
+	}
 
 	if (page_contains_unaccepted(page, order)) {
 		if (order == MAX_PAGE_ORDER && __free_unaccepted(page))
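
Note: the branch above exists because the two contexts account pages differently. During early boot, memblock adds all freed ranges to totalram_pages() in bulk, so this path may only touch zone->managed_pages; hotplugged pages have no such backstop, so both counters must be bumped here. For reference, a sketch of adjust_managed_page_count() as it appears elsewhere in this file; this is quoted from my reading of the same kernel tree, so treat the exact body as an assumption.

void adjust_managed_page_count(struct page *page, long count)
{
	/* Per-zone accounting, same counter the early path updates. */
	atomic_long_add(count, &page_zone(page)->managed_pages);
	/* Global accounting that memblock otherwise handles at boot. */
	totalram_pages_add(count);
#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page))
		totalhigh_pages_add(count);
#endif
}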