Diffstat (limited to 'include/linux/memory_hotplug.h'):
-rw-r--r--  include/linux/memory_hotplug.h | 367
1 file changed, 182 insertions, 185 deletions
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index c8a5056a5ae0..f2f16cdd73ee 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H
@@ -10,63 +11,70 @@ struct page;
struct zone;
struct pglist_data;
struct mem_section;
-struct memory_block;
+struct memory_group;
struct resource;
+struct vmem_altmap;
+struct dev_pagemap;
#ifdef CONFIG_MEMORY_HOTPLUG
-/*
- * Return page for the valid pfn only if the page is online. All pfn
- * walkers which rely on the fully initialized page->flags and others
- * should use this rather than pfn_valid && pfn_to_page
- */
-#define pfn_to_online_page(pfn) \
-({ \
- struct page *___page = NULL; \
- unsigned long ___nr = pfn_to_section_nr(pfn); \
- \
- if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr))\
- ___page = pfn_to_page(pfn); \
- ___page; \
-})
-
-/*
- * Types for free bootmem stored in page->lru.next. These have to be in
- * some random range in unsigned long space for debugging purposes.
- */
-enum {
- MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
- SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
- MIX_SECTION_INFO,
- NODE_INFO,
- MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
-};
+struct page *pfn_to_online_page(unsigned long pfn);
/* Types for controlling the zone type of onlined and offlined memory */
enum {
- MMOP_OFFLINE = -1,
- MMOP_ONLINE_KEEP,
+ /* Offline the memory. */
+ MMOP_OFFLINE = 0,
+ /* Online the memory. Zone depends; see default_zone_for_pfn(). */
+ MMOP_ONLINE,
+ /* Online the memory to ZONE_NORMAL. */
MMOP_ONLINE_KERNEL,
+ /* Online the memory to ZONE_MOVABLE. */
MMOP_ONLINE_MOVABLE,
};
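
For orientation: MMOP_ONLINE defers the zone choice to the kernel's default policy, while the _KERNEL/_MOVABLE variants force ZONE_NORMAL or ZONE_MOVABLE. A minimal sketch of that mapping, for illustration only (the real policy lives in zone_for_pfn_range(), declared further below):

/* Illustrative only; not the in-tree policy. */
static struct zone *example_zone_for(int online_type, int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);

	switch (online_type) {
	case MMOP_ONLINE_KERNEL:
		return &pgdat->node_zones[ZONE_NORMAL];
	case MMOP_ONLINE_MOVABLE:
		return &pgdat->node_zones[ZONE_MOVABLE];
	default:
		return NULL;	/* MMOP_ONLINE: defer to default_zone_for_pfn() */
	}
}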
+/* Flags for add_memory() and friends to specify memory hotplug details. */
+typedef int __bitwise mhp_t;
+
+/* No special request */
+#define MHP_NONE ((__force mhp_t)0)
/*
- * pgdat resizing functions
+ * Allow merging of the added System RAM resource with adjacent,
+ * mergeable resources. After a successful call to add_memory_resource()
+ * with this flag set, the resource pointer must no longer be used as it
+ * might be stale, or the resource might have changed.
*/
-static inline
-void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
-{
- spin_lock_irqsave(&pgdat->node_size_lock, *flags);
-}
-static inline
-void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
-{
- spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
-}
-static inline
-void pgdat_resize_init(struct pglist_data *pgdat)
-{
- spin_lock_init(&pgdat->node_size_lock);
-}
+#define MHP_MERGE_RESOURCE ((__force mhp_t)BIT(0))
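
A hedged driver-side sketch of why the flag's stale-pointer rule matters (helper name illustrative):

/* On success the resource may have been merged and freed: drop the pointer. */
static int example_add_merged(int nid, struct resource *res)
{
	int rc = add_memory_resource(nid, res, MHP_MERGE_RESOURCE);

	if (!rc)
		res = NULL;	/* must not be dereferenced again */
	return rc;
}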
+
+/*
+ * We want the memmap (struct page array) to be self-contained.
+ * To do so, we will use the beginning of the hot-added range to build
+ * the page tables for the memmap array that describes the entire range.
+ * Only selected architectures support it with SPARSE_VMEMMAP.
+ * This is only a hint; the core kernel can decide not to do this based
+ * on different alignment checks.
+ */
+#define MHP_MEMMAP_ON_MEMORY ((__force mhp_t)BIT(1))
+/*
+ * The nid field specifies a memory group id (mgid) instead. The memory group
+ * implies the node id (nid).
+ */
+#define MHP_NID_IS_MGID ((__force mhp_t)BIT(2))
+
+/*
+ * Extended parameters for memory hotplug:
+ * altmap: alternative allocator for memmap array (optional)
+ * pgprot: page protection flags to apply to newly created page tables
+ * (required)
+ * pgmap: reference to the device page map of the memory being added,
+ * if it is backed by device memory (optional)
+ */
+struct mhp_params {
+ struct vmem_altmap *altmap;
+ pgprot_t pgprot;
+ struct dev_pagemap *pgmap;
+};
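
A minimal initialization sketch, assuming an ordinary hot-add where the memmap comes from the buddy allocator (no altmap) and no device pagemap is involved:

struct mhp_params params = {
	.pgprot = PAGE_KERNEL,	/* required: protection for new page tables */
	/* .altmap and .pgmap stay NULL for plain System RAM */
};

Such a params block is what then flows down through add_pages()/arch_add_memory().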
+
+bool mhp_range_allowed(u64 start, u64 size, bool need_mapping);
+struct range mhp_get_pluggable_range(bool need_mapping);
+bool mhp_supports_memmap_on_memory(void);
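
Taken together, these helpers let a caller vet a range and build hotplug flags before attempting the add; a hedged sketch (names and error code illustrative):

static int example_prepare(u64 start, u64 size, mhp_t *mhp_flags)
{
	/* System RAM needs a linear mapping, hence need_mapping = true. */
	if (!mhp_range_allowed(start, size, true))
		return -ERANGE;

	*mhp_flags = MHP_NONE;
	/* MHP_MEMMAP_ON_MEMORY is only a hint; check support up front. */
	if (mhp_supports_memmap_on_memory())
		*mhp_flags |= MHP_MEMMAP_ON_MEMORY;
	return 0;
}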
+
/*
* Zone resizing functions
*
@@ -94,27 +102,32 @@ static inline void zone_seqlock_init(struct zone *zone)
{
seqlock_init(&zone->span_seqlock);
}
-extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
-extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
-extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
+extern void adjust_present_page_count(struct page *page,
+ struct memory_group *group,
+ long nr_pages);
/* VM interface that may be used by the firmware interface */
-extern int online_pages(unsigned long, unsigned long, int);
-extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
- unsigned long *valid_start, unsigned long *valid_end);
-extern void __offline_isolated_pages(unsigned long, unsigned long);
+extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
+ struct zone *zone);
+extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages);
+extern int online_pages(unsigned long pfn, unsigned long nr_pages,
+ struct zone *zone, struct memory_group *group);
+extern unsigned long __offline_isolated_pages(unsigned long start_pfn,
+ unsigned long end_pfn);
-typedef void (*online_page_callback_t)(struct page *page);
+typedef void (*online_page_callback_t)(struct page *page, unsigned int order);
+extern void generic_online_page(struct page *page, unsigned int order);
extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);
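
The callback now delivers whole high-order chunks; a hedged sketch of a driver override in the virtio-mem/hv_balloon style (names illustrative), which would be registered with set_online_page_callback() and undone via restore_online_page_callback():

static void example_online_page(struct page *page, unsigned int order)
{
	/* A driver could hold pages back here for its own bookkeeping. */
	generic_online_page(page, order);	/* release to the buddy */
}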
-extern void __online_page_set_limits(struct page *page);
-extern void __online_page_increment_counters(struct page *page);
-extern void __online_page_free(struct page *page);
-
extern int try_online_node(int nid);
-extern bool memhp_auto_online;
+extern int arch_add_memory(int nid, u64 start, u64 size,
+ struct mhp_params *params);
+extern u64 max_mem_size;
+
+extern int mhp_online_type_from_str(const char *str);
+
/* If movable_node boot option specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
@@ -122,105 +135,46 @@ static inline bool movable_node_is_enabled(void)
return movable_node_enabled;
}
-#ifdef CONFIG_MEMORY_HOTREMOVE
-extern bool is_pageblock_removable_nolock(struct page *page);
-extern int arch_remove_memory(u64 start, u64 size);
-extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
- unsigned long nr_pages);
-#endif /* CONFIG_MEMORY_HOTREMOVE */
+extern void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap);
+extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
+ struct vmem_altmap *altmap);
/* reasonably generic interface to expand the physical pages */
-extern int __add_pages(int nid, unsigned long start_pfn,
- unsigned long nr_pages, bool want_memblock);
+extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
+ struct mhp_params *params);
-#ifdef CONFIG_NUMA
-extern int memory_add_physaddr_to_nid(u64 start);
-#else
-static inline int memory_add_physaddr_to_nid(u64 start)
+#ifndef CONFIG_ARCH_HAS_ADD_PAGES
+static inline int add_pages(int nid, unsigned long start_pfn,
+ unsigned long nr_pages, struct mhp_params *params)
{
- return 0;
+ return __add_pages(nid, start_pfn, nr_pages, params);
}
-#endif
-
-#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
-/*
- * For supporting node-hotadd, we have to allocate a new pgdat.
- *
- * If an arch has generic style NODE_DATA(),
- * node_data[nid] = kzalloc() works well. But it depends on the architecture.
- *
- * In general, generic_alloc_nodedata() is used.
- * Now, arch_free_nodedata() is just defined for error path of node_hot_add.
- *
- */
-extern pg_data_t *arch_alloc_nodedata(int nid);
-extern void arch_free_nodedata(pg_data_t *pgdat);
-extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
+#else /* ARCH_HAS_ADD_PAGES */
+int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
+ struct mhp_params *params);
+#endif /* ARCH_HAS_ADD_PAGES */
-#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
-
-#define arch_alloc_nodedata(nid) generic_alloc_nodedata(nid)
-#define arch_free_nodedata(pgdat) generic_free_nodedata(pgdat)
+void get_online_mems(void);
+void put_online_mems(void);
-#ifdef CONFIG_NUMA
-/*
- * If ARCH_HAS_NODEDATA_EXTENSION=n, this func is used to allocate pgdat.
- * XXX: kmalloc_node() can't work well to get new node's memory at this time.
- * Because, pgdat for the new node is not allocated/initialized yet itself.
- * To use new node's memory, more consideration will be necessary.
- */
-#define generic_alloc_nodedata(nid) \
-({ \
- kzalloc(sizeof(pg_data_t), GFP_KERNEL); \
-})
-/*
- * This definition is just for error path in node hotadd.
- * For node hotremove, we have to replace this.
- */
-#define generic_free_nodedata(pgdat) kfree(pgdat)
+void mem_hotplug_begin(void);
+void mem_hotplug_done(void);
-extern pg_data_t *node_data[];
-static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
+/* See kswapd_is_running() */
+static inline void pgdat_kswapd_lock(pg_data_t *pgdat)
{
- node_data[nid] = pgdat;
+ mutex_lock(&pgdat->kswapd_lock);
}
-#else /* !CONFIG_NUMA */
-
-/* never called */
-static inline pg_data_t *generic_alloc_nodedata(int nid)
-{
- BUG();
- return NULL;
-}
-static inline void generic_free_nodedata(pg_data_t *pgdat)
+static inline void pgdat_kswapd_unlock(pg_data_t *pgdat)
{
+ mutex_unlock(&pgdat->kswapd_lock);
}
-static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
-{
-}
-#endif /* CONFIG_NUMA */
-#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
-#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
-extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
-#else
-static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
+static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat)
{
+ mutex_init(&pgdat->kswapd_lock);
}
-#endif
-extern void put_page_bootmem(struct page *page);
-extern void get_page_bootmem(unsigned long ingo, struct page *page,
- unsigned long type);
-
-void get_online_mems(void);
-void put_online_mems(void);
-
-void mem_hotplug_begin(void);
-void mem_hotplug_done(void);
-
-extern void set_zone_contiguous(struct zone *zone);
-extern void clear_zone_contiguous(struct zone *zone);
#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn) \
@@ -231,13 +185,6 @@ extern void clear_zone_contiguous(struct zone *zone);
___page; \
})
-/*
- * Stub functions for when hotplug is off
- */
-static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
-static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
-static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
-
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
return 0;
@@ -250,17 +197,6 @@ static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}
-static inline int mhp_notimplemented(const char *func)
-{
- printk(KERN_WARNING "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func);
- dump_stack();
- return -ENOSYS;
-}
-
-static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
-{
-}
-
static inline int try_online_node(int nid)
{
return 0;
@@ -276,49 +212,110 @@ static inline bool movable_node_is_enabled(void)
{
return false;
}
+
+static inline bool mhp_supports_memmap_on_memory(void)
+{
+ return false;
+}
+
+static inline void pgdat_kswapd_lock(pg_data_t *pgdat) {}
+static inline void pgdat_kswapd_unlock(pg_data_t *pgdat) {}
+static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat) {}
#endif /* ! CONFIG_MEMORY_HOTPLUG */
+/*
+ * Keep this declaration outside CONFIG_MEMORY_HOTPLUG as some
+ * platforms might override and use arch_get_mappable_range()
+ * for internal, non-memory-hotplug purposes.
+ */
+struct range arch_get_mappable_range(void);
+
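A hedged sketch of what a platform override might look like (hypothetical architecture; the bound shown is illustrative):

/* Hypothetical arch override: allow anything the physical address space covers. */
struct range arch_get_mappable_range(void)
{
	return (struct range) {
		.start = 0,
		.end   = (1ULL << MAX_PHYSMEM_BITS) - 1,
	};
}
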
+#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
+/*
+ * pgdat resizing functions
+ */
+static inline
+void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
+{
+ spin_lock_irqsave(&pgdat->node_size_lock, *flags);
+}
+static inline
+void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
+{
+ spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
+}
+static inline
+void pgdat_resize_init(struct pglist_data *pgdat)
+{
+ spin_lock_init(&pgdat->node_size_lock);
+}
+#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
+/*
+ * Stub functions for when hotplug is off
+ */
+static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
+static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
+static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
+#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
+
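A minimal usage sketch, assuming a hot-add path that grows a node's span (helper name illustrative):

static void example_grow_span(pg_data_t *pgdat, unsigned long nr_pages)
{
	unsigned long flags;

	pgdat_resize_lock(pgdat, &flags);	/* IRQ-safe node-span update */
	pgdat->node_spanned_pages += nr_pages;
	pgdat_resize_unlock(pgdat, &flags);
}
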
#ifdef CONFIG_MEMORY_HOTREMOVE
-extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
extern void try_offline_node(int nid);
-extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
-extern void remove_memory(int nid, u64 start, u64 size);
+extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
+ struct zone *zone, struct memory_group *group);
+extern int remove_memory(u64 start, u64 size);
+extern void __remove_memory(u64 start, u64 size);
+extern int offline_and_remove_memory(u64 start, u64 size);
#else
-static inline bool is_mem_section_removable(unsigned long pfn,
- unsigned long nr_pages)
-{
- return false;
-}
-
static inline void try_offline_node(int nid) {}
-static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
+static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
+ struct zone *zone, struct memory_group *group)
{
return -EINVAL;
}
-static inline void remove_memory(int nid, u64 start, u64 size) {}
+static inline int remove_memory(u64 start, u64 size)
+{
+ return -EBUSY;
+}
+
+static inline void __remove_memory(u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */
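
A hedged unplug sketch: offline_and_remove_memory() bundles both steps and may legitimately fail, e.g. when pages are pinned (helper name illustrative):

static int example_unplug(u64 start, u64 size)
{
	int rc = offline_and_remove_memory(start, size);

	if (rc)
		pr_warn("unplug of [%#llx, %#llx) failed: %d\n",
			start, start + size, rc);
	return rc;
}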
-extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
- void *arg, int (*func)(struct memory_block *, void *));
-extern int add_memory(int nid, u64 start, u64 size);
-extern int add_memory_resource(int nid, struct resource *resource, bool online);
-extern int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock);
+#ifdef CONFIG_MEMORY_HOTPLUG
+/* Default online_type (MMOP_*) when new memory blocks are added. */
+extern int mhp_get_default_online_type(void);
+extern void mhp_set_default_online_type(int online_type);
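
A sketch wiring a policy string to the default online type, roughly what the sysfs/module-parameter plumbing does (helper name illustrative):

static int example_set_policy(const char *str)
{
	const int online_type = mhp_online_type_from_str(str);

	if (online_type < 0)	/* unrecognized policy string */
		return -EINVAL;

	mhp_set_default_online_type(online_type);
	return 0;
}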
+extern void __ref free_area_init_core_hotplug(struct pglist_data *pgdat);
+extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
+extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
+extern int add_memory_resource(int nid, struct resource *resource,
+ mhp_t mhp_flags);
+extern int add_memory_driver_managed(int nid, u64 start, u64 size,
+ const char *resource_name,
+ mhp_t mhp_flags);
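
A hedged driver-side sketch in the dax/kmem style (resource name and helper illustrative):

static int example_hotadd_driver_managed(int nid, u64 start, u64 size)
{
	/* Appears as "System RAM (example)" in /proc/iomem. */
	return add_memory_driver_managed(nid, start, size,
					 "System RAM (example)",
					 MHP_MERGE_RESOURCE);
}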
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
- unsigned long nr_pages);
-extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
-extern bool is_memblock_offlined(struct memory_block *mem);
-extern void remove_memory(int nid, u64 start, u64 size);
-extern int sparse_add_one_section(struct pglist_data *pgdat, unsigned long start_pfn);
-extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
- unsigned long map_offset);
+ unsigned long nr_pages,
+ struct vmem_altmap *altmap, int migratetype,
+ bool isolate_pageblock);
+extern void remove_pfn_range_from_zone(struct zone *zone,
+ unsigned long start_pfn,
+ unsigned long nr_pages);
+extern int sparse_add_section(int nid, unsigned long pfn,
+ unsigned long nr_pages, struct vmem_altmap *altmap,
+ struct dev_pagemap *pgmap);
+extern void sparse_remove_section(unsigned long pfn, unsigned long nr_pages,
+ struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
unsigned long pnum);
-extern bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages,
- int online_type);
-extern struct zone *default_zone_for_pfn(int nid, unsigned long pfn,
+extern struct zone *zone_for_pfn_range(int online_type, int nid,
+ struct memory_group *group, unsigned long start_pfn,
unsigned long nr_pages);
+extern int arch_create_linear_mapping(int nid, u64 start, u64 size,
+ struct mhp_params *params);
+void arch_remove_linear_mapping(u64 start, u64 size);
+#endif /* CONFIG_MEMORY_HOTPLUG */
+
#endif /* __LINUX_MEMORY_HOTPLUG_H */