diff options
Diffstat (limited to 'mm/hugetlb_vmemmap.h')
-rw-r--r--	mm/hugetlb_vmemmap.h	83
1 file changed, 68 insertions(+), 15 deletions(-)
diff --git a/mm/hugetlb_vmemmap.h b/mm/hugetlb_vmemmap.h
index cb2bef8f9e73..18b490825215 100644
--- a/mm/hugetlb_vmemmap.h
+++ b/mm/hugetlb_vmemmap.h
@@ -1,45 +1,98 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Free some vmemmap pages of HugeTLB
+ * HugeTLB Vmemmap Optimization (HVO)
  *
- * Copyright (c) 2020, Bytedance. All rights reserved.
+ * Copyright (c) 2020, ByteDance. All rights reserved.
  *
  * Author: Muchun Song <songmuchun@bytedance.com>
  */
 #ifndef _LINUX_HUGETLB_VMEMMAP_H
 #define _LINUX_HUGETLB_VMEMMAP_H
 #include <linux/hugetlb.h>
+#include <linux/io.h>
+#include <linux/memblock.h>
 
-#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
-int alloc_huge_page_vmemmap(struct hstate *h, struct page *head);
-void free_huge_page_vmemmap(struct hstate *h, struct page *head);
-void hugetlb_vmemmap_init(struct hstate *h);
+/*
+ * Reserve one vmemmap page, all vmemmap addresses are mapped to it. See
+ * Documentation/mm/vmemmap_dedup.rst.
+ */
+#define HUGETLB_VMEMMAP_RESERVE_SIZE	PAGE_SIZE
+#define HUGETLB_VMEMMAP_RESERVE_PAGES	(HUGETLB_VMEMMAP_RESERVE_SIZE / sizeof(struct page))
+
+#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
+int hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio);
+long hugetlb_vmemmap_restore_folios(const struct hstate *h,
+			struct list_head *folio_list,
+			struct list_head *non_hvo_folios);
+void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio);
+void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list);
+void hugetlb_vmemmap_optimize_bootmem_folios(struct hstate *h, struct list_head *folio_list);
+#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT
+void hugetlb_vmemmap_init_early(int nid);
+void hugetlb_vmemmap_init_late(int nid);
+#endif
+
+
+static inline unsigned int hugetlb_vmemmap_size(const struct hstate *h)
+{
+	return pages_per_huge_page(h) * sizeof(struct page);
+}
 
 /*
- * How many vmemmap pages associated with a HugeTLB page that can be freed
- * to the buddy allocator.
+ * Return how many vmemmap size associated with a HugeTLB page that can be
+ * optimized and can be freed to the buddy allocator.
  */
-static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
+static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
 {
-	return h->nr_free_vmemmap_pages;
+	int size = hugetlb_vmemmap_size(h) - HUGETLB_VMEMMAP_RESERVE_SIZE;
+
+	if (!is_power_of_2(sizeof(struct page)))
+		return 0;
+	return size > 0 ? size : 0;
 }
 #else
-static inline int alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
+static inline int hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio)
+{
+	return 0;
+}
+
+static inline long hugetlb_vmemmap_restore_folios(const struct hstate *h,
+			struct list_head *folio_list,
+			struct list_head *non_hvo_folios)
 {
+	list_splice_init(folio_list, non_hvo_folios);
 	return 0;
 }
 
-static inline void free_huge_page_vmemmap(struct hstate *h, struct page *head)
+static inline void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio)
 {
 }
 
-static inline void hugetlb_vmemmap_init(struct hstate *h)
+static inline void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list)
 {
 }
 
-static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
+static inline void hugetlb_vmemmap_optimize_bootmem_folios(struct hstate *h,
+						struct list_head *folio_list)
+{
+}
+
+static inline void hugetlb_vmemmap_init_early(int nid)
+{
+}
+
+static inline void hugetlb_vmemmap_init_late(int nid)
+{
+}
+
+static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
 {
 	return 0;
 }
-#endif /* CONFIG_HUGETLB_PAGE_FREE_VMEMMAP */
+#endif /* CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP */
+
+static inline bool hugetlb_vmemmap_optimizable(const struct hstate *h)
+{
+	return hugetlb_vmemmap_optimizable_size(h) != 0;
+}
 #endif /* _LINUX_HUGETLB_VMEMMAP_H */
