author	Muchun Song <songmuchun@bytedance.com>	2022-04-28 23:16:15 -0700
committer	akpm <akpm@linux-foundation.org>	2022-04-28 23:16:15 -0700
commit	f10f1442c309ccef7a80ba3dc4abde0978e86fb4 (patch)
tree	2bc957f9e1d13a8f94a67f590f2729f7473b5ac4 /mm/hugetlb_vmemmap.c
parent	5981611d0a006472d367d7a8e6ead8afaecf17c7 (diff)
mm: hugetlb_vmemmap: cleanup hugetlb_free_vmemmap_enabled*
The word "free" is not expressive enough to describe the feature of optimizing vmemmap pages associated with each HugeTLB page, so rename this keyword to "optimize". In this patch, clean up the static key and hugetlb_free_vmemmap_enabled() to make the code more expressive.

Link: https://lkml.kernel.org/r/20220404074652.68024-3-songmuchun@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
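The rename pairs with a header-side counterpart that is not part of this diff. A minimal sketch of what the renamed declaration and its test helper would look like, assuming the kernel's DECLARE_STATIC_KEY_MAYBE()/static_branch_maybe() pattern for keys with a Kconfig-selected default (the exact header location is not shown here):

/* Sketch only: header-side counterpart of the rename, not taken from this patch. */
DECLARE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON,
			 hugetlb_optimize_vmemmap_key);

static __always_inline bool hugetlb_optimize_vmemmap_enabled(void)
{
	/* True when vmemmap optimization of HugeTLB pages is active. */
	return static_branch_maybe(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON,
				   &hugetlb_optimize_vmemmap_key);
}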
Diffstat (limited to 'mm/hugetlb_vmemmap.c')
-rw-r--r--  mm/hugetlb_vmemmap.c | 10
1 file changed, 5 insertions, 5 deletions
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index 91b79b9d9e25..f25294973398 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -189,8 +189,8 @@
 #define RESERVE_VMEMMAP_SIZE		(RESERVE_VMEMMAP_NR << PAGE_SHIFT)
 
 DEFINE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON,
-			hugetlb_free_vmemmap_enabled_key);
-EXPORT_SYMBOL(hugetlb_free_vmemmap_enabled_key);
+			hugetlb_optimize_vmemmap_key);
+EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);
 
 static int __init hugetlb_vmemmap_early_param(char *buf)
 {
@@ -204,9 +204,9 @@ static int __init hugetlb_vmemmap_early_param(char *buf)
 		return -EINVAL;
 
 	if (!strcmp(buf, "on"))
-		static_branch_enable(&hugetlb_free_vmemmap_enabled_key);
+		static_branch_enable(&hugetlb_optimize_vmemmap_key);
 	else if (!strcmp(buf, "off"))
-		static_branch_disable(&hugetlb_free_vmemmap_enabled_key);
+		static_branch_disable(&hugetlb_optimize_vmemmap_key);
 	else
 		return -EINVAL;
 
@@ -282,7 +282,7 @@ void __init hugetlb_vmemmap_init(struct hstate *h)
 	BUILD_BUG_ON(__NR_USED_SUBPAGE >=
 		     RESERVE_VMEMMAP_SIZE / sizeof(struct page));
 
-	if (!hugetlb_free_vmemmap_enabled())
+	if (!hugetlb_optimize_vmemmap_enabled())
 		return;
 
 	vmemmap_pages = (nr_pages * sizeof(struct page)) >> PAGE_SHIFT;
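DEFINE_STATIC_KEY_MAYBE() in the first hunk makes the key default to true only when CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON is set; the early-parameter handler in the second hunk can still flip it either way at boot via hugetlb_free_vmemmap=on|off. A hypothetical caller sketch (the helper name below is illustrative, not from this patch) showing how the renamed interface would typically be consumed:

/* Hypothetical illustration: a caller that must avoid writing to the remapped
 * tail struct pages while the vmemmap optimization is active. */
static bool can_write_tail_struct_pages(void)
{
	return !hugetlb_optimize_vmemmap_enabled();
}

In this version the key is only flipped from the boot-time handler above, so such a check is stable after early boot.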