author    Kefeng Wang <wangkefeng.wang@huawei.com>  2023-08-02 09:27:31 +0800
committer Andrew Morton <akpm@linux-foundation.org> 2023-08-21 13:37:40 -0700
commit    9cf6a060f95578c8147bdacdf55a1eaaa182ce49
tree      8e1a902b8137e7b7a4feac0ec34a8fe5ed05c052 /arch
parent    f720b471fdb35619402293dcd421761fb1942e27
arm64: hugetlb: enable __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
It is better to use the huge page size instead of PAGE_SIZE as the stride when
flushing hugepage mappings, which reduces the number of loop iterations in
__flush_tlb_range().  Let's support an arch-specific flush_hugetlb_tlb_range(),
which is used in hugetlb_unshare_all_pmds(), move_hugetlb_page_tables() and
hugetlb_change_protection() for now.

Note: for hugepages based on the contiguous bit, entries have to be
invalidated individually, since the contiguous PTE bit is just a hint and the
hardware may or may not take it into account.

Link: https://lkml.kernel.org/r/20230802012731.62512-1-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Barry Song <21cnbao@gmail.com>
Cc: Joel Fernandes (Google) <joel@joelfernandes.org>
Cc: Kalesh Singh <kaleshsingh@google.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Will Deacon <will@kernel.org>
Cc: William Kucharski <william.kucharski@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm64/include/asm/hugetlb.h  15
1 file changed, 15 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index 6a4a1ab8eb23..a91d6219aa78 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -60,4 +60,19 @@ extern void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
 
 #include <asm-generic/hugetlb.h>
 
+#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
+static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
+					   unsigned long start,
+					   unsigned long end)
+{
+	unsigned long stride = huge_page_size(hstate_vma(vma));
+
+	if (stride == PMD_SIZE)
+		__flush_tlb_range(vma, start, end, stride, false, 2);
+	else if (stride == PUD_SIZE)
+		__flush_tlb_range(vma, start, end, stride, false, 1);
+	else
+		__flush_tlb_range(vma, start, end, PAGE_SIZE, false, 0);
+}
+
 #endif /* __ASM_HUGETLB_H */
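
The saving the commit message describes can be illustrated outside the kernel.
Below is a minimal user-space sketch, not kernel code, assuming 4KiB base
pages and a 2MiB PMD-level hugepage; tlbi_ops() is a hypothetical helper that
only counts how many stride-sized steps (and thus TLB invalidations)
__flush_tlb_range() would take over the range:

/*
 * Sketch: __flush_tlb_range() steps through [start, end) in units of
 * 'stride' and issues one TLB invalidation per step, so flushing one
 * 2MiB PMD hugepage with a PAGE_SIZE stride costs 512 operations
 * versus a single operation with a PMD_SIZE stride.
 */
#include <stdio.h>

#define PAGE_SIZE (4UL << 10)	/* assuming 4KiB base pages */
#define PMD_SIZE  (2UL << 20)	/* one PMD-level hugepage: 2MiB */

static unsigned long tlbi_ops(unsigned long start, unsigned long end,
			      unsigned long stride)
{
	unsigned long ops = 0;
	unsigned long addr;

	for (addr = start; addr < end; addr += stride)
		ops++;	/* one invalidation per stride-sized step */
	return ops;
}

int main(void)
{
	unsigned long start = 0, end = PMD_SIZE;

	printf("stride=PAGE_SIZE: %lu invalidations\n",
	       tlbi_ops(start, end, PAGE_SIZE));	/* 512 */
	printf("stride=PMD_SIZE:  %lu invalidations\n",
	       tlbi_ops(start, end, PMD_SIZE));		/* 1 */
	return 0;
}

Note how the patch's else branch deliberately keeps the PAGE_SIZE stride (with
a level hint of 0, i.e. unknown) for contiguous-bit hugepages: as the commit
message says, the contiguous PTE bit is only a hint, so each entry has to be
invalidated individually.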