Diffstat (limited to 'arch/arm64/mm/copypage.c')
-rw-r--r--	arch/arm64/mm/copypage.c	57
1 file changed, 50 insertions(+), 7 deletions(-)
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
index 2ee7b73433a5..a86c897017df 100644
--- a/arch/arm64/mm/copypage.c
+++ b/arch/arm64/mm/copypage.c
@@ -6,21 +6,64 @@
  * Copyright (C) 2012 ARM Ltd.
  */
 
+#include <linux/bitops.h>
 #include <linux/mm.h>
 
 #include <asm/page.h>
 #include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
+#include <asm/mte.h>
 
-void __cpu_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+void copy_highpage(struct page *to, struct page *from)
 {
-	struct page *page = virt_to_page(kto);
+	void *kto = page_address(to);
+	void *kfrom = page_address(from);
+	struct folio *src = page_folio(from);
+	struct folio *dst = page_folio(to);
+	unsigned int i, nr_pages;
+
 	copy_page(kto, kfrom);
-	flush_dcache_page(page);
+
+	if (kasan_hw_tags_enabled())
+		page_kasan_tag_reset(to);
+
+	if (!system_supports_mte())
+		return;
+
+	if (folio_test_hugetlb(src)) {
+		if (!folio_test_hugetlb_mte_tagged(src) ||
+		    from != folio_page(src, 0))
+			return;
+
+		WARN_ON_ONCE(!folio_try_hugetlb_mte_tagging(dst));
+
+		/*
+		 * Populate tags for all subpages.
+		 *
+		 * Don't assume the first page is head page since
+		 * huge page copy may start from any subpage.
+		 */
+		nr_pages = folio_nr_pages(src);
+		for (i = 0; i < nr_pages; i++) {
+			kfrom = page_address(folio_page(src, i));
+			kto = page_address(folio_page(dst, i));
+			mte_copy_page_tags(kto, kfrom);
+		}
+		folio_set_hugetlb_mte_tagged(dst);
+	} else if (page_mte_tagged(from)) {
+		/* It's a new page, shouldn't have been tagged yet */
+		WARN_ON_ONCE(!try_page_mte_tagging(to));
+
+		mte_copy_page_tags(kto, kfrom);
+		set_page_mte_tagged(to);
+	}
 }
-EXPORT_SYMBOL_GPL(__cpu_copy_user_page);
+EXPORT_SYMBOL(copy_highpage);
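
What the copy_highpage() rework above is really about: copy_page() duplicates a page's data, but on MTE-capable hardware every 16-byte granule also carries a 4-bit allocation tag stored as separate metadata, and a plain data copy does not move those tags. Hence the explicit mte_copy_page_tags() pass, done once for an ordinary page and once per subpage for a hugetlb folio (walked from the head page, since the copy may be entered through any subpage). The userspace sketch below shows the same data/tag split through the documented PROT_MTE and prctl() interface. It is an illustration only, not kernel code: the constants are lifted from the kernel's memory-tagging-extension documentation, and it assumes an arm64 CPU and kernel with MTE plus a compiler invoked with -march=armv8.5-a+memtag.

/*
 * mte_copy_demo.c: userspace illustration (not kernel code) of why
 * copy_highpage() must call mte_copy_page_tags(): a byte copy moves
 * a page's data but not its MTE allocation tags. Build with:
 *   gcc -O2 -march=armv8.5-a+memtag mte_copy_demo.c
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/auxv.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#define HWCAP2_MTE		(1 << 18)	/* asm/hwcap.h */
#define PROT_MTE		0x20		/* asm/mman.h */
#define PR_SET_TAGGED_ADDR_CTRL	55		/* linux/prctl.h */
#define PR_TAGGED_ADDR_ENABLE	(1UL << 0)
#define PR_MTE_TCF_SYNC		(1UL << 1)
#define PR_MTE_TAG_SHIFT	3

#define GRANULE		16		/* one allocation tag per 16 bytes */
#define TAG_MASK	(0xfULL << 56)	/* tag lives in pointer bits 59:56 */

int main(void)
{
	size_t psz = 4096;

	if (!(getauxval(AT_HWCAP2) & HWCAP2_MTE))
		return puts("no MTE on this CPU"), 0;

	/* Enable tagged addresses, synchronous tag faults, tags 1-15. */
	if (prctl(PR_SET_TAGGED_ADDR_CTRL,
		  PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
		  (0xfffeUL << PR_MTE_TAG_SHIFT), 0, 0, 0))
		return 1;

	char *src = mmap(NULL, psz, PROT_READ | PROT_WRITE | PROT_MTE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	char *dst = mmap(NULL, psz, PROT_READ | PROT_WRITE | PROT_MTE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (src == MAP_FAILED || dst == MAP_FAILED)
		return 1;

	/* Tag every granule of src with one random non-zero tag: irg
	 * inserts a random tag into the pointer, stg writes that
	 * pointer's tag into the granule's tag storage. */
	uint64_t tsrc = (uint64_t)src;
	asm("irg %0, %0" : "+r"(tsrc));
	for (size_t off = 0; off < psz; off += GRANULE) {
		uint64_t p = tsrc + off;
		asm volatile("stg %0, [%0]" : : "r"(p) : "memory");
	}

	/* src must now be accessed via the tagged pointer. The copy
	 * moves the bytes only; dst's allocation tags are still 0. */
	memset((char *)tsrc, 0xaa, psz);
	memcpy(dst, (char *)tsrc, psz);

	/* The analogue of mte_copy_page_tags(), granule by granule:
	 * ldg reads a granule's allocation tag into pointer bits
	 * 59:56, stg stores those bits as the destination's tag. */
	for (size_t off = 0; off < psz; off += GRANULE) {
		uint64_t s = (uint64_t)src + off;
		asm("ldg %0, [%0]" : "+r"(s));
		uint64_t d = ((uint64_t)dst + off) | (s & TAG_MASK);
		asm volatile("stg %0, [%0]" : : "r"(d) : "memory");
	}

	printf("data and tags copied to %p\n", (void *)dst);
	return 0;
}

The last loop is the moral equivalent of mte_copy_page_tags(): without it, dst's granules keep tag 0 and any later access through a pointer carrying the source's tag would take a synchronous tag-check fault.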
 
-void __cpu_clear_user_page(void *kaddr, unsigned long vaddr)
+void copy_user_highpage(struct page *to, struct page *from,
+			unsigned long vaddr, struct vm_area_struct *vma)
 {
-	clear_page(kaddr);
+	copy_highpage(to, from);
+	flush_dcache_page(to);
 }
-EXPORT_SYMBOL_GPL(__cpu_clear_user_page);
+EXPORT_SYMBOL_GPL(copy_user_highpage);
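
The copy_user_highpage() half is thinner: it delegates to copy_highpage() and then calls flush_dcache_page() on the destination, the standard hook for keeping the data cache consistent with a page that is about to appear in a user mapping. Both arm64 definitions take effect because the architecture opts out of the kmap-based fallbacks in include/linux/highmem.h, which are only compiled when the corresponding __HAVE_ARCH_* macros are absent. An abridged sketch of the arm64 declarations, as they appear in arch/arm64/include/asm/page.h (verify against the tree you are reading):

/* Abridged sketch of arch/arm64/include/asm/page.h: defining these
 * macros tells include/linux/highmem.h to skip its generic
 * copy_highpage()/copy_user_highpage() fallbacks and use the arm64
 * implementations from copypage.c instead. */
struct page;
struct vm_area_struct;

void copy_highpage(struct page *to, struct page *from);
#define __HAVE_ARCH_COPY_HIGHPAGE

void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE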