author    Linus Torvalds <torvalds@linux-foundation.org>  2023-05-19 11:05:42 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2023-05-19 11:05:42 -0700
commit    4ffd96c9621fcec3b84f3f997e036cc7077ec465 (patch)
tree      9e184f7bfca42e53d7d2cfe88fd477270311d281 /arch
parent    46be92e58fa8868fc10854de94f270e1d58ec434 (diff)
parent    c4c597f1b367433c52c531dccd6859a39b4580fb (diff)
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "A mixture of compiler/static checker resolutions and a couple of MTE
  fixes:

   - Avoid erroneously marking untagged pages with PG_mte_tagged

   - Always reset KASAN tags for destination page in copy_page()

   - Mark PMU header functions 'static inline'

   - Fix some sparse warnings due to missing casts"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: mte: Do not set PG_mte_tagged if tags were not initialized
  arm64: Also reset KASAN tag if page is not PG_mte_tagged
  arm64: perf: Mark all accessor functions inline
  ARM: perf: Mark all accessor functions inline
  arm64: vdso: Pass (void *) to virt_to_page()
  arm64/mm: mark private VM_FAULT_X defines as vm_fault_t
Diffstat (limited to 'arch')
 arch/arm/include/asm/arm_pmuv3.h   | 6 +++---
 arch/arm64/include/asm/arm_pmuv3.h | 6 +++---
 arch/arm64/kernel/mte.c            | 7 ++-----
 arch/arm64/kernel/vdso.c           | 2 +-
 arch/arm64/mm/copypage.c           | 5 +++--
 arch/arm64/mm/fault.c              | 4 ++--
 6 files changed, 14 insertions(+), 16 deletions(-)
diff --git a/arch/arm/include/asm/arm_pmuv3.h b/arch/arm/include/asm/arm_pmuv3.h
index 78d3d4b82c6c..f4db3e75d75f 100644
--- a/arch/arm/include/asm/arm_pmuv3.h
+++ b/arch/arm/include/asm/arm_pmuv3.h
@@ -92,7 +92,7 @@
#define RETURN_READ_PMEVCNTRN(n) \
return read_sysreg(PMEVCNTR##n)
-static unsigned long read_pmevcntrn(int n)
+static inline unsigned long read_pmevcntrn(int n)
{
PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
return 0;
@@ -100,14 +100,14 @@ static unsigned long read_pmevcntrn(int n)
#define WRITE_PMEVCNTRN(n) \
write_sysreg(val, PMEVCNTR##n)
-static void write_pmevcntrn(int n, unsigned long val)
+static inline void write_pmevcntrn(int n, unsigned long val)
{
PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
}
#define WRITE_PMEVTYPERN(n) \
write_sysreg(val, PMEVTYPER##n)
-static void write_pmevtypern(int n, unsigned long val)
+static inline void write_pmevtypern(int n, unsigned long val)
{
PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
}
diff --git a/arch/arm64/include/asm/arm_pmuv3.h b/arch/arm64/include/asm/arm_pmuv3.h
index d6b51deb7bf0..18dc2fb3d7b7 100644
--- a/arch/arm64/include/asm/arm_pmuv3.h
+++ b/arch/arm64/include/asm/arm_pmuv3.h
@@ -13,7 +13,7 @@
#define RETURN_READ_PMEVCNTRN(n) \
return read_sysreg(pmevcntr##n##_el0)
-static unsigned long read_pmevcntrn(int n)
+static inline unsigned long read_pmevcntrn(int n)
{
PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
return 0;
@@ -21,14 +21,14 @@ static unsigned long read_pmevcntrn(int n)
#define WRITE_PMEVCNTRN(n) \
write_sysreg(val, pmevcntr##n##_el0)
-static void write_pmevcntrn(int n, unsigned long val)
+static inline void write_pmevcntrn(int n, unsigned long val)
{
PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
}
#define WRITE_PMEVTYPERN(n) \
write_sysreg(val, pmevtyper##n##_el0)
-static void write_pmevtypern(int n, unsigned long val)
+static inline void write_pmevtypern(int n, unsigned long val)
{
PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
}
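Note on the two arm_pmuv3.h hunks above (the ARM and arm64 patches make the same fix): these accessors are defined in a header that is included from several translation units, and a plain 'static' function defined in a header gets an independent copy in every including .c file, tripping -Wunused-function in any file that never calls it. Marking them 'static inline' avoids the warning and lets the compiler drop unused copies. A minimal sketch of the failure mode, with a hypothetical header and function name, not kernel code:

    /* sketch.h: warns as "defined but not used" in any TU that
     * includes it without calling it */
    static unsigned long read_counter(int n)
    {
            return 0;       /* stand-in for the PMEVN_SWITCH() dispatch */
    }

    /* the fixed form produces no such warning */
    static inline unsigned long read_counter_fixed(int n)
    {
            return 0;
    }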
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index f5bcb0dc6267..7e89968bd282 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -66,13 +66,10 @@ void mte_sync_tags(pte_t old_pte, pte_t pte)
return;
/* if PG_mte_tagged is set, tags have already been initialised */
- for (i = 0; i < nr_pages; i++, page++) {
- if (!page_mte_tagged(page)) {
+ for (i = 0; i < nr_pages; i++, page++)
+ if (!page_mte_tagged(page))
mte_sync_page_tags(page, old_pte, check_swap,
pte_is_tagged);
- set_page_mte_tagged(page);
- }
- }
/* ensure the tags are visible before the PTE is set */
smp_wmb();
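Note on the mte.c hunk: the deleted set_page_mte_tagged() call marked a page as tagged even on paths where mte_sync_page_tags() never actually initialized its tags, so a later reader of PG_mte_tagged could wrongly skip tag initialization. The flag is now expected to be set only where tags are really written. A hedged sketch of the intended invariant, using a hypothetical helper:

    /* illustration only: publish the flag strictly after tags are valid */
    static void sync_one_page(struct page *page, bool tags_initialized)
    {
            if (!tags_initialized)
                    return;                 /* PG_mte_tagged stays clear */
            write_page_tags(page);          /* hypothetical tag write */
            set_page_mte_tagged(page);      /* set only after the write */
    }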
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 0119dc91abb5..d9e1355730ef 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -288,7 +288,7 @@ static int aarch32_alloc_kuser_vdso_page(void)
memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
kuser_sz);
- aarch32_vectors_page = virt_to_page(vdso_page);
+ aarch32_vectors_page = virt_to_page((void *)vdso_page);
return 0;
}
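Note on the vdso.c hunk: vdso_page is an unsigned long, while virt_to_page() on arm64 ultimately operates on pointers, so sparse reports an implicit integer-to-pointer conversion; the explicit (void *) cast marks the conversion as intentional without changing behavior. The same pattern in isolation, a minimal sketch rather than the kernel's code:

    unsigned long addr = get_zeroed_page(GFP_KERNEL); /* returns a long */
    struct page *pg = virt_to_page((void *)addr);     /* explicit cast,
                                                         no sparse warning */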
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
index 4aadcfb01754..a7bb20055ce0 100644
--- a/arch/arm64/mm/copypage.c
+++ b/arch/arm64/mm/copypage.c
@@ -21,9 +21,10 @@ void copy_highpage(struct page *to, struct page *from)
copy_page(kto, kfrom);
+ if (kasan_hw_tags_enabled())
+ page_kasan_tag_reset(to);
+
if (system_supports_mte() && page_mte_tagged(from)) {
- if (kasan_hw_tags_enabled())
- page_kasan_tag_reset(to);
/* It's a new page, shouldn't have been tagged yet */
WARN_ON_ONCE(!try_page_mte_tagging(to));
mte_copy_page_tags(kto, kfrom);
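Note on the copypage.c hunk: the KASAN tag reset used to sit inside the MTE branch, so copying from an untagged source left the destination page with whatever tag the allocator had assigned. Hoisting the reset makes it unconditional whenever hardware tags are enabled. A condensed view of the resulting order of operations, restating the diff above rather than the complete function:

    copy_page(kto, kfrom);                  /* copy the data first */
    if (kasan_hw_tags_enabled())
            page_kasan_tag_reset(to);       /* now also for untagged sources */
    if (system_supports_mte() && page_mte_tagged(from)) {
            /* copy MTE tags only when the source actually had them */
    }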
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 9e0db5c387e3..cb21ccd7940d 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -480,8 +480,8 @@ static void do_bad_area(unsigned long far, unsigned long esr,
}
}
-#define VM_FAULT_BADMAP 0x010000
-#define VM_FAULT_BADACCESS 0x020000
+#define VM_FAULT_BADMAP ((__force vm_fault_t)0x010000)
+#define VM_FAULT_BADACCESS ((__force vm_fault_t)0x020000)
static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
unsigned int mm_flags, unsigned long vm_flags,
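Note on the fault.c hunk: vm_fault_t is declared __bitwise for sparse, so assigning a bare integer constant produces "incorrect type" warnings; the __force cast documents that the conversion is deliberate. The annotation pattern in miniature, with a hypothetical type name (__bitwise and __force expand to nothing outside a sparse run):

    typedef unsigned int __bitwise fault_code_t;        /* hypothetical */
    #define MY_BADMAP ((__force fault_code_t)0x010000)  /* same pattern
                                                           as above */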