author:    Vladimir Murzin <vladimir.murzin@arm.com>  2021-03-12 17:38:10 +0000
committer: Catalin Marinas <catalin.marinas@arm.com>  2021-03-26 09:37:23 +0000
commit:    18107f8a2df6bf1c6cac8d0713f757f866d5af51 (patch)
tree:      0411230b8d6c259233a6aadcd0c0aaf261b86c2a /arch/arm64/include/asm/pgtable.h
parent:    1e28eed17697bcf343c6743f0028cc3b5dd88bf0 (diff)
arm64: Support execute-only permissions with Enhanced PAN
Enhanced Privileged Access Never (EPAN) allows Privileged Access Never to be used with execute-only mappings. The absence of such support was one reason for 24cecc377463 ("arm64: Revert support for execute-only user mappings"); with EPAN available, execute-only user mappings can now be revisited and re-enabled.

Cc: Kees Cook <keescook@chromium.org>
Signed-off-by: Vladimir Murzin <vladimir.murzin@arm.com>
Acked-by: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20210312173811.58284-2-vladimir.murzin@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Diffstat (limited to 'arch/arm64/include/asm/pgtable.h')
-rw-r--r--  arch/arm64/include/asm/pgtable.h  31
1 file changed, 23 insertions(+), 8 deletions(-)
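
Before the diff itself, a userspace-visible illustration of what the series enables. This is a minimal sketch, not part of the patch: it assumes an arm64 kernel with this series applied and maps an anonymous page with PROT_EXEC only. On an EPAN-capable CPU the mapping is genuinely execute-only, so the data read is expected to fault with SIGSEGV; on older CPUs arch_filter_pgprot() (added in the last hunk below) quietly widens the protection to read+execute and the read succeeds.

/* exec-only-demo.c - hypothetical test, not part of the patch. */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	/* Ask the kernel for an execute-only anonymous page. */
	volatile unsigned char *p = mmap(NULL, 4096, PROT_EXEC,
					 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/*
	 * With EPAN the PTE has neither PTE_USER nor PTE_UXN set, so this
	 * load is expected to raise SIGSEGV; without EPAN the mapping is
	 * degraded to PAGE_READONLY_EXEC and the load prints 0.
	 */
	printf("read back: %#x\n", *p);
	return 0;
}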
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 47027796c2f9..0b10204e72fc 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -113,11 +113,12 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte))
#define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID))
+/*
+ * Execute-only user mappings do not have the PTE_USER bit set. All valid
+ * kernel mappings have the PTE_UXN bit set.
+ */
#define pte_valid_not_user(pte) \
- ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
-#define pte_valid_user(pte) \
- ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
-
+ ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
/*
* Could the pte be present in the TLB? We must check mm_tlb_flush_pending
* so that we don't erroneously return false for pages that have been
@@ -130,12 +131,14 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
/*
- * p??_access_permitted() is true for valid user mappings (subject to the
- * write permission check). PROT_NONE mappings do not have the PTE_VALID bit
- * set.
+ * p??_access_permitted() is true for valid user mappings (PTE_USER
+ * bit set, subject to the write permission check). For execute-only
+ * mappings, like PROT_EXEC with EPAN (both PTE_USER and PTE_UXN bits
+ * not set) must return false. PROT_NONE mappings do not have the
+ * PTE_VALID bit set.
*/
#define pte_access_permitted(pte, write) \
- (pte_valid_user(pte) && (!(write) || pte_write(pte)))
+ (((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) && (!(write) || pte_write(pte)))
#define pmd_access_permitted(pmd, write) \
(pte_access_permitted(pmd_pte(pmd), (write)))
#define pud_access_permitted(pud, write) \
@@ -995,6 +998,18 @@ static inline bool arch_wants_old_prefaulted_pte(void)
}
#define arch_wants_old_prefaulted_pte arch_wants_old_prefaulted_pte
+static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
+{
+ if (cpus_have_const_cap(ARM64_HAS_EPAN))
+ return prot;
+
+ if (pgprot_val(prot) != pgprot_val(PAGE_EXECONLY))
+ return prot;
+
+ return PAGE_READONLY_EXEC;
+}
+
+
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_PGTABLE_H */
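
To see the permission-check change outside diff context, here is a standalone sketch of the predicates involved. It is illustrative only: the PTE_* bit positions are reproduced from pgtable-hwdef.h purely for readability, and pte_user_access_ok() is a made-up name that mirrors just the PTE_USER half of pte_access_permitted() (the write-permission check is elided).

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions, mirroring pgtable-hwdef.h. */
#define PTE_VALID	(UINT64_C(1) << 0)
#define PTE_USER	(UINT64_C(1) << 6)	/* AP[1] */
#define PTE_UXN		(UINT64_C(1) << 54)	/* Unprivileged eXecute Never */

/* Old kernel-mapping test: valid with AP[1] clear. */
#define pte_valid_not_user_old(pte) \
	(((pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)

/* New test: a kernel mapping must additionally be user-execute-never. */
#define pte_valid_not_user_new(pte) \
	(((pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))

/* PTE_USER half of pte_access_permitted(); write check elided. */
#define pte_user_access_ok(pte) \
	(((pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))

int main(void)
{
	uint64_t kernel_pte = PTE_VALID | PTE_UXN;	/* PTE_USER clear */
	uint64_t user_pte   = PTE_VALID | PTE_USER | PTE_UXN;
	uint64_t xonly_pte  = PTE_VALID;		/* USER and UXN both clear */

	/* Execute-only user PTEs used to look exactly like kernel PTEs... */
	assert(pte_valid_not_user_old(xonly_pte) &&
	       pte_valid_not_user_old(kernel_pte));

	/* ...the extra PTE_UXN check now tells them apart. */
	assert(!pte_valid_not_user_new(xonly_pte));
	assert(pte_valid_not_user_new(kernel_pte));

	/*
	 * pte_access_permitted() still demands PTE_USER, so GUP-style
	 * checks reject execute-only ranges, as the updated comment
	 * in the patch requires.
	 */
	assert(pte_user_access_ok(user_pte));
	assert(!pte_user_access_ok(xonly_pte));

	puts("PTE encodings behave as described");
	return 0;
}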
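
A note on the arch_filter_pgprot() design choice: the hook degrades an execute-only request instead of rejecting it, so mmap(PROT_EXEC) keeps succeeding on CPUs without EPAN and simply yields a readable, executable mapping. Userspace therefore needs no fallback path of its own; execute-only protection is best-effort on older hardware and enforced only where EPAN is present.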