author    Marc Zyngier <marc.zyngier@arm.com>    2018-04-06 12:27:28 +0100
committer Marc Zyngier <marc.zyngier@arm.com>    2018-07-09 11:37:41 +0100
commit    e48d53a91f6e90873e21a5ca5e8c0d7a9f8936a4 (patch)
tree      8226aa3bd5ae8eefb39ad90d66fff26917bd747b /arch/arm64/include/asm/pgtable-prot.h
parent    1e4b044d22517cae7047c99038abb444423243ca (diff)
arm64: KVM: Add support for Stage-2 control of memory types and cacheability
Up to ARMv8.3, the combination of Stage-1 and Stage-2 attributes results in the strongest attribute of the two stages. This means that the hypervisor has to perform quite a lot of cache maintenance just in case the guest has some non-cacheable mappings around.

ARMv8.4 solves this problem by offering a different mode (FWB) where Stage-2 has total control over the memory attribute (this is limited to systems where both I/O and instruction fetches are coherent with the dcache). This is achieved by having a different set of memory attributes in the page tables, and a new bit set in HCR_EL2. On such a system, we can then safely sidestep any form of dcache management.

Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
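As an illustrative aside (not part of this hunk): enabling FWB on a capable CPU amounts to setting the FWB bit in HCR_EL2 for guest contexts. The bit position and helper name below are assumptions made for the sketch; only cpus_have_const_cap() and the ARM64_HAS_STAGE2_FWB capability are taken from this series.

	/* Sketch only: HCR_EL2.FWB is assumed to be bit 46 per ARMv8.4. */
	#define HCR_FWB		(UL(1) << 46)

	/* Hypothetical helper: fold FWB into a vcpu's HCR_EL2 value. */
	static inline u64 hcr_with_fwb(u64 hcr)
	{
		if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
			hcr |= HCR_FWB;	/* Stage-2 attributes win outright */
		return hcr;
	}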
Diffstat (limited to 'arch/arm64/include/asm/pgtable-prot.h')
-rw-r--r--  arch/arm64/include/asm/pgtable-prot.h | 14
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 108ecad7acc5..c66c3047400e 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -67,8 +67,18 @@
#define PAGE_HYP_RO __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
#define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
-#define PAGE_S2 __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY | PTE_S2_XN)
-#define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_S2_XN)
+#define PAGE_S2_MEMATTR(attr) \
+ ({ \
+ u64 __val; \
+ if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) \
+ __val = PTE_S2_MEMATTR(MT_S2_FWB_ ## attr); \
+ else \
+ __val = PTE_S2_MEMATTR(MT_S2_ ## attr); \
+ __val; \
+ })
+
+#define PAGE_S2 __pgprot(_PROT_DEFAULT | PAGE_S2_MEMATTR(NORMAL) | PTE_S2_RDONLY | PTE_S2_XN)
+#define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PAGE_S2_MEMATTR(DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_S2_XN)
#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
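For context, a minimal sketch (assumptions, not part of this patch) of how PAGE_S2 is typically consumed when building a stage-2 PTE for guest memory. The ({ ... }) statement expression in PAGE_S2_MEMATTR lets the attribute encoding be chosen at each use site based on the CPU capability, so on an FWB-capable system PAGE_S2 resolves to the MT_S2_FWB_NORMAL encoding and no dcache clean is needed before the mapping is installed. pfn_pte() and kvm_s2pte_mkwrite() below are assumed from the surrounding KVM/arm64 code, not introduced here.

	/* Hypothetical usage: build a stage-2 PTE for a guest page using
	 * the default stage-2 protection; attributes are selected per-CPU
	 * by PAGE_S2_MEMATTR() at runtime. */
	static pte_t make_stage2_pte(kvm_pfn_t pfn, bool writable)
	{
		pte_t pte = pfn_pte(pfn, PAGE_S2);

		if (writable)
			pte = kvm_s2pte_mkwrite(pte);	/* assumed helper */

		return pte;
	}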