summaryrefslogtreecommitdiff
path: root/arch/arm64/kvm/va_layout.c
diff options
context:
space:
mode:
authorMarc Zyngier <marc.zyngier@arm.com>2017-12-08 14:18:27 +0000
committerMarc Zyngier <marc.zyngier@arm.com>2018-03-19 13:05:15 +0000
commit005e975a3bd08dce8d77746d6688cac615fe7c97 (patch)
treefa06aba199457ce4e360ec4d69c38b54a08308be /arch/arm64/kvm/va_layout.c
parent11d764079c9f25d1da8e10906d54da7fefec5844 (diff)
arm64: KVM: Dynamically compute the HYP VA mask
As we're moving towards a much more dynamic way to compute our HYP VA, let's express the mask in a slightly different way. Instead of comparing the idmap position to the "low" VA mask, we directly compute the mask by taking into account the idmap's (VA_BIT-1) bit. No functional change. Acked-by: Catalin Marinas <catalin.marinas@arm.com> Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Diffstat (limited to 'arch/arm64/kvm/va_layout.c')
-rw-r--r--arch/arm64/kvm/va_layout.c17
1 file changed, 6 insertions, 11 deletions
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index 0d7bf8319894..7998d1a60916 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -21,24 +21,19 @@
#include <asm/insn.h>
#include <asm/kvm_mmu.h>
-#define HYP_PAGE_OFFSET_HIGH_MASK ((UL(1) << VA_BITS) - 1)
-#define HYP_PAGE_OFFSET_LOW_MASK ((UL(1) << (VA_BITS - 1)) - 1)
-
static u64 va_mask;
static void compute_layout(void)
{
phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
- unsigned long mask = HYP_PAGE_OFFSET_HIGH_MASK;
+ u64 hyp_va_msb;
- /*
- * Activate the lower HYP offset only if the idmap doesn't
- * clash with it,
- */
- if (idmap_addr > HYP_PAGE_OFFSET_LOW_MASK)
- mask = HYP_PAGE_OFFSET_LOW_MASK;
+ /* Where is my RAM region? */
+ hyp_va_msb = idmap_addr & BIT(VA_BITS - 1);
+ hyp_va_msb ^= BIT(VA_BITS - 1);
- va_mask = mask;
+ va_mask = GENMASK_ULL(VA_BITS - 2, 0);
+ va_mask |= hyp_va_msb;
}
static u32 compute_instruction(int n, u32 rd, u32 rn)