author     Ard Biesheuvel <ard.biesheuvel@linaro.org>   2016-02-16 13:52:39 +0100
committer  Catalin Marinas <catalin.marinas@arm.com>    2016-02-18 18:16:40 +0000
commit     a0bf9776cd0be4490d4675d4108e13379849fc7f (patch)
tree       3ccf0e44607d452910fdc1552ff9a70d8dcde1fc /arch/arm64/include/asm/kvm_asm.h
parent     157962f5a8f236cab898b68bdaa69ce68922f0bf (diff)
arm64: kvm: deal with kernel symbols outside of linear mapping
KVM on arm64 uses a fixed offset between the linear mapping at EL1 and
the HYP mapping at EL2. Before we can move the kernel virtual mapping
out of the linear mapping, we have to make sure that references to
kernel symbols that are accessed via the HYP mapping are translated to
their linear equivalent.

Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
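For context (illustrative, not part of the patch text): with the
kvm_ksym_ref() helper added by this patch, a caller that needs to hand
a kernel symbol's address to EL2 first rebases it from the kernel image
mapping onto the linear mapping, so that the fixed linear-to-HYP offset
still applies. A minimal sketch, assuming kvm_asm.h is included;
init_hyp_vector_sketch() is a made-up name modeled on the KVM CPU init
path:

extern char __kvm_hyp_vector[];

static void init_hyp_vector_sketch(void)
{
	/* image address -> linear address, before the EL1->EL2 offset applies */
	unsigned long vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);

	/* ... vector_ptr can now be passed to the HYP mapping/init code ... */
}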
Diffstat (limited to 'arch/arm64/include/asm/kvm_asm.h')
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h  17
1 file changed, 17 insertions, 0 deletions
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 52b777b7d407..31b56008f412 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -26,7 +26,24 @@
#define KVM_ARM64_DEBUG_DIRTY_SHIFT 0
#define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
+#define kvm_ksym_ref(sym) ((void *)&sym + kvm_ksym_shift)
+
#ifndef __ASSEMBLY__
+#if __GNUC__ > 4
+#define kvm_ksym_shift (PAGE_OFFSET - KIMAGE_VADDR)
+#else
+/*
+ * GCC versions 4.9 and older will fold the constant below into the addend of
+ * the reference to 'sym' above if kvm_ksym_shift is declared static or if the
+ * constant is used directly. However, since we use the small code model for
+ * the core kernel, the reference to 'sym' will be emitted as an adrp/add pair,
+ * with a +/- 4 GB range, resulting in linker relocation errors if the shift
+ * is sufficiently large. So prevent the compiler from folding the shift into
+ * the addend, by making the shift a variable with external linkage.
+ */
+__weak u64 kvm_ksym_shift = PAGE_OFFSET - KIMAGE_VADDR;
+#endif
+
struct kvm;
struct kvm_vcpu;
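The comment in the hunk above explains why kvm_ksym_shift must be a
variable with external linkage on GCC 4.9 and earlier. The standalone
sketch below contrasts the two code generation outcomes it describes;
'some_sym', 'const_shift', 'var_shift', 'folded' and 'unfolded' are
hypothetical names and the shift value is a stand-in:

extern char some_sym[];

/*
 * With a compile-time-visible shift, GCC 4.9 and earlier may fold the
 * addition into the symbol reference itself, i.e. emit an adrp/add pair
 * against 'some_sym + const_shift'. That pair can only reach +/- 4 GB,
 * so a sufficiently large shift fails with a linker relocation error.
 */
static const unsigned long const_shift = 1UL << 38;	/* stand-in value */

void *folded(void)
{
	return (void *)some_sym + const_shift;
}

/*
 * With external linkage (the approach the patch takes), the shift is
 * loaded at run time and added to the symbol address, so the relocation
 * against 'some_sym' carries no oversized addend.
 */
unsigned long var_shift = 1UL << 38;			/* stand-in value */

void *unfolded(void)
{
	return (void *)some_sym + var_shift;
}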