author     Marc Zyngier <marc.zyngier@arm.com>             2016-06-30 18:40:51 +0100
committer  Christoffer Dall <christoffer.dall@linaro.org>  2016-07-03 23:41:27 +0200
commit     6c41a413fd44af8eae2949869d4d57ce681a0c30 (patch)
tree       dd5393c1fb009a4dfcd206c2eae9d3f82212f011 /arch/arm/kvm
parent     eac378a9ceb7196b776a965d915e02995fb8ba55 (diff)
arm/arm64: Get rid of KERN_TO_HYP
We have both KERN_TO_HYP and kern_hyp_va, which do the exact same thing. Let's standardize on the latter.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
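For background, kern_hyp_va() turns a kernel virtual address into the address at which the same memory is reachable from HYP (EL2). The snippet below is a minimal, self-contained sketch of that idea only, not the kernel's definition: it assumes an arm64-style scheme of this era in which the HYP alias is obtained by masking off the upper VA bits; the SKETCH_* names and the VA_BITS value are illustrative placeholders (on 32-bit ARM the conversion was effectively an identity at the time).

/*
 * Illustrative sketch only -- not the kernel's actual implementation.
 * It mimics the idea behind kern_hyp_va(): derive the HYP (EL2) alias
 * of a kernel virtual address by clearing the upper address bits, so
 * the same page is addressed through the HYP VA range.
 */
#include <stdio.h>

#define SKETCH_VA_BITS		48				/* assumed value, for illustration */
#define SKETCH_HYP_VA_MASK	((1ULL << SKETCH_VA_BITS) - 1)	/* assumed mask */

static inline unsigned long long sketch_kern_hyp_va(unsigned long long kva)
{
	/* Keep the low VA bits, drop the kernel-space tag bits. */
	return kva & SKETCH_HYP_VA_MASK;
}

int main(void)
{
	unsigned long long kva = 0xffff800008001000ULL;	/* example kernel VA */

	printf("kernel VA: 0x%llx\n", kva);
	printf("HYP VA:    0x%llx\n", sketch_kern_hyp_va(kva));
	return 0;
}

Whatever the per-architecture details, keeping a single helper name means callers such as create_hyp_mappings() below do not need to care which scheme is in effect, which is the point of dropping the duplicate KERN_TO_HYP macro.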
Diffstat (limited to 'arch/arm/kvm')
-rw-r--r--  arch/arm/kvm/mmu.c  18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 8a0aa37605c5..bda27b6b1aa2 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -506,9 +506,9 @@ void free_hyp_pgds(void)
 	if (hyp_pgd) {
 		unmap_hyp_range(hyp_pgd, hyp_idmap_start, PAGE_SIZE);
 		for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
-			unmap_hyp_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+			unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
 		for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
-			unmap_hyp_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+			unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
 
 		free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
 		hyp_pgd = NULL;
@@ -670,8 +670,8 @@ int create_hyp_mappings(void *from, void *to, pgprot_t prot)
 {
 	phys_addr_t phys_addr;
 	unsigned long virt_addr;
-	unsigned long start = KERN_TO_HYP((unsigned long)from);
-	unsigned long end = KERN_TO_HYP((unsigned long)to);
+	unsigned long start = kern_hyp_va((unsigned long)from);
+	unsigned long end = kern_hyp_va((unsigned long)to);
 
 	if (is_kernel_in_hyp_mode())
 		return 0;
@@ -705,8 +705,8 @@ int create_hyp_mappings(void *from, void *to, pgprot_t prot)
  */
 int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
 {
-	unsigned long start = KERN_TO_HYP((unsigned long)from);
-	unsigned long end = KERN_TO_HYP((unsigned long)to);
+	unsigned long start = kern_hyp_va((unsigned long)from);
+	unsigned long end = kern_hyp_va((unsigned long)to);
 
 	if (is_kernel_in_hyp_mode())
 		return 0;
@@ -1711,10 +1711,10 @@ int kvm_mmu_init(void)
kvm_info("IDMAP page: %lx\n", hyp_idmap_start);
kvm_info("HYP VA range: %lx:%lx\n",
- KERN_TO_HYP(PAGE_OFFSET), KERN_TO_HYP(~0UL));
+ kern_hyp_va(PAGE_OFFSET), kern_hyp_va(~0UL));
- if (hyp_idmap_start >= KERN_TO_HYP(PAGE_OFFSET) &&
- hyp_idmap_start < KERN_TO_HYP(~0UL)) {
+ if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
+ hyp_idmap_start < kern_hyp_va(~0UL)) {
/*
* The idmap page is intersecting with the VA space,
* it is not safe to continue further.