arch/arm64/include/asm/stage2_pgtable.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2016 - ARM Ltd
 *
 * stage2 page table helpers
 */

#ifndef __ARM64_S2_PGTABLE_H_
#define __ARM64_S2_PGTABLE_H_

#include <linux/hugetlb.h>
#include <asm/pgtable.h>

/*
 * PGDIR_SHIFT determines the size a top-level page table entry can map
 * and depends on the number of levels in the page table. Compute the
 * PGDIR_SHIFT for a given number of levels.
 */
#define pt_levels_pgdir_shift(lvls)	ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - (lvls))

/*
 * The hardware supports concatenation of up to 16 tables at stage2 entry
 * level and we use the feature whenever possible, which means we resolve 4
 * additional bits of address at the entry level.
 *
 * This implies that the total number of page table levels the hardware
 * expects for IPA_SHIFT bits at stage2 can be calculated using the same
 * logic as for the (non-collapsible) stage1 page tables, but for
 * (IPA_SHIFT - 4).
 */
#define stage2_pgtable_levels(ipa)	ARM64_HW_PGTABLE_LEVELS((ipa) - 4)
#define kvm_stage2_levels(kvm)		VTCR_EL2_LVLS(kvm->arch.vtcr)
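
/*
 * For example, assuming 4KB pages (PAGE_SHIFT == 12): a 40-bit IPA gives
 * stage2_pgtable_levels(40) == ARM64_HW_PGTABLE_LEVELS(36) == 3, one level
 * fewer than the 4 that ARM64_HW_PGTABLE_LEVELS(40) would give at stage1.
 */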

/* stage2_pgdir_shift() is log2 of the size mapped by a top-level stage2 entry for the VM */
#define stage2_pgdir_shift(kvm)		pt_levels_pgdir_shift(kvm_stage2_levels(kvm))
#define stage2_pgdir_size(kvm)		(1ULL << stage2_pgdir_shift(kvm))
#define stage2_pgdir_mask(kvm)		~(stage2_pgdir_size(kvm) - 1)
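
/*
 * Continuing the 4KB-page, 40-bit IPA example: with 3 levels,
 * stage2_pgdir_shift() is ARM64_HW_PGTABLE_LEVEL_SHIFT(1) == 30, so each
 * entry-level descriptor maps a 1GB range.
 */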

/*
 * The number of PTRS across all concatenated stage2 tables is given by the
 * number of bits resolved at the initial level.
 * If we force more levels than necessary, we may have (stage2_pgdir_shift > IPA),
 * in which case stage2_pgd_ptrs evaluates to a single entry.
 */
#define pgd_ptrs_shift(ipa, pgdir_shift)	\
	((ipa) > (pgdir_shift) ? ((ipa) - (pgdir_shift)) : 0)
#define __s2_pgd_ptrs(ipa, lvls)		\
	(1 << (pgd_ptrs_shift((ipa), pt_levels_pgdir_shift(lvls))))
#define __s2_pgd_size(ipa, lvls)	(__s2_pgd_ptrs((ipa), (lvls)) * sizeof(pgd_t))

#define stage2_pgd_ptrs(kvm)		__s2_pgd_ptrs(kvm_phys_shift(kvm), kvm_stage2_levels(kvm))
#define stage2_pgd_size(kvm)		__s2_pgd_size(kvm_phys_shift(kvm), kvm_stage2_levels(kvm))
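
/*
 * In the same example, pgd_ptrs_shift(40, 30) == 10, so the concatenated
 * entry level holds 1024 pointers: two 4KB tables of 512 entries each,
 * i.e. stage2_pgd_size() == 8KB, allocated at VM creation.
 */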

/*
 * kvm_mmu_cache_min_pages() is the number of pages required to install
 * a stage-2 translation. We pre-allocate the entry level page table at
 * VM creation.
 */
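/*
 * e.g. with 3 stage2 levels this is 2: installing a mapping may need one
 * new table page for each level below the pre-allocated entry level.
 */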
#define kvm_mmu_cache_min_pages(kvm)	(kvm_stage2_levels(kvm) - 1)

/* Stage2 PUD definitions when the level is present */
static inline bool kvm_stage2_has_pud(struct kvm *kvm)
{
	return (CONFIG_PGTABLE_LEVELS > 3) && (kvm_stage2_levels(kvm) > 3);
}
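
/*
 * When kvm_stage2_has_pud() is false the PUD level is folded away: the
 * helpers below report pgd entries as always present and reinterpret the
 * pgd slot as a pud, so the stage2 walk simply skips that level.
 */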

#define S2_PUD_SHIFT			ARM64_HW_PGTABLE_LEVEL_SHIFT(1)
#define S2_PUD_SIZE			(1UL << S2_PUD_SHIFT)
#define S2_PUD_MASK			(~(S2_PUD_SIZE - 1))
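/* e.g. with 4KB pages: S2_PUD_SHIFT == 30, so S2_PUD_SIZE == 1GB */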

static inline bool stage2_pgd_none(struct kvm *kvm, pgd_t pgd)
{
	if (kvm_stage2_has_pud(kvm))
		return pgd_none(pgd);
	else
		return false;
}

static inline void stage2_pgd_clear(struct kvm *kvm, pgd_t *pgdp)
{
	if (kvm_stage2_has_pud(kvm))
		pgd_clear(pgdp);
}

static inline bool stage2_pgd_present(struct kvm *kvm, pgd_t pgd)
{
	if (kvm_stage2_has_pud(kvm))
		return pgd_present(pgd);
	else
		return true;
}

static inline void stage2_pgd_populate(struct kvm *kvm, pgd_t *pgd, pud_t *pud)
{
	if (kvm_stage2_has_pud(kvm))
		pgd_populate(NULL, pgd, pud);
}

static inline pud_t *stage2_pud_offset(struct kvm *kvm,
				       pgd_t *pgd, unsigned long address)
{
	if (kvm_stage2_has_pud(kvm))
		return pud_offset(pgd, address);
	else
		return (pud_t *)pgd;
}

static inline void stage2_pud_free(struct kvm *kvm, pud_t *pud)
{
	if (kvm_stage2_has_pud(kvm))
		free_page((unsigned long)pud);
}

static inline bool stage2_pud_table_empty(struct kvm *kvm, pud_t *pudp)
{
	if (kvm_stage2_has_pud(kvm))
		return kvm_page_empty(pudp);
	else
		return false;
}

static inline phys_addr_t
stage2_pud_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
{
	if (kvm_stage2_has_pud(kvm)) {
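		/*
		 * Return the next S2_PUD_SIZE-aligned boundary, capped at end;
		 * comparing (x - 1) keeps this correct even when boundary or
		 * end wraps to 0 at the very top of the address range.
		 */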
		phys_addr_t boundary = (addr + S2_PUD_SIZE) & S2_PUD_MASK;

		return (boundary - 1 < end - 1) ? boundary : end;
	} else {
		return end;
	}
}

/* Stage2 PMD definitions when the level is present */
static inline bool kvm_stage2_has_pmd(struct kvm *kvm)
{
	return (CONFIG_PGTABLE_LEVELS > 2) && (kvm_stage2_levels(kvm) > 2);
}

#define S2_PMD_SHIFT			ARM64_HW_PGTABLE_LEVEL_SHIFT(2)
#define S2_PMD_SIZE			(1UL << S2_PMD_SHIFT)
#define S2_PMD_MASK			(~(S2_PMD_SIZE - 1))
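/* e.g. with 4KB pages: S2_PMD_SHIFT == 21, so S2_PMD_SIZE == 2MB */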

static inline bool stage2_pud_none(struct kvm *kvm, pud_t pud)
{
	if (kvm_stage2_has_pmd(kvm))
		return pud_none(pud);
	else
		return false;
}

static inline void stage2_pud_clear(struct kvm *kvm, pud_t *pud)
{
	if (kvm_stage2_has_pmd(kvm))
		pud_clear(pud);
}

static inline bool stage2_pud_present(struct kvm *kvm, pud_t pud)
{
	if (kvm_stage2_has_pmd(kvm))
		return pud_present(pud);
	else
		return true;
}

static inline void stage2_pud_populate(struct kvm *kvm, pud_t *pud, pmd_t *pmd)
{
	if (kvm_stage2_has_pmd(kvm))
		pud_populate(NULL, pud, pmd);
}

static inline pmd_t *stage2_pmd_offset(struct kvm *kvm,
				       pud_t *pud, unsigned long address)
{
	if (kvm_stage2_has_pmd(kvm))
		return pmd_offset(pud, address);
	else
		return (pmd_t *)pud;
}

static inline void stage2_pmd_free(struct kvm *kvm, pmd_t *pmd)
{
	if (kvm_stage2_has_pmd(kvm))
		free_page((unsigned long)pmd);
}

static inline bool stage2_pud_huge(struct kvm *kvm, pud_t pud)
{
	if (kvm_stage2_has_pmd(kvm))
		return pud_huge(pud);
	else
		return false;
}

static inline bool stage2_pmd_table_empty(struct kvm *kvm, pmd_t *pmdp)
{
	if (kvm_stage2_has_pmd(kvm))
		return kvm_page_empty(pmdp);
	else
		return false;
}

static inline phys_addr_t
stage2_pmd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
{
	if (kvm_stage2_has_pmd(kvm)) {
		phys_addr_t boundary = (addr + S2_PMD_SIZE) & S2_PMD_MASK;

		return (boundary - 1 < end - 1) ? boundary : end;
	} else {
		return end;
	}
}

static inline bool stage2_pte_table_empty(struct kvm *kvm, pte_t *ptep)
{
	return kvm_page_empty(ptep);
}

static inline unsigned long stage2_pgd_index(struct kvm *kvm, phys_addr_t addr)
{
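	/*
	 * e.g. in the 4KB-page, 40-bit IPA example this extracts IPA bits
	 * [39:30], selecting one of the 1024 concatenated entry-level slots.
	 */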
	return (((addr) >> stage2_pgdir_shift(kvm)) & (stage2_pgd_ptrs(kvm) - 1));
}

static inline phys_addr_t
stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t boundary = (addr + stage2_pgdir_size(kvm)) & stage2_pgdir_mask(kvm);

	return (boundary - 1 < end - 1) ? boundary : end;
}

#endif	/* __ARM64_S2_PGTABLE_H_ */