Diffstat (limited to 'arch/mips/kvm/mmu.c')
-rw-r--r--  arch/mips/kvm/mmu.c | 1329
1 file changed, 1088 insertions(+), 241 deletions(-)
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 3b677c851be0..cb0faade311e 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -11,86 +11,995 @@
#include <linux/highmem.h>
#include <linux/kvm_host.h>
+#include <linux/uaccess.h>
#include <asm/mmu_context.h>
+#include <asm/pgalloc.h>
-static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
+/*
+ * KVM_MMU_CACHE_MIN_PAGES is the number of GPA page table translation levels
+ * for which pages need to be cached.
+ */
+#if defined(__PAGETABLE_PMD_FOLDED)
+#define KVM_MMU_CACHE_MIN_PAGES 1
+#else
+#define KVM_MMU_CACHE_MIN_PAGES 2
+#endif
+
+static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
+ int min, int max)
{
- int cpu = smp_processor_id();
+ void *page;
+
+ BUG_ON(max > KVM_NR_MEM_OBJS);
+ if (cache->nobjs >= min)
+ return 0;
+ while (cache->nobjs < max) {
+ page = (void *)__get_free_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+ cache->objects[cache->nobjs++] = page;
+ }
+ return 0;
+}
- return vcpu->arch.guest_kernel_asid[cpu] &
- cpu_asid_mask(&cpu_data[cpu]);
+static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
+{
+ while (mc->nobjs)
+ free_page((unsigned long)mc->objects[--mc->nobjs]);
}
-static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
+static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
- int cpu = smp_processor_id();
+ void *p;
- return vcpu->arch.guest_user_asid[cpu] &
- cpu_asid_mask(&cpu_data[cpu]);
+ BUG_ON(!mc || !mc->nobjs);
+ p = mc->objects[--mc->nobjs];
+ return p;
}
-static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
+void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
- int srcu_idx, err = 0;
- kvm_pfn_t pfn;
+ mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+}
+
+/**
+ * kvm_pgd_init() - Initialise KVM GPA page directory.
+ * @page: Pointer to page directory (PGD) for KVM GPA.
+ *
+ * Initialise a KVM GPA page directory with pointers to the invalid table, i.e.
+ * representing no mappings. This is similar to pgd_init(), however it
+ * initialises all the page directory pointers, not just the ones corresponding
+ * to the userland address space (since it is for the guest physical address
+ * space rather than a virtual address space).
+ */
+static void kvm_pgd_init(void *page)
+{
+ unsigned long *p, *end;
+ unsigned long entry;
+
+#ifdef __PAGETABLE_PMD_FOLDED
+ entry = (unsigned long)invalid_pte_table;
+#else
+ entry = (unsigned long)invalid_pmd_table;
+#endif
+
+ p = (unsigned long *)page;
+ end = p + PTRS_PER_PGD;
+
+ do {
+ p[0] = entry;
+ p[1] = entry;
+ p[2] = entry;
+ p[3] = entry;
+ p[4] = entry;
+ p += 8;
+ p[-3] = entry;
+ p[-2] = entry;
+ p[-1] = entry;
+ } while (p != end);
+}
+
+/**
+ * kvm_pgd_alloc() - Allocate and initialise a KVM GPA page directory.
+ *
+ * Allocate a blank KVM GPA page directory (PGD) for representing guest physical
+ * to host physical page mappings.
+ *
+ * Returns: Pointer to new KVM GPA page directory.
+ * NULL on allocation failure.
+ */
+pgd_t *kvm_pgd_alloc(void)
+{
+ pgd_t *ret;
+
+ ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGD_ORDER);
+ if (ret)
+ kvm_pgd_init(ret);
+
+ return ret;
+}
+
+/**
+ * kvm_mips_walk_pgd() - Walk page table with optional allocation.
+ * @pgd: Page directory pointer.
+ * @addr: Address to index page table using.
+ * @cache: MMU page cache to allocate new page tables from, or NULL.
+ *
+ * Walk the page tables pointed to by @pgd to find the PTE corresponding to the
+ * address @addr. If page tables don't exist for @addr, they will be created
+ * from the MMU cache if @cache is not NULL.
+ *
+ * Returns: Pointer to pte_t corresponding to @addr.
+ * NULL if a page table doesn't exist for @addr and !@cache.
+ * NULL if a page table allocation failed.
+ */
+static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
+ unsigned long addr)
+{
+ pud_t *pud;
+ pmd_t *pmd;
+
+ pgd += pgd_index(addr);
+ if (pgd_none(*pgd)) {
+ /* Not used on MIPS yet */
+ BUG();
+ return NULL;
+ }
+ pud = pud_offset(pgd, addr);
+ if (pud_none(*pud)) {
+ pmd_t *new_pmd;
+
+ if (!cache)
+ return NULL;
+ new_pmd = mmu_memory_cache_alloc(cache);
+ pmd_init((unsigned long)new_pmd,
+ (unsigned long)invalid_pte_table);
+ pud_populate(NULL, pud, new_pmd);
+ }
+ pmd = pmd_offset(pud, addr);
+ if (pmd_none(*pmd)) {
+ pte_t *new_pte;
+
+ if (!cache)
+ return NULL;
+ new_pte = mmu_memory_cache_alloc(cache);
+ clear_page(new_pte);
+ pmd_populate_kernel(NULL, pmd, new_pte);
+ }
+ return pte_offset(pmd, addr);
+}
+
+/* Caller must hold kvm->mm_lock */
+static pte_t *kvm_mips_pte_for_gpa(struct kvm *kvm,
+ struct kvm_mmu_memory_cache *cache,
+ unsigned long addr)
+{
+ return kvm_mips_walk_pgd(kvm->arch.gpa_mm.pgd, cache, addr);
+}
+
+/*
+ * kvm_mips_flush_gpa_{pte,pmd,pud,pgd,pt}.
+ * Flush a range of guest physical address space from the VM's GPA page tables.
+ */
+
+static bool kvm_mips_flush_gpa_pte(pte_t *pte, unsigned long start_gpa,
+ unsigned long end_gpa)
+{
+ int i_min = __pte_offset(start_gpa);
+ int i_max = __pte_offset(end_gpa);
+ bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1);
+ int i;
+
+ for (i = i_min; i <= i_max; ++i) {
+ if (!pte_present(pte[i]))
+ continue;
+
+ set_pte(pte + i, __pte(0));
+ }
+ return safe_to_remove;
+}
+
+static bool kvm_mips_flush_gpa_pmd(pmd_t *pmd, unsigned long start_gpa,
+ unsigned long end_gpa)
+{
+ pte_t *pte;
+ unsigned long end = ~0ul;
+ int i_min = __pmd_offset(start_gpa);
+ int i_max = __pmd_offset(end_gpa);
+ bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1);
+ int i;
+
+ for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
+ if (!pmd_present(pmd[i]))
+ continue;
+
+ pte = pte_offset(pmd + i, 0);
+ if (i == i_max)
+ end = end_gpa;
+
+ if (kvm_mips_flush_gpa_pte(pte, start_gpa, end)) {
+ pmd_clear(pmd + i);
+ pte_free_kernel(NULL, pte);
+ } else {
+ safe_to_remove = false;
+ }
+ }
+ return safe_to_remove;
+}
+
+static bool kvm_mips_flush_gpa_pud(pud_t *pud, unsigned long start_gpa,
+ unsigned long end_gpa)
+{
+ pmd_t *pmd;
+ unsigned long end = ~0ul;
+ int i_min = __pud_offset(start_gpa);
+ int i_max = __pud_offset(end_gpa);
+ bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1);
+ int i;
+
+ for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
+ if (!pud_present(pud[i]))
+ continue;
+
+ pmd = pmd_offset(pud + i, 0);
+ if (i == i_max)
+ end = end_gpa;
+
+ if (kvm_mips_flush_gpa_pmd(pmd, start_gpa, end)) {
+ pud_clear(pud + i);
+ pmd_free(NULL, pmd);
+ } else {
+ safe_to_remove = false;
+ }
+ }
+ return safe_to_remove;
+}
+
+static bool kvm_mips_flush_gpa_pgd(pgd_t *pgd, unsigned long start_gpa,
+ unsigned long end_gpa)
+{
+ pud_t *pud;
+ unsigned long end = ~0ul;
+ int i_min = pgd_index(start_gpa);
+ int i_max = pgd_index(end_gpa);
+ bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1);
+ int i;
+
+ for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
+ if (!pgd_present(pgd[i]))
+ continue;
+
+ pud = pud_offset(pgd + i, 0);
+ if (i == i_max)
+ end = end_gpa;
+
+ if (kvm_mips_flush_gpa_pud(pud, start_gpa, end)) {
+ pgd_clear(pgd + i);
+ pud_free(NULL, pud);
+ } else {
+ safe_to_remove = false;
+ }
+ }
+ return safe_to_remove;
+}
+
+/**
+ * kvm_mips_flush_gpa_pt() - Flush a range of guest physical addresses.
+ * @kvm: KVM pointer.
+ * @start_gfn: Guest frame number of first page in GPA range to flush.
+ * @end_gfn: Guest frame number of last page in GPA range to flush.
+ *
+ * Flushes a range of GPA mappings from the GPA page tables.
+ *
+ * The caller must hold the @kvm->mmu_lock spinlock.
+ *
+ * Returns: Whether it's safe to remove the top level page directory because
+ * all lower levels have been removed.
+ */
+bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
+{
+ return kvm_mips_flush_gpa_pgd(kvm->arch.gpa_mm.pgd,
+ start_gfn << PAGE_SHIFT,
+ end_gfn << PAGE_SHIFT);
+}
+
+#define BUILD_PTE_RANGE_OP(name, op) \
+static int kvm_mips_##name##_pte(pte_t *pte, unsigned long start, \
+ unsigned long end) \
+{ \
+ int ret = 0; \
+ int i_min = __pte_offset(start); \
+ int i_max = __pte_offset(end); \
+ int i; \
+ pte_t old, new; \
+ \
+ for (i = i_min; i <= i_max; ++i) { \
+ if (!pte_present(pte[i])) \
+ continue; \
+ \
+ old = pte[i]; \
+ new = op(old); \
+ if (pte_val(new) == pte_val(old)) \
+ continue; \
+ set_pte(pte + i, new); \
+ ret = 1; \
+ } \
+ return ret; \
+} \
+ \
+/* returns true if anything was done */ \
+static int kvm_mips_##name##_pmd(pmd_t *pmd, unsigned long start, \
+ unsigned long end) \
+{ \
+ int ret = 0; \
+ pte_t *pte; \
+ unsigned long cur_end = ~0ul; \
+ int i_min = __pmd_offset(start); \
+ int i_max = __pmd_offset(end); \
+ int i; \
+ \
+ for (i = i_min; i <= i_max; ++i, start = 0) { \
+ if (!pmd_present(pmd[i])) \
+ continue; \
+ \
+ pte = pte_offset(pmd + i, 0); \
+ if (i == i_max) \
+ cur_end = end; \
+ \
+ ret |= kvm_mips_##name##_pte(pte, start, cur_end); \
+ } \
+ return ret; \
+} \
+ \
+static int kvm_mips_##name##_pud(pud_t *pud, unsigned long start, \
+ unsigned long end) \
+{ \
+ int ret = 0; \
+ pmd_t *pmd; \
+ unsigned long cur_end = ~0ul; \
+ int i_min = __pud_offset(start); \
+ int i_max = __pud_offset(end); \
+ int i; \
+ \
+ for (i = i_min; i <= i_max; ++i, start = 0) { \
+ if (!pud_present(pud[i])) \
+ continue; \
+ \
+ pmd = pmd_offset(pud + i, 0); \
+ if (i == i_max) \
+ cur_end = end; \
+ \
+ ret |= kvm_mips_##name##_pmd(pmd, start, cur_end); \
+ } \
+ return ret; \
+} \
+ \
+static int kvm_mips_##name##_pgd(pgd_t *pgd, unsigned long start, \
+ unsigned long end) \
+{ \
+ int ret = 0; \
+ pud_t *pud; \
+ unsigned long cur_end = ~0ul; \
+ int i_min = pgd_index(start); \
+ int i_max = pgd_index(end); \
+ int i; \
+ \
+ for (i = i_min; i <= i_max; ++i, start = 0) { \
+ if (!pgd_present(pgd[i])) \
+ continue; \
+ \
+ pud = pud_offset(pgd + i, 0); \
+ if (i == i_max) \
+ cur_end = end; \
+ \
+ ret |= kvm_mips_##name##_pud(pud, start, cur_end); \
+ } \
+ return ret; \
+}
+
+/*
+ * kvm_mips_mkclean_gpa_pt.
+ * Mark a range of guest physical address space clean (writes fault) in the VM's
+ * GPA page table to allow dirty page tracking.
+ */
- if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
+BUILD_PTE_RANGE_OP(mkclean, pte_mkclean)
+
+/**
+ * kvm_mips_mkclean_gpa_pt() - Make a range of guest physical addresses clean.
+ * @kvm: KVM pointer.
+ * @start_gfn: Guest frame number of first page in GPA range to flush.
+ * @end_gfn: Guest frame number of last page in GPA range to flush.
+ *
+ * Make a range of GPA mappings clean so that guest writes will fault and
+ * trigger dirty page logging.
+ *
+ * The caller must hold the @kvm->mmu_lock spinlock.
+ *
+ * Returns: Whether any GPA mappings were modified, which would require
+ * derived mappings (GVA page tables & TLB entries) to be
+ * invalidated.
+ */
+int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
+{
+ return kvm_mips_mkclean_pgd(kvm->arch.gpa_mm.pgd,
+ start_gfn << PAGE_SHIFT,
+ end_gfn << PAGE_SHIFT);
+}
+
+/**
+ * kvm_arch_mmu_enable_log_dirty_pt_masked() - write protect dirty pages
+ * @kvm: The KVM pointer
+ * @slot: The memory slot associated with mask
+ * @gfn_offset: The gfn offset in memory slot
+ * @mask: The mask of dirty pages at offset 'gfn_offset' in this memory
+ * slot to be write protected
+ *
+ * Walks the bits set in @mask and write protects the associated PTEs. The
+ * caller must hold @kvm->mmu_lock.
+ */
+void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ gfn_t gfn_offset, unsigned long mask)
+{
+ gfn_t base_gfn = slot->base_gfn + gfn_offset;
+ gfn_t start = base_gfn + __ffs(mask);
+ gfn_t end = base_gfn + __fls(mask);
+
+ kvm_mips_mkclean_gpa_pt(kvm, start, end);
+}
+
+/*
+ * kvm_mips_mkold_gpa_pt.
+ * Mark a range of guest physical address space old (all accesses fault) in the
+ * VM's GPA page table to allow detection of commonly used pages.
+ */
+
+BUILD_PTE_RANGE_OP(mkold, pte_mkold)
+
+static int kvm_mips_mkold_gpa_pt(struct kvm *kvm, gfn_t start_gfn,
+ gfn_t end_gfn)
+{
+ return kvm_mips_mkold_pgd(kvm->arch.gpa_mm.pgd,
+ start_gfn << PAGE_SHIFT,
+ end_gfn << PAGE_SHIFT);
+}
+
+static int handle_hva_to_gpa(struct kvm *kvm,
+ unsigned long start,
+ unsigned long end,
+ int (*handler)(struct kvm *kvm, gfn_t gfn,
+ gpa_t gfn_end,
+ struct kvm_memory_slot *memslot,
+ void *data),
+ void *data)
+{
+ struct kvm_memslots *slots;
+ struct kvm_memory_slot *memslot;
+ int ret = 0;
+
+ slots = kvm_memslots(kvm);
+
+ /* we only care about the pages that the guest sees */
+ kvm_for_each_memslot(memslot, slots) {
+ unsigned long hva_start, hva_end;
+ gfn_t gfn, gfn_end;
+
+ hva_start = max(start, memslot->userspace_addr);
+ hva_end = min(end, memslot->userspace_addr +
+ (memslot->npages << PAGE_SHIFT));
+ if (hva_start >= hva_end)
+ continue;
+
+ /*
+ * {gfn(page) | page intersects with [hva_start, hva_end)} =
+ * {gfn_start, gfn_start+1, ..., gfn_end-1}.
+ */
+ gfn = hva_to_gfn_memslot(hva_start, memslot);
+ gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
+
+ ret |= handler(kvm, gfn, gfn_end, memslot, data);
+ }
+
+ return ret;
+}
+
+
+static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
+ struct kvm_memory_slot *memslot, void *data)
+{
+ kvm_mips_flush_gpa_pt(kvm, gfn, gfn_end);
+ return 1;
+}
+
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+{
+ unsigned long end = hva + PAGE_SIZE;
+
+ handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
+
+ kvm_mips_callbacks->flush_shadow_all(kvm);
+ return 0;
+}
+
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+{
+ handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
+
+ kvm_mips_callbacks->flush_shadow_all(kvm);
+ return 0;
+}
+
+static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
+ struct kvm_memory_slot *memslot, void *data)
+{
+ gpa_t gpa = gfn << PAGE_SHIFT;
+ pte_t hva_pte = *(pte_t *)data;
+ pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
+ pte_t old_pte;
+
+ if (!gpa_pte)
+ return 0;
+
+ /* Mapping may need adjusting depending on memslot flags */
+ old_pte = *gpa_pte;
+ if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte))
+ hva_pte = pte_mkclean(hva_pte);
+ else if (memslot->flags & KVM_MEM_READONLY)
+ hva_pte = pte_wrprotect(hva_pte);
+
+ set_pte(gpa_pte, hva_pte);
+
+ /* Replacing an absent or old page doesn't need flushes */
+ if (!pte_present(old_pte) || !pte_young(old_pte))
return 0;
+ /* Pages swapped, aged, moved, or cleaned require flushes */
+ return !pte_present(hva_pte) ||
+ !pte_young(hva_pte) ||
+ pte_pfn(old_pte) != pte_pfn(hva_pte) ||
+ (pte_dirty(old_pte) && !pte_dirty(hva_pte));
+}
+
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+{
+ unsigned long end = hva + PAGE_SIZE;
+ int ret;
+
+ ret = handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pte);
+ if (ret)
+ kvm_mips_callbacks->flush_shadow_all(kvm);
+}
+
+static int kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
+ struct kvm_memory_slot *memslot, void *data)
+{
+ return kvm_mips_mkold_gpa_pt(kvm, gfn, gfn_end);
+}
+
+static int kvm_test_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
+ struct kvm_memory_slot *memslot, void *data)
+{
+ gpa_t gpa = gfn << PAGE_SHIFT;
+ pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
+
+ if (!gpa_pte)
+ return 0;
+ return pte_young(*gpa_pte);
+}
+
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
+{
+ return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
+}
+
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+ return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
+}
+
+/**
+ * _kvm_mips_map_page_fast() - Fast path GPA fault handler.
+ * @vcpu: VCPU pointer.
+ * @gpa: Guest physical address of fault.
+ * @write_fault: Whether the fault was due to a write.
+ * @out_entry: New PTE for @gpa (written on success unless NULL).
+ * @out_buddy: New PTE for @gpa's buddy (written on success unless
+ * NULL).
+ *
+ * Perform fast path GPA fault handling, doing all that can be done without
+ * calling into KVM. This handles marking old pages young (for idle page
+ * tracking), and dirtying of clean pages (for dirty page logging).
+ *
+ * Returns: 0 on success, in which case we can update derived mappings and
+ * resume guest execution.
+ * -EFAULT on failure due to absent GPA mapping or write to
+ * read-only page, in which case KVM must be consulted.
+ */
+static int _kvm_mips_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa,
+ bool write_fault,
+ pte_t *out_entry, pte_t *out_buddy)
+{
+ struct kvm *kvm = vcpu->kvm;
+ gfn_t gfn = gpa >> PAGE_SHIFT;
+ pte_t *ptep;
+ kvm_pfn_t pfn = 0; /* silence bogus GCC warning */
+ bool pfn_valid = false;
+ int ret = 0;
+
+ spin_lock(&kvm->mmu_lock);
+
+ /* Fast path - just check GPA page table for an existing entry */
+ ptep = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
+ if (!ptep || !pte_present(*ptep)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ /* Track access to pages marked old */
+ if (!pte_young(*ptep)) {
+ set_pte(ptep, pte_mkyoung(*ptep));
+ pfn = pte_pfn(*ptep);
+ pfn_valid = true;
+ /* call kvm_set_pfn_accessed() after unlock */
+ }
+ if (write_fault && !pte_dirty(*ptep)) {
+ if (!pte_write(*ptep)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ /* Track dirtying of writeable pages */
+ set_pte(ptep, pte_mkdirty(*ptep));
+ pfn = pte_pfn(*ptep);
+ mark_page_dirty(kvm, gfn);
+ kvm_set_pfn_dirty(pfn);
+ }
+
+ if (out_entry)
+ *out_entry = *ptep;
+ if (out_buddy)
+ *out_buddy = *ptep_buddy(ptep);
+
+out:
+ spin_unlock(&kvm->mmu_lock);
+ if (pfn_valid)
+ kvm_set_pfn_accessed(pfn);
+ return ret;
+}
+
+/**
+ * kvm_mips_map_page() - Map a guest physical page.
+ * @vcpu: VCPU pointer.
+ * @gpa: Guest physical address of fault.
+ * @write_fault: Whether the fault was due to a write.
+ * @out_entry: New PTE for @gpa (written on success unless NULL).
+ * @out_buddy: New PTE for @gpa's buddy (written on success unless
+ * NULL).
+ *
+ * Handle GPA faults by creating a new GPA mapping (or updating an existing
+ * one).
+ *
+ * This takes care of marking pages young or dirty (idle/dirty page tracking),
+ * asking KVM for the corresponding PFN, and creating a mapping in the GPA page
+ * tables. Derived mappings (GVA page tables and TLBs) must be handled by the
+ * caller.
+ *
+ * Returns: 0 on success, in which case the caller may use the @out_entry
+ * and @out_buddy PTEs to update derived mappings and resume guest
+ * execution.
+ * -EFAULT if there is no memory region at @gpa or a write was
+ * attempted to a read-only memory region. This is usually handled
+ * as an MMIO access.
+ */
+static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
+ bool write_fault,
+ pte_t *out_entry, pte_t *out_buddy)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
+ gfn_t gfn = gpa >> PAGE_SHIFT;
+ int srcu_idx, err;
+ kvm_pfn_t pfn;
+ pte_t *ptep, entry, old_pte;
+ bool writeable;
+ unsigned long prot_bits;
+ unsigned long mmu_seq;
+
+ /* Try the fast path to handle old / clean pages */
srcu_idx = srcu_read_lock(&kvm->srcu);
- pfn = gfn_to_pfn(kvm, gfn);
+ err = _kvm_mips_map_page_fast(vcpu, gpa, write_fault, out_entry,
+ out_buddy);
+ if (!err)
+ goto out;
+ /* We need a minimum of cached pages ready for page table creation */
+ err = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
+ KVM_NR_MEM_OBJS);
+ if (err)
+ goto out;
+
+retry:
+ /*
+ * Used to check for invalidations in progress, of the pfn that is
+ * returned by gfn_to_pfn_prot() below.
+ */
+ mmu_seq = kvm->mmu_notifier_seq;
+ /*
+ * Ensure the read of mmu_notifier_seq isn't reordered with PTE reads in
+ * gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
+ * risk the page we get a reference to getting unmapped before we have a
+ * chance to grab the mmu_lock without mmu_notifier_retry() noticing.
+ *
+ * This smp_rmb() pairs with the effective smp_wmb() of the combination
+ * of the pte_unmap_unlock() after the PTE is zapped, and the
+ * spin_lock() in kvm_mmu_notifier_invalidate_<page|range_end>() before
+ * mmu_notifier_seq is incremented.
+ */
+ smp_rmb();
+
+ /* Slow path - ask KVM core whether we can access this GPA */
+ pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writeable);
if (is_error_noslot_pfn(pfn)) {
- kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn);
err = -EFAULT;
goto out;
}
- kvm->arch.guest_pmap[gfn] = pfn;
+ spin_lock(&kvm->mmu_lock);
+ /* Check if an invalidation has taken place since we got pfn */
+ if (mmu_notifier_retry(kvm, mmu_seq)) {
+ /*
+ * This can happen when mappings are changed asynchronously, but
+ * also synchronously if a COW is triggered by
+ * gfn_to_pfn_prot().
+ */
+ spin_unlock(&kvm->mmu_lock);
+ kvm_release_pfn_clean(pfn);
+ goto retry;
+ }
+
+ /* Ensure page tables are allocated */
+ ptep = kvm_mips_pte_for_gpa(kvm, memcache, gpa);
+
+ /* Set up the PTE */
+ prot_bits = _PAGE_PRESENT | __READABLE | _page_cachable_default;
+ if (writeable) {
+ prot_bits |= _PAGE_WRITE;
+ if (write_fault) {
+ prot_bits |= __WRITEABLE;
+ mark_page_dirty(kvm, gfn);
+ kvm_set_pfn_dirty(pfn);
+ }
+ }
+ entry = pfn_pte(pfn, __pgprot(prot_bits));
+
+ /* Write the PTE */
+ old_pte = *ptep;
+ set_pte(ptep, entry);
+
+ err = 0;
+ if (out_entry)
+ *out_entry = *ptep;
+ if (out_buddy)
+ *out_buddy = *ptep_buddy(ptep);
+
+ spin_unlock(&kvm->mmu_lock);
+ kvm_release_pfn_clean(pfn);
+ kvm_set_pfn_accessed(pfn);
out:
srcu_read_unlock(&kvm->srcu, srcu_idx);
return err;
}
-/* Translate guest KSEG0 addresses to Host PA */
-unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
- unsigned long gva)
+static pte_t *kvm_trap_emul_pte_for_gva(struct kvm_vcpu *vcpu,
+ unsigned long addr)
{
- gfn_t gfn;
- unsigned long offset = gva & ~PAGE_MASK;
- struct kvm *kvm = vcpu->kvm;
+ struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
+ pgd_t *pgdp;
+ int ret;
+
+ /* We need a minimum of cached pages ready for page table creation */
+ ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
+ KVM_NR_MEM_OBJS);
+ if (ret)
+ return NULL;
+
+ if (KVM_GUEST_KERNEL_MODE(vcpu))
+ pgdp = vcpu->arch.guest_kernel_mm.pgd;
+ else
+ pgdp = vcpu->arch.guest_user_mm.pgd;
+
+ return kvm_mips_walk_pgd(pgdp, memcache, addr);
+}
- if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
- kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
- __builtin_return_address(0), gva);
- return KVM_INVALID_PAGE;
+void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
+ bool user)
+{
+ pgd_t *pgdp;
+ pte_t *ptep;
+
+ addr &= PAGE_MASK << 1;
+
+ pgdp = vcpu->arch.guest_kernel_mm.pgd;
+ ptep = kvm_mips_walk_pgd(pgdp, NULL, addr);
+ if (ptep) {
+ ptep[0] = pfn_pte(0, __pgprot(0));
+ ptep[1] = pfn_pte(0, __pgprot(0));
+ }
+
+ if (user) {
+ pgdp = vcpu->arch.guest_user_mm.pgd;
+ ptep = kvm_mips_walk_pgd(pgdp, NULL, addr);
+ if (ptep) {
+ ptep[0] = pfn_pte(0, __pgprot(0));
+ ptep[1] = pfn_pte(0, __pgprot(0));
+ }
}
+}
- gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);
+/*
+ * kvm_mips_flush_gva_{pte,pmd,pud,pgd,pt}.
+ * Flush a range of guest virtual address space from the VM's GVA page tables.
+ */
- if (gfn >= kvm->arch.guest_pmap_npages) {
- kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
- gva);
- return KVM_INVALID_PAGE;
+static bool kvm_mips_flush_gva_pte(pte_t *pte, unsigned long start_gva,
+ unsigned long end_gva)
+{
+ int i_min = __pte_offset(start_gva);
+ int i_max = __pte_offset(end_gva);
+ bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1);
+ int i;
+
+ /*
+ * There's no freeing to do, so there's no point clearing individual
+ * entries unless only part of the last level page table needs flushing.
+ */
+ if (safe_to_remove)
+ return true;
+
+ for (i = i_min; i <= i_max; ++i) {
+ if (!pte_present(pte[i]))
+ continue;
+
+ set_pte(pte + i, __pte(0));
}
+ return false;
+}
- if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
- return KVM_INVALID_ADDR;
+static bool kvm_mips_flush_gva_pmd(pmd_t *pmd, unsigned long start_gva,
+ unsigned long end_gva)
+{
+ pte_t *pte;
+ unsigned long end = ~0ul;
+ int i_min = __pmd_offset(start_gva);
+ int i_max = __pmd_offset(end_gva);
+ bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1);
+ int i;
+
+ for (i = i_min; i <= i_max; ++i, start_gva = 0) {
+ if (!pmd_present(pmd[i]))
+ continue;
+
+ pte = pte_offset(pmd + i, 0);
+ if (i == i_max)
+ end = end_gva;
+
+ if (kvm_mips_flush_gva_pte(pte, start_gva, end)) {
+ pmd_clear(pmd + i);
+ pte_free_kernel(NULL, pte);
+ } else {
+ safe_to_remove = false;
+ }
+ }
+ return safe_to_remove;
+}
- return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
+static bool kvm_mips_flush_gva_pud(pud_t *pud, unsigned long start_gva,
+ unsigned long end_gva)
+{
+ pmd_t *pmd;
+ unsigned long end = ~0ul;
+ int i_min = __pud_offset(start_gva);
+ int i_max = __pud_offset(end_gva);
+ bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1);
+ int i;
+
+ for (i = i_min; i <= i_max; ++i, start_gva = 0) {
+ if (!pud_present(pud[i]))
+ continue;
+
+ pmd = pmd_offset(pud + i, 0);
+ if (i == i_max)
+ end = end_gva;
+
+ if (kvm_mips_flush_gva_pmd(pmd, start_gva, end)) {
+ pud_clear(pud + i);
+ pmd_free(NULL, pmd);
+ } else {
+ safe_to_remove = false;
+ }
+ }
+ return safe_to_remove;
+}
+
+static bool kvm_mips_flush_gva_pgd(pgd_t *pgd, unsigned long start_gva,
+ unsigned long end_gva)
+{
+ pud_t *pud;
+ unsigned long end = ~0ul;
+ int i_min = pgd_index(start_gva);
+ int i_max = pgd_index(end_gva);
+ bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1);
+ int i;
+
+ for (i = i_min; i <= i_max; ++i, start_gva = 0) {
+ if (!pgd_present(pgd[i]))
+ continue;
+
+ pud = pud_offset(pgd + i, 0);
+ if (i == i_max)
+ end = end_gva;
+
+ if (kvm_mips_flush_gva_pud(pud, start_gva, end)) {
+ pgd_clear(pgd + i);
+ pud_free(NULL, pud);
+ } else {
+ safe_to_remove = false;
+ }
+ }
+ return safe_to_remove;
+}
+
+void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags)
+{
+ if (flags & KMF_GPA) {
+ /* all of guest virtual address space could be affected */
+ if (flags & KMF_KERN)
+ /* useg, kseg0, seg2/3 */
+ kvm_mips_flush_gva_pgd(pgd, 0, 0x7fffffff);
+ else
+ /* useg */
+ kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff);
+ } else {
+ /* useg */
+ kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff);
+
+ /* kseg2/3 */
+ if (flags & KMF_KERN)
+ kvm_mips_flush_gva_pgd(pgd, 0x60000000, 0x7fffffff);
+ }
+}
+
+static pte_t kvm_mips_gpa_pte_to_gva_unmapped(pte_t pte)
+{
+ /*
+ * Don't leak writeable but clean entries from GPA page tables. We don't
+ * want the normal Linux tlbmod handler to handle dirtying when KVM
+ * accesses guest memory.
+ */
+ if (!pte_dirty(pte))
+ pte = pte_wrprotect(pte);
+
+ return pte;
+}
+
+static pte_t kvm_mips_gpa_pte_to_gva_mapped(pte_t pte, long entrylo)
+{
+ /* Guest EntryLo overrides host EntryLo */
+ if (!(entrylo & ENTRYLO_D))
+ pte = pte_mkclean(pte);
+
+ return kvm_mips_gpa_pte_to_gva_unmapped(pte);
}
/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
- struct kvm_vcpu *vcpu)
+ struct kvm_vcpu *vcpu,
+ bool write_fault)
{
- gfn_t gfn;
- kvm_pfn_t pfn0, pfn1;
- unsigned long vaddr = 0;
- unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
- struct kvm *kvm = vcpu->kvm;
- const int flush_dcache_mask = 0;
- int ret;
+ unsigned long gpa;
+ pte_t pte_gpa[2], *ptep_gva;
+ int idx;
if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
@@ -98,49 +1007,39 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
return -1;
}
- gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
- if ((gfn | 1) >= kvm->arch.guest_pmap_npages) {
- kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
- gfn, badvaddr);
- kvm_mips_dump_host_tlbs();
+ /* Get the GPA page table entry */
+ gpa = KVM_GUEST_CPHYSADDR(badvaddr);
+ idx = (badvaddr >> PAGE_SHIFT) & 1;
+ if (kvm_mips_map_page(vcpu, gpa, write_fault, &pte_gpa[idx],
+ &pte_gpa[!idx]) < 0)
return -1;
- }
- vaddr = badvaddr & (PAGE_MASK << 1);
- if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
+ /* Get the GVA page table entry */
+ ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, badvaddr & ~PAGE_SIZE);
+ if (!ptep_gva) {
+ kvm_err("No ptep for gva %lx\n", badvaddr);
return -1;
+ }
- if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
- return -1;
-
- pfn0 = kvm->arch.guest_pmap[gfn & ~0x1];
- pfn1 = kvm->arch.guest_pmap[gfn | 0x1];
-
- entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
- ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
- ENTRYLO_D | ENTRYLO_V;
- entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
- ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
- ENTRYLO_D | ENTRYLO_V;
-
- preempt_disable();
- entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
- ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
- flush_dcache_mask);
- preempt_enable();
+ /* Copy a pair of entries from GPA page table to GVA page table */
+ ptep_gva[0] = kvm_mips_gpa_pte_to_gva_unmapped(pte_gpa[0]);
+ ptep_gva[1] = kvm_mips_gpa_pte_to_gva_unmapped(pte_gpa[1]);
- return ret;
+ /* Invalidate this entry in the TLB, guest kernel ASID only */
+ kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true);
+ return 0;
}
int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
- struct kvm_mips_tlb *tlb)
+ struct kvm_mips_tlb *tlb,
+ unsigned long gva,
+ bool write_fault)
{
- unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
struct kvm *kvm = vcpu->kvm;
- kvm_pfn_t pfn0, pfn1;
- gfn_t gfn0, gfn1;
long tlb_lo[2];
- int ret;
+ pte_t pte_gpa[2], *ptep_buddy, *ptep_gva;
+ unsigned int idx = TLB_LO_IDX(*tlb, gva);
+ bool kernel = KVM_GUEST_KERNEL_MODE(vcpu);
tlb_lo[0] = tlb->tlb_lo[0];
tlb_lo[1] = tlb->tlb_lo[1];
@@ -149,70 +1048,64 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
* The commpage address must not be mapped to anything else if the guest
* TLB contains entries nearby, or commpage accesses will break.
*/
- if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) &
- VPN2_MASK & (PAGE_MASK << 1)))
- tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;
-
- gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT;
- gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT;
- if (gfn0 >= kvm->arch.guest_pmap_npages ||
- gfn1 >= kvm->arch.guest_pmap_npages) {
- kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n",
- __func__, gfn0, gfn1, tlb->tlb_hi);
- kvm_mips_dump_guest_tlbs(vcpu);
- return -1;
- }
+ if (!((gva ^ KVM_GUEST_COMMPAGE_ADDR) & VPN2_MASK & (PAGE_MASK << 1)))
+ tlb_lo[TLB_LO_IDX(*tlb, KVM_GUEST_COMMPAGE_ADDR)] = 0;
- if (kvm_mips_map_page(kvm, gfn0) < 0)
+ /* Get the GPA page table entry */
+ if (kvm_mips_map_page(vcpu, mips3_tlbpfn_to_paddr(tlb_lo[idx]),
+ write_fault, &pte_gpa[idx], NULL) < 0)
return -1;
- if (kvm_mips_map_page(kvm, gfn1) < 0)
+ /* And its GVA buddy's GPA page table entry if it also exists */
+ pte_gpa[!idx] = pfn_pte(0, __pgprot(0));
+ if (tlb_lo[!idx] & ENTRYLO_V) {
+ spin_lock(&kvm->mmu_lock);
+ ptep_buddy = kvm_mips_pte_for_gpa(kvm, NULL,
+ mips3_tlbpfn_to_paddr(tlb_lo[!idx]));
+ if (ptep_buddy)
+ pte_gpa[!idx] = *ptep_buddy;
+ spin_unlock(&kvm->mmu_lock);
+ }
+
+ /* Get the GVA page table entry pair */
+ ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, gva & ~PAGE_SIZE);
+ if (!ptep_gva) {
+ kvm_err("No ptep for gva %lx\n", gva);
return -1;
+ }
- pfn0 = kvm->arch.guest_pmap[gfn0];
- pfn1 = kvm->arch.guest_pmap[gfn1];
+ /* Copy a pair of entries from GPA page table to GVA page table */
+ ptep_gva[0] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[0], tlb_lo[0]);
+ ptep_gva[1] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[1], tlb_lo[1]);
- /* Get attributes from the Guest TLB */
- entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
- ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
- (tlb_lo[0] & ENTRYLO_D) |
- (tlb_lo[0] & ENTRYLO_V);
- entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
- ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
- (tlb_lo[1] & ENTRYLO_D) |
- (tlb_lo[1] & ENTRYLO_V);
+ /* Invalidate this entry in the TLB, current guest mode ASID only */
+ kvm_mips_host_tlb_inv(vcpu, gva, !kernel, kernel);
kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
tlb->tlb_lo[0], tlb->tlb_lo[1]);
- preempt_disable();
- entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
- kvm_mips_get_kernel_asid(vcpu) :
- kvm_mips_get_user_asid(vcpu));
- ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
- tlb->tlb_mask);
- preempt_enable();
-
- return ret;
+ return 0;
}
-void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
- struct kvm_vcpu *vcpu)
+int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
+ struct kvm_vcpu *vcpu)
{
- unsigned long asid = asid_cache(cpu);
-
- asid += cpu_asid_inc();
- if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
- if (cpu_has_vtag_icache)
- flush_icache_all();
-
- kvm_local_flush_tlb_all(); /* start new asid cycle */
+ kvm_pfn_t pfn;
+ pte_t *ptep;
- if (!asid) /* fix version if needed */
- asid = asid_first_version(cpu);
+ ptep = kvm_trap_emul_pte_for_gva(vcpu, badvaddr);
+ if (!ptep) {
+ kvm_err("No ptep for commpage %lx\n", badvaddr);
+ return -1;
}
- cpu_context(cpu, mm) = asid_cache(cpu) = asid;
+ pfn = PFN_DOWN(virt_to_phys(vcpu->arch.kseg0_commpage));
+ /* Also set valid and dirty, so refill handler doesn't have to */
+ *ptep = pte_mkyoung(pte_mkdirty(pfn_pte(pfn, PAGE_SHARED)));
+
+ /* Invalidate this entry in the TLB, guest kernel ASID only */
+ kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true);
+ return 0;
}
/**
@@ -235,42 +1128,13 @@ static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
- unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
unsigned long flags;
- int newasid = 0;
kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
- /* Allocate new kernel and user ASIDs if needed */
-
local_irq_save(flags);
- if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
- asid_version_mask(cpu)) {
- kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
- vcpu->arch.guest_kernel_asid[cpu] =
- vcpu->arch.guest_kernel_mm.context.asid[cpu];
- newasid++;
-
- kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
- cpu_context(cpu, current->mm));
- kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
- cpu, vcpu->arch.guest_kernel_asid[cpu]);
- }
-
- if ((vcpu->arch.guest_user_asid[cpu] ^ asid_cache(cpu)) &
- asid_version_mask(cpu)) {
- kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
- vcpu->arch.guest_user_asid[cpu] =
- vcpu->arch.guest_user_mm.context.asid[cpu];
- newasid++;
-
- kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
- cpu_context(cpu, current->mm));
- kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
- vcpu->arch.guest_user_asid[cpu]);
- }
-
+ vcpu->cpu = cpu;
if (vcpu->arch.last_sched_cpu != cpu) {
kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
@@ -282,42 +1146,10 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
kvm_mips_migrate_count(vcpu);
}
- if (!newasid) {
- /*
- * If we preempted while the guest was executing, then reload
- * the pre-empted ASID
- */
- if (current->flags & PF_VCPU) {
- write_c0_entryhi(vcpu->arch.
- preempt_entryhi & asid_mask);
- ehb();
- }
- } else {
- /* New ASIDs were allocated for the VM */
-
- /*
- * Were we in guest context? If so then the pre-empted ASID is
- * no longer valid, we need to set it to what it should be based
- * on the mode of the Guest (Kernel/User)
- */
- if (current->flags & PF_VCPU) {
- if (KVM_GUEST_KERNEL_MODE(vcpu))
- write_c0_entryhi(vcpu->arch.
- guest_kernel_asid[cpu] &
- asid_mask);
- else
- write_c0_entryhi(vcpu->arch.
- guest_user_asid[cpu] &
- asid_mask);
- ehb();
- }
- }
-
/* restore guest state to registers */
- kvm_mips_callbacks->vcpu_set_regs(vcpu);
+ kvm_mips_callbacks->vcpu_load(vcpu, cpu);
local_irq_restore(flags);
-
}
/* ASID can change if another task is scheduled during preemption */
@@ -329,75 +1161,90 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
local_irq_save(flags);
cpu = smp_processor_id();
-
- vcpu->arch.preempt_entryhi = read_c0_entryhi();
vcpu->arch.last_sched_cpu = cpu;
+ vcpu->cpu = -1;
/* save guest state in registers */
- kvm_mips_callbacks->vcpu_get_regs(vcpu);
-
- if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
- asid_version_mask(cpu))) {
- kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
- cpu_context(cpu, current->mm));
- drop_mmu_context(current->mm, cpu);
- }
- write_c0_entryhi(cpu_asid(cpu, current->mm));
- ehb();
+ kvm_mips_callbacks->vcpu_put(vcpu, cpu);
local_irq_restore(flags);
}
-u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu)
+/**
+ * kvm_trap_emul_gva_fault() - Safely attempt to handle a GVA access fault.
+ * @vcpu: Virtual CPU.
+ * @gva: Guest virtual address to be accessed.
+ * @write: True if write attempted (must be dirtied and made writable).
+ *
+ * Safely attempt to handle a GVA fault, mapping GVA pages if necessary, and
+ * dirtying the page if @write so that guest instructions can be modified.
+ *
+ * Returns: KVM_MIPS_MAPPED on success.
+ * KVM_MIPS_GVA if bad guest virtual address.
+ * KVM_MIPS_GPA if bad guest physical address.
+ * KVM_MIPS_TLB if guest TLB not present.
+ * KVM_MIPS_TLBINV if guest TLB present but not valid.
+ * KVM_MIPS_TLBMOD if guest TLB read only.
+ */
+enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
+ unsigned long gva,
+ bool write)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
- unsigned long paddr, flags, vpn2, asid;
- unsigned long va = (unsigned long)opc;
- void *vaddr;
- u32 inst;
+ struct kvm_mips_tlb *tlb;
int index;
- if (KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0 ||
- KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
- local_irq_save(flags);
- index = kvm_mips_host_tlb_lookup(vcpu, va);
- if (index >= 0) {
- inst = *(opc);
- } else {
- vpn2 = va & VPN2_MASK;
- asid = kvm_read_c0_guest_entryhi(cop0) &
- KVM_ENTRYHI_ASID;
- index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
- if (index < 0) {
- kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
- __func__, opc, vcpu, read_c0_entryhi());
- kvm_mips_dump_host_tlbs();
- kvm_mips_dump_guest_tlbs(vcpu);
- local_irq_restore(flags);
- return KVM_INVALID_INST;
- }
- if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
- &vcpu->arch.guest_tlb[index])) {
- kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n",
- __func__, opc, index, vcpu,
- read_c0_entryhi());
- kvm_mips_dump_guest_tlbs(vcpu);
- local_irq_restore(flags);
- return KVM_INVALID_INST;
- }
- inst = *(opc);
- }
- local_irq_restore(flags);
- } else if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
- paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu, va);
- vaddr = kmap_atomic(pfn_to_page(PHYS_PFN(paddr)));
- vaddr += paddr & ~PAGE_MASK;
- inst = *(u32 *)vaddr;
- kunmap_atomic(vaddr);
+ if (KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG0) {
+ if (kvm_mips_handle_kseg0_tlb_fault(gva, vcpu, write) < 0)
+ return KVM_MIPS_GPA;
+ } else if ((KVM_GUEST_KSEGX(gva) < KVM_GUEST_KSEG0) ||
+ KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG23) {
+ /* Address should be in the guest TLB */
+ index = kvm_mips_guest_tlb_lookup(vcpu, (gva & VPN2_MASK) |
+ (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID));
+ if (index < 0)
+ return KVM_MIPS_TLB;
+ tlb = &vcpu->arch.guest_tlb[index];
+
+ /* Entry should be valid, and dirty for writes */
+ if (!TLB_IS_VALID(*tlb, gva))
+ return KVM_MIPS_TLBINV;
+ if (write && !TLB_IS_DIRTY(*tlb, gva))
+ return KVM_MIPS_TLBMOD;
+
+ if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, gva, write))
+ return KVM_MIPS_GPA;
} else {
- kvm_err("%s: illegal address: %p\n", __func__, opc);
- return KVM_INVALID_INST;
+ return KVM_MIPS_GVA;
}
- return inst;
+ return KVM_MIPS_MAPPED;
+}
+
+int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
+{
+ int err;
+
+retry:
+ kvm_trap_emul_gva_lockless_begin(vcpu);
+ err = get_user(*out, opc);
+ kvm_trap_emul_gva_lockless_end(vcpu);
+
+ if (unlikely(err)) {
+ /*
+ * Try to handle the fault, maybe we just raced with a GVA
+ * invalidation.
+ */
+ err = kvm_trap_emul_gva_fault(vcpu, (unsigned long)opc,
+ false);
+ if (unlikely(err)) {
+ kvm_err("%s: illegal address: %p\n",
+ __func__, opc);
+ return -EFAULT;
+ }
+
+ /* Hopefully it'll work now */
+ goto retry;
+ }
+ return 0;
}
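
For reference, a minimal sketch of how a caller might use the reworked kvm_get_inst() above, which now returns an error code and writes the fetched instruction through an output pointer instead of returning KVM_INVALID_INST. Only kvm_get_inst() comes from this patch; the surrounding function and its name are hypothetical.

/* Hypothetical caller, not part of this patch: fetch the instruction at
 * 'opc' in the guest address space and propagate any mapping failure. */
static int example_fetch_inst(struct kvm_vcpu *vcpu, u32 *opc, u32 *inst)
{
	int err;

	/* Returns 0 on success, -EFAULT if the GVA could not be mapped */
	err = kvm_get_inst(opc, vcpu, inst);
	if (err)
		return err;

	/* *inst now holds the guest instruction word, ready for emulation */
	return 0;
}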