Diffstat (limited to 'arch/mips/kvm')
-rw-r--r--  arch/mips/kvm/00README.txt      31
-rw-r--r--  arch/mips/kvm/Kconfig           50
-rw-r--r--  arch/mips/kvm/Makefile          24
-rw-r--r--  arch/mips/kvm/callback.c        14
-rw-r--r--  arch/mips/kvm/commpage.c        32
-rw-r--r--  arch/mips/kvm/commpage.h        24
-rw-r--r--  arch/mips/kvm/dyntrans.c       143
-rw-r--r--  arch/mips/kvm/emulate.c       2212
-rw-r--r--  arch/mips/kvm/entry.c          484
-rw-r--r--  arch/mips/kvm/fpu.S              6
-rw-r--r--  arch/mips/kvm/interrupt.c      190
-rw-r--r--  arch/mips/kvm/interrupt.h       32
-rw-r--r--  arch/mips/kvm/loongson_ipi.c   214
-rw-r--r--  arch/mips/kvm/mips.c           546
-rw-r--r--  arch/mips/kvm/mmu.c            582
-rw-r--r--  arch/mips/kvm/stats.c            4
-rw-r--r--  arch/mips/kvm/tlb.c            213
-rw-r--r--  arch/mips/kvm/trace.h            8
-rw-r--r--  arch/mips/kvm/trap_emul.c     1317
-rw-r--r--  arch/mips/kvm/vz.c             346
20 files changed, 1490 insertions, 4982 deletions
diff --git a/arch/mips/kvm/00README.txt b/arch/mips/kvm/00README.txt
deleted file mode 100644
index 51617e481aa3..000000000000
--- a/arch/mips/kvm/00README.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-KVM/MIPS Trap & Emulate Release Notes
-=====================================
-
-(1) KVM/MIPS should support MIPS32R2 and beyond. It has been tested on the following platforms:
- Malta Board with FPGA based 34K
- Sigma Designs TangoX board with a 24K based 8654 SoC.
- Malta Board with 74K @ 1GHz
-
-(2) Both Guest kernel and Guest Userspace execute in UM.
- Guest User address space: 0x00000000 -> 0x40000000
- Guest Kernel Unmapped: 0x40000000 -> 0x60000000
- Guest Kernel Mapped: 0x60000000 -> 0x80000000
-
- Guest Usermode virtual memory is limited to 1GB.
-
-(2) 16K Page Sizes: Both Host Kernel and Guest Kernel should have the same page size, currently at least 16K.
- Note that due to cache aliasing issues, 4K page sizes are NOT supported.
-
-(3) No HugeTLB Support
- Both the host kernel and Guest kernel should have the page size set to 16K.
- This will be implemented in a future release.
-
-(4) KVM/MIPS does not have support for SMP Guests
- Linux-3.7-rc2 based SMP guest hangs due to the following code sequence in the generated TLB handlers:
- LL/TLBP/SC. Since the TLBP instruction causes a trap the reservation gets cleared
- when we ERET back to the guest. This causes the guest to hang in an infinite loop.
- This will be fixed in a future release.
-
-(5) Use Host FPU
- Currently KVM/MIPS emulates a 24K CPU without a FPU.
- This will be fixed in a future release
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
index eac25aef21e0..ab57221fa4dd 100644
--- a/arch/mips/kvm/Kconfig
+++ b/arch/mips/kvm/Kconfig
@@ -6,7 +6,7 @@ source "virt/kvm/Kconfig"
menuconfig VIRTUALIZATION
bool "Virtualization"
- ---help---
+ help
Say Y here to get to see options for using your Linux host to run
other operating systems inside virtual machines (guests).
This option alone does not add any kernel code.
@@ -17,61 +17,27 @@ if VIRTUALIZATION
config KVM
tristate "Kernel-based Virtual Machine (KVM) support"
- depends on HAVE_KVM
+ depends on CPU_SUPPORTS_VZ
depends on MIPS_FP_SUPPORT
select EXPORT_UASM
- select PREEMPT_NOTIFIERS
+ select KVM_COMMON
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select HAVE_KVM_VCPU_ASYNC_IOCTL
select KVM_MMIO
- select MMU_NOTIFIER
- select SRCU
- ---help---
+ select KVM_GENERIC_MMU_NOTIFIER
+ select KVM_GENERIC_HARDWARE_ENABLING
+ select HAVE_KVM_READONLY_MEM
+ help
Support for hosting Guest kernels.
-choice
- prompt "Virtualization mode"
- depends on KVM
- default KVM_MIPS_TE
-
-config KVM_MIPS_TE
- bool "Trap & Emulate"
- ---help---
- Use trap and emulate to virtualize 32-bit guests in user mode. This
- does not require any special hardware Virtualization support beyond
- standard MIPS32/64 r2 or later, but it does require the guest kernel
- to be configured with CONFIG_KVM_GUEST=y so that it resides in the
- user address segment.
-
-config KVM_MIPS_VZ
- bool "MIPS Virtualization (VZ) ASE"
- ---help---
- Use the MIPS Virtualization (VZ) ASE to virtualize guests. This
- supports running unmodified guest kernels (with CONFIG_KVM_GUEST=n),
- but requires hardware support.
-
-endchoice
-
-config KVM_MIPS_DYN_TRANS
- bool "KVM/MIPS: Dynamic binary translation to reduce traps"
- depends on KVM_MIPS_TE
- default y
- ---help---
- When running in Trap & Emulate mode patch privileged
- instructions to reduce the number of traps.
-
- If unsure, say Y.
-
config KVM_MIPS_DEBUG_COP0_COUNTERS
bool "Maintain counters for COP0 accesses"
depends on KVM
- ---help---
+ help
Maintain statistics for Guest COP0 accesses.
A histogram of COP0 accesses is printed when the VM is
shutdown.
If unsure, say N.
-source "drivers/vhost/Kconfig"
-
endif # VIRTUALIZATION
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
index 01affc1d21c5..805aeea2166e 100644
--- a/arch/mips/kvm/Makefile
+++ b/arch/mips/kvm/Makefile
@@ -2,23 +2,19 @@
# Makefile for KVM support for MIPS
#
-common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
+include $(srctree)/virt/kvm/Makefile.kvm
-EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
+ccflags-y += -Ivirt/kvm -Iarch/mips/kvm
-common-objs-$(CONFIG_CPU_HAS_MSA) += msa.o
+kvm-$(CONFIG_CPU_HAS_MSA) += msa.o
-kvm-objs := $(common-objs-y) mips.o emulate.o entry.o \
- interrupt.o stats.o commpage.o \
+kvm-y += mips.o emulate.o entry.o \
+ interrupt.o stats.o \
fpu.o
-kvm-objs += hypcall.o
-kvm-objs += mmu.o
+kvm-y += hypcall.o
+kvm-y += mmu.o
+kvm-$(CONFIG_CPU_LOONGSON64) += loongson_ipi.o
-ifdef CONFIG_KVM_MIPS_VZ
-kvm-objs += vz.o
-else
-kvm-objs += dyntrans.o
-kvm-objs += trap_emul.o
-endif
+kvm-y += vz.o
obj-$(CONFIG_KVM) += kvm.o
-obj-y += callback.o tlb.o
+obj-y += tlb.o
diff --git a/arch/mips/kvm/callback.c b/arch/mips/kvm/callback.c
deleted file mode 100644
index d88aa2173fb0..000000000000
--- a/arch/mips/kvm/callback.c
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
- * Authors: Yann Le Du <ledu@kymasys.com>
- */
-
-#include <linux/export.h>
-#include <linux/kvm_host.h>
-
-struct kvm_mips_callbacks *kvm_mips_callbacks;
-EXPORT_SYMBOL_GPL(kvm_mips_callbacks);
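The deleted callback.c carried nothing but the definition of kvm_mips_callbacks, the ops-table pointer through which core KVM/MIPS dispatched into whichever backend (trap-and-emulate or VZ) was built in; with only VZ left, the definition can live with the rest of the core code. A minimal, self-contained sketch of the pattern, with illustrative names rather than the kernel's:

#include <stdio.h>

/* Hypothetical stand-in for the backend ops table: each virtualization
 * backend fills in one of these and core code dispatches through the
 * single registered pointer. */
struct vz_callbacks {
        int (*vcpu_setup)(int vcpu_id);
        void (*queue_timer_int)(int vcpu_id);
};

static int vz_vcpu_setup(int vcpu_id)
{
        printf("vcpu %d: VZ backend setup\n", vcpu_id);
        return 0;
}

static void vz_queue_timer_int(int vcpu_id)
{
        printf("vcpu %d: timer interrupt queued\n", vcpu_id);
}

static const struct vz_callbacks vz_ops = {
        .vcpu_setup      = vz_vcpu_setup,
        .queue_timer_int = vz_queue_timer_int,
};

/* The one definition the deleted file held: a pointer the selected
 * backend aims at its own table during init. */
static const struct vz_callbacks *callbacks = &vz_ops;

int main(void)
{
        callbacks->vcpu_setup(0);       /* every dispatch is an indirect call */
        callbacks->queue_timer_int(0);
        return 0;
}

The indirection bought a build-time choice of backend at the price of an indirect call per dispatch, which is why collapsing to one backend is a small win as well as a cleanup.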
diff --git a/arch/mips/kvm/commpage.c b/arch/mips/kvm/commpage.c
deleted file mode 100644
index 5812e6145801..000000000000
--- a/arch/mips/kvm/commpage.c
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * commpage, currently used for Virtual COP0 registers.
- * Mapped into the guest kernel @ KVM_GUEST_COMMPAGE_ADDR.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
- * Authors: Sanjay Lal <sanjayl@kymasys.com>
- */
-
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/vmalloc.h>
-#include <linux/fs.h>
-#include <linux/memblock.h>
-#include <asm/page.h>
-#include <asm/cacheflush.h>
-#include <asm/mmu_context.h>
-
-#include <linux/kvm_host.h>
-
-#include "commpage.h"
-
-void kvm_mips_commpage_init(struct kvm_vcpu *vcpu)
-{
- struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage;
-
- /* Specific init values for fields */
- vcpu->arch.cop0 = &page->cop0;
-}
diff --git a/arch/mips/kvm/commpage.h b/arch/mips/kvm/commpage.h
deleted file mode 100644
index 08c5fa2bbc0f..000000000000
--- a/arch/mips/kvm/commpage.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * KVM/MIPS: commpage: mapped into get kernel space
- *
- * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
- * Authors: Sanjay Lal <sanjayl@kymasys.com>
- */
-
-#ifndef __KVM_MIPS_COMMPAGE_H__
-#define __KVM_MIPS_COMMPAGE_H__
-
-struct kvm_mips_commpage {
- /* COP0 state is mapped into Guest kernel via commpage */
- struct mips_coproc cop0;
-};
-
-#define KVM_MIPS_COMM_EIDI_OFFSET 0x0
-
-extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu);
-
-#endif /* __KVM_MIPS_COMMPAGE_H__ */
diff --git a/arch/mips/kvm/dyntrans.c b/arch/mips/kvm/dyntrans.c
deleted file mode 100644
index d77b61b3d6ee..000000000000
--- a/arch/mips/kvm/dyntrans.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
- * Authors: Sanjay Lal <sanjayl@kymasys.com>
- */
-
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/highmem.h>
-#include <linux/kvm_host.h>
-#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-#include <linux/fs.h>
-#include <linux/memblock.h>
-#include <asm/cacheflush.h>
-
-#include "commpage.h"
-
-/**
- * kvm_mips_trans_replace() - Replace trapping instruction in guest memory.
- * @vcpu: Virtual CPU.
- * @opc: PC of instruction to replace.
- * @replace: Instruction to write
- */
-static int kvm_mips_trans_replace(struct kvm_vcpu *vcpu, u32 *opc,
- union mips_instruction replace)
-{
- unsigned long vaddr = (unsigned long)opc;
- int err;
-
-retry:
- /* The GVA page table is still active so use the Linux TLB handlers */
- kvm_trap_emul_gva_lockless_begin(vcpu);
- err = put_user(replace.word, opc);
- kvm_trap_emul_gva_lockless_end(vcpu);
-
- if (unlikely(err)) {
- /*
- * We write protect clean pages in GVA page table so normal
- * Linux TLB mod handler doesn't silently dirty the page.
- * Its also possible we raced with a GVA invalidation.
- * Try to force the page to become dirty.
- */
- err = kvm_trap_emul_gva_fault(vcpu, vaddr, true);
- if (unlikely(err)) {
- kvm_info("%s: Address unwriteable: %p\n",
- __func__, opc);
- return -EFAULT;
- }
-
- /*
- * Try again. This will likely trigger a TLB refill, which will
- * fetch the new dirty entry from the GVA page table, which
- * should then succeed.
- */
- goto retry;
- }
- __local_flush_icache_user_range(vaddr, vaddr + 4);
-
- return 0;
-}
-
-int kvm_mips_trans_cache_index(union mips_instruction inst, u32 *opc,
- struct kvm_vcpu *vcpu)
-{
- union mips_instruction nop_inst = { 0 };
-
- /* Replace the CACHE instruction, with a NOP */
- return kvm_mips_trans_replace(vcpu, opc, nop_inst);
-}
-
-/*
- * Address based CACHE instructions are transformed into synci(s). A little
- * heavy for just D-cache invalidates, but avoids an expensive trap
- */
-int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc,
- struct kvm_vcpu *vcpu)
-{
- union mips_instruction synci_inst = { 0 };
-
- synci_inst.i_format.opcode = bcond_op;
- synci_inst.i_format.rs = inst.i_format.rs;
- synci_inst.i_format.rt = synci_op;
- if (cpu_has_mips_r6)
- synci_inst.i_format.simmediate = inst.spec3_format.simmediate;
- else
- synci_inst.i_format.simmediate = inst.i_format.simmediate;
-
- return kvm_mips_trans_replace(vcpu, opc, synci_inst);
-}
-
-int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc,
- struct kvm_vcpu *vcpu)
-{
- union mips_instruction mfc0_inst = { 0 };
- u32 rd, sel;
-
- rd = inst.c0r_format.rd;
- sel = inst.c0r_format.sel;
-
- if (rd == MIPS_CP0_ERRCTL && sel == 0) {
- mfc0_inst.r_format.opcode = spec_op;
- mfc0_inst.r_format.rd = inst.c0r_format.rt;
- mfc0_inst.r_format.func = add_op;
- } else {
- mfc0_inst.i_format.opcode = lw_op;
- mfc0_inst.i_format.rt = inst.c0r_format.rt;
- mfc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR |
- offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
-#ifdef CONFIG_CPU_BIG_ENDIAN
- if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8)
- mfc0_inst.i_format.simmediate |= 4;
-#endif
- }
-
- return kvm_mips_trans_replace(vcpu, opc, mfc0_inst);
-}
-
-int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
- struct kvm_vcpu *vcpu)
-{
- union mips_instruction mtc0_inst = { 0 };
- u32 rd, sel;
-
- rd = inst.c0r_format.rd;
- sel = inst.c0r_format.sel;
-
- mtc0_inst.i_format.opcode = sw_op;
- mtc0_inst.i_format.rt = inst.c0r_format.rt;
- mtc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR |
- offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
-#ifdef CONFIG_CPU_BIG_ENDIAN
- if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8)
- mtc0_inst.i_format.simmediate |= 4;
-#endif
-
- return kvm_mips_trans_replace(vcpu, opc, mtc0_inst);
-}
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 754094b40a75..0feec52222fb 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -30,7 +30,6 @@
#define CONFIG_MIPS_MT
#include "interrupt.h"
-#include "commpage.h"
#include "trace.h"
@@ -64,7 +63,7 @@ static int kvm_compute_return_epc(struct kvm_vcpu *vcpu, unsigned long instpc,
switch (insn.r_format.func) {
case jalr_op:
arch->gprs[insn.r_format.rd] = epc + 8;
- /* Fall through */
+ fallthrough;
case jr_op:
nextpc = arch->gprs[insn.r_format.rs];
break;
@@ -140,7 +139,7 @@ static int kvm_compute_return_epc(struct kvm_vcpu *vcpu, unsigned long instpc,
/* These are unconditional and in j_format. */
case jal_op:
arch->gprs[31] = instpc + 8;
- /* fall through */
+ fallthrough;
case j_op:
epc += 4;
epc >>= 28;
@@ -276,7 +275,8 @@ int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
*out = vcpu->arch.host_cp0_badinstr;
return 0;
} else {
- return kvm_get_inst(opc, vcpu, out);
+ WARN_ONCE(1, "CPU doesn't have BadInstr register\n");
+ return -EINVAL;
}
}
@@ -297,7 +297,8 @@ int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
*out = vcpu->arch.host_cp0_badinstrp;
return 0;
} else {
- return kvm_get_inst(opc, vcpu, out);
+ WARN_ONCE(1, "CPU doesn't have BadInstrp register\n");
+ return -EINVAL;
}
}
@@ -311,7 +312,7 @@ int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
*/
int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
@@ -383,7 +384,7 @@ static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
*/
static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
ktime_t expires, threshold;
u32 count, compare;
int running;
@@ -443,7 +444,7 @@ static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
*/
u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
/* If count disabled just read static copy of count */
if (kvm_mips_count_disabled(vcpu))
@@ -501,7 +502,7 @@ ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
ktime_t now, u32 count)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
u32 compare;
u64 delta;
ktime_t expire;
@@ -530,7 +531,7 @@ static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
* to be used for a period of time, but the exact ktime corresponding to the
* final Count that must be restored is not known.
*
- * It is gauranteed that a timer interrupt immediately after restore will be
+ * It is guaranteed that a timer interrupt immediately after restore will be
* handled, but not if CP0_Compare is exactly at @count. That case should
* already be handled when the hardware timer state is saved.
*
@@ -602,7 +603,7 @@ resume:
*/
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
ktime_t now;
/* Calculate bias */
@@ -648,7 +649,7 @@ void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz)
*/
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
int dc;
ktime_t now;
u32 count;
@@ -695,7 +696,7 @@ int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
*/
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
int dc;
u32 old_compare = kvm_read_c0_guest_compare(cop0);
s32 delta = compare - old_compare;
@@ -721,7 +722,7 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
* preemption until the new value is written to prevent restore of a
* GTOffset corresponding to the old CP0_Compare value.
*/
- if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && delta > 0) {
+ if (delta > 0) {
preempt_disable();
write_c0_gtoffset(compare - read_c0_count());
back_to_back_c0_hazard();
@@ -734,7 +735,7 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
if (ack)
kvm_mips_callbacks->dequeue_timer_int(vcpu);
- else if (IS_ENABLED(CONFIG_KVM_MIPS_VZ))
+ else
/*
* With VZ, writing CP0_Compare acks (clears) CP0_Cause.TI, so
* preserve guest CP0_Cause.TI if we don't want to ack it.
@@ -743,15 +744,13 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
kvm_write_c0_guest_compare(cop0, compare);
- if (IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
- if (delta > 0)
- preempt_enable();
+ if (delta > 0)
+ preempt_enable();
- back_to_back_c0_hazard();
+ back_to_back_c0_hazard();
- if (!ack && cause & CAUSEF_TI)
- kvm_write_c0_guest_cause(cop0, cause);
- }
+ if (!ack && cause & CAUSEF_TI)
+ kvm_write_c0_guest_cause(cop0, cause);
/* resume_hrtimer() takes care of timer interrupts > count */
if (!dc)
@@ -762,7 +761,7 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
* until after the new CP0_Compare is written, otherwise new guest
* CP0_Count could hit new guest CP0_Compare.
*/
- if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && delta <= 0)
+ if (delta <= 0)
write_c0_gtoffset(compare - read_c0_count());
}
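Behind these Count/Compare hunks is one recurring conversion: the guest timer ticks at count_hz while the host hrtimer is armed in nanoseconds, and the 32-bit counter wraps. A freestanding sketch of the arithmetic, with illustrative names rather than the kernel's helpers:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Ticks until CP0_Compare fires: 32-bit modular subtraction, so a Compare
 * numerically below Count yields the correct wrap-around distance. */
static uint32_t ticks_to_expiry(uint32_t count, uint32_t compare)
{
        return compare - count;
}

/* Nanosecond delay used to arm the host hrtimer for the guest timer. */
static uint64_t ns_to_expiry(uint32_t count, uint32_t compare,
                             uint32_t count_hz)
{
        return (uint64_t)ticks_to_expiry(count, compare) *
               NSEC_PER_SEC / count_hz;
}

int main(void)
{
        /* wrap-around case: 0x200 ticks to go at 100 MHz -> 5120 ns */
        printf("expires in %llu ns\n",
               (unsigned long long)ns_to_expiry(0xffffff00u, 0x00000100u,
                                                100000000u));
        return 0;
}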
@@ -780,7 +779,7 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
*/
static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
u32 count;
ktime_t now;
@@ -807,7 +806,7 @@ static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
*/
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
@@ -827,7 +826,7 @@ void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
*/
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
u32 count;
kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);
@@ -853,7 +852,7 @@ void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
*/
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
s64 changed = count_ctl ^ vcpu->arch.count_ctl;
s64 delta;
ktime_t expire, now;
@@ -943,29 +942,6 @@ enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
return HRTIMER_RESTART;
}
-enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- enum emulation_result er = EMULATE_DONE;
-
- if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
- kvm_clear_c0_guest_status(cop0, ST0_ERL);
- vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
- } else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
- kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
- kvm_read_c0_guest_epc(cop0));
- kvm_clear_c0_guest_status(cop0, ST0_EXL);
- vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
-
- } else {
- kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
- vcpu->arch.pc);
- er = EMULATE_FAIL;
- }
-
- return er;
-}
-
enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
{
kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
@@ -976,633 +952,29 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
if (!vcpu->arch.pending_exceptions) {
kvm_vz_lose_htimer(vcpu);
vcpu->arch.wait = 1;
- kvm_vcpu_block(vcpu);
+ kvm_vcpu_halt(vcpu);
/*
- * We we are runnable, then definitely go off to user space to
+ * We are runnable, then definitely go off to user space to
* check if any I/O interrupts are pending.
*/
- if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
- kvm_clear_request(KVM_REQ_UNHALT, vcpu);
+ if (kvm_arch_vcpu_runnable(vcpu))
vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
- }
- }
-
- return EMULATE_DONE;
-}
-
-static void kvm_mips_change_entryhi(struct kvm_vcpu *vcpu,
- unsigned long entryhi)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
- int cpu, i;
- u32 nasid = entryhi & KVM_ENTRYHI_ASID;
-
- if (((kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID) != nasid)) {
- trace_kvm_asid_change(vcpu, kvm_read_c0_guest_entryhi(cop0) &
- KVM_ENTRYHI_ASID, nasid);
-
- /*
- * Flush entries from the GVA page tables.
- * Guest user page table will get flushed lazily on re-entry to
- * guest user if the guest ASID actually changes.
- */
- kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_KERN);
-
- /*
- * Regenerate/invalidate kernel MMU context.
- * The user MMU context will be regenerated lazily on re-entry
- * to guest user if the guest ASID actually changes.
- */
- preempt_disable();
- cpu = smp_processor_id();
- get_new_mmu_context(kern_mm);
- for_each_possible_cpu(i)
- if (i != cpu)
- set_cpu_context(i, kern_mm, 0);
- preempt_enable();
- }
- kvm_write_c0_guest_entryhi(cop0, entryhi);
-}
-
-enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_mips_tlb *tlb;
- unsigned long pc = vcpu->arch.pc;
- int index;
-
- index = kvm_read_c0_guest_index(cop0);
- if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
- /* UNDEFINED */
- kvm_debug("[%#lx] TLBR Index %#x out of range\n", pc, index);
- index &= KVM_MIPS_GUEST_TLB_SIZE - 1;
}
- tlb = &vcpu->arch.guest_tlb[index];
- kvm_write_c0_guest_pagemask(cop0, tlb->tlb_mask);
- kvm_write_c0_guest_entrylo0(cop0, tlb->tlb_lo[0]);
- kvm_write_c0_guest_entrylo1(cop0, tlb->tlb_lo[1]);
- kvm_mips_change_entryhi(vcpu, tlb->tlb_hi);
-
return EMULATE_DONE;
}
-/**
- * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map.
- * @vcpu: VCPU with changed mappings.
- * @tlb: TLB entry being removed.
- *
- * This is called to indicate a single change in guest MMU mappings, so that we
- * can arrange TLB flushes on this and other CPUs.
- */
-static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
- struct kvm_mips_tlb *tlb)
-{
- struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
- struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
- int cpu, i;
- bool user;
-
- /* No need to flush for entries which are already invalid */
- if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V))
- return;
- /* Don't touch host kernel page tables or TLB mappings */
- if ((unsigned long)tlb->tlb_hi > 0x7fffffff)
- return;
- /* User address space doesn't need flushing for KSeg2/3 changes */
- user = tlb->tlb_hi < KVM_GUEST_KSEG0;
-
- preempt_disable();
-
- /* Invalidate page table entries */
- kvm_trap_emul_invalidate_gva(vcpu, tlb->tlb_hi & VPN2_MASK, user);
-
- /*
- * Probe the shadow host TLB for the entry being overwritten, if one
- * matches, invalidate it
- */
- kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi, user, true);
-
- /* Invalidate the whole ASID on other CPUs */
- cpu = smp_processor_id();
- for_each_possible_cpu(i) {
- if (i == cpu)
- continue;
- if (user)
- set_cpu_context(i, user_mm, 0);
- set_cpu_context(i, kern_mm, 0);
- }
-
- preempt_enable();
-}
-
-/* Write Guest TLB Entry @ Index */
-enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- int index = kvm_read_c0_guest_index(cop0);
- struct kvm_mips_tlb *tlb = NULL;
- unsigned long pc = vcpu->arch.pc;
-
- if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
- kvm_debug("%s: illegal index: %d\n", __func__, index);
- kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
- pc, index, kvm_read_c0_guest_entryhi(cop0),
- kvm_read_c0_guest_entrylo0(cop0),
- kvm_read_c0_guest_entrylo1(cop0),
- kvm_read_c0_guest_pagemask(cop0));
- index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
- }
-
- tlb = &vcpu->arch.guest_tlb[index];
-
- kvm_mips_invalidate_guest_tlb(vcpu, tlb);
-
- tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
- tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
- tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
- tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);
-
- kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
- pc, index, kvm_read_c0_guest_entryhi(cop0),
- kvm_read_c0_guest_entrylo0(cop0),
- kvm_read_c0_guest_entrylo1(cop0),
- kvm_read_c0_guest_pagemask(cop0));
-
- return EMULATE_DONE;
-}
-
-/* Write Guest TLB Entry @ Random Index */
-enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_mips_tlb *tlb = NULL;
- unsigned long pc = vcpu->arch.pc;
- int index;
-
- index = prandom_u32_max(KVM_MIPS_GUEST_TLB_SIZE);
- tlb = &vcpu->arch.guest_tlb[index];
-
- kvm_mips_invalidate_guest_tlb(vcpu, tlb);
-
- tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
- tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
- tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
- tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);
-
- kvm_debug("[%#lx] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
- pc, index, kvm_read_c0_guest_entryhi(cop0),
- kvm_read_c0_guest_entrylo0(cop0),
- kvm_read_c0_guest_entrylo1(cop0));
-
- return EMULATE_DONE;
-}
-
-enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- long entryhi = kvm_read_c0_guest_entryhi(cop0);
- unsigned long pc = vcpu->arch.pc;
- int index = -1;
-
- index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
-
- kvm_write_c0_guest_index(cop0, index);
-
- kvm_debug("[%#lx] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
- index);
-
- return EMULATE_DONE;
-}
-
-/**
- * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1
- * @vcpu: Virtual CPU.
- *
- * Finds the mask of bits which are writable in the guest's Config1 CP0
- * register, by userland (currently read-only to the guest).
- */
-unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
-{
- unsigned int mask = 0;
-
- /* Permit FPU to be present if FPU is supported */
- if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
- mask |= MIPS_CONF1_FP;
-
- return mask;
-}
-
-/**
- * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3
- * @vcpu: Virtual CPU.
- *
- * Finds the mask of bits which are writable in the guest's Config3 CP0
- * register, by userland (currently read-only to the guest).
- */
-unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
-{
- /* Config4 and ULRI are optional */
- unsigned int mask = MIPS_CONF_M | MIPS_CONF3_ULRI;
-
- /* Permit MSA to be present if MSA is supported */
- if (kvm_mips_guest_can_have_msa(&vcpu->arch))
- mask |= MIPS_CONF3_MSA;
-
- return mask;
-}
-
-/**
- * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4
- * @vcpu: Virtual CPU.
- *
- * Finds the mask of bits which are writable in the guest's Config4 CP0
- * register, by userland (currently read-only to the guest).
- */
-unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
-{
- /* Config5 is optional */
- unsigned int mask = MIPS_CONF_M;
-
- /* KScrExist */
- mask |= 0xfc << MIPS_CONF4_KSCREXIST_SHIFT;
-
- return mask;
-}
-
-/**
- * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5
- * @vcpu: Virtual CPU.
- *
- * Finds the mask of bits which are writable in the guest's Config5 CP0
- * register, by the guest itself.
- */
-unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
-{
- unsigned int mask = 0;
-
- /* Permit MSAEn changes if MSA supported and enabled */
- if (kvm_mips_guest_has_msa(&vcpu->arch))
- mask |= MIPS_CONF5_MSAEN;
-
- /*
- * Permit guest FPU mode changes if FPU is enabled and the relevant
- * feature exists according to FIR register.
- */
- if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
- if (cpu_has_fre)
- mask |= MIPS_CONF5_FRE;
- /* We don't support UFR or UFE */
- }
-
- return mask;
-}
-
-enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
- u32 *opc, u32 cause,
- struct kvm_run *run,
- struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- enum emulation_result er = EMULATE_DONE;
- u32 rt, rd, sel;
- unsigned long curr_pc;
-
- /*
- * Update PC and hold onto current PC in case there is
- * an error and we want to rollback the PC
- */
- curr_pc = vcpu->arch.pc;
- er = update_pc(vcpu, cause);
- if (er == EMULATE_FAIL)
- return er;
-
- if (inst.co_format.co) {
- switch (inst.co_format.func) {
- case tlbr_op: /* Read indexed TLB entry */
- er = kvm_mips_emul_tlbr(vcpu);
- break;
- case tlbwi_op: /* Write indexed */
- er = kvm_mips_emul_tlbwi(vcpu);
- break;
- case tlbwr_op: /* Write random */
- er = kvm_mips_emul_tlbwr(vcpu);
- break;
- case tlbp_op: /* TLB Probe */
- er = kvm_mips_emul_tlbp(vcpu);
- break;
- case rfe_op:
- kvm_err("!!!COP0_RFE!!!\n");
- break;
- case eret_op:
- er = kvm_mips_emul_eret(vcpu);
- goto dont_update_pc;
- case wait_op:
- er = kvm_mips_emul_wait(vcpu);
- break;
- case hypcall_op:
- er = kvm_mips_emul_hypcall(vcpu, inst);
- break;
- }
- } else {
- rt = inst.c0r_format.rt;
- rd = inst.c0r_format.rd;
- sel = inst.c0r_format.sel;
-
- switch (inst.c0r_format.rs) {
- case mfc_op:
-#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
- cop0->stat[rd][sel]++;
-#endif
- /* Get reg */
- if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
- vcpu->arch.gprs[rt] =
- (s32)kvm_mips_read_count(vcpu);
- } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
- vcpu->arch.gprs[rt] = 0x0;
-#ifdef CONFIG_KVM_MIPS_DYN_TRANS
- kvm_mips_trans_mfc0(inst, opc, vcpu);
-#endif
- } else {
- vcpu->arch.gprs[rt] = (s32)cop0->reg[rd][sel];
-
-#ifdef CONFIG_KVM_MIPS_DYN_TRANS
- kvm_mips_trans_mfc0(inst, opc, vcpu);
-#endif
- }
-
- trace_kvm_hwr(vcpu, KVM_TRACE_MFC0,
- KVM_TRACE_COP0(rd, sel),
- vcpu->arch.gprs[rt]);
- break;
-
- case dmfc_op:
- vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
-
- trace_kvm_hwr(vcpu, KVM_TRACE_DMFC0,
- KVM_TRACE_COP0(rd, sel),
- vcpu->arch.gprs[rt]);
- break;
-
- case mtc_op:
-#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
- cop0->stat[rd][sel]++;
-#endif
- trace_kvm_hwr(vcpu, KVM_TRACE_MTC0,
- KVM_TRACE_COP0(rd, sel),
- vcpu->arch.gprs[rt]);
-
- if ((rd == MIPS_CP0_TLB_INDEX)
- && (vcpu->arch.gprs[rt] >=
- KVM_MIPS_GUEST_TLB_SIZE)) {
- kvm_err("Invalid TLB Index: %ld",
- vcpu->arch.gprs[rt]);
- er = EMULATE_FAIL;
- break;
- }
- if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
- /*
- * Preserve core number, and keep the exception
- * base in guest KSeg0.
- */
- kvm_change_c0_guest_ebase(cop0, 0x1ffff000,
- vcpu->arch.gprs[rt]);
- } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
- kvm_mips_change_entryhi(vcpu,
- vcpu->arch.gprs[rt]);
- }
- /* Are we writing to COUNT */
- else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
- kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
- goto done;
- } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
- /* If we are writing to COMPARE */
- /* Clear pending timer interrupt, if any */
- kvm_mips_write_compare(vcpu,
- vcpu->arch.gprs[rt],
- true);
- } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
- unsigned int old_val, val, change;
-
- old_val = kvm_read_c0_guest_status(cop0);
- val = vcpu->arch.gprs[rt];
- change = val ^ old_val;
-
- /* Make sure that the NMI bit is never set */
- val &= ~ST0_NMI;
-
- /*
- * Don't allow CU1 or FR to be set unless FPU
- * capability enabled and exists in guest
- * configuration.
- */
- if (!kvm_mips_guest_has_fpu(&vcpu->arch))
- val &= ~(ST0_CU1 | ST0_FR);
-
- /*
- * Also don't allow FR to be set if host doesn't
- * support it.
- */
- if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
- val &= ~ST0_FR;
-
-
- /* Handle changes in FPU mode */
- preempt_disable();
-
- /*
- * FPU and Vector register state is made
- * UNPREDICTABLE by a change of FR, so don't
- * even bother saving it.
- */
- if (change & ST0_FR)
- kvm_drop_fpu(vcpu);
-
- /*
- * If MSA state is already live, it is undefined
- * how it interacts with FR=0 FPU state, and we
- * don't want to hit reserved instruction
- * exceptions trying to save the MSA state later
- * when CU=1 && FR=1, so play it safe and save
- * it first.
- */
- if (change & ST0_CU1 && !(val & ST0_FR) &&
- vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
- kvm_lose_fpu(vcpu);
-
- /*
- * Propagate CU1 (FPU enable) changes
- * immediately if the FPU context is already
- * loaded. When disabling we leave the context
- * loaded so it can be quickly enabled again in
- * the near future.
- */
- if (change & ST0_CU1 &&
- vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
- change_c0_status(ST0_CU1, val);
-
- preempt_enable();
-
- kvm_write_c0_guest_status(cop0, val);
-
-#ifdef CONFIG_KVM_MIPS_DYN_TRANS
- /*
- * If FPU present, we need CU1/FR bits to take
- * effect fairly soon.
- */
- if (!kvm_mips_guest_has_fpu(&vcpu->arch))
- kvm_mips_trans_mtc0(inst, opc, vcpu);
-#endif
- } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
- unsigned int old_val, val, change, wrmask;
-
- old_val = kvm_read_c0_guest_config5(cop0);
- val = vcpu->arch.gprs[rt];
-
- /* Only a few bits are writable in Config5 */
- wrmask = kvm_mips_config5_wrmask(vcpu);
- change = (val ^ old_val) & wrmask;
- val = old_val ^ change;
-
-
- /* Handle changes in FPU/MSA modes */
- preempt_disable();
-
- /*
- * Propagate FRE changes immediately if the FPU
- * context is already loaded.
- */
- if (change & MIPS_CONF5_FRE &&
- vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
- change_c0_config5(MIPS_CONF5_FRE, val);
-
- /*
- * Propagate MSAEn changes immediately if the
- * MSA context is already loaded. When disabling
- * we leave the context loaded so it can be
- * quickly enabled again in the near future.
- */
- if (change & MIPS_CONF5_MSAEN &&
- vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
- change_c0_config5(MIPS_CONF5_MSAEN,
- val);
-
- preempt_enable();
-
- kvm_write_c0_guest_config5(cop0, val);
- } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
- u32 old_cause, new_cause;
-
- old_cause = kvm_read_c0_guest_cause(cop0);
- new_cause = vcpu->arch.gprs[rt];
- /* Update R/W bits */
- kvm_change_c0_guest_cause(cop0, 0x08800300,
- new_cause);
- /* DC bit enabling/disabling timer? */
- if ((old_cause ^ new_cause) & CAUSEF_DC) {
- if (new_cause & CAUSEF_DC)
- kvm_mips_count_disable_cause(vcpu);
- else
- kvm_mips_count_enable_cause(vcpu);
- }
- } else if ((rd == MIPS_CP0_HWRENA) && (sel == 0)) {
- u32 mask = MIPS_HWRENA_CPUNUM |
- MIPS_HWRENA_SYNCISTEP |
- MIPS_HWRENA_CC |
- MIPS_HWRENA_CCRES;
-
- if (kvm_read_c0_guest_config3(cop0) &
- MIPS_CONF3_ULRI)
- mask |= MIPS_HWRENA_ULR;
- cop0->reg[rd][sel] = vcpu->arch.gprs[rt] & mask;
- } else {
- cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
-#ifdef CONFIG_KVM_MIPS_DYN_TRANS
- kvm_mips_trans_mtc0(inst, opc, vcpu);
-#endif
- }
- break;
-
- case dmtc_op:
- kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
- vcpu->arch.pc, rt, rd, sel);
- trace_kvm_hwr(vcpu, KVM_TRACE_DMTC0,
- KVM_TRACE_COP0(rd, sel),
- vcpu->arch.gprs[rt]);
- er = EMULATE_FAIL;
- break;
-
- case mfmc0_op:
-#ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
- cop0->stat[MIPS_CP0_STATUS][0]++;
-#endif
- if (rt != 0)
- vcpu->arch.gprs[rt] =
- kvm_read_c0_guest_status(cop0);
- /* EI */
- if (inst.mfmc0_format.sc) {
- kvm_debug("[%#lx] mfmc0_op: EI\n",
- vcpu->arch.pc);
- kvm_set_c0_guest_status(cop0, ST0_IE);
- } else {
- kvm_debug("[%#lx] mfmc0_op: DI\n",
- vcpu->arch.pc);
- kvm_clear_c0_guest_status(cop0, ST0_IE);
- }
-
- break;
-
- case wrpgpr_op:
- {
- u32 css = cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
- u32 pss =
- (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
- /*
- * We don't support any shadow register sets, so
- * SRSCtl[PSS] == SRSCtl[CSS] = 0
- */
- if (css || pss) {
- er = EMULATE_FAIL;
- break;
- }
- kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
- vcpu->arch.gprs[rt]);
- vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
- }
- break;
- default:
- kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
- vcpu->arch.pc, inst.c0r_format.rs);
- er = EMULATE_FAIL;
- break;
- }
- }
-
-done:
- /* Rollback PC only if emulation was unsuccessful */
- if (er == EMULATE_FAIL)
- vcpu->arch.pc = curr_pc;
-
-dont_update_pc:
- /*
- * This is for special instructions whose emulation
- * updates the PC, so do not overwrite the PC under
- * any circumstances
- */
-
- return er;
-}
-
enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
u32 cause,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
+ int r;
enum emulation_result er;
u32 rt;
+ struct kvm_run *run = vcpu->run;
void *data = run->mmio.data;
+ unsigned int imme;
unsigned long curr_pc;
/*
@@ -1622,7 +994,7 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
goto out_fail;
switch (inst.i_format.opcode) {
-#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
+#if defined(CONFIG_64BIT)
case sd_op:
run->mmio.len = 8;
*(u64 *)data = vcpu->arch.gprs[rt];
@@ -1660,15 +1032,231 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
vcpu->arch.gprs[rt], *(u8 *)data);
break;
+ case swl_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x3);
+ run->mmio.len = 4;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x3;
+ switch (imme) {
+ case 0:
+ *(u32 *)data = ((*(u32 *)data) & 0xffffff00) |
+ (vcpu->arch.gprs[rt] >> 24);
+ break;
+ case 1:
+ *(u32 *)data = ((*(u32 *)data) & 0xffff0000) |
+ (vcpu->arch.gprs[rt] >> 16);
+ break;
+ case 2:
+ *(u32 *)data = ((*(u32 *)data) & 0xff000000) |
+ (vcpu->arch.gprs[rt] >> 8);
+ break;
+ case 3:
+ *(u32 *)data = vcpu->arch.gprs[rt];
+ break;
+ default:
+ break;
+ }
+
+ kvm_debug("[%#lx] OP_SWL: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u32 *)data);
+ break;
+
+ case swr_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x3);
+ run->mmio.len = 4;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x3;
+ switch (imme) {
+ case 0:
+ *(u32 *)data = vcpu->arch.gprs[rt];
+ break;
+ case 1:
+ *(u32 *)data = ((*(u32 *)data) & 0xff) |
+ (vcpu->arch.gprs[rt] << 8);
+ break;
+ case 2:
+ *(u32 *)data = ((*(u32 *)data) & 0xffff) |
+ (vcpu->arch.gprs[rt] << 16);
+ break;
+ case 3:
+ *(u32 *)data = ((*(u32 *)data) & 0xffffff) |
+ (vcpu->arch.gprs[rt] << 24);
+ break;
+ default:
+ break;
+ }
+
+ kvm_debug("[%#lx] OP_SWR: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u32 *)data);
+ break;
+
+#if defined(CONFIG_64BIT)
+ case sdl_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x7);
+
+ run->mmio.len = 8;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x7;
+ switch (imme) {
+ case 0:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffffffffff00) |
+ ((vcpu->arch.gprs[rt] >> 56) & 0xff);
+ break;
+ case 1:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffffffff0000) |
+ ((vcpu->arch.gprs[rt] >> 48) & 0xffff);
+ break;
+ case 2:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffffff000000) |
+ ((vcpu->arch.gprs[rt] >> 40) & 0xffffff);
+ break;
+ case 3:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffff00000000) |
+ ((vcpu->arch.gprs[rt] >> 32) & 0xffffffff);
+ break;
+ case 4:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffff0000000000) |
+ ((vcpu->arch.gprs[rt] >> 24) & 0xffffffffff);
+ break;
+ case 5:
+ *(u64 *)data = ((*(u64 *)data) & 0xffff000000000000) |
+ ((vcpu->arch.gprs[rt] >> 16) & 0xffffffffffff);
+ break;
+ case 6:
+ *(u64 *)data = ((*(u64 *)data) & 0xff00000000000000) |
+ ((vcpu->arch.gprs[rt] >> 8) & 0xffffffffffffff);
+ break;
+ case 7:
+ *(u64 *)data = vcpu->arch.gprs[rt];
+ break;
+ default:
+ break;
+ }
+
+ kvm_debug("[%#lx] OP_SDL: eaddr: %#lx, gpr: %#lx, data: %llx\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u64 *)data);
+ break;
+
+ case sdr_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x7);
+
+ run->mmio.len = 8;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x7;
+ switch (imme) {
+ case 0:
+ *(u64 *)data = vcpu->arch.gprs[rt];
+ break;
+ case 1:
+ *(u64 *)data = ((*(u64 *)data) & 0xff) |
+ (vcpu->arch.gprs[rt] << 8);
+ break;
+ case 2:
+ *(u64 *)data = ((*(u64 *)data) & 0xffff) |
+ (vcpu->arch.gprs[rt] << 16);
+ break;
+ case 3:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffff) |
+ (vcpu->arch.gprs[rt] << 24);
+ break;
+ case 4:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffff) |
+ (vcpu->arch.gprs[rt] << 32);
+ break;
+ case 5:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffffff) |
+ (vcpu->arch.gprs[rt] << 40);
+ break;
+ case 6:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffffffff) |
+ (vcpu->arch.gprs[rt] << 48);
+ break;
+ case 7:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffffffffff) |
+ (vcpu->arch.gprs[rt] << 56);
+ break;
+ default:
+ break;
+ }
+
+ kvm_debug("[%#lx] OP_SDR: eaddr: %#lx, gpr: %#lx, data: %llx\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u64 *)data);
+ break;
+#endif
+
+#ifdef CONFIG_CPU_LOONGSON64
+ case sdc2_op:
+ rt = inst.loongson3_lsdc2_format.rt;
+ switch (inst.loongson3_lsdc2_format.opcode1) {
+ /*
+ * Loongson-3 overridden sdc2 instructions.
+ * opcode1 instruction
+ * 0x0 gssbx: store 1 byte from GPR
+ * 0x1 gsshx: store 2 bytes from GPR
+ * 0x2 gsswx: store 4 bytes from GPR
+ * 0x3 gssdx: store 8 bytes from GPR
+ */
+ case 0x0:
+ run->mmio.len = 1;
+ *(u8 *)data = vcpu->arch.gprs[rt];
+
+ kvm_debug("[%#lx] OP_GSSBX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u8 *)data);
+ break;
+ case 0x1:
+ run->mmio.len = 2;
+ *(u16 *)data = vcpu->arch.gprs[rt];
+
+ kvm_debug("[%#lx] OP_GSSSHX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u16 *)data);
+ break;
+ case 0x2:
+ run->mmio.len = 4;
+ *(u32 *)data = vcpu->arch.gprs[rt];
+
+ kvm_debug("[%#lx] OP_GSSWX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u32 *)data);
+ break;
+ case 0x3:
+ run->mmio.len = 8;
+ *(u64 *)data = vcpu->arch.gprs[rt];
+
+ kvm_debug("[%#lx] OP_GSSDX: eaddr: %#lx, gpr: %#lx, data: %#llx\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u64 *)data);
+ break;
+ default:
+ kvm_err("Godson Extended GS-Store not yet supported (inst=0x%08x)\n",
+ inst.word);
+ break;
+ }
+ break;
+#endif
default:
kvm_err("Store not yet supported (inst=0x%08x)\n",
inst.word);
goto out_fail;
}
- run->mmio.is_write = 1;
vcpu->mmio_needed = 1;
+ run->mmio.is_write = 1;
vcpu->mmio_is_write = 1;
+
+ r = kvm_io_bus_write(vcpu, KVM_MMIO_BUS,
+ run->mmio.phys_addr, run->mmio.len, data);
+
+ if (!r) {
+ vcpu->mmio_needed = 0;
+ return EMULATE_DONE;
+ }
+
return EMULATE_DO_MMIO;
out_fail:
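The swl/swr (and sdl/sdr) cases added above reproduce the MIPS unaligned store pair for MMIO: the low bits of the faulting address select how many bytes of the register are merged into the aligned word held in the mmio buffer. A freestanding sketch of the merge the switch tables implement (helper names are made up; the tables correspond to a little-endian guest, as on Loongson hosts):

#include <stdint.h>
#include <stdio.h>

/* Byte-merging for unaligned stores, mirroring the swl/swr switch tables
 * above: 'mem' is the aligned word currently in memory, 'reg' the source
 * GPR, 'addr' the faulting byte address. */
static uint32_t merge_swl(uint32_t mem, uint32_t reg, unsigned long addr)
{
        switch (addr & 3) {
        case 0:  return (mem & 0xffffff00) | (reg >> 24);
        case 1:  return (mem & 0xffff0000) | (reg >> 16);
        case 2:  return (mem & 0xff000000) | (reg >> 8);
        default: return reg;
        }
}

static uint32_t merge_swr(uint32_t mem, uint32_t reg, unsigned long addr)
{
        switch (addr & 3) {
        case 0:  return reg;
        case 1:  return (mem & 0x000000ff) | (reg << 8);
        case 2:  return (mem & 0x0000ffff) | (reg << 16);
        default: return (mem & 0x00ffffff) | (reg << 24);
        }
}

int main(void)
{
        uint32_t mem = 0x11223344, reg = 0xaabbccdd;

        printf("swl @1: %08x\n", merge_swl(mem, reg, 1));  /* 1122aabb */
        printf("swr @1: %08x\n", merge_swr(mem, reg, 1));  /* bbccdd44 */
        return 0;
}

Between them, an swl/swr pair issued by the guest covers one unaligned 32-bit store; the emulation performs the merge, then hands the aligned word to the MMIO machinery via kvm_io_bus_write().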
@@ -1678,12 +1266,14 @@ out_fail:
}
enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
- u32 cause, struct kvm_run *run,
- struct kvm_vcpu *vcpu)
+ u32 cause, struct kvm_vcpu *vcpu)
{
+ struct kvm_run *run = vcpu->run;
+ int r;
enum emulation_result er;
unsigned long curr_pc;
u32 op, rt;
+ unsigned int imme;
rt = inst.i_format.rt;
op = inst.i_format.opcode;
@@ -1709,14 +1299,14 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
vcpu->mmio_needed = 2; /* signed */
switch (op) {
-#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
+#if defined(CONFIG_64BIT)
case ld_op:
run->mmio.len = 8;
break;
case lwu_op:
vcpu->mmio_needed = 1; /* unsigned */
- /* fall through */
+ fallthrough;
#endif
case lw_op:
run->mmio.len = 4;
@@ -1724,859 +1314,201 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
case lhu_op:
vcpu->mmio_needed = 1; /* unsigned */
- /* fall through */
+ fallthrough;
case lh_op:
run->mmio.len = 2;
break;
case lbu_op:
vcpu->mmio_needed = 1; /* unsigned */
- /* fall through */
+ fallthrough;
case lb_op:
run->mmio.len = 1;
break;
- default:
- kvm_err("Load not yet supported (inst=0x%08x)\n",
- inst.word);
- vcpu->mmio_needed = 0;
- return EMULATE_FAIL;
- }
-
- run->mmio.is_write = 0;
- vcpu->mmio_is_write = 0;
- return EMULATE_DO_MMIO;
-}
-
-#ifndef CONFIG_KVM_MIPS_VZ
-static enum emulation_result kvm_mips_guest_cache_op(int (*fn)(unsigned long),
- unsigned long curr_pc,
- unsigned long addr,
- struct kvm_run *run,
- struct kvm_vcpu *vcpu,
- u32 cause)
-{
- int err;
+ case lwl_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x3);
- for (;;) {
- /* Carefully attempt the cache operation */
- kvm_trap_emul_gva_lockless_begin(vcpu);
- err = fn(addr);
- kvm_trap_emul_gva_lockless_end(vcpu);
-
- if (likely(!err))
- return EMULATE_DONE;
-
- /*
- * Try to handle the fault and retry, maybe we just raced with a
- * GVA invalidation.
- */
- switch (kvm_trap_emul_gva_fault(vcpu, addr, false)) {
- case KVM_MIPS_GVA:
- case KVM_MIPS_GPA:
- /* bad virtual or physical address */
- return EMULATE_FAIL;
- case KVM_MIPS_TLB:
- /* no matching guest TLB */
- vcpu->arch.host_cp0_badvaddr = addr;
- vcpu->arch.pc = curr_pc;
- kvm_mips_emulate_tlbmiss_ld(cause, NULL, run, vcpu);
- return EMULATE_EXCEPT;
- case KVM_MIPS_TLBINV:
- /* invalid matching guest TLB */
- vcpu->arch.host_cp0_badvaddr = addr;
- vcpu->arch.pc = curr_pc;
- kvm_mips_emulate_tlbinv_ld(cause, NULL, run, vcpu);
- return EMULATE_EXCEPT;
+ run->mmio.len = 4;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x3;
+ switch (imme) {
+ case 0:
+ vcpu->mmio_needed = 3; /* 1 byte */
+ break;
+ case 1:
+ vcpu->mmio_needed = 4; /* 2 bytes */
+ break;
+ case 2:
+ vcpu->mmio_needed = 5; /* 3 bytes */
+ break;
+ case 3:
+ vcpu->mmio_needed = 6; /* 4 bytes */
+ break;
default:
break;
- };
- }
-}
+ }
+ break;
-enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
- u32 *opc, u32 cause,
- struct kvm_run *run,
- struct kvm_vcpu *vcpu)
-{
- enum emulation_result er = EMULATE_DONE;
- u32 cache, op_inst, op, base;
- s16 offset;
- struct kvm_vcpu_arch *arch = &vcpu->arch;
- unsigned long va;
- unsigned long curr_pc;
+ case lwr_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x3);
- /*
- * Update PC and hold onto current PC in case there is
- * an error and we want to rollback the PC
- */
- curr_pc = vcpu->arch.pc;
- er = update_pc(vcpu, cause);
- if (er == EMULATE_FAIL)
- return er;
+ run->mmio.len = 4;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x3;
+ switch (imme) {
+ case 0:
+ vcpu->mmio_needed = 7; /* 4 bytes */
+ break;
+ case 1:
+ vcpu->mmio_needed = 8; /* 3 bytes */
+ break;
+ case 2:
+ vcpu->mmio_needed = 9; /* 2 bytes */
+ break;
+ case 3:
+ vcpu->mmio_needed = 10; /* 1 byte */
+ break;
+ default:
+ break;
+ }
+ break;
- base = inst.i_format.rs;
- op_inst = inst.i_format.rt;
- if (cpu_has_mips_r6)
- offset = inst.spec3_format.simmediate;
- else
- offset = inst.i_format.simmediate;
- cache = op_inst & CacheOp_Cache;
- op = op_inst & CacheOp_Op;
+#if defined(CONFIG_64BIT)
+ case ldl_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x7);
- va = arch->gprs[base] + offset;
+ run->mmio.len = 8;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x7;
+ switch (imme) {
+ case 0:
+ vcpu->mmio_needed = 11; /* 1 byte */
+ break;
+ case 1:
+ vcpu->mmio_needed = 12; /* 2 bytes */
+ break;
+ case 2:
+ vcpu->mmio_needed = 13; /* 3 bytes */
+ break;
+ case 3:
+ vcpu->mmio_needed = 14; /* 4 bytes */
+ break;
+ case 4:
+ vcpu->mmio_needed = 15; /* 5 bytes */
+ break;
+ case 5:
+ vcpu->mmio_needed = 16; /* 6 bytes */
+ break;
+ case 6:
+ vcpu->mmio_needed = 17; /* 7 bytes */
+ break;
+ case 7:
+ vcpu->mmio_needed = 18; /* 8 bytes */
+ break;
+ default:
+ break;
+ }
+ break;
- kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
- cache, op, base, arch->gprs[base], offset);
+ case ldr_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x7);
- /*
- * Treat INDEX_INV as a nop, basically issued by Linux on startup to
- * invalidate the caches entirely by stepping through all the
- * ways/indexes
- */
- if (op == Index_Writeback_Inv) {
- kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
- vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
- arch->gprs[base], offset);
-
- if (cache == Cache_D) {
-#ifdef CONFIG_CPU_R4K_CACHE_TLB
- r4k_blast_dcache();
-#else
- switch (boot_cpu_type()) {
- case CPU_CAVIUM_OCTEON3:
- /* locally flush icache */
- local_flush_icache_range(0, 0);
- break;
- default:
- __flush_cache_all();
- break;
- }
-#endif
- } else if (cache == Cache_I) {
-#ifdef CONFIG_CPU_R4K_CACHE_TLB
- r4k_blast_icache();
-#else
- switch (boot_cpu_type()) {
- case CPU_CAVIUM_OCTEON3:
- /* locally flush icache */
- local_flush_icache_range(0, 0);
- break;
- default:
- flush_icache_all();
- break;
- }
-#endif
- } else {
- kvm_err("%s: unsupported CACHE INDEX operation\n",
- __func__);
- return EMULATE_FAIL;
+ run->mmio.len = 8;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x7;
+ switch (imme) {
+ case 0:
+ vcpu->mmio_needed = 19; /* 8 bytes */
+ break;
+ case 1:
+ vcpu->mmio_needed = 20; /* 7 bytes */
+ break;
+ case 2:
+ vcpu->mmio_needed = 21; /* 6 bytes */
+ break;
+ case 3:
+ vcpu->mmio_needed = 22; /* 5 bytes */
+ break;
+ case 4:
+ vcpu->mmio_needed = 23; /* 4 bytes */
+ break;
+ case 5:
+ vcpu->mmio_needed = 24; /* 3 bytes */
+ break;
+ case 6:
+ vcpu->mmio_needed = 25; /* 2 bytes */
+ break;
+ case 7:
+ vcpu->mmio_needed = 26; /* 1 byte */
+ break;
+ default:
+ break;
}
-
-#ifdef CONFIG_KVM_MIPS_DYN_TRANS
- kvm_mips_trans_cache_index(inst, opc, vcpu);
+ break;
#endif
- goto done;
- }
- /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
- if (op_inst == Hit_Writeback_Inv_D || op_inst == Hit_Invalidate_D) {
- /*
- * Perform the dcache part of icache synchronisation on the
- * guest's behalf.
- */
- er = kvm_mips_guest_cache_op(protected_writeback_dcache_line,
- curr_pc, va, run, vcpu, cause);
- if (er != EMULATE_DONE)
- goto done;
-#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+#ifdef CONFIG_CPU_LOONGSON64
+ case ldc2_op:
+ rt = inst.loongson3_lsdc2_format.rt;
+ switch (inst.loongson3_lsdc2_format.opcode1) {
/*
- * Replace the CACHE instruction, with a SYNCI, not the same,
- * but avoids a trap
+ * Loongson-3 overridden ldc2 instructions.
+ * opcode1 instruction
+ * 0x0 gslbx: load 1 byte to GPR
+ * 0x1 gslhx: load 2 bytes to GPR
+ * 0x2 gslwx: load 4 bytes to GPR
+ * 0x3 gsldx: load 8 bytes to GPR
*/
- kvm_mips_trans_cache_va(inst, opc, vcpu);
-#endif
- } else if (op_inst == Hit_Invalidate_I) {
- /* Perform the icache synchronisation on the guest's behalf */
- er = kvm_mips_guest_cache_op(protected_writeback_dcache_line,
- curr_pc, va, run, vcpu, cause);
- if (er != EMULATE_DONE)
- goto done;
- er = kvm_mips_guest_cache_op(protected_flush_icache_line,
- curr_pc, va, run, vcpu, cause);
- if (er != EMULATE_DONE)
- goto done;
-
-#ifdef CONFIG_KVM_MIPS_DYN_TRANS
- /* Replace the CACHE instruction, with a SYNCI */
- kvm_mips_trans_cache_va(inst, opc, vcpu);
-#endif
- } else {
- kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
- cache, op, base, arch->gprs[base], offset);
- er = EMULATE_FAIL;
- }
-
-done:
- /* Rollback PC only if emulation was unsuccessful */
- if (er == EMULATE_FAIL)
- vcpu->arch.pc = curr_pc;
- /* Guest exception needs guest to resume */
- if (er == EMULATE_EXCEPT)
- er = EMULATE_DONE;
-
- return er;
-}
-
-enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
- struct kvm_run *run,
- struct kvm_vcpu *vcpu)
-{
- union mips_instruction inst;
- enum emulation_result er = EMULATE_DONE;
- int err;
-
- /* Fetch the instruction. */
- if (cause & CAUSEF_BD)
- opc += 1;
- err = kvm_get_badinstr(opc, vcpu, &inst.word);
- if (err)
- return EMULATE_FAIL;
-
- switch (inst.r_format.opcode) {
- case cop0_op:
- er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
- break;
-
-#ifndef CONFIG_CPU_MIPSR6
- case cache_op:
- ++vcpu->stat.cache_exits;
- trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
- er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
- break;
-#else
- case spec3_op:
- switch (inst.spec3_format.func) {
- case cache6_op:
- ++vcpu->stat.cache_exits;
- trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
- er = kvm_mips_emulate_cache(inst, opc, cause, run,
- vcpu);
+ case 0x0:
+ run->mmio.len = 1;
+ vcpu->mmio_needed = 27; /* signed */
+ break;
+ case 0x1:
+ run->mmio.len = 2;
+ vcpu->mmio_needed = 28; /* signed */
+ break;
+ case 0x2:
+ run->mmio.len = 4;
+ vcpu->mmio_needed = 29; /* signed */
+ break;
+ case 0x3:
+ run->mmio.len = 8;
+ vcpu->mmio_needed = 30; /* signed */
break;
default:
- goto unknown;
- };
+ kvm_err("Godson Extended GS-Load for float not yet supported (inst=0x%08x)\n",
+ inst.word);
+ break;
+ }
break;
-unknown:
#endif
default:
- kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
+ kvm_err("Load not yet supported (inst=0x%08x)\n",
inst.word);
- kvm_arch_vcpu_dump_regs(vcpu);
- er = EMULATE_FAIL;
- break;
- }
-
- return er;
-}
-#endif /* CONFIG_KVM_MIPS_VZ */
-
-/**
- * kvm_mips_guest_exception_base() - Find guest exception vector base address.
- *
- * Returns: The base address of the current guest exception vector, taking
- * both Guest.CP0_Status.BEV and Guest.CP0_EBase into account.
- */
-long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
-
- if (kvm_read_c0_guest_status(cop0) & ST0_BEV)
- return KVM_GUEST_CKSEG1ADDR(0x1fc00200);
- else
- return kvm_read_c0_guest_ebase(cop0) & MIPS_EBASE_BASE;
-}
-
-enum emulation_result kvm_mips_emulate_syscall(u32 cause,
- u32 *opc,
- struct kvm_run *run,
- struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_vcpu_arch *arch = &vcpu->arch;
- enum emulation_result er = EMULATE_DONE;
-
- if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
- /* save old pc */
- kvm_write_c0_guest_epc(cop0, arch->pc);
- kvm_set_c0_guest_status(cop0, ST0_EXL);
-
- if (cause & CAUSEF_BD)
- kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
- else
- kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
- kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
-
- kvm_change_c0_guest_cause(cop0, (0xff),
- (EXCCODE_SYS << CAUSEB_EXCCODE));
-
- /* Set PC to the exception entry point */
- arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
-
- } else {
- kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
- er = EMULATE_FAIL;
- }
-
- return er;
-}
-
-enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
- u32 *opc,
- struct kvm_run *run,
- struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_vcpu_arch *arch = &vcpu->arch;
- unsigned long entryhi = (vcpu->arch. host_cp0_badvaddr & VPN2_MASK) |
- (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
-
- if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
- /* save old pc */
- kvm_write_c0_guest_epc(cop0, arch->pc);
- kvm_set_c0_guest_status(cop0, ST0_EXL);
-
- if (cause & CAUSEF_BD)
- kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
- else
- kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
- kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
- arch->pc);
-
- /* set pc to the exception entry point */
- arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x0;
-
- } else {
- kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
- arch->pc);
-
- arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
- }
-
- kvm_change_c0_guest_cause(cop0, (0xff),
- (EXCCODE_TLBL << CAUSEB_EXCCODE));
-
- /* setup badvaddr, context and entryhi registers for the guest */
- kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
- /* XXXKYMA: is the context register used by linux??? */
- kvm_write_c0_guest_entryhi(cop0, entryhi);
-
- return EMULATE_DONE;
-}
-
-enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
- u32 *opc,
- struct kvm_run *run,
- struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_vcpu_arch *arch = &vcpu->arch;
- unsigned long entryhi =
- (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
- (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
-
- if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
- /* save old pc */
- kvm_write_c0_guest_epc(cop0, arch->pc);
- kvm_set_c0_guest_status(cop0, ST0_EXL);
-
- if (cause & CAUSEF_BD)
- kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
- else
- kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
- kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
- arch->pc);
- } else {
- kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
- arch->pc);
- }
-
- /* set pc to the exception entry point */
- arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
-
- kvm_change_c0_guest_cause(cop0, (0xff),
- (EXCCODE_TLBL << CAUSEB_EXCCODE));
-
- /* setup badvaddr, context and entryhi registers for the guest */
- kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
- /* XXXKYMA: is the context register used by linux??? */
- kvm_write_c0_guest_entryhi(cop0, entryhi);
-
- return EMULATE_DONE;
-}
-
-enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
- u32 *opc,
- struct kvm_run *run,
- struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_vcpu_arch *arch = &vcpu->arch;
- unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
- (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
-
- if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
- /* save old pc */
- kvm_write_c0_guest_epc(cop0, arch->pc);
- kvm_set_c0_guest_status(cop0, ST0_EXL);
-
- if (cause & CAUSEF_BD)
- kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
- else
- kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
- kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
- arch->pc);
-
- /* Set PC to the exception entry point */
- arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x0;
- } else {
- kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
- arch->pc);
- arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
- }
-
- kvm_change_c0_guest_cause(cop0, (0xff),
- (EXCCODE_TLBS << CAUSEB_EXCCODE));
-
- /* setup badvaddr, context and entryhi registers for the guest */
- kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
- /* XXXKYMA: is the context register used by linux??? */
- kvm_write_c0_guest_entryhi(cop0, entryhi);
-
- return EMULATE_DONE;
-}
-
-enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
- u32 *opc,
- struct kvm_run *run,
- struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_vcpu_arch *arch = &vcpu->arch;
- unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
- (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
-
- if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
- /* save old pc */
- kvm_write_c0_guest_epc(cop0, arch->pc);
- kvm_set_c0_guest_status(cop0, ST0_EXL);
-
- if (cause & CAUSEF_BD)
- kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
- else
- kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
- kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
- arch->pc);
- } else {
- kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
- arch->pc);
- }
-
- /* Set PC to the exception entry point */
- arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
-
- kvm_change_c0_guest_cause(cop0, (0xff),
- (EXCCODE_TLBS << CAUSEB_EXCCODE));
-
- /* setup badvaddr, context and entryhi registers for the guest */
- kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
- /* XXXKYMA: is the context register used by linux??? */
- kvm_write_c0_guest_entryhi(cop0, entryhi);
-
- return EMULATE_DONE;
-}
-
-enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
- u32 *opc,
- struct kvm_run *run,
- struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
- (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
- struct kvm_vcpu_arch *arch = &vcpu->arch;
-
- if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
- /* save old pc */
- kvm_write_c0_guest_epc(cop0, arch->pc);
- kvm_set_c0_guest_status(cop0, ST0_EXL);
-
- if (cause & CAUSEF_BD)
- kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
- else
- kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
- kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
- arch->pc);
- } else {
- kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
- arch->pc);
- }
-
- arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
-
- kvm_change_c0_guest_cause(cop0, (0xff),
- (EXCCODE_MOD << CAUSEB_EXCCODE));
-
- /* setup badvaddr, context and entryhi registers for the guest */
- kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
- /* XXXKYMA: is the context register used by linux??? */
- kvm_write_c0_guest_entryhi(cop0, entryhi);
-
- return EMULATE_DONE;
-}
-
-enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
- u32 *opc,
- struct kvm_run *run,
- struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_vcpu_arch *arch = &vcpu->arch;
-
- if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
- /* save old pc */
- kvm_write_c0_guest_epc(cop0, arch->pc);
- kvm_set_c0_guest_status(cop0, ST0_EXL);
-
- if (cause & CAUSEF_BD)
- kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
- else
- kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
- }
-
- arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
-
- kvm_change_c0_guest_cause(cop0, (0xff),
- (EXCCODE_CPU << CAUSEB_EXCCODE));
- kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
-
- return EMULATE_DONE;
-}
-
-enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
- u32 *opc,
- struct kvm_run *run,
- struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_vcpu_arch *arch = &vcpu->arch;
- enum emulation_result er = EMULATE_DONE;
-
- if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
- /* save old pc */
- kvm_write_c0_guest_epc(cop0, arch->pc);
- kvm_set_c0_guest_status(cop0, ST0_EXL);
-
- if (cause & CAUSEF_BD)
- kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
- else
- kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
- kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
-
- kvm_change_c0_guest_cause(cop0, (0xff),
- (EXCCODE_RI << CAUSEB_EXCCODE));
-
- /* Set PC to the exception entry point */
- arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
-
- } else {
- kvm_err("Trying to deliver RI when EXL is already set\n");
- er = EMULATE_FAIL;
- }
-
- return er;
-}
-
-enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
- u32 *opc,
- struct kvm_run *run,
- struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_vcpu_arch *arch = &vcpu->arch;
- enum emulation_result er = EMULATE_DONE;
-
- if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
- /* save old pc */
- kvm_write_c0_guest_epc(cop0, arch->pc);
- kvm_set_c0_guest_status(cop0, ST0_EXL);
-
- if (cause & CAUSEF_BD)
- kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
- else
- kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
- kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
-
- kvm_change_c0_guest_cause(cop0, (0xff),
- (EXCCODE_BP << CAUSEB_EXCCODE));
-
- /* Set PC to the exception entry point */
- arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
-
- } else {
- kvm_err("Trying to deliver BP when EXL is already set\n");
- er = EMULATE_FAIL;
- }
-
- return er;
-}
-
-enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
- u32 *opc,
- struct kvm_run *run,
- struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_vcpu_arch *arch = &vcpu->arch;
- enum emulation_result er = EMULATE_DONE;
-
- if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
- /* save old pc */
- kvm_write_c0_guest_epc(cop0, arch->pc);
- kvm_set_c0_guest_status(cop0, ST0_EXL);
-
- if (cause & CAUSEF_BD)
- kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
- else
- kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
- kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc);
-
- kvm_change_c0_guest_cause(cop0, (0xff),
- (EXCCODE_TR << CAUSEB_EXCCODE));
-
- /* Set PC to the exception entry point */
- arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
-
- } else {
- kvm_err("Trying to deliver TRAP when EXL is already set\n");
- er = EMULATE_FAIL;
- }
-
- return er;
-}
-
-enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
- u32 *opc,
- struct kvm_run *run,
- struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_vcpu_arch *arch = &vcpu->arch;
- enum emulation_result er = EMULATE_DONE;
-
- if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
- /* save old pc */
- kvm_write_c0_guest_epc(cop0, arch->pc);
- kvm_set_c0_guest_status(cop0, ST0_EXL);
-
- if (cause & CAUSEF_BD)
- kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
- else
- kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
- kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc);
-
- kvm_change_c0_guest_cause(cop0, (0xff),
- (EXCCODE_MSAFPE << CAUSEB_EXCCODE));
-
- /* Set PC to the exception entry point */
- arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
-
- } else {
- kvm_err("Trying to deliver MSAFPE when EXL is already set\n");
- er = EMULATE_FAIL;
- }
-
- return er;
-}
-
-enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
- u32 *opc,
- struct kvm_run *run,
- struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_vcpu_arch *arch = &vcpu->arch;
- enum emulation_result er = EMULATE_DONE;
-
- if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
- /* save old pc */
- kvm_write_c0_guest_epc(cop0, arch->pc);
- kvm_set_c0_guest_status(cop0, ST0_EXL);
-
- if (cause & CAUSEF_BD)
- kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
- else
- kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
- kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc);
-
- kvm_change_c0_guest_cause(cop0, (0xff),
- (EXCCODE_FPE << CAUSEB_EXCCODE));
-
- /* Set PC to the exception entry point */
- arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
-
- } else {
- kvm_err("Trying to deliver FPE when EXL is already set\n");
- er = EMULATE_FAIL;
- }
-
- return er;
-}
-
-enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
- u32 *opc,
- struct kvm_run *run,
- struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_vcpu_arch *arch = &vcpu->arch;
- enum emulation_result er = EMULATE_DONE;
-
- if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
- /* save old pc */
- kvm_write_c0_guest_epc(cop0, arch->pc);
- kvm_set_c0_guest_status(cop0, ST0_EXL);
-
- if (cause & CAUSEF_BD)
- kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
- else
- kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
- kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc);
-
- kvm_change_c0_guest_cause(cop0, (0xff),
- (EXCCODE_MSADIS << CAUSEB_EXCCODE));
-
- /* Set PC to the exception entry point */
- arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
-
- } else {
- kvm_err("Trying to deliver MSADIS when EXL is already set\n");
- er = EMULATE_FAIL;
- }
-
- return er;
-}
-
-enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc,
- struct kvm_run *run,
- struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_vcpu_arch *arch = &vcpu->arch;
- enum emulation_result er = EMULATE_DONE;
- unsigned long curr_pc;
- union mips_instruction inst;
- int err;
-
- /*
- * Update PC and hold onto current PC in case there is
- * an error and we want to rollback the PC
- */
- curr_pc = vcpu->arch.pc;
- er = update_pc(vcpu, cause);
- if (er == EMULATE_FAIL)
- return er;
-
- /* Fetch the instruction. */
- if (cause & CAUSEF_BD)
- opc += 1;
- err = kvm_get_badinstr(opc, vcpu, &inst.word);
- if (err) {
- kvm_err("%s: Cannot get inst @ %p (%d)\n", __func__, opc, err);
+ vcpu->mmio_needed = 0;
return EMULATE_FAIL;
}
- if (inst.r_format.opcode == spec3_op &&
- inst.r_format.func == rdhwr_op &&
- inst.r_format.rs == 0 &&
- (inst.r_format.re >> 3) == 0) {
- int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
- int rd = inst.r_format.rd;
- int rt = inst.r_format.rt;
- int sel = inst.r_format.re & 0x7;
-
- /* If usermode, check RDHWR rd is allowed by guest HWREna */
- if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
- kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
- rd, opc);
- goto emulate_ri;
- }
- switch (rd) {
- case MIPS_HWR_CPUNUM: /* CPU number */
- arch->gprs[rt] = vcpu->vcpu_id;
- break;
- case MIPS_HWR_SYNCISTEP: /* SYNCI length */
- arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
- current_cpu_data.icache.linesz);
- break;
- case MIPS_HWR_CC: /* Read count register */
- arch->gprs[rt] = (s32)kvm_mips_read_count(vcpu);
- break;
- case MIPS_HWR_CCRES: /* Count register resolution */
- switch (current_cpu_data.cputype) {
- case CPU_20KC:
- case CPU_25KF:
- arch->gprs[rt] = 1;
- break;
- default:
- arch->gprs[rt] = 2;
- }
- break;
- case MIPS_HWR_ULR: /* Read UserLocal register */
- arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
- break;
+ run->mmio.is_write = 0;
+ vcpu->mmio_is_write = 0;
- default:
- kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
- goto emulate_ri;
- }
+ r = kvm_io_bus_read(vcpu, KVM_MMIO_BUS,
+ run->mmio.phys_addr, run->mmio.len, run->mmio.data);
- trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, KVM_TRACE_HWR(rd, sel),
- vcpu->arch.gprs[rt]);
- } else {
- kvm_debug("Emulate RI not supported @ %p: %#x\n",
- opc, inst.word);
- goto emulate_ri;
+ if (!r) {
+ kvm_mips_complete_mmio_load(vcpu);
+ vcpu->mmio_needed = 0;
+ return EMULATE_DONE;
}
- return EMULATE_DONE;
-
-emulate_ri:
- /*
- * Rollback PC (if in branch delay slot then the PC already points to
- * branch target), and pass the RI exception to the guest OS.
- */
- vcpu->arch.pc = curr_pc;
- return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
+ return EMULATE_DO_MMIO;
}
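
The EMULATE_DO_MMIO path above is the slow case: kvm_io_bus_read() found no in-kernel device, so the vcpu exits to userspace with KVM_EXIT_MMIO and the VMM must fill run->mmio.data before re-entering. A minimal sketch of that VMM side; handle_device_read() is an assumed VMM helper, not a KVM API:

#include <string.h>
#include <stdint.h>
#include <linux/kvm.h>

/* Assumed VMM device-model helper, not part of KVM. */
extern uint64_t handle_device_read(uint64_t addr, uint32_t len);

static void vmm_handle_mmio_exit(struct kvm_run *run)
{
	if (run->exit_reason == KVM_EXIT_MMIO && !run->mmio.is_write) {
		/* Supply the bytes the guest load will observe. */
		uint64_t val = handle_device_read(run->mmio.phys_addr,
						  run->mmio.len);
		memcpy(run->mmio.data, &val, run->mmio.len);
	}
}
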
-enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
- struct kvm_run *run)
+enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu)
{
+ struct kvm_run *run = vcpu->run;
unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
enum emulation_result er = EMULATE_DONE;
@@ -2591,238 +1523,128 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
switch (run->mmio.len) {
case 8:
- *gpr = *(s64 *)run->mmio.data;
+ switch (vcpu->mmio_needed) {
+ case 11:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff) |
+ (((*(s64 *)run->mmio.data) & 0xff) << 56);
+ break;
+ case 12:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff) |
+ (((*(s64 *)run->mmio.data) & 0xffff) << 48);
+ break;
+ case 13:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff) |
+ (((*(s64 *)run->mmio.data) & 0xffffff) << 40);
+ break;
+ case 14:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff) |
+ (((*(s64 *)run->mmio.data) & 0xffffffff) << 32);
+ break;
+ case 15:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) |
+ (((*(s64 *)run->mmio.data) & 0xffffffffff) << 24);
+ break;
+ case 16:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) |
+ (((*(s64 *)run->mmio.data) & 0xffffffffffff) << 16);
+ break;
+ case 17:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) |
+ (((*(s64 *)run->mmio.data) & 0xffffffffffffff) << 8);
+ break;
+ case 18:
+ case 19:
+ *gpr = *(s64 *)run->mmio.data;
+ break;
+ case 20:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff00000000000000) |
+ ((((*(s64 *)run->mmio.data)) >> 8) & 0xffffffffffffff);
+ break;
+ case 21:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff000000000000) |
+ ((((*(s64 *)run->mmio.data)) >> 16) & 0xffffffffffff);
+ break;
+ case 22:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff0000000000) |
+ ((((*(s64 *)run->mmio.data)) >> 24) & 0xffffffffff);
+ break;
+ case 23:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff00000000) |
+ ((((*(s64 *)run->mmio.data)) >> 32) & 0xffffffff);
+ break;
+ case 24:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff000000) |
+ ((((*(s64 *)run->mmio.data)) >> 40) & 0xffffff);
+ break;
+ case 25:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff0000) |
+ ((((*(s64 *)run->mmio.data)) >> 48) & 0xffff);
+ break;
+ case 26:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff00) |
+ ((((*(s64 *)run->mmio.data)) >> 56) & 0xff);
+ break;
+ default:
+ *gpr = *(s64 *)run->mmio.data;
+ }
break;
case 4:
- if (vcpu->mmio_needed == 2)
- *gpr = *(s32 *)run->mmio.data;
- else
+ switch (vcpu->mmio_needed) {
+ case 1:
*gpr = *(u32 *)run->mmio.data;
- break;
-
- case 2:
- if (vcpu->mmio_needed == 2)
- *gpr = *(s16 *) run->mmio.data;
- else
- *gpr = *(u16 *)run->mmio.data;
-
- break;
- case 1:
- if (vcpu->mmio_needed == 2)
- *gpr = *(s8 *) run->mmio.data;
- else
- *gpr = *(u8 *) run->mmio.data;
- break;
- }
-
-done:
- return er;
-}
-
-static enum emulation_result kvm_mips_emulate_exc(u32 cause,
- u32 *opc,
- struct kvm_run *run,
- struct kvm_vcpu *vcpu)
-{
- u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_vcpu_arch *arch = &vcpu->arch;
- enum emulation_result er = EMULATE_DONE;
-
- if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
- /* save old pc */
- kvm_write_c0_guest_epc(cop0, arch->pc);
- kvm_set_c0_guest_status(cop0, ST0_EXL);
-
- if (cause & CAUSEF_BD)
- kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
- else
- kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
- kvm_change_c0_guest_cause(cop0, (0xff),
- (exccode << CAUSEB_EXCCODE));
-
- /* Set PC to the exception entry point */
- arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
- kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
-
- kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
- exccode, kvm_read_c0_guest_epc(cop0),
- kvm_read_c0_guest_badvaddr(cop0));
- } else {
- kvm_err("Trying to deliver EXC when EXL is already set\n");
- er = EMULATE_FAIL;
- }
-
- return er;
-}
-
-enum emulation_result kvm_mips_check_privilege(u32 cause,
- u32 *opc,
- struct kvm_run *run,
- struct kvm_vcpu *vcpu)
-{
- enum emulation_result er = EMULATE_DONE;
- u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
- unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
-
- int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
-
- if (usermode) {
- switch (exccode) {
- case EXCCODE_INT:
- case EXCCODE_SYS:
- case EXCCODE_BP:
- case EXCCODE_RI:
- case EXCCODE_TR:
- case EXCCODE_MSAFPE:
- case EXCCODE_FPE:
- case EXCCODE_MSADIS:
break;
-
- case EXCCODE_CPU:
- if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
- er = EMULATE_PRIV_FAIL;
+ case 2:
+ *gpr = *(s32 *)run->mmio.data;
break;
-
- case EXCCODE_MOD:
+ case 3:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) |
+ (((*(s32 *)run->mmio.data) & 0xff) << 24);
break;
-
- case EXCCODE_TLBL:
- /*
- * If we are accessing Guest kernel space, then send an
- * address error exception to the guest
- */
- if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
- kvm_debug("%s: LD MISS @ %#lx\n", __func__,
- badvaddr);
- cause &= ~0xff;
- cause |= (EXCCODE_ADEL << CAUSEB_EXCCODE);
- er = EMULATE_PRIV_FAIL;
- }
+ case 4:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) |
+ (((*(s32 *)run->mmio.data) & 0xffff) << 16);
break;
-
- case EXCCODE_TLBS:
- /*
- * If we are accessing Guest kernel space, then send an
- * address error exception to the guest
- */
- if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
- kvm_debug("%s: ST MISS @ %#lx\n", __func__,
- badvaddr);
- cause &= ~0xff;
- cause |= (EXCCODE_ADES << CAUSEB_EXCCODE);
- er = EMULATE_PRIV_FAIL;
- }
+ case 5:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) |
+ (((*(s32 *)run->mmio.data) & 0xffffff) << 8);
break;
-
- case EXCCODE_ADES:
- kvm_debug("%s: address error ST @ %#lx\n", __func__,
- badvaddr);
- if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
- cause &= ~0xff;
- cause |= (EXCCODE_TLBS << CAUSEB_EXCCODE);
- }
- er = EMULATE_PRIV_FAIL;
- break;
- case EXCCODE_ADEL:
- kvm_debug("%s: address error LD @ %#lx\n", __func__,
- badvaddr);
- if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
- cause &= ~0xff;
- cause |= (EXCCODE_TLBL << CAUSEB_EXCCODE);
- }
- er = EMULATE_PRIV_FAIL;
+ case 6:
+ case 7:
+ *gpr = *(s32 *)run->mmio.data;
break;
- default:
- er = EMULATE_PRIV_FAIL;
+ case 8:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff000000) |
+ ((((*(s32 *)run->mmio.data)) >> 8) & 0xffffff);
+ break;
+ case 9:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff0000) |
+ ((((*(s32 *)run->mmio.data)) >> 16) & 0xffff);
+ break;
+ case 10:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff00) |
+ ((((*(s32 *)run->mmio.data)) >> 24) & 0xff);
break;
+ default:
+ *gpr = *(s32 *)run->mmio.data;
}
- }
-
- if (er == EMULATE_PRIV_FAIL)
- kvm_mips_emulate_exc(cause, opc, run, vcpu);
-
- return er;
-}
-
-/*
- * User Address (UA) fault; this could happen if:
- * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
- * case we pass on the fault to the guest kernel and let it handle it.
- * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
- * case we inject the TLB from the Guest TLB into the shadow host TLB
- */
-enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
- u32 *opc,
- struct kvm_run *run,
- struct kvm_vcpu *vcpu,
- bool write_fault)
-{
- enum emulation_result er = EMULATE_DONE;
- u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
- unsigned long va = vcpu->arch.host_cp0_badvaddr;
- int index;
-
- kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx\n",
- vcpu->arch.host_cp0_badvaddr);
+ break;
- /*
- * KVM would not have got the exception if this entry was valid in the
- * shadow host TLB. Check the Guest TLB; if the entry is not there, then
- * send the guest an exception. The guest exc handler should then inject
- * an entry into the guest TLB.
- */
- index = kvm_mips_guest_tlb_lookup(vcpu,
- (va & VPN2_MASK) |
- (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
- KVM_ENTRYHI_ASID));
- if (index < 0) {
- if (exccode == EXCCODE_TLBL) {
- er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
- } else if (exccode == EXCCODE_TLBS) {
- er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
- } else {
- kvm_err("%s: invalid exc code: %d\n", __func__,
- exccode);
- er = EMULATE_FAIL;
- }
- } else {
- struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
+ case 2:
+ if (vcpu->mmio_needed == 1)
+ *gpr = *(u16 *)run->mmio.data;
+ else
+ *gpr = *(s16 *)run->mmio.data;
- /*
- * Check if the entry is valid, if not then setup a TLB invalid
- * exception to the guest
- */
- if (!TLB_IS_VALID(*tlb, va)) {
- if (exccode == EXCCODE_TLBL) {
- er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
- vcpu);
- } else if (exccode == EXCCODE_TLBS) {
- er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
- vcpu);
- } else {
- kvm_err("%s: invalid exc code: %d\n", __func__,
- exccode);
- er = EMULATE_FAIL;
- }
- } else {
- kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
- tlb->tlb_hi, tlb->tlb_lo[0], tlb->tlb_lo[1]);
- /*
- * OK we have a Guest TLB entry, now inject it into the
- * shadow host TLB
- */
- if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, va,
- write_fault)) {
- kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
- __func__, va, index, vcpu,
- read_c0_entryhi());
- er = EMULATE_FAIL;
- }
- }
+ break;
+ case 1:
+ if (vcpu->mmio_needed == 1)
+ *gpr = *(u8 *)run->mmio.data;
+ else
+ *gpr = *(s8 *)run->mmio.data;
+ break;
}
+done:
return er;
}
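
The vcpu->mmio_needed values decoded in the len==8 and len==4 switches above encode how many memory bytes an unaligned LWL/LWR (or LDL/LDR) load contributes and which end of the register they land in. A standalone sketch of the LWL-style merge behind cases 3-5, assuming 'bytes' counts the memory bytes merged into the top of the GPR:

#include <stdint.h>

/*
 * LWL-style merge: 'bytes' low-order bytes read from memory land in
 * the top of the register, while the remaining low bytes of the old
 * GPR value are preserved (cf. cases 3-5 in the len==4 switch above).
 */
static uint32_t merge_lwl(uint32_t old_gpr, uint32_t mem, unsigned int bytes)
{
	unsigned int shift = 8 * (4 - bytes);	/* bytes in 1..4 */
	uint32_t keep = (1u << shift) - 1;	/* low GPR bytes to keep */

	return (old_gpr & keep) | ((mem & (0xffffffffu >> shift)) << shift);
}
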
diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
index 16e1c93b484f..ac8e074c6bb7 100644
--- a/arch/mips/kvm/entry.c
+++ b/arch/mips/kvm/entry.c
@@ -13,69 +13,17 @@
#include <linux/kvm_host.h>
#include <linux/log2.h>
+#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/msa.h>
+#include <asm/regdef.h>
#include <asm/setup.h>
#include <asm/tlbex.h>
#include <asm/uasm.h>
-/* Register names */
-#define ZERO 0
-#define AT 1
-#define V0 2
-#define V1 3
-#define A0 4
-#define A1 5
-
-#if _MIPS_SIM == _MIPS_SIM_ABI32
-#define T0 8
-#define T1 9
-#define T2 10
-#define T3 11
-#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
-
-#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
-#define T0 12
-#define T1 13
-#define T2 14
-#define T3 15
-#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */
-
-#define S0 16
-#define S1 17
-#define T9 25
-#define K0 26
-#define K1 27
-#define GP 28
-#define SP 29
-#define RA 31
-
-/* Some CP0 registers */
-#define C0_PWBASE 5, 5
-#define C0_HWRENA 7, 0
-#define C0_BADVADDR 8, 0
-#define C0_BADINSTR 8, 1
-#define C0_BADINSTRP 8, 2
-#define C0_ENTRYHI 10, 0
-#define C0_GUESTCTL1 10, 4
-#define C0_STATUS 12, 0
-#define C0_GUESTCTL0 12, 6
-#define C0_CAUSE 13, 0
-#define C0_EPC 14, 0
-#define C0_EBASE 15, 1
-#define C0_CONFIG5 16, 5
-#define C0_DDATA_LO 28, 3
-#define C0_ERROREPC 30, 0
-
#define CALLFRAME_SIZ 32
-#ifdef CONFIG_64BIT
-#define ST0_KX_IF_64 ST0_KX
-#else
-#define ST0_KX_IF_64 0
-#endif
-
-static unsigned int scratch_vcpu[2] = { C0_DDATA_LO };
+static unsigned int scratch_vcpu[2] = { C0_DDATALO };
static unsigned int scratch_tmp[2] = { C0_ERROREPC };
enum label_id {
@@ -103,13 +51,7 @@ static void *kvm_mips_build_ret_to_host(void *addr);
*/
static int c0_kscratch(void)
{
- switch (boot_cpu_type()) {
- case CPU_XLP:
- case CPU_XLR:
- return 22;
- default:
- return 31;
- }
+ return 31;
}
/**
@@ -204,7 +146,7 @@ static inline void build_set_exc_base(u32 **p, unsigned int reg)
* Assemble the start of the vcpu_run function to run a guest VCPU. The function
* conforms to the following prototype:
*
- * int vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
+ * int vcpu_run(struct kvm_vcpu *vcpu);
*
* The exit from the guest and return to the caller is handled by the code
* generated by kvm_mips_build_ret_to_host().
@@ -217,61 +159,60 @@ void *kvm_mips_build_vcpu_run(void *addr)
unsigned int i;
/*
- * A0: run
- * A1: vcpu
+ * GPR_A0: vcpu
*/
/* k0/k1 not being used in host kernel context */
- UASM_i_ADDIU(&p, K1, SP, -(int)sizeof(struct pt_regs));
+ UASM_i_ADDIU(&p, GPR_K1, GPR_SP, -(int)sizeof(struct pt_regs));
for (i = 16; i < 32; ++i) {
if (i == 24)
i = 28;
- UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
+ UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), GPR_K1);
}
/* Save host status */
- uasm_i_mfc0(&p, V0, C0_STATUS);
- UASM_i_SW(&p, V0, offsetof(struct pt_regs, cp0_status), K1);
+ uasm_i_mfc0(&p, GPR_V0, C0_STATUS);
+ UASM_i_SW(&p, GPR_V0, offsetof(struct pt_regs, cp0_status), GPR_K1);
/* Save scratch registers, will be used to store pointer to vcpu etc */
- kvm_mips_build_save_scratch(&p, V1, K1);
+ kvm_mips_build_save_scratch(&p, GPR_V1, GPR_K1);
/* VCPU scratch register has pointer to vcpu */
- UASM_i_MTC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
+ UASM_i_MTC0(&p, GPR_A0, scratch_vcpu[0], scratch_vcpu[1]);
/* Offset into vcpu->arch */
- UASM_i_ADDIU(&p, K1, A1, offsetof(struct kvm_vcpu, arch));
+ UASM_i_ADDIU(&p, GPR_K1, GPR_A0, offsetof(struct kvm_vcpu, arch));
/*
* Save the host stack to VCPU, used for exception processing
* when we exit from the Guest
*/
- UASM_i_SW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);
+ UASM_i_SW(&p, GPR_SP, offsetof(struct kvm_vcpu_arch, host_stack), GPR_K1);
/* Save the kernel gp as well */
- UASM_i_SW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);
+ UASM_i_SW(&p, GPR_GP, offsetof(struct kvm_vcpu_arch, host_gp), GPR_K1);
/*
* Setup status register for running the guest in UM, interrupts
* are disabled
*/
- UASM_i_LA(&p, K0, ST0_EXL | KSU_USER | ST0_BEV | ST0_KX_IF_64);
- uasm_i_mtc0(&p, K0, C0_STATUS);
+ UASM_i_LA(&p, GPR_K0, ST0_EXL | KSU_USER | ST0_BEV | ST0_KX_IF_64);
+ uasm_i_mtc0(&p, GPR_K0, C0_STATUS);
uasm_i_ehb(&p);
/* load up the new EBASE */
- UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
- build_set_exc_base(&p, K0);
+ UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, guest_ebase), GPR_K1);
+ build_set_exc_base(&p, GPR_K0);
/*
* Now that the new EBASE has been loaded, unset BEV, set
* interrupt mask as it was but make sure that timer interrupts
* are enabled
*/
- uasm_i_addiu(&p, K0, ZERO, ST0_EXL | KSU_USER | ST0_IE | ST0_KX_IF_64);
- uasm_i_andi(&p, V0, V0, ST0_IM);
- uasm_i_or(&p, K0, K0, V0);
- uasm_i_mtc0(&p, K0, C0_STATUS);
+ uasm_i_addiu(&p, GPR_K0, GPR_ZERO, ST0_EXL | KSU_USER | ST0_IE | ST0_KX_IF_64);
+ uasm_i_andi(&p, GPR_V0, GPR_V0, ST0_IM);
+ uasm_i_or(&p, GPR_K0, GPR_K0, GPR_V0);
+ uasm_i_mtc0(&p, GPR_K0, C0_STATUS);
uasm_i_ehb(&p);
p = kvm_mips_build_enter_guest(p);
@@ -302,13 +243,15 @@ static void *kvm_mips_build_enter_guest(void *addr)
memset(relocs, 0, sizeof(relocs));
/* Set Guest EPC */
- UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
- UASM_i_MTC0(&p, T0, C0_EPC);
+ UASM_i_LW(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, pc), GPR_K1);
+ UASM_i_MTC0(&p, GPR_T0, C0_EPC);
-#ifdef CONFIG_KVM_MIPS_VZ
/* Save normal linux process pgd (VZ guarantees pgd_reg is set) */
- UASM_i_MFC0(&p, K0, c0_kscratch(), pgd_reg);
- UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_pgd), K1);
+ if (cpu_has_ldpte)
+ UASM_i_MFC0(&p, GPR_K0, C0_PWBASE);
+ else
+ UASM_i_MFC0(&p, GPR_K0, c0_kscratch(), pgd_reg);
+ UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, host_pgd), GPR_K1);
/*
* Set up KVM GPA pgd.
@@ -316,24 +259,24 @@ static void *kvm_mips_build_enter_guest(void *addr)
* - call tlbmiss_handler_setup_pgd(mm->pgd)
* - write mm->pgd into CP0_PWBase
*
- * We keep S0 pointing at struct kvm so we can load the ASID below.
+ * We keep GPR_S0 pointing at struct kvm so we can load the ASID below.
*/
- UASM_i_LW(&p, S0, (int)offsetof(struct kvm_vcpu, kvm) -
- (int)offsetof(struct kvm_vcpu, arch), K1);
- UASM_i_LW(&p, A0, offsetof(struct kvm, arch.gpa_mm.pgd), S0);
- UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
- uasm_i_jalr(&p, RA, T9);
+ UASM_i_LW(&p, GPR_S0, (int)offsetof(struct kvm_vcpu, kvm) -
+ (int)offsetof(struct kvm_vcpu, arch), GPR_K1);
+ UASM_i_LW(&p, GPR_A0, offsetof(struct kvm, arch.gpa_mm.pgd), GPR_S0);
+ UASM_i_LA(&p, GPR_T9, (unsigned long)tlbmiss_handler_setup_pgd);
+ uasm_i_jalr(&p, GPR_RA, GPR_T9);
/* delay slot */
if (cpu_has_htw)
- UASM_i_MTC0(&p, A0, C0_PWBASE);
+ UASM_i_MTC0(&p, GPR_A0, C0_PWBASE);
else
uasm_i_nop(&p);
/* Set GM bit to setup eret to VZ guest context */
- uasm_i_addiu(&p, V1, ZERO, 1);
- uasm_i_mfc0(&p, K0, C0_GUESTCTL0);
- uasm_i_ins(&p, K0, V1, MIPS_GCTL0_GM_SHIFT, 1);
- uasm_i_mtc0(&p, K0, C0_GUESTCTL0);
+ uasm_i_addiu(&p, GPR_V1, GPR_ZERO, 1);
+ uasm_i_mfc0(&p, GPR_K0, C0_GUESTCTL0);
+ uasm_i_ins(&p, GPR_K0, GPR_V1, MIPS_GCTL0_GM_SHIFT, 1);
+ uasm_i_mtc0(&p, GPR_K0, C0_GUESTCTL0);
if (cpu_has_guestid) {
/*
@@ -342,13 +285,13 @@ static void *kvm_mips_build_enter_guest(void *addr)
*/
/* Get current GuestID */
- uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
+ uasm_i_mfc0(&p, GPR_T0, C0_GUESTCTL1);
/* Set GuestCtl1.RID = GuestCtl1.ID */
- uasm_i_ext(&p, T1, T0, MIPS_GCTL1_ID_SHIFT,
+ uasm_i_ext(&p, GPR_T1, GPR_T0, MIPS_GCTL1_ID_SHIFT,
MIPS_GCTL1_ID_WIDTH);
- uasm_i_ins(&p, T0, T1, MIPS_GCTL1_RID_SHIFT,
+ uasm_i_ins(&p, GPR_T0, GPR_T1, MIPS_GCTL1_RID_SHIFT,
MIPS_GCTL1_RID_WIDTH);
- uasm_i_mtc0(&p, T0, C0_GUESTCTL1);
+ uasm_i_mtc0(&p, GPR_T0, C0_GUESTCTL1);
/* GuestID handles dealiasing so we don't need to touch ASID */
goto skip_asid_restore;
@@ -357,95 +300,65 @@ static void *kvm_mips_build_enter_guest(void *addr)
/* Root ASID Dealias (RAD) */
/* Save host ASID */
- UASM_i_MFC0(&p, K0, C0_ENTRYHI);
- UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
- K1);
+ UASM_i_MFC0(&p, GPR_K0, C0_ENTRYHI);
+ UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
+ GPR_K1);
/* Set the root ASID for the Guest */
- UASM_i_ADDIU(&p, T1, S0,
+ UASM_i_ADDIU(&p, GPR_T1, GPR_S0,
offsetof(struct kvm, arch.gpa_mm.context.asid));
-#else
- /* Set the ASID for the Guest Kernel or User */
- UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1);
- UASM_i_LW(&p, T0, offsetof(struct mips_coproc, reg[MIPS_CP0_STATUS][0]),
- T0);
- uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL);
- uasm_i_xori(&p, T0, T0, KSU_USER);
- uasm_il_bnez(&p, &r, T0, label_kernel_asid);
- UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch,
- guest_kernel_mm.context.asid));
- /* else user */
- UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch,
- guest_user_mm.context.asid));
- uasm_l_kernel_asid(&l, p);
-#endif
/* t1: contains the base of the ASID array, need to get the cpu id */
/* smp_processor_id */
- uasm_i_lw(&p, T2, offsetof(struct thread_info, cpu), GP);
+ uasm_i_lw(&p, GPR_T2, offsetof(struct thread_info, cpu), GPR_GP);
/* index the ASID array */
- uasm_i_sll(&p, T2, T2, ilog2(sizeof(long)));
- UASM_i_ADDU(&p, T3, T1, T2);
- UASM_i_LW(&p, K0, 0, T3);
+ uasm_i_sll(&p, GPR_T2, GPR_T2, ilog2(sizeof(long)));
+ UASM_i_ADDU(&p, GPR_T3, GPR_T1, GPR_T2);
+ UASM_i_LW(&p, GPR_K0, 0, GPR_T3);
#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
/*
* reuse ASID array offset
* cpuinfo_mips is a multiple of sizeof(long)
*/
- uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/sizeof(long));
- uasm_i_mul(&p, T2, T2, T3);
+ uasm_i_addiu(&p, GPR_T3, GPR_ZERO, sizeof(struct cpuinfo_mips)/sizeof(long));
+ uasm_i_mul(&p, GPR_T2, GPR_T2, GPR_T3);
- UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask);
- UASM_i_ADDU(&p, AT, AT, T2);
- UASM_i_LW(&p, T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), AT);
- uasm_i_and(&p, K0, K0, T2);
+ UASM_i_LA_mostly(&p, GPR_AT, (long)&cpu_data[0].asid_mask);
+ UASM_i_ADDU(&p, GPR_AT, GPR_AT, GPR_T2);
+ UASM_i_LW(&p, GPR_T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), GPR_AT);
+ uasm_i_and(&p, GPR_K0, GPR_K0, GPR_T2);
#else
- uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
+ uasm_i_andi(&p, GPR_K0, GPR_K0, MIPS_ENTRYHI_ASID);
#endif
-#ifndef CONFIG_KVM_MIPS_VZ
- /*
- * Set up KVM T&E GVA pgd.
- * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
- * - call tlbmiss_handler_setup_pgd(mm->pgd)
- * - but skips write into CP0_PWBase for now
- */
- UASM_i_LW(&p, A0, (int)offsetof(struct mm_struct, pgd) -
- (int)offsetof(struct mm_struct, context.asid), T1);
-
- UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
- uasm_i_jalr(&p, RA, T9);
- uasm_i_mtc0(&p, K0, C0_ENTRYHI);
-#else
/* Set up KVM VZ root ASID (!guestid) */
- uasm_i_mtc0(&p, K0, C0_ENTRYHI);
+ uasm_i_mtc0(&p, GPR_K0, C0_ENTRYHI);
skip_asid_restore:
-#endif
uasm_i_ehb(&p);
/* Disable RDHWR access */
- uasm_i_mtc0(&p, ZERO, C0_HWRENA);
+ uasm_i_mtc0(&p, GPR_ZERO, C0_HWRENA);
/* load the guest context from VCPU and return */
for (i = 1; i < 32; ++i) {
/* Guest k0/k1 loaded later */
- if (i == K0 || i == K1)
+ if (i == GPR_K0 || i == GPR_K1)
continue;
- UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
+ UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), GPR_K1);
}
#ifndef CONFIG_CPU_MIPSR6
/* Restore hi/lo */
- UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, hi), K1);
- uasm_i_mthi(&p, K0);
+ UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, hi), GPR_K1);
+ uasm_i_mthi(&p, GPR_K0);
- UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, lo), K1);
- uasm_i_mtlo(&p, K0);
+ UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, lo), GPR_K1);
+ uasm_i_mtlo(&p, GPR_K0);
#endif
/* Restore the guest's k0/k1 registers */
- UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
- UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);
+ UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, gprs[GPR_K0]), GPR_K1);
+ UASM_i_LW(&p, GPR_K1, offsetof(struct kvm_vcpu_arch, gprs[GPR_K1]), GPR_K1);
/* Jump to guest */
uasm_i_eret(&p);
@@ -469,20 +382,22 @@ void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
u32 *p = addr;
struct uasm_label labels[2];
struct uasm_reloc relocs[2];
+#ifndef CONFIG_CPU_LOONGSON64
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
+#endif
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
/* Save guest k1 into scratch register */
- UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);
+ UASM_i_MTC0(&p, GPR_K1, scratch_tmp[0], scratch_tmp[1]);
/* Get the VCPU pointer from the VCPU scratch register */
- UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
+ UASM_i_MFC0(&p, GPR_K1, scratch_vcpu[0], scratch_vcpu[1]);
/* Save guest k0 into VCPU structure */
- UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1);
+ UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu, arch.gprs[GPR_K0]), GPR_K1);
/*
* Some of the common tlbex code uses current_cpu_type(). For KVM we
@@ -490,6 +405,16 @@ void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
*/
preempt_disable();
+#ifdef CONFIG_CPU_LOONGSON64
+ UASM_i_MFC0(&p, GPR_K1, C0_PGD);
+ uasm_i_lddir(&p, GPR_K0, GPR_K1, 3); /* global page dir */
+#ifndef __PAGETABLE_PMD_FOLDED
+ uasm_i_lddir(&p, GPR_K1, GPR_K0, 1); /* middle page dir */
+#endif
+ uasm_i_ldpte(&p, GPR_K1, 0); /* even */
+ uasm_i_ldpte(&p, GPR_K1, 1); /* odd */
+ uasm_i_tlbwr(&p);
+#else
/*
* Now for the actual refill bit. A lot of this can be common with the
* Linux TLB refill handler, however we don't need to handle so many
@@ -502,26 +427,27 @@ void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
*/
#ifdef CONFIG_64BIT
- build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
+ build_get_pmde64(&p, &l, &r, GPR_K0, GPR_K1); /* get pmd in GPR_K1 */
#else
- build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
+ build_get_pgde32(&p, GPR_K0, GPR_K1); /* get pgd in GPR_K1 */
#endif
/* we don't support huge pages yet */
- build_get_ptep(&p, K0, K1);
- build_update_entries(&p, K0, K1);
+ build_get_ptep(&p, GPR_K0, GPR_K1);
+ build_update_entries(&p, GPR_K0, GPR_K1);
build_tlb_write_entry(&p, &l, &r, tlb_random);
+#endif
preempt_enable();
/* Get the VCPU pointer from the VCPU scratch register again */
- UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
+ UASM_i_MFC0(&p, GPR_K1, scratch_vcpu[0], scratch_vcpu[1]);
/* Restore the guest's k0/k1 registers */
- UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1);
+ UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu, arch.gprs[GPR_K0]), GPR_K1);
uasm_i_ehb(&p);
- UASM_i_MFC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);
+ UASM_i_MFC0(&p, GPR_K1, scratch_tmp[0], scratch_tmp[1]);
/* Jump to guest */
uasm_i_eret(&p);
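
On Loongson64 the refill sequence above uses the hardware page-walker instructions (LDDIR/LDPTE) instead of the generic tlbex builders. An illustrative C rendering of that walk, with every helper name invented for the sketch; only the shape of the walk mirrors the emitted assembly:

#include <stdbool.h>

extern unsigned long read_c0_pgd(void);			/* invented: read CP0 PGD */
extern unsigned long lddir(unsigned long dir, int lvl);	/* invented: one walk step */
extern void ldpte(unsigned long ptep, int odd);		/* invented: fill EntryLo{0,1} */
extern void tlb_write_random(void);			/* invented: TLBWR wrapper */

static void loongson_refill_sketch(bool pmd_folded)
{
	unsigned long dir = lddir(read_c0_pgd(), 3);	/* global page dir */

	if (!pmd_folded)
		dir = lddir(dir, 1);			/* middle page dir */

	ldpte(dir, 0);					/* even PTE -> EntryLo0 */
	ldpte(dir, 1);					/* odd PTE  -> EntryLo1 */
	tlb_write_random();
}
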
@@ -551,14 +477,14 @@ void *kvm_mips_build_exception(void *addr, void *handler)
memset(relocs, 0, sizeof(relocs));
/* Save guest k1 into scratch register */
- UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);
+ UASM_i_MTC0(&p, GPR_K1, scratch_tmp[0], scratch_tmp[1]);
/* Get the VCPU pointer from the VCPU scratch register */
- UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
- UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
+ UASM_i_MFC0(&p, GPR_K1, scratch_vcpu[0], scratch_vcpu[1]);
+ UASM_i_ADDIU(&p, GPR_K1, GPR_K1, offsetof(struct kvm_vcpu, arch));
/* Save guest k0 into VCPU structure */
- UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
+ UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, gprs[GPR_K0]), GPR_K1);
/* Branch to the common handler */
uasm_il_b(&p, &r, label_exit_common);
@@ -606,88 +532,85 @@ void *kvm_mips_build_exit(void *addr)
/* Start saving Guest context to VCPU */
for (i = 0; i < 32; ++i) {
/* Guest k0/k1 saved later */
- if (i == K0 || i == K1)
+ if (i == GPR_K0 || i == GPR_K1)
continue;
- UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
+ UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), GPR_K1);
}
#ifndef CONFIG_CPU_MIPSR6
/* We need to save hi/lo and restore them on the way out */
- uasm_i_mfhi(&p, T0);
- UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, hi), K1);
+ uasm_i_mfhi(&p, GPR_T0);
+ UASM_i_SW(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, hi), GPR_K1);
- uasm_i_mflo(&p, T0);
- UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, lo), K1);
+ uasm_i_mflo(&p, GPR_T0);
+ UASM_i_SW(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, lo), GPR_K1);
#endif
/* Finally save guest k1 to VCPU */
uasm_i_ehb(&p);
- UASM_i_MFC0(&p, T0, scratch_tmp[0], scratch_tmp[1]);
- UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);
+ UASM_i_MFC0(&p, GPR_T0, scratch_tmp[0], scratch_tmp[1]);
+ UASM_i_SW(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, gprs[GPR_K1]), GPR_K1);
/* Now that context has been saved, we can use other registers */
/* Restore vcpu */
- UASM_i_MFC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);
-
- /* Restore run (vcpu->run) */
- UASM_i_LW(&p, S0, offsetof(struct kvm_vcpu, run), S1);
+ UASM_i_MFC0(&p, GPR_S0, scratch_vcpu[0], scratch_vcpu[1]);
/*
* Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
* the exception
*/
- UASM_i_MFC0(&p, K0, C0_EPC);
- UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, pc), K1);
+ UASM_i_MFC0(&p, GPR_K0, C0_EPC);
+ UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, pc), GPR_K1);
- UASM_i_MFC0(&p, K0, C0_BADVADDR);
- UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
- K1);
+ UASM_i_MFC0(&p, GPR_K0, C0_BADVADDR);
+ UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
+ GPR_K1);
- uasm_i_mfc0(&p, K0, C0_CAUSE);
- uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1);
+ uasm_i_mfc0(&p, GPR_K0, C0_CAUSE);
+ uasm_i_sw(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), GPR_K1);
if (cpu_has_badinstr) {
- uasm_i_mfc0(&p, K0, C0_BADINSTR);
- uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch,
- host_cp0_badinstr), K1);
+ uasm_i_mfc0(&p, GPR_K0, C0_BADINSTR);
+ uasm_i_sw(&p, GPR_K0, offsetof(struct kvm_vcpu_arch,
+ host_cp0_badinstr), GPR_K1);
}
if (cpu_has_badinstrp) {
- uasm_i_mfc0(&p, K0, C0_BADINSTRP);
- uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch,
- host_cp0_badinstrp), K1);
+ uasm_i_mfc0(&p, GPR_K0, C0_BADINSTRP);
+ uasm_i_sw(&p, GPR_K0, offsetof(struct kvm_vcpu_arch,
+ host_cp0_badinstrp), GPR_K1);
}
/* Now restore the host state just enough to run the handlers */
/* Switch EBASE to the one used by Linux */
/* load up the host EBASE */
- uasm_i_mfc0(&p, V0, C0_STATUS);
+ uasm_i_mfc0(&p, GPR_V0, C0_STATUS);
- uasm_i_lui(&p, AT, ST0_BEV >> 16);
- uasm_i_or(&p, K0, V0, AT);
+ uasm_i_lui(&p, GPR_AT, ST0_BEV >> 16);
+ uasm_i_or(&p, GPR_K0, GPR_V0, GPR_AT);
- uasm_i_mtc0(&p, K0, C0_STATUS);
+ uasm_i_mtc0(&p, GPR_K0, C0_STATUS);
uasm_i_ehb(&p);
- UASM_i_LA_mostly(&p, K0, (long)&ebase);
- UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0);
- build_set_exc_base(&p, K0);
+ UASM_i_LA_mostly(&p, GPR_K0, (long)&ebase);
+ UASM_i_LW(&p, GPR_K0, uasm_rel_lo((long)&ebase), GPR_K0);
+ build_set_exc_base(&p, GPR_K0);
if (raw_cpu_has_fpu) {
/*
* If FPU is enabled, save FCR31 and clear it so that later
* ctc1's don't trigger FPE for pending exceptions.
*/
- uasm_i_lui(&p, AT, ST0_CU1 >> 16);
- uasm_i_and(&p, V1, V0, AT);
- uasm_il_beqz(&p, &r, V1, label_fpu_1);
+ uasm_i_lui(&p, GPR_AT, ST0_CU1 >> 16);
+ uasm_i_and(&p, GPR_V1, GPR_V0, GPR_AT);
+ uasm_il_beqz(&p, &r, GPR_V1, label_fpu_1);
uasm_i_nop(&p);
- uasm_i_cfc1(&p, T0, 31);
- uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31),
- K1);
- uasm_i_ctc1(&p, ZERO, 31);
+ uasm_i_cfc1(&p, GPR_T0, 31);
+ uasm_i_sw(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31),
+ GPR_K1);
+ uasm_i_ctc1(&p, GPR_ZERO, 31);
uasm_l_fpu_1(&l, p);
}
@@ -696,23 +619,22 @@ void *kvm_mips_build_exit(void *addr)
* If MSA is enabled, save MSACSR and clear it so that later
* instructions don't trigger MSAFPE for pending exceptions.
*/
- uasm_i_mfc0(&p, T0, C0_CONFIG5);
- uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */
- uasm_il_beqz(&p, &r, T0, label_msa_1);
+ uasm_i_mfc0(&p, GPR_T0, C0_CONFIG5);
+ uasm_i_ext(&p, GPR_T0, GPR_T0, 27, 1); /* MIPS_CONF5_MSAEN */
+ uasm_il_beqz(&p, &r, GPR_T0, label_msa_1);
uasm_i_nop(&p);
- uasm_i_cfcmsa(&p, T0, MSA_CSR);
- uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
- K1);
- uasm_i_ctcmsa(&p, MSA_CSR, ZERO);
+ uasm_i_cfcmsa(&p, GPR_T0, MSA_CSR);
+ uasm_i_sw(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
+ GPR_K1);
+ uasm_i_ctcmsa(&p, MSA_CSR, GPR_ZERO);
uasm_l_msa_1(&l, p);
}
-#ifdef CONFIG_KVM_MIPS_VZ
/* Restore host ASID */
if (!cpu_has_guestid) {
- UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
- K1);
- UASM_i_MTC0(&p, K0, C0_ENTRYHI);
+ UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
+ GPR_K1);
+ UASM_i_MTC0(&p, GPR_K0, C0_ENTRYHI);
}
/*
@@ -721,57 +643,56 @@ void *kvm_mips_build_exit(void *addr)
* - call tlbmiss_handler_setup_pgd(mm->pgd)
* - write mm->pgd into CP0_PWBase
*/
- UASM_i_LW(&p, A0,
- offsetof(struct kvm_vcpu_arch, host_pgd), K1);
- UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
- uasm_i_jalr(&p, RA, T9);
+ UASM_i_LW(&p, GPR_A0,
+ offsetof(struct kvm_vcpu_arch, host_pgd), GPR_K1);
+ UASM_i_LA(&p, GPR_T9, (unsigned long)tlbmiss_handler_setup_pgd);
+ uasm_i_jalr(&p, GPR_RA, GPR_T9);
/* delay slot */
if (cpu_has_htw)
- UASM_i_MTC0(&p, A0, C0_PWBASE);
+ UASM_i_MTC0(&p, GPR_A0, C0_PWBASE);
else
uasm_i_nop(&p);
/* Clear GM bit so we don't enter guest mode when EXL is cleared */
- uasm_i_mfc0(&p, K0, C0_GUESTCTL0);
- uasm_i_ins(&p, K0, ZERO, MIPS_GCTL0_GM_SHIFT, 1);
- uasm_i_mtc0(&p, K0, C0_GUESTCTL0);
+ uasm_i_mfc0(&p, GPR_K0, C0_GUESTCTL0);
+ uasm_i_ins(&p, GPR_K0, GPR_ZERO, MIPS_GCTL0_GM_SHIFT, 1);
+ uasm_i_mtc0(&p, GPR_K0, C0_GUESTCTL0);
/* Save GuestCtl0 so we can access GExcCode after CPU migration */
- uasm_i_sw(&p, K0,
- offsetof(struct kvm_vcpu_arch, host_cp0_guestctl0), K1);
+ uasm_i_sw(&p, GPR_K0,
+ offsetof(struct kvm_vcpu_arch, host_cp0_guestctl0), GPR_K1);
if (cpu_has_guestid) {
/*
* Clear root mode GuestID, so that root TLB operations use the
* root GuestID in the root TLB.
*/
- uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
+ uasm_i_mfc0(&p, GPR_T0, C0_GUESTCTL1);
/* Set GuestCtl1.RID = MIPS_GCTL1_ROOT_GUESTID (i.e. 0) */
- uasm_i_ins(&p, T0, ZERO, MIPS_GCTL1_RID_SHIFT,
+ uasm_i_ins(&p, GPR_T0, GPR_ZERO, MIPS_GCTL1_RID_SHIFT,
MIPS_GCTL1_RID_WIDTH);
- uasm_i_mtc0(&p, T0, C0_GUESTCTL1);
+ uasm_i_mtc0(&p, GPR_T0, C0_GUESTCTL1);
}
-#endif
/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
- uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
- uasm_i_and(&p, V0, V0, AT);
- uasm_i_lui(&p, AT, ST0_CU0 >> 16);
- uasm_i_or(&p, V0, V0, AT);
+ uasm_i_addiu(&p, GPR_AT, GPR_ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
+ uasm_i_and(&p, GPR_V0, GPR_V0, GPR_AT);
+ uasm_i_lui(&p, GPR_AT, ST0_CU0 >> 16);
+ uasm_i_or(&p, GPR_V0, GPR_V0, GPR_AT);
#ifdef CONFIG_64BIT
- uasm_i_ori(&p, V0, V0, ST0_SX | ST0_UX);
+ uasm_i_ori(&p, GPR_V0, GPR_V0, ST0_SX | ST0_UX);
#endif
- uasm_i_mtc0(&p, V0, C0_STATUS);
+ uasm_i_mtc0(&p, GPR_V0, C0_STATUS);
uasm_i_ehb(&p);
- /* Load up host GP */
- UASM_i_LW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);
+ /* Load up host GPR_GP */
+ UASM_i_LW(&p, GPR_GP, offsetof(struct kvm_vcpu_arch, host_gp), GPR_K1);
/* Need a stack before we can jump to "C" */
- UASM_i_LW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);
+ UASM_i_LW(&p, GPR_SP, offsetof(struct kvm_vcpu_arch, host_stack), GPR_K1);
/* Saved host state */
- UASM_i_ADDIU(&p, SP, SP, -(int)sizeof(struct pt_regs));
+ UASM_i_ADDIU(&p, GPR_SP, GPR_SP, -(int)sizeof(struct pt_regs));
/*
* XXXKYMA do we need to load the host ASID, maybe not because the
@@ -779,12 +700,12 @@ void *kvm_mips_build_exit(void *addr)
*/
/* Restore host scratch registers, as we'll have clobbered them */
- kvm_mips_build_restore_scratch(&p, K0, SP);
+ kvm_mips_build_restore_scratch(&p, GPR_K0, GPR_SP);
/* Restore RDHWR access */
- UASM_i_LA_mostly(&p, K0, (long)&hwrena);
- uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
- uasm_i_mtc0(&p, K0, C0_HWRENA);
+ UASM_i_LA_mostly(&p, GPR_K0, (long)&hwrena);
+ uasm_i_lw(&p, GPR_K0, uasm_rel_lo((long)&hwrena), GPR_K0);
+ uasm_i_mtc0(&p, GPR_K0, C0_HWRENA);
/* Jump to handler */
/*
@@ -792,11 +713,10 @@ void *kvm_mips_build_exit(void *addr)
* Now jump to the kvm_mips_handle_exit() to see if we can deal
* with this in the kernel
*/
- uasm_i_move(&p, A0, S0);
- uasm_i_move(&p, A1, S1);
- UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
- uasm_i_jalr(&p, RA, T9);
- UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);
+ uasm_i_move(&p, GPR_A0, GPR_S0);
+ UASM_i_LA(&p, GPR_T9, (unsigned long)kvm_mips_handle_exit);
+ uasm_i_jalr(&p, GPR_RA, GPR_T9);
+ UASM_i_ADDIU(&p, GPR_SP, GPR_SP, -CALLFRAME_SIZ);
uasm_resolve_relocs(relocs, labels);
@@ -826,7 +746,7 @@ static void *kvm_mips_build_ret_from_exit(void *addr)
memset(relocs, 0, sizeof(relocs));
/* Return from handler; make sure interrupts are disabled */
- uasm_i_di(&p, ZERO);
+ uasm_i_di(&p, GPR_ZERO);
uasm_i_ehb(&p);
/*
@@ -835,15 +755,15 @@ static void *kvm_mips_build_ret_from_exit(void *addr)
* guest, reload k1
*/
- uasm_i_move(&p, K1, S1);
- UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
+ uasm_i_move(&p, GPR_K1, GPR_S0);
+ UASM_i_ADDIU(&p, GPR_K1, GPR_K1, offsetof(struct kvm_vcpu, arch));
/*
* Check the return value; it should tell us if we are returning to the
* host (handle I/O etc.) or resuming the guest
*/
- uasm_i_andi(&p, T0, V0, RESUME_HOST);
- uasm_il_bnez(&p, &r, T0, label_return_to_host);
+ uasm_i_andi(&p, GPR_T0, GPR_V0, RESUME_HOST);
+ uasm_il_bnez(&p, &r, GPR_T0, label_return_to_host);
uasm_i_nop(&p);
p = kvm_mips_build_ret_to_guest(p);
@@ -869,25 +789,25 @@ static void *kvm_mips_build_ret_to_guest(void *addr)
{
u32 *p = addr;
- /* Put the saved pointer to vcpu (s1) back into the scratch register */
- UASM_i_MTC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);
+ /* Put the saved pointer to vcpu (s0) back into the scratch register */
+ UASM_i_MTC0(&p, GPR_S0, scratch_vcpu[0], scratch_vcpu[1]);
/* Load up the Guest EBASE to minimize the window where BEV is set */
- UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
+ UASM_i_LW(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, guest_ebase), GPR_K1);
/* Switch EBASE back to the one used by KVM */
- uasm_i_mfc0(&p, V1, C0_STATUS);
- uasm_i_lui(&p, AT, ST0_BEV >> 16);
- uasm_i_or(&p, K0, V1, AT);
- uasm_i_mtc0(&p, K0, C0_STATUS);
+ uasm_i_mfc0(&p, GPR_V1, C0_STATUS);
+ uasm_i_lui(&p, GPR_AT, ST0_BEV >> 16);
+ uasm_i_or(&p, GPR_K0, GPR_V1, GPR_AT);
+ uasm_i_mtc0(&p, GPR_K0, C0_STATUS);
uasm_i_ehb(&p);
- build_set_exc_base(&p, T0);
+ build_set_exc_base(&p, GPR_T0);
/* Setup status register for running guest in UM */
- uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE);
- UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX | ST0_SX | ST0_UX));
- uasm_i_and(&p, V1, V1, AT);
- uasm_i_mtc0(&p, V1, C0_STATUS);
+ uasm_i_ori(&p, GPR_V1, GPR_V1, ST0_EXL | KSU_USER | ST0_IE);
+ UASM_i_LA(&p, GPR_AT, ~(ST0_CU0 | ST0_MX | ST0_SX | ST0_UX));
+ uasm_i_and(&p, GPR_V1, GPR_V1, GPR_AT);
+ uasm_i_mtc0(&p, GPR_V1, C0_STATUS);
uasm_i_ehb(&p);
p = kvm_mips_build_enter_guest(p);
@@ -911,31 +831,31 @@ static void *kvm_mips_build_ret_to_host(void *addr)
unsigned int i;
/* EBASE is already pointing to Linux */
- UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, host_stack), K1);
- UASM_i_ADDIU(&p, K1, K1, -(int)sizeof(struct pt_regs));
+ UASM_i_LW(&p, GPR_K1, offsetof(struct kvm_vcpu_arch, host_stack), GPR_K1);
+ UASM_i_ADDIU(&p, GPR_K1, GPR_K1, -(int)sizeof(struct pt_regs));
/*
* r2/v0 is the return code, shift it down by 2 (arithmetic)
* to recover the err code
*/
- uasm_i_sra(&p, K0, V0, 2);
- uasm_i_move(&p, V0, K0);
+ uasm_i_sra(&p, GPR_K0, GPR_V0, 2);
+ uasm_i_move(&p, GPR_V0, GPR_K0);
/* Load context saved on the host stack */
for (i = 16; i < 31; ++i) {
if (i == 24)
i = 28;
- UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
+ UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), GPR_K1);
}
/* Restore RDHWR access */
- UASM_i_LA_mostly(&p, K0, (long)&hwrena);
- uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
- uasm_i_mtc0(&p, K0, C0_HWRENA);
+ UASM_i_LA_mostly(&p, GPR_K0, (long)&hwrena);
+ uasm_i_lw(&p, GPR_K0, uasm_rel_lo((long)&hwrena), GPR_K0);
+ uasm_i_mtc0(&p, GPR_K0, C0_HWRENA);
- /* Restore RA, which is the address we will return to */
- UASM_i_LW(&p, RA, offsetof(struct pt_regs, regs[RA]), K1);
- uasm_i_jr(&p, RA);
+ /* Restore GPR_RA, which is the address we will return to */
+ UASM_i_LW(&p, GPR_RA, offsetof(struct pt_regs, regs[GPR_RA]), GPR_K1);
+ uasm_i_jr(&p, GPR_RA);
uasm_i_nop(&p);
return p;
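
The new single-argument prototype documented at the top of this file means the assembled entry code no longer juggles a separate kvm_run pointer; everything is reached through the vcpu. A sketch of the C-side invocation, assuming the generated code is stored in a vcpu->arch.vcpu_run function pointer (the field name follows the surrounding kernel convention and is not shown in this diff):

/* Call into the code assembled by kvm_mips_build_vcpu_run(). */
static int run_guest(struct kvm_vcpu *vcpu)
{
	int (*vcpu_run)(struct kvm_vcpu *vcpu) = vcpu->arch.vcpu_run;

	/* Returns the value recovered by kvm_mips_build_ret_to_host(). */
	return vcpu_run(vcpu);
}
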
diff --git a/arch/mips/kvm/fpu.S b/arch/mips/kvm/fpu.S
index 16f17c6390dd..eb2e8cc3532f 100644
--- a/arch/mips/kvm/fpu.S
+++ b/arch/mips/kvm/fpu.S
@@ -22,7 +22,7 @@
LEAF(__kvm_save_fpu)
.set push
- SET_HARDFLOAT
+ .set hardfloat
.set fp=64
mfc0 t0, CP0_STATUS
sll t0, t0, 5 # is Status.FR set?
@@ -66,7 +66,7 @@ LEAF(__kvm_save_fpu)
LEAF(__kvm_restore_fpu)
.set push
- SET_HARDFLOAT
+ .set hardfloat
.set fp=64
mfc0 t0, CP0_STATUS
sll t0, t0, 5 # is Status.FR set?
@@ -110,7 +110,7 @@ LEAF(__kvm_restore_fpu)
LEAF(__kvm_restore_fcsr)
.set push
- SET_HARDFLOAT
+ .set hardfloat
lw t0, VCPU_FCR31(a0)
/*
* The ctc1 must stay at this offset in __kvm_restore_fcsr.
diff --git a/arch/mips/kvm/interrupt.c b/arch/mips/kvm/interrupt.c
index 7257e8b6f5a9..0277942279ea 100644
--- a/arch/mips/kvm/interrupt.c
+++ b/arch/mips/kvm/interrupt.c
@@ -21,186 +21,6 @@
#include "interrupt.h"
-void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
-{
- set_bit(priority, &vcpu->arch.pending_exceptions);
-}
-
-void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
-{
- clear_bit(priority, &vcpu->arch.pending_exceptions);
-}
-
-void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu)
-{
- /*
- * Cause bits to reflect the pending timer interrupt,
- * the EXC code will be set when we are actually
- * delivering the interrupt:
- */
- kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
-
- /* Queue up an INT exception for the core */
- kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
-
-}
-
-void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
-{
- kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
- kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
-}
-
-void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
- struct kvm_mips_interrupt *irq)
-{
- int intr = (int)irq->irq;
-
- /*
- * Cause bits to reflect the pending IO interrupt,
- * the EXC code will be set when we are actually
- * delivering the interrupt:
- */
- switch (intr) {
- case 2:
- kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
- /* Queue up an INT exception for the core */
- kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IO);
- break;
-
- case 3:
- kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
- kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
- break;
-
- case 4:
- kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
- kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
- break;
-
- default:
- break;
- }
-
-}
-
-void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
- struct kvm_mips_interrupt *irq)
-{
- int intr = (int)irq->irq;
-
- switch (intr) {
- case -2:
- kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
- kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
- break;
-
- case -3:
- kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
- kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
- break;
-
- case -4:
- kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
- kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
- break;
-
- default:
- break;
- }
-
-}
-
-/* Deliver the interrupt of the corresponding priority, if possible. */
-int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
- u32 cause)
-{
- int allowed = 0;
- u32 exccode;
-
- struct kvm_vcpu_arch *arch = &vcpu->arch;
- struct mips_coproc *cop0 = vcpu->arch.cop0;
-
- switch (priority) {
- case MIPS_EXC_INT_TIMER:
- if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
- && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
- && (kvm_read_c0_guest_status(cop0) & IE_IRQ5)) {
- allowed = 1;
- exccode = EXCCODE_INT;
- }
- break;
-
- case MIPS_EXC_INT_IO:
- if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
- && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
- && (kvm_read_c0_guest_status(cop0) & IE_IRQ0)) {
- allowed = 1;
- exccode = EXCCODE_INT;
- }
- break;
-
- case MIPS_EXC_INT_IPI_1:
- if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
- && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
- && (kvm_read_c0_guest_status(cop0) & IE_IRQ1)) {
- allowed = 1;
- exccode = EXCCODE_INT;
- }
- break;
-
- case MIPS_EXC_INT_IPI_2:
- if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
- && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
- && (kvm_read_c0_guest_status(cop0) & IE_IRQ2)) {
- allowed = 1;
- exccode = EXCCODE_INT;
- }
- break;
-
- default:
- break;
- }
-
- /* Are we allowed to deliver the interrupt ??? */
- if (allowed) {
- if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
- /* save old pc */
- kvm_write_c0_guest_epc(cop0, arch->pc);
- kvm_set_c0_guest_status(cop0, ST0_EXL);
-
- if (cause & CAUSEF_BD)
- kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
- else
- kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
- kvm_debug("Delivering INT @ pc %#lx\n", arch->pc);
-
- } else
- kvm_err("Trying to deliver interrupt when EXL is already set\n");
-
- kvm_change_c0_guest_cause(cop0, CAUSEF_EXCCODE,
- (exccode << CAUSEB_EXCCODE));
-
- /* XXXSL Set PC to the interrupt exception entry point */
- arch->pc = kvm_mips_guest_exception_base(vcpu);
- if (kvm_read_c0_guest_cause(cop0) & CAUSEF_IV)
- arch->pc += 0x200;
- else
- arch->pc += 0x180;
-
- clear_bit(priority, &vcpu->arch.pending_exceptions);
- }
-
- return allowed;
-}
-
-int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
- u32 cause)
-{
- return 1;
-}
-
void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause)
{
unsigned long *pending = &vcpu->arch.pending_exceptions;
@@ -212,10 +32,7 @@ void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause)
priority = __ffs(*pending_clr);
while (priority <= MIPS_EXC_MAX) {
- if (kvm_mips_callbacks->irq_clear(vcpu, priority, cause)) {
- if (!KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE)
- break;
- }
+ kvm_mips_callbacks->irq_clear(vcpu, priority, cause);
priority = find_next_bit(pending_clr,
BITS_PER_BYTE * sizeof(*pending_clr),
@@ -224,10 +41,7 @@ void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause)
priority = __ffs(*pending);
while (priority <= MIPS_EXC_MAX) {
- if (kvm_mips_callbacks->irq_deliver(vcpu, priority, cause)) {
- if (!KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE)
- break;
- }
+ kvm_mips_callbacks->irq_deliver(vcpu, priority, cause);
priority = find_next_bit(pending,
BITS_PER_BYTE * sizeof(*pending),
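
With the ALL_AT_ONCE knobs gone, both loops above walk every pending priority unconditionally, lowest bit first. A minimal standalone sketch of that bitmap walk, emulating __ffs()/find_next_bit() with a compiler builtin (priorities and values are illustrative):

/* Standalone demo: deliver pending priorities lowest-first, as the
 * simplified loops above do.  __builtin_ctzl() stands in for __ffs();
 * clearing the lowest set bit stands in for find_next_bit(). */
#include <stdio.h>

#define MIPS_EXC_MAX 13

static void deliver(unsigned int priority)
{
	printf("deliver priority %u\n", priority);
}

int main(void)
{
	unsigned long pending = (1UL << 7) | (1UL << 8);	/* timer + first IO line */

	while (pending) {
		unsigned int priority = __builtin_ctzl(pending);

		if (priority > MIPS_EXC_MAX)
			break;
		deliver(priority);
		pending &= pending - 1;	/* clear lowest set bit */
	}
	return 0;
}
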
diff --git a/arch/mips/kvm/interrupt.h b/arch/mips/kvm/interrupt.h
index 3bf0a49725e8..e529ea2bb34b 100644
--- a/arch/mips/kvm/interrupt.h
+++ b/arch/mips/kvm/interrupt.h
@@ -21,35 +21,19 @@
#define MIPS_EXC_NMI 5
#define MIPS_EXC_MCHK 6
#define MIPS_EXC_INT_TIMER 7
-#define MIPS_EXC_INT_IO 8
-#define MIPS_EXC_EXECUTE 9
-#define MIPS_EXC_INT_IPI_1 10
-#define MIPS_EXC_INT_IPI_2 11
-#define MIPS_EXC_MAX 12
+#define MIPS_EXC_INT_IO_1 8
+#define MIPS_EXC_INT_IO_2 9
+#define MIPS_EXC_EXECUTE 10
+#define MIPS_EXC_INT_IPI_1 11
+#define MIPS_EXC_INT_IPI_2 12
+#define MIPS_EXC_MAX 13
/* XXXSL More to follow */
#define C_TI (_ULCAST_(1) << 30)
-#ifdef CONFIG_KVM_MIPS_VZ
-#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (1)
-#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (1)
-#else
-#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
-#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (0)
-#endif
+extern u32 *kvm_priority_to_irq;
+u32 kvm_irq_to_priority(u32 irq);
-void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority);
-void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority);
int kvm_mips_pending_timer(struct kvm_vcpu *vcpu);
-void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu);
-void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu);
-void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
- struct kvm_mips_interrupt *irq);
-void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
- struct kvm_mips_interrupt *irq);
-int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
- u32 cause);
-int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
- u32 cause);
void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause);
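
The fixed per-priority constants are replaced by the kvm_priority_to_irq table and its inverse kvm_irq_to_priority(), both defined in mips.c. The inverse relies on the MIPS Cause register layout, where hardware interrupt line n corresponds to IP bit (n + 8). A standalone check of that identity:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t irq = 2;			/* first hardware interrupt line */
	uint32_t cause_bit = UINT32_C(1) << (irq + 8);

	assert(cause_bit == 0x400);		/* == C_IRQ0 == CAUSEF_IP2 */
	return 0;
}
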
diff --git a/arch/mips/kvm/loongson_ipi.c b/arch/mips/kvm/loongson_ipi.c
new file mode 100644
index 000000000000..5d53f32d837c
--- /dev/null
+++ b/arch/mips/kvm/loongson_ipi.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Loongson-3 Virtual IPI interrupt support.
+ *
+ * Copyright (C) 2019 Loongson Technologies, Inc. All rights reserved.
+ *
+ * Authors: Chen Zhu <zhuchen@loongson.cn>
+ * Authors: Huacai Chen <chenhc@lemote.com>
+ */
+
+#include <linux/kvm_host.h>
+
+#define IPI_BASE 0x3ff01000ULL
+
+#define CORE0_STATUS_OFF 0x000
+#define CORE0_EN_OFF 0x004
+#define CORE0_SET_OFF 0x008
+#define CORE0_CLEAR_OFF 0x00c
+#define CORE0_BUF_20 0x020
+#define CORE0_BUF_28 0x028
+#define CORE0_BUF_30 0x030
+#define CORE0_BUF_38 0x038
+
+#define CORE1_STATUS_OFF 0x100
+#define CORE1_EN_OFF 0x104
+#define CORE1_SET_OFF 0x108
+#define CORE1_CLEAR_OFF 0x10c
+#define CORE1_BUF_20 0x120
+#define CORE1_BUF_28 0x128
+#define CORE1_BUF_30 0x130
+#define CORE1_BUF_38 0x138
+
+#define CORE2_STATUS_OFF 0x200
+#define CORE2_EN_OFF 0x204
+#define CORE2_SET_OFF 0x208
+#define CORE2_CLEAR_OFF 0x20c
+#define CORE2_BUF_20 0x220
+#define CORE2_BUF_28 0x228
+#define CORE2_BUF_30 0x230
+#define CORE2_BUF_38 0x238
+
+#define CORE3_STATUS_OFF 0x300
+#define CORE3_EN_OFF 0x304
+#define CORE3_SET_OFF 0x308
+#define CORE3_CLEAR_OFF 0x30c
+#define CORE3_BUF_20 0x320
+#define CORE3_BUF_28 0x328
+#define CORE3_BUF_30 0x330
+#define CORE3_BUF_38 0x338
+
+static int loongson_vipi_read(struct loongson_kvm_ipi *ipi,
+ gpa_t addr, int len, void *val)
+{
+ uint32_t core = (addr >> 8) & 3;
+ uint32_t node = (addr >> 44) & 3;
+ uint32_t id = core + node * 4;
+ uint64_t offset = addr & 0xff;
+ void *pbuf;
+ struct ipi_state *s = &(ipi->ipistate[id]);
+
+ BUG_ON(offset & (len - 1));
+
+ switch (offset) {
+ case CORE0_STATUS_OFF:
+ *(uint64_t *)val = s->status;
+ break;
+
+ case CORE0_EN_OFF:
+ *(uint64_t *)val = s->en;
+ break;
+
+ case CORE0_SET_OFF:
+ *(uint64_t *)val = 0;
+ break;
+
+ case CORE0_CLEAR_OFF:
+ *(uint64_t *)val = 0;
+ break;
+
+ case CORE0_BUF_20 ... CORE0_BUF_38:
+ pbuf = (void *)s->buf + (offset - 0x20);
+ if (len == 8)
+ *(uint64_t *)val = *(uint64_t *)pbuf;
+ else /* Assume len == 4 */
+ *(uint32_t *)val = *(uint32_t *)pbuf;
+ break;
+
+ default:
+ pr_notice("%s with unknown addr %llx\n", __func__, addr);
+ break;
+ }
+
+ return 0;
+}
+
+static int loongson_vipi_write(struct loongson_kvm_ipi *ipi,
+ gpa_t addr, int len, const void *val)
+{
+ uint32_t core = (addr >> 8) & 3;
+ uint32_t node = (addr >> 44) & 3;
+ uint32_t id = core + node * 4;
+ uint64_t data, offset = addr & 0xff;
+ void *pbuf;
+ struct kvm *kvm = ipi->kvm;
+ struct kvm_mips_interrupt irq;
+ struct ipi_state *s = &(ipi->ipistate[id]);
+
+ data = *(uint64_t *)val;
+ BUG_ON(offset & (len - 1));
+
+ switch (offset) {
+ case CORE0_STATUS_OFF:
+ break;
+
+ case CORE0_EN_OFF:
+ s->en = data;
+ break;
+
+ case CORE0_SET_OFF:
+ s->status |= data;
+ irq.cpu = id;
+ irq.irq = 6;
+ kvm_vcpu_ioctl_interrupt(kvm_get_vcpu(kvm, id), &irq);
+ break;
+
+ case CORE0_CLEAR_OFF:
+ s->status &= ~data;
+ if (!s->status) {
+ irq.cpu = id;
+ irq.irq = -6;
+ kvm_vcpu_ioctl_interrupt(kvm_get_vcpu(kvm, id), &irq);
+ }
+ break;
+
+ case CORE0_BUF_20 ... CORE0_BUF_38:
+ pbuf = (void *)s->buf + (offset - 0x20);
+ if (len == 8)
+ *(uint64_t *)pbuf = (uint64_t)data;
+ else /* Assume len == 4 */
+ *(uint32_t *)pbuf = (uint32_t)data;
+ break;
+
+ default:
+ pr_notice("%s with unknown addr %llx\n", __func__, addr);
+ break;
+ }
+
+ return 0;
+}
+
+static int kvm_ipi_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
+ gpa_t addr, int len, void *val)
+{
+ unsigned long flags;
+ struct loongson_kvm_ipi *ipi;
+ struct ipi_io_device *ipi_device;
+
+ ipi_device = container_of(dev, struct ipi_io_device, device);
+ ipi = ipi_device->ipi;
+
+ spin_lock_irqsave(&ipi->lock, flags);
+ loongson_vipi_read(ipi, addr, len, val);
+ spin_unlock_irqrestore(&ipi->lock, flags);
+
+ return 0;
+}
+
+static int kvm_ipi_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
+ gpa_t addr, int len, const void *val)
+{
+ unsigned long flags;
+ struct loongson_kvm_ipi *ipi;
+ struct ipi_io_device *ipi_device;
+
+ ipi_device = container_of(dev, struct ipi_io_device, device);
+ ipi = ipi_device->ipi;
+
+ spin_lock_irqsave(&ipi->lock, flags);
+ loongson_vipi_write(ipi, addr, len, val);
+ spin_unlock_irqrestore(&ipi->lock, flags);
+
+ return 0;
+}
+
+static const struct kvm_io_device_ops kvm_ipi_ops = {
+ .read = kvm_ipi_read,
+ .write = kvm_ipi_write,
+};
+
+void kvm_init_loongson_ipi(struct kvm *kvm)
+{
+ int i;
+ unsigned long addr;
+ struct loongson_kvm_ipi *s;
+ struct kvm_io_device *device;
+
+ s = &kvm->arch.ipi;
+ s->kvm = kvm;
+ spin_lock_init(&s->lock);
+
+ /*
+ * Initialize IPI device
+ */
+ for (i = 0; i < 4; i++) {
+ device = &s->dev_ipi[i].device;
+ kvm_iodevice_init(device, &kvm_ipi_ops);
+ addr = (((unsigned long)i) << 44) + IPI_BASE;
+ mutex_lock(&kvm->slots_lock);
+ kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, 0x400, device);
+ mutex_unlock(&kvm->slots_lock);
+ s->dev_ipi[i].ipi = s;
+ s->dev_ipi[i].node_id = i;
+ }
+}
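
Only CORE0_* offsets appear in the switch statements above because the low byte (addr & 0xff) collapses every core's register block onto the CORE0 offsets; the core and node fields are decoded from higher bits. A standalone sketch of that decode (the address and the 4-cores-per-node assumption mirror the code above):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* node 1, core 1, SET register: IPI_BASE + 0x108 in node 1's window */
	uint64_t addr = (1ULL << 44) + 0x3ff01108ULL;

	uint32_t core = (addr >> 8) & 3;
	uint32_t node = (addr >> 44) & 3;
	uint32_t id = core + node * 4;		/* target vcpu index */
	uint64_t offset = addr & 0xff;		/* collapses to CORE0_SET_OFF */

	printf("vcpu %u, register offset 0x%02llx\n",
	       id, (unsigned long long)offset);	/* prints: vcpu 5, 0x08 */
	return 0;
}
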
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 1109924560d8..231ac052b506 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -19,18 +19,17 @@
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/memblock.h>
+#include <linux/pgtable.h>
#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <linux/kvm_host.h>
#include "interrupt.h"
-#include "commpage.h"
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -39,54 +38,72 @@
#define VECTORSPACING 0x100 /* for EI/VI mode */
#endif
-#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
-struct kvm_stats_debugfs_item debugfs_entries[] = {
- { "wait", VCPU_STAT(wait_exits), KVM_STAT_VCPU },
- { "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU },
- { "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU },
- { "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU },
- { "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
- { "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU },
- { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU },
- { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU },
- { "addrerr_st", VCPU_STAT(addrerr_st_exits), KVM_STAT_VCPU },
- { "addrerr_ld", VCPU_STAT(addrerr_ld_exits), KVM_STAT_VCPU },
- { "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU },
- { "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU },
- { "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU },
- { "trap_inst", VCPU_STAT(trap_inst_exits), KVM_STAT_VCPU },
- { "msa_fpe", VCPU_STAT(msa_fpe_exits), KVM_STAT_VCPU },
- { "fpe", VCPU_STAT(fpe_exits), KVM_STAT_VCPU },
- { "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
- { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
-#ifdef CONFIG_KVM_MIPS_VZ
- { "vz_gpsi", VCPU_STAT(vz_gpsi_exits), KVM_STAT_VCPU },
- { "vz_gsfc", VCPU_STAT(vz_gsfc_exits), KVM_STAT_VCPU },
- { "vz_hc", VCPU_STAT(vz_hc_exits), KVM_STAT_VCPU },
- { "vz_grr", VCPU_STAT(vz_grr_exits), KVM_STAT_VCPU },
- { "vz_gva", VCPU_STAT(vz_gva_exits), KVM_STAT_VCPU },
- { "vz_ghfc", VCPU_STAT(vz_ghfc_exits), KVM_STAT_VCPU },
- { "vz_gpa", VCPU_STAT(vz_gpa_exits), KVM_STAT_VCPU },
- { "vz_resvd", VCPU_STAT(vz_resvd_exits), KVM_STAT_VCPU },
+const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+ KVM_GENERIC_VM_STATS()
+};
+
+const struct kvm_stats_header kvm_vm_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vm_stats_desc),
+};
+
+const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+ KVM_GENERIC_VCPU_STATS(),
+ STATS_DESC_COUNTER(VCPU, wait_exits),
+ STATS_DESC_COUNTER(VCPU, cache_exits),
+ STATS_DESC_COUNTER(VCPU, signal_exits),
+ STATS_DESC_COUNTER(VCPU, int_exits),
+ STATS_DESC_COUNTER(VCPU, cop_unusable_exits),
+ STATS_DESC_COUNTER(VCPU, tlbmod_exits),
+ STATS_DESC_COUNTER(VCPU, tlbmiss_ld_exits),
+ STATS_DESC_COUNTER(VCPU, tlbmiss_st_exits),
+ STATS_DESC_COUNTER(VCPU, addrerr_st_exits),
+ STATS_DESC_COUNTER(VCPU, addrerr_ld_exits),
+ STATS_DESC_COUNTER(VCPU, syscall_exits),
+ STATS_DESC_COUNTER(VCPU, resvd_inst_exits),
+ STATS_DESC_COUNTER(VCPU, break_inst_exits),
+ STATS_DESC_COUNTER(VCPU, trap_inst_exits),
+ STATS_DESC_COUNTER(VCPU, msa_fpe_exits),
+ STATS_DESC_COUNTER(VCPU, fpe_exits),
+ STATS_DESC_COUNTER(VCPU, msa_disabled_exits),
+ STATS_DESC_COUNTER(VCPU, flush_dcache_exits),
+ STATS_DESC_COUNTER(VCPU, vz_gpsi_exits),
+ STATS_DESC_COUNTER(VCPU, vz_gsfc_exits),
+ STATS_DESC_COUNTER(VCPU, vz_hc_exits),
+ STATS_DESC_COUNTER(VCPU, vz_grr_exits),
+ STATS_DESC_COUNTER(VCPU, vz_gva_exits),
+ STATS_DESC_COUNTER(VCPU, vz_ghfc_exits),
+ STATS_DESC_COUNTER(VCPU, vz_gpa_exits),
+ STATS_DESC_COUNTER(VCPU, vz_resvd_exits),
+#ifdef CONFIG_CPU_LOONGSON64
+ STATS_DESC_COUNTER(VCPU, vz_cpucfg_exits),
#endif
- { "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
- { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU },
- { "halt_poll_invalid", VCPU_STAT(halt_poll_invalid), KVM_STAT_VCPU },
- { "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
- {NULL}
+};
+
+const struct kvm_stats_header kvm_vcpu_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vcpu_stats_desc),
};
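
These headers describe a self-indexing binary format: userspace reads the header, follows desc_offset to the descriptor table and data_offset to the values, without any architecture-specific knowledge. A hedged sketch, assuming the generic KVM_GET_STATS_FD interface from <linux/kvm.h>; the helper name is illustrative:

#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Read the binary stats file header from a vcpu fd. */
int read_stats_header(int vcpu_fd)
{
	struct kvm_stats_header hdr;
	int stats_fd = ioctl(vcpu_fd, KVM_GET_STATS_FD, NULL);

	if (stats_fd < 0)
		return -1;
	if (pread(stats_fd, &hdr, sizeof(hdr), 0) != sizeof(hdr)) {
		close(stats_fd);
		return -1;
	}
	printf("%u stat descriptors, data at offset %u\n",
	       hdr.num_desc, hdr.data_offset);
	close(stats_fd);
	return 0;
}
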
bool kvm_trace_guest_mode_change;
int kvm_guest_mode_change_trace_reg(void)
{
- kvm_trace_guest_mode_change = 1;
+ kvm_trace_guest_mode_change = true;
return 0;
}
void kvm_guest_mode_change_trace_unreg(void)
{
- kvm_trace_guest_mode_change = 0;
+ kvm_trace_guest_mode_change = false;
}
/*
@@ -118,55 +135,30 @@ void kvm_arch_hardware_disable(void)
kvm_mips_callbacks->hardware_disable();
}
-int kvm_arch_hardware_setup(void)
-{
- return 0;
-}
-
-int kvm_arch_check_processor_compat(void)
-{
- return 0;
-}
+extern void kvm_init_loongson_ipi(struct kvm *kvm);
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
switch (type) {
-#ifdef CONFIG_KVM_MIPS_VZ
+ case KVM_VM_MIPS_AUTO:
+ break;
case KVM_VM_MIPS_VZ:
-#else
- case KVM_VM_MIPS_TE:
-#endif
break;
default:
/* Unsupported KVM type */
return -EINVAL;
- };
+ }
/* Allocate page table to map GPA -> RPA */
kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
if (!kvm->arch.gpa_mm.pgd)
return -ENOMEM;
- return 0;
-}
-
-void kvm_mips_free_vcpus(struct kvm *kvm)
-{
- unsigned int i;
- struct kvm_vcpu *vcpu;
-
- kvm_for_each_vcpu(i, vcpu, kvm) {
- kvm_arch_vcpu_free(vcpu);
- }
-
- mutex_lock(&kvm->lock);
-
- for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
- kvm->vcpus[i] = NULL;
-
- atomic_set(&kvm->online_vcpus, 0);
+#ifdef CONFIG_CPU_LOONGSON64
+ kvm_init_loongson_ipi(kvm);
+#endif
- mutex_unlock(&kvm->lock);
+ return 0;
}
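
With trap-and-emulate removed, KVM_VM_MIPS_VZ and the new KVM_VM_MIPS_AUTO are the only machine types accepted here. A hedged userspace sketch of VM creation (constants from <linux/kvm.h>; the helper name is illustrative and error handling is elided):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int create_mips_vm(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);

	/* AUTO defers the choice to the kernel; after this series both
	 * accepted types are backed by the VZ implementation. */
	return ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_MIPS_AUTO);
}
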
static void kvm_mips_free_gpa_pt(struct kvm *kvm)
@@ -178,7 +170,7 @@ static void kvm_mips_free_gpa_pt(struct kvm *kvm)
void kvm_arch_destroy_vm(struct kvm *kvm)
{
- kvm_mips_free_vcpus(kvm);
+ kvm_destroy_vcpus(kvm);
kvm_mips_free_gpa_pt(kvm);
}
@@ -188,19 +180,11 @@ long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
return -ENOIOCTLCMD;
}
-int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
- unsigned long npages)
-{
- return 0;
-}
-
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
/* Flush whole GPA */
kvm_mips_flush_gpa_pt(kvm, 0, ~0);
-
- /* Let implementation do the rest */
- kvm_mips_callbacks->flush_shadow_all(kvm);
+ kvm_flush_remote_tlbs(kvm);
}
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
@@ -215,31 +199,25 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
/* Flush slot from GPA */
kvm_mips_flush_gpa_pt(kvm, slot->base_gfn,
slot->base_gfn + slot->npages - 1);
- /* Let implementation do the rest */
- kvm_mips_callbacks->flush_shadow_memslot(kvm, slot);
+ kvm_flush_remote_tlbs_memslot(kvm, slot);
spin_unlock(&kvm->mmu_lock);
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
return 0;
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
int needs_flush;
- kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
- __func__, kvm, mem->slot, mem->guest_phys_addr,
- mem->memory_size, mem->userspace_addr);
-
/*
* If dirty page logging is enabled, write protect all pages in the slot
* ready for dirty logging.
@@ -256,9 +234,8 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
/* Write protect GPA page table entries */
needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn,
new->base_gfn + new->npages - 1);
- /* Let implementation do the rest */
if (needs_flush)
- kvm_mips_callbacks->flush_shadow_memslot(kvm, new);
+ kvm_flush_remote_tlbs_memslot(kvm, new);
spin_unlock(&kvm->mmu_lock);
}
}
@@ -280,25 +257,42 @@ static inline void dump_handler(const char *symbol, void *start, void *end)
pr_debug("\tEND(%s)\n", symbol);
}
-struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+/* low level hrtimer wake routine */
+static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
+{
+ struct kvm_vcpu *vcpu;
+
+ vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
+
+ kvm_mips_callbacks->queue_timer_int(vcpu);
+
+ vcpu->arch.wait = 0;
+ rcuwait_wake_up(&vcpu->wait);
+
+ return kvm_mips_count_timeout(vcpu);
+}
+
+int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
+{
+ return 0;
+}
+
+int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
int err, size;
void *gebase, *p, *handler, *refill_start, *refill_end;
int i;
- struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
-
- if (!vcpu) {
- err = -ENOMEM;
- goto out;
- }
-
- err = kvm_vcpu_init(vcpu, kvm, id);
+ kvm_debug("kvm @ %p: create cpu %d at %p\n",
+ vcpu->kvm, vcpu->vcpu_id, vcpu);
+ err = kvm_mips_callbacks->vcpu_init(vcpu);
if (err)
- goto out_free_cpu;
+ return err;
- kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
+ hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
/*
* Allocate space for host mode exception handlers that handle
@@ -313,7 +307,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
if (!gebase) {
err = -ENOMEM;
- goto out_uninit_cpu;
+ goto out_uninit_vcpu;
}
kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
ALIGN(size, PAGE_SIZE), gebase);
@@ -338,7 +332,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
/* TLB refill (or XTLB refill on 64-bit VZ where KX=1) */
refill_start = gebase;
- if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && IS_ENABLED(CONFIG_64BIT))
+ if (IS_ENABLED(CONFIG_64BIT))
refill_start += 0x080;
refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler);
@@ -374,56 +368,34 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
flush_icache_range((unsigned long)gebase,
(unsigned long)gebase + ALIGN(size, PAGE_SIZE));
- /*
- * Allocate comm page for guest kernel, a TLB will be reserved for
- * mapping GVA @ 0xFFFF8000 to this page
- */
- vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
-
- if (!vcpu->arch.kseg0_commpage) {
- err = -ENOMEM;
- goto out_free_gebase;
- }
-
- kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
- kvm_mips_commpage_init(vcpu);
-
/* Init */
vcpu->arch.last_sched_cpu = -1;
vcpu->arch.last_exec_cpu = -1;
- return vcpu;
+ /* Initial guest state */
+ err = kvm_mips_callbacks->vcpu_setup(vcpu);
+ if (err)
+ goto out_free_gebase;
+
+ return 0;
out_free_gebase:
kfree(gebase);
-
-out_uninit_cpu:
- kvm_vcpu_uninit(vcpu);
-
-out_free_cpu:
- kfree(vcpu);
-
-out:
- return ERR_PTR(err);
+out_uninit_vcpu:
+ kvm_mips_callbacks->vcpu_uninit(vcpu);
+ return err;
}
-void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
hrtimer_cancel(&vcpu->arch.comparecount_timer);
- kvm_vcpu_uninit(vcpu);
-
kvm_mips_dump_stats(vcpu);
kvm_mmu_free_memory_caches(vcpu);
kfree(vcpu->arch.guest_ebase);
- kfree(vcpu->arch.kseg0_commpage);
- kfree(vcpu);
-}
-void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
-{
- kvm_arch_vcpu_free(vcpu);
+ kvm_mips_callbacks->vcpu_uninit(vcpu);
}
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
@@ -432,7 +404,25 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
return -ENOIOCTLCMD;
}
-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+/*
+ * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
+ * the vCPU is running.
+ *
+ * This must be noinstr as instrumentation may make use of RCU, and this is not
+ * safe during the EQS.
+ */
+static int noinstr kvm_mips_vcpu_enter_exit(struct kvm_vcpu *vcpu)
+{
+ int ret;
+
+ guest_state_enter_irqoff();
+ ret = kvm_mips_callbacks->vcpu_run(vcpu);
+ guest_state_exit_irqoff();
+
+ return ret;
+}
+
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
int r = -EINTR;
@@ -442,17 +432,17 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
if (vcpu->mmio_needed) {
if (!vcpu->mmio_is_write)
- kvm_mips_complete_mmio_load(vcpu, run);
+ kvm_mips_complete_mmio_load(vcpu);
vcpu->mmio_needed = 0;
}
- if (run->immediate_exit)
+ if (vcpu->run->immediate_exit)
goto out;
lose_fpu(1);
local_irq_disable();
- guest_enter_irqoff();
+ guest_timing_enter_irqoff();
trace_kvm_enter(vcpu);
/*
@@ -463,10 +453,23 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
*/
smp_store_mb(vcpu->mode, IN_GUEST_MODE);
- r = kvm_mips_callbacks->vcpu_run(run, vcpu);
+ r = kvm_mips_vcpu_enter_exit(vcpu);
+
+ /*
+ * We must ensure that any pending interrupts are taken before
+ * we exit guest timing so that timer ticks are accounted as
+ * guest time. Transiently unmask interrupts so that any
+ * pending interrupts are taken.
+ *
+ * TODO: is there a barrier which ensures that pending interrupts are
+ * recognised? Currently this just hopes that the CPU takes any pending
+ * interrupts between the enable and disable.
+ */
+ local_irq_enable();
+ local_irq_disable();
trace_kvm_out(vcpu);
- guest_exit_irqoff();
+ guest_timing_exit_irqoff();
local_irq_enable();
out:
@@ -482,19 +485,22 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
int intr = (int)irq->irq;
struct kvm_vcpu *dvcpu = NULL;
- if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
+ if (intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_1] ||
+ intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_2] ||
+ intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_1]) ||
+ intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_2]))
kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
(int)intr);
if (irq->cpu == -1)
dvcpu = vcpu;
else
- dvcpu = vcpu->kvm->vcpus[irq->cpu];
+ dvcpu = kvm_get_vcpu(vcpu->kvm, irq->cpu);
- if (intr == 2 || intr == 3 || intr == 4) {
+ if (intr == 2 || intr == 3 || intr == 4 || intr == 6) {
kvm_mips_callbacks->queue_io_int(dvcpu, irq);
- } else if (intr == -2 || intr == -3 || intr == -4) {
+ } else if (intr == -2 || intr == -3 || intr == -4 || intr == -6) {
kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
} else {
kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
@@ -504,8 +510,7 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
dvcpu->arch.wait = 0;
- if (swq_has_sleeper(&dvcpu->wq))
- swake_up_one(&dvcpu->wq);
+ rcuwait_wake_up(&dvcpu->wait);
return 0;
}
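
The sign convention above (a positive irq asserts a line, the matching negative value deasserts it, with 6 newly accepted for the Loongson-3 IPI) is driven from userspace through KVM_INTERRUPT. A hedged sketch, assuming the uapi struct layout from <asm/kvm.h>; the helper name is illustrative:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Assert the Loongson-3 IPI line on a target vcpu. */
static int raise_ipi(int vcpu_fd, int target_cpu)
{
	struct kvm_mips_interrupt irq = {
		.cpu = target_cpu,	/* -1 targets the vcpu fd itself */
		.irq = 6,		/* assert; -6 would deassert */
	};

	return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
}
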
@@ -644,7 +649,7 @@ static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
int ret;
s64 v;
@@ -756,7 +761,7 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
s64 v;
s64 vs[2];
@@ -971,74 +976,20 @@ long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
return r;
}
-/**
- * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
- * @kvm: kvm instance
- * @log: slot id and address to which we copy the log
- *
- * Steps 1-4 below provide general overview of dirty page logging. See
- * kvm_get_dirty_log_protect() function description for additional details.
- *
- * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
- * always flush the TLB (step 4) even if previous step failed and the dirty
- * bitmap may be corrupt. Regardless of previous outcome the KVM logging API
- * does not preclude user space subsequent dirty log read. Flushing TLB ensures
- * writes will be marked dirty for next log read.
- *
- * 1. Take a snapshot of the bit and clear it if needed.
- * 2. Write protect the corresponding page.
- * 3. Copy the snapshot to the userspace.
- * 4. Flush TLB's if needed.
- */
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
- struct kvm_memslots *slots;
- struct kvm_memory_slot *memslot;
- bool flush = false;
- int r;
-
- mutex_lock(&kvm->slots_lock);
-
- r = kvm_get_dirty_log_protect(kvm, log, &flush);
-
- if (flush) {
- slots = kvm_memslots(kvm);
- memslot = id_to_memslot(slots, log->slot);
-
- /* Let implementation handle TLB/GVA invalidation */
- kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
- }
- mutex_unlock(&kvm->slots_lock);
- return r;
}
-int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, struct kvm_clear_dirty_log *log)
+int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
{
- struct kvm_memslots *slots;
- struct kvm_memory_slot *memslot;
- bool flush = false;
- int r;
-
- mutex_lock(&kvm->slots_lock);
-
- r = kvm_clear_dirty_log_protect(kvm, log, &flush);
-
- if (flush) {
- slots = kvm_memslots(kvm);
- memslot = id_to_memslot(slots, log->slot);
-
- /* Let implementation handle TLB/GVA invalidation */
- kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
- }
-
- mutex_unlock(&kvm->slots_lock);
- return r;
+ kvm_mips_callbacks->prepare_flush_shadow(kvm);
+ return 1;
}
-long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
- long r;
+ int r;
switch (ioctl) {
default:
@@ -1048,21 +999,6 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
return r;
}
-int kvm_arch_init(void *opaque)
-{
- if (kvm_mips_callbacks) {
- kvm_err("kvm: module already exists\n");
- return -EEXIST;
- }
-
- return kvm_mips_emulation_init(&kvm_mips_callbacks);
-}
-
-void kvm_arch_exit(void)
-{
- kvm_mips_callbacks = NULL;
-}
-
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
@@ -1107,13 +1043,13 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = 1;
break;
case KVM_CAP_NR_VCPUS:
- r = num_online_cpus();
+ r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
break;
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
break;
case KVM_CAP_MAX_VCPU_ID:
- r = KVM_MAX_VCPU_ID;
+ r = KVM_MAX_VCPU_IDS;
break;
case KVM_CAP_MIPS_FPU:
/* We don't handle systems with inconsistent cpu_has_fpu */
@@ -1144,7 +1080,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
return kvm_mips_pending_timer(vcpu) ||
- kvm_read_c0_guest_cause(vcpu->arch.cop0) & C_TI;
+ kvm_read_c0_guest_cause(&vcpu->arch.cop0) & C_TI;
}
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
@@ -1168,7 +1104,7 @@ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);
- cop0 = vcpu->arch.cop0;
+ cop0 = &vcpu->arch.cop0;
kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n",
kvm_read_c0_guest_status(cop0),
kvm_read_c0_guest_cause(cop0));
@@ -1212,58 +1148,12 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
return 0;
}
-static void kvm_mips_comparecount_func(unsigned long data)
-{
- struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
-
- kvm_mips_callbacks->queue_timer_int(vcpu);
-
- vcpu->arch.wait = 0;
- if (swq_has_sleeper(&vcpu->wq))
- swake_up_one(&vcpu->wq);
-}
-
-/* low level hrtimer wake routine */
-static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
-{
- struct kvm_vcpu *vcpu;
-
- vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
- kvm_mips_comparecount_func((unsigned long) vcpu);
- return kvm_mips_count_timeout(vcpu);
-}
-
-int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
-{
- int err;
-
- err = kvm_mips_callbacks->vcpu_init(vcpu);
- if (err)
- return err;
-
- hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
- return 0;
-}
-
-void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
-{
- kvm_mips_callbacks->vcpu_uninit(vcpu);
-}
-
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
struct kvm_translation *tr)
{
return 0;
}
-/* Initial guest state */
-int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
-{
- return kvm_mips_callbacks->vcpu_setup(vcpu);
-}
-
static void kvm_mips_set_c0_status(void)
{
u32 status = read_c0_status();
@@ -1278,8 +1168,9 @@ static void kvm_mips_set_c0_status(void)
/*
* Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
*/
-int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static int __kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
{
+ struct kvm_run *run = vcpu->run;
u32 cause = vcpu->arch.host_cp0_cause;
u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
u32 __user *opc = (u32 __user *) vcpu->arch.pc;
@@ -1290,10 +1181,6 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
vcpu->mode = OUTSIDE_GUEST_MODE;
- /* re-enable HTW before enabling interrupts */
- if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ))
- htw_start();
-
/* Set a default exit reason */
run->exit_reason = KVM_EXIT_UNKNOWN;
run->ready_for_interrupt_injection = 1;
@@ -1310,22 +1197,6 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
cause, opc, run, vcpu);
trace_kvm_exit(vcpu, exccode);
- if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
- /*
- * Do a privilege check, if in UM most of these exit conditions
- * end up causing an exception to be delivered to the Guest
- * Kernel
- */
- er = kvm_mips_check_privilege(cause, opc, run, vcpu);
- if (er == EMULATE_PRIV_FAIL) {
- goto skip_emul;
- } else if (er == EMULATE_FAIL) {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- goto skip_emul;
- }
- }
-
switch (exccode) {
case EXCCODE_INT:
kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);
@@ -1355,7 +1226,7 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
case EXCCODE_TLBS:
kvm_debug("TLB ST fault: cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n",
- cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
+ cause, kvm_read_c0_guest_status(&vcpu->arch.cop0), opc,
badvaddr);
++vcpu->stat.tlbmiss_st_exits;
@@ -1427,7 +1298,7 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
kvm_get_badinstr(opc, vcpu, &inst);
kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
exccode, opc, inst, badvaddr,
- kvm_read_c0_guest_status(vcpu->arch.cop0));
+ kvm_read_c0_guest_status(&vcpu->arch.cop0));
kvm_arch_vcpu_dump_regs(vcpu);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
@@ -1435,7 +1306,6 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
}
-skip_emul:
local_irq_disable();
if (ret == RESUME_GUEST)
@@ -1465,7 +1335,7 @@ skip_emul:
*/
smp_store_mb(vcpu->mode, IN_GUEST_MODE);
- kvm_mips_callbacks->vcpu_reenter(run, vcpu);
+ kvm_mips_callbacks->vcpu_reenter(vcpu);
/*
* If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
@@ -1484,10 +1354,16 @@ skip_emul:
read_c0_config5() & MIPS_CONF5_MSAEN)
__kvm_restore_msacsr(&vcpu->arch);
}
+ return ret;
+}
- /* Disable HTW before returning to guest or host */
- if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ))
- htw_stop();
+int noinstr kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
+{
+ int ret;
+
+ guest_state_exit_irqoff();
+ ret = __kvm_mips_handle_exit(vcpu);
+ guest_state_enter_irqoff();
return ret;
}
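
Between this wrapper and kvm_mips_vcpu_enter_exit() the RCU extended quiescent state is entered and left symmetrically: the exit path drops out of it only for the instrumented C handler, then re-enters it before returning towards the guest so the outer pair stays balanced. A sketch of the nesting (call shapes only, not literal code):

/*
 * kvm_arch_vcpu_ioctl_run()
 *   guest_timing_enter_irqoff()
 *   kvm_mips_vcpu_enter_exit()           noinstr
 *     guest_state_enter_irqoff()         --> EQS begins
 *     vcpu_run callback                  guest executes; a trap lands in
 *       kvm_mips_handle_exit()           noinstr
 *         guest_state_exit_irqoff()      --> leave EQS, RCU usable again
 *         __kvm_mips_handle_exit()       instrumented exit handling
 *         guest_state_enter_irqoff()     --> re-enter EQS to resume/return
 *     guest_state_exit_irqoff()          --> EQS ends
 *   guest_timing_exit_irqoff()
 */
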
@@ -1495,7 +1371,7 @@ skip_emul:
/* Enable FPU for guest and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
unsigned int sr, cfg5;
preempt_disable();
@@ -1507,10 +1383,6 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu)
* FR=0 FPU state, and we don't want to hit reserved instruction
* exceptions trying to save the MSA state later when CU=1 && FR=1, so
* play it safe and save it first.
- *
- * In theory we shouldn't ever hit this case since kvm_lose_fpu() should
- * get called when guest CU1 is set, however we can't trust the guest
- * not to clobber the status register directly via the commpage.
*/
if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
@@ -1543,7 +1415,7 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu)
/* Enable MSA for guest and restore context */
void kvm_own_msa(struct kvm_vcpu *vcpu)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
unsigned int sr, cfg5;
preempt_disable();
@@ -1631,11 +1503,6 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
preempt_disable();
if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
- if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
- set_c0_config5(MIPS_CONF5_MSAEN);
- enable_fpu_hazard();
- }
-
__kvm_save_msa(&vcpu->arch);
trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);
@@ -1647,11 +1514,6 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
}
vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
} else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
- if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
- set_c0_status(ST0_CU1);
- enable_fpu_hazard();
- }
-
__kvm_save_fpu(&vcpu->arch);
vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
@@ -1712,6 +1574,34 @@ static struct notifier_block kvm_mips_csr_die_notifier = {
.notifier_call = kvm_mips_csr_die_notify,
};
+static u32 kvm_default_priority_to_irq[MIPS_EXC_MAX] = {
+ [MIPS_EXC_INT_TIMER] = C_IRQ5,
+ [MIPS_EXC_INT_IO_1] = C_IRQ0,
+ [MIPS_EXC_INT_IPI_1] = C_IRQ1,
+ [MIPS_EXC_INT_IPI_2] = C_IRQ2,
+};
+
+static u32 kvm_loongson3_priority_to_irq[MIPS_EXC_MAX] = {
+ [MIPS_EXC_INT_TIMER] = C_IRQ5,
+ [MIPS_EXC_INT_IO_1] = C_IRQ0,
+ [MIPS_EXC_INT_IO_2] = C_IRQ1,
+ [MIPS_EXC_INT_IPI_1] = C_IRQ4,
+};
+
+u32 *kvm_priority_to_irq = kvm_default_priority_to_irq;
+
+u32 kvm_irq_to_priority(u32 irq)
+{
+ int i;
+
+ for (i = MIPS_EXC_INT_TIMER; i < MIPS_EXC_MAX; i++) {
+ if (kvm_priority_to_irq[i] == (1 << (irq + 8)))
+ return i;
+ }
+
+ return MIPS_EXC_MAX;
+}
+
static int __init kvm_mips_init(void)
{
int ret;
@@ -1725,13 +1615,21 @@ static int __init kvm_mips_init(void)
if (ret)
return ret;
- ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
-
+ ret = kvm_mips_emulation_init();
if (ret)
return ret;
+
+ if (boot_cpu_type() == CPU_LOONGSON64)
+ kvm_priority_to_irq = kvm_loongson3_priority_to_irq;
+
register_die_notifier(&kvm_mips_csr_die_notifier);
+ ret = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+ if (ret) {
+ unregister_die_notifier(&kvm_mips_csr_die_notifier);
+ return ret;
+ }
return 0;
}
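
A standalone illustration of what the boot-time table swap above changes: the same guest irq number resolves to a different priority (and hence Cause IP bit) on Loongson-3. Bit values mirror the C_IRQn constants; names are illustrative:

#include <stdio.h>

enum {
	EXC_TIMER = 7, EXC_IO_1, EXC_IO_2, EXC_EXECUTE,
	EXC_IPI_1, EXC_IPI_2, EXC_MAX
};

static const unsigned int dflt[EXC_MAX] = {
	[EXC_TIMER] = 1u << 15,	/* C_IRQ5 */
	[EXC_IO_1]  = 1u << 10,	/* C_IRQ0 */
	[EXC_IPI_1] = 1u << 11,	/* C_IRQ1 */
	[EXC_IPI_2] = 1u << 12,	/* C_IRQ2 */
};

static const unsigned int ls3[EXC_MAX] = {
	[EXC_TIMER] = 1u << 15,	/* C_IRQ5 */
	[EXC_IO_1]  = 1u << 10,	/* C_IRQ0 */
	[EXC_IO_2]  = 1u << 11,	/* C_IRQ1 */
	[EXC_IPI_1] = 1u << 14,	/* C_IRQ4 */
};

static int irq_to_priority(const unsigned int *tbl, unsigned int irq)
{
	int i;

	for (i = EXC_TIMER; i < EXC_MAX; i++)
		if (tbl[i] == (1u << (irq + 8)))
			return i;
	return EXC_MAX;
}

int main(void)
{
	/* irq 3 (Cause IP3): IPI 1 by default, second IO line on Loongson-3 */
	printf("default: %d, loongson-3: %d\n",
	       irq_to_priority(dflt, 3), irq_to_priority(ls3, 3));	/* 11, 9 */
	return 0;
}
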
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 7dad7a293eae..467ee6b95ae1 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -25,41 +25,9 @@
#define KVM_MMU_CACHE_MIN_PAGES 2
#endif
-static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
- int min, int max)
-{
- void *page;
-
- BUG_ON(max > KVM_NR_MEM_OBJS);
- if (cache->nobjs >= min)
- return 0;
- while (cache->nobjs < max) {
- page = (void *)__get_free_page(GFP_KERNEL);
- if (!page)
- return -ENOMEM;
- cache->objects[cache->nobjs++] = page;
- }
- return 0;
-}
-
-static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
-{
- while (mc->nobjs)
- free_page((unsigned long)mc->objects[--mc->nobjs]);
-}
-
-static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
-{
- void *p;
-
- BUG_ON(!mc || !mc->nobjs);
- p = mc->objects[--mc->nobjs];
- return p;
-}
-
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
- mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+ kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}
/**
@@ -112,7 +80,7 @@ pgd_t *kvm_pgd_alloc(void)
{
pgd_t *ret;
- ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGD_ORDER);
+ ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGD_TABLE_ORDER);
if (ret)
kvm_pgd_init(ret);
@@ -153,9 +121,8 @@ static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
if (!cache)
return NULL;
- new_pmd = mmu_memory_cache_alloc(cache);
- pmd_init((unsigned long)new_pmd,
- (unsigned long)invalid_pte_table);
+ new_pmd = kvm_mmu_memory_cache_alloc(cache);
+ pmd_init(new_pmd);
pud_populate(NULL, pud, new_pmd);
}
pmd = pmd_offset(pud, addr);
@@ -164,11 +131,11 @@ static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
if (!cache)
return NULL;
- new_pte = mmu_memory_cache_alloc(cache);
+ new_pte = kvm_mmu_memory_cache_alloc(cache);
clear_page(new_pte);
pmd_populate_kernel(NULL, pmd, new_pte);
}
- return pte_offset(pmd, addr);
+ return pte_offset_kernel(pmd, addr);
}
/* Caller must hold kvm->mm_lock */
@@ -187,8 +154,8 @@ static pte_t *kvm_mips_pte_for_gpa(struct kvm *kvm,
static bool kvm_mips_flush_gpa_pte(pte_t *pte, unsigned long start_gpa,
unsigned long end_gpa)
{
- int i_min = __pte_offset(start_gpa);
- int i_max = __pte_offset(end_gpa);
+ int i_min = pte_index(start_gpa);
+ int i_max = pte_index(end_gpa);
bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1);
int i;
@@ -215,7 +182,7 @@ static bool kvm_mips_flush_gpa_pmd(pmd_t *pmd, unsigned long start_gpa,
if (!pmd_present(pmd[i]))
continue;
- pte = pte_offset(pmd + i, 0);
+ pte = pte_offset_kernel(pmd + i, 0);
if (i == i_max)
end = end_gpa;
@@ -312,8 +279,8 @@ static int kvm_mips_##name##_pte(pte_t *pte, unsigned long start, \
unsigned long end) \
{ \
int ret = 0; \
- int i_min = __pte_offset(start); \
- int i_max = __pte_offset(end); \
+ int i_min = pte_index(start); \
+ int i_max = pte_index(end); \
int i; \
pte_t old, new; \
\
@@ -346,7 +313,7 @@ static int kvm_mips_##name##_pmd(pmd_t *pmd, unsigned long start, \
if (!pmd_present(pmd[i])) \
continue; \
\
- pte = pte_offset(pmd + i, 0); \
+ pte = pte_offset_kernel(pmd + i, 0); \
if (i == i_max) \
cur_end = end; \
\
@@ -471,84 +438,34 @@ static int kvm_mips_mkold_gpa_pt(struct kvm *kvm, gfn_t start_gfn,
end_gfn << PAGE_SHIFT);
}
-static int handle_hva_to_gpa(struct kvm *kvm,
- unsigned long start,
- unsigned long end,
- int (*handler)(struct kvm *kvm, gfn_t gfn,
- gpa_t gfn_end,
- struct kvm_memory_slot *memslot,
- void *data),
- void *data)
-{
- struct kvm_memslots *slots;
- struct kvm_memory_slot *memslot;
- int ret = 0;
-
- slots = kvm_memslots(kvm);
-
- /* we only care about the pages that the guest sees */
- kvm_for_each_memslot(memslot, slots) {
- unsigned long hva_start, hva_end;
- gfn_t gfn, gfn_end;
-
- hva_start = max(start, memslot->userspace_addr);
- hva_end = min(end, memslot->userspace_addr +
- (memslot->npages << PAGE_SHIFT));
- if (hva_start >= hva_end)
- continue;
-
- /*
- * {gfn(page) | page intersects with [hva_start, hva_end)} =
- * {gfn_start, gfn_start+1, ..., gfn_end-1}.
- */
- gfn = hva_to_gfn_memslot(hva_start, memslot);
- gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
-
- ret |= handler(kvm, gfn, gfn_end, memslot, data);
- }
-
- return ret;
-}
-
-
-static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
- struct kvm_memory_slot *memslot, void *data)
+bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
- kvm_mips_flush_gpa_pt(kvm, gfn, gfn_end);
- return 1;
+ kvm_mips_flush_gpa_pt(kvm, range->start, range->end);
+ return true;
}
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
- handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
-
- kvm_mips_callbacks->flush_shadow_all(kvm);
- return 0;
-}
-
-static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
- struct kvm_memory_slot *memslot, void *data)
-{
- gpa_t gpa = gfn << PAGE_SHIFT;
- pte_t hva_pte = *(pte_t *)data;
+ gpa_t gpa = range->start << PAGE_SHIFT;
+ pte_t hva_pte = range->arg.pte;
pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
pte_t old_pte;
if (!gpa_pte)
- return 0;
+ return false;
/* Mapping may need adjusting depending on memslot flags */
old_pte = *gpa_pte;
- if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte))
+ if (range->slot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte))
hva_pte = pte_mkclean(hva_pte);
- else if (memslot->flags & KVM_MEM_READONLY)
+ else if (range->slot->flags & KVM_MEM_READONLY)
hva_pte = pte_wrprotect(hva_pte);
set_pte(gpa_pte, hva_pte);
/* Replacing an absent or old page doesn't need flushes */
if (!pte_present(old_pte) || !pte_young(old_pte))
- return 0;
+ return false;
/* Pages swapped, aged, moved, or cleaned require flushes */
return !pte_present(hva_pte) ||
@@ -557,44 +474,21 @@ static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
(pte_dirty(old_pte) && !pte_dirty(hva_pte));
}
-int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
-{
- unsigned long end = hva + PAGE_SIZE;
- int ret;
-
- ret = handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pte);
- if (ret)
- kvm_mips_callbacks->flush_shadow_all(kvm);
- return 0;
-}
-
-static int kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
- struct kvm_memory_slot *memslot, void *data)
+bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
- return kvm_mips_mkold_gpa_pt(kvm, gfn, gfn_end);
+ return kvm_mips_mkold_gpa_pt(kvm, range->start, range->end);
}
-static int kvm_test_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
- struct kvm_memory_slot *memslot, void *data)
+bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
- gpa_t gpa = gfn << PAGE_SHIFT;
+ gpa_t gpa = range->start << PAGE_SHIFT;
pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
if (!gpa_pte)
- return 0;
+ return false;
return pte_young(*gpa_pte);
}
-int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
-{
- return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
-}
-
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
-{
- return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
-}
-
/**
* _kvm_mips_map_page_fast() - Fast path GPA fault handler.
* @vcpu: VCPU pointer.
@@ -698,7 +592,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
gfn_t gfn = gpa >> PAGE_SHIFT;
int srcu_idx, err;
kvm_pfn_t pfn;
- pte_t *ptep, entry, old_pte;
+ pte_t *ptep, entry;
bool writeable;
unsigned long prot_bits;
unsigned long mmu_seq;
@@ -711,8 +605,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
goto out;
/* We need a minimum of cached pages ready for page table creation */
- err = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
- KVM_NR_MEM_OBJS);
+ err = kvm_mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES);
if (err)
goto out;
@@ -721,17 +614,17 @@ retry:
* Used to check for invalidations in progress, of the pfn that is
* returned by pfn_to_pfn_prot below.
*/
- mmu_seq = kvm->mmu_notifier_seq;
+ mmu_seq = kvm->mmu_invalidate_seq;
/*
- * Ensure the read of mmu_notifier_seq isn't reordered with PTE reads in
- * gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
+ * Ensure the read of mmu_invalidate_seq isn't reordered with PTE reads
+ * in gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
* risk the page we get a reference to getting unmapped before we have a
- * chance to grab the mmu_lock without mmu_notifier_retry() noticing.
+ * chance to grab the mmu_lock without mmu_invalidate_retry() noticing.
*
* This smp_rmb() pairs with the effective smp_wmb() of the combination
* of the pte_unmap_unlock() after the PTE is zapped, and the
* spin_lock() in kvm_mmu_notifier_invalidate_<page|range_end>() before
- * mmu_notifier_seq is incremented.
+ * mmu_invalidate_seq is incremented.
*/
smp_rmb();
@@ -744,7 +637,7 @@ retry:
spin_lock(&kvm->mmu_lock);
/* Check if an invalidation has taken place since we got pfn */
- if (mmu_notifier_retry(kvm, mmu_seq)) {
+ if (mmu_invalidate_retry(kvm, mmu_seq)) {
/*
* This can happen when mappings are changed asynchronously, but
* also synchronously if a COW is triggered by
@@ -771,7 +664,6 @@ retry:
entry = pfn_pte(pfn, __pgprot(prot_bits));
/* Write the PTE */
- old_pte = *ptep;
set_pte(ptep, entry);
err = 0;
@@ -788,210 +680,6 @@ out:
return err;
}
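
The renamed sequence counter implements a classic lockless-lookup-then-verify pattern. Abbreviated, the fault path above is (a sketch, not the literal function):

/*
 * retry:
 *	mmu_seq = kvm->mmu_invalidate_seq;
 *	smp_rmb();                          pairs with the invalidator's wmb
 *	pfn = gfn_to_pfn_prot(...);         may sleep, may race with unmap
 *
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_invalidate_retry(kvm, mmu_seq)) {
 *		spin_unlock(&kvm->mmu_lock);    an invalidation ran meanwhile:
 *		kvm_release_pfn_clean(pfn);     the pfn may be stale, so drop
 *		goto retry;                     it and look the gfn up again
 *	}
 *	set_pte(ptep, entry);               installed under mmu_lock, so no
 *	spin_unlock(&kvm->mmu_lock);        invalidation can slip in between
 */
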
-static pte_t *kvm_trap_emul_pte_for_gva(struct kvm_vcpu *vcpu,
- unsigned long addr)
-{
- struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
- pgd_t *pgdp;
- int ret;
-
- /* We need a minimum of cached pages ready for page table creation */
- ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
- KVM_NR_MEM_OBJS);
- if (ret)
- return NULL;
-
- if (KVM_GUEST_KERNEL_MODE(vcpu))
- pgdp = vcpu->arch.guest_kernel_mm.pgd;
- else
- pgdp = vcpu->arch.guest_user_mm.pgd;
-
- return kvm_mips_walk_pgd(pgdp, memcache, addr);
-}
-
-void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
- bool user)
-{
- pgd_t *pgdp;
- pte_t *ptep;
-
- addr &= PAGE_MASK << 1;
-
- pgdp = vcpu->arch.guest_kernel_mm.pgd;
- ptep = kvm_mips_walk_pgd(pgdp, NULL, addr);
- if (ptep) {
- ptep[0] = pfn_pte(0, __pgprot(0));
- ptep[1] = pfn_pte(0, __pgprot(0));
- }
-
- if (user) {
- pgdp = vcpu->arch.guest_user_mm.pgd;
- ptep = kvm_mips_walk_pgd(pgdp, NULL, addr);
- if (ptep) {
- ptep[0] = pfn_pte(0, __pgprot(0));
- ptep[1] = pfn_pte(0, __pgprot(0));
- }
- }
-}
-
-/*
- * kvm_mips_flush_gva_{pte,pmd,pud,pgd,pt}.
- * Flush a range of guest physical address space from the VM's GPA page tables.
- */
-
-static bool kvm_mips_flush_gva_pte(pte_t *pte, unsigned long start_gva,
- unsigned long end_gva)
-{
- int i_min = __pte_offset(start_gva);
- int i_max = __pte_offset(end_gva);
- bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1);
- int i;
-
- /*
- * There's no freeing to do, so there's no point clearing individual
- * entries unless only part of the last level page table needs flushing.
- */
- if (safe_to_remove)
- return true;
-
- for (i = i_min; i <= i_max; ++i) {
- if (!pte_present(pte[i]))
- continue;
-
- set_pte(pte + i, __pte(0));
- }
- return false;
-}
-
-static bool kvm_mips_flush_gva_pmd(pmd_t *pmd, unsigned long start_gva,
- unsigned long end_gva)
-{
- pte_t *pte;
- unsigned long end = ~0ul;
- int i_min = pmd_index(start_gva);
- int i_max = pmd_index(end_gva);
- bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1);
- int i;
-
- for (i = i_min; i <= i_max; ++i, start_gva = 0) {
- if (!pmd_present(pmd[i]))
- continue;
-
- pte = pte_offset(pmd + i, 0);
- if (i == i_max)
- end = end_gva;
-
- if (kvm_mips_flush_gva_pte(pte, start_gva, end)) {
- pmd_clear(pmd + i);
- pte_free_kernel(NULL, pte);
- } else {
- safe_to_remove = false;
- }
- }
- return safe_to_remove;
-}
-
-static bool kvm_mips_flush_gva_pud(pud_t *pud, unsigned long start_gva,
- unsigned long end_gva)
-{
- pmd_t *pmd;
- unsigned long end = ~0ul;
- int i_min = pud_index(start_gva);
- int i_max = pud_index(end_gva);
- bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1);
- int i;
-
- for (i = i_min; i <= i_max; ++i, start_gva = 0) {
- if (!pud_present(pud[i]))
- continue;
-
- pmd = pmd_offset(pud + i, 0);
- if (i == i_max)
- end = end_gva;
-
- if (kvm_mips_flush_gva_pmd(pmd, start_gva, end)) {
- pud_clear(pud + i);
- pmd_free(NULL, pmd);
- } else {
- safe_to_remove = false;
- }
- }
- return safe_to_remove;
-}
-
-static bool kvm_mips_flush_gva_pgd(pgd_t *pgd, unsigned long start_gva,
- unsigned long end_gva)
-{
- p4d_t *p4d;
- pud_t *pud;
- unsigned long end = ~0ul;
- int i_min = pgd_index(start_gva);
- int i_max = pgd_index(end_gva);
- bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1);
- int i;
-
- for (i = i_min; i <= i_max; ++i, start_gva = 0) {
- if (!pgd_present(pgd[i]))
- continue;
-
- p4d = p4d_offset(pgd, 0);
- pud = pud_offset(p4d + i, 0);
- if (i == i_max)
- end = end_gva;
-
- if (kvm_mips_flush_gva_pud(pud, start_gva, end)) {
- pgd_clear(pgd + i);
- pud_free(NULL, pud);
- } else {
- safe_to_remove = false;
- }
- }
- return safe_to_remove;
-}
-
-void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags)
-{
- if (flags & KMF_GPA) {
- /* all of guest virtual address space could be affected */
- if (flags & KMF_KERN)
- /* useg, kseg0, seg2/3 */
- kvm_mips_flush_gva_pgd(pgd, 0, 0x7fffffff);
- else
- /* useg */
- kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff);
- } else {
- /* useg */
- kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff);
-
- /* kseg2/3 */
- if (flags & KMF_KERN)
- kvm_mips_flush_gva_pgd(pgd, 0x60000000, 0x7fffffff);
- }
-}
-
-static pte_t kvm_mips_gpa_pte_to_gva_unmapped(pte_t pte)
-{
- /*
- * Don't leak writeable but clean entries from GPA page tables. We don't
- * want the normal Linux tlbmod handler to handle dirtying when KVM
- * accesses guest memory.
- */
- if (!pte_dirty(pte))
- pte = pte_wrprotect(pte);
-
- return pte;
-}
-
-static pte_t kvm_mips_gpa_pte_to_gva_mapped(pte_t pte, long entrylo)
-{
- /* Guest EntryLo overrides host EntryLo */
- if (!(entrylo & ENTRYLO_D))
- pte = pte_mkclean(pte);
-
- return kvm_mips_gpa_pte_to_gva_unmapped(pte);
-}
-
-#ifdef CONFIG_KVM_MIPS_VZ
int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
struct kvm_vcpu *vcpu,
bool write_fault)
@@ -1005,123 +693,6 @@ int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
/* Invalidate this entry in the TLB */
return kvm_vz_host_tlb_inv(vcpu, badvaddr);
}
-#endif
-
-/* XXXKYMA: Must be called with interrupts disabled */
-int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
- struct kvm_vcpu *vcpu,
- bool write_fault)
-{
- unsigned long gpa;
- pte_t pte_gpa[2], *ptep_gva;
- int idx;
-
- if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
- kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
- kvm_mips_dump_host_tlbs();
- return -1;
- }
-
- /* Get the GPA page table entry */
- gpa = KVM_GUEST_CPHYSADDR(badvaddr);
- idx = (badvaddr >> PAGE_SHIFT) & 1;
- if (kvm_mips_map_page(vcpu, gpa, write_fault, &pte_gpa[idx],
- &pte_gpa[!idx]) < 0)
- return -1;
-
- /* Get the GVA page table entry */
- ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, badvaddr & ~PAGE_SIZE);
- if (!ptep_gva) {
- kvm_err("No ptep for gva %lx\n", badvaddr);
- return -1;
- }
-
- /* Copy a pair of entries from GPA page table to GVA page table */
- ptep_gva[0] = kvm_mips_gpa_pte_to_gva_unmapped(pte_gpa[0]);
- ptep_gva[1] = kvm_mips_gpa_pte_to_gva_unmapped(pte_gpa[1]);
-
- /* Invalidate this entry in the TLB, guest kernel ASID only */
- kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true);
- return 0;
-}
-
-int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
- struct kvm_mips_tlb *tlb,
- unsigned long gva,
- bool write_fault)
-{
- struct kvm *kvm = vcpu->kvm;
- long tlb_lo[2];
- pte_t pte_gpa[2], *ptep_buddy, *ptep_gva;
- unsigned int idx = TLB_LO_IDX(*tlb, gva);
- bool kernel = KVM_GUEST_KERNEL_MODE(vcpu);
-
- tlb_lo[0] = tlb->tlb_lo[0];
- tlb_lo[1] = tlb->tlb_lo[1];
-
- /*
- * The commpage address must not be mapped to anything else if the guest
- * TLB contains entries nearby, or commpage accesses will break.
- */
- if (!((gva ^ KVM_GUEST_COMMPAGE_ADDR) & VPN2_MASK & (PAGE_MASK << 1)))
- tlb_lo[TLB_LO_IDX(*tlb, KVM_GUEST_COMMPAGE_ADDR)] = 0;
-
- /* Get the GPA page table entry */
- if (kvm_mips_map_page(vcpu, mips3_tlbpfn_to_paddr(tlb_lo[idx]),
- write_fault, &pte_gpa[idx], NULL) < 0)
- return -1;
-
- /* And its GVA buddy's GPA page table entry if it also exists */
- pte_gpa[!idx] = pfn_pte(0, __pgprot(0));
- if (tlb_lo[!idx] & ENTRYLO_V) {
- spin_lock(&kvm->mmu_lock);
- ptep_buddy = kvm_mips_pte_for_gpa(kvm, NULL,
- mips3_tlbpfn_to_paddr(tlb_lo[!idx]));
- if (ptep_buddy)
- pte_gpa[!idx] = *ptep_buddy;
- spin_unlock(&kvm->mmu_lock);
- }
-
- /* Get the GVA page table entry pair */
- ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, gva & ~PAGE_SIZE);
- if (!ptep_gva) {
- kvm_err("No ptep for gva %lx\n", gva);
- return -1;
- }
-
- /* Copy a pair of entries from GPA page table to GVA page table */
- ptep_gva[0] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[0], tlb_lo[0]);
- ptep_gva[1] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[1], tlb_lo[1]);
-
- /* Invalidate this entry in the TLB, current guest mode ASID only */
- kvm_mips_host_tlb_inv(vcpu, gva, !kernel, kernel);
-
- kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
- tlb->tlb_lo[0], tlb->tlb_lo[1]);
-
- return 0;
-}
-
-int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
- struct kvm_vcpu *vcpu)
-{
- kvm_pfn_t pfn;
- pte_t *ptep;
-
- ptep = kvm_trap_emul_pte_for_gva(vcpu, badvaddr);
- if (!ptep) {
- kvm_err("No ptep for commpage %lx\n", badvaddr);
- return -1;
- }
-
- pfn = PFN_DOWN(virt_to_phys(vcpu->arch.kseg0_commpage));
- /* Also set valid and dirty, so refill handler doesn't have to */
- *ptep = pte_mkyoung(pte_mkdirty(pfn_pte(pfn, PAGE_SHARED)));
-
- /* Invalidate this entry in the TLB, guest kernel ASID only */
- kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true);
- return 0;
-}
/**
* kvm_mips_migrate_count() - Migrate timer.
@@ -1184,86 +755,3 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
local_irq_restore(flags);
}
-
-/**
- * kvm_trap_emul_gva_fault() - Safely attempt to handle a GVA access fault.
- * @vcpu: Virtual CPU.
- * @gva: Guest virtual address to be accessed.
- * @write: True if write attempted (must be dirtied and made writable).
- *
- * Safely attempt to handle a GVA fault, mapping GVA pages if necessary, and
- * dirtying the page if @write so that guest instructions can be modified.
- *
- * Returns: KVM_MIPS_MAPPED on success.
- * KVM_MIPS_GVA if bad guest virtual address.
- * KVM_MIPS_GPA if bad guest physical address.
- * KVM_MIPS_TLB if guest TLB not present.
- * KVM_MIPS_TLBINV if guest TLB present but not valid.
- * KVM_MIPS_TLBMOD if guest TLB read only.
- */
-enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
- unsigned long gva,
- bool write)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_mips_tlb *tlb;
- int index;
-
- if (KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG0) {
- if (kvm_mips_handle_kseg0_tlb_fault(gva, vcpu, write) < 0)
- return KVM_MIPS_GPA;
- } else if ((KVM_GUEST_KSEGX(gva) < KVM_GUEST_KSEG0) ||
- KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG23) {
- /* Address should be in the guest TLB */
- index = kvm_mips_guest_tlb_lookup(vcpu, (gva & VPN2_MASK) |
- (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID));
- if (index < 0)
- return KVM_MIPS_TLB;
- tlb = &vcpu->arch.guest_tlb[index];
-
- /* Entry should be valid, and dirty for writes */
- if (!TLB_IS_VALID(*tlb, gva))
- return KVM_MIPS_TLBINV;
- if (write && !TLB_IS_DIRTY(*tlb, gva))
- return KVM_MIPS_TLBMOD;
-
- if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, gva, write))
- return KVM_MIPS_GPA;
- } else {
- return KVM_MIPS_GVA;
- }
-
- return KVM_MIPS_MAPPED;
-}
-
-int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
-{
- int err;
-
- if (WARN(IS_ENABLED(CONFIG_KVM_MIPS_VZ),
- "Expect BadInstr/BadInstrP registers to be used with VZ\n"))
- return -EINVAL;
-
-retry:
- kvm_trap_emul_gva_lockless_begin(vcpu);
- err = get_user(*out, opc);
- kvm_trap_emul_gva_lockless_end(vcpu);
-
- if (unlikely(err)) {
- /*
- * Try to handle the fault, maybe we just raced with a GVA
- * invalidation.
- */
- err = kvm_trap_emul_gva_fault(vcpu, (unsigned long)opc,
- false);
- if (unlikely(err)) {
- kvm_err("%s: illegal address: %p\n",
- __func__, opc);
- return -EFAULT;
- }
-
- /* Hopefully it'll work now */
- goto retry;
- }
- return 0;
-}
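
The removed handle_hva_to_gpa() walk clamped each notifier range against every memslot and converted the overlap to gfns; generic KVM now performs that conversion before invoking the kvm_gfn_range callbacks kept in this file. A standalone sketch of the clamping arithmetic (addresses and the memslot are hypothetical):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)

int main(void)
{
	/* hypothetical memslot: 16 guest pages mapped at this host address */
	uint64_t slot_hva = 0x7f0000000000, slot_gfn = 0x80000, npages = 16;
	/* invalidation range handed in by the MMU notifier */
	uint64_t start = 0x7f0000003000, end = 0x7f0000006000;

	uint64_t hva_start = start > slot_hva ? start : slot_hva;
	uint64_t hva_end = end < slot_hva + (npages << PAGE_SHIFT)
			       ? end : slot_hva + (npages << PAGE_SHIFT);

	if (hva_start < hva_end) {
		uint64_t gfn = slot_gfn + ((hva_start - slot_hva) >> PAGE_SHIFT);
		uint64_t gfn_end = slot_gfn +
			((hva_end + PAGE_SIZE - 1 - slot_hva) >> PAGE_SHIFT);

		printf("flush gfns [%#llx, %#llx)\n",	/* [0x80003, 0x80006) */
		       (unsigned long long)gfn, (unsigned long long)gfn_end);
	}
	return 0;
}
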
diff --git a/arch/mips/kvm/stats.c b/arch/mips/kvm/stats.c
index 53f851a61554..3e6682018fbe 100644
--- a/arch/mips/kvm/stats.c
+++ b/arch/mips/kvm/stats.c
@@ -54,9 +54,9 @@ void kvm_mips_dump_stats(struct kvm_vcpu *vcpu)
kvm_info("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id);
for (i = 0; i < N_MIPS_COPROC_REGS; i++) {
for (j = 0; j < N_MIPS_COPROC_SEL; j++) {
- if (vcpu->arch.cop0->stat[i][j])
+ if (vcpu->arch.cop0.stat[i][j])
kvm_info("%s[%d]: %lu\n", kvm_cop0_str[i], j,
- vcpu->arch.cop0->stat[i][j]);
+ vcpu->arch.cop0.stat[i][j]);
}
}
#endif
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index 7cd92166a0b9..4e91971daae1 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -20,8 +20,8 @@
#include <asm/cpu.h>
#include <asm/bootinfo.h>
+#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/tlbdebug.h>
@@ -30,10 +30,6 @@
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT
-#define KVM_GUEST_PC_TLB 0
-#define KVM_GUEST_SP_TLB 1
-
-#ifdef CONFIG_KVM_MIPS_VZ
unsigned long GUESTID_MASK;
EXPORT_SYMBOL_GPL(GUESTID_MASK);
unsigned long GUESTID_FIRST_VERSION;
@@ -50,91 +46,6 @@ static u32 kvm_mips_get_root_asid(struct kvm_vcpu *vcpu)
else
return cpu_asid(smp_processor_id(), gpa_mm);
}
-#endif
-
-static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
-{
- struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
- int cpu = smp_processor_id();
-
- return cpu_asid(cpu, kern_mm);
-}
-
-static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
-{
- struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
- int cpu = smp_processor_id();
-
- return cpu_asid(cpu, user_mm);
-}
-
-/* Structure defining a TLB entry data set. */
-
-void kvm_mips_dump_host_tlbs(void)
-{
- unsigned long flags;
-
- local_irq_save(flags);
-
- kvm_info("HOST TLBs:\n");
- dump_tlb_regs();
- pr_info("\n");
- dump_tlb_all();
-
- local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);
-
-void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_mips_tlb tlb;
- int i;
-
- kvm_info("Guest TLBs:\n");
- kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));
-
- for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
- tlb = vcpu->arch.guest_tlb[i];
- kvm_info("TLB%c%3d Hi 0x%08lx ",
- (tlb.tlb_lo[0] | tlb.tlb_lo[1]) & ENTRYLO_V
- ? ' ' : '*',
- i, tlb.tlb_hi);
- kvm_info("Lo0=0x%09llx %c%c attr %lx ",
- (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[0]),
- (tlb.tlb_lo[0] & ENTRYLO_D) ? 'D' : ' ',
- (tlb.tlb_lo[0] & ENTRYLO_G) ? 'G' : ' ',
- (tlb.tlb_lo[0] & ENTRYLO_C) >> ENTRYLO_C_SHIFT);
- kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n",
- (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[1]),
- (tlb.tlb_lo[1] & ENTRYLO_D) ? 'D' : ' ',
- (tlb.tlb_lo[1] & ENTRYLO_G) ? 'G' : ' ',
- (tlb.tlb_lo[1] & ENTRYLO_C) >> ENTRYLO_C_SHIFT,
- tlb.tlb_mask);
- }
-}
-EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);
-
-int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
-{
- int i;
- int index = -1;
- struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;
-
- for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
- if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
- TLB_HI_ASID_HIT(tlb[i], entryhi)) {
- index = i;
- break;
- }
- }
-
- kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
- __func__, entryhi, index, tlb[i].tlb_lo[0], tlb[i].tlb_lo[1]);
-
- return index;
-}
-EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup);
static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
{
@@ -147,8 +58,7 @@ static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
tlb_probe_hazard();
idx = read_c0_index();
- if (idx >= current_cpu_data.tlbsize)
- BUG();
+ BUG_ON(idx >= current_cpu_data.tlbsize);
if (idx >= 0) {
write_c0_entryhi(UNIQUE_ENTRYHI(idx));
@@ -163,54 +73,6 @@ static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
return idx;
}
-int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
- bool user, bool kernel)
-{
- /*
-	 * Initialize idx_user and idx_kernel to work around a bogus
-	 * maybe-uninitialized warning when using GCC 6.
- */
- int idx_user = 0, idx_kernel = 0;
- unsigned long flags, old_entryhi;
-
- local_irq_save(flags);
-
- old_entryhi = read_c0_entryhi();
-
- if (user)
- idx_user = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
- kvm_mips_get_user_asid(vcpu));
- if (kernel)
- idx_kernel = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
- kvm_mips_get_kernel_asid(vcpu));
-
- write_c0_entryhi(old_entryhi);
- mtc0_tlbw_hazard();
-
- local_irq_restore(flags);
-
- /*
- * We don't want to get reserved instruction exceptions for missing tlb
- * entries.
- */
- if (cpu_has_vtag_icache)
- flush_icache_all();
-
- if (user && idx_user >= 0)
- kvm_debug("%s: Invalidated guest user entryhi %#lx @ idx %d\n",
- __func__, (va & VPN2_MASK) |
- kvm_mips_get_user_asid(vcpu), idx_user);
- if (kernel && idx_kernel >= 0)
- kvm_debug("%s: Invalidated guest kernel entryhi %#lx @ idx %d\n",
- __func__, (va & VPN2_MASK) |
- kvm_mips_get_kernel_asid(vcpu), idx_kernel);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);
-
-#ifdef CONFIG_KVM_MIPS_VZ
-
/* GuestID management */
/**
@@ -291,7 +153,7 @@ EXPORT_SYMBOL_GPL(kvm_vz_host_tlb_inv);
* kvm_vz_guest_tlb_lookup() - Lookup a guest VZ TLB mapping.
* @vcpu: KVM VCPU pointer.
 * @gva: Guest virtual address in a TLB mapped guest segment.
- * @gpa: Ponter to output guest physical address it maps to.
+ * @gpa: Pointer to output guest physical address it maps to.
*
* Converts a guest virtual address in a guest TLB mapped segment to a guest
* physical address, by probing the guest TLB.
@@ -469,7 +331,7 @@ void kvm_vz_local_flush_guesttlb_all(void)
cvmmemctl2 |= CVMMEMCTL2_INHIBITTS;
write_c0_cvmmemctl2(cvmmemctl2);
break;
- };
+ }
/* Invalidate guest entries in guest TLB */
write_gc0_entrylo0(0);
@@ -486,7 +348,7 @@ void kvm_vz_local_flush_guesttlb_all(void)
if (cvmmemctl2) {
cvmmemctl2 &= ~CVMMEMCTL2_INHIBITTS;
write_c0_cvmmemctl2(cvmmemctl2);
- };
+ }
write_gc0_index(old_index);
write_gc0_entryhi(old_entryhi);
@@ -622,39 +484,42 @@ void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
}
EXPORT_SYMBOL_GPL(kvm_vz_load_guesttlb);
-#endif
-
-/**
- * kvm_mips_suspend_mm() - Suspend the active mm.
- * @cpu The CPU we're running on.
- *
- * Suspend the active_mm, ready for a switch to a KVM guest virtual address
- * space. This is left active for the duration of guest context, including time
- * with interrupts enabled, so we need to be careful not to confuse e.g. cache
- * management IPIs.
- *
- * kvm_mips_resume_mm() should be called before context switching to a different
- * process so we don't need to worry about reference counting.
- *
- * This needs to be in static kernel code to avoid exporting init_mm.
- */
-void kvm_mips_suspend_mm(int cpu)
+#ifdef CONFIG_CPU_LOONGSON64
+void kvm_loongson_clear_guest_vtlb(void)
{
- cpumask_clear_cpu(cpu, mm_cpumask(current->active_mm));
- current->active_mm = &init_mm;
+ int idx = read_gc0_index();
+
+ /* Set root GuestID for root probe and write of guest TLB entry */
+ set_root_gid_to_guest_gid();
+
+ write_gc0_index(0);
+ guest_tlbinvf();
+ write_gc0_index(idx);
+
+ clear_root_gid();
+ set_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
}
-EXPORT_SYMBOL_GPL(kvm_mips_suspend_mm);
+EXPORT_SYMBOL_GPL(kvm_loongson_clear_guest_vtlb);
-/**
- * kvm_mips_resume_mm() - Resume the current process mm.
- * @cpu The CPU we're running on.
- *
- * Resume the mm of the current process, after a switch back from a KVM guest
- * virtual address space (see kvm_mips_suspend_mm()).
- */
-void kvm_mips_resume_mm(int cpu)
+void kvm_loongson_clear_guest_ftlb(void)
{
- cpumask_set_cpu(cpu, mm_cpumask(current->mm));
- current->active_mm = current->mm;
+ int i;
+ int idx = read_gc0_index();
+
+ /* Set root GuestID for root probe and write of guest TLB entry */
+ set_root_gid_to_guest_gid();
+
+ for (i = current_cpu_data.tlbsizevtlb;
+ i < (current_cpu_data.tlbsizevtlb +
+ current_cpu_data.tlbsizeftlbsets);
+ i++) {
+ write_gc0_index(i);
+ guest_tlbinvf();
+ }
+ write_gc0_index(idx);
+
+ clear_root_gid();
+ set_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
}
-EXPORT_SYMBOL_GPL(kvm_mips_resume_mm);
+EXPORT_SYMBOL_GPL(kvm_loongson_clear_guest_ftlb);
+#endif
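
The FTLB loop above leans on the index layout of combined VTLB/FTLB cores: indices below tlbsizevtlb address VTLB entries, and the next tlbsizeftlbsets indices each select one FTLB set for guest_tlbinvf. A small helper restating that assumption (a sketch, not from this commit):

/* Sketch: does a Guest.Index value fall in the FTLB-set range flushed above? */
static inline bool index_is_ftlb_set(const struct cpuinfo_mips *c, int idx)
{
	return idx >= c->tlbsizevtlb &&
	       idx < c->tlbsizevtlb + c->tlbsizeftlbsets;
}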
diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h
index a8c7fd7bf6d2..136c3535a1cb 100644
--- a/arch/mips/kvm/trace.h
+++ b/arch/mips/kvm/trace.h
@@ -322,11 +322,11 @@ TRACE_EVENT_FN(kvm_guest_mode_change,
),
TP_fast_assign(
- __entry->epc = kvm_read_c0_guest_epc(vcpu->arch.cop0);
+ __entry->epc = kvm_read_c0_guest_epc(&vcpu->arch.cop0);
__entry->pc = vcpu->arch.pc;
- __entry->badvaddr = kvm_read_c0_guest_badvaddr(vcpu->arch.cop0);
- __entry->status = kvm_read_c0_guest_status(vcpu->arch.cop0);
- __entry->cause = kvm_read_c0_guest_cause(vcpu->arch.cop0);
+ __entry->badvaddr = kvm_read_c0_guest_badvaddr(&vcpu->arch.cop0);
+ __entry->status = kvm_read_c0_guest_status(&vcpu->arch.cop0);
+ __entry->cause = kvm_read_c0_guest_cause(&vcpu->arch.cop0);
),
TP_printk("EPC: 0x%08lx PC: 0x%08lx Status: 0x%08x Cause: 0x%08x BadVAddr: 0x%08lx",
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
deleted file mode 100644
index 5a11e83dffe6..000000000000
--- a/arch/mips/kvm/trap_emul.c
+++ /dev/null
@@ -1,1317 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
- *
- * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
- * Authors: Sanjay Lal <sanjayl@kymasys.com>
- */
-
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/kvm_host.h>
-#include <linux/log2.h>
-#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-#include <asm/mmu_context.h>
-#include <asm/pgalloc.h>
-
-#include "interrupt.h"
-
-static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
-{
- gpa_t gpa;
- gva_t kseg = KSEGX(gva);
- gva_t gkseg = KVM_GUEST_KSEGX(gva);
-
- if ((kseg == CKSEG0) || (kseg == CKSEG1))
- gpa = CPHYSADDR(gva);
- else if (gkseg == KVM_GUEST_KSEG0)
- gpa = KVM_GUEST_CPHYSADDR(gva);
- else {
- kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
- kvm_mips_dump_host_tlbs();
- gpa = KVM_INVALID_ADDR;
- }
-
- kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);
-
- return gpa;
-}
-
-static int kvm_trap_emul_no_handler(struct kvm_vcpu *vcpu)
-{
- u32 __user *opc = (u32 __user *) vcpu->arch.pc;
- u32 cause = vcpu->arch.host_cp0_cause;
- u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
- unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
- u32 inst = 0;
-
- /*
- * Fetch the instruction.
- */
- if (cause & CAUSEF_BD)
- opc += 1;
- kvm_get_badinstr(opc, vcpu, &inst);
-
- kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
- exccode, opc, inst, badvaddr,
- kvm_read_c0_guest_status(vcpu->arch.cop0));
- kvm_arch_vcpu_dump_regs(vcpu);
- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- return RESUME_HOST;
-}
-
-static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_run *run = vcpu->run;
- u32 __user *opc = (u32 __user *) vcpu->arch.pc;
- u32 cause = vcpu->arch.host_cp0_cause;
- enum emulation_result er = EMULATE_DONE;
- int ret = RESUME_GUEST;
-
- if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
- /* FPU Unusable */
- if (!kvm_mips_guest_has_fpu(&vcpu->arch) ||
- (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) {
- /*
- * Unusable/no FPU in guest:
- * deliver guest COP1 Unusable Exception
- */
- er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
- } else {
- /* Restore FPU state */
- kvm_own_fpu(vcpu);
- er = EMULATE_DONE;
- }
- } else {
- er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
- }
-
- switch (er) {
- case EMULATE_DONE:
- ret = RESUME_GUEST;
- break;
-
- case EMULATE_FAIL:
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- break;
-
- case EMULATE_WAIT:
- run->exit_reason = KVM_EXIT_INTR;
- ret = RESUME_HOST;
- break;
-
- case EMULATE_HYPERCALL:
- ret = kvm_mips_handle_hypcall(vcpu);
- break;
-
- default:
- BUG();
- }
- return ret;
-}
-
-static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_run *run,
- struct kvm_vcpu *vcpu)
-{
- enum emulation_result er;
- union mips_instruction inst;
- int err;
-
- /* A code fetch fault doesn't count as an MMIO */
- if (kvm_is_ifetch_fault(&vcpu->arch)) {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- return RESUME_HOST;
- }
-
- /* Fetch the instruction. */
- if (cause & CAUSEF_BD)
- opc += 1;
- err = kvm_get_badinstr(opc, vcpu, &inst.word);
- if (err) {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- return RESUME_HOST;
- }
-
- /* Emulate the load */
- er = kvm_mips_emulate_load(inst, cause, run, vcpu);
- if (er == EMULATE_FAIL) {
- kvm_err("Emulate load from MMIO space failed\n");
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- } else {
- run->exit_reason = KVM_EXIT_MMIO;
- }
- return RESUME_HOST;
-}
-
-static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_run *run,
- struct kvm_vcpu *vcpu)
-{
- enum emulation_result er;
- union mips_instruction inst;
- int err;
-
- /* Fetch the instruction. */
- if (cause & CAUSEF_BD)
- opc += 1;
- err = kvm_get_badinstr(opc, vcpu, &inst.word);
- if (err) {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- return RESUME_HOST;
- }
-
- /* Emulate the store */
- er = kvm_mips_emulate_store(inst, cause, run, vcpu);
- if (er == EMULATE_FAIL) {
- kvm_err("Emulate store to MMIO space failed\n");
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- } else {
- run->exit_reason = KVM_EXIT_MMIO;
- }
- return RESUME_HOST;
-}
-
-static int kvm_mips_bad_access(u32 cause, u32 *opc, struct kvm_run *run,
- struct kvm_vcpu *vcpu, bool store)
-{
- if (store)
- return kvm_mips_bad_store(cause, opc, run, vcpu);
- else
- return kvm_mips_bad_load(cause, opc, run, vcpu);
-}
-
-static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_run *run = vcpu->run;
- u32 __user *opc = (u32 __user *) vcpu->arch.pc;
- unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
- u32 cause = vcpu->arch.host_cp0_cause;
- struct kvm_mips_tlb *tlb;
- unsigned long entryhi;
- int index;
-
- if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
- || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
- /*
- * First find the mapping in the guest TLB. If the failure to
- * write was due to the guest TLB, it should be up to the guest
- * to handle it.
- */
- entryhi = (badvaddr & VPN2_MASK) |
- (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
- index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
-
- /*
- * These should never happen.
- * They would indicate stale host TLB entries.
- */
- if (unlikely(index < 0)) {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- return RESUME_HOST;
- }
- tlb = vcpu->arch.guest_tlb + index;
- if (unlikely(!TLB_IS_VALID(*tlb, badvaddr))) {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- return RESUME_HOST;
- }
-
- /*
- * Guest entry not dirty? That would explain the TLB modified
- * exception. Relay that on to the guest so it can handle it.
- */
- if (!TLB_IS_DIRTY(*tlb, badvaddr)) {
- kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
- return RESUME_GUEST;
- }
-
- if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, badvaddr,
- true))
- /* Not writable, needs handling as MMIO */
- return kvm_mips_bad_store(cause, opc, run, vcpu);
- return RESUME_GUEST;
- } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
- if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, true) < 0)
- /* Not writable, needs handling as MMIO */
- return kvm_mips_bad_store(cause, opc, run, vcpu);
- return RESUME_GUEST;
- } else {
- /* host kernel addresses are all handled as MMIO */
- return kvm_mips_bad_store(cause, opc, run, vcpu);
- }
-}
-
-static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
-{
- struct kvm_run *run = vcpu->run;
- u32 __user *opc = (u32 __user *) vcpu->arch.pc;
- unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
- u32 cause = vcpu->arch.host_cp0_cause;
- enum emulation_result er = EMULATE_DONE;
- int ret = RESUME_GUEST;
-
- if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
- && KVM_GUEST_KERNEL_MODE(vcpu)) {
- if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- }
- } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
- || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
- kvm_debug("USER ADDR TLB %s fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
- store ? "ST" : "LD", cause, opc, badvaddr);
-
- /*
- * User Address (UA) fault, this could happen if
- * (1) TLB entry not present/valid in both Guest and shadow host
- * TLBs, in this case we pass on the fault to the guest
- * kernel and let it handle it.
- * (2) TLB entry is present in the Guest TLB but not in the
- * shadow, in this case we inject the TLB from the Guest TLB
- * into the shadow host TLB
- */
-
- er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu, store);
- if (er == EMULATE_DONE)
- ret = RESUME_GUEST;
- else {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- }
- } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
- /*
- * All KSEG0 faults are handled by KVM, as the guest kernel does
- * not expect to ever get them
- */
- if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, store) < 0)
- ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
- } else if (KVM_GUEST_KERNEL_MODE(vcpu)
- && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
- /*
- * With EVA we may get a TLB exception instead of an address
- * error when the guest performs MMIO to KSeg1 addresses.
- */
- ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
- } else {
- kvm_err("Illegal TLB %s fault address , cause %#x, PC: %p, BadVaddr: %#lx\n",
- store ? "ST" : "LD", cause, opc, badvaddr);
- kvm_mips_dump_host_tlbs();
- kvm_arch_vcpu_dump_regs(vcpu);
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- }
- return ret;
-}
-
-static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
-{
- return kvm_trap_emul_handle_tlb_miss(vcpu, true);
-}
-
-static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
-{
- return kvm_trap_emul_handle_tlb_miss(vcpu, false);
-}
-
-static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
-{
- struct kvm_run *run = vcpu->run;
- u32 __user *opc = (u32 __user *) vcpu->arch.pc;
- unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
- u32 cause = vcpu->arch.host_cp0_cause;
- int ret = RESUME_GUEST;
-
- if (KVM_GUEST_KERNEL_MODE(vcpu)
- && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
- ret = kvm_mips_bad_store(cause, opc, run, vcpu);
- } else {
- kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n",
- cause, opc, badvaddr);
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- }
- return ret;
-}
-
-static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
-{
- struct kvm_run *run = vcpu->run;
- u32 __user *opc = (u32 __user *) vcpu->arch.pc;
- unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
- u32 cause = vcpu->arch.host_cp0_cause;
- int ret = RESUME_GUEST;
-
- if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
- ret = kvm_mips_bad_load(cause, opc, run, vcpu);
- } else {
- kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n",
- cause, opc, badvaddr);
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- }
- return ret;
-}
-
-static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
-{
- struct kvm_run *run = vcpu->run;
- u32 __user *opc = (u32 __user *) vcpu->arch.pc;
- u32 cause = vcpu->arch.host_cp0_cause;
- enum emulation_result er = EMULATE_DONE;
- int ret = RESUME_GUEST;
-
- er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
- if (er == EMULATE_DONE)
- ret = RESUME_GUEST;
- else {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- }
- return ret;
-}
-
-static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
-{
- struct kvm_run *run = vcpu->run;
- u32 __user *opc = (u32 __user *) vcpu->arch.pc;
- u32 cause = vcpu->arch.host_cp0_cause;
- enum emulation_result er = EMULATE_DONE;
- int ret = RESUME_GUEST;
-
- er = kvm_mips_handle_ri(cause, opc, run, vcpu);
- if (er == EMULATE_DONE)
- ret = RESUME_GUEST;
- else {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- }
- return ret;
-}
-
-static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
-{
- struct kvm_run *run = vcpu->run;
- u32 __user *opc = (u32 __user *) vcpu->arch.pc;
- u32 cause = vcpu->arch.host_cp0_cause;
- enum emulation_result er = EMULATE_DONE;
- int ret = RESUME_GUEST;
-
- er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
- if (er == EMULATE_DONE)
- ret = RESUME_GUEST;
- else {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- }
- return ret;
-}
-
-static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
-{
- struct kvm_run *run = vcpu->run;
- u32 __user *opc = (u32 __user *)vcpu->arch.pc;
- u32 cause = vcpu->arch.host_cp0_cause;
- enum emulation_result er = EMULATE_DONE;
- int ret = RESUME_GUEST;
-
- er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
- if (er == EMULATE_DONE) {
- ret = RESUME_GUEST;
- } else {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- }
- return ret;
-}
-
-static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
-{
- struct kvm_run *run = vcpu->run;
- u32 __user *opc = (u32 __user *)vcpu->arch.pc;
- u32 cause = vcpu->arch.host_cp0_cause;
- enum emulation_result er = EMULATE_DONE;
- int ret = RESUME_GUEST;
-
- er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
- if (er == EMULATE_DONE) {
- ret = RESUME_GUEST;
- } else {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- }
- return ret;
-}
-
-static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
-{
- struct kvm_run *run = vcpu->run;
- u32 __user *opc = (u32 __user *)vcpu->arch.pc;
- u32 cause = vcpu->arch.host_cp0_cause;
- enum emulation_result er = EMULATE_DONE;
- int ret = RESUME_GUEST;
-
- er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
- if (er == EMULATE_DONE) {
- ret = RESUME_GUEST;
- } else {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- }
- return ret;
-}
-
-/**
- * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
- * @vcpu: Virtual CPU context.
- *
- * Handle when the guest attempts to use MSA when it is disabled.
- */
-static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_run *run = vcpu->run;
- u32 __user *opc = (u32 __user *) vcpu->arch.pc;
- u32 cause = vcpu->arch.host_cp0_cause;
- enum emulation_result er = EMULATE_DONE;
- int ret = RESUME_GUEST;
-
- if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
- (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) {
- /*
- * No MSA in guest, or FPU enabled and not in FR=1 mode,
- * guest reserved instruction exception
- */
- er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
- } else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
- /* MSA disabled by guest, guest MSA disabled exception */
- er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
- } else {
- /* Restore MSA/FPU state */
- kvm_own_msa(vcpu);
- er = EMULATE_DONE;
- }
-
- switch (er) {
- case EMULATE_DONE:
- ret = RESUME_GUEST;
- break;
-
- case EMULATE_FAIL:
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- break;
-
- default:
- BUG();
- }
- return ret;
-}
-
-static int kvm_trap_emul_hardware_enable(void)
-{
- return 0;
-}
-
-static void kvm_trap_emul_hardware_disable(void)
-{
-}
-
-static int kvm_trap_emul_check_extension(struct kvm *kvm, long ext)
-{
- int r;
-
- switch (ext) {
- case KVM_CAP_MIPS_TE:
- r = 1;
- break;
- default:
- r = 0;
- break;
- }
-
- return r;
-}
-
-static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
-{
- struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
- struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
-
- /*
- * Allocate GVA -> HPA page tables.
- * MIPS doesn't use the mm_struct pointer argument.
- */
- kern_mm->pgd = pgd_alloc(kern_mm);
- if (!kern_mm->pgd)
- return -ENOMEM;
-
- user_mm->pgd = pgd_alloc(user_mm);
- if (!user_mm->pgd) {
- pgd_free(kern_mm, kern_mm->pgd);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void kvm_mips_emul_free_gva_pt(pgd_t *pgd)
-{
- /* Don't free host kernel page tables copied from init_mm.pgd */
- const unsigned long end = 0x80000000;
- unsigned long pgd_va, pud_va, pmd_va;
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- int i, j, k;
-
- for (i = 0; i < USER_PTRS_PER_PGD; i++) {
- if (pgd_none(pgd[i]))
- continue;
-
- pgd_va = (unsigned long)i << PGDIR_SHIFT;
- if (pgd_va >= end)
- break;
- p4d = p4d_offset(pgd, 0);
- pud = pud_offset(p4d + i, 0);
- for (j = 0; j < PTRS_PER_PUD; j++) {
- if (pud_none(pud[j]))
- continue;
-
- pud_va = pgd_va | ((unsigned long)j << PUD_SHIFT);
- if (pud_va >= end)
- break;
- pmd = pmd_offset(pud + j, 0);
- for (k = 0; k < PTRS_PER_PMD; k++) {
- if (pmd_none(pmd[k]))
- continue;
-
- pmd_va = pud_va | (k << PMD_SHIFT);
- if (pmd_va >= end)
- break;
- pte = pte_offset(pmd + k, 0);
- pte_free_kernel(NULL, pte);
- }
- pmd_free(NULL, pmd);
- }
- pud_free(NULL, pud);
- }
- pgd_free(NULL, pgd);
-}
-
-static void kvm_trap_emul_vcpu_uninit(struct kvm_vcpu *vcpu)
-{
- kvm_mips_emul_free_gva_pt(vcpu->arch.guest_kernel_mm.pgd);
- kvm_mips_emul_free_gva_pt(vcpu->arch.guest_user_mm.pgd);
-}
-
-static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- u32 config, config1;
- int vcpu_id = vcpu->vcpu_id;
-
- /* Start off the timer at 100 MHz */
- kvm_mips_init_count(vcpu, 100*1000*1000);
-
- /*
- * Arch specific stuff, set up config registers properly so that the
- * guest will come up as expected
- */
-#ifndef CONFIG_CPU_MIPSR6
- /* r2-r5, simulate a MIPS 24kc */
- kvm_write_c0_guest_prid(cop0, 0x00019300);
-#else
- /* r6+, simulate a generic QEMU machine */
- kvm_write_c0_guest_prid(cop0, 0x00010000);
-#endif
- /*
- * Have config1, Cacheable, noncoherent, write-back, write allocate.
- * Endianness, arch revision & virtually tagged icache should match
- * host.
- */
- config = read_c0_config() & MIPS_CONF_AR;
- config |= MIPS_CONF_M | CONF_CM_CACHABLE_NONCOHERENT | MIPS_CONF_MT_TLB;
-#ifdef CONFIG_CPU_BIG_ENDIAN
- config |= CONF_BE;
-#endif
- if (cpu_has_vtag_icache)
- config |= MIPS_CONF_VI;
- kvm_write_c0_guest_config(cop0, config);
-
- /* Read the cache characteristics from the host Config1 Register */
- config1 = (read_c0_config1() & ~0x7f);
-
- /* DCache line size not correctly reported in Config1 on Octeon CPUs */
- if (cpu_dcache_line_size()) {
- config1 &= ~MIPS_CONF1_DL;
- config1 |= ((ilog2(cpu_dcache_line_size()) - 1) <<
- MIPS_CONF1_DL_SHF) & MIPS_CONF1_DL;
- }
-
- /* Set up MMU size */
- config1 &= ~(0x3f << 25);
- config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);
-
- /* We unset some bits that we aren't emulating */
- config1 &= ~(MIPS_CONF1_C2 | MIPS_CONF1_MD | MIPS_CONF1_PC |
- MIPS_CONF1_WR | MIPS_CONF1_CA);
- kvm_write_c0_guest_config1(cop0, config1);
-
- /* Have config3, no tertiary/secondary caches implemented */
- kvm_write_c0_guest_config2(cop0, MIPS_CONF_M);
- /* MIPS_CONF_M | (read_c0_config2() & 0xfff) */
-
- /* Have config4, UserLocal */
- kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI);
-
- /* Have config5 */
- kvm_write_c0_guest_config4(cop0, MIPS_CONF_M);
-
- /* No config6 */
- kvm_write_c0_guest_config5(cop0, 0);
-
- /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
- kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
-
- /* Status */
- kvm_write_c0_guest_status(cop0, ST0_BEV | ST0_ERL);
-
- /*
- * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
- */
- kvm_write_c0_guest_intctl(cop0, 0xFC000000);
-
- /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
- kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 |
- (vcpu_id & MIPS_EBASE_CPUNUM));
-
- /* Put PC at guest reset vector */
- vcpu->arch.pc = KVM_GUEST_CKSEG1ADDR(0x1fc00000);
-
- return 0;
-}
-
-static void kvm_trap_emul_flush_shadow_all(struct kvm *kvm)
-{
- /* Flush GVA page tables and invalidate GVA ASIDs on all VCPUs */
- kvm_flush_remote_tlbs(kvm);
-}
-
-static void kvm_trap_emul_flush_shadow_memslot(struct kvm *kvm,
- const struct kvm_memory_slot *slot)
-{
- kvm_trap_emul_flush_shadow_all(kvm);
-}
-
-static u64 kvm_trap_emul_get_one_regs[] = {
- KVM_REG_MIPS_CP0_INDEX,
- KVM_REG_MIPS_CP0_ENTRYLO0,
- KVM_REG_MIPS_CP0_ENTRYLO1,
- KVM_REG_MIPS_CP0_CONTEXT,
- KVM_REG_MIPS_CP0_USERLOCAL,
- KVM_REG_MIPS_CP0_PAGEMASK,
- KVM_REG_MIPS_CP0_WIRED,
- KVM_REG_MIPS_CP0_HWRENA,
- KVM_REG_MIPS_CP0_BADVADDR,
- KVM_REG_MIPS_CP0_COUNT,
- KVM_REG_MIPS_CP0_ENTRYHI,
- KVM_REG_MIPS_CP0_COMPARE,
- KVM_REG_MIPS_CP0_STATUS,
- KVM_REG_MIPS_CP0_INTCTL,
- KVM_REG_MIPS_CP0_CAUSE,
- KVM_REG_MIPS_CP0_EPC,
- KVM_REG_MIPS_CP0_PRID,
- KVM_REG_MIPS_CP0_EBASE,
- KVM_REG_MIPS_CP0_CONFIG,
- KVM_REG_MIPS_CP0_CONFIG1,
- KVM_REG_MIPS_CP0_CONFIG2,
- KVM_REG_MIPS_CP0_CONFIG3,
- KVM_REG_MIPS_CP0_CONFIG4,
- KVM_REG_MIPS_CP0_CONFIG5,
- KVM_REG_MIPS_CP0_CONFIG7,
- KVM_REG_MIPS_CP0_ERROREPC,
- KVM_REG_MIPS_CP0_KSCRATCH1,
- KVM_REG_MIPS_CP0_KSCRATCH2,
- KVM_REG_MIPS_CP0_KSCRATCH3,
- KVM_REG_MIPS_CP0_KSCRATCH4,
- KVM_REG_MIPS_CP0_KSCRATCH5,
- KVM_REG_MIPS_CP0_KSCRATCH6,
-
- KVM_REG_MIPS_COUNT_CTL,
- KVM_REG_MIPS_COUNT_RESUME,
- KVM_REG_MIPS_COUNT_HZ,
-};
-
-static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu)
-{
- return ARRAY_SIZE(kvm_trap_emul_get_one_regs);
-}
-
-static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu,
- u64 __user *indices)
-{
- if (copy_to_user(indices, kvm_trap_emul_get_one_regs,
- sizeof(kvm_trap_emul_get_one_regs)))
- return -EFAULT;
- indices += ARRAY_SIZE(kvm_trap_emul_get_one_regs);
-
- return 0;
-}
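
The num_regs/copy_reg_indices callbacks back the standard KVM_GET_REG_LIST vCPU ioctl: userspace first probes for the count, then retrieves the register ids. A hedged userspace sketch (vcpu_fd is an assumed, already-open vCPU descriptor; error handling elided):

#include <errno.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

struct kvm_reg_list probe = { .n = 0 }, *list;

/* First call with n = 0: the kernel sets n to the required count, errno = E2BIG */
if (ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe) < 0 && errno == E2BIG) {
	list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
	list->n = probe.n;
	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) == 0) {
		/* list->reg[0 .. list->n) now holds KVM_REG_MIPS_* ids */
	}
	free(list);
}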
-
-static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
- const struct kvm_one_reg *reg,
- s64 *v)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
-
- switch (reg->id) {
- case KVM_REG_MIPS_CP0_INDEX:
- *v = (long)kvm_read_c0_guest_index(cop0);
- break;
- case KVM_REG_MIPS_CP0_ENTRYLO0:
- *v = kvm_read_c0_guest_entrylo0(cop0);
- break;
- case KVM_REG_MIPS_CP0_ENTRYLO1:
- *v = kvm_read_c0_guest_entrylo1(cop0);
- break;
- case KVM_REG_MIPS_CP0_CONTEXT:
- *v = (long)kvm_read_c0_guest_context(cop0);
- break;
- case KVM_REG_MIPS_CP0_USERLOCAL:
- *v = (long)kvm_read_c0_guest_userlocal(cop0);
- break;
- case KVM_REG_MIPS_CP0_PAGEMASK:
- *v = (long)kvm_read_c0_guest_pagemask(cop0);
- break;
- case KVM_REG_MIPS_CP0_WIRED:
- *v = (long)kvm_read_c0_guest_wired(cop0);
- break;
- case KVM_REG_MIPS_CP0_HWRENA:
- *v = (long)kvm_read_c0_guest_hwrena(cop0);
- break;
- case KVM_REG_MIPS_CP0_BADVADDR:
- *v = (long)kvm_read_c0_guest_badvaddr(cop0);
- break;
- case KVM_REG_MIPS_CP0_ENTRYHI:
- *v = (long)kvm_read_c0_guest_entryhi(cop0);
- break;
- case KVM_REG_MIPS_CP0_COMPARE:
- *v = (long)kvm_read_c0_guest_compare(cop0);
- break;
- case KVM_REG_MIPS_CP0_STATUS:
- *v = (long)kvm_read_c0_guest_status(cop0);
- break;
- case KVM_REG_MIPS_CP0_INTCTL:
- *v = (long)kvm_read_c0_guest_intctl(cop0);
- break;
- case KVM_REG_MIPS_CP0_CAUSE:
- *v = (long)kvm_read_c0_guest_cause(cop0);
- break;
- case KVM_REG_MIPS_CP0_EPC:
- *v = (long)kvm_read_c0_guest_epc(cop0);
- break;
- case KVM_REG_MIPS_CP0_PRID:
- *v = (long)kvm_read_c0_guest_prid(cop0);
- break;
- case KVM_REG_MIPS_CP0_EBASE:
- *v = (long)kvm_read_c0_guest_ebase(cop0);
- break;
- case KVM_REG_MIPS_CP0_CONFIG:
- *v = (long)kvm_read_c0_guest_config(cop0);
- break;
- case KVM_REG_MIPS_CP0_CONFIG1:
- *v = (long)kvm_read_c0_guest_config1(cop0);
- break;
- case KVM_REG_MIPS_CP0_CONFIG2:
- *v = (long)kvm_read_c0_guest_config2(cop0);
- break;
- case KVM_REG_MIPS_CP0_CONFIG3:
- *v = (long)kvm_read_c0_guest_config3(cop0);
- break;
- case KVM_REG_MIPS_CP0_CONFIG4:
- *v = (long)kvm_read_c0_guest_config4(cop0);
- break;
- case KVM_REG_MIPS_CP0_CONFIG5:
- *v = (long)kvm_read_c0_guest_config5(cop0);
- break;
- case KVM_REG_MIPS_CP0_CONFIG7:
- *v = (long)kvm_read_c0_guest_config7(cop0);
- break;
- case KVM_REG_MIPS_CP0_COUNT:
- *v = kvm_mips_read_count(vcpu);
- break;
- case KVM_REG_MIPS_COUNT_CTL:
- *v = vcpu->arch.count_ctl;
- break;
- case KVM_REG_MIPS_COUNT_RESUME:
- *v = ktime_to_ns(vcpu->arch.count_resume);
- break;
- case KVM_REG_MIPS_COUNT_HZ:
- *v = vcpu->arch.count_hz;
- break;
- case KVM_REG_MIPS_CP0_ERROREPC:
- *v = (long)kvm_read_c0_guest_errorepc(cop0);
- break;
- case KVM_REG_MIPS_CP0_KSCRATCH1:
- *v = (long)kvm_read_c0_guest_kscratch1(cop0);
- break;
- case KVM_REG_MIPS_CP0_KSCRATCH2:
- *v = (long)kvm_read_c0_guest_kscratch2(cop0);
- break;
- case KVM_REG_MIPS_CP0_KSCRATCH3:
- *v = (long)kvm_read_c0_guest_kscratch3(cop0);
- break;
- case KVM_REG_MIPS_CP0_KSCRATCH4:
- *v = (long)kvm_read_c0_guest_kscratch4(cop0);
- break;
- case KVM_REG_MIPS_CP0_KSCRATCH5:
- *v = (long)kvm_read_c0_guest_kscratch5(cop0);
- break;
- case KVM_REG_MIPS_CP0_KSCRATCH6:
- *v = (long)kvm_read_c0_guest_kscratch6(cop0);
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
- const struct kvm_one_reg *reg,
- s64 v)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- int ret = 0;
- unsigned int cur, change;
-
- switch (reg->id) {
- case KVM_REG_MIPS_CP0_INDEX:
- kvm_write_c0_guest_index(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_ENTRYLO0:
- kvm_write_c0_guest_entrylo0(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_ENTRYLO1:
- kvm_write_c0_guest_entrylo1(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_CONTEXT:
- kvm_write_c0_guest_context(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_USERLOCAL:
- kvm_write_c0_guest_userlocal(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_PAGEMASK:
- kvm_write_c0_guest_pagemask(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_WIRED:
- kvm_write_c0_guest_wired(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_HWRENA:
- kvm_write_c0_guest_hwrena(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_BADVADDR:
- kvm_write_c0_guest_badvaddr(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_ENTRYHI:
- kvm_write_c0_guest_entryhi(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_STATUS:
- kvm_write_c0_guest_status(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_INTCTL:
- /* No VInt, so no VS, read-only for now */
- break;
- case KVM_REG_MIPS_CP0_EPC:
- kvm_write_c0_guest_epc(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_PRID:
- kvm_write_c0_guest_prid(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_EBASE:
- /*
- * Allow core number to be written, but the exception base must
- * remain in guest KSeg0.
- */
- kvm_change_c0_guest_ebase(cop0, 0x1ffff000 | MIPS_EBASE_CPUNUM,
- v);
- break;
- case KVM_REG_MIPS_CP0_COUNT:
- kvm_mips_write_count(vcpu, v);
- break;
- case KVM_REG_MIPS_CP0_COMPARE:
- kvm_mips_write_compare(vcpu, v, false);
- break;
- case KVM_REG_MIPS_CP0_CAUSE:
- /*
- * If the timer is stopped or started (DC bit) it must look
- * atomic with changes to the interrupt pending bits (TI, IRQ5).
- * A timer interrupt should not happen in between.
- */
- if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
- if (v & CAUSEF_DC) {
- /* disable timer first */
- kvm_mips_count_disable_cause(vcpu);
- kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC,
- v);
- } else {
- /* enable timer last */
- kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC,
- v);
- kvm_mips_count_enable_cause(vcpu);
- }
- } else {
- kvm_write_c0_guest_cause(cop0, v);
- }
- break;
- case KVM_REG_MIPS_CP0_CONFIG:
- /* read-only for now */
- break;
- case KVM_REG_MIPS_CP0_CONFIG1:
- cur = kvm_read_c0_guest_config1(cop0);
- change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu);
- if (change) {
- v = cur ^ change;
- kvm_write_c0_guest_config1(cop0, v);
- }
- break;
- case KVM_REG_MIPS_CP0_CONFIG2:
- /* read-only for now */
- break;
- case KVM_REG_MIPS_CP0_CONFIG3:
- cur = kvm_read_c0_guest_config3(cop0);
- change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu);
- if (change) {
- v = cur ^ change;
- kvm_write_c0_guest_config3(cop0, v);
- }
- break;
- case KVM_REG_MIPS_CP0_CONFIG4:
- cur = kvm_read_c0_guest_config4(cop0);
- change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu);
- if (change) {
- v = cur ^ change;
- kvm_write_c0_guest_config4(cop0, v);
- }
- break;
- case KVM_REG_MIPS_CP0_CONFIG5:
- cur = kvm_read_c0_guest_config5(cop0);
- change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu);
- if (change) {
- v = cur ^ change;
- kvm_write_c0_guest_config5(cop0, v);
- }
- break;
- case KVM_REG_MIPS_CP0_CONFIG7:
- /* writes ignored */
- break;
- case KVM_REG_MIPS_COUNT_CTL:
- ret = kvm_mips_set_count_ctl(vcpu, v);
- break;
- case KVM_REG_MIPS_COUNT_RESUME:
- ret = kvm_mips_set_count_resume(vcpu, v);
- break;
- case KVM_REG_MIPS_COUNT_HZ:
- ret = kvm_mips_set_count_hz(vcpu, v);
- break;
- case KVM_REG_MIPS_CP0_ERROREPC:
- kvm_write_c0_guest_errorepc(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_KSCRATCH1:
- kvm_write_c0_guest_kscratch1(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_KSCRATCH2:
- kvm_write_c0_guest_kscratch2(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_KSCRATCH3:
- kvm_write_c0_guest_kscratch3(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_KSCRATCH4:
- kvm_write_c0_guest_kscratch4(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_KSCRATCH5:
- kvm_write_c0_guest_kscratch5(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_KSCRATCH6:
- kvm_write_c0_guest_kscratch6(cop0, v);
- break;
- default:
- return -EINVAL;
- }
- return ret;
-}
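
The Config1/3/4/5 cases above share one masked-update idiom: change = (cur ^ v) & wrmask keeps only the writable bits that actually differ, and cur ^ change flips exactly those bits. A worked example with illustrative values:

/* Worked example of the masked-update idiom used for the Config registers. */
static u32 masked_update_example(void)
{
	u32 cur    = 0xA5;               /* current guest Config value      */
	u32 v      = 0x5A;               /* value requested via SET_ONE_REG */
	u32 wrmask = 0x0F;               /* writable bits for this VCPU     */
	u32 change = (cur ^ v) & wrmask; /* 0xFF & 0x0F = 0x0F: only the low nibble may change */

	return cur ^ change;             /* 0xAA: low nibble from v, high nibble preserved */
}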
-
-static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
-{
- struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
- struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
- struct mm_struct *mm;
-
- /*
- * Were we in guest context? If so, restore the appropriate ASID based
- * on the mode of the Guest (Kernel/User).
- */
- if (current->flags & PF_VCPU) {
- mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
- check_switch_mmu_context(mm);
- kvm_mips_suspend_mm(cpu);
- ehb();
- }
-
- return 0;
-}
-
-static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
-{
- kvm_lose_fpu(vcpu);
-
- if (current->flags & PF_VCPU) {
- /* Restore normal Linux process memory map */
- check_switch_mmu_context(current->mm);
- kvm_mips_resume_mm(cpu);
- ehb();
- }
-
- return 0;
-}
-
-static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu,
- bool reload_asid)
-{
- struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
- struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
- struct mm_struct *mm;
- int i;
-
- if (likely(!kvm_request_pending(vcpu)))
- return;
-
- if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
- /*
- * Both kernel & user GVA mappings must be invalidated. The
- * caller is just about to check whether the ASID is stale
- * anyway so no need to reload it here.
- */
- kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_GPA | KMF_KERN);
- kvm_mips_flush_gva_pt(user_mm->pgd, KMF_GPA | KMF_USER);
- for_each_possible_cpu(i) {
- set_cpu_context(i, kern_mm, 0);
- set_cpu_context(i, user_mm, 0);
- }
-
- /* Generate new ASID for current mode */
- if (reload_asid) {
- mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
- get_new_mmu_context(mm);
- htw_stop();
- write_c0_entryhi(cpu_asid(cpu, mm));
- TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
- htw_start();
- }
- }
-}
-
-/**
- * kvm_trap_emul_gva_lockless_begin() - Begin lockless access to GVA space.
- * @vcpu: VCPU pointer.
- *
- * Call before a GVA space access outside of guest mode, to ensure that
- * asynchronous TLB flush requests are handled or delayed until completion of
- * the GVA access (as indicated by a matching kvm_trap_emul_gva_lockless_end()).
- *
- * Should be called with IRQs already enabled.
- */
-void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu)
-{
- /* We re-enable IRQs in kvm_trap_emul_gva_lockless_end() */
- WARN_ON_ONCE(irqs_disabled());
-
- /*
- * The caller is about to access the GVA space, so we set the mode to
- * force TLB flush requests to send an IPI, and also disable IRQs to
- * delay IPI handling until kvm_trap_emul_gva_lockless_end().
- */
- local_irq_disable();
-
- /*
- * Make sure the read of VCPU requests is not reordered ahead of the
- * write to vcpu->mode, or we could miss a TLB flush request while
- * the requester sees the VCPU as outside of guest mode and not needing
- * an IPI.
- */
- smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
-
- /*
- * If a TLB flush has been requested (potentially while
- * OUTSIDE_GUEST_MODE and assumed immediately effective), perform it
- * before accessing the GVA space, and be sure to reload the ASID if
- * necessary as it'll be immediately used.
- *
- * TLB flush requests after this check will trigger an IPI due to the
- * mode change above, which will be delayed due to IRQs disabled.
- */
- kvm_trap_emul_check_requests(vcpu, smp_processor_id(), true);
-}
-
-/**
- * kvm_trap_emul_gva_lockless_end() - End lockless access to GVA space.
- * @vcpu: VCPU pointer.
- *
- * Called after a GVA space access outside of guest mode. Should have a matching
- * call to kvm_trap_emul_gva_lockless_begin().
- */
-void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu)
-{
- /*
- * Make sure the write to vcpu->mode is not reordered in front of GVA
- * accesses, or a TLB flush requester may not think it necessary to send
- * an IPI.
- */
- smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
-
- /*
-	 * Now that the access to GVA space is complete, it's safe for pending
- * TLB flush request IPIs to be handled (which indicates completion).
- */
- local_irq_enable();
-}
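
Together the begin/end pair brackets every root-context GVA access, exactly as kvm_get_inst() earlier in this commit uses it; the canonical shape (read_guest_word is a hypothetical name for the sketch):

/* Canonical use of the bracketing pair (mirrors kvm_get_inst() above). */
static int read_guest_word(struct kvm_vcpu *vcpu, unsigned long gva, u32 *out)
{
	int err;

	kvm_trap_emul_gva_lockless_begin(vcpu);   /* IRQs off, mode = READING_SHADOW_PAGE_TABLES */
	err = get_user(*out, (u32 __user *)gva);  /* may fault if a GVA flush raced with us */
	kvm_trap_emul_gva_lockless_end(vcpu);     /* mode = OUTSIDE_GUEST_MODE, IRQs back on */

	return err;
}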
-
-static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
- struct kvm_vcpu *vcpu)
-{
- struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
- struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
- struct mm_struct *mm;
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- int i, cpu = smp_processor_id();
- unsigned int gasid;
-
- /*
- * No need to reload ASID, IRQs are disabled already so there's no rush,
- * and we'll check if we need to regenerate below anyway before
- * re-entering the guest.
- */
- kvm_trap_emul_check_requests(vcpu, cpu, false);
-
- if (KVM_GUEST_KERNEL_MODE(vcpu)) {
- mm = kern_mm;
- } else {
- mm = user_mm;
-
- /*
- * Lazy host ASID regeneration / PT flush for guest user mode.
- * If the guest ASID has changed since the last guest usermode
- * execution, invalidate the stale TLB entries and flush GVA PT
- * entries too.
- */
- gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID;
- if (gasid != vcpu->arch.last_user_gasid) {
- kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER);
- for_each_possible_cpu(i)
- set_cpu_context(i, user_mm, 0);
- vcpu->arch.last_user_gasid = gasid;
- }
- }
-
- /*
- * Check if ASID is stale. This may happen due to a TLB flush request or
- * a lazy user MM invalidation.
- */
- check_mmu_context(mm);
-}
-
-static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
-{
- int cpu = smp_processor_id();
- int r;
-
- /* Check if we have any exceptions/interrupts pending */
- kvm_mips_deliver_interrupts(vcpu,
- kvm_read_c0_guest_cause(vcpu->arch.cop0));
-
- kvm_trap_emul_vcpu_reenter(run, vcpu);
-
- /*
- * We use user accessors to access guest memory, but we don't want to
- * invoke Linux page faulting.
- */
- pagefault_disable();
-
- /* Disable hardware page table walking while in guest */
- htw_stop();
-
- /*
- * While in guest context we're in the guest's address space, not the
- * host process address space, so we need to be careful not to confuse
- * e.g. cache management IPIs.
- */
- kvm_mips_suspend_mm(cpu);
-
- r = vcpu->arch.vcpu_run(run, vcpu);
-
- /* We may have migrated while handling guest exits */
- cpu = smp_processor_id();
-
- /* Restore normal Linux process memory map */
- check_switch_mmu_context(current->mm);
- kvm_mips_resume_mm(cpu);
-
- htw_start();
-
- pagefault_enable();
-
- return r;
-}
-
-static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
- /* exit handlers */
- .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
- .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
- .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
- .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
- .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
- .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
- .handle_syscall = kvm_trap_emul_handle_syscall,
- .handle_res_inst = kvm_trap_emul_handle_res_inst,
- .handle_break = kvm_trap_emul_handle_break,
- .handle_trap = kvm_trap_emul_handle_trap,
- .handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
- .handle_fpe = kvm_trap_emul_handle_fpe,
- .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,
- .handle_guest_exit = kvm_trap_emul_no_handler,
-
- .hardware_enable = kvm_trap_emul_hardware_enable,
- .hardware_disable = kvm_trap_emul_hardware_disable,
- .check_extension = kvm_trap_emul_check_extension,
- .vcpu_init = kvm_trap_emul_vcpu_init,
- .vcpu_uninit = kvm_trap_emul_vcpu_uninit,
- .vcpu_setup = kvm_trap_emul_vcpu_setup,
- .flush_shadow_all = kvm_trap_emul_flush_shadow_all,
- .flush_shadow_memslot = kvm_trap_emul_flush_shadow_memslot,
- .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
- .queue_timer_int = kvm_mips_queue_timer_int_cb,
- .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
- .queue_io_int = kvm_mips_queue_io_int_cb,
- .dequeue_io_int = kvm_mips_dequeue_io_int_cb,
- .irq_deliver = kvm_mips_irq_deliver_cb,
- .irq_clear = kvm_mips_irq_clear_cb,
- .num_regs = kvm_trap_emul_num_regs,
- .copy_reg_indices = kvm_trap_emul_copy_reg_indices,
- .get_one_reg = kvm_trap_emul_get_one_reg,
- .set_one_reg = kvm_trap_emul_set_one_reg,
- .vcpu_load = kvm_trap_emul_vcpu_load,
- .vcpu_put = kvm_trap_emul_vcpu_put,
- .vcpu_run = kvm_trap_emul_vcpu_run,
- .vcpu_reenter = kvm_trap_emul_vcpu_reenter,
-};
-
-int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
-{
- *install_callbacks = &kvm_trap_emul_callbacks;
- return 0;
-}
diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c
index dde20887a70d..99d5a71e4300 100644
--- a/arch/mips/kvm/vz.c
+++ b/arch/mips/kvm/vz.c
@@ -29,6 +29,9 @@
#include <linux/kvm_host.h>
#include "interrupt.h"
+#ifdef CONFIG_CPU_LOONGSON64
+#include "loongson_regs.h"
+#endif
#include "trace.h"
@@ -126,6 +129,11 @@ static inline unsigned int kvm_vz_config5_guest_wrmask(struct kvm_vcpu *vcpu)
return mask;
}
+static inline unsigned int kvm_vz_config6_guest_wrmask(struct kvm_vcpu *vcpu)
+{
+ return LOONGSON_CONF6_INTIMER | LOONGSON_CONF6_EXTIMER;
+}
+
/*
* VZ optionally allows these additional Config bits to be written by root:
* Config: M, [MT]
@@ -180,6 +188,12 @@ static inline unsigned int kvm_vz_config5_user_wrmask(struct kvm_vcpu *vcpu)
return kvm_vz_config5_guest_wrmask(vcpu) | MIPS_CONF5_MRP;
}
+static inline unsigned int kvm_vz_config6_user_wrmask(struct kvm_vcpu *vcpu)
+{
+ return kvm_vz_config6_guest_wrmask(vcpu) |
+ LOONGSON_CONF6_SFBEN | LOONGSON_CONF6_FTLBDIS;
+}
+
static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva)
{
/* VZ guest has already converted gva to gpa */
@@ -225,23 +239,7 @@ static void kvm_vz_queue_io_int_cb(struct kvm_vcpu *vcpu,
* interrupts are asynchronous to vcpu execution therefore defer guest
* cp0 accesses
*/
- switch (intr) {
- case 2:
- kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IO);
- break;
-
- case 3:
- kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
- break;
-
- case 4:
- kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
- break;
-
- default:
- break;
- }
-
+ kvm_vz_queue_irq(vcpu, kvm_irq_to_priority(intr));
}
static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
@@ -253,44 +251,22 @@ static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
 * interrupts are asynchronous to vcpu execution, therefore defer guest
* cp0 accesses
*/
- switch (intr) {
- case -2:
- kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
- break;
-
- case -3:
- kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
- break;
-
- case -4:
- kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
- break;
-
- default:
- break;
- }
-
+ kvm_vz_dequeue_irq(vcpu, kvm_irq_to_priority(-intr));
}
-static u32 kvm_vz_priority_to_irq[MIPS_EXC_MAX] = {
- [MIPS_EXC_INT_TIMER] = C_IRQ5,
- [MIPS_EXC_INT_IO] = C_IRQ0,
- [MIPS_EXC_INT_IPI_1] = C_IRQ1,
- [MIPS_EXC_INT_IPI_2] = C_IRQ2,
-};
-
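
The per-interrupt switch statements above, and the kvm_vz_priority_to_irq table just removed, give way to shared kvm_irq_to_priority()/kvm_priority_to_irq helpers whose bodies live outside this file's hunks. Presumably kvm_irq_to_priority() is the inverse lookup over that table, along these lines (an assumption, not shown in this commit):

/* Assumed shape of kvm_irq_to_priority(); its body is not in these hunks. */
u32 kvm_irq_to_priority(u32 irq)
{
	int i;

	for (i = MIPS_EXC_INT_TIMER; i < MIPS_EXC_MAX; i++) {
		if (kvm_priority_to_irq[i] == (1 << (irq + 8)))
			return i;
	}
	return MIPS_EXC_MAX;
}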
static int kvm_vz_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
u32 cause)
{
u32 irq = (priority < MIPS_EXC_MAX) ?
- kvm_vz_priority_to_irq[priority] : 0;
+ kvm_priority_to_irq[priority] : 0;
switch (priority) {
case MIPS_EXC_INT_TIMER:
set_gc0_cause(C_TI);
break;
- case MIPS_EXC_INT_IO:
+ case MIPS_EXC_INT_IO_1:
+ case MIPS_EXC_INT_IO_2:
case MIPS_EXC_INT_IPI_1:
case MIPS_EXC_INT_IPI_2:
if (cpu_has_guestctl2)
@@ -311,14 +287,13 @@ static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
u32 cause)
{
u32 irq = (priority < MIPS_EXC_MAX) ?
- kvm_vz_priority_to_irq[priority] : 0;
+ kvm_priority_to_irq[priority] : 0;
switch (priority) {
case MIPS_EXC_INT_TIMER:
/*
- * Call to kvm_write_c0_guest_compare() clears Cause.TI in
- * kvm_mips_emulate_CP0(). Explicitly clear irq associated with
- * Cause.IP[IPTI] if GuestCtl2 virtual interrupt register not
+ * Explicitly clear irq associated with Cause.IP[IPTI]
+ * if GuestCtl2 virtual interrupt register not
* supported or if not using GuestCtl2 Hardware Clear.
*/
if (cpu_has_guestctl2) {
@@ -329,7 +304,8 @@ static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
}
break;
- case MIPS_EXC_INT_IO:
+ case MIPS_EXC_INT_IO_1:
+ case MIPS_EXC_INT_IO_2:
case MIPS_EXC_INT_IPI_1:
case MIPS_EXC_INT_IPI_2:
/* Clear GuestCtl2.VIP irq if not using Hardware Clear */
@@ -412,7 +388,6 @@ static void _kvm_vz_restore_htimer(struct kvm_vcpu *vcpu,
u32 compare, u32 cause)
{
u32 start_count, after_count;
- ktime_t freeze_time;
unsigned long flags;
/*
@@ -420,7 +395,7 @@ static void _kvm_vz_restore_htimer(struct kvm_vcpu *vcpu,
* this with interrupts disabled to avoid latency.
*/
local_irq_save(flags);
- freeze_time = kvm_mips_freeze_hrtimer(vcpu, &start_count);
+ kvm_mips_freeze_hrtimer(vcpu, &start_count);
write_c0_gtoffset(start_count - read_c0_count());
local_irq_restore(flags);
@@ -447,7 +422,7 @@ static void _kvm_vz_restore_htimer(struct kvm_vcpu *vcpu,
*/
static void kvm_vz_restore_timer(struct kvm_vcpu *vcpu)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
u32 cause, compare;
compare = kvm_read_sw_gc0_compare(cop0);
@@ -483,8 +458,8 @@ void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu)
/**
* _kvm_vz_save_htimer() - Switch to software emulation of guest timer.
* @vcpu: Virtual CPU.
- * @compare: Pointer to write compare value to.
- * @cause: Pointer to write cause value to.
+ * @out_compare: Pointer to write compare value to.
+ * @out_cause: Pointer to write cause value to.
*
* Save VZ guest timer state and switch to software emulation of guest CP0
* timer. The hard timer must already be in use, so preemption should be
@@ -542,7 +517,7 @@ static void _kvm_vz_save_htimer(struct kvm_vcpu *vcpu,
*/
static void kvm_vz_save_timer(struct kvm_vcpu *vcpu)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
u32 gctl0, compare, cause;
gctl0 = read_c0_guestctl0();
@@ -888,7 +863,7 @@ static unsigned long mips_process_maar(unsigned int op, unsigned long val)
static void kvm_write_maari(struct kvm_vcpu *vcpu, unsigned long val)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
val &= MIPS_MAARI_INDEX;
if (val == MIPS_MAARI_INDEX)
@@ -899,10 +874,9 @@ static void kvm_write_maari(struct kvm_vcpu *vcpu, unsigned long val)
static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
u32 *opc, u32 cause,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
enum emulation_result er = EMULATE_DONE;
u32 rt, rd, sel;
unsigned long curr_pc;
@@ -966,7 +940,8 @@ static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
(sel == 2 || /* SRSCtl */
sel == 3)) || /* SRSMap */
(rd == MIPS_CP0_CONFIG &&
- (sel == 7)) || /* Config7 */
+ (sel == 6 || /* Config6 */
+ sel == 7)) || /* Config7 */
(rd == MIPS_CP0_LLADDR &&
(sel == 2) && /* MAARI */
cpu_guest_has_maar &&
@@ -974,6 +949,11 @@ static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
(rd == MIPS_CP0_ERRCTL &&
(sel == 0))) { /* ErrCtl */
val = cop0->reg[rd][sel];
+#ifdef CONFIG_CPU_LOONGSON64
+ } else if (rd == MIPS_CP0_DIAG &&
+ (sel == 0)) { /* Diag */
+ val = cop0->reg[rd][sel];
+#endif
} else {
val = 0;
er = EMULATE_FAIL;
@@ -1036,9 +1016,40 @@ static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
cpu_guest_has_maar &&
!cpu_guest_has_dyn_maar) {
kvm_write_maari(vcpu, val);
+ } else if (rd == MIPS_CP0_CONFIG &&
+ (sel == 6)) {
+ cop0->reg[rd][sel] = (int)val;
} else if (rd == MIPS_CP0_ERRCTL &&
(sel == 0)) { /* ErrCtl */
/* ignore the written value */
+#ifdef CONFIG_CPU_LOONGSON64
+ } else if (rd == MIPS_CP0_DIAG &&
+ (sel == 0)) { /* Diag */
+ unsigned long flags;
+
+ local_irq_save(flags);
+ if (val & LOONGSON_DIAG_BTB) {
+ /* Flush BTB */
+ set_c0_diag(LOONGSON_DIAG_BTB);
+ }
+ if (val & LOONGSON_DIAG_ITLB) {
+ /* Flush ITLB */
+ set_c0_diag(LOONGSON_DIAG_ITLB);
+ }
+ if (val & LOONGSON_DIAG_DTLB) {
+ /* Flush DTLB */
+ set_c0_diag(LOONGSON_DIAG_DTLB);
+ }
+ if (val & LOONGSON_DIAG_VTLB) {
+ /* Flush VTLB */
+ kvm_loongson_clear_guest_vtlb();
+ }
+ if (val & LOONGSON_DIAG_FTLB) {
+ /* Flush FTLB */
+ kvm_loongson_clear_guest_ftlb();
+ }
+ local_irq_restore(flags);
+#endif
} else {
er = EMULATE_FAIL;
}
@@ -1062,7 +1073,6 @@ static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
u32 *opc, u32 cause,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DONE;
@@ -1118,7 +1128,7 @@ static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
break;
default:
break;
- };
+ }
kvm_err("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base],
@@ -1129,12 +1139,81 @@ static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
return EMULATE_FAIL;
}
+#ifdef CONFIG_CPU_LOONGSON64
+static enum emulation_result kvm_vz_gpsi_lwc2(union mips_instruction inst,
+ u32 *opc, u32 cause,
+ struct kvm_vcpu *vcpu)
+{
+ unsigned int rs, rd;
+ unsigned int hostcfg;
+ unsigned long curr_pc;
+ enum emulation_result er = EMULATE_DONE;
+
+ /*
+	 * Update PC and hold onto the current PC in case there is
+	 * an error and we want to roll back the PC
+ */
+ curr_pc = vcpu->arch.pc;
+ er = update_pc(vcpu, cause);
+ if (er == EMULATE_FAIL)
+ return er;
+
+ rs = inst.loongson3_lscsr_format.rs;
+ rd = inst.loongson3_lscsr_format.rd;
+ switch (inst.loongson3_lscsr_format.fr) {
+ case 0x8: /* Read CPUCFG */
+ ++vcpu->stat.vz_cpucfg_exits;
+ hostcfg = read_cpucfg(vcpu->arch.gprs[rs]);
+
+ switch (vcpu->arch.gprs[rs]) {
+ case LOONGSON_CFG0:
+ vcpu->arch.gprs[rd] = 0x14c000;
+ break;
+ case LOONGSON_CFG1:
+ hostcfg &= (LOONGSON_CFG1_FP | LOONGSON_CFG1_MMI |
+ LOONGSON_CFG1_MSA1 | LOONGSON_CFG1_MSA2 |
+ LOONGSON_CFG1_SFBP);
+ vcpu->arch.gprs[rd] = hostcfg;
+ break;
+ case LOONGSON_CFG2:
+ hostcfg &= (LOONGSON_CFG2_LEXT1 | LOONGSON_CFG2_LEXT2 |
+ LOONGSON_CFG2_LEXT3 | LOONGSON_CFG2_LSPW);
+ vcpu->arch.gprs[rd] = hostcfg;
+ break;
+ case LOONGSON_CFG3:
+ vcpu->arch.gprs[rd] = hostcfg;
+ break;
+ default:
+ /* Don't export any other advanced features to guest */
+ vcpu->arch.gprs[rd] = 0;
+ break;
+ }
+ break;
+
+ default:
+ kvm_err("lwc2 emulate not impl %d rs %lx @%lx\n",
+ inst.loongson3_lscsr_format.fr, vcpu->arch.gprs[rs], curr_pc);
+ er = EMULATE_FAIL;
+ break;
+ }
+
+ /* Rollback PC only if emulation was unsuccessful */
+ if (er == EMULATE_FAIL) {
+ kvm_err("[%#lx]%s: unsupported lwc2 instruction 0x%08x 0x%08x\n",
+ curr_pc, __func__, inst.word, inst.loongson3_lscsr_format.fr);
+
+ vcpu->arch.pc = curr_pc;
+ }
+
+ return er;
+}
+#endif
+
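The CPUCFG whitelist in kvm_vz_gpsi_lwc2() can be read as a per-select mask over the host values; restated as a table (a sketch assuming the LOONGSON_CFGn selectors are small consecutive integers, with CFG0 special-cased to a fixed value in the handler and unknown selectors reading as 0):

/* Sketch: guest-visible CPUCFG bits allowed by the handler above. */
static const u32 guest_cpucfg_mask[] = {
	[LOONGSON_CFG1] = LOONGSON_CFG1_FP | LOONGSON_CFG1_MMI |
			  LOONGSON_CFG1_MSA1 | LOONGSON_CFG1_MSA2 |
			  LOONGSON_CFG1_SFBP,
	[LOONGSON_CFG2] = LOONGSON_CFG2_LEXT1 | LOONGSON_CFG2_LEXT2 |
			  LOONGSON_CFG2_LEXT3 | LOONGSON_CFG2_LSPW,
	[LOONGSON_CFG3] = ~0u,	/* CFG3 passed through unmasked */
};
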
static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DONE;
struct kvm_vcpu_arch *arch = &vcpu->arch;
- struct kvm_run *run = vcpu->run;
union mips_instruction inst;
int rd, rt, sel;
int err;
@@ -1150,12 +1229,17 @@ static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
switch (inst.r_format.opcode) {
case cop0_op:
- er = kvm_vz_gpsi_cop0(inst, opc, cause, run, vcpu);
+ er = kvm_vz_gpsi_cop0(inst, opc, cause, vcpu);
break;
#ifndef CONFIG_CPU_MIPSR6
case cache_op:
trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
- er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
+ er = kvm_vz_gpsi_cache(inst, opc, cause, vcpu);
+ break;
+#endif
+#ifdef CONFIG_CPU_LOONGSON64
+ case lwc2_op:
+ er = kvm_vz_gpsi_lwc2(inst, opc, cause, vcpu);
break;
#endif
case spec3_op:
@@ -1163,7 +1247,7 @@ static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
#ifdef CONFIG_CPU_MIPSR6
case cache6_op:
trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
- er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
+ er = kvm_vz_gpsi_cache(inst, opc, cause, vcpu);
break;
#endif
case rdhwr_op:
@@ -1183,7 +1267,7 @@ static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
KVM_TRACE_HWR(rd, sel), 0);
goto unknown;
- };
+ }
trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
KVM_TRACE_HWR(rd, sel), arch->gprs[rt]);
@@ -1192,7 +1276,7 @@ static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
break;
default:
goto unknown;
- };
+ }
break;
unknown:
@@ -1457,15 +1541,17 @@ static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
}
/**
- * kvm_trap_vz_handle_cop_unusuable() - Guest used unusable coprocessor.
+ * kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor.
* @vcpu: Virtual CPU context.
*
* Handle when the guest attempts to use a coprocessor which hasn't been allowed
* by the root context.
+ *
+ * Return: value indicating whether to resume the host or the guest
+ * (RESUME_HOST or RESUME_GUEST)
*/
static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
- struct kvm_run *run = vcpu->run;
u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_FAIL;
int ret = RESUME_GUEST;
@@ -1493,7 +1579,7 @@ static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
break;
case EMULATE_FAIL:
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
break;
@@ -1509,11 +1595,12 @@ static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
*
* Handle when the guest attempts to use MSA when it is disabled in the root
* context.
+ *
+ * Return: value indicating whether to resume the host or the guest
+ * (RESUME_HOST or RESUME_GUEST)
*/
static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
- struct kvm_run *run = vcpu->run;
-
/*
* If MSA not present or not exposed to guest or FR=0, the MSA operation
* should have been treated as a reserved instruction!
@@ -1524,7 +1611,7 @@ static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
(read_gc0_status() & (ST0_CU1 | ST0_FR)) == ST0_CU1 ||
!(read_gc0_config5() & MIPS_CONF5_MSAEN) ||
vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return RESUME_HOST;
}
@@ -1560,7 +1647,7 @@ static int kvm_trap_vz_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
}
/* Treat as MMIO */
- er = kvm_mips_emulate_load(inst, cause, run, vcpu);
+ er = kvm_mips_emulate_load(inst, cause, vcpu);
if (er == EMULATE_FAIL) {
kvm_err("Guest Emulate Load from MMIO space failed: PC: %p, BadVaddr: %#lx\n",
opc, badvaddr);
@@ -1607,7 +1694,7 @@ static int kvm_trap_vz_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
}
/* Treat as MMIO */
- er = kvm_mips_emulate_store(inst, cause, run, vcpu);
+ er = kvm_mips_emulate_store(inst, cause, vcpu);
if (er == EMULATE_FAIL) {
kvm_err("Guest Emulate Store to MMIO space failed: PC: %p, BadVaddr: %#lx\n",
opc, badvaddr);
@@ -1652,6 +1739,7 @@ static u64 kvm_vz_get_one_regs[] = {
KVM_REG_MIPS_CP0_CONFIG3,
KVM_REG_MIPS_CP0_CONFIG4,
KVM_REG_MIPS_CP0_CONFIG5,
+ KVM_REG_MIPS_CP0_CONFIG6,
#ifdef CONFIG_64BIT
KVM_REG_MIPS_CP0_XCONTEXT,
#endif
@@ -1706,7 +1794,7 @@ static unsigned long kvm_vz_num_regs(struct kvm_vcpu *vcpu)
ret += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
if (cpu_guest_has_segments)
ret += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
- if (cpu_guest_has_htw)
+ if (cpu_guest_has_htw || cpu_guest_has_ldpte)
ret += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar)
ret += 1 + ARRAY_SIZE(vcpu->arch.maar);
@@ -1755,7 +1843,7 @@ static int kvm_vz_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
return -EFAULT;
indices += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
}
- if (cpu_guest_has_htw) {
+ if (cpu_guest_has_htw || cpu_guest_has_ldpte) {
if (copy_to_user(indices, kvm_vz_get_one_regs_htw,
sizeof(kvm_vz_get_one_regs_htw)))
return -EFAULT;
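With cpu_guest_has_ldpte now also gating the PW* registers, Loongson-3 guests expose them through the one-reg list even without an architectural hardware page table walker. A hedged userspace sketch using the stock KVM_GET_REG_LIST ioctl to discover them:

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>
#include <stdlib.h>

/* Enumerate all one-reg IDs for a vCPU. The first ioctl is expected to
 * fail with E2BIG while filling in the required count. */
static struct kvm_reg_list *vcpu_reg_list(int vcpu_fd)
{
	struct kvm_reg_list probe = { .n = 0 }, *list;

	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);
	list = calloc(1, sizeof(*list) + probe.n * sizeof(uint64_t));
	if (!list)
		return NULL;
	list->n = probe.n;
	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0) {
		free(list);
		return NULL;
	}
	return list;	/* includes KVM_REG_MIPS_CP0_PW* on ldpte CPUs */
}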
@@ -1823,7 +1911,7 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg,
s64 *v)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
unsigned int idx;
switch (reg->id) {
@@ -1878,17 +1966,17 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
*v = read_gc0_segctl2();
break;
case KVM_REG_MIPS_CP0_PWBASE:
- if (!cpu_guest_has_htw)
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
*v = read_gc0_pwbase();
break;
case KVM_REG_MIPS_CP0_PWFIELD:
- if (!cpu_guest_has_htw)
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
*v = read_gc0_pwfield();
break;
case KVM_REG_MIPS_CP0_PWSIZE:
- if (!cpu_guest_has_htw)
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
*v = read_gc0_pwsize();
break;
@@ -1896,7 +1984,7 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
*v = (long)read_gc0_wired();
break;
case KVM_REG_MIPS_CP0_PWCTL:
- if (!cpu_guest_has_htw)
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
*v = read_gc0_pwctl();
break;
@@ -1946,7 +2034,7 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
default:
*v = (long)kvm_read_c0_guest_prid(cop0);
break;
- };
+ }
break;
case KVM_REG_MIPS_CP0_EBASE:
*v = kvm_vz_read_gc0_ebase();
@@ -1979,6 +2067,9 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
return -EINVAL;
*v = read_gc0_config5();
break;
+ case KVM_REG_MIPS_CP0_CONFIG6:
+ *v = kvm_read_sw_gc0_config6(cop0);
+ break;
case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
return -EINVAL;
@@ -1990,7 +2081,7 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
case KVM_REG_MIPS_CP0_MAARI:
if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
return -EINVAL;
- *v = kvm_read_sw_gc0_maari(vcpu->arch.cop0);
+ *v = kvm_read_sw_gc0_maari(&vcpu->arch.cop0);
break;
#ifdef CONFIG_64BIT
case KVM_REG_MIPS_CP0_XCONTEXT:
@@ -2044,7 +2135,7 @@ static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg,
s64 v)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
unsigned int idx;
int ret = 0;
unsigned int cur, change;
@@ -2101,17 +2192,17 @@ static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
write_gc0_segctl2(v);
break;
case KVM_REG_MIPS_CP0_PWBASE:
- if (!cpu_guest_has_htw)
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
write_gc0_pwbase(v);
break;
case KVM_REG_MIPS_CP0_PWFIELD:
- if (!cpu_guest_has_htw)
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
write_gc0_pwfield(v);
break;
case KVM_REG_MIPS_CP0_PWSIZE:
- if (!cpu_guest_has_htw)
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
write_gc0_pwsize(v);
break;
@@ -2119,7 +2210,7 @@ static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
change_gc0_wired(MIPSR6_WIRED_WIRED, v);
break;
case KVM_REG_MIPS_CP0_PWCTL:
- if (!cpu_guest_has_htw)
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
write_gc0_pwctl(v);
break;
@@ -2185,7 +2276,7 @@ static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
default:
kvm_write_c0_guest_prid(cop0, v);
break;
- };
+ }
break;
case KVM_REG_MIPS_CP0_EBASE:
kvm_vz_write_gc0_ebase(v);
@@ -2248,6 +2339,14 @@ static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
write_gc0_config5(v);
}
break;
+ case KVM_REG_MIPS_CP0_CONFIG6:
+ cur = kvm_read_sw_gc0_config6(cop0);
+ change = (cur ^ v) & kvm_vz_config6_user_wrmask(vcpu);
+ if (change) {
+ v = cur ^ change;
+ kvm_write_sw_gc0_config6(cop0, (int)v);
+ }
+ break;
case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
return -EINVAL;
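A hedged userspace sketch of driving the new Config6 state through the existing one-reg interface. It assumes the KVM_REG_MIPS_CP0_CONFIG6 ID constant is visible to userspace (in-tree it lives in the kernel's MIPS KVM headers rather than uapi); bits outside kvm_vz_config6_user_wrmask() are silently preserved by the host.

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>

/* Read-modify-write one guest Config6 bit; which bits stick is decided
 * by kvm_vz_config6_user_wrmask() on the host side. */
static int vcpu_config6_set_bit(int vcpu_fd, uint64_t bit)
{
	uint64_t val;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_MIPS_CP0_CONFIG6,  /* assumed visible here */
		.addr = (uintptr_t)&val,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		return -1;
	val |= bit;
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}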
@@ -2463,7 +2562,7 @@ static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu)
static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
bool migrated, all;
/*
@@ -2580,7 +2679,7 @@ static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
}
/* restore HTW registers */
- if (cpu_guest_has_htw) {
+ if (cpu_guest_has_htw || cpu_guest_has_ldpte) {
kvm_restore_gc0_pwbase(cop0);
kvm_restore_gc0_pwfield(cop0);
kvm_restore_gc0_pwsize(cop0);
@@ -2597,7 +2696,7 @@ static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
* prevents a SC on the next VCPU from succeeding by matching a LL on
* the previous VCPU.
*/
- if (cpu_guest_has_rw_llb)
+ if (vcpu->kvm->created_vcpus > 1)
write_gc0_lladdr(0);
return 0;
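The lladdr clearing above protects guest ll/sc sequences across vCPU switches on the same physical CPU. A rough stand-alone illustration of the guest-side pattern being protected, with a compiler builtin standing in for the ll/sc pair:

/* Guest atomic increment, conceptually ll/sc. If a reservation set by
 * vCPU A could survive into vCPU B, B's sc might succeed against A's
 * ll; clearing LLAddr on load forces the retry loop instead. */
static int guest_atomic_inc(volatile int *p)
{
	int old;

	do {
		old = *p;	/* ll  old, (p) */
				/* sc  old + 1, (p) */
	} while (!__sync_bool_compare_and_swap(p, old, old + 1));
	return old + 1;
}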
@@ -2605,7 +2704,7 @@ static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
if (current->flags & PF_VCPU)
kvm_vz_vcpu_save_wired(vcpu);
@@ -2685,8 +2784,8 @@ static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
}
/* save HTW registers if enabled in guest */
- if (cpu_guest_has_htw &&
- kvm_read_sw_gc0_config3(cop0) & MIPS_CONF3_PW) {
+ if (cpu_guest_has_ldpte || (cpu_guest_has_htw &&
+ kvm_read_sw_gc0_config3(cop0) & MIPS_CONF3_PW)) {
kvm_save_gc0_pwbase(cop0);
kvm_save_gc0_pwfield(cop0);
kvm_save_gc0_pwsize(cop0);
@@ -2853,8 +2952,12 @@ static int kvm_vz_hardware_enable(void)
write_c0_guestctl0(MIPS_GCTL0_CP0 |
(MIPS_GCTL0_AT_GUEST << MIPS_GCTL0_AT_SHIFT) |
MIPS_GCTL0_CG | MIPS_GCTL0_CF);
- if (cpu_has_guestctl0ext)
- set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
+ if (cpu_has_guestctl0ext) {
+ if (current_cpu_type() != CPU_LOONGSON64)
+ set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
+ else
+ clear_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
+ }
if (cpu_has_guestid) {
write_c0_guestctl1(0);
@@ -2871,6 +2974,12 @@ static int kvm_vz_hardware_enable(void)
if (cpu_has_guestctl2)
clear_c0_guestctl2(0x3f << 10);
+#ifdef CONFIG_CPU_LOONGSON64
+ /* Control guest CCA attribute */
+ if (cpu_has_csr())
+ csr_writel(csr_readl(0xffffffec) | 0x1, 0xffffffec);
+#endif
+
return 0;
}
@@ -2927,6 +3036,9 @@ static int kvm_vz_check_extension(struct kvm *kvm, long ext)
r = 2;
break;
#endif
+ case KVM_CAP_IOEVENTFD:
+ r = 1;
+ break;
default:
r = 0;
break;
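Userspace can probe the newly advertised capability with the standard check-extension ioctl on the system fd:

#include <fcntl.h>
#include <unistd.h>
#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Returns 1 on kernels with this change, 0 on older ones. */
static int kvm_has_ioeventfd(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);
	int r;

	if (kvm_fd < 0)
		return -1;
	r = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_IOEVENTFD);
	close(kvm_fd);
	return r;
}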
@@ -2964,7 +3076,7 @@ static void kvm_vz_vcpu_uninit(struct kvm_vcpu *vcpu)
static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
unsigned long count_hz = 100*1000*1000; /* default to 100 MHz */
/*
@@ -2980,7 +3092,7 @@ static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
*/
/* PageGrain */
- if (cpu_has_mips_r6)
+ if (cpu_has_mips_r5 || cpu_has_mips_r6)
kvm_write_sw_gc0_pagegrain(cop0, PG_RIE | PG_XIE | PG_IEC);
/* Wired */
if (cpu_has_mips_r6)
@@ -2988,7 +3100,7 @@ static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
read_gc0_wired() & MIPSR6_WIRED_LIMIT);
/* Status */
kvm_write_sw_gc0_status(cop0, ST0_BEV | ST0_ERL);
- if (cpu_has_mips_r6)
+ if (cpu_has_mips_r5 || cpu_has_mips_r6)
kvm_change_sw_gc0_status(cop0, ST0_FR, read_gc0_status());
/* IntCtl */
kvm_write_sw_gc0_intctl(cop0, read_gc0_intctl() &
@@ -3086,7 +3198,7 @@ static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
}
/* reset HTW registers */
- if (cpu_guest_has_htw && cpu_has_mips_r6) {
+ if (cpu_guest_has_htw && (cpu_has_mips_r5 || cpu_has_mips_r6)) {
/* PWField */
kvm_write_sw_gc0_pwfield(cop0, 0x0c30c302);
/* PWSize */
@@ -3103,33 +3215,23 @@ static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
return 0;
}
-static void kvm_vz_flush_shadow_all(struct kvm *kvm)
+static void kvm_vz_prepare_flush_shadow(struct kvm *kvm)
{
- if (cpu_has_guestid) {
- /* Flush GuestID for each VCPU individually */
- kvm_flush_remote_tlbs(kvm);
- } else {
+ if (!cpu_has_guestid) {
/*
* For each CPU there is a single GPA ASID used by all VCPUs in
* the VM, so it doesn't make sense for the VCPUs to handle
* invalidation of these ASIDs individually.
*
* Instead mark all CPUs as needing ASID invalidation in
- * asid_flush_mask, and just use kvm_flush_remote_tlbs(kvm) to
+ * asid_flush_mask, and kvm_flush_remote_tlbs(kvm) will
* kick any running VCPUs so they check asid_flush_mask.
*/
cpumask_setall(&kvm->arch.asid_flush_mask);
- kvm_flush_remote_tlbs(kvm);
}
}
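The hook above only marks state; the kick still comes from the generic kvm_flush_remote_tlbs(). A simplified stand-alone model of the mark-then-check pattern, using plain C atomics in place of the kernel cpumask API and assuming at most 64 physical CPUs:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_ulong asid_flush_mask;	/* one bit per physical CPU */

/* prepare_flush_shadow(): every CPU's GPA ASID is now stale. */
static void mark_all_asids_stale(void)
{
	atomic_store(&asid_flush_mask, ~0UL);
}

/* On vCPU entry: test-and-clear this CPU's bit; if it was set, the GPA
 * ASID must be regenerated before entering the guest. */
static bool need_asid_regen(int cpu)
{
	unsigned long bit = 1UL << cpu;

	return atomic_fetch_and(&asid_flush_mask, ~bit) & bit;
}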
-static void kvm_vz_flush_shadow_memslot(struct kvm *kvm,
- const struct kvm_memory_slot *slot)
-{
- kvm_vz_flush_shadow_all(kvm);
-}
-
-static void kvm_vz_vcpu_reenter(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static void kvm_vz_vcpu_reenter(struct kvm_vcpu *vcpu)
{
int cpu = smp_processor_id();
int preserve_guest_tlb;
@@ -3145,7 +3247,7 @@ static void kvm_vz_vcpu_reenter(struct kvm_run *run, struct kvm_vcpu *vcpu)
kvm_vz_vcpu_load_wired(vcpu);
}
-static int kvm_vz_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static int kvm_vz_vcpu_run(struct kvm_vcpu *vcpu)
{
int cpu = smp_processor_id();
int r;
@@ -3158,7 +3260,7 @@ static int kvm_vz_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
kvm_vz_vcpu_load_tlb(vcpu, cpu);
kvm_vz_vcpu_load_wired(vcpu);
- r = vcpu->arch.vcpu_run(run, vcpu);
+ r = vcpu->arch.vcpu_run(vcpu);
kvm_vz_vcpu_save_wired(vcpu);
@@ -3184,8 +3286,7 @@ static struct kvm_mips_callbacks kvm_vz_callbacks = {
.vcpu_init = kvm_vz_vcpu_init,
.vcpu_uninit = kvm_vz_vcpu_uninit,
.vcpu_setup = kvm_vz_vcpu_setup,
- .flush_shadow_all = kvm_vz_flush_shadow_all,
- .flush_shadow_memslot = kvm_vz_flush_shadow_memslot,
+ .prepare_flush_shadow = kvm_vz_prepare_flush_shadow,
.gva_to_gpa = kvm_vz_gva_to_gpa_cb,
.queue_timer_int = kvm_vz_queue_timer_int_cb,
.dequeue_timer_int = kvm_vz_dequeue_timer_int_cb,
@@ -3203,7 +3304,10 @@ static struct kvm_mips_callbacks kvm_vz_callbacks = {
.vcpu_reenter = kvm_vz_vcpu_reenter,
};
-int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
+/* FIXME: Get rid of the callbacks now that trap-and-emulate is gone. */
+const struct kvm_mips_callbacks * const kvm_mips_callbacks = &kvm_vz_callbacks;
+
+int kvm_mips_emulation_init(void)
{
if (!cpu_has_vz)
return -ENODEV;
@@ -3217,7 +3321,5 @@ int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
return -ENODEV;
pr_info("Starting KVM with MIPS VZ extensions\n");
-
- *install_callbacks = &kvm_vz_callbacks;
return 0;
}