Diffstat (limited to 'arch/mips/kvm/kvm_mips.c')
 arch/mips/kvm/kvm_mips.c | 179
 1 file changed, 90 insertions(+), 89 deletions(-)
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
index cd5e4f568439..52be52adf030 100644
--- a/arch/mips/kvm/kvm_mips.c
+++ b/arch/mips/kvm/kvm_mips.c
@@ -7,7 +7,7 @@
  *
  * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
  * Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
+ */
 
 #include <linux/errno.h>
 #include <linux/err.h>
@@ -31,38 +31,41 @@
 #define VECTORSPACING 0x100	/* for EI/VI mode */
 #endif
 
-#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
+#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
 struct kvm_stats_debugfs_item debugfs_entries[] = {
-	{ "wait", VCPU_STAT(wait_exits) },
-	{ "cache", VCPU_STAT(cache_exits) },
-	{ "signal", VCPU_STAT(signal_exits) },
-	{ "interrupt", VCPU_STAT(int_exits) },
-	{ "cop_unsuable", VCPU_STAT(cop_unusable_exits) },
-	{ "tlbmod", VCPU_STAT(tlbmod_exits) },
-	{ "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits) },
-	{ "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits) },
-	{ "addrerr_st", VCPU_STAT(addrerr_st_exits) },
-	{ "addrerr_ld", VCPU_STAT(addrerr_ld_exits) },
-	{ "syscall", VCPU_STAT(syscall_exits) },
-	{ "resvd_inst", VCPU_STAT(resvd_inst_exits) },
-	{ "break_inst", VCPU_STAT(break_inst_exits) },
-	{ "flush_dcache", VCPU_STAT(flush_dcache_exits) },
-	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
+	{ "wait", VCPU_STAT(wait_exits), KVM_STAT_VCPU },
+	{ "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU },
+	{ "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU },
+	{ "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU },
+	{ "cop_unsuable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
+	{ "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU },
+	{ "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU },
+	{ "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU },
+	{ "addrerr_st", VCPU_STAT(addrerr_st_exits), KVM_STAT_VCPU },
+	{ "addrerr_ld", VCPU_STAT(addrerr_ld_exits), KVM_STAT_VCPU },
+	{ "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU },
+	{ "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU },
+	{ "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU },
+	{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
+	{ "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
 	{NULL}
 };
 
 static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
 {
 	int i;
+
 	for_each_possible_cpu(i) {
 		vcpu->arch.guest_kernel_asid[i] = 0;
 		vcpu->arch.guest_user_asid[i] = 0;
 	}
+
 	return 0;
 }
 
-/* XXXKYMA: We are simulatoring a processor that has the WII bit set in Config7, so we
- * are "runnable" if interrupts are pending
+/*
+ * XXXKYMA: We are simulatoring a processor that has the WII bit set in
+ * Config7, so we are "runnable" if interrupts are pending
  */
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
@@ -103,7 +106,10 @@ static void kvm_mips_init_tlbs(struct kvm *kvm)
 {
 	unsigned long wired;
 
-	/* Add a wired entry to the TLB, it is used to map the commpage to the Guest kernel */
+	/*
+	 * Add a wired entry to the TLB, it is used to map the commpage to
+	 * the Guest kernel
+	 */
 	wired = read_c0_wired();
 	write_c0_wired(wired + 1);
 	mtc0_tlbw_hazard();
@@ -130,7 +136,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 		on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
 	}
 
-
 	return 0;
 }
 
@@ -185,8 +190,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	}
 }
 
-long
-kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
+			unsigned long arg)
 {
 	return -ENOIOCTLCMD;
 }
@@ -207,17 +212,17 @@ void kvm_arch_memslots_updated(struct kvm *kvm)
 {
 }
 
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
-				struct kvm_memory_slot *memslot,
-				struct kvm_userspace_memory_region *mem,
-				enum kvm_mr_change change)
+				   struct kvm_memory_slot *memslot,
+				   struct kvm_userspace_memory_region *mem,
+				   enum kvm_mr_change change)
 {
 	return 0;
 }
 
 void kvm_arch_commit_memory_region(struct kvm *kvm,
-				struct kvm_userspace_memory_region *mem,
-				const struct kvm_memory_slot *old,
-				enum kvm_mr_change change)
+				   struct kvm_userspace_memory_region *mem,
+				   const struct kvm_memory_slot *old,
+				   enum kvm_mr_change change)
 {
 	unsigned long npages = 0;
 	int i, err = 0;
@@ -246,9 +251,8 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 				  npages, kvm->arch.guest_pmap);
 
 			/* Now setup the page table */
-			for (i = 0; i < npages; i++) {
+			for (i = 0; i < npages; i++)
 				kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
-			}
 		}
 	}
 out:
@@ -270,8 +274,6 @@ void kvm_arch_flush_shadow(struct kvm *kvm)
 
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 {
-	extern char mips32_exception[], mips32_exceptionEnd[];
-	extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
 	int err, size, offset;
 	void *gebase;
 	int i;
@@ -290,14 +292,14 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 
 	kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
 
-	/* Allocate space for host mode exception handlers that handle
+	/*
+	 * Allocate space for host mode exception handlers that handle
 	 * guest mode exits
 	 */
-	if (cpu_has_veic || cpu_has_vint) {
+	if (cpu_has_veic || cpu_has_vint)
 		size = 0x200 + VECTORSPACING * 64;
-	} else {
+	else
 		size = 0x4000;
-	}
 
 	/* Save Linux EBASE */
 	vcpu->arch.host_ebase = (void *)read_c0_ebase();
@@ -345,7 +347,10 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 	local_flush_icache_range((unsigned long)gebase,
 				 (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
 
-	/* Allocate comm page for guest kernel, a TLB will be reserved for mapping GVA @ 0xFFFF8000 to this page */
+	/*
+	 * Allocate comm page for guest kernel, a TLB will be reserved for
+	 * mapping GVA @ 0xFFFF8000 to this page
+	 */
 	vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
 
 	if (!vcpu->arch.kseg0_commpage) {
@@ -391,9 +396,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 	kvm_arch_vcpu_free(vcpu);
 }
 
-int
-kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
-				    struct kvm_guest_debug *dbg)
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+					struct kvm_guest_debug *dbg)
 {
 	return -ENOIOCTLCMD;
 }
@@ -430,8 +434,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return r;
 }
 
-int
-kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
+int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
+			     struct kvm_mips_interrupt *irq)
 {
 	int intr = (int)irq->irq;
 	struct kvm_vcpu *dvcpu = NULL;
@@ -458,23 +462,20 @@ kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
 
 	dvcpu->arch.wait = 0;
 
-	if (waitqueue_active(&dvcpu->wq)) {
+	if (waitqueue_active(&dvcpu->wq))
 		wake_up_interruptible(&dvcpu->wq);
-	}
 
 	return 0;
 }
 
-int
-kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
-				struct kvm_mp_state *mp_state)
+int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+				    struct kvm_mp_state *mp_state)
 {
 	return -ENOIOCTLCMD;
 }
 
-int
-kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
-				struct kvm_mp_state *mp_state)
+int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+				    struct kvm_mp_state *mp_state)
 {
 	return -ENOIOCTLCMD;
 }
@@ -631,10 +632,12 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
 	}
 	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
 		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
+
 		return put_user(v, uaddr64);
 	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
 		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
 		u32 v32 = (u32)v;
+
 		return put_user(v32, uaddr32);
 	} else {
 		return -EINVAL;
@@ -727,8 +730,8 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-long
-kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
+			 unsigned long arg)
 {
 	struct kvm_vcpu *vcpu = filp->private_data;
 	void __user *argp = (void __user *)arg;
@@ -738,6 +741,7 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
 	case KVM_SET_ONE_REG:
 	case KVM_GET_ONE_REG: {
 		struct kvm_one_reg reg;
+
 		if (copy_from_user(&reg, argp, sizeof(reg)))
 			return -EFAULT;
 		if (ioctl == KVM_SET_ONE_REG)
@@ -772,6 +776,7 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
 	case KVM_INTERRUPT:
 		{
 			struct kvm_mips_interrupt irq;
+
 			r = -EFAULT;
 			if (copy_from_user(&irq, argp, sizeof(irq)))
 				goto out;
@@ -790,9 +795,7 @@ out:
 	return r;
 }
 
-/*
- * Get (and clear) the dirty memory log for a memory slot.
- */
+/* Get (and clear) the dirty memory log for a memory slot. */
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 {
 	struct kvm_memory_slot *memslot;
@@ -859,14 +862,14 @@ void kvm_arch_exit(void)
 	kvm_mips_callbacks = NULL;
 }
 
-int
-kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+				  struct kvm_sregs *sregs)
 {
 	return -ENOIOCTLCMD;
 }
 
-int
-kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+				  struct kvm_sregs *sregs)
 {
 	return -ENOIOCTLCMD;
 }
@@ -979,14 +982,11 @@ static void kvm_mips_comparecount_func(unsigned long data)
 	kvm_mips_callbacks->queue_timer_int(vcpu);
 
 	vcpu->arch.wait = 0;
-	if (waitqueue_active(&vcpu->wq)) {
+	if (waitqueue_active(&vcpu->wq))
 		wake_up_interruptible(&vcpu->wq);
-	}
 }
 
-/*
- * low level hrtimer wake routine.
- */
+/* low level hrtimer wake routine */
static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
 {
 	struct kvm_vcpu *vcpu;
@@ -1010,8 +1010,8 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 	return;
 }
 
-int
-kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr)
+int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+				  struct kvm_translation *tr)
 {
 	return 0;
 }
@@ -1022,8 +1022,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	return kvm_mips_callbacks->vcpu_setup(vcpu);
 }
 
-static
-void kvm_mips_set_c0_status(void)
+static void kvm_mips_set_c0_status(void)
 {
 	uint32_t status = read_c0_status();
 
@@ -1053,7 +1052,10 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	run->exit_reason = KVM_EXIT_UNKNOWN;
 	run->ready_for_interrupt_injection = 1;
 
-	/* Set the appropriate status bits based on host CPU features, before we hit the scheduler */
+	/*
+	 * Set the appropriate status bits based on host CPU features,
+	 * before we hit the scheduler
+	 */
 	kvm_mips_set_c0_status();
 
 	local_irq_enable();
@@ -1061,7 +1063,8 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
 			cause, opc, run, vcpu);
 
-	/* Do a privilege check, if in UM most of these exit conditions end up
+	/*
+	 * Do a privilege check, if in UM most of these exit conditions end up
 	 * causing an exception to be delivered to the Guest Kernel
 	 */
 	er = kvm_mips_check_privilege(cause, opc, run, vcpu);
@@ -1080,9 +1083,8 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		++vcpu->stat.int_exits;
 		trace_kvm_exit(vcpu, INT_EXITS);
 
-		if (need_resched()) {
+		if (need_resched())
 			cond_resched();
-		}
 
 		ret = RESUME_GUEST;
 		break;
@@ -1094,9 +1096,8 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
 		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
 		/* XXXKYMA: Might need to return to user space */
-		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) {
+		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
 			ret = RESUME_HOST;
-		}
 		break;
 
 	case T_TLB_MOD:
@@ -1106,10 +1107,9 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		break;
 
 	case T_TLB_ST_MISS:
-		kvm_debug
-		    ("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
-		     cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
-		     badvaddr);
+		kvm_debug("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
+			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
+			  badvaddr);
 
 		++vcpu->stat.tlbmiss_st_exits;
 		trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
@@ -1156,10 +1156,9 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		break;
 
 	default:
-		kvm_err
-		    ("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
-		     exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
-		     kvm_read_c0_guest_status(vcpu->arch.cop0));
+		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
+			exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
+			kvm_read_c0_guest_status(vcpu->arch.cop0));
 		kvm_arch_vcpu_dump_regs(vcpu);
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
@@ -1174,7 +1173,7 @@ skip_emul:
 	kvm_mips_deliver_interrupts(vcpu, cause);
 
 	if (!(ret & RESUME_HOST)) {
-		/* Only check for signals if not already exiting to userspace  */
+		/* Only check for signals if not already exiting to userspace */
 		if (signal_pending(current)) {
 			run->exit_reason = KVM_EXIT_INTR;
 			ret = (-EINTR << 2) | RESUME_HOST;
@@ -1195,11 +1194,13 @@ int __init kvm_mips_init(void)
 	if (ret)
 		return ret;
 
-	/* On MIPS, kernel modules are executed from "mapped space", which requires TLBs.
-	 * The TLB handling code is statically linked with the rest of the kernel (kvm_tlb.c)
-	 * to avoid the possibility of double faulting. The issue is that the TLB code
-	 * references routines that are part of the the KVM module,
-	 * which are only available once the module is loaded.
+	/*
+	 * On MIPS, kernel modules are executed from "mapped space", which
+	 * requires TLBs. The TLB handling code is statically linked with
+	 * the rest of the kernel (kvm_tlb.c) to avoid the possibility of
+	 * double faulting. The issue is that the TLB code references
+	 * routines that are part of the the KVM module, which are only
+	 * available once the module is loaded.
 	 */
 	kvm_mips_gfn_to_pfn = gfn_to_pfn;
 	kvm_mips_release_pfn_clean = kvm_release_pfn_clean;