Diffstat (limited to 'arch/powerpc/kvm/emulate_loadstore.c')
-rw-r--r--	arch/powerpc/kvm/emulate_loadstore.c	99
1 file changed, 36 insertions, 63 deletions
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
index f91b1309a0a8..ec60c7979718 100644
--- a/arch/powerpc/kvm/emulate_loadstore.c
+++ b/arch/powerpc/kvm/emulate_loadstore.c
@@ -1,16 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
  *
  * Copyright IBM Corp. 2007
  * Copyright 2011 Freescale Semiconductor, Inc.
@@ -39,7 +28,7 @@ static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
 {
 	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
-		kvmppc_core_queue_fpunavail(vcpu);
+		kvmppc_core_queue_fpunavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
 		return true;
 	}
 
 	return false;
@@ -51,7 +40,7 @@ static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
 static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
 {
 	if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
-		kvmppc_core_queue_vsx_unavail(vcpu);
+		kvmppc_core_queue_vsx_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
 		return true;
 	}
 
@@ -63,7 +52,7 @@ static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
 static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
 {
 	if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
-		kvmppc_core_queue_vec_unavail(vcpu);
+		kvmppc_core_queue_vec_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
 		return true;
 	}
 
@@ -82,11 +71,8 @@ static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
  */
 int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 {
-	struct kvm_run *run = vcpu->run;
-	u32 inst;
-	int ra, rs, rt;
+	ppc_inst_t inst;
 	enum emulation_result emulated = EMULATE_FAIL;
-	int advance = 1;
 	struct instruction_op op;
 
 	/* this default type might be overwritten by subcategories */
@@ -96,16 +82,6 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 	if (emulated != EMULATE_DONE)
 		return emulated;
 
-	ra = get_ra(inst);
-	rs = get_rs(inst);
-	rt = get_rt(inst);
-
-	/*
-	 * if mmio_vsx_tx_sx_enabled == 0, copy data between
-	 * VSR[0..31] and memory
-	 * if mmio_vsx_tx_sx_enabled == 1, copy data between
-	 * VSR[32..63] and memory
-	 */
 	vcpu->arch.mmio_vsx_copy_nums = 0;
 	vcpu->arch.mmio_vsx_offset = 0;
 	vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
@@ -116,24 +92,26 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 	vcpu->arch.mmio_host_swabbed = 0;
 
 	emulated = EMULATE_FAIL;
-	vcpu->arch.regs.msr = vcpu->arch.shared->msr;
+	vcpu->arch.regs.msr = kvmppc_get_msr(vcpu);
 	if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
 		int type = op.type & INSTR_TYPE_MASK;
 		int size = GETSIZE(op.type);
 
+		vcpu->mmio_is_write = OP_IS_STORE(type);
+
 		switch (type) {
 		case LOAD: {
 			int instr_byte_swap = op.type & BYTEREV;
 
 			if (op.type & SIGNEXT)
-				emulated = kvmppc_handle_loads(run, vcpu,
+				emulated = kvmppc_handle_loads(vcpu,
 						op.reg, size, !instr_byte_swap);
 			else
-				emulated = kvmppc_handle_load(run, vcpu,
+				emulated = kvmppc_handle_load(vcpu,
 						op.reg, size, !instr_byte_swap);
 
 			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
-				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
+				kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);
 
 			break;
 		}
@@ -146,14 +124,14 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 				vcpu->arch.mmio_sp64_extend = 1;
 
 			if (op.type & SIGNEXT)
-				emulated = kvmppc_handle_loads(run, vcpu,
+				emulated = kvmppc_handle_loads(vcpu,
 					KVM_MMIO_REG_FPR|op.reg, size, 1);
 			else
-				emulated = kvmppc_handle_load(run, vcpu,
+				emulated = kvmppc_handle_load(vcpu,
 					KVM_MMIO_REG_FPR|op.reg, size, 1);
 
 			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
-				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
+				kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);
 
 			break;
 #endif
@@ -186,12 +164,12 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 
 			if (size == 16) {
 				vcpu->arch.mmio_vmx_copy_nums = 2;
-				emulated = kvmppc_handle_vmx_load(run,
-						vcpu, KVM_MMIO_REG_VMX|op.reg,
+				emulated = kvmppc_handle_vmx_load(vcpu,
+						KVM_MMIO_REG_VMX|op.reg,
 						8, 1);
 			} else {
 				vcpu->arch.mmio_vmx_copy_nums = 1;
-				emulated = kvmppc_handle_vmx_load(run, vcpu,
+				emulated = kvmppc_handle_vmx_load(vcpu,
 						KVM_MMIO_REG_VMX|op.reg,
 						size, 1);
 			}
@@ -239,23 +217,23 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 				io_size_each = op.element_size;
 			}
 
-			emulated = kvmppc_handle_vsx_load(run, vcpu,
+			emulated = kvmppc_handle_vsx_load(vcpu,
 					KVM_MMIO_REG_VSX|op.reg, io_size_each,
 					1, op.type & SIGNEXT);
 			break;
 		}
 #endif
-		case STORE:
-			/* if need byte reverse, op.val has been reversed by
-			 * analyse_instr().
-			 */
-			emulated = kvmppc_handle_store(run, vcpu, op.val,
-					size, 1);
+		case STORE: {
+			int instr_byte_swap = op.type & BYTEREV;
+
+			emulated = kvmppc_handle_store(vcpu, kvmppc_get_gpr(vcpu, op.reg),
+					size, !instr_byte_swap);
 
 			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
-				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
+				kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);
 
 			break;
+		}
 #ifdef CONFIG_PPC_FPU
 		case STORE_FP:
 			if (kvmppc_check_fp_disabled(vcpu))
@@ -272,11 +250,11 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 			if (op.type & FPCONV)
 				vcpu->arch.mmio_sp64_extend = 1;
 
-			emulated = kvmppc_handle_store(run, vcpu,
-					VCPU_FPR(vcpu, op.reg), size, 1);
+			emulated = kvmppc_handle_store(vcpu,
+					kvmppc_get_fpr(vcpu, op.reg), size, 1);
 
 			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
-				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
+				kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);
 
 			break;
 #endif
@@ -312,12 +290,12 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 
 			if (size == 16) {
 				vcpu->arch.mmio_vmx_copy_nums = 2;
-				emulated = kvmppc_handle_vmx_store(run,
-						vcpu, op.reg, 8, 1);
+				emulated = kvmppc_handle_vmx_store(vcpu,
+						op.reg, 8, 1);
 			} else {
 				vcpu->arch.mmio_vmx_copy_nums = 1;
-				emulated = kvmppc_handle_vmx_store(run,
-						vcpu, op.reg, size, 1);
+				emulated = kvmppc_handle_vmx_store(vcpu,
+						op.reg, size, 1);
 			}
 
 			break;
@@ -360,7 +338,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 				io_size_each = op.element_size;
 			}
 
-			emulated = kvmppc_handle_vsx_store(run, vcpu,
+			emulated = kvmppc_handle_vsx_store(vcpu,
 					op.reg, io_size_each, 1);
 			break;
 		}
@@ -379,16 +357,11 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 		}
 	}
 
-	if (emulated == EMULATE_FAIL) {
-		advance = 0;
-		kvmppc_core_queue_program(vcpu, 0);
-	}
-
-	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);
+	trace_kvm_ppc_instr(ppc_inst_val(inst), kvmppc_get_pc(vcpu), emulated);
 
 	/* Advance past emulated instruction. */
-	if (advance)
-		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
+	if (emulated != EMULATE_FAIL)
+		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + ppc_inst_len(inst));
 
 	return emulated;
 }
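The behavioral core of this diff is the final hunk: the emulator now advances the guest PC by ppc_inst_len(inst) instead of a hard-coded 4, so prefixed (8-byte) Power ISA v3.1 instructions are stepped over correctly, and it keys off emulated != EMULATE_FAIL rather than the removed advance flag. Below is a minimal userspace sketch of that length logic only; struct ppc_inst, inst_is_prefixed() and inst_len() are simplified stand-ins for the kernel's ppc_inst_t, ppc_inst_prefixed() and ppc_inst_len() (arch/powerpc/include/asm/inst.h), not the real definitions.

#include <stdint.h>
#include <stdio.h>

/*
 * Simplified stand-in for the kernel's ppc_inst_t: a prefixed
 * instruction is a 32-bit prefix word followed by a 32-bit suffix.
 */
struct ppc_inst {
	uint32_t val;		/* first (or only) instruction word */
	uint32_t suffix;	/* only meaningful for prefixed instructions */
};

/* Power ISA v3.1: prefixed instructions have primary opcode 1. */
static int inst_is_prefixed(struct ppc_inst inst)
{
	return (inst.val >> 26) == 1;
}

/* Models ppc_inst_len(): 8 bytes when prefixed, otherwise 4. */
static unsigned long inst_len(struct ppc_inst inst)
{
	return inst_is_prefixed(inst) ? 8 : 4;
}

int main(void)
{
	/* mflr r0: an ordinary 4-byte instruction. */
	struct ppc_inst plain = { .val = 0x7c0802a6 };
	/* Illustrative prefixed encoding (primary opcode 1 in the prefix word). */
	struct ppc_inst prefixed = { .val = 0x06000000, .suffix = 0x38600000 };
	unsigned long pc = 0x1000;

	pc += inst_len(plain);		/* 0x1004 */
	pc += inst_len(prefixed);	/* 0x100c: prefixed, so skip 8 */
	printf("final pc = 0x%lx\n", pc);
	return 0;
}

Compiled and run, this prints final pc = 0x100c: the ordinary instruction advances the PC by 4 and the prefixed one by 8, which is exactly why the old kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4) had to go.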
