diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2008-10-16 15:36:00 -0700 | 
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-10-16 15:36:00 -0700 | 
| commit | 08d19f51f05a68ce89a289320ce4ed96e757df72 (patch) | |
| tree | 31c5d718d0aeaff5083fe533cd6e1f9fbbe846bb /arch/powerpc/kvm/powerpc.c | |
| parent | 1c95e1b69073cff5ff179e592fa1a1e182c78a17 (diff) | |
| parent | 2381ad241d0bea1253a37f314b270848067640bb (diff) | |
Merge branch 'kvm-updates/2.6.28' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm
* 'kvm-updates/2.6.28' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm: (134 commits)
  KVM: ia64: Add intel iommu support for guests.
  KVM: ia64: add directed mmio range support for kvm guests
  KVM: ia64: Make pmt table be able to hold physical mmio entries.
  KVM: Move irqchip_in_kernel() from ioapic.h to irq.h
  KVM: Separate irq ack notification out of arch/x86/kvm/irq.c
  KVM: Change is_mmio_pfn to kvm_is_mmio_pfn, and make it common for all archs
  KVM: Move device assignment logic to common code
  KVM: Device Assignment: Move vtd.c from arch/x86/kvm/ to virt/kvm/
  KVM: VMX: enable invlpg exiting if EPT is disabled
  KVM: x86: Silence various LAPIC-related host kernel messages
  KVM: Device Assignment: Map mmio pages into VT-d page table
  KVM: PIC: enhance IPI avoidance
  KVM: MMU: add "oos_shadow" parameter to disable oos
  KVM: MMU: speed up mmu_unsync_walk
  KVM: MMU: out of sync shadow core
  KVM: MMU: mmu_convert_notrap helper
  KVM: MMU: awareness of new kvm_mmu_zap_page behaviour
  KVM: MMU: mmu_parent_walk
  KVM: x86: trap invlpg
  KVM: MMU: sync roots on mmu reload
  ...
Diffstat (limited to 'arch/powerpc/kvm/powerpc.c')
| -rw-r--r-- | arch/powerpc/kvm/powerpc.c | 99 | 
1 files changed, 98 insertions(+), 1 deletions(-)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 53826a5f6c06..90a6fc422b23 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -27,6 +27,7 @@
 #include <asm/cputable.h>
 #include <asm/uaccess.h>
 #include <asm/kvm_ppc.h>
+#include <asm/tlbflush.h>
 
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
@@ -239,18 +240,114 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
 }
 
+/* Note: clearing MSR[DE] just means that the debug interrupt will not be
+ * delivered *immediately*. Instead, it simply sets the appropriate DBSR bits.
+ * If those DBSR bits are still set when MSR[DE] is re-enabled, the interrupt
+ * will be delivered as an "imprecise debug event" (which is indicated by
+ * DBSR[IDE]).
+ */
+static void kvmppc_disable_debug_interrupts(void)
+{
+	mtmsr(mfmsr() & ~MSR_DE);
+}
+
+static void kvmppc_restore_host_debug_state(struct kvm_vcpu *vcpu)
+{
+	kvmppc_disable_debug_interrupts();
+
+	mtspr(SPRN_IAC1, vcpu->arch.host_iac[0]);
+	mtspr(SPRN_IAC2, vcpu->arch.host_iac[1]);
+	mtspr(SPRN_IAC3, vcpu->arch.host_iac[2]);
+	mtspr(SPRN_IAC4, vcpu->arch.host_iac[3]);
+	mtspr(SPRN_DBCR1, vcpu->arch.host_dbcr1);
+	mtspr(SPRN_DBCR2, vcpu->arch.host_dbcr2);
+	mtspr(SPRN_DBCR0, vcpu->arch.host_dbcr0);
+	mtmsr(vcpu->arch.host_msr);
+}
+
+static void kvmppc_load_guest_debug_registers(struct kvm_vcpu *vcpu)
+{
+	struct kvm_guest_debug *dbg = &vcpu->guest_debug;
+	u32 dbcr0 = 0;
+
+	vcpu->arch.host_msr = mfmsr();
+	kvmppc_disable_debug_interrupts();
+
+	/* Save host debug register state. */
+	vcpu->arch.host_iac[0] = mfspr(SPRN_IAC1);
+	vcpu->arch.host_iac[1] = mfspr(SPRN_IAC2);
+	vcpu->arch.host_iac[2] = mfspr(SPRN_IAC3);
+	vcpu->arch.host_iac[3] = mfspr(SPRN_IAC4);
+	vcpu->arch.host_dbcr0 = mfspr(SPRN_DBCR0);
+	vcpu->arch.host_dbcr1 = mfspr(SPRN_DBCR1);
+	vcpu->arch.host_dbcr2 = mfspr(SPRN_DBCR2);
+
+	/* set registers up for guest */
+
+	if (dbg->bp[0]) {
+		mtspr(SPRN_IAC1, dbg->bp[0]);
+		dbcr0 |= DBCR0_IAC1 | DBCR0_IDM;
+	}
+	if (dbg->bp[1]) {
+		mtspr(SPRN_IAC2, dbg->bp[1]);
+		dbcr0 |= DBCR0_IAC2 | DBCR0_IDM;
+	}
+	if (dbg->bp[2]) {
+		mtspr(SPRN_IAC3, dbg->bp[2]);
+		dbcr0 |= DBCR0_IAC3 | DBCR0_IDM;
+	}
+	if (dbg->bp[3]) {
+		mtspr(SPRN_IAC4, dbg->bp[3]);
+		dbcr0 |= DBCR0_IAC4 | DBCR0_IDM;
+	}
+
+	mtspr(SPRN_DBCR0, dbcr0);
+	mtspr(SPRN_DBCR1, 0);
+	mtspr(SPRN_DBCR2, 0);
+}
+
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
+	int i;
+
+	if (vcpu->guest_debug.enabled)
+		kvmppc_load_guest_debug_registers(vcpu);
+
+	/* Mark every guest entry in the shadow TLB entry modified, so that they
+	 * will all be reloaded on the next vcpu run (instead of being
+	 * demand-faulted). */
+	for (i = 0; i <= tlb_44x_hwater; i++)
+		kvmppc_tlbe_set_modified(vcpu, i);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+	if (vcpu->guest_debug.enabled)
+		kvmppc_restore_host_debug_state(vcpu);
+
+	/* Don't leave guest TLB entries resident when being de-scheduled. */
+	/* XXX It would be nice to differentiate between heavyweight exit and
+	 * sched_out here, since we could avoid the TLB flush for heavyweight
+	 * exits. */
+	_tlbia();
 }
 
 int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
                                     struct kvm_debug_guest *dbg)
 {
-	return -ENOTSUPP;
+	int i;
+
+	vcpu->guest_debug.enabled = dbg->enabled;
+	if (vcpu->guest_debug.enabled) {
+		for (i=0; i < ARRAY_SIZE(vcpu->guest_debug.bp); i++) {
+			if (dbg->breakpoints[i].enabled)
+				vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address;
+			else
+				vcpu->guest_debug.bp[i] = 0;
+		}
+	}
+
+	return 0;
 }
 
 static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
