Diffstat (limited to 'arch/powerpc/include/asm/kvm_book3s.h')
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h | 619
1 file changed, 423 insertions(+), 196 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 08891d07aeb6..e1ff291ba891 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -1,16 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright SUSE Linux Products GmbH 2009
*
@@ -23,6 +12,7 @@
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_book3s_asm.h>
+#include <asm/guest-state-buffer.h>
struct kvmppc_bat {
u64 raw;
@@ -58,16 +48,56 @@ struct hpte_cache {
struct hlist_node list_pte_long;
struct hlist_node list_vpte;
struct hlist_node list_vpte_long;
+#ifdef CONFIG_PPC_BOOK3S_64
+ struct hlist_node list_vpte_64k;
+#endif
struct rcu_head rcu_head;
u64 host_vpn;
u64 pfn;
ulong slot;
struct kvmppc_pte pte;
+ int pagesize;
+};
+
+/*
+ * Struct for a virtual core.
+ * Note: entry_exit_map combines a bitmap of threads that have entered
+ * in the bottom 8 bits and a bitmap of threads that have exited in the
+ * next 8 bits. This is so that we can atomically set the entry bit
+ * iff the exit map is 0 without taking a lock.
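+ * For example, entry_exit_map == 0x101 means thread 0 has both entered
+ * and exited.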
+ */
+struct kvmppc_vcore {
+ int n_runnable;
+ int num_threads;
+ int entry_exit_map;
+ int napping_threads;
+ int first_vcpuid;
+ u16 pcpu;
+ u16 last_cpu;
+ u8 vcore_state;
+ u8 in_guest;
+ struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
+ struct list_head preempt_list;
+ spinlock_t lock;
+ struct rcuwait wait;
+ spinlock_t stoltb_lock; /* protects stolen_tb and preempt_tb */
+ u64 stolen_tb;
+ u64 preempt_tb;
+ struct kvm_vcpu *runner;
+ struct kvm *kvm;
+ u64 tb_offset; /* guest timebase - host timebase */
+ u64 tb_offset_applied; /* timebase offset currently in force */
+ ulong lpcr;
+ u32 arch_compat;
+ ulong pcr;
+ ulong dpdes; /* doorbell state (POWER8) */
+ ulong vtb; /* virtual timebase */
+ ulong conferring_threads;
+ unsigned int halt_poll_ns;
+ atomic_t online_count;
};
struct kvmppc_vcpu_book3s {
- struct kvm_vcpu vcpu;
- struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
struct kvmppc_sid_map sid_map[SID_MAP_NUM];
struct {
u64 esid;
@@ -81,8 +111,7 @@ struct kvmppc_vcpu_book3s {
u64 sdr1;
u64 hior;
u64 msr_mask;
- u64 purr_offset;
- u64 spurr_offset;
+ u64 vtb;
#ifdef CONFIG_PPC_BOOK3S_32
u32 vsid_pool[VSID_POOL_SIZE];
u32 vsid_next;
@@ -99,16 +128,16 @@ struct kvmppc_vcpu_book3s {
struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
+#ifdef CONFIG_PPC_BOOK3S_64
+ struct hlist_head hpte_hash_vpte_64k[HPTEG_HASH_NUM_VPTE_64K];
+#endif
int hpte_cache_count;
spinlock_t mmu_lock;
};
-#define CONTEXT_HOST 0
-#define CONTEXT_GUEST 1
-#define CONTEXT_GUEST_END 2
-
-#define VSID_REAL 0x0fffffffffc00000ULL
-#define VSID_BAT 0x0fffffffffb00000ULL
+#define VSID_REAL 0x07ffffffffc00000ULL
+#define VSID_BAT 0x07ffffffffb00000ULL
+#define VSID_64K 0x0800000000000000ULL
#define VSID_1T 0x1000000000000000ULL
#define VSID_REAL_DR 0x2000000000000000ULL
#define VSID_REAL_IR 0x4000000000000000ULL
@@ -118,344 +147,495 @@ extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask)
extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
-extern void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr);
extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
-extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
+extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
+ bool iswrite);
+extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
-extern int kvmppc_book3s_hv_page_fault(struct kvm_run *run,
- struct kvm_vcpu *vcpu, unsigned long addr,
- unsigned long status);
+extern int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
+ unsigned long addr, unsigned long status);
extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
unsigned long slb_v, unsigned long valid);
+extern int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
+ unsigned long gpa, gva_t ea, int is_store);
extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte);
extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern int kvmppc_mmu_hpte_sysinit(void);
extern void kvmppc_mmu_hpte_sysexit(void);
extern int kvmppc_mmu_hv_init(void);
+extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);
+
+extern int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
+ unsigned long ea, unsigned long dsisr);
+extern unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
+ gva_t eaddr, void *to, void *from,
+ unsigned long n);
+extern long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
+ void *to, unsigned long n);
+extern long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
+ void *from, unsigned long n);
+extern int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
+ struct kvmppc_pte *gpte, u64 root,
+ u64 *pte_ret_p);
+extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
+ struct kvmppc_pte *gpte, u64 table,
+ int table_index, u64 *pte_ret_p);
+extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
+ struct kvmppc_pte *gpte, bool data, bool iswrite);
+extern void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
+ unsigned int pshift, u64 lpid);
+extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
+ unsigned int shift,
+ const struct kvm_memory_slot *memslot,
+ u64 lpid);
+extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested,
+ bool writing, unsigned long gpa,
+ u64 lpid);
+extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
+ unsigned long gpa,
+ struct kvm_memory_slot *memslot,
+ bool writing,
+ pte_t *inserted_pte, unsigned int *levelp);
+extern int kvmppc_init_vm_radix(struct kvm *kvm);
+extern void kvmppc_free_radix(struct kvm *kvm);
+extern void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd,
+ u64 lpid);
+extern int kvmppc_radix_init(void);
+extern void kvmppc_radix_exit(void);
+extern void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ unsigned long gfn);
+extern bool kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ unsigned long gfn);
+extern bool kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ unsigned long gfn);
+extern long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
+ struct kvm_memory_slot *memslot, unsigned long *map);
+extern void kvmppc_radix_flush_memslot(struct kvm *kvm,
+ const struct kvm_memory_slot *memslot);
+extern int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
+/* XXX remove this export when load_last_inst() is generic */
extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
-extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
unsigned int vec);
extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
+extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac);
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
bool upper, u32 val);
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
-extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
-extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
+extern int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu);
+extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
+ bool writing, bool *writable, struct page **page);
extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
unsigned long *rmap, long pte_index, int realmode);
-extern void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
+extern void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot,
+ unsigned long gfn, unsigned long psize);
+extern void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
unsigned long pte_index);
-void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
+void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
unsigned long pte_index);
extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
unsigned long *nb_ret);
extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr,
unsigned long gpa, bool dirty);
-extern long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
- long pte_index, unsigned long pteh, unsigned long ptel);
extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
long pte_index, unsigned long pteh, unsigned long ptel,
pgd_t *pgdir, bool realmode, unsigned long *idx_ret);
extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
unsigned long pte_index, unsigned long avpn,
unsigned long *hpret);
-extern long kvmppc_hv_get_dirty_log(struct kvm *kvm,
+extern long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm,
struct kvm_memory_slot *memslot, unsigned long *map);
+extern void kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa,
+ struct kvm_memory_slot *memslot,
+ unsigned long *map);
+extern unsigned long kvmppc_filter_lpcr_hv(struct kvm *kvm,
+ unsigned long lpcr);
+extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
+ unsigned long mask);
+extern void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr);
+
+extern int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu);
+extern int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu);
+extern void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu);
extern void kvmppc_entry_trampoline(void);
extern void kvmppc_hv_entry_trampoline(void);
-extern void kvmppc_load_up_fpu(void);
-extern void kvmppc_load_up_altivec(void);
-extern void kvmppc_load_up_vsx(void);
extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
+extern void kvmppc_pr_init_default_hcalls(struct kvm *kvm);
+extern int kvmppc_hcall_impl_pr(unsigned long cmd);
+extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
+extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
+extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);
+
+long kvmppc_read_intr(void);
+void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr);
+void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu);
+void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu);
+void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu);
+void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu);
+#else
+static inline void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) {}
+static inline void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) {}
+static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {}
+static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {}
+#endif
-static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
+extern unsigned long nested_capabilities;
+long kvmhv_nested_init(void);
+void kvmhv_nested_exit(void);
+void kvmhv_vm_nested_init(struct kvm *kvm);
+long kvmhv_set_partition_table(struct kvm_vcpu *vcpu);
+long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu);
+void kvmhv_flush_lpid(u64 lpid);
+void kvmhv_set_ptbl_entry(u64 lpid, u64 dw0, u64 dw1);
+void kvmhv_release_all_nested(struct kvm *kvm);
+long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
+long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
+long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
+ unsigned long type, unsigned long pg_sizes,
+ unsigned long start, unsigned long end);
+int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu,
+ u64 time_limit, unsigned long lpcr);
+void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
+void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
+ struct hv_guest_state *hr);
+long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu);
+
+void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
+
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+
+extern struct static_key_false __kvmhv_is_nestedv2;
+
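+/* True when this hypervisor runs its guests through the guest state buffer
+ * (nested APIv2) interface rather than the original nested HV API. */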
+static inline bool kvmhv_is_nestedv2(void)
{
- return container_of(vcpu, struct kvmppc_vcpu_book3s, vcpu);
+ return static_branch_unlikely(&__kvmhv_is_nestedv2);
}
-extern void kvm_return_point(void);
+static inline bool kvmhv_is_nestedv1(void)
+{
+ return !static_branch_likely(&__kvmhv_is_nestedv2);
+}
-/* Also add subarch specific defines */
+#else
+
+static inline bool kvmhv_is_nestedv2(void)
+{
+ return false;
+}
+
+static inline bool kvmhv_is_nestedv1(void)
+{
+ return false;
+}
-#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
-#include <asm/kvm_book3s_32.h>
-#endif
-#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-#include <asm/kvm_book3s_64.h>
#endif
-#ifdef CONFIG_KVM_BOOK3S_PR
+int __kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs);
+int __kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs);
+int __kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden);
+int __kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden);
-static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
+static inline int kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu,
+ struct pt_regs *regs)
{
- return to_book3s(vcpu)->hior;
+ if (kvmhv_is_nestedv2())
+ return __kvmhv_nestedv2_reload_ptregs(vcpu, regs);
+ return 0;
+}
+static inline int kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu,
+ struct pt_regs *regs)
+{
+ if (kvmhv_is_nestedv2())
+ return __kvmhv_nestedv2_mark_dirty_ptregs(vcpu, regs);
+ return 0;
}
-static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
- unsigned long pending_now, unsigned long old_pending)
+static inline int kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden)
{
- if (pending_now)
- vcpu->arch.shared->int_pending = 1;
- else if (old_pending)
- vcpu->arch.shared->int_pending = 0;
+ if (kvmhv_is_nestedv2())
+ return __kvmhv_nestedv2_mark_dirty(vcpu, iden);
+ return 0;
}
+static inline int kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden)
+{
+ if (kvmhv_is_nestedv2())
+ return __kvmhv_nestedv2_cached_reload(vcpu, iden);
+ return 0;
+}
+
+extern int kvm_irq_bypass;
+
+static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.book3s;
+}
+
+/* Also add subarch specific defines */
+
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
+#include <asm/kvm_book3s_32.h>
+#endif
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#include <asm/kvm_book3s_64.h>
+#endif
+
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
- if ( num < 14 ) {
- struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
- svcpu->gpr[num] = val;
- svcpu_put(svcpu);
- to_book3s(vcpu)->shadow_vcpu->gpr[num] = val;
- } else
- vcpu->arch.gpr[num] = val;
+ vcpu->arch.regs.gpr[num] = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_GPR(num));
}
static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
- if ( num < 14 ) {
- struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
- ulong r = svcpu->gpr[num];
- svcpu_put(svcpu);
- return r;
- } else
- return vcpu->arch.gpr[num];
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_GPR(num)) < 0);
+ return vcpu->arch.regs.gpr[num];
}
static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
- struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
- svcpu->cr = val;
- svcpu_put(svcpu);
- to_book3s(vcpu)->shadow_vcpu->cr = val;
+ vcpu->arch.regs.ccr = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CR);
}
static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
- struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
- u32 r;
- r = svcpu->cr;
- svcpu_put(svcpu);
- return r;
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_CR) < 0);
+ return vcpu->arch.regs.ccr;
}
-static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
+static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
{
- struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
- svcpu->xer = val;
- to_book3s(vcpu)->shadow_vcpu->xer = val;
- svcpu_put(svcpu);
+ vcpu->arch.regs.xer = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_XER);
}
-static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
+static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
- struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
- u32 r;
- r = svcpu->xer;
- svcpu_put(svcpu);
- return r;
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_XER) < 0);
+ return vcpu->arch.regs.xer;
}
static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
- struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
- svcpu->ctr = val;
- svcpu_put(svcpu);
+ vcpu->arch.regs.ctr = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CTR);
}
static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
- struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
- ulong r;
- r = svcpu->ctr;
- svcpu_put(svcpu);
- return r;
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_CTR) < 0);
+ return vcpu->arch.regs.ctr;
}
static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
- struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
- svcpu->lr = val;
- svcpu_put(svcpu);
+ vcpu->arch.regs.link = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LR);
}
static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
- struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
- ulong r;
- r = svcpu->lr;
- svcpu_put(svcpu);
- return r;
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_LR) < 0);
+ return vcpu->arch.regs.link;
}
static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
- struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
- svcpu->pc = val;
- svcpu_put(svcpu);
+ vcpu->arch.regs.nip = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_NIA);
}
static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
- struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
- ulong r;
- r = svcpu->pc;
- svcpu_put(svcpu);
- return r;
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_NIA) < 0);
+ return vcpu->arch.regs.nip;
}
-static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
+static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu);
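+/* True when the guest's MSR[LE] differs from the host kernel's endianness. */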
+static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
{
- ulong pc = kvmppc_get_pc(vcpu);
- struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
- u32 r;
-
- /* Load the instruction manually if it failed to do so in the
- * exit path */
- if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
- kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
-
- r = svcpu->last_inst;
- svcpu_put(svcpu);
- return r;
+ return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE);
}
static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
- struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
- ulong r;
- r = svcpu->fault_dar;
- svcpu_put(svcpu);
- return r;
+ return vcpu->arch.fault_dar;
}
-static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
+static inline u64 kvmppc_get_fpr(struct kvm_vcpu *vcpu, int i)
{
- ulong crit_raw = vcpu->arch.shared->critical;
- ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
- bool crit;
-
- /* Truncate crit indicators in 32 bit mode */
- if (!(vcpu->arch.shared->msr & MSR_SF)) {
- crit_raw &= 0xffffffff;
- crit_r1 &= 0xffffffff;
- }
-
- /* Critical section when crit == r1 */
- crit = (crit_raw == crit_r1);
- /* ... and we're in supervisor mode */
- crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
-
- return crit;
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(i)) < 0);
+ return vcpu->arch.fp.fpr[i][TS_FPROFFSET];
}
-#else /* CONFIG_KVM_BOOK3S_PR */
-static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
+static inline void kvmppc_set_fpr(struct kvm_vcpu *vcpu, int i, u64 val)
{
- return 0;
+ vcpu->arch.fp.fpr[i][TS_FPROFFSET] = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(i));
}
-static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
- unsigned long pending_now, unsigned long old_pending)
+static inline u64 kvmppc_get_fpscr(struct kvm_vcpu *vcpu)
{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_FPSCR) < 0);
+ return vcpu->arch.fp.fpscr;
}
-static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
+static inline void kvmppc_set_fpscr(struct kvm_vcpu *vcpu, u64 val)
{
- vcpu->arch.gpr[num] = val;
+ vcpu->arch.fp.fpscr = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_FPSCR);
}
-static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
+
+static inline u64 kvmppc_get_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j)
{
- return vcpu->arch.gpr[num];
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(i)) < 0);
+ return vcpu->arch.fp.fpr[i][j];
}
-static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
+static inline void kvmppc_set_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j,
+ u64 val)
{
- vcpu->arch.cr = val;
+ vcpu->arch.fp.fpr[i][j] = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(i));
}
-static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
+#ifdef CONFIG_ALTIVEC
+static inline void kvmppc_get_vsx_vr(struct kvm_vcpu *vcpu, int i, vector128 *v)
{
- return vcpu->arch.cr;
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(32 + i)) < 0);
+ *v = vcpu->arch.vr.vr[i];
}
-static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
+static inline void kvmppc_set_vsx_vr(struct kvm_vcpu *vcpu, int i,
+ vector128 *val)
{
- vcpu->arch.xer = val;
+ vcpu->arch.vr.vr[i] = *val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(32 + i));
}
-static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
+static inline u32 kvmppc_get_vscr(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.xer;
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSCR) < 0);
+ return vcpu->arch.vr.vscr.u[3];
}
-static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
+static inline void kvmppc_set_vscr(struct kvm_vcpu *vcpu, u32 val)
{
- vcpu->arch.ctr = val;
+ vcpu->arch.vr.vscr.u[3] = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSCR);
}
+#endif
-static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.ctr;
+#define KVMPPC_BOOK3S_VCPU_ACCESSOR_SET(reg, size, iden) \
+static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
+{ \
+ \
+ vcpu->arch.reg = val; \
+ kvmhv_nestedv2_mark_dirty(vcpu, iden); \
}
-static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
-{
- vcpu->arch.lr = val;
+#define KVMPPC_BOOK3S_VCPU_ACCESSOR_GET(reg, size, iden) \
+static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
+{ \
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0); \
+ return vcpu->arch.reg; \
}
-static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.lr;
+#define KVMPPC_BOOK3S_VCPU_ACCESSOR(reg, size, iden) \
+ KVMPPC_BOOK3S_VCPU_ACCESSOR_SET(reg, size, iden) \
+ KVMPPC_BOOK3S_VCPU_ACCESSOR_GET(reg, size, iden) \
+
+KVMPPC_BOOK3S_VCPU_ACCESSOR(pid, 32, KVMPPC_GSID_PIDR)
+KVMPPC_BOOK3S_VCPU_ACCESSOR(tar, 64, KVMPPC_GSID_TAR)
+KVMPPC_BOOK3S_VCPU_ACCESSOR(ebbhr, 64, KVMPPC_GSID_EBBHR)
+KVMPPC_BOOK3S_VCPU_ACCESSOR(ebbrr, 64, KVMPPC_GSID_EBBRR)
+KVMPPC_BOOK3S_VCPU_ACCESSOR(bescr, 64, KVMPPC_GSID_BESCR)
+KVMPPC_BOOK3S_VCPU_ACCESSOR(ic, 64, KVMPPC_GSID_IC)
+KVMPPC_BOOK3S_VCPU_ACCESSOR(vrsave, 64, KVMPPC_GSID_VRSAVE)
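+/* Each invocation above generates a getter/setter pair, e.g.
+ * kvmppc_get_tar()/kvmppc_set_tar() for vcpu->arch.tar, wired into the
+ * nestedv2 mark-dirty/cached-reload hooks. */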
+
+
+#define KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(reg, size, iden) \
+static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
+{ \
+ vcpu->arch.vcore->reg = val; \
+ kvmhv_nestedv2_mark_dirty(vcpu, iden); \
}
-static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
-{
- vcpu->arch.pc = val;
+#define KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(reg, size, iden) \
+static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
+{ \
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0); \
+ return vcpu->arch.vcore->reg; \
}
-static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
+#define KVMPPC_BOOK3S_VCORE_ACCESSOR(reg, size, iden) \
+ KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(reg, size, iden) \
+ KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(reg, size, iden) \
+
+
+KVMPPC_BOOK3S_VCORE_ACCESSOR(vtb, 64, KVMPPC_GSID_VTB)
+KVMPPC_BOOK3S_VCORE_ACCESSOR(dpdes, 64, KVMPPC_GSID_DPDES)
+KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(arch_compat, 32, KVMPPC_GSID_LOGICAL_PVR)
+KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(lpcr, 64, KVMPPC_GSID_LPCR)
+KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(tb_offset, 64, KVMPPC_GSID_TB_OFFSET)
+
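+/* tb_offset has only a generated setter; this hand-written getter returns
+ * the cached value without a nestedv2 reload. */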
+static inline u64 kvmppc_get_tb_offset(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.pc;
+ return vcpu->arch.vcore->tb_offset;
}
-static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
+static inline u64 kvmppc_get_dec_expires(struct kvm_vcpu *vcpu)
{
- ulong pc = kvmppc_get_pc(vcpu);
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_DEC_EXPIRY_TB) < 0);
+ return vcpu->arch.dec_expires;
+}
- /* Load the instruction manually if it failed to do so in the
- * exit path */
- if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
- kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
+static inline void kvmppc_set_dec_expires(struct kvm_vcpu *vcpu, u64 val)
+{
+ vcpu->arch.dec_expires = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_DEC_EXPIRY_TB);
+}
- return vcpu->arch.last_inst;
+/* Expiry time of vcpu DEC relative to host TB */
+static inline u64 kvmppc_dec_expires_host_tb(struct kvm_vcpu *vcpu)
+{
+ return kvmppc_get_dec_expires(vcpu) - kvmppc_get_tb_offset(vcpu);
}
-static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
+static inline bool is_kvmppc_resume_guest(int r)
{
- return vcpu->arch.fault_dar;
+ return (r == RESUME_GUEST || r == RESUME_GUEST_NV);
}
-static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
+static inline bool is_kvmppc_hv_enabled(struct kvm *kvm);
+static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu)
{
- return false;
+ /* Only PR KVM supports the magic page */
+ return !is_kvmppc_hv_enabled(vcpu->kvm);
}
-#endif
+
+extern int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu);
+extern int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu);
/* Magic register values loaded into r3 and r4 before the 'sc' assembly
* instruction for the OSI hypercalls */
@@ -466,7 +646,54 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
/* TO = 31 for unconditional trap */
#define INS_TW 0x7fe00008
-/* LPIDs we support with this build -- runtime limit may be lower */
-#define KVMPPC_NR_LPIDS (LPID_RSVD + 1)
+#define SPLIT_HACK_MASK 0xff000000
+#define SPLIT_HACK_OFFS 0xfb000000
+
+/*
+ * This packs a VCPU ID from the [0..KVM_MAX_VCPU_IDS) space down to the
+ * [0..KVM_MAX_VCPUS) space, using knowledge of the guest's core stride
+ * (but not its actual threading mode, which is not available) to avoid
+ * collisions.
+ *
+ * The implementation leaves VCPU IDs from the range [0..KVM_MAX_VCPUS) (block
+ * 0) unchanged: if the guest is filling each VCORE completely then it will be
+ * using consecutive IDs and it will fill the space without any packing.
+ *
+ * For higher VCPU IDs, the packed ID is based on the VCPU ID modulo
+ * KVM_MAX_VCPUS (effectively masking off the top bits) and then an offset is
+ * added to avoid collisions.
+ *
+ * VCPU IDs in the range [KVM_MAX_VCPUS..(KVM_MAX_VCPUS*2)) (block 1) are only
+ * possible if the guest is leaving at least 1/2 of each VCORE empty, so IDs
+ * can be safely packed into the second half of each VCORE by adding an offset
+ * of (stride / 2).
+ *
+ * Similarly, if VCPU IDs in the range [(KVM_MAX_VCPUS*2)..(KVM_MAX_VCPUS*4))
+ * (blocks 2 and 3) are seen, the guest must be leaving at least 3/4 of each
+ * VCORE empty so packed IDs can be offset by (stride / 4) and (stride * 3 / 4).
+ *
+ * Finally, VCPU IDs from blocks 4..7 will only be seen if the guest is using a
+ * stride of 8 and 1 thread per core so the remaining offsets of 1, 5, 3 and 7
+ * must be free to use.
+ *
+ * (The offsets for each block are stored in block_offsets[], indexed by the
+ * block number if the stride is 8. For cases where the guest's stride is less
+ * than 8, we can re-use the block_offsets array by multiplying the block
+ * number by (MAX_SMT_THREADS / stride) to reach the correct entry.)
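+ *
+ * Worked example: with a stride of 4, guest ID (KVM_MAX_VCPUS + 8) is in
+ * block 1, giving block_offsets[1 * (8 / 4)] = block_offsets[2] = 2, so it
+ * packs to 8 + 2 = 10, i.e. thread 2 (the second half) of the third core.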
+ */
+static inline u32 kvmppc_pack_vcpu_id(struct kvm *kvm, u32 id)
+{
+ const int block_offsets[MAX_SMT_THREADS] = {0, 4, 2, 6, 1, 5, 3, 7};
+ int stride = kvm->arch.emul_smt_mode;
+ int block = (id / KVM_MAX_VCPUS) * (MAX_SMT_THREADS / stride);
+ u32 packed_id;
+
+ if (WARN_ONCE(block >= MAX_SMT_THREADS, "VCPU ID too large to pack"))
+ return 0;
+ packed_id = (id % KVM_MAX_VCPUS) + block_offsets[block];
+ if (WARN_ONCE(packed_id >= KVM_MAX_VCPUS, "VCPU ID packing failed"))
+ return 0;
+ return packed_id;
+}
#endif /* __ASM_KVM_BOOK3S_H__ */