/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_H__
#define __ASM_KVM_BOOK3S_H__

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/guest-state-buffer.h>

struct kvmppc_bat {
	u64 raw;
	u32 bepi;
	u32 bepi_mask;
	u32 brpn;
	u8 wimg;
	u8 pp;
	bool vs		: 1;
	bool vp		: 1;
};

struct kvmppc_sid_map {
	u64 guest_vsid;
	u64 guest_esid;
	u64 host_vsid;
	bool valid	: 1;
};

#define SID_MAP_BITS    9
#define SID_MAP_NUM     (1 << SID_MAP_BITS)
#define SID_MAP_MASK    (SID_MAP_NUM - 1)

#ifdef CONFIG_PPC_BOOK3S_64
#define SID_CONTEXTS	1
#else
#define SID_CONTEXTS	128
#define VSID_POOL_SIZE	(SID_CONTEXTS * 16)
#endif

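/*
 * A cached HPTE sits on several hash lists at once so that it can be
 * looked up (and invalidated) by effective address, by virtual page
 * number, and at coarser granularities; see the kvmppc_mmu_pte_*flush()
 * declarations below.
 */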
struct hpte_cache {
	struct hlist_node list_pte;
	struct hlist_node list_pte_long;
	struct hlist_node list_vpte;
	struct hlist_node list_vpte_long;
#ifdef CONFIG_PPC_BOOK3S_64
	struct hlist_node list_vpte_64k;
#endif
	struct rcu_head rcu_head;
	u64 host_vpn;
	u64 pfn;
	ulong slot;
	struct kvmppc_pte pte;
	int pagesize;
};

/*
 * Struct for a virtual core.
 * Note: entry_exit_map combines a bitmap of threads that have entered
 * in the bottom 8 bits and a bitmap of threads that have exited in the
 * next 8 bits.  This is so that we can atomically set the entry bit
 * iff the exit map is 0 without taking a lock (see the illustrative
 * sketch after the struct).
 */
struct kvmppc_vcore {
	int n_runnable;
	int num_threads;
	int entry_exit_map;
	int napping_threads;
	int first_vcpuid;
	u16 pcpu;
	u16 last_cpu;
	u8 vcore_state;
	u8 in_guest;
	struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
	struct list_head preempt_list;
	spinlock_t lock;
	struct rcuwait wait;
	spinlock_t stoltb_lock;	/* protects stolen_tb and preempt_tb */
	u64 stolen_tb;
	u64 preempt_tb;
	struct kvm_vcpu *runner;
	struct kvm *kvm;
	u64 tb_offset;		/* guest timebase - host timebase */
	u64 tb_offset_applied;	/* timebase offset currently in force */
	ulong lpcr;
	u32 arch_compat;
	ulong pcr;
	ulong dpdes;		/* doorbell state (POWER8) */
	ulong vtb;		/* virtual timebase */
	ulong conferring_threads;
	unsigned int halt_poll_ns;
	atomic_t online_count;
};
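
/*
 * Illustrative sketch only: this helper is not part of the kernel and
 * its name is hypothetical.  It shows one way a thread could claim its
 * entry bit in entry_exit_map with a lock-free cmpxchg loop, succeeding
 * only while no thread has exited yet (the exit half, bits 8-15, is 0).
 */
static inline bool example_try_enter_vcore(struct kvmppc_vcore *vc, int thread)
{
	int old, new;

	do {
		old = vc->entry_exit_map;
		if (old >> 8)			/* some thread already exited */
			return false;
		new = old | (1 << thread);	/* set our entry bit (bits 0-7) */
	} while (cmpxchg(&vc->entry_exit_map, old, new) != old);
	return true;
}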

struct kvmppc_vcpu_book3s {
	struct kvmppc_sid_map sid_map[SID_MAP_NUM];
	struct {
		u64 esid;
		u64 vsid;
	} slb_shadow[64];
	u8 slb_shadow_max;
	struct kvmppc_bat ibat[8];
	struct kvmppc_bat dbat[8];
	u64 hid[6];
	u64 gqr[8];
	u64 sdr1;
	u64 hior;
	u64 msr_mask;
	u64 vtb;
#ifdef CONFIG_PPC_BOOK3S_32
	u32 vsid_pool[VSID_POOL_SIZE];
	u32 vsid_next;
#else
	u64 proto_vsid_first;
	u64 proto_vsid_max;
	u64 proto_vsid_next;
#endif
	int context_id[SID_CONTEXTS];

	bool hior_explicit;		/* HIOR is set by ioctl, not PVR */

	struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
	struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
	struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
#ifdef CONFIG_PPC_BOOK3S_64
	struct hlist_head hpte_hash_vpte_64k[HPTEG_HASH_NUM_VPTE_64K];
#endif
	int hpte_cache_count;
	spinlock_t mmu_lock;
};

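/*
 * Marker bits ORed into shadow VSIDs so that translations created for
 * different guest contexts (real-mode data/instruction accesses, BAT
 * mappings, 64K pages, 1T segments, problem state) stay distinct.
 */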
#define VSID_REAL	0x07ffffffffc00000ULL
#define VSID_BAT	0x07ffffffffb00000ULL
#define VSID_64K	0x0800000000000000ULL
#define VSID_1T		0x1000000000000000ULL
#define VSID_REAL_DR	0x2000000000000000ULL
#define VSID_REAL_IR	0x4000000000000000ULL
#define VSID_PR		0x8000000000000000ULL

extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
			       bool iswrite);
extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
extern int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
			unsigned long addr, unsigned long status);
extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
			unsigned long slb_v, unsigned long valid);
extern int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
			unsigned long gpa, gva_t ea, int is_store);

extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte);
extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern int kvmppc_mmu_hpte_sysinit(void);
extern void kvmppc_mmu_hpte_sysexit(void);
extern int kvmppc_mmu_hv_init(void);
extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);

extern int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
			unsigned long ea, unsigned long dsisr);
extern unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
					gva_t eaddr, void *to, void *from,
					unsigned long n);
extern long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
					void *to, unsigned long n);
extern long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
				      void *from, unsigned long n);
extern int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
				      struct kvmppc_pte *gpte, u64 root,
				      u64 *pte_ret_p);
extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, u64 table,
			int table_index, u64 *pte_ret_p);
extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, bool data, bool iswrite);
extern void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
				    unsigned int pshift, u64 lpid);
extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
			unsigned int shift,
			const struct kvm_memory_slot *memslot,
			u64 lpid);
extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested,
				    bool writing, unsigned long gpa,
				    u64 lpid);
extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
				unsigned long gpa,
				struct kvm_memory_slot *memslot,
				bool writing, bool kvm_ro,
				pte_t *inserted_pte, unsigned int *levelp);
extern int kvmppc_init_vm_radix(struct kvm *kvm);
extern void kvmppc_free_radix(struct kvm *kvm);
extern void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd,
				      u64 lpid);
extern int kvmppc_radix_init(void);
extern void kvmppc_radix_exit(void);
extern void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			    unsigned long gfn);
extern bool kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			  unsigned long gfn);
extern bool kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			       unsigned long gfn);
extern long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map);
extern void kvmppc_radix_flush_memslot(struct kvm *kvm,
			const struct kvm_memory_slot *memslot);
extern int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);

/* XXX remove this export when load_last_inst() is generic */
extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
					  unsigned int vec);
extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac);
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
			   bool upper, u32 val);
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
extern int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu);
extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
			bool writing, bool *writable);
extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			unsigned long *rmap, long pte_index, int realmode);
extern void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot,
			unsigned long gfn, unsigned long psize);
extern void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index);
void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index);
extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
			unsigned long *nb_ret);
extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr,
			unsigned long gpa, bool dirty);
extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
			long pte_index, unsigned long pteh, unsigned long ptel,
			pgd_t *pgdir, bool realmode, unsigned long *idx_ret);
extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
			unsigned long pte_index, unsigned long avpn,
			unsigned long *hpret);
extern long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map);
extern void kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa,
			struct kvm_memory_slot *memslot,
			unsigned long *map);
extern unsigned long kvmppc_filter_lpcr_hv(struct kvm *kvm,
			unsigned long lpcr);
extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
			unsigned long mask);
extern void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr);

extern int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu);
extern int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu);
extern void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu);

extern void kvmppc_entry_trampoline(void);
extern void kvmppc_hv_entry_trampoline(void);
extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
extern void kvmppc_pr_init_default_hcalls(struct kvm *kvm);
extern int kvmppc_hcall_impl_pr(unsigned long cmd);
extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);

long kvmppc_read_intr(void);
void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr);
void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu);
void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu);
void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu);
void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu);
#else
static inline void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {}
#endif

extern unsigned long nested_capabilities;
long kvmhv_nested_init(void);
void kvmhv_nested_exit(void);
void kvmhv_vm_nested_init(struct kvm *kvm);
long kvmhv_set_partition_table(struct kvm_vcpu *vcpu);
long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu);
void kvmhv_set_ptbl_entry(u64 lpid, u64 dw0, u64 dw1);
void kvmhv_release_all_nested(struct kvm *kvm);
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
			     unsigned long type, unsigned long pg_sizes,
			     unsigned long start, unsigned long end);
int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu,
			  u64 time_limit, unsigned long lpcr);
void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
				   struct hv_guest_state *hr);
long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu);

void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);


#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE

extern struct static_key_false __kvmhv_is_nestedv2;

static inline bool kvmhv_is_nestedv2(void)
{
	return static_branch_unlikely(&__kvmhv_is_nestedv2);
}

static inline bool kvmhv_is_nestedv1(void)
{
	return !static_branch_likely(&__kvmhv_is_nestedv2);
}

#else

static inline bool kvmhv_is_nestedv2(void)
{
	return false;
}

static inline bool kvmhv_is_nestedv1(void)
{
	return false;
}

#endif

int __kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs);
int __kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs);
int __kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden);
int __kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden);

static inline int kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu,
					       struct pt_regs *regs)
{
	if (kvmhv_is_nestedv2())
		return __kvmhv_nestedv2_reload_ptregs(vcpu, regs);
	return 0;
}
static inline int kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu,
						   struct pt_regs *regs)
{
	if (kvmhv_is_nestedv2())
		return __kvmhv_nestedv2_mark_dirty_ptregs(vcpu, regs);
	return 0;
}

static inline int kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden)
{
	if (kvmhv_is_nestedv2())
		return __kvmhv_nestedv2_mark_dirty(vcpu, iden);
	return 0;
}

static inline int kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden)
{
	if (kvmhv_is_nestedv2())
		return __kvmhv_nestedv2_cached_reload(vcpu, iden);
	return 0;
}

extern int kvm_irq_bypass;

static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.book3s;
}

/* Also add subarch specific defines */

#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
#include <asm/kvm_book3s_32.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/kvm_book3s_64.h>
#endif

static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
	vcpu->arch.regs.gpr[num] = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_GPR(num));
}

static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_GPR(num)) < 0);
	return vcpu->arch.regs.gpr[num];
}

static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
	vcpu->arch.regs.ccr = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CR);
}

static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_CR) < 0);
	return vcpu->arch.regs.ccr;
}

static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.xer = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_XER);
}

static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_XER) < 0);
	return vcpu->arch.regs.xer;
}

static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.ctr = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CTR);
}

static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_CTR) < 0);
	return vcpu->arch.regs.ctr;
}

static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.link = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LR);
}

static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_LR) < 0);
	return vcpu->arch.regs.link;
}

static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.nip = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_NIA);
}

static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_NIA) < 0);
	return vcpu->arch.regs.nip;
}

static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu);
static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
{
	return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE);
}

static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault_dar;
}

static inline u64 kvmppc_get_fpr(struct kvm_vcpu *vcpu, int i)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(i)) < 0);
	return vcpu->arch.fp.fpr[i][TS_FPROFFSET];
}

static inline void kvmppc_set_fpr(struct kvm_vcpu *vcpu, int i, u64 val)
{
	vcpu->arch.fp.fpr[i][TS_FPROFFSET] = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(i));
}

static inline u64 kvmppc_get_fpscr(struct kvm_vcpu *vcpu)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_FPSCR) < 0);
	return vcpu->arch.fp.fpscr;
}

static inline void kvmppc_set_fpscr(struct kvm_vcpu *vcpu, u64 val)
{
	vcpu->arch.fp.fpscr = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_FPSCR);
}


static inline u64 kvmppc_get_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(i)) < 0);
	return vcpu->arch.fp.fpr[i][j];
}

static inline void kvmppc_set_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j,
				      u64 val)
{
	vcpu->arch.fp.fpr[i][j] = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(i));
}

#ifdef CONFIG_ALTIVEC
static inline void kvmppc_get_vsx_vr(struct kvm_vcpu *vcpu, int i, vector128 *v)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(32 + i)) < 0);
	*v = vcpu->arch.vr.vr[i];
}

static inline void kvmppc_set_vsx_vr(struct kvm_vcpu *vcpu, int i,
				     vector128 *val)
{
	vcpu->arch.vr.vr[i] = *val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(32 + i));
}

static inline u32 kvmppc_get_vscr(struct kvm_vcpu *vcpu)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSCR) < 0);
	return vcpu->arch.vr.vscr.u[3];
}

static inline void kvmppc_set_vscr(struct kvm_vcpu *vcpu, u32 val)
{
	vcpu->arch.vr.vscr.u[3] = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSCR);
}
#endif

#define KVMPPC_BOOK3S_VCPU_ACCESSOR_SET(reg, size, iden)		\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{									\
	vcpu->arch.reg = val;						\
	kvmhv_nestedv2_mark_dirty(vcpu, iden);				\
}

#define KVMPPC_BOOK3S_VCPU_ACCESSOR_GET(reg, size, iden)		\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0);		\
	return vcpu->arch.reg;						\
}

#define KVMPPC_BOOK3S_VCPU_ACCESSOR(reg, size, iden)			\
	KVMPPC_BOOK3S_VCPU_ACCESSOR_SET(reg, size, iden)		\
	KVMPPC_BOOK3S_VCPU_ACCESSOR_GET(reg, size, iden)

KVMPPC_BOOK3S_VCPU_ACCESSOR(pid, 32, KVMPPC_GSID_PIDR)
KVMPPC_BOOK3S_VCPU_ACCESSOR(tar, 64, KVMPPC_GSID_TAR)
KVMPPC_BOOK3S_VCPU_ACCESSOR(ebbhr, 64, KVMPPC_GSID_EBBHR)
KVMPPC_BOOK3S_VCPU_ACCESSOR(ebbrr, 64, KVMPPC_GSID_EBBRR)
KVMPPC_BOOK3S_VCPU_ACCESSOR(bescr, 64, KVMPPC_GSID_BESCR)
KVMPPC_BOOK3S_VCPU_ACCESSOR(ic, 64, KVMPPC_GSID_IC)
KVMPPC_BOOK3S_VCPU_ACCESSOR(vrsave, 64, KVMPPC_GSID_VRSAVE)
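
/*
 * For reference, KVMPPC_BOOK3S_VCPU_ACCESSOR(tar, 64, KVMPPC_GSID_TAR)
 * above expands to the equivalent of:
 *
 *	static inline void kvmppc_set_tar(struct kvm_vcpu *vcpu, u64 val)
 *	{
 *		vcpu->arch.tar = val;
 *		kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_TAR);
 *	}
 *
 *	static inline u64 kvmppc_get_tar(struct kvm_vcpu *vcpu)
 *	{
 *		WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_TAR) < 0);
 *		return vcpu->arch.tar;
 *	}
 */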


#define KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(reg, size, iden)		\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{									\
	vcpu->arch.vcore->reg = val;					\
	kvmhv_nestedv2_mark_dirty(vcpu, iden);				\
}

#define KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(reg, size, iden)		\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0);		\
	return vcpu->arch.vcore->reg;					\
}

#define KVMPPC_BOOK3S_VCORE_ACCESSOR(reg, size, iden)			\
	KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(reg, size, iden)		\
	KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(reg, size, iden)


KVMPPC_BOOK3S_VCORE_ACCESSOR(vtb, 64, KVMPPC_GSID_VTB)
KVMPPC_BOOK3S_VCORE_ACCESSOR(tb_offset, 64, KVMPPC_GSID_TB_OFFSET)
KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(arch_compat, 32, KVMPPC_GSID_LOGICAL_PVR)
KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(lpcr, 64, KVMPPC_GSID_LPCR)

static inline u64 kvmppc_get_dec_expires(struct kvm_vcpu *vcpu)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_TB_OFFSET) < 0);
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_DEC_EXPIRY_TB) < 0);
	return vcpu->arch.dec_expires;
}

static inline void kvmppc_set_dec_expires(struct kvm_vcpu *vcpu, u64 val)
{
	vcpu->arch.dec_expires = val;
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_TB_OFFSET) < 0);
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_DEC_EXPIRY_TB);
}

/* Expiry time of vcpu DEC relative to host TB */
static inline u64 kvmppc_dec_expires_host_tb(struct kvm_vcpu *vcpu)
{
	return kvmppc_get_dec_expires(vcpu) - kvmppc_get_tb_offset(vcpu);
}

static inline bool is_kvmppc_resume_guest(int r)
{
	return (r == RESUME_GUEST || r == RESUME_GUEST_NV);
}

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm);
static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu)
{
	/* Only PR KVM supports the magic page */
	return !is_kvmppc_hv_enabled(vcpu->kvm);
}

extern int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu);
extern int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu);

/*
 * Magic register values loaded into r3 and r4 before the 'sc' assembly
 * instruction for the OSI hypercalls.
 */
#define OSI_SC_MAGIC_R3			0x113724FA
#define OSI_SC_MAGIC_R4			0x77810F9B

#define INS_DCBZ			0x7c0007ec
/* TO = 31 for unconditional trap */
#define INS_TW				0x7fe00008

#define SPLIT_HACK_MASK			0xff000000
#define SPLIT_HACK_OFFS			0xfb000000

/*
 * This packs a VCPU ID from the [0..KVM_MAX_VCPU_IDS) space down to the
 * [0..KVM_MAX_VCPUS) space, using knowledge of the guest's core stride
 * (but not its actual threading mode, which is not available) to avoid
 * collisions.
 *
 * The implementation leaves VCPU IDs from the range [0..KVM_MAX_VCPUS) (block
 * 0) unchanged: if the guest is filling each VCORE completely then it will be
 * using consecutive IDs and it will fill the space without any packing.
 *
 * For higher VCPU IDs, the packed ID is based on the VCPU ID modulo
 * KVM_MAX_VCPUS (effectively masking off the top bits) and then an offset is
 * added to avoid collisions.
 *
 * VCPU IDs in the range [KVM_MAX_VCPUS..(KVM_MAX_VCPUS*2)) (block 1) are only
 * possible if the guest is leaving at least 1/2 of each VCORE empty, so IDs
 * can be safely packed into the second half of each VCORE by adding an offset
 * of (stride / 2).
 *
 * Similarly, if VCPU IDs in the range [(KVM_MAX_VCPUS*2)..(KVM_MAX_VCPUS*4))
 * (blocks 2 and 3) are seen, the guest must be leaving at least 3/4 of each
 * VCORE empty so packed IDs can be offset by (stride / 4) and (stride * 3 / 4).
 *
 * Finally, VCPU IDs from blocks 5..7 will only be seen if the guest is using a
 * stride of 8 and 1 thread per core so the remaining offsets of 1, 5, 3 and 7
 * must be free to use.
 *
 * (The offsets for each block are stored in block_offsets[], indexed by the
 * block number if the stride is 8. For cases where the guest's stride is less
 * than 8, we can re-use the block_offsets array by multiplying the block
 * number by (MAX_SMT_THREADS / stride) to reach the correct entry.)
 */
static inline u32 kvmppc_pack_vcpu_id(struct kvm *kvm, u32 id)
{
	const int block_offsets[MAX_SMT_THREADS] = {0, 4, 2, 6, 1, 5, 3, 7};
	int stride = kvm->arch.emul_smt_mode;
	int block = (id / KVM_MAX_VCPUS) * (MAX_SMT_THREADS / stride);
	u32 packed_id;

	if (WARN_ONCE(block >= MAX_SMT_THREADS, "VCPU ID too large to pack"))
		return 0;
	packed_id = (id % KVM_MAX_VCPUS) + block_offsets[block];
	if (WARN_ONCE(packed_id >= KVM_MAX_VCPUS, "VCPU ID packing failed"))
		return 0;
	return packed_id;
}
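
/*
 * Worked example, assuming (for illustration only; both values are
 * configuration dependent) KVM_MAX_VCPUS == 2048 and MAX_SMT_THREADS == 8:
 * with emul_smt_mode == 4, id == 2050 gives
 * block = (2050 / 2048) * (8 / 4) = 2, so
 * packed_id = (2050 % 2048) + block_offsets[2] = 2 + 2 = 4.
 */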

#endif /* __ASM_KVM_BOOK3S_H__ */