-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_uvmem.h |  6
-rw-r--r--  arch/powerpc/include/asm/ultravisor-api.h   |  1
-rw-r--r--  arch/powerpc/include/asm/ultravisor.h       |  5
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_radix.c      | 22
-rw-r--r--  arch/powerpc/kvm/book3s_hv_uvmem.c          | 32
5 files changed, 66 insertions, 0 deletions
diff --git a/arch/powerpc/include/asm/kvm_book3s_uvmem.h b/arch/powerpc/include/asm/kvm_book3s_uvmem.h
index 95f389c2937b..3033a9585b43 100644
--- a/arch/powerpc/include/asm/kvm_book3s_uvmem.h
+++ b/arch/powerpc/include/asm/kvm_book3s_uvmem.h
@@ -18,6 +18,7 @@ unsigned long kvmppc_h_svm_page_out(struct kvm *kvm,
unsigned long page_shift);
unsigned long kvmppc_h_svm_init_start(struct kvm *kvm);
unsigned long kvmppc_h_svm_init_done(struct kvm *kvm);
+int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn);
#else
static inline int kvmppc_uvmem_init(void)
{
@@ -58,5 +59,10 @@ static inline unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
{
return H_UNSUPPORTED;
}
+
+static inline int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
+{
+ return -EFAULT;
+}
#endif /* CONFIG_PPC_UV */
#endif /* __ASM_KVM_BOOK3S_UVMEM_H__ */
diff --git a/arch/powerpc/include/asm/ultravisor-api.h b/arch/powerpc/include/asm/ultravisor-api.h
index 2483f15bd71a..e774274ab30e 100644
--- a/arch/powerpc/include/asm/ultravisor-api.h
+++ b/arch/powerpc/include/asm/ultravisor-api.h
@@ -32,5 +32,6 @@
#define UV_SHARE_PAGE 0xF130
#define UV_UNSHARE_PAGE 0xF134
#define UV_UNSHARE_ALL_PAGES 0xF140
+#define UV_PAGE_INVAL 0xF138
#endif /* _ASM_POWERPC_ULTRAVISOR_API_H */
diff --git a/arch/powerpc/include/asm/ultravisor.h b/arch/powerpc/include/asm/ultravisor.h
index 79bb005e8ee9..40cc8bace654 100644
--- a/arch/powerpc/include/asm/ultravisor.h
+++ b/arch/powerpc/include/asm/ultravisor.h
@@ -67,4 +67,9 @@ static inline int uv_register_mem_slot(u64 lpid, u64 start_gpa, u64 size,
size, flags, slotid);
}
+static inline int uv_page_inval(u64 lpid, u64 gpa, u64 page_shift)
+{
+ return ucall_norets(UV_PAGE_INVAL, lpid, gpa, page_shift);
+}
+
#endif /* _ASM_POWERPC_ULTRAVISOR_H */
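For orientation, the new uv_page_inval() follows the same ultracall wrapper pattern as the existing helpers in this header: a call number defined in ultravisor-api.h plus a static inline wrapper that passes its arguments to ucall_norets() and returns the UV status. A minimal sketch of that pattern (UV_EXAMPLE_CALL and uv_example_call() are hypothetical, for illustration only, not part of this patch):

#define UV_EXAMPLE_CALL		0xF1FC	/* hypothetical call number, illustration only */

static inline int uv_example_call(u64 lpid, u64 gpa)
{
	/* ucall_norets() issues the ultracall and returns the UV status code */
	return ucall_norets(UV_EXAMPLE_CALL, lpid, gpa);
}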
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 2d415c36a61d..9f6ba113ffe3 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -19,6 +19,8 @@
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>
+#include <asm/ultravisor.h>
+#include <asm/kvm_book3s_uvmem.h>
/*
* Supported radix tree geometry.
@@ -915,6 +917,9 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (!(dsisr & DSISR_PRTABLE_FAULT))
gpa |= ea & 0xfff;
+ if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
+ return kvmppc_send_page_to_uv(kvm, gfn);
+
/* Get the corresponding memslot */
memslot = gfn_to_memslot(kvm, gfn);
@@ -972,6 +977,11 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long gpa = gfn << PAGE_SHIFT;
unsigned int shift;
+ if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) {
+ uv_page_inval(kvm->arch.lpid, gpa, PAGE_SHIFT);
+ return 0;
+ }
+
ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
if (ptep && pte_present(*ptep))
kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
@@ -989,6 +999,9 @@ int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
int ref = 0;
unsigned long old, *rmapp;
+ if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
+ return ref;
+
ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
@@ -1013,6 +1026,9 @@ int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned int shift;
int ref = 0;
+ if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
+ return ref;
+
ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
if (ptep && pte_present(*ptep) && pte_young(*ptep))
ref = 1;
@@ -1030,6 +1046,9 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm,
int ret = 0;
unsigned long old, *rmapp;
+ if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
+ return ret;
+
ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
ret = 1;
@@ -1082,6 +1101,9 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm,
unsigned long gpa;
unsigned int shift;
+ if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
+ return;
+
gpa = memslot->base_gfn << PAGE_SHIFT;
spin_lock(&kvm->mmu_lock);
for (n = memslot->npages; n; --n) {
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index 4d0f544ad5f5..ed51498b20ee 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -69,6 +69,17 @@
* Shared pages: Whenever guest shares a secure page, UV will split and
* remap the 2MB page if required and issue H_SVM_PAGE_IN with 64K page size.
*
+ * HV invalidating a page: When a regular page belonging to a secure
+ * guest gets unmapped, HV informs UV with a UV_PAGE_INVAL of 64K
+ * page size. Using 64K page size is correct here because any non-secure
+ * page will essentially be of 64K page size. Splitting by UV during sharing
+ * and page-out ensures this.
+ *
+ * Page fault handling: When HV handles a page fault of a page belonging
+ * to a secure guest, it sends that page to UV with a 64K UV_PAGE_IN request.
+ * Using 64K size is correct here too as UV would have split the 2MB page
+ * into 64K mappings and would have done page-outs earlier.
+ *
* In summary, the current secure pages handling code in HV assumes
* 64K page size and in fact fails any page-in/page-out requests of
* non-64K size upfront. If and when UV starts supporting multiple
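To make the two flows described in the comment above concrete, here is a minimal, hedged sketch (not part of the patch) of how HV routes radix MMU events once a guest has completed the secure transition. kvmppc_is_secure(), secure_guest_fault() and secure_guest_unmap() are hypothetical names standing in for the secure_guest & KVMPPC_SECURE_INIT_DONE checks added in the hunks of this commit; all sizes are 64K (PAGE_SHIFT):

/* Sketch only: hypothetical helper mirroring the checks added in this patch. */
static bool kvmppc_is_secure(struct kvm *kvm)
{
	return kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE;
}

/* Page fault path: hand the faulting page to UV with a 64K UV_PAGE_IN. */
static int secure_guest_fault(struct kvm *kvm, unsigned long gfn)
{
	if (!kvmppc_is_secure(kvm))
		return -EINVAL;			/* caller falls back to the normal radix fault path */
	return kvmppc_send_page_to_uv(kvm, gfn);	/* issues UV_PAGE_IN */
}

/* Unmap path: tell UV to drop its 64K mapping with UV_PAGE_INVAL. */
static void secure_guest_unmap(struct kvm *kvm, unsigned long gfn)
{
	if (kvmppc_is_secure(kvm))
		uv_page_inval(kvm->arch.lpid, gfn << PAGE_SHIFT, PAGE_SHIFT);
}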
@@ -630,6 +641,27 @@ out:
return ret;
}
+int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
+{
+ unsigned long pfn;
+ int ret = U_SUCCESS;
+
+ pfn = gfn_to_pfn(kvm, gfn);
+ if (is_error_noslot_pfn(pfn))
+ return -EFAULT;
+
+ mutex_lock(&kvm->arch.uvmem_lock);
+ if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
+ goto out;
+
+ ret = uv_page_in(kvm->arch.lpid, pfn << PAGE_SHIFT, gfn << PAGE_SHIFT,
+ 0, PAGE_SHIFT);
+out:
+ kvm_release_pfn_clean(pfn);
+ mutex_unlock(&kvm->arch.uvmem_lock);
+ return (ret == U_SUCCESS) ? RESUME_GUEST : -EFAULT;
+}
+
static u64 kvmppc_get_secmem_size(void)
{
struct device_node *np;