author      Cfir Cohen <cfir@google.com>          2020-08-07 17:37:46 -0700
committer   Paolo Bonzini <pbonzini@redhat.com>   2020-09-28 07:57:24 -0400
commit      50085beee878e7c4e8ddbd3d634c0170589802e0 (patch)
tree        5328b8b8a4571b567ab5d8f3228ceb39e0c273b1 /arch/x86/kvm/svm
parent      fc595f35994233a566904bf811ffb9c3c80a84a9 (diff)
KVM: SVM: Mark SEV launch secret pages as dirty.
The LAUNCH_SECRET command performs in-place encryption of the launch secret
memory contents. Mark the pinned pages as dirty before unpinning them; this
matches the logic in sev_launch_update_data().

Signed-off-by: Cfir Cohen <cfir@google.com>
Message-Id: <20200808003746.66687-1-cfir@google.com>
Reviewed-by: Brijesh Singh <brijesh.singh@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
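For context, the dirty-marking pattern the message refers to (already used in sev_launch_update_data()) looks roughly like the sketch below. This is an illustrative sketch only, not the upstream code: the wrapper name sketch_mark_dirty_and_unpin() is made up, error handling and the surrounding SEV command plumbing are omitted, while set_page_dirty_lock(), mark_page_accessed() and sev_unpin_memory() are the real helpers visible in the diff.

/*
 * Illustrative sketch: after the firmware has rewritten the pinned pages in
 * place, mark each page dirty and accessed, then drop the pin.
 */
static void sketch_mark_dirty_and_unpin(struct kvm *kvm, struct page **pages,
					unsigned long npages)
{
	unsigned long i;

	for (i = 0; i < npages; i++) {
		set_page_dirty_lock(pages[i]);	/* contents changed under the pin */
		mark_page_accessed(pages[i]);	/* keep LRU accounting accurate */
	}
	sev_unpin_memory(kvm, pages, npages);
}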
Diffstat (limited to 'arch/x86/kvm/svm')
-rw-r--r--  arch/x86/kvm/svm/sev.c  15
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 7bf7bf734979..bb0e89c79a04 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -856,7 +856,7 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
struct kvm_sev_launch_secret params;
struct page **pages;
void *blob, *hdr;
- unsigned long n;
+ unsigned long n, i;
int ret, offset;
if (!sev_guest(kvm))
@@ -870,6 +870,14 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
return PTR_ERR(pages);
/*
+ * The LAUNCH_SECRET command will perform in-place encryption of the
+ * memory content (i.e it will write the same memory region with C=1).
+ * It's possible that the cache may contain the data with C=0, i.e.,
+ * unencrypted so invalidate it first.
+ */
+ sev_clflush_pages(pages, n);
+
+ /*
* The secret must be copied into contiguous memory region, lets verify
* that userspace memory pages are contiguous before we issue command.
*/
@@ -914,6 +922,11 @@ e_free_blob:
e_free:
kfree(data);
e_unpin_memory:
+ /* content of memory is updated, mark pages dirty */
+ for (i = 0; i < n; i++) {
+ set_page_dirty_lock(pages[i]);
+ mark_page_accessed(pages[i]);
+ }
sev_unpin_memory(kvm, pages, n);
return ret;
}
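The comment added above is the other half of the picture: before the firmware rewrites the region with the C-bit set, any cache lines still holding the old unencrypted (C=0) contents must be flushed. A rough sketch of the resulting ordering in sev_launch_secret(), assuming a hypothetical issue_launch_secret() stand-in for the firmware command plumbing and omitting error paths (sev_pin_memory(), sev_clflush_pages() and sev_unpin_memory() are the real helpers from sev.c):

pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
if (IS_ERR(pages))
	return PTR_ERR(pages);

sev_clflush_pages(pages, n);		/* drop stale C=0 cache lines first */
ret = issue_launch_secret(kvm, argp);	/* hypothetical: firmware encrypts the region in place (C=1) */
					/* ... then the dirty/accessed loop and sev_unpin_memory() shown above */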