author    Paolo Bonzini <pbonzini@redhat.com>    2022-03-25 12:42:52 -0400
committer Paolo Bonzini <pbonzini@redhat.com>    2022-04-02 05:34:38 -0400
commit    a1a39128faabc9883a7f9e3f8777b3fbd560fa5f (patch)
tree      78a135733742d3f12bd3b2927b25d52683adcda0 /arch/x86/kvm/mmu/mmu.c
parent    b1e34d325397a33d97d845e312d7cf2a8b646b44 (diff)
KVM: MMU: propagate alloc_workqueue failure
If kvm->arch.tdp_mmu_zap_wq cannot be created, the failure has to be
propagated up to kvm_mmu_init_vm and kvm_arch_init_vm. kvm_arch_init_vm
also has to undo all the initialization, so group all the MMU
initialization code at the beginning and handle cleaning up of
kvm_page_track_init.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86/kvm/mmu/mmu.c')
-rw-r--r--  arch/x86/kvm/mmu/mmu.c  11
1 file changed, 9 insertions, 2 deletions
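For context, a minimal sketch of the tdp_mmu.c side that this hunk relies on: kvm_mmu_init_tdp_mmu() is assumed to return -ENOMEM when the kvm->arch.tdp_mmu_zap_wq workqueue cannot be allocated, which is the error that kvm_mmu_init_vm() below propagates. The workqueue name and flags are illustrative assumptions, not the actual tdp_mmu.c hunk from this commit.

/*
 * Sketch only -- not the tdp_mmu.c hunk from this commit.  The workqueue
 * name and flags are assumptions; the point is the -ENOMEM propagation.
 */
int kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
	struct workqueue_struct *wq;

	wq = alloc_workqueue("kvm", WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 0);
	if (!wq)
		return -ENOMEM;		/* propagated by kvm_mmu_init_vm() */

	/* ... remaining TDP MMU setup elided ... */
	kvm->arch.tdp_mmu_zap_wq = wq;
	return 0;
}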
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 51671cb34fb6..857ba93b5c92 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -5768,17 +5768,24 @@ static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
 	kvm_mmu_zap_all_fast(kvm);
 }
 
-void kvm_mmu_init_vm(struct kvm *kvm)
+int kvm_mmu_init_vm(struct kvm *kvm)
 {
 	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
+	int r;
 
+	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
+	INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
+	INIT_LIST_HEAD(&kvm->arch.lpage_disallowed_mmu_pages);
 	spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
 
-	kvm_mmu_init_tdp_mmu(kvm);
+	r = kvm_mmu_init_tdp_mmu(kvm);
+	if (r < 0)
+		return r;
 
 	node->track_write = kvm_mmu_pte_write;
 	node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
 	kvm_page_track_register_notifier(kvm, node);
+	return 0;
 }
 
 void kvm_mmu_uninit_vm(struct kvm *kvm)
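On the caller side, the commit message implies that kvm_arch_init_vm() (arch/x86/kvm/x86.c) now checks the new return value and unwinds kvm_page_track_init() when kvm_mmu_init_vm() fails. A hedged sketch of that shape follows; the label names and the elided initialization are illustrative, and kvm_page_track_cleanup() is assumed to be the cleanup counterpart, so this is not the actual x86.c hunk.

/* Sketch of the kvm_arch_init_vm() error path implied by the commit message. */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int ret;

	/* ... earlier checks and setup elided ... */

	ret = kvm_page_track_init(kvm);
	if (ret)
		goto out;

	/* MMU init is grouped early so a failure has little to unwind. */
	ret = kvm_mmu_init_vm(kvm);
	if (ret)
		goto out_page_track;

	/* ... remaining VM initialization elided ... */
	return 0;

out_page_track:
	kvm_page_track_cleanup(kvm);
out:
	return ret;
}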