Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r--	virt/kvm/kvm_main.c	44
1 files changed, 36 insertions, 8 deletions
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 70e05af5ebea..6d971fb1b08d 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -164,6 +164,10 @@ __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
 {
 }
 
+__weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
+{
+}
+
 bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
 {
 	/*
@@ -357,6 +361,12 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
 #endif
 
+static void kvm_flush_shadow_all(struct kvm *kvm)
+{
+	kvm_arch_flush_shadow_all(kvm);
+	kvm_arch_guest_memory_reclaimed(kvm);
+}
+
 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
 static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
 					       gfp_t gfp_flags)
@@ -434,8 +444,8 @@ static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 
 static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
-	kvm_dirty_ring_free(&vcpu->dirty_ring);
 	kvm_arch_vcpu_destroy(vcpu);
+	kvm_dirty_ring_free(&vcpu->dirty_ring);
 
 	/*
 	 * No need for rcu_read_lock as VCPU_RUN is the only place that changes
@@ -485,12 +495,15 @@ typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
 typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,
 			     unsigned long end);
 
+typedef void (*on_unlock_fn_t)(struct kvm *kvm);
+
 struct kvm_hva_range {
 	unsigned long start;
 	unsigned long end;
 	pte_t pte;
 	hva_handler_t handler;
 	on_lock_fn_t on_lock;
+	on_unlock_fn_t on_unlock;
 	bool flush_on_ret;
 	bool may_block;
 };
@@ -578,8 +591,11 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
 	if (range->flush_on_ret && ret)
 		kvm_flush_remote_tlbs(kvm);
 
-	if (locked)
+	if (locked) {
 		KVM_MMU_UNLOCK(kvm);
+		if (!IS_KVM_NULL_FN(range->on_unlock))
+			range->on_unlock(kvm);
+	}
 
 	srcu_read_unlock(&kvm->srcu, idx);
 
@@ -600,6 +616,7 @@ static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
 		.pte		= pte,
 		.handler	= handler,
 		.on_lock	= (void *)kvm_null_fn,
+		.on_unlock	= (void *)kvm_null_fn,
 		.flush_on_ret	= true,
 		.may_block	= false,
 	};
@@ -619,6 +636,7 @@ static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
 		.pte		= __pte(0),
 		.handler	= handler,
 		.on_lock	= (void *)kvm_null_fn,
+		.on_unlock	= (void *)kvm_null_fn,
 		.flush_on_ret	= false,
 		.may_block	= false,
 	};
@@ -662,7 +680,7 @@ void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start,
 		kvm->mmu_notifier_range_end = end;
 	} else {
 		/*
-		 * Fully tracking multiple concurrent ranges has dimishing
+		 * Fully tracking multiple concurrent ranges has diminishing
 		 * returns. Keep things simple and just find the minimal range
 		 * which includes the current and new ranges. As there won't be
 		 * enough information to subtract a range after its invalidate
@@ -687,6 +705,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 		.pte		= __pte(0),
 		.handler	= kvm_unmap_gfn_range,
 		.on_lock	= kvm_inc_notifier_count,
+		.on_unlock	= kvm_arch_guest_memory_reclaimed,
 		.flush_on_ret	= true,
 		.may_block	= mmu_notifier_range_blockable(range),
 	};
@@ -741,6 +760,7 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
 		.pte		= __pte(0),
 		.handler	= (void *)kvm_null_fn,
 		.on_lock	= kvm_dec_notifier_count,
+		.on_unlock	= (void *)kvm_null_fn,
 		.flush_on_ret	= false,
 		.may_block	= mmu_notifier_range_blockable(range),
 	};
@@ -813,7 +833,7 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
 	int idx;
 
 	idx = srcu_read_lock(&kvm->srcu);
-	kvm_arch_flush_shadow_all(kvm);
+	kvm_flush_shadow_all(kvm);
 	srcu_read_unlock(&kvm->srcu, idx);
 }
 
@@ -932,7 +952,7 @@ static void kvm_destroy_vm_debugfs(struct kvm *kvm)
 	int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
 				      kvm_vcpu_stats_header.num_desc;
 
-	if (!kvm->debugfs_dentry)
+	if (IS_ERR(kvm->debugfs_dentry))
 		return;
 
 	debugfs_remove_recursive(kvm->debugfs_dentry);
@@ -1075,6 +1095,12 @@ static struct kvm *kvm_create_vm(unsigned long type)
 
 	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
 
+	/*
+	 * Force subsequent debugfs file creations to fail if the VM directory
+	 * is not created (by kvm_create_vm_debugfs()).
+	 */
+	kvm->debugfs_dentry = ERR_PTR(-ENOENT);
+
 	if (init_srcu_struct(&kvm->srcu))
 		goto out_err_no_srcu;
 	if (init_srcu_struct(&kvm->irq_srcu))
@@ -1219,7 +1245,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
 	WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait));
 	kvm->mn_active_invalidate_count = 0;
 #else
-	kvm_arch_flush_shadow_all(kvm);
+	kvm_flush_shadow_all(kvm);
 #endif
 	kvm_arch_destroy_vm(kvm);
 	kvm_destroy_devices(kvm);
@@ -1646,6 +1672,7 @@ static void kvm_invalidate_memslot(struct kvm *kvm,
 	 *	- kvm_is_visible_gfn (mmu_check_root)
 	 */
 	kvm_arch_flush_shadow_memslot(kvm, old);
+	kvm_arch_guest_memory_reclaimed(kvm);
 
 	/* Was released by kvm_swap_active_memslots, reacquire. */
 	mutex_lock(&kvm->slots_arch_lock);
@@ -1793,7 +1820,7 @@ static int kvm_set_memslot(struct kvm *kvm,
 
 	/*
 	 * No need to refresh new->arch, changes after dropping slots_arch_lock
-	 * will directly hit the final, active memsot.  Architectures are
+	 * will directly hit the final, active memslot.  Architectures are
 	 * responsible for knowing that new->arch may be stale.
 	 */
 	kvm_commit_memory_region(kvm, old, new, change);
@@ -4327,6 +4354,7 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
 		return 0;
 #endif
 	case KVM_CAP_BINARY_STATS_FD:
+	case KVM_CAP_SYSTEM_EVENT_DATA:
 		return 1;
 	default:
 		break;
@@ -5479,7 +5507,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
 	}
 	add_uevent_var(env, "PID=%d", kvm->userspace_pid);
 
-	if (kvm->debugfs_dentry) {
+	if (!IS_ERR(kvm->debugfs_dentry)) {
 		char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);
 
 		if (p) {
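
The central change above is the new kvm_arch_guest_memory_reclaimed() hook: a __weak no-op that common KVM code now calls whenever pages backing guest memory may be handed back to the host, i.e. after MMU-notifier invalidations (via the new on_unlock callback), after kvm_arch_flush_shadow_all() (wrapped as kvm_flush_shadow_all()), and after a memslot is invalidated. As a rough sketch only, and not any architecture's actual implementation, an arch that must flush caches when reclaiming pages from a guest could override the weak default along these lines; the helpers kvm_guest_memory_is_incoherent() and arch_flush_guest_memory_caches() are hypothetical names, not real kernel APIs.

/* Illustrative sketch only, not an actual architecture implementation. */
void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
{
	/*
	 * Called by common KVM code once pages that may still hold guest
	 * data are about to be reused by the host (MMU notifier
	 * invalidation, memslot deletion, or VM teardown).
	 */
	if (kvm_guest_memory_is_incoherent(kvm))	/* hypothetical predicate */
		arch_flush_guest_memory_caches(kvm);	/* hypothetical flush helper */
}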
