-rw-r--r-- virt/kvm/dirty_ring.c | 44
1 file changed, 35 insertions(+), 9 deletions(-)
diff --git a/virt/kvm/dirty_ring.c b/virt/kvm/dirty_ring.c
index 97cca0c02fd1..939198ac6695 100644
--- a/virt/kvm/dirty_ring.c
+++ b/virt/kvm/dirty_ring.c
@@ -55,9 +55,6 @@ static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask)
 	struct kvm_memory_slot *memslot;
 	int as_id, id;
 
-	if (!mask)
-		return;
-
 	as_id = slot >> 16;
 	id = (u16)slot;
 
@@ -108,15 +105,24 @@ static inline bool kvm_dirty_gfn_harvested(struct kvm_dirty_gfn *gfn)
 int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring,
 			 int *nr_entries_reset)
 {
+	/*
+	 * To minimize mmu_lock contention, batch resets for harvested entries
+	 * whose gfns are in the same slot, and are within N frame numbers of
+	 * each other, where N is the number of bits in an unsigned long.  For
+	 * simplicity, process the current set of entries when the next entry
+	 * can't be included in the batch.
+	 *
+	 * Track the current batch slot, the gfn offset into the slot for the
+	 * batch, and the bitmask of gfns that need to be reset (relative to
+	 * offset).  Note, the offset may be adjusted backwards, e.g. so that
+	 * a sequence of gfns X, X-1, ... X-N-1 can be batched.
+	 */
 	u32 cur_slot, next_slot;
 	u64 cur_offset, next_offset;
-	unsigned long mask;
+	unsigned long mask = 0;
 	struct kvm_dirty_gfn *entry;
 	bool first_round = true;
 
-	/* This is only needed to make compilers happy */
-	cur_slot = cur_offset = mask = 0;
-
 	while (likely((*nr_entries_reset) < INT_MAX)) {
 		if (signal_pending(current))
 			return -EINTR;
@@ -164,14 +170,34 @@ int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring,
 				continue;
 			}
 		}
-		kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask);
+
+		/*
+		 * Reset the slot for all the harvested entries that have been
+		 * gathered, but not yet fully processed.
+		 */
+		if (mask)
+			kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask);
+
+		/*
+		 * The current slot was reset or this is the first harvested
+		 * entry; (re)initialize the metadata.
+		 */
 		cur_slot = next_slot;
 		cur_offset = next_offset;
 		mask = 1;
 		first_round = false;
 	}
 
-	kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask);
+	/*
+	 * Perform a final reset if there are harvested entries that haven't
+	 * been processed, which is guaranteed if at least one harvested entry
+	 * was found.  The loop only performs a reset when the "next" entry
+	 * can't be batched with the "current" entry(s), and that reset
+	 * processes the _current_ entry(s); i.e. the last harvested entry,
+	 * a.k.a. next, will always be left pending.
+	 */
+	if (mask)
+		kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask);
 
 	/*
 	 * The request KVM_REQ_DIRTY_RING_SOFT_FULL will be cleared
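
For readers without the surrounding file, the batching scheme the new comments describe can be sketched as standalone userspace C. The sketch below is an illustration, not the kernel code: struct gfn_entry, reset_batch() and reset_all() are hypothetical stand-ins for KVM's struct kvm_dirty_gfn, kvm_reset_dirty_gfn() and the reset loop, and a non-zero mask stands in for the first_round bookkeeping. It shows how harvested entries in the same slot, within BITS_PER_LONG frame numbers of each other, collapse into a single (slot, offset, mask) reset, including the backwards offset adjustment and the final flush of the last pending batch.

#include <stdint.h>
#include <stdio.h>

/* Number of bits in an unsigned long, i.e. the "N" from the comment. */
static const int64_t BITS_PER_LONG = 8 * sizeof(unsigned long);

/* Hypothetical stand-in for a harvested KVM dirty-ring entry. */
struct gfn_entry {
	uint32_t slot;
	uint64_t offset;	/* gfn offset into the slot */
};

/* Hypothetical stand-in for kvm_reset_dirty_gfn(): one call per batch. */
static void reset_batch(uint32_t slot, uint64_t offset, unsigned long mask)
{
	printf("reset: slot %u, base offset %llu, mask 0x%lx\n",
	       (unsigned)slot, (unsigned long long)offset, mask);
}

static void reset_all(const struct gfn_entry *entries, int nr)
{
	uint32_t cur_slot = 0, next_slot;
	uint64_t cur_offset = 0, next_offset;
	unsigned long mask = 0;	/* also means "no batch gathered yet" */
	int i;

	for (i = 0; i < nr; i++) {
		next_slot = entries[i].slot;
		next_offset = entries[i].offset;

		if (mask && next_slot == cur_slot) {
			int64_t delta = (int64_t)(next_offset - cur_offset);

			/* Forward and within the window: just set the bit. */
			if (delta >= 0 && delta < BITS_PER_LONG) {
				mask |= 1ul << delta;
				continue;
			}

			/*
			 * Backwards visit, e.g. gfns X, X-1, ... X-N-1: slide
			 * the base offset down and shift the mask up, but only
			 * if no already-set bit would be shifted out the top.
			 */
			if (delta < 0 && delta > -BITS_PER_LONG &&
			    (mask << -delta >> -delta) == mask) {
				cur_offset = next_offset;
				mask = (mask << -delta) | 1;
				continue;
			}
		}

		/* The next entry can't be batched: flush the current batch. */
		if (mask)
			reset_batch(cur_slot, cur_offset, mask);

		/* Start a new batch at the entry that didn't fit. */
		cur_slot = next_slot;
		cur_offset = next_offset;
		mask = 1;
	}

	/* The loop always leaves the last gathered batch pending. */
	if (mask)
		reset_batch(cur_slot, cur_offset, mask);
}

int main(void)
{
	const struct gfn_entry entries[] = {
		{ 1, 100 }, { 1, 101 }, { 1, 99 },	/* one batch, base 99 */
		{ 1, 500 },				/* too far: new batch */
		{ 2, 500 },				/* new slot: new batch */
	};

	reset_all(entries, (int)(sizeof(entries) / sizeof(entries[0])));
	return 0;
}

Compiled and run, this prints three resets: one for the {99, 100, 101} cluster (mask 0x7) and one each for the two entries that could not join it. Note how the zero-initialized mask does double duty, as in the patch itself: it marks "no batch gathered yet", which is why the if (mask) guards can replace both the old "make compilers happy" initialization and the !mask early return that the first hunk removes from kvm_reset_dirty_gfn().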