path: root/arch/powerpc/kvm/book3s_hv.c
author	Paul Mackerras <paulus@ozlabs.org>	2017-10-26 16:39:19 +1100
committer	Paul Mackerras <paulus@ozlabs.org>	2017-11-01 15:36:21 +1100
commit	e641a317830b6bd26e6dc2ef5fe2c1c181dd5cc2 (patch)
tree	14e3877e7e848d10fde4c20b0d6161a3ba75ea2c /arch/powerpc/kvm/book3s_hv.c
parent	1b151ce466175746b1b1a87d42ba5f5a050a5aba (diff)
KVM: PPC: Book3S HV: Unify dirty page map between HPT and radix
Currently, the HPT code in HV KVM maintains a dirty bit per guest page in the rmap array, whether or not dirty page tracking has been enabled for the memory slot. In contrast, the radix code maintains a dirty bit per guest page in memslot->dirty_bitmap, and only does so when dirty page tracking has been enabled.

This changes the HPT code to maintain the dirty bits in the memslot dirty_bitmap like radix does. This results in slightly less code overall, and will mean that we do not lose the dirty bits when transitioning between HPT and radix mode in future.

There is one minor change to behaviour as a result. With HPT, when dirty tracking was enabled for a memslot, we would previously clear all the dirty bits at that point (both in the HPT entries and in the rmap arrays), meaning that a KVM_GET_DIRTY_LOG ioctl immediately following would show no pages as dirty (assuming no vcpus have run in the meantime). With this change, the dirty bits on HPT entries are not cleared at the point where dirty tracking is enabled, so KVM_GET_DIRTY_LOG would show as dirty any guest pages that are resident in the HPT and dirty. This is consistent with what happens on radix.

This also fixes a bug in the mark_pages_dirty() function for radix (in the sense that the function no longer exists). In the case where a large page of 64 normal pages or more is marked dirty, the addressing of the dirty bitmap was incorrect and could write past the end of the bitmap. Fortunately this case was never hit in practice because a 2MB large page is only 32 x 64kB pages, and we don't support backing the guest with 1GB huge pages at this point.

Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
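[Editor's note] The core of the change in kvm_vm_ioctl_get_dirty_log_hv() is the split use of the dirty_bitmap area: the first half accumulates bits as pages are dirtied, and the second half is the snapshot handed to userspace, harvested with xchg() so that collecting the bits also clears them. The standalone sketch below models that pattern outside the kernel; the sizes and names are invented for illustration, and GCC's __atomic builtins stand in for the kernel's xchg(). It is a minimal model, not the kernel code.

#include <stdio.h>
#include <string.h>

#define NPAGES		128	/* hypothetical memslot size in pages */
#define BITMAP_LONGS	(NPAGES / (8 * sizeof(unsigned long)))

/*
 * Model of the memslot dirty_bitmap area: the first BITMAP_LONGS words
 * accumulate dirty bits as pages are dirtied; the second BITMAP_LONGS
 * words are scratch space used to build the snapshot for userspace.
 */
static unsigned long dirty_area[2 * BITMAP_LONGS];

static void mark_dirty(unsigned long gfn_offset)
{
	/* Atomically set one bit in the accumulation half. */
	__atomic_fetch_or(&dirty_area[gfn_offset / (8 * sizeof(unsigned long))],
			  1ul << (gfn_offset % (8 * sizeof(unsigned long))),
			  __ATOMIC_RELAXED);
}

static void get_dirty_log(unsigned long *out)
{
	unsigned long *p = dirty_area;			/* accumulation half */
	unsigned long *buf = dirty_area + BITMAP_LONGS;	/* snapshot half */
	size_t i;

	memset(buf, 0, BITMAP_LONGS * sizeof(unsigned long));
	/* Harvest-and-clear, as the patch does with buf[i] |= xchg(&p[i], 0). */
	for (i = 0; i < BITMAP_LONGS; ++i)
		buf[i] |= __atomic_exchange_n(&p[i], 0, __ATOMIC_RELAXED);
	memcpy(out, buf, BITMAP_LONGS * sizeof(unsigned long));
}

int main(void)
{
	unsigned long log[BITMAP_LONGS];

	mark_dirty(3);
	mark_dirty(70);
	get_dirty_log(log);
	printf("log[0] = %#lx, log[1] = %#lx\n", log[0], log[1]);
	get_dirty_log(log);	/* second call: the bits were cleared */
	printf("log[0] = %#lx, log[1] = %#lx\n", log[0], log[1]);
	return 0;
}

The xchg() makes the harvest safe against concurrent setters: a bit set between the two calls is picked up by the next call rather than lost.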
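[Editor's note] The arithmetic behind "this case was never hit" is worth making concrete: a 2MB large page backed by 64kB small pages spans 2048/64 = 32 pages, below the 64-bits-per-long threshold at which whole bitmap words get written. The sketch below shows the correct word/bit addressing for marking a run of (1 << order) pages dirty; the commit message does not show the removed code, so the "index by page offset instead of word offset" failure mode named in the comments is an assumption for illustration, not a quote of the old mark_pages_dirty().

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))

/*
 * Correct addressing when marking a naturally aligned run of
 * (1 << order) small pages dirty: index the bitmap in *words*,
 * i.e. divide the page offset by BITS_PER_LONG.  Indexing by the
 * raw page offset instead (one plausible form of the bug described
 * in the commit message) would land BITS_PER_LONG times too far
 * into the array once the run spans whole words.
 */
static void mark_run_dirty(unsigned long *bitmap, unsigned long page_offset,
			   unsigned int order)
{
	unsigned long npages = 1ul << order;
	unsigned long i;

	if (npages < BITS_PER_LONG) {
		/* Run fits inside one word: set bits individually. */
		for (i = 0; i < npages; ++i)
			bitmap[(page_offset + i) / BITS_PER_LONG] |=
				1ul << ((page_offset + i) % BITS_PER_LONG);
		return;
	}
	/* Run covers whole words: fill them with all-ones. */
	for (i = 0; i < npages / BITS_PER_LONG; ++i)
		bitmap[page_offset / BITS_PER_LONG + i] = ~0ul;
}

int main(void)
{
	unsigned long bitmap[4] = { 0 };

	/*
	 * A 2MB page of 64kB small pages is 32 pages (order 5):
	 * 32 < 64 bits per long, so the whole-word path never ran.
	 */
	mark_run_dirty(bitmap, 64, 5);
	printf("%#lx %#lx %#lx %#lx\n",
	       bitmap[0], bitmap[1], bitmap[2], bitmap[3]);
	return 0;
}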
Diffstat (limited to 'arch/powerpc/kvm/book3s_hv.c')
-rw-r--r--	arch/powerpc/kvm/book3s_hv.c	30
1 file changed, 13 insertions(+), 17 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index f6522075edbe..96b92d4adb87 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -3376,7 +3376,7 @@ static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm,
 	struct kvm_memory_slot *memslot;
 	int i, r;
 	unsigned long n;
-	unsigned long *buf;
+	unsigned long *buf, *p;
 	struct kvm_vcpu *vcpu;
 
 	mutex_lock(&kvm->slots_lock);
@@ -3392,8 +3392,8 @@ static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm,
 		goto out;
 
 	/*
-	 * Use second half of bitmap area because radix accumulates
-	 * bits in the first half.
+	 * Use second half of bitmap area because both HPT and radix
+	 * accumulate bits in the first half.
 	 */
 	n = kvm_dirty_bitmap_bytes(memslot);
 	buf = memslot->dirty_bitmap + n / sizeof(long);
@@ -3406,6 +3406,16 @@ static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm,
 	if (r)
 		goto out;
 
+	/*
+	 * We accumulate dirty bits in the first half of the
+	 * memslot's dirty_bitmap area, for when pages are paged
+	 * out or modified by the host directly.  Pick up these
+	 * bits and add them to the map.
+	 */
+	p = memslot->dirty_bitmap;
+	for (i = 0; i < n / sizeof(long); ++i)
+		buf[i] |= xchg(&p[i], 0);
+
 	/* Harvest dirty bits from VPA and DTL updates */
 	/* Note: we never modify the SLB shadow buffer areas */
 	kvm_for_each_vcpu(i, vcpu, kvm) {
@@ -3466,8 +3476,6 @@ static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
 				const struct kvm_memory_slot *new)
 {
 	unsigned long npages = mem->memory_size >> PAGE_SHIFT;
-	struct kvm_memslots *slots;
-	struct kvm_memory_slot *memslot;
 
 	/*
 	 * If we are making a new memslot, it might make
@@ -3477,18 +3485,6 @@ static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
 	 */
 	if (npages)
 		atomic64_inc(&kvm->arch.mmio_update);
-
-	if (npages && old->npages && !kvm_is_radix(kvm)) {
-		/*
-		 * If modifying a memslot, reset all the rmap dirty bits.
-		 * If this is a new memslot, we don't need to do anything
-		 * since the rmap array starts out as all zeroes,
-		 * i.e. no pages are dirty.
-		 */
-		slots = kvm_memslots(kvm);
-		memslot = id_to_memslot(slots, mem->slot);
-		kvmppc_hv_get_dirty_log_hpt(kvm, memslot, NULL);
-	}
 }
 
 /*
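[Editor's note] For reference, the KVM_GET_DIRTY_LOG ioctl whose behaviour the commit message discusses is driven from userspace roughly as follows. This is a minimal sketch, not tooling from this patch: the slot number is a placeholder, and the caller is assumed to have already created the VM fd and sized the bitmap at one bit per page in the slot, rounded up to 64-bit granularity.

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Fetch the dirty log for one memslot from an existing VM fd. */
static int fetch_dirty_log(int vm_fd, void *bitmap)
{
	struct kvm_dirty_log log = {
		.slot = 0,		/* placeholder memslot id */
		.dirty_bitmap = bitmap,
	};

	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0) {
		perror("KVM_GET_DIRTY_LOG");
		return -1;
	}
	return 0;
}

With this patch applied, a call made while dirty pages are still resident in the HPT reports them as dirty, matching the radix behaviour described above.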