author    Paul Mackerras <paulus@ozlabs.org>  2017-10-26 16:39:19 +1100
committer Paul Mackerras <paulus@ozlabs.org>  2017-11-01 15:36:21 +1100
commit    e641a317830b6bd26e6dc2ef5fe2c1c181dd5cc2 (patch)
tree      14e3877e7e848d10fde4c20b0d6161a3ba75ea2c /arch/powerpc/kvm/book3s_64_mmu_hv.c
parent    1b151ce466175746b1b1a87d42ba5f5a050a5aba (diff)
KVM: PPC: Book3S HV: Unify dirty page map between HPT and radix
Currently, the HPT code in HV KVM maintains a dirty bit per guest page in the rmap array, whether or not dirty page tracking has been enabled for the memory slot. In contrast, the radix code maintains a dirty bit per guest page in memslot->dirty_bitmap, and only does so when dirty page tracking has been enabled.

This changes the HPT code to maintain the dirty bits in the memslot dirty_bitmap like radix does. This results in slightly less code overall, and will mean that we do not lose the dirty bits when transitioning between HPT and radix mode in future.

There is one minor change to behaviour as a result. With HPT, when dirty tracking was enabled for a memslot, we would previously clear all the dirty bits at that point (both in the HPT entries and in the rmap arrays), meaning that a KVM_GET_DIRTY_LOG ioctl immediately following would show no pages as dirty (assuming no vcpus have run in the meantime). With this change, the dirty bits on HPT entries are not cleared at the point where dirty tracking is enabled, so KVM_GET_DIRTY_LOG would show as dirty any guest pages that are resident in the HPT and dirty. This is consistent with what happens on radix.

This also fixes a bug in the mark_pages_dirty() function for radix (in the sense that the function no longer exists). In the case where a large page of 64 normal pages or more is marked dirty, the addressing of the dirty bitmap was incorrect and could write past the end of the bitmap. Fortunately this case was never hit in practice, because a 2MB large page is only 32 x 64kB pages, and we don't support backing the guest with 1GB huge pages at this point.

Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
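As an illustration of the bitmap addressing that the unified set_dirty_bits()/kvmppc_update_dirty_map() path depends on, here is a minimal, hypothetical user-space sketch; it is not the kernel helper, and the name and the word-filling strategy are illustrative only. Marking npages contiguous pages dirty starting at page offset i within a memslot means using word index i / 64 and bit index i % 64 (ignoring the little-endian bit ordering that the kernel's __set_bit_le() handles), so a large page covering 64 or more small pages has to advance the word index rather than write one word per page.

/* Hypothetical illustration, not the kernel's set_dirty_bits(). */
#include <stdint.h>

static void sketch_set_dirty_bits(uint64_t *map, unsigned long i,
                                  unsigned long npages)
{
        /*
         * Fast path: while the start offset is word-aligned and at least
         * a whole word's worth of pages remains, fill 64 bits at a time.
         */
        while (npages >= 64 && (i % 64) == 0) {
                map[i / 64] = ~0ull;
                i += 64;
                npages -= 64;
        }
        /* Set any remaining bits individually. */
        for (; npages; ++i, --npages)
                map[i / 64] |= 1ull << (i % 64);
}

With 64kB pages, a 2MB large page is only 32 small pages, so the word-fill path in the sketch is never reached; that matches the commit message's observation of why the old addressing bug was never hit in practice.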
Diffstat (limited to 'arch/powerpc/kvm/book3s_64_mmu_hv.c')
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c  39
1 file changed, 11 insertions(+), 28 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 84728038e943..944f7a532879 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -776,6 +776,7 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
/* Must be called with both HPTE and rmap locked */
static void kvmppc_unmap_hpte(struct kvm *kvm, unsigned long i,
+ struct kvm_memory_slot *memslot,
unsigned long *rmapp, unsigned long gfn)
{
__be64 *hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
@@ -807,8 +808,8 @@ static void kvmppc_unmap_hpte(struct kvm *kvm, unsigned long i,
/* Harvest R and C */
rcbits = be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
*rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
- if (rcbits & HPTE_R_C)
- kvmppc_update_rmap_change(rmapp, psize);
+ if ((rcbits & HPTE_R_C) && memslot->dirty_bitmap)
+ kvmppc_update_dirty_map(memslot, gfn, psize);
if (rcbits & ~rev[i].guest_rpte) {
rev[i].guest_rpte = ptel | rcbits;
note_hpte_modification(kvm, &rev[i]);
@@ -846,7 +847,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
continue;
}
- kvmppc_unmap_hpte(kvm, i, rmapp, gfn);
+ kvmppc_unmap_hpte(kvm, i, memslot, rmapp, gfn);
unlock_rmap(rmapp);
__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
}
@@ -1029,14 +1030,6 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
retry:
lock_rmap(rmapp);
- if (*rmapp & KVMPPC_RMAP_CHANGED) {
- long change_order = (*rmapp & KVMPPC_RMAP_CHG_ORDER)
- >> KVMPPC_RMAP_CHG_SHIFT;
- *rmapp &= ~(KVMPPC_RMAP_CHANGED | KVMPPC_RMAP_CHG_ORDER);
- npages_dirty = 1;
- if (change_order > PAGE_SHIFT)
- npages_dirty = 1ul << (change_order - PAGE_SHIFT);
- }
if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
unlock_rmap(rmapp);
return npages_dirty;
@@ -1128,7 +1121,7 @@ void kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa,
long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm,
struct kvm_memory_slot *memslot, unsigned long *map)
{
- unsigned long i, j;
+ unsigned long i;
unsigned long *rmapp;
preempt_disable();
@@ -1140,9 +1133,8 @@ long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm,
* since we always put huge-page HPTEs in the rmap chain
* corresponding to their page base address.
*/
- if (npages && map)
- for (j = i; npages; ++j, --npages)
- __set_bit_le(j, map);
+ if (npages)
+ set_dirty_bits(map, i, npages);
++rmapp;
}
preempt_enable();
@@ -1186,7 +1178,6 @@ void kvmppc_unpin_guest_page(struct kvm *kvm, void *va, unsigned long gpa,
struct page *page = virt_to_page(va);
struct kvm_memory_slot *memslot;
unsigned long gfn;
- unsigned long *rmap;
int srcu_idx;
put_page(page);
@@ -1194,20 +1185,12 @@ void kvmppc_unpin_guest_page(struct kvm *kvm, void *va, unsigned long gpa,
if (!dirty)
return;
- /* We need to mark this page dirty in the rmap chain */
+ /* We need to mark this page dirty in the memslot dirty_bitmap, if any */
gfn = gpa >> PAGE_SHIFT;
srcu_idx = srcu_read_lock(&kvm->srcu);
memslot = gfn_to_memslot(kvm, gfn);
- if (memslot) {
- if (!kvm_is_radix(kvm)) {
- rmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
- lock_rmap(rmap);
- *rmap |= KVMPPC_RMAP_CHANGED;
- unlock_rmap(rmap);
- } else if (memslot->dirty_bitmap) {
- mark_page_dirty(kvm, gfn);
- }
- }
+ if (memslot && memslot->dirty_bitmap)
+ set_bit_le(gfn - memslot->base_gfn, memslot->dirty_bitmap);
srcu_read_unlock(&kvm->srcu, srcu_idx);
}
@@ -1282,7 +1265,7 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
lock_rmap(rmapp);
- kvmppc_unmap_hpte(kvm, idx, rmapp, gfn);
+ kvmppc_unmap_hpte(kvm, idx, memslot, rmapp, gfn);
unlock_rmap(rmapp);
}