author    Sean Christopherson <sean.j.christopherson@intel.com>  2019-08-01 13:35:21 -0700
committer Paolo Bonzini <pbonzini@redhat.com>                    2019-08-22 10:09:23 +0200
commit    871bd0346018df53055141f09754cb5ffb334c7b
tree      ec98529c1d0e1dac77f6ba0506537d5416ed55e8
parent    c8e16b78c6142afea428dc316c900644951cf7f3
KVM: x86: Rename access permissions cache member in struct kvm_vcpu_arch
Rename "access" to "mmio_access" to match the other MMIO cache members and to make it more obvious that it's tracking the access permissions for the MMIO cache. Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
 Documentation/virt/kvm/mmu.txt  | 4 ++--
 arch/x86/include/asm/kvm_host.h | 2 +-
 arch/x86/kvm/x86.c              | 2 +-
 arch/x86/kvm/x86.h              | 2 +-
 4 files changed, 5 insertions(+), 5 deletions(-)
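For context, here is a condensed sketch (not a verbatim kernel excerpt; the
comments are editorial) of how the MMIO cache members of struct kvm_vcpu_arch
read once the patch below is applied. After the rename, every member of the
cache carries the mmio_ prefix:

	/* Cache MMIO info */
	u64 mmio_gva;		/* guest virtual address of the cached access */
	unsigned mmio_access;	/* access permissions, formerly "access" */
	gfn_t mmio_gfn;		/* guest frame number of the cached access */
	u64 mmio_gen;		/* MMIO spte generation (see mmu.txt) */
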
diff --git a/Documentation/virt/kvm/mmu.txt b/Documentation/virt/kvm/mmu.txt
index 1b9880dfba0a..dadb29e8738f 100644
--- a/Documentation/virt/kvm/mmu.txt
+++ b/Documentation/virt/kvm/mmu.txt
@@ -294,7 +294,7 @@ Handling a page fault is performed as follows:
- walk shadow page table
- check for valid generation number in the spte (see "Fast invalidation of
MMIO sptes" below)
- - cache the information to vcpu->arch.mmio_gva, vcpu->arch.access and
+ - cache the information to vcpu->arch.mmio_gva, vcpu->arch.mmio_access and
vcpu->arch.mmio_gfn, and call the emulator
- If both P bit and R/W bit of error code are set, this could possibly
be handled as a "fast page fault" (fixed without taking the MMU lock). See
@@ -304,7 +304,7 @@ Handling a page fault is performed as follows:
- if permissions are insufficient, reflect the fault back to the guest
- determine the host page
- if this is an mmio request, there is no host page; cache the info to
- vcpu->arch.mmio_gva, vcpu->arch.access and vcpu->arch.mmio_gfn
+ vcpu->arch.mmio_gva, vcpu->arch.mmio_access and vcpu->arch.mmio_gfn
- walk the shadow page table to find the spte for the translation,
instantiating missing intermediate page tables as necessary
- If this is an mmio request, cache the mmio info to the spte and set some
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 707ae7ff8e1e..44a5ce57a905 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -717,7 +717,7 @@ struct kvm_vcpu_arch {
/* Cache MMIO info */
u64 mmio_gva;
- unsigned access;
+ unsigned mmio_access;
gfn_t mmio_gfn;
u64 mmio_gen;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9f46b0456c4b..c4aea2adeaea 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5377,7 +5377,7 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
*/
if (vcpu_match_mmio_gva(vcpu, gva)
&& !permission_fault(vcpu, vcpu->arch.walk_mmu,
- vcpu->arch.access, 0, access)) {
+ vcpu->arch.mmio_access, 0, access)) {
*gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
(gva & (PAGE_SIZE - 1));
trace_vcpu_match_mmio(gva, *gpa, write, false);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 6594020c0691..b5274e2a53cf 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -196,7 +196,7 @@ static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
* actually a nGPA.
*/
vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
- vcpu->arch.access = access;
+ vcpu->arch.mmio_access = access;
vcpu->arch.mmio_gfn = gfn;
vcpu->arch.mmio_gen = gen;
}
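
Taken together, the x86.h and x86.c hunks show the two halves of the cache's
life cycle. A minimal illustrative sketch of that pairing, using only the
names that appear in the hunks above (not compilable as-is):

	/* fill: vcpu_cache_mmio_info() records the permissions of the MMIO access */
	vcpu->arch.mmio_access = access;

	/* lookup: vcpu_mmio_gva_to_gpa() reuses them when re-checking a fault */
	if (vcpu_match_mmio_gva(vcpu, gva) &&
	    !permission_fault(vcpu, vcpu->arch.walk_mmu,
			      vcpu->arch.mmio_access, 0, access)) {
		/* cached translation still holds: build the gpa without a page walk */
	}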