author     Sean Christopherson <sean.j.christopherson@intel.com>   2020-09-23 11:04:06 -0700
committer  Paolo Bonzini <pbonzini@redhat.com>                     2020-09-28 07:58:00 -0400
commit     d85a8034c016a1f50e879124bc5839c986d87a0a (patch)
tree       954ace7609ea5aa970f14a052789bdde7514c4ef /arch/x86/kvm/vmx/vmx.c
parent     bd65ba82b324e1765121f3602f9b0a89b7aa1c08 (diff)
KVM: VMX: Rename "find_msr_entry" to "vmx_find_uret_msr"
Rename "find_msr_entry" to scope it to VMX and to associate it with guest_uret_msrs. Drop the "entry" so that the function name pairs with the existing __vmx_find_uret_msr(), which intentionally uses a double underscore prefix instead of appending "index" or "slot" as those names are already claimed by other pieces of the user return MSR stack. No functional change intended. Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com> Message-Id: <20200923180409.32255-13-sean.j.christopherson@intel.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86/kvm/vmx/vmx.c')
-rw-r--r--   arch/x86/kvm/vmx/vmx.c   10
1 file changed, 5 insertions, 5 deletions
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 3300e373fadf..4cf1ccdb99ce 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -621,7 +621,7 @@ static inline int __vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
         return -1;
 }
 
-struct vmx_uret_msr *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
+struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
 {
         int i;
@@ -1856,7 +1856,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                 goto find_uret_msr;
         default:
         find_uret_msr:
-                msr = find_msr_entry(vmx, msr_info->index);
+                msr = vmx_find_uret_msr(vmx, msr_info->index);
                 if (msr) {
                         msr_info->data = msr->data;
                         break;
@@ -2130,7 +2130,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
         default:
         find_uret_msr:
-                msr = find_msr_entry(vmx, msr_index);
+                msr = vmx_find_uret_msr(vmx, msr_index);
                 if (msr)
                         ret = vmx_set_guest_msr(vmx, msr, data);
                 else
@@ -2763,7 +2763,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
         struct vcpu_vmx *vmx = to_vmx(vcpu);
-        struct vmx_uret_msr *msr = find_msr_entry(vmx, MSR_EFER);
+        struct vmx_uret_msr *msr = vmx_find_uret_msr(vmx, MSR_EFER);
 
         if (!msr)
                 return;
@@ -7108,7 +7108,7 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
         if (boot_cpu_has(X86_FEATURE_RTM)) {
                 struct vmx_uret_msr *msr;
 
-                msr = find_msr_entry(vmx, MSR_IA32_TSX_CTRL);
+                msr = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
                 if (msr) {
                         bool enabled = guest_cpuid_has(vcpu, X86_FEATURE_RTM);
                         vmx_set_guest_msr(vmx, msr, enabled ? 0 : TSX_CTRL_RTM_DISABLE);