path: root/arch/x86/kvm/x86.h
author Binbin Wu <binbin.wu@linux.intel.com> 2023-03-22 12:58:22 +0800
committer Sean Christopherson <seanjc@google.com> 2023-03-22 10:18:11 -0700
commit bede6eb4db19424477c36dace426ae12255f4a0d (patch)
tree ac2e27614a571387bcbfb7a68e0bd636119fd7d3 /arch/x86/kvm/x86.h
parent 607475cfa0f753862c0030de836875fe4ac5cf8a (diff)
KVM: x86: Use boolean return value for is_{pae,pse,paging}()
Convert is_{pae,pse,paging}() to use kvm_is_cr{0,4}_bit_set() and return
bools. Returning an "int" requires not one, but two implicit casts, first
from "unsigned long" to "int", and then again to a "bool". Both casts are
more than a bit dangerous; the ulong=>int casts would drop a bit on 64-bit
kernels _if_ the bits in question weren't in the lower 32 bits, and the
int=>bool cast can result in false negatives/positives, e.g. see commit
0c928ff26bd6 ("KVM: SVM: Fix benign "bool vs. int" comparison in
svm_set_cr0()").

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Binbin Wu <binbin.wu@linux.intel.com>
Link: https://lore.kernel.org/r/20230322045824.22970-3-binbin.wu@linux.intel.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
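[Not part of the patch: a minimal standalone sketch of the "unsigned long => int" truncation hazard described above. read_cr_bits(), bit_set_as_int() and bit_set_as_bool() are hypothetical stand-ins, not the actual KVM helpers; the bit chosen is deliberately above the lower 32 bits to show how an "int" return can silently drop it while a "bool" return cannot.]

/* sketch.c - illustrative only, not kernel code */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for kvm_read_cr4_bits(): returns the masked bits. */
static uint64_t read_cr_bits(uint64_t cr, uint64_t mask)
{
	return cr & mask;
}

/* Old pattern: returning "int" silently truncates the 64-bit result. */
static int bit_set_as_int(uint64_t cr, uint64_t mask)
{
	return read_cr_bits(cr, mask);
}

/* New pattern: "bool" collapses any non-zero result to true, losing nothing. */
static bool bit_set_as_bool(uint64_t cr, uint64_t mask)
{
	return read_cr_bits(cr, mask);
}

int main(void)
{
	uint64_t mask = 1ULL << 32;	/* a bit outside the lower 32 bits */
	uint64_t cr = mask;		/* the bit is actually set */

	printf("int  return: %d\n", bit_set_as_int(cr, mask));		/* 0: false negative */
	printf("bool return: %d\n", bit_set_as_bool(cr, mask));	/* 1: correct */
	return 0;
}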
Diffstat (limited to 'arch/x86/kvm/x86.h')
-rw-r--r-- arch/x86/kvm/x86.h | 12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 6d68c6772105..577b82358529 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -171,19 +171,19 @@ static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}
-static inline int is_pae(struct kvm_vcpu *vcpu)
+static inline bool is_pae(struct kvm_vcpu *vcpu)
{
- return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
+ return kvm_is_cr4_bit_set(vcpu, X86_CR4_PAE);
}
-static inline int is_pse(struct kvm_vcpu *vcpu)
+static inline bool is_pse(struct kvm_vcpu *vcpu)
{
- return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
+ return kvm_is_cr4_bit_set(vcpu, X86_CR4_PSE);
}
-static inline int is_paging(struct kvm_vcpu *vcpu)
+static inline bool is_paging(struct kvm_vcpu *vcpu)
{
- return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
+ return likely(kvm_is_cr0_bit_set(vcpu, X86_CR0_PG));
}
static inline bool is_pae_paging(struct kvm_vcpu *vcpu)