[PATCH 2/2] KVM: x86: Introduce kvm_gfn_to_hva_cache_valid()

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Factor the open-coded gfn_to_hva_cache validation checks into a new
helper, kvm_gfn_to_hva_cache_valid(), so the same logic is not duplicated
at each call site and is less error prone, per the discussion at
https://lore.kernel.org/all/4e29402770a7a254a1ea8ca8165af641ed0832ed.camel@xxxxxxxxxxxxx.

Signed-off-by: Metin Kaya <metikaya@xxxxxxxxxxxx>
---
 arch/x86/kvm/x86.c | 26 ++++++++++++++------------
 1 file changed, 14 insertions(+), 12 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 43a6a7efc6ec..07d368dc69ad 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3425,11 +3425,22 @@ void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_service_local_tlb_flush_requests);
 
+static inline bool kvm_gfn_to_hva_cache_valid(struct kvm *kvm,
+					      struct gfn_to_hva_cache *ghc,
+					      gpa_t gpa)
+{
+	struct kvm_memslots *slots = kvm_memslots(kvm);
+
+	return !unlikely(slots->generation != ghc->generation ||
+			 gpa != ghc->gpa ||
+			 kvm_is_error_hva(ghc->hva) ||
+			 !ghc->memslot);
+}
+
 static void record_steal_time(struct kvm_vcpu *vcpu)
 {
 	struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache;
 	struct kvm_steal_time __user *st;
-	struct kvm_memslots *slots;
 	gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS;
 	u64 steal;
 	u32 version;
@@ -3445,11 +3456,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 	if (WARN_ON_ONCE(current->mm != vcpu->kvm->mm))
 		return;
 
-	slots = kvm_memslots(vcpu->kvm);
-
-	if (unlikely(slots->generation != ghc->generation ||
-		     gpa != ghc->gpa ||
-		     kvm_is_error_hva(ghc->hva) || !ghc->memslot)) {
+	if (!kvm_gfn_to_hva_cache_valid(vcpu->kvm, ghc, gpa)) {
 		/* We rely on the fact that it fits in a single page. */
 		BUILD_BUG_ON((sizeof(*st) - 1) & KVM_STEAL_VALID_BITS);
 
@@ -4729,7 +4736,6 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 {
 	struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache;
 	struct kvm_steal_time __user *st;
-	struct kvm_memslots *slots;
 	static const u8 preempted = KVM_VCPU_PREEMPTED;
 	gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS;
 
@@ -4756,11 +4762,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 	if (unlikely(current->mm != vcpu->kvm->mm))
 		return;
 
-	slots = kvm_memslots(vcpu->kvm);
-
-	if (unlikely(slots->generation != ghc->generation ||
-		     gpa != ghc->gpa ||
-		     kvm_is_error_hva(ghc->hva) || !ghc->memslot))
+	if (!kvm_gfn_to_hva_cache_valid(vcpu->kvm, ghc, gpa))
 		return;
 
 	st = (struct kvm_steal_time __user *)ghc->hva;
-- 
2.37.1




[Index of Archives]     [KVM ARM]     [KVM ia64]     [KVM ppc]     [Virtualization Tools]     [Spice Development]     [Libvirt]     [Libvirt Users]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite Questions]     [Linux Kernel]     [Linux SCSI]     [XFree86]

  Powered by Linux