Re: [PATCH v6 03/11] KVM: Allow range-based TLB invalidation from common code

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 





On 7/18/23 00:37, Raghavendra Rao Ananta wrote:
On Mon, Jul 17, 2023 at 4:40 AM Shaoqin Huang <shahuang@xxxxxxxxxx> wrote:



On 7/15/23 08:53, Raghavendra Rao Ananta wrote:
From: David Matlack <dmatlack@xxxxxxxxxx>

Make kvm_flush_remote_tlbs_range() visible in common code and create a
default implementation that just invalidates the whole TLB.

This paves the way for several future features/cleanups:

   - Introduction of range-based TLBI on ARM.
   - Eliminating kvm_arch_flush_remote_tlbs_memslot()
   - Moving the KVM/x86 TDP MMU to common code.

No functional change intended.

Signed-off-by: David Matlack <dmatlack@xxxxxxxxxx>
Signed-off-by: Raghavendra Rao Ananta <rananta@xxxxxxxxxx>
Reviewed-by: Gavin Shan <gshan@xxxxxxxxxx>
---
   arch/x86/include/asm/kvm_host.h |  3 +++
   arch/x86/kvm/mmu/mmu.c          |  9 ++++-----
   arch/x86/kvm/mmu/mmu_internal.h |  3 ---
   include/linux/kvm_host.h        |  9 +++++++++
   virt/kvm/kvm_main.c             | 13 +++++++++++++
   5 files changed, 29 insertions(+), 8 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index a2d3cfc2eb75..08900afbf2ad 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1804,6 +1804,9 @@ static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
               return -ENOTSUPP;
   }

+#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
+int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn, u64 pages);
+
   #define kvm_arch_pmi_in_guest(vcpu) \
       ((vcpu) && (vcpu)->arch.handling_intr_from_guest)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index ec169f5c7dce..aaa5e336703a 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -278,16 +278,15 @@ static inline bool kvm_available_flush_remote_tlbs_range(void)
       return kvm_x86_ops.flush_remote_tlbs_range;
   }

-void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn,
-                              gfn_t nr_pages)
+int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn, u64 pages)
   {
       int ret = -EOPNOTSUPP;

       if (kvm_x86_ops.flush_remote_tlbs_range)
               ret = static_call(kvm_x86_flush_remote_tlbs_range)(kvm, start_gfn,
-                                                                nr_pages);
-     if (ret)
-             kvm_flush_remote_tlbs(kvm);
+                                                                     pages);
This would look better if the parameter 'pages' were aligned with the parameter 'kvm'.

Agreed, but pulling 'pages' above brings the char count to 83. If
that's acceptable, I'm happy to do it in v7.
Hi Raghavendra,

no need to pull 'pages' up to the previous line; just delete one tab and add a few spaces before 'pages', so it lands in the same column as the original `nr_pages`.

Thanks,
Shaoqin
Thank you.
Raghavendra
Reviewed-by: Shaoqin Huang <shahuang@xxxxxxxxxx>
+
+     return ret;
   }

   static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index);
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index d39af5639ce9..86cb83bb3480 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -170,9 +170,6 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
                                   struct kvm_memory_slot *slot, u64 gfn,
                                   int min_level);

-void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn,
-                              gfn_t nr_pages);
-
   /* Flush the given page (huge or not) of guest memory. */
   static inline void kvm_flush_remote_tlbs_gfn(struct kvm *kvm, gfn_t gfn, int level)
   {
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index e3f968b38ae9..a731967b24ff 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1359,6 +1359,7 @@ int kvm_vcpu_yield_to(struct kvm_vcpu *target);
   void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool yield_to_kernel_mode);

   void kvm_flush_remote_tlbs(struct kvm *kvm);
+void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 pages);

   #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
   int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
@@ -1486,6 +1487,14 @@ static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
   }
   #endif

+#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
+static inline int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm,
+                                                gfn_t gfn, u64 pages)
+{
+     return -EOPNOTSUPP;
+}
+#endif
+
   #ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
   void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
   void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d6b050786155..804470fccac7 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -366,6 +366,19 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
   }
   EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);

+void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 pages)
+{
+     if (!kvm_arch_flush_remote_tlbs_range(kvm, gfn, pages))
+             return;
+
+     /*
+      * Fall back to a flushing entire TLBs if the architecture range-based
+      * TLB invalidation is unsupported or can't be performed for whatever
+      * reason.
+      */
+     kvm_flush_remote_tlbs(kvm);
+}
+
   static void kvm_flush_shadow_all(struct kvm *kvm)
   {
       kvm_arch_flush_shadow_all(kvm);

--
Shaoqin



--
Shaoqin




[Index of Archives]     [LKML Archive]     [Linux ARM Kernel]     [Linux ARM]     [Git]     [Yosemite News]     [Linux SCSI]     [Linux Hams]

  Powered by Linux