Currently, the dirty-logging paths, including
kvm_arch_flush_remote_tlbs_memslot() and kvm_mmu_wp_memory_region(),
invalidate the entire VM's TLB entries using kvm_flush_remote_tlbs().
Since the range of IPAs is already known to these functions, this is
highly inefficient on systems that support FEAT_TLBIRANGE. Hence, use
kvm_flush_remote_tlbs_range() to flush only the affected range of TLB
entries instead.

Signed-off-by: Raghavendra Rao Ananta <rananta@xxxxxxxxxx>
---
 arch/arm64/kvm/arm.c | 7 ++++++-
 arch/arm64/kvm/mmu.c | 2 +-
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 00da570ed72bd..179520888c697 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1433,7 +1433,12 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
 					const struct kvm_memory_slot *memslot)
 {
-	kvm_flush_remote_tlbs(kvm);
+	phys_addr_t start, end;
+
+	start = memslot->base_gfn << PAGE_SHIFT;
+	end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
+
+	kvm_flush_remote_tlbs_range(kvm, start, end);
 }
 
 static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 70f76bc909c5d..e34b81f5922ce 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -976,7 +976,7 @@ static void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
 	write_lock(&kvm->mmu_lock);
 	stage2_wp_range(&kvm->arch.mmu, start, end);
 	write_unlock(&kvm->mmu_lock);
-	kvm_flush_remote_tlbs(kvm);
+	kvm_flush_remote_tlbs_range(kvm, start, end);
 }
 
 /**
--
2.39.0.314.g84b9a713c41-goog
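
[Editor's aside, not part of the patch] For readers unfamiliar with the
GFN-to-IPA arithmetic in kvm_arch_flush_remote_tlbs_memslot() above, here
is a minimal userspace sketch of that computation. The struct layout and
the flush stub are stand-ins for the kernel's kvm_memory_slot and
kvm_flush_remote_tlbs_range(), chosen only to make the example
self-contained and compilable; they are not the real kernel definitions.

	#include <stdint.h>
	#include <stdio.h>
	#include <inttypes.h>

	#define PAGE_SHIFT 12	/* 4KiB pages, a typical arm64 config */

	/* Stand-in for the kernel's struct kvm_memory_slot. */
	struct kvm_memory_slot {
		uint64_t base_gfn;	/* first guest frame number of the slot */
		uint64_t npages;	/* number of pages the slot covers */
	};

	/* Stand-in for kvm_flush_remote_tlbs_range(): just report the range. */
	static void flush_remote_tlbs_range(uint64_t start, uint64_t end)
	{
		printf("flush IPA range [0x%" PRIx64 ", 0x%" PRIx64 ")\n",
		       start, end);
	}

	int main(void)
	{
		struct kvm_memory_slot slot = {
			.base_gfn = 0x80000,	/* example values */
			.npages = 512,
		};

		/* Same arithmetic as the patch: shift GFNs up to byte addresses. */
		uint64_t start = slot.base_gfn << PAGE_SHIFT;
		uint64_t end = (slot.base_gfn + slot.npages) << PAGE_SHIFT;

		flush_remote_tlbs_range(start, end);	/* end is exclusive */
		return 0;
	}

With the example values above this prints
"flush IPA range [0x80000000, 0x80200000)", i.e. a 2MiB window covering
exactly the memslot, rather than the whole of the VM's address space.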