From: Lan Tianyu <Tianyu.Lan@xxxxxxxxxxxxx>

Make kvm_arch_mmu_enable_log_dirty_pt_masked() return a value that the
caller can use to determine whether a TLB flush is necessary.
kvm_get_dirty_log_protect() and kvm_clear_dirty_log_protect() use this
return value to set their flush parameter.

Signed-off-by: Lan Tianyu <Tianyu.Lan@xxxxxxxxxxxxx>
---
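Note for reviewers, kept below the cut line so git-am drops it: every
implementation in this patch still returns true, so behavior is
unchanged for now; the bool return only adds the hook. As a sketch of
what the hook allows, an architecture could eventually skip the flush
when no PTE was actually changed. wrprotect_gfn() below is an invented
stand-in for a per-gfn arch helper, not an existing function:

	bool kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
			struct kvm_memory_slot *slot,
			gfn_t gfn_offset, unsigned long mask)
	{
		bool flush = false;

		while (mask) {
			/* The lowest set bit picks the next gfn in the mask. */
			gfn_t gfn = slot->base_gfn + gfn_offset + __ffs(mask);

			/*
			 * Hypothetical helper: returns true only if a
			 * writable PTE was actually cleared for this gfn.
			 */
			flush |= wrprotect_gfn(kvm, gfn);

			/* Clear the bit just handled. */
			mask &= mask - 1;
		}

		/* The caller flushes the TLB only when a PTE changed. */
		return flush;
	}
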
 arch/mips/kvm/mmu.c      |  5 ++++-
 arch/x86/kvm/mmu.c       |  6 +++++-
 include/linux/kvm_host.h |  2 +-
 virt/kvm/arm/mmu.c       |  5 ++++-
 virt/kvm/kvm_main.c      | 10 ++++------
 5 files changed, 18 insertions(+), 10 deletions(-)

diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 97e538a8c1be..f36ccb2d43ec 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -437,8 +437,10 @@ int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
  *
  * Walks bits set in mask write protects the associated pte's. Caller must
  * acquire @kvm->mmu_lock.
+ *
+ * Returns:	Whether the caller needs to flush the TLB.
  */
-void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
+bool kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 		struct kvm_memory_slot *slot,
 		gfn_t gfn_offset, unsigned long mask)
 {
@@ -447,6 +449,7 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 	gfn_t end = base_gfn + __fls(mask);
 
 	kvm_mips_mkclean_gpa_pt(kvm, start, end);
+	return true;
 }
 
 /*
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 6d4f7dfeaa57..9d8ee6ea02db 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1676,8 +1676,10 @@ EXPORT_SYMBOL_GPL(kvm_mmu_clear_dirty_pt_masked);
  *
  * Used when we do not need to care about huge page mappings: e.g. during dirty
  * logging we do not have any such mappings.
+ *
+ * Returns whether the caller needs to flush the TLB.
  */
-void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
+bool kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 				struct kvm_memory_slot *slot,
 				gfn_t gfn_offset, unsigned long mask)
 {
@@ -1686,6 +1688,8 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 				mask);
 	else
 		kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
+
+	return true;
 }
 
 /**
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index c38cc5eb7e73..e86b8c38342b 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -759,7 +759,7 @@ int kvm_get_dirty_log_protect(struct kvm *kvm,
 int kvm_clear_dirty_log_protect(struct kvm *kvm,
 				struct kvm_clear_dirty_log *log, bool *flush);
 
-void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
+bool kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 					struct kvm_memory_slot *slot,
 					gfn_t gfn_offset,
 					unsigned long mask);
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 3053bf2584f8..232007ff3208 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1564,12 +1564,15 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
  *
  * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
  * enable dirty logging for them.
+ *
+ * Returns whether the caller needs to flush the TLB.
  */
-void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
+bool kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 		struct kvm_memory_slot *slot,
 		gfn_t gfn_offset, unsigned long mask)
 {
 	kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
+	return true;
 }
 
 static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index e75dbb15fd09..bcbe059d98be 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1202,13 +1202,12 @@ int kvm_get_dirty_log_protect(struct kvm *kvm,
 		if (!dirty_bitmap[i])
 			continue;
 
-		*flush = true;
 		mask = xchg(&dirty_bitmap[i], 0);
 		dirty_bitmap_buffer[i] = mask;
 
 		offset = i * BITS_PER_LONG;
-		kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
-							offset, mask);
+		*flush |= kvm_arch_mmu_enable_log_dirty_pt_masked(kvm,
+				memslot, offset, mask);
 	}
 	spin_unlock(&kvm->mmu_lock);
 }
@@ -1275,9 +1274,8 @@ int kvm_clear_dirty_log_protect(struct kvm *kvm,
 		 * a problem if userspace sets them in log->dirty_bitmap.
 		 */
 		if (mask) {
-			*flush = true;
-			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
-								offset, mask);
+			*flush |= kvm_arch_mmu_enable_log_dirty_pt_masked(kvm,
+					memslot, offset, mask);
 		}
 	}
 	spin_unlock(&kvm->mmu_lock);
-- 
2.14.4