From: Lan Tianyu <Tianyu.Lan@xxxxxxxxxxxxx>

This patch introduces a tlb flush interface that takes a list of ranges,
using struct kvm_mmu_page as the list entry, and uses the flush-list
function in kvm_mmu_commit_zap_page().

Signed-off-by: Lan Tianyu <Tianyu.Lan@xxxxxxxxxxxxx>
---
 arch/x86/kvm/mmu.c | 25 ++++++++++++++++++++++++-
 1 file changed, 24 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 70cafd3f95ab..d57574b49823 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -289,6 +289,20 @@ static void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
 
 	range.start_gfn = start_gfn;
 	range.pages = pages;
+	range.flush_list = NULL;
+
+	kvm_flush_remote_tlbs_with_range(kvm, &range);
+}
+
+static void kvm_flush_remote_tlbs_with_list(struct kvm *kvm,
+		struct hlist_head *flush_list)
+{
+	struct kvm_tlb_range range;
+
+	if (hlist_empty(flush_list))
+		return;
+
+	range.flush_list = flush_list;
 
 	kvm_flush_remote_tlbs_with_range(kvm, &range);
 }
@@ -2708,6 +2722,7 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 				    struct list_head *invalid_list)
 {
 	struct kvm_mmu_page *sp, *nsp;
+	HLIST_HEAD(flush_list);
 
 	if (list_empty(invalid_list))
 		return;
@@ -2721,7 +2736,15 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 	 * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit
 	 * guest mode and/or lockless shadow page table walks.
 	 */
-	kvm_flush_remote_tlbs(kvm);
+	if (kvm_available_flush_tlb_with_range()) {
+		list_for_each_entry(sp, invalid_list, link)
+			if (sp->last_level)
+				hlist_add_head(&sp->flush_link, &flush_list);
+
+		kvm_flush_remote_tlbs_with_list(kvm, &flush_list);
+	} else {
+		kvm_flush_remote_tlbs(kvm);
+	}
 
 	list_for_each_entry_safe(sp, nsp, invalid_list, link) {
 		WARN_ON(!sp->role.invalid || sp->root_count);
--
2.14.4
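
For context, the list-based flush path relies on struct kvm_tlb_range
carrying a flush_list pointer and on struct kvm_mmu_page carrying
flush_link and last_level members, which come from earlier patches in
this series. A minimal sketch of the assumed layout follows; the member
names are taken from the diff above, but the exact definitions and any
surrounding fields are illustrative, not the series' actual code:

struct kvm_tlb_range {
	u64 start_gfn;			/* first gfn of the range to flush */
	u64 pages;			/* number of pages in the range */
	struct hlist_head *flush_list;	/* shadow pages to flush, or NULL */
};

struct kvm_mmu_page {
	struct list_head link;		/* entry on active/invalid page lists */
	struct hlist_node flush_link;	/* entry on a TLB flush list */
	bool last_level;		/* maps guest pages directly, so its
					 * gfn range can be flushed precisely */
	/* ... */
};

With flush_list == NULL, kvm_flush_remote_tlbs_with_range() flushes the
single range [start_gfn, start_gfn + pages); with a non-NULL list it can
instead walk the chained shadow pages and flush the range each one
covers, avoiding a full remote TLB flush when only a few last-level
pages were zapped.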