From: Lan Tianyu <Tianyu.Lan@xxxxxxxxxxxxx> Populate ranges on the flush list into struct hv_guest_mapping_flush_list when a flush list is available in the struct kvm_tlb_range. Signed-off-by: Lan Tianyu <Tianyu.Lan@xxxxxxxxxxxxx> --- Change since v1: Make the flush list an "hlist" instead of a "list" in order to keep the size of struct kvm_mmu_page unchanged. arch/x86/include/asm/kvm_host.h | 7 +++++++ arch/x86/kvm/vmx/vmx.c | 18 ++++++++++++++++-- 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 49f449f56434..4a3d3e58fe0a 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -317,6 +317,12 @@ struct kvm_rmap_head { struct kvm_mmu_page { struct list_head link; + + /* + * Tlb flush with range list uses struct kvm_mmu_page as list entry + * and all list operations should be under protection of mmu_lock. + */ + struct hlist_node flush_link; struct hlist_node hash_link; bool unsync; @@ -443,6 +449,7 @@ struct kvm_mmu { struct kvm_tlb_range { u64 start_gfn; u64 pages; + struct hlist_head *flush_list; }; enum pmc_type { diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 9d954b4adce3..6452d0efd2cc 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -427,9 +427,23 @@ int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush, void *data) { struct kvm_tlb_range *range = data; + struct kvm_mmu_page *sp; - return hyperv_fill_flush_guest_mapping_list(flush, 0, range->start_gfn, - range->pages); + if (!range->flush_list) { + return hyperv_fill_flush_guest_mapping_list(flush, + 0, range->start_gfn, range->pages); + } else { + int offset = 0; + + hlist_for_each_entry(sp, range->flush_list, flush_link) { + int pages = KVM_PAGES_PER_HPAGE(sp->role.level); + + offset = hyperv_fill_flush_guest_mapping_list(flush, + offset, sp->gfn, pages); + } + + return offset; + } } static inline int __hv_remote_flush_tlb_with_range(struct kvm *kvm, -- 
2.14.4