From: Lan Tianyu <Tianyu.Lan@xxxxxxxxxxxxx>

Populate the ranges on the flush list into struct
hv_guest_mapping_flush_list when a flush list is available in
struct kvm_tlb_range.

Signed-off-by: Lan Tianyu <Tianyu.Lan@microsoft.com>
---
Change since v2:
       - Fix calculation of flush pages in kvm_fill_hv_flush_list_func()
---
 arch/x86/include/asm/kvm_host.h |  7 +++++++
 arch/x86/kvm/vmx/vmx.c          | 18 ++++++++++++++++--
 2 files changed, 23 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 875ae7256608..9fc9dd0c92cb 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -318,6 +318,12 @@ struct kvm_rmap_head {
 
 struct kvm_mmu_page {
 	struct list_head link;
+
+	/*
+	 * TLB flush with range list uses struct kvm_mmu_page as list entry
+	 * and all list operations should be under protection of mmu_lock.
+	 */
+	struct hlist_node flush_link;
 	struct hlist_node hash_link;
 	bool unsync;
 	bool mmio_cached;
@@ -441,6 +447,7 @@ struct kvm_mmu {
 struct kvm_tlb_range {
 	u64 start_gfn;
 	u64 pages;
+	struct hlist_head *flush_list;
 };
 
 enum pmc_type {
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 77b5379e3655..85139d318490 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -432,9 +432,23 @@ static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush
 		void *data)
 {
 	struct kvm_tlb_range *range = data;
+	struct kvm_mmu_page *sp;
 
-	return hyperv_fill_flush_guest_mapping_list(flush, 0, range->start_gfn,
-			range->pages);
+	if (!range->flush_list) {
+		return hyperv_fill_flush_guest_mapping_list(flush,
+				0, range->start_gfn, range->pages);
+	} else {
+		int offset = 0;
+
+		hlist_for_each_entry(sp, range->flush_list, flush_link) {
+			int pages = KVM_PAGES_PER_HPAGE(sp->role.level + 1);
+
+			offset = hyperv_fill_flush_guest_mapping_list(flush,
+					offset, sp->gfn, pages);
+		}
+
+		return offset;
+	}
 }
 
 static inline int __hv_remote_flush_tlb_with_range(struct kvm *kvm,
-- 
2.14.4
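
For reference, a rough sketch of how a caller might build and consume
the new flush list. This is illustrative only and not part of the
patch: the helper name kvm_flush_sp_list() is hypothetical, and
kvm_flush_remote_tlbs_with_range() is assumed from the surrounding
KVM range-flush series.

	/*
	 * Hypothetical caller: collect shadow pages on a local hlist and
	 * hand it to the range-flush path via kvm_tlb_range.flush_list.
	 * The list links live in struct kvm_mmu_page (flush_link), so no
	 * allocation is needed; mmu_lock must be held while the list is
	 * in use.
	 */
	static void kvm_flush_sp_list(struct kvm *kvm,
				      struct kvm_mmu_page **sps, int nr)
	{
		struct kvm_tlb_range range;
		HLIST_HEAD(flush_list);
		int i;

		for (i = 0; i < nr; i++)
			hlist_add_head(&sps[i]->flush_link, &flush_list);

		/*
		 * With flush_list set, kvm_fill_hv_flush_list_func()
		 * walks the list and ignores start_gfn/pages.
		 */
		range.flush_list = &flush_list;
		kvm_flush_remote_tlbs_with_range(kvm, &range);
	}

With this shape, each list entry contributes
KVM_PAGES_PER_HPAGE(sp->role.level + 1) pages to the hypercall list,
i.e. the full GFN range mapped by that shadow page table.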