Hyper-V provides HvFlushGuestAddressList() hypercall to flush EPT tlb
with specified ranges. This patch is to add the hypercall support.

Signed-off-by: Lan Tianyu <Tianyu.Lan@xxxxxxxxxxxxx>
---
 arch/x86/hyperv/nested.c           | 112 +++++++++++++++++++++++++++++++++++++
 arch/x86/include/asm/hyperv-tlfs.h |  17 ++++++
 arch/x86/include/asm/mshyperv.h    |   8 +++
 3 files changed, 137 insertions(+)

diff --git a/arch/x86/hyperv/nested.c b/arch/x86/hyperv/nested.c
index b8e60cc50461..bb716ed94320 100644
--- a/arch/x86/hyperv/nested.c
+++ b/arch/x86/hyperv/nested.c
@@ -7,15 +7,32 @@
  *
  * Author : Lan Tianyu <Tianyu.Lan@xxxxxxxxxxxxx>
  */
+#define pr_fmt(fmt)	"Hyper-V: " fmt
 
 #include <linux/types.h>
 #include <asm/hyperv-tlfs.h>
 #include <asm/mshyperv.h>
 #include <asm/tlbflush.h>
+#include <asm/kvm_host.h>
 
 #include <asm/trace/hyperv.h>
 
+/*
+ * MAX_FLUSH_PAGES = "additional_pages" + 1. It's limited
+ * by the bitwidth of "additional_pages" in union hv_gpa_page_range.
+ */
+#define MAX_FLUSH_PAGES (2048)
+
+/*
+ * All input flush parameters are in single page. The max flush count
+ * is equal to how many entries of union hv_gpa_page_range can be
+ * populated in the input parameter page. MAX_FLUSH_REP_COUNT
+ * = (4096 - 16) / 8. ("Page Size" - "Address Space" - "Flags") /
+ * "GPA Range".
+ */
+#define MAX_FLUSH_REP_COUNT (510)
+
 int hyperv_flush_guest_mapping(u64 as)
 {
 	struct hv_guest_mapping_flush **flush_pcpu;
@@ -54,3 +71,98 @@ int hyperv_flush_guest_mapping(u64 as)
 	return ret;
 }
 EXPORT_SYMBOL_GPL(hyperv_flush_guest_mapping);
+
+/*
+ * Populate gpa_list[] starting at @offset with entries covering
+ * [start_gfn, end_gfn]. Returns the new entry count on success, or
+ * -EINVAL / -ENOSPC on error. Callers must check for a negative
+ * return before reusing the result as an offset.
+ */
+static inline int fill_flush_list(union hv_gpa_page_range gpa_list[],
+		int offset, u64 start_gfn, u64 end_gfn)
+{
+	int gpa_n = offset;
+	u64 cur = start_gfn;
+	u64 pages = end_gfn - start_gfn + 1;
+	u64 additional_pages;
+
+	if (end_gfn < start_gfn)
+		return -EINVAL;
+
+	do {
+		if (gpa_n == MAX_FLUSH_REP_COUNT) {
+			pr_warn("Request exceeds HvFlushGuestList max flush count.\n");
+			return -ENOSPC;
+		}
+
+		if (pages > MAX_FLUSH_PAGES) {
+			additional_pages = MAX_FLUSH_PAGES - 1;
+			pages -= MAX_FLUSH_PAGES;
+		} else {
+			additional_pages = pages - 1;
+			pages = 0;
+		}
+
+		gpa_list[gpa_n].page.additional_pages = additional_pages;
+		gpa_list[gpa_n].page.largepage = false;
+		gpa_list[gpa_n].page.basepfn = cur;
+
+		cur += additional_pages + 1;
+		gpa_n++;
+	} while (pages > 0);
+
+	return gpa_n;
+}
+
+int hyperv_flush_guest_mapping_range(u64 as, struct kvm_tlb_range *range)
+{
+	struct kvm_mmu_page *sp;
+	struct hv_guest_mapping_flush_list **flush_pcpu;
+	struct hv_guest_mapping_flush_list *flush;
+	u64 status = 0;
+	unsigned long flags;
+	int ret = -ENOTSUPP;
+	int gpa_n = 0;
+
+	if (!hv_hypercall_pg)
+		goto fault;
+
+	local_irq_save(flags);
+
+	flush_pcpu = (struct hv_guest_mapping_flush_list **)
+		this_cpu_ptr(hyperv_pcpu_input_arg);
+
+	flush = *flush_pcpu;
+	if (unlikely(!flush)) {
+		local_irq_restore(flags);
+		goto fault;
+	}
+
+	flush->address_space = as;
+	flush->flags = 0;
+
+	if (!range->flush_list) {
+		gpa_n = fill_flush_list(flush->gpa_list, gpa_n,
+				range->start_gfn, range->end_gfn);
+	} else {
+		list_for_each_entry(sp, range->flush_list,
+				flush_link) {
+			u64 end_gfn = sp->gfn +
+				KVM_PAGES_PER_HPAGE(sp->role.level) - 1;
+			gpa_n = fill_flush_list(flush->gpa_list, gpa_n,
+					sp->gfn, end_gfn);
+			/*
+			 * Stop on error: a negative gpa_n must never be
+			 * passed back in as the array offset, or the next
+			 * iteration writes out of bounds at gpa_list[gpa_n].
+			 */
+			if (gpa_n < 0)
+				break;
+		}
+	}
+
+	if (gpa_n < 0) {
+		local_irq_restore(flags);
+		goto fault;
+	}
+
+	status = hv_do_rep_hypercall(HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST,
+				     gpa_n, 0, flush, NULL);
+
+	local_irq_restore(flags);
+
+	if (!(status & HV_HYPERCALL_RESULT_MASK))
+		ret = 0;
+fault:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(hyperv_flush_guest_mapping_range);
diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h
index e977b6b3a538..512f22b49999 100644
--- a/arch/x86/include/asm/hyperv-tlfs.h
+++ b/arch/x86/include/asm/hyperv-tlfs.h
@@ -353,6 +353,7 @@ struct hv_tsc_emulation_status {
 #define HVCALL_POST_MESSAGE			0x005c
 #define HVCALL_SIGNAL_EVENT			0x005d
 #define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE 0x00af
+#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST 0x00b0
 
 #define HV_X64_MSR_VP_ASSIST_PAGE_ENABLE	0x00000001
 #define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT	12
@@ -750,6 +751,22 @@ struct hv_guest_mapping_flush {
 	u64 flags;
 };
 
+/* HvFlushGuestPhysicalAddressList hypercall */
+union hv_gpa_page_range {
+	u64 address_space;
+	struct {
+		u64 additional_pages:11;
+		u64 largepage:1;
+		u64 basepfn:52;
+	} page;
+};
+
+struct hv_guest_mapping_flush_list {
+	u64 address_space;
+	u64 flags;
+	union hv_gpa_page_range gpa_list[];
+};
+
 /* HvFlushVirtualAddressSpace, HvFlushVirtualAddressList hypercalls */
 struct hv_tlb_flush {
 	u64 address_space;
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index f37704497d8f..da68574404bf 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -22,6 +22,8 @@ struct ms_hyperv_info {
 
 extern struct ms_hyperv_info ms_hyperv;
 
+struct kvm_tlb_range;
+
 /*
  * Generate the guest ID.
  */
@@ -348,6 +350,7 @@ void set_hv_tscchange_cb(void (*cb)(void));
 void clear_hv_tscchange_cb(void);
 void hyperv_stop_tsc_emulation(void);
 int hyperv_flush_guest_mapping(u64 as);
+int hyperv_flush_guest_mapping_range(u64 as, struct kvm_tlb_range *range);
 
 #ifdef CONFIG_X86_64
 void hv_apic_init(void);
@@ -368,6 +371,11 @@ static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
 	return NULL;
 }
 static inline int hyperv_flush_guest_mapping(u64 as) { return -1; }
+static inline int hyperv_flush_guest_mapping_range(u64 as,
+		struct kvm_tlb_range *range)
+{
+	return -1;
+}
 #endif /* CONFIG_HYPERV */
 
 #ifdef CONFIG_HYPERV_TSCPAGE
-- 
2.14.4

_______________________________________________
devel mailing list
devel@xxxxxxxxxxxxxxxxxxxxxx
http://driverdev.linuxdriverproject.org/mailman/listinfo/driverdev-devel