On Thu, Apr 07, 2022, Vitaly Kuznetsov wrote:
> Currently, HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST{,EX} calls are handled
> the exact same way as HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE{,EX}: by
> flushing the whole VPID and this is sub-optimal. Switch to handling
> these requests with 'flush_tlb_gva()' hooks instead. Use the newly
> introduced TLB flush ring to queue the requests.
> 
> Signed-off-by: Vitaly Kuznetsov <vkuznets@xxxxxxxxxx>
> ---
>  arch/x86/kvm/hyperv.c | 141 ++++++++++++++++++++++++++++++++++++------
>  1 file changed, 121 insertions(+), 20 deletions(-)
> 
> diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
> index 81c44e0eadf9..a54d41656f30 100644
> --- a/arch/x86/kvm/hyperv.c
> +++ b/arch/x86/kvm/hyperv.c
> @@ -1792,6 +1792,35 @@ static u64 kvm_get_sparse_vp_set(struct kvm *kvm, struct kvm_hv_hcall *hc,
>  					    var_cnt * sizeof(*sparse_banks));
>  }
>  
> +static int kvm_hv_get_tlbflush_entries(struct kvm *kvm, struct kvm_hv_hcall *hc, u64 entries[],
> +				       u32 data_offset, int consumed_xmm_halves)

data_offset should be gpa_t, and the order of params should be consistent between
this and kvm_get_sparse_vp_set().

> +{
> +	int i;
> +
> +	if (hc->fast) {
> +		/*
> +		 * Each XMM holds two entries, but do not count halves that
> +		 * have already been consumed.
> +		 */
> +		if (hc->rep_cnt > (2 * HV_HYPERCALL_MAX_XMM_REGISTERS - consumed_xmm_halves))
> +			return -EINVAL;
> +
> +		for (i = 0; i < hc->rep_cnt; i++) {
> +			int j = i + consumed_xmm_halves;
> +
> +			if (j % 2)
> +				entries[i] = sse128_hi(hc->xmm[j / 2]);
> +			else
> +				entries[i] = sse128_lo(hc->xmm[j / 2]);
> +		}
> +
> +		return 0;
> +	}
> +
> +	return kvm_read_guest(kvm, hc->ingpa + data_offset,
> +			      entries, hc->rep_cnt * sizeof(entries[0]));

This is almost verbatim copy+pasted from kvm_get_sparse_vp_set().  If you slot in
the attached patch before this, then this function becomes:

	static int kvm_hv_get_tlbflush_entries(struct kvm *kvm, struct kvm_hv_hcall *hc,
					       u64 entries[], int consumed_xmm_halves,
					       gpa_t offset)
	{
		return kvm_hv_get_hc_data(kvm, hc, hc->rep_cnt, hc->rep_cnt,
					  entries, consumed_xmm_halves, offset);
	}

> +}

...

> @@ -1840,15 +1891,47 @@ void kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
>  {
>  	struct kvm_vcpu_hv_tlbflush_ring *tlb_flush_ring;
>  	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
> -
> -	kvm_vcpu_flush_tlb_guest(vcpu);
> -
> -	if (!hv_vcpu)
> +	struct kvm_vcpu_hv_tlbflush_entry *entry;
> +	int read_idx, write_idx;
> +	u64 address;
> +	u32 count;
> +	int i, j;
> +
> +	if (!tdp_enabled || !hv_vcpu) {
> +		kvm_vcpu_flush_tlb_guest(vcpu);
>  		return;
> +	}
>  
>  	tlb_flush_ring = &hv_vcpu->tlb_flush_ring;
> +	read_idx = READ_ONCE(tlb_flush_ring->read_idx);
> +	write_idx = READ_ONCE(tlb_flush_ring->write_idx);
> +
> +	/* Pairs with smp_wmb() in hv_tlb_flush_ring_enqueue() */
> +	smp_rmb();
>  
> -	tlb_flush_ring->read_idx = tlb_flush_ring->write_idx;
> +	for (i = read_idx; i != write_idx; i = (i + 1) % KVM_HV_TLB_FLUSH_RING_SIZE) {
> +		entry = &tlb_flush_ring->entries[i];
> +
> +		if (entry->flush_all)
> +			goto out_flush_all;
> +
> +		/*
> +		 * Lower 12 bits of 'address' encode the number of additional
> +		 * pages to flush.
> +		 */
> +		address = entry->addr & PAGE_MASK;
> +		count = (entry->addr & ~PAGE_MASK) + 1;
> +		for (j = 0; j < count; j++)
> +			static_call(kvm_x86_flush_tlb_gva)(vcpu, address + j * PAGE_SIZE);
> +	}
> +	++vcpu->stat.tlb_flush;
> +	goto out_empty_ring;
> +
> +out_flush_all:
> +	kvm_vcpu_flush_tlb_guest(vcpu);
> +
> +out_empty_ring:
> +	tlb_flush_ring->read_idx = write_idx;
>  }
>  
>  static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
> @@ -1857,12 +1940,13 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
>  	struct hv_tlb_flush_ex flush_ex;
>  	struct hv_tlb_flush flush;
>  	DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);
> +	u64 entries[KVM_HV_TLB_FLUSH_RING_SIZE - 2];

What's up with the -2?  And given the multitude of things going on in this code,
I'd strongly prefer this be tlbflush_entries.

Actually, if you do:

	u64 __tlbflush_entries[KVM_HV_TLB_FLUSH_RING_SIZE - 2];
	u64 *tlbflush_entries;

and drop all_addr, the code to get entries can be

	if (hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE ||
	    hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX ||
	    hc->rep_cnt > ARRAY_SIZE(__tlbflush_entries)) {
		tlbflush_entries = NULL;
	} else {
		if (kvm_hv_get_tlbflush_entries(kvm, hc, __tlbflush_entries,
						consumed_xmm_halves, data_offset))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;
		tlbflush_entries = __tlbflush_entries;
	}

and the calls to queue flushes become

	hv_tlb_flush_ring_enqueue(v, tlbflush_entries, hc->rep_cnt);

That way a bug will "just" be a NULL pointer dereference and not consumption of
uninitialized data (though such a bug might be caught by the compiler).

>  	u64 valid_bank_mask;
>  	u64 sparse_banks[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
>  	struct kvm_vcpu *v;
>  	unsigned long i;
> -	bool all_cpus;
> -
> +	bool all_cpus, all_addr;
> +	int data_offset = 0, consumed_xmm_halves = 0;

data_offset should be a gpa_t.

>  	/*
>  	 * The Hyper-V TLFS doesn't allow more than 64 sparse banks, e.g. the
>  	 * valid mask is a u64.  Fail the build if KVM's max allowed number of

...

> +read_flush_entries:
> +	if (hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE ||
> +	    hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX ||
> +	    hc->rep_cnt > (KVM_HV_TLB_FLUSH_RING_SIZE - 2)) {

Rather than duplicate the -2 magic, it's far better to do:

	    hc->rep_cnt > ARRAY_SIZE(entries)) {

> +		all_addr = true;
> +	} else {
> +		if (kvm_hv_get_tlbflush_entries(kvm, hc, entries,
> +						data_offset, consumed_xmm_halves))

As mentioned, the order for this call should match kvm_get_sparse_vp_set().

>  			return HV_STATUS_INVALID_HYPERCALL_INPUT;
> +		all_addr = false;
>  	}
>  
> -do_flush:
> +
>  	/*
>  	 * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
>  	 * analyze it here, flush TLB regardless of the specified address space.
>  	 */
>  	if (all_cpus) {
>  		kvm_for_each_vcpu(i, v, kvm)
> -			hv_tlb_flush_ring_enqueue(v);
> +			hv_tlb_flush_ring_enqueue(v, all_addr, entries, hc->rep_cnt);
>  
>  		kvm_make_all_cpus_request(kvm, KVM_REQ_HV_TLB_FLUSH);
>  	} else {
> @@ -1951,7 +2052,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
>  			v = kvm_get_vcpu(kvm, i);
>  			if (!v)
>  				continue;
> -			hv_tlb_flush_ring_enqueue(v);
> +			hv_tlb_flush_ring_enqueue(v, all_addr, entries, hc->rep_cnt);
>  		}
>  
>  		kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH, vcpu_mask);
> -- 
> 2.35.1
> 
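As a side note for readers following along, the entry encoding the ring consumer
relies on (lower 12 bits of an entry hold the number of *additional* pages to
flush, the rest is the page-aligned GVA) can be illustrated with a minimal
user-space sketch.  This is not KVM code: flush_one_page(), make_entry() and
consume_entry() are made-up helpers standing in for the kvm_x86_flush_tlb_gva()
static call and the loop in kvm_hv_vcpu_flush_tlb().

	/* Standalone sketch of the addr/count packing, not KVM code. */
	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1ULL << PAGE_SHIFT)
	#define PAGE_MASK	(~(PAGE_SIZE - 1))

	/* Stand-in for the per-GVA flush hook. */
	static void flush_one_page(uint64_t gva)
	{
		printf("flush GVA 0x%llx\n", (unsigned long long)gva);
	}

	/* Pack a GVA and a count of extra pages into one ring entry. */
	static uint64_t make_entry(uint64_t gva, uint32_t extra_pages)
	{
		return (gva & PAGE_MASK) | (extra_pages & ~PAGE_MASK);
	}

	/* Consume one entry the same way the ring consumer above does. */
	static void consume_entry(uint64_t entry)
	{
		uint64_t address = entry & PAGE_MASK;
		uint32_t count = (entry & ~PAGE_MASK) + 1;
		uint32_t j;

		for (j = 0; j < count; j++)
			flush_one_page(address + j * PAGE_SIZE);
	}

	int main(void)
	{
		/* One entry covering three pages starting at 0x7f0000001000. */
		consume_entry(make_entry(0x7f0000001000ULL, 2));
		return 0;
	}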
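The "NULL pointer instead of a possibly-uninitialized buffer" suggestion above can
likewise be shown in isolation.  Again a sketch only, with invented names (enqueue()
stands in for hv_tlb_flush_ring_enqueue()); the point is that the "flush everything"
case hands consumers a NULL list, so a buggy dereference fails loudly instead of
silently reading garbage off the stack.

	/* Standalone sketch of the NULL-vs-uninitialized-buffer pattern, not KVM code. */
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	#define MAX_ENTRIES 16

	static void enqueue(const uint64_t *entries, unsigned int count)
	{
		if (!entries) {			/* "flush all" request */
			puts("queue flush-all entry");
			return;
		}
		for (unsigned int i = 0; i < count; i++)
			printf("queue flush of 0x%llx\n", (unsigned long long)entries[i]);
	}

	int main(void)
	{
		uint64_t __entries[MAX_ENTRIES];
		const uint64_t *entries;
		unsigned int rep_cnt = 2;
		int flush_all = 0;		/* pretend the hypercall listed addresses */

		if (flush_all || rep_cnt > MAX_ENTRIES) {
			entries = NULL;
		} else {
			__entries[0] = 0x1000;
			__entries[1] = 0x2000;
			entries = __entries;
		}

		enqueue(entries, rep_cnt);
		return 0;
	}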