On Tue, Oct 04, 2022, Vitaly Kuznetsov wrote:
> int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
> {
> 	struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo;
> 	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
> +	u64 entries[KVM_HV_TLB_FLUSH_FIFO_SIZE];
> +	int i, j, count;
> +	gva_t gva;
>
> -	if (!hv_vcpu)
> +	if (!tdp_enabled || !hv_vcpu)
> 		return -EINVAL;
>
> 	tlb_flush_fifo = &hv_vcpu->tlb_flush_fifo;
>
> +	count = kfifo_out(&tlb_flush_fifo->entries, entries, KVM_HV_TLB_FLUSH_FIFO_SIZE);
> +
> +	for (i = 0; i < count; i++) {
> +		if (entries[i] == KVM_HV_TLB_FLUSHALL_ENTRY)
> +			goto out_flush_all;
> +
> +		/*
> +		 * Lower 12 bits of 'address' encode the number of additional
> +		 * pages to flush.
> +		 */
> +		gva = entries[i] & PAGE_MASK;
> +		for (j = 0; j < (entries[i] & ~PAGE_MASK) + 1; j++)
> +			static_call(kvm_x86_flush_tlb_gva)(vcpu, gva + j * PAGE_SIZE);
> +
> +		++vcpu->stat.tlb_flush;
> +	}
> +	return 0;
> +
> +out_flush_all:
> +	kvm_vcpu_flush_tlb_guest(vcpu);

No need to do kvm_vcpu_flush_tlb_guest() here, the caller is responsible
for flushing on "failure", as indicated by -ENOSPC below.

> 	kfifo_reset_out(&tlb_flush_fifo->entries);
>
> -	/* Precise flushing isn't implemented yet. */
> -	return -EOPNOTSUPP;
> +	/* Fall back to full flush. */
> +	return -ENOSPC;
> }