On 9/4/24 05:07, Rick Edgecombe wrote:
+static void vt_flush_tlb_all(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * TDX calls tdx_track() in tdx_sept_remove_private_spte() to ensure
+	 * private EPT will be flushed on the next TD enter.
+	 * No need to call tdx_track() here again even when this callback is as
+	 * a result of zapping private EPT.
+	 * Just invoke invept() directly here to work for both shared EPT and
+	 * private EPT.
+	 */
+	if (is_td_vcpu(vcpu)) {
+		ept_sync_global();
+		return;
+	}
+
+	vmx_flush_tlb_all(vcpu);
+}
+
+static void vt_flush_tlb_current(struct kvm_vcpu *vcpu)
+{
+	if (is_td_vcpu(vcpu)) {
+		tdx_flush_tlb_current(vcpu);
+		return;
+	}
+
+	vmx_flush_tlb_current(vcpu);
+}
+
I'd do it slightly differently:

static void vt_flush_tlb_all(struct kvm_vcpu *vcpu)
{
	if (is_td_vcpu(vcpu)) {
		tdx_flush_tlb_all(vcpu);
		return;
	}

	vmx_flush_tlb_all(vcpu);
}

static void vt_flush_tlb_current(struct kvm_vcpu *vcpu)
{
	if (is_td_vcpu(vcpu)) {
		/*
		 * flush_tlb_current() is used only the first time the vcpu
		 * runs, since TDX supports neither shadow nested paging nor
		 * SMM.  Keep this function simple.
		 */
		tdx_flush_tlb_all(vcpu);
		return;
	}

	vmx_flush_tlb_current(vcpu);
}

and put the implementation details close to tdx_track():

void tdx_flush_tlb_all(struct kvm_vcpu *vcpu)
{
	/*
	 * TDX calls tdx_track() in tdx_sept_remove_private_spte() to
	 * ensure private EPT will be flushed on the next TD enter.
	 * No need to call tdx_track() here again, even when this
	 * callback is a result of zapping private EPT.  Just
	 * invoke invept() directly here, which works for both shared
	 * EPT and private EPT.
	 */
	ept_sync_global();
}
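
For reference, the tdx_track() that both comments rely on is not visible in
this hunk.  A rough sketch of the idea, based on the in-flight TDX MMU
patches (the SEAMCALL wrapper tdh_mem_track(), to_kvm_tdx() and the tdr_pa
field are taken from that series and may not match the final code): issue
TDH.MEM.TRACK to bump the TD's TLB tracking epoch, then kick every vCPU out
of guest mode so the TDX module flushes the private EPT translations on the
next TD enter.

/* Sketch only -- wrapper and field names are from the in-flight TDX series. */
static void tdx_track(struct kvm *kvm)
{
	struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
	u64 err;

	lockdep_assert_held_write(&kvm->mmu_lock);

	/* TDH.MEM.TRACK increments the TD's TLB tracking epoch. */
	err = tdh_mem_track(kvm_tdx->tdr_pa);
	if (KVM_BUG_ON(err, kvm))
		return;

	/*
	 * Kick vCPUs out of guest mode; with the epoch bumped, the TDX
	 * module flushes the private EPT TLB on the next TD enter, which
	 * is why tdx_flush_tlb_all() above only needs invept() for the
	 * shared EPT side.
	 */
	kvm_make_all_cpus_request(kvm, KVM_REQ_OUTSIDE_GUEST_MODE);
}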