Shadow EPT invalidation is required to emulate INVEPT. As a VM has only one
shadow EPT, the shadow EPT must also be invalidated when the vEPTP changes.
The invalidation is implemented by simply destroying all existing mappings in
the corresponding shadow EPT without freeing the root page.

Signed-off-by: Chuanxiao Dong <chuanxiao.dong@xxxxxxxxx>
Signed-off-by: Jason Chen CJ <jason.cj.chen@xxxxxxxxx>
---
 arch/x86/kvm/vmx/pkvm/hyp/ept.c      | 18 ++++++++++++++++++
 arch/x86/kvm/vmx/pkvm/hyp/ept.h      |  1 +
 arch/x86/kvm/vmx/pkvm/hyp/nested.c   | 12 ++++++++++++
 arch/x86/kvm/vmx/pkvm/hyp/pkvm_hyp.h |  3 +++
 4 files changed, 34 insertions(+)

diff --git a/arch/x86/kvm/vmx/pkvm/hyp/ept.c b/arch/x86/kvm/vmx/pkvm/hyp/ept.c
index a0793e4d02ef..de68f8c9eeb0 100644
--- a/arch/x86/kvm/vmx/pkvm/hyp/ept.c
+++ b/arch/x86/kvm/vmx/pkvm/hyp/ept.c
@@ -305,6 +305,24 @@ static struct pkvm_mm_ops shadow_ept_mm_ops = {
 	.flush_tlb = flush_tlb_noop,
 };
 
+void pkvm_invalidate_shadow_ept(struct shadow_ept_desc *desc)
+{
+	struct pkvm_shadow_vm *vm = sept_desc_to_shadow_vm(desc);
+	struct pkvm_pgtable *sept = &desc->sept;
+	unsigned long size = sept->pgt_ops->pgt_level_to_size(sept->level + 1);
+
+	pkvm_spin_lock(&vm->lock);
+
+	if (!is_valid_eptp(desc->shadow_eptp))
+		goto out;
+
+	pkvm_pgtable_unmap(sept, 0, size);
+
+	flush_ept(desc->shadow_eptp);
+out:
+	pkvm_spin_unlock(&vm->lock);
+}
+
 void pkvm_shadow_ept_deinit(struct shadow_ept_desc *desc)
 {
 	struct pkvm_pgtable *sept = &desc->sept;
diff --git a/arch/x86/kvm/vmx/pkvm/hyp/ept.h b/arch/x86/kvm/vmx/pkvm/hyp/ept.h
index 92a4f18535ea..f63538368746 100644
--- a/arch/x86/kvm/vmx/pkvm/hyp/ept.h
+++ b/arch/x86/kvm/vmx/pkvm/hyp/ept.h
@@ -32,6 +32,7 @@ void pkvm_guest_ept_init(struct shadow_vcpu_state *shadow_vcpu, u64 guest_eptp);
 void pkvm_guest_ept_deinit(struct shadow_vcpu_state *shadow_vcpu);
 enum sept_handle_ret pkvm_handle_shadow_ept_violation(struct shadow_vcpu_state *shadow_vcpu,
 						      u64 l2_gpa, u64 exit_quali);
+void pkvm_invalidate_shadow_ept(struct shadow_ept_desc *desc);
 
 static inline bool is_valid_eptp(u64 eptp)
 {
diff --git a/arch/x86/kvm/vmx/pkvm/hyp/nested.c b/arch/x86/kvm/vmx/pkvm/hyp/nested.c
index 22c161100145..8b9202ecafff 100644
--- a/arch/x86/kvm/vmx/pkvm/hyp/nested.c
+++ b/arch/x86/kvm/vmx/pkvm/hyp/nested.c
@@ -708,6 +708,8 @@ static void nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 static void setup_guest_ept(struct shadow_vcpu_state *shadow_vcpu, u64 guest_eptp)
 {
 	struct vmcs12 *vmcs12 = (struct vmcs12 *)shadow_vcpu->cached_vmcs12;
+	struct pkvm_shadow_vm *vm = shadow_vcpu->vm;
+	bool invalidate = false;
 
 	if (!is_valid_eptp(guest_eptp))
 		pkvm_guest_ept_deinit(shadow_vcpu);
@@ -715,6 +717,16 @@ static void setup_guest_ept(struct shadow_vcpu_state *shadow_vcpu, u64 guest_ept
 		pkvm_guest_ept_deinit(shadow_vcpu);
 		pkvm_guest_ept_init(shadow_vcpu, guest_eptp);
 	}
+
+	pkvm_spin_lock(&vm->lock);
+	if (vm->sept_desc.last_guest_eptp != guest_eptp) {
+		vm->sept_desc.last_guest_eptp = guest_eptp;
+		invalidate = true;
+	}
+	pkvm_spin_unlock(&vm->lock);
+
+	if (invalidate)
+		pkvm_invalidate_shadow_ept(&vm->sept_desc);
 }
 
 int handle_vmxon(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx/pkvm/hyp/pkvm_hyp.h b/arch/x86/kvm/vmx/pkvm/hyp/pkvm_hyp.h
index f891660d9085..cc7ec8505a98 100644
--- a/arch/x86/kvm/vmx/pkvm/hyp/pkvm_hyp.h
+++ b/arch/x86/kvm/vmx/pkvm/hyp/pkvm_hyp.h
@@ -15,6 +15,9 @@ struct shadow_ept_desc {
 	/* shadow EPTP value configured by pkvm */
 	u64 shadow_eptp;
 
+	/* Save the last guest EPTP value configured by kvm high */
+	u64 last_guest_eptp;
+
 	struct pkvm_pgtable sept;
 };
 
-- 
2.25.1
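
Usage note (not part of the patch): since the commit message says this helper exists
to emulate INVEPT, a later INVEPT exit handler would presumably just invalidate the
whole shadow EPT of the affected VM. The sketch below is only an illustration of how
the new API might be consumed; handle_invept() and to_pkvm_shadow_vm() are hypothetical
names that do not appear in this series.

/*
 * Hypothetical sketch, assuming a helper that resolves the shadow VM
 * from the vCPU. Only pkvm_invalidate_shadow_ept() is from this patch.
 */
int handle_invept(struct kvm_vcpu *vcpu)
{
	struct pkvm_shadow_vm *vm = to_pkvm_shadow_vm(vcpu);

	/*
	 * Whatever INVEPT type the guest requested, dropping all shadow
	 * mappings (without freeing the root) is a conservative but
	 * correct emulation, matching the invalidation done on vEPTP change.
	 */
	pkvm_invalidate_shadow_ept(&vm->sept_desc);

	return 0;
}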