From: Cathy Avery <cavery@xxxxxxxxxx>

This patch moves the asid_generation from the vcpu to the vmcb
in order to track the ASID generation that was active the last
time the vmcb was run. If sd->asid_generation changes between
two runs, the old ASID is invalid and must be changed.

Suggested-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
Signed-off-by: Cathy Avery <cavery@xxxxxxxxxx>
Message-Id: <20210112164313.4204-3-cavery@xxxxxxxxxx>
Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
---
 arch/x86/kvm/svm/svm.c | 21 +++++++--------------
 arch/x86/kvm/svm/svm.h |  2 +-
 2 files changed, 8 insertions(+), 15 deletions(-)

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index c35285c926e0..aa1baf646ff0 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1227,7 +1227,7 @@ static void init_vmcb(struct vcpu_svm *svm)
 		save->cr3 = 0;
 		save->cr4 = 0;
 	}
-	svm->asid_generation = 0;
+	svm->current_vmcb->asid_generation = 0;
 	svm->asid = 0;
 
 	svm->nested.vmcb12_gpa = 0;
@@ -1309,13 +1309,6 @@ void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb)
 	svm->vmcb = target_vmcb->ptr;
 	svm->vmcb_pa = target_vmcb->pa;
 
-	/*
-	 * Workaround: we don't yet track the ASID generation
-	 * that was active the last time target_vmcb was run.
-	 */
-
-	svm->asid_generation = 0;
-
 	/*
 	 * Track the physical CPU the target_vmcb is running on
 	 * in order to mark the VMCB dirty if the cpu changes at
@@ -1382,7 +1375,6 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
 	if (vmsa_page)
 		svm->vmsa = page_address(vmsa_page);
 
-	svm->asid_generation = 0;
 	svm->guest_state_loaded = false;
 
 	svm_switch_vmcb(svm, &svm->vmcb01);
@@ -1864,7 +1856,7 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
 		vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
 	}
 
-	svm->asid_generation = sd->asid_generation;
+	svm->current_vmcb->asid_generation = sd->asid_generation;
 	svm->asid = sd->next_asid++;
 }
 
@@ -3432,10 +3424,11 @@ static void pre_svm_run(struct vcpu_svm *svm)
 	/*
 	 * If the previous vmrun of the vmcb occurred on
 	 * a different physical cpu then we must mark the vmcb dirty.
-	 */
+	 * and assign a new asid.
+	 */
 	if (unlikely(svm->current_vmcb->cpu != svm->vcpu.cpu)) {
-		svm->asid_generation = 0;
+		svm->current_vmcb->asid_generation = 0;
 		vmcb_mark_all_dirty(svm->vmcb);
 		svm->current_vmcb->cpu = svm->vcpu.cpu;
 	}
@@ -3444,7 +3437,7 @@ static void pre_svm_run(struct vcpu_svm *svm)
 		return pre_sev_run(svm, svm->vcpu.cpu);
 
 	/* FIXME: handle wraparound of asid_generation */
-	if (svm->asid_generation != sd->asid_generation)
+	if (svm->current_vmcb->asid_generation != sd->asid_generation)
 		new_asid(svm, sd);
 }
 
@@ -3668,7 +3661,7 @@ void svm_flush_tlb(struct kvm_vcpu *vcpu)
 	if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
 		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
 	else
-		svm->asid_generation--;
+		svm->current_vmcb->asid_generation--;
 }
 
 static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index a37281097751..993155195212 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -85,6 +85,7 @@ struct kvm_vmcb_info {
 	struct vmcb *ptr;
 	unsigned long pa;
 	int cpu;
+	uint64_t asid_generation;
 };
 
 struct svm_nested_state {
@@ -114,7 +115,6 @@ struct vcpu_svm {
 	struct kvm_vmcb_info *current_vmcb;
 	struct svm_cpu_data *svm_data;
 	u32 asid;
-	uint64_t asid_generation;
 	uint64_t sysenter_esp;
 	uint64_t sysenter_eip;
 	uint64_t tsc_aux;
-- 
2.26.2