Now that the int_ctl field is stored in svm->nested.int_ctl, we can use
it instead of vcpu->arch.hflags to check whether L2 is running with
V_INTR_MASKING set, and the HF_VINTR_MASK flag can be removed.

Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
---
 arch/x86/include/asm/kvm_host.h |  1 -
 arch/x86/kvm/svm/nested.c       | 12 ++++--------
 arch/x86/kvm/svm/svm.c          |  2 +-
 arch/x86/kvm/svm/svm.h          |  4 +++-
 4 files changed, 8 insertions(+), 11 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index fd78bd44b2d6..6c8417d01bf9 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1594,7 +1594,6 @@ enum {
 
 #define HF_GIF_MASK		(1 << 0)
 #define HF_HIF_MASK		(1 << 1)
-#define HF_VINTR_MASK		(1 << 2)
 #define HF_NMI_MASK		(1 << 3)
 #define HF_IRET_MASK		(1 << 4)
 #define HF_GUEST_MASK		(1 << 5) /* VCPU is in guest-mode */
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 54be341322d8..e3338aa8b0a3 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -116,13 +116,13 @@ void recalc_intercepts(struct vcpu_svm *svm)
 	c->intercept_exceptions = h->intercept_exceptions;
 	c->intercept = h->intercept;
 
-	if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
+	if (svm->nested.int_ctl & V_INTR_MASKING_MASK) {
 		/* We only want the cr8 intercept bits of L1 */
 		c->intercept_cr &= ~(1U << INTERCEPT_CR8_READ);
 		c->intercept_cr &= ~(1U << INTERCEPT_CR8_WRITE);
 
 		/*
-		 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
+		 * Once running L2 with V_INTR_MASKING set, EFLAGS.IF does not
 		 * affect any interrupt we may want to inject; therefore,
 		 * interrupt window vmexits are irrelevant to L0.
 		 */
@@ -297,10 +297,6 @@ static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
 	kvm_mmu_reset_context(&svm->vcpu);
 
 	svm_flush_tlb(&svm->vcpu);
-	if (svm->nested.int_ctl & V_INTR_MASKING_MASK)
-		svm->vcpu.arch.hflags |= HF_VINTR_MASK;
-	else
-		svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
 
 	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset;
 
@@ -553,8 +549,8 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
 	nested_vmcb->control.pause_filter_thresh =
 		svm->vmcb->control.pause_filter_thresh;
 
-	/* We always set V_INTR_MASKING and remember the old value in hflags */
-	if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
+	/* We always set V_INTR_MASKING and remember the old value in svm->nested */
+	if (!(svm->nested.int_ctl & V_INTR_MASKING_MASK))
 		nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
 
 	/* Restore the original control entries */
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 2b63d15328ba..95d16aa76ebb 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3096,7 +3096,7 @@ bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
 
 	if (is_guest_mode(vcpu)) {
 		/* As long as interrupts are being delivered...  */
-		if ((svm->vcpu.arch.hflags & HF_VINTR_MASK)
+		if ((svm->nested.int_ctl & V_INTR_MASKING_MASK)
 		    ? !(svm->vcpu.arch.hflags & HF_HIF_MASK)
 		    : !(kvm_get_rflags(vcpu) & X86_EFLAGS_IF))
 			return true;
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 5cabed9c733a..39706aa845f2 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -388,7 +388,9 @@ bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
 
 static inline bool svm_nested_virtualize_tpr(struct kvm_vcpu *vcpu)
 {
-	return is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK);
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	return is_guest_mode(vcpu) && (svm->nested.int_ctl & V_INTR_MASKING_MASK);
 }
 
 static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
-- 
2.18.2
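
For readers following along outside the kernel tree, here is a minimal
userspace sketch of the check this patch switches to. It is an
illustration only, not kernel code: fake_nested_state and
nested_vintr_masking are made-up stand-ins for the relevant slice of
KVM's struct vcpu_svm, and only the V_INTR_MASKING_MASK value (bit 24
of the VMCB's int_ctl field, per the AMD APM) matches the real headers.

	/* Standalone model of the V_INTR_MASKING test; compile with cc. */
	#include <stdbool.h>
	#include <stdio.h>

	#define V_INTR_MASKING_MASK	(1U << 24)	/* bit 24 of int_ctl */

	struct fake_nested_state {
		unsigned int int_ctl;	/* int_ctl cached from vmcb12 at VMRUN */
	};

	/* Replacement for the old HF_VINTR_MASK test: is L1 virtualizing
	 * interrupt masking for L2? */
	static bool nested_vintr_masking(const struct fake_nested_state *nested)
	{
		return nested->int_ctl & V_INTR_MASKING_MASK;
	}

	int main(void)
	{
		struct fake_nested_state nested = {
			.int_ctl = V_INTR_MASKING_MASK,
		};

		printf("V_INTR_MASKING for L2: %s\n",
		       nested_vintr_masking(&nested) ? "set" : "clear");
		return 0;
	}

The design point is that int_ctl is already cached from the nested VMCB
at VMRUN, so mirroring one of its bits into vcpu->arch.hflags was
redundant state; reading the cached field directly removes the risk of
the two copies drifting apart.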