Modify intercept_exceptions to generic intercepts in vmcb_control_area. Use
the generic helpers __set_intercept, __clr_intercept and __is_intercept to
set, clear and test the intercept_exceptions bits.

Signed-off-by: Babu Moger <babu.moger@xxxxxxx>
---
 arch/x86/include/asm/svm.h | 22 +++++++++++++++++++++-
 arch/x86/kvm/svm/nested.c  | 12 +++++-------
 arch/x86/kvm/svm/svm.c     | 22 +++++++++++-----------
 arch/x86/kvm/svm/svm.h     |  4 ++--
 4 files changed, 39 insertions(+), 21 deletions(-)

diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index ffc89d8e4fcb..751a6deb64ef 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -3,6 +3,7 @@
 #define __SVM_H
 
 #include <uapi/asm/svm.h>
+#include <uapi/asm/kvm.h>
 
 /*
  * VMCB Control Area intercept bits starting
@@ -12,6 +13,7 @@
 enum vector_offset {
 	CR_VECTOR = 0,
 	DR_VECTOR,
+	EXCEPTION_VECTOR,
 	MAX_VECTORS,
 };
 
@@ -52,6 +54,25 @@ enum {
 	INTERCEPT_DR5_WRITE,
 	INTERCEPT_DR6_WRITE,
 	INTERCEPT_DR7_WRITE,
+	/* Byte offset 008h (Vector 2) */
+	INTERCEPT_DE_VECTOR = 64 + DE_VECTOR,
+	INTERCEPT_DB_VECTOR,
+	INTERCEPT_BP_VECTOR = 64 + BP_VECTOR,
+	INTERCEPT_OF_VECTOR,
+	INTERCEPT_BR_VECTOR,
+	INTERCEPT_UD_VECTOR,
+	INTERCEPT_NM_VECTOR,
+	INTERCEPT_DF_VECTOR,
+	INTERCEPT_TS_VECTOR = 64 + TS_VECTOR,
+	INTERCEPT_NP_VECTOR,
+	INTERCEPT_SS_VECTOR,
+	INTERCEPT_GP_VECTOR,
+	INTERCEPT_PF_VECTOR,
+	INTERCEPT_MF_VECTOR = 64 + MF_VECTOR,
+	INTERCEPT_AC_VECTOR,
+	INTERCEPT_MC_VECTOR,
+	INTERCEPT_XM_VECTOR,
+	INTERCEPT_VE_VECTOR,
 };
 
 enum {
@@ -107,7 +128,6 @@ enum {
 
 struct __attribute__ ((__packed__)) vmcb_control_area {
 	u32 intercepts[MAX_VECTORS];
-	u32 intercept_exceptions;
 	u64 intercept;
 	u8 reserved_1[40];
 	u16 pause_filter_thresh;
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 71ca89afb2a3..ee126d5d3348 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -116,12 +116,11 @@ void recalc_intercepts(struct vcpu_svm *svm)
 	h = &svm->nested.hsave->control;
 	g = &svm->nested.ctl;
 
-	svm->nested.host_intercept_exceptions = h->intercept_exceptions;
+	svm->nested.host_intercept_exceptions = h->intercepts[EXCEPTION_VECTOR];
 
 	for (i = 0; i < MAX_VECTORS; i++)
 		c->intercepts[i] = h->intercepts[i];
 
-	c->intercept_exceptions = h->intercept_exceptions;
 	c->intercept = h->intercept;
 
 	if (g->int_ctl & V_INTR_MASKING_MASK) {
@@ -143,7 +142,6 @@ void recalc_intercepts(struct vcpu_svm *svm)
 	for (i = 0; i < MAX_VECTORS; i++)
 		c->intercepts[i] |= g->intercepts[i];
 
-	c->intercept_exceptions |= g->intercept_exceptions;
 	c->intercept |= g->intercept;
 }
 
@@ -155,7 +153,6 @@ static void copy_vmcb_control_area(struct vmcb_control_area *dst,
 	for (i = 0; i < MAX_VECTORS; i++)
 		dst->intercepts[i] = from->intercepts[i];
 
-	dst->intercept_exceptions = from->intercept_exceptions;
 	dst->intercept = from->intercept;
 	dst->iopm_base_pa = from->iopm_base_pa;
 	dst->msrpm_base_pa = from->msrpm_base_pa;
@@ -438,7 +435,7 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
 
 	trace_kvm_nested_intercepts(nested_vmcb->control.intercepts[CR_VECTOR] & 0xffff,
 				    nested_vmcb->control.intercepts[CR_VECTOR] >> 16,
-				    nested_vmcb->control.intercept_exceptions,
+				    nested_vmcb->control.intercepts[EXCEPTION_VECTOR],
 				    nested_vmcb->control.intercept);
 
 	/* Clear internal status */
@@ -773,7 +770,7 @@ static bool nested_exit_on_exception(struct vcpu_svm *svm)
 {
 	unsigned int nr = svm->vcpu.arch.exception.nr;
 
-	return (svm->nested.ctl.intercept_exceptions & (1 << nr));
+	return (svm->nested.ctl.intercepts[EXCEPTION_VECTOR] & (1 << nr));
 }
 
 static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
@@ -922,7 +919,8 @@ int nested_svm_exit_special(struct vcpu_svm *svm)
 	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
 		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
 
-		if (get_host_vmcb(svm)->control.intercept_exceptions & excp_bits)
+		if (get_host_vmcb(svm)->control.intercepts[EXCEPTION_VECTOR] &
+		    excp_bits)
 			return NESTED_EXIT_HOST;
 		else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
 			 svm->vcpu.arch.apf.host_apf_flags)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 6d95025938d8..d4ac2c5bb365 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -988,11 +988,11 @@ static void init_vmcb(struct vcpu_svm *svm)
 
 	set_dr_intercepts(svm);
 
-	set_exception_intercept(svm, PF_VECTOR);
-	set_exception_intercept(svm, UD_VECTOR);
-	set_exception_intercept(svm, MC_VECTOR);
-	set_exception_intercept(svm, AC_VECTOR);
-	set_exception_intercept(svm, DB_VECTOR);
+	set_exception_intercept(svm, INTERCEPT_PF_VECTOR);
+	set_exception_intercept(svm, INTERCEPT_UD_VECTOR);
+	set_exception_intercept(svm, INTERCEPT_MC_VECTOR);
+	set_exception_intercept(svm, INTERCEPT_AC_VECTOR);
+	set_exception_intercept(svm, INTERCEPT_DB_VECTOR);
 	/*
 	 * Guest access to VMware backdoor ports could legitimately
 	 * trigger #GP because of TSS I/O permission bitmap.
@@ -1000,7 +1000,7 @@ static void init_vmcb(struct vcpu_svm *svm)
 	 * as VMware does.
 	 */
 	if (enable_vmware_backdoor)
-		set_exception_intercept(svm, GP_VECTOR);
+		set_exception_intercept(svm, INTERCEPT_GP_VECTOR);
 
 	set_intercept(svm, INTERCEPT_INTR);
 	set_intercept(svm, INTERCEPT_NMI);
@@ -1078,7 +1078,7 @@ static void init_vmcb(struct vcpu_svm *svm)
 		/* Setup VMCB for Nested Paging */
 		control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
 		clr_intercept(svm, INTERCEPT_INVLPG);
-		clr_exception_intercept(svm, PF_VECTOR);
+		clr_exception_intercept(svm, INTERCEPT_PF_VECTOR);
 		clr_cr_intercept(svm, INTERCEPT_CR3_READ);
 		clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
 		save->g_pat = svm->vcpu.arch.pat;
@@ -1120,7 +1120,7 @@ static void init_vmcb(struct vcpu_svm *svm)
 
 	if (sev_guest(svm->vcpu.kvm)) {
 		svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
-		clr_exception_intercept(svm, UD_VECTOR);
+		clr_exception_intercept(svm, INTERCEPT_UD_VECTOR);
 	}
 
 	mark_all_dirty(svm->vmcb);
@@ -1631,11 +1631,11 @@ static void update_bp_intercept(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	clr_exception_intercept(svm, BP_VECTOR);
+	clr_exception_intercept(svm, INTERCEPT_BP_VECTOR);
 
 	if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
-			set_exception_intercept(svm, BP_VECTOR);
+			set_exception_intercept(svm, INTERCEPT_BP_VECTOR);
 	} else
 		vcpu->guest_debug = 0;
 }
@@ -2801,7 +2801,7 @@ static void dump_vmcb(struct kvm_vcpu *vcpu)
 	pr_err("%-20s%04x\n", "cr_write:", control->intercepts[CR_VECTOR] >> 16);
 	pr_err("%-20s%04x\n", "dr_read:", control->intercepts[DR_VECTOR] & 0xffff);
 	pr_err("%-20s%04x\n", "dr_write:", control->intercepts[DR_VECTOR] >> 16);
-	pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions);
+	pr_err("%-20s%08x\n", "exceptions:", control->intercepts[EXCEPTION_VECTOR]);
 	pr_err("%-20s%016llx\n", "intercepts:", control->intercept);
 	pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
 	pr_err("%-20s%d\n", "pause filter threshold:",
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index f33a50f92b92..9c798781172d 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -294,7 +294,7 @@ static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
 {
 	struct vmcb *vmcb = get_host_vmcb(svm);
 
-	vmcb->control.intercept_exceptions |= (1U << bit);
+	__set_intercept(&vmcb->control.intercepts, bit);
 
 	recalc_intercepts(svm);
 }
@@ -303,7 +303,7 @@ static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
 {
 	struct vmcb *vmcb = get_host_vmcb(svm);
 
-	vmcb->control.intercept_exceptions &= ~(1U << bit);
+	__clr_intercept(&vmcb->control.intercepts, bit);
 
 	recalc_intercepts(svm);
 }
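
For reference, the flat bit numbering above places the exception intercepts in
the third u32 of the intercepts[] array: bits 0-31 are the CR intercepts
(vector 0), bits 32-63 the DR intercepts (vector 1), so INTERCEPT_PF_VECTOR
(64 + PF_VECTOR) lands on bit PF_VECTOR of intercepts[EXCEPTION_VECTOR],
exactly where the old intercept_exceptions #PF bit lived. The standalone
sketch below (plain user-space C, not kernel code) illustrates that mapping;
the demo_* helpers are illustrative stand-ins only, on the assumption that
__set_intercept()/__is_intercept() from earlier in this series wrap ordinary
set/test bit operations on the u32 array.

	#include <stdio.h>
	#include <stdint.h>
	#include <stdbool.h>

	#define MAX_VECTORS		3	/* CR_VECTOR, DR_VECTOR, EXCEPTION_VECTOR */
	#define EXCEPTION_VECTOR	2
	#define PF_VECTOR		14
	#define INTERCEPT_PF_VECTOR	(64 + PF_VECTOR)

	/* Stand-in for the u32 intercepts[] array in vmcb_control_area. */
	struct demo_control {
		uint32_t intercepts[MAX_VECTORS];
	};

	/* Rough equivalent of __set_intercept(): set flat bit 'nr'. */
	static void demo_set_intercept(uint32_t *intercepts, int nr)
	{
		intercepts[nr / 32] |= 1u << (nr % 32);
	}

	/* Rough equivalent of __is_intercept(): test flat bit 'nr'. */
	static bool demo_is_intercept(const uint32_t *intercepts, int nr)
	{
		return intercepts[nr / 32] & (1u << (nr % 32));
	}

	int main(void)
	{
		struct demo_control c = { { 0 } };

		demo_set_intercept(c.intercepts, INTERCEPT_PF_VECTOR);

		/* Bit 78 (64 + 14) is bit 14 of word 2, i.e. the old #PF
		 * bit of intercept_exceptions. */
		printf("intercepts[EXCEPTION_VECTOR] = %#x\n",
		       c.intercepts[EXCEPTION_VECTOR]);
		printf("#PF intercepted: %s\n",
		       demo_is_intercept(c.intercepts, INTERCEPT_PF_VECTOR) ?
		       "yes" : "no");
		return 0;
	}

Compiled and run, this prints intercepts[EXCEPTION_VECTOR] = 0x4000, which is
the same value the nested code now reads via h->intercepts[EXCEPTION_VECTOR]
where it previously read h->intercept_exceptions.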