Add Hyper-V specific fields to the VMCB to support SVM enlightenments.
Also do a small refactoring of the VMCB clean bits handling.

Signed-off-by: Vineeth Pillai <viremana@xxxxxxxxxxxxxxxxxxx>
---
 arch/x86/include/asm/svm.h | 24 +++++++++++++++++++++++-
 arch/x86/kvm/svm/svm.h     | 30 ++++++++++++++++++++++++++++--
 2 files changed, 51 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 1c561945b426..3586d7523ce8 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -322,9 +322,31 @@ static inline void __unused_size_checks(void)
 	BUILD_BUG_ON(sizeof(struct ghcb) != EXPECTED_GHCB_SIZE);
 }
 
+
+#if IS_ENABLED(CONFIG_HYPERV)
+struct __packed hv_enlightenments {
+	struct __packed hv_enlightenments_control {
+		u32 nested_flush_hypercall:1;
+		u32 msr_bitmap:1;
+		u32 enlightened_npt_tlb: 1;
+		u32 reserved:29;
+	} hv_enlightenments_control;
+	u32 hv_vp_id;
+	u64 hv_vm_id;
+	u64 partition_assist_page;
+	u64 reserved;
+};
+#define VMCB_CONTROL_END	992 /* 32 bytes reserved for Hyper-V */
+#else
+#define VMCB_CONTROL_END	1024
+#endif
+
 struct vmcb {
 	struct vmcb_control_area control;
-	u8 reserved_control[1024 - sizeof(struct vmcb_control_area)];
+	u8 reserved_control[VMCB_CONTROL_END - sizeof(struct vmcb_control_area)];
+#if IS_ENABLED(CONFIG_HYPERV)
+	struct hv_enlightenments hv_enlightenments;
+#endif
 	struct vmcb_save_area save;
 } __packed;
 
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 39e071fdab0c..842c8764a68c 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -33,6 +33,11 @@ static const u32 host_save_user_msrs[] = {
 extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
 extern bool npt_enabled;
 
+/*
+ * Clean bits in VMCB.
+ * VMCB_ALL_CLEAN_MASK should also be updated
+ * if this enum is modified.
+ */
 enum {
 	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
 			    pause filter count */
@@ -50,9 +55,25 @@ enum {
 			  * AVIC PHYSICAL_TABLE pointer,
 			  * AVIC LOGICAL_TABLE pointer
 			  */
-	VMCB_DIRTY_MAX,
+#if IS_ENABLED(CONFIG_HYPERV)
+	VMCB_HV_NESTED_ENLIGHTENMENTS = 31,
+#endif
 };
 
+#define __CLEAN_MASK ( \
+	(1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) |	\
+	(1U << VMCB_ASID) | (1U << VMCB_INTR) |			\
+	(1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) |	\
+	(1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) |	\
+	(1U << VMCB_LBR) | (1U << VMCB_AVIC)			\
+	)
+
+#if IS_ENABLED(CONFIG_HYPERV)
+#define VMCB_ALL_CLEAN_MASK (__CLEAN_MASK | (1U << VMCB_HV_NESTED_ENLIGHTENMENTS))
+#else
+#define VMCB_ALL_CLEAN_MASK __CLEAN_MASK
+#endif
+
 /* TPR and CR2 are always written before VMRUN */
 #define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))
 
@@ -230,7 +251,7 @@ static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
 
 static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
 {
-	vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
+	vmcb->control.clean = VMCB_ALL_CLEAN_MASK
 		& ~VMCB_ALWAYS_DIRTY_MASK;
 }
 
@@ -239,6 +260,11 @@ static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
 	vmcb->control.clean &= ~(1 << bit);
 }
 
+static inline bool vmcb_is_clean(struct vmcb *vmcb, int bit)
+{
+	return (vmcb->control.clean & (1 << bit));
+}
+
 static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
 {
 	return container_of(vcpu, struct vcpu_svm, vcpu);
-- 
2.25.1
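
For illustration only, a minimal sketch (not part of this patch) of how a later
consumer of these definitions might drive the new clean bit: when L1's MSR
bitmap changes, the Hyper-V enlightenments clean bit needs to be cleared so the
underlying hypervisor re-reads the enlightened MSR bitmap on the next VMRUN.
The helper name hv_vmcb_dirty_nested_enlightenments() and its call site are
assumptions here; the sketch only relies on struct hv_enlightenments,
VMCB_HV_NESTED_ENLIGHTENMENTS, to_svm() and vmcb_mark_dirty() shown above.

/*
 * Illustrative sketch only -- assumes CONFIG_HYPERV=y, so that the
 * vmcb->hv_enlightenments field and VMCB_HV_NESTED_ENLIGHTENMENTS exist.
 */
static inline void hv_vmcb_dirty_nested_enlightenments(struct kvm_vcpu *vcpu)
{
	struct vmcb *vmcb = to_svm(vcpu)->vmcb;
	struct hv_enlightenments *hve = &vmcb->hv_enlightenments;

	/*
	 * If the enlightened MSR bitmap is in use, clear the clean bit so
	 * the hypervisor rescans the bitmap on the next VMRUN.
	 */
	if (hve->hv_enlightenments_control.msr_bitmap)
		vmcb_mark_dirty(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS);
}

Because VMCB_HV_NESTED_ENLIGHTENMENTS is folded into VMCB_ALL_CLEAN_MASK,
vmcb_mark_all_clean() marks it clean along with the architectural bits, and
vmcb_is_clean() lets callers test it without open-coding the mask.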