Add VMX/SVM specific interrupt injection info to the VM entry
tracepoint. Also add a flag showing whether the vCPU is in guest mode,
in addition to the existing flag showing that an immediate VM exit is
set to happen after the entry.

Signed-off-by: Maxim Levitsky <mlevitsk@xxxxxxxxxx>
---
 arch/x86/include/asm/kvm-x86-ops.h |  1 +
 arch/x86/include/asm/kvm_host.h    |  5 ++++-
 arch/x86/kvm/svm/svm.c             | 17 +++++++++++++++++
 arch/x86/kvm/trace.h               | 17 ++++++++++++++---
 arch/x86/kvm/vmx/main.c            |  1 +
 arch/x86/kvm/vmx/vmx.c             | 11 +++++++++++
 arch/x86/kvm/vmx/x86_ops.h         |  4 ++++
 7 files changed, 52 insertions(+), 4 deletions(-)

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 03b7e13f15bbd..af5c4d55d47bc 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -99,6 +99,7 @@ KVM_X86_OP(get_l2_tsc_multiplier)
 KVM_X86_OP(write_tsc_offset)
 KVM_X86_OP(write_tsc_multiplier)
 KVM_X86_OP(get_exit_info)
+KVM_X86_OP(get_entry_info)
 KVM_X86_OP(check_intercept)
 KVM_X86_OP(handle_exit_irqoff)
 KVM_X86_OP_OPTIONAL(update_cpu_dirty_logging)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 52443ccda320f..8118f75a8a35d 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1756,13 +1756,16 @@ struct kvm_x86_ops {
 	void (*write_tsc_multiplier)(struct kvm_vcpu *vcpu);
 
 	/*
-	 * Retrieve somewhat arbitrary exit information. Intended to
+	 * Retrieve somewhat arbitrary exit/entry information. Intended to
 	 * be used only from within tracepoints or error paths.
 	 */
 	void (*get_exit_info)(struct kvm_vcpu *vcpu, u32 *reason,
 			      u64 *info1, u64 *info2,
 			      u32 *exit_int_info, u32 *exit_int_info_err_code);
 
+	void (*get_entry_info)(struct kvm_vcpu *vcpu,
+			       u32 *inj_info, u32 *inj_info_error_code);
+
 	int (*check_intercept)(struct kvm_vcpu *vcpu,
 			       struct x86_instruction_info *info,
 			       enum x86_intercept_stage stage,
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 9a0506ef87dfb..485f3c2826312 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3525,6 +3525,22 @@ static void svm_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
 		*error_code = 0;
 }
 
+static void svm_get_entry_info(struct kvm_vcpu *vcpu,
+			       u32 *inj_info,
+			       u32 *inj_info_error_code)
+{
+	struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
+
+	*inj_info = control->event_inj;
+
+	if ((*inj_info & SVM_EXITINTINFO_VALID) &&
+	    (*inj_info & SVM_EXITINTINFO_VALID_ERR))
+		*inj_info_error_code = control->event_inj_err;
+	else
+		*inj_info_error_code = 0;
+
+}
+
 static int svm_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -5057,6 +5073,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.required_apicv_inhibits = AVIC_REQUIRED_APICV_INHIBITS,
 
 	.get_exit_info = svm_get_exit_info,
+	.get_entry_info = svm_get_entry_info,
 
 	.vcpu_after_set_cpuid = svm_vcpu_after_set_cpuid,
 
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index d3aeffd6ae753..b4c014b1aa9a1 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -22,16 +22,27 @@ TRACE_EVENT(kvm_entry,
 		__field(	unsigned int,	vcpu_id		)
 		__field(	unsigned long,	rip		)
 		__field(	bool,		immediate_exit	)
+		__field(	u32,		inj_info	)
+		__field(	u32,		inj_info_err	)
+		__field(	bool,		guest_mode	)
 	),
 
 	TP_fast_assign(
 		__entry->vcpu_id	= vcpu->vcpu_id;
 		__entry->rip		= kvm_rip_read(vcpu);
-		__entry->immediate_exit	= force_immediate_exit;
+		__entry->immediate_exit	= force_immediate_exit;
+		__entry->guest_mode	= is_guest_mode(vcpu);
+
+		static_call(kvm_x86_get_entry_info)(vcpu,
+						    &__entry->inj_info,
+						    &__entry->inj_info_err);
 	),
 
-	TP_printk("vcpu %u, rip 0x%lx%s", __entry->vcpu_id, __entry->rip,
-		  __entry->immediate_exit ? "[immediate exit]" : "")
+	TP_printk("vcpu %u, rip 0x%lx inj 0x%08x inj_error_code 0x%08x%s%s",
+		  __entry->vcpu_id, __entry->rip,
+		  __entry->inj_info, __entry->inj_info_err,
+		  __entry->immediate_exit ? "[immediate exit]" : "",
+		  __entry->guest_mode ? "[guest]" : "")
 );
 
 /*
diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index 7e2e78a142574..769a7ca06f59b 100644
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -108,6 +108,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
 	.get_mt_mask = vmx_get_mt_mask,
 
 	.get_exit_info = vmx_get_exit_info,
+	.get_entry_info = vmx_get_entry_info,
 
 	.vcpu_after_set_cpuid = vmx_vcpu_after_set_cpuid,
 
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 89682832dded7..af18174cc4a20 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6180,6 +6180,17 @@ void vmx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
 	}
 }
 
+void vmx_get_entry_info(struct kvm_vcpu *vcpu,
+			u32 *inj_info,
+			u32 *inj_info_error_code)
+{
+	*inj_info = vmcs_read32(VM_ENTRY_INTR_INFO_FIELD);
+	if (is_exception_with_error_code(*inj_info))
+		*inj_info_error_code = vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE);
+	else
+		*inj_info_error_code = 0;
+}
+
 static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
 {
 	if (vmx->pml_pg) {
diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
index b6a7cfc6ae317..124425997ec15 100644
--- a/arch/x86/kvm/vmx/x86_ops.h
+++ b/arch/x86/kvm/vmx/x86_ops.h
@@ -104,8 +104,12 @@ void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
 int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
 int vmx_set_identity_map_addr(struct kvm *kvm, u64 ident_addr);
 u8 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
+
 void vmx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason, u64 *info1,
 		       u64 *info2, u32 *intr_info, u32 *error_code);
+void vmx_get_entry_info(struct kvm_vcpu *vcpu, u32 *inj_info,
+			u32 *inj_info_error_code);
+
 u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
 u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
 void vmx_write_tsc_offset(struct kvm_vcpu *vcpu);
-- 
2.26.3
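
The inj_info value printed by the updated kvm_entry tracepoint follows the
hardware event-injection encoding (VMX VM-entry interruption information /
SVM EVENTINJ): bits 7:0 carry the vector, bits 10:8 the event type, bit 11
indicates that an error code is delivered, and bit 31 marks the field as
valid. A minimal user-space sketch for decoding such a value when
post-processing traces could look like the code below; the helper name and
the sample value are illustrative only and not part of the patch, and the
type names follow the VMX encoding (SVM uses a compatible subset).

/*
 * Illustrative decoder for the inj_info value printed by the updated
 * kvm_entry tracepoint; not part of the patch above.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void decode_inj_info(uint32_t inj_info)
{
	static const char * const types[8] = {
		"external interrupt", "reserved", "NMI",
		"hardware exception", "software interrupt",
		"privileged software exception", "software exception",
		"other event",
	};
	bool valid = inj_info & (1u << 31);		/* bit 31: injection valid */
	unsigned int vector = inj_info & 0xff;		/* bits 7:0: vector */
	unsigned int type = (inj_info >> 8) & 0x7;	/* bits 10:8: event type */
	bool has_err = inj_info & (1u << 11);		/* bit 11: error code delivered */

	if (!valid) {
		printf("no event injected\n");
		return;
	}

	printf("vector 0x%02x, type '%s', error code %s\n",
	       vector, types[type], has_err ? "delivered" : "not delivered");
}

int main(void)
{
	/* Hypothetical value: valid external interrupt, vector 0x30. */
	decode_inj_info(0x80000030);
	return 0;
}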