On 4/26/22 18:39, Maxim Levitsky wrote:
> [ 1355.807187] kvm_vcpu_map+0x159/0x190 [kvm]
> [ 1355.807628] nested_svm_vmexit+0x4c/0x7f0 [kvm_amd]
> [ 1355.808036] ? kvm_vcpu_block+0x54/0xa0 [kvm]
> [ 1355.808450] svm_check_nested_events+0x97/0x390 [kvm_amd]
> [ 1355.808920] kvm_check_nested_events+0x1c/0x40 [kvm]
When called from kvm_vcpu_halt, it is not even necessary to do the
vmexit immediately. kvm_arch_vcpu_runnable should do the right thing
anyway (e.g. kvm_arch_interrupt_allowed checks is_guest_mode for both
VMX and SVM).
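
For reference, the VMX side of that check looks roughly like the following
(quoted from memory rather than an exact tree, so treat it as a sketch;
svm_interrupt_blocked does an analogous is_guest_mode check on the SVM side):

bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu)
{
	/*
	 * If L1 wants a vmexit on external interrupts, an interrupt is
	 * always deliverable while L2 runs, regardless of L2's RFLAGS.IF
	 * and interruptibility state.
	 */
	if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
		return false;

	return !(vmx_get_rflags(vcpu) & X86_EFLAGS_IF) ||
	       (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
		(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
}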
The only case that is missing is MTF (monitor trap flag); it needs to be
added to hv_timer_pending, like this:
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4ff36610af6a..e2e4f60159e9 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1504,7 +1504,7 @@ struct kvm_x86_ops {
 struct kvm_x86_nested_ops {
 	void (*leave_nested)(struct kvm_vcpu *vcpu);
 	int (*check_events)(struct kvm_vcpu *vcpu);
-	bool (*hv_timer_pending)(struct kvm_vcpu *vcpu);
+	bool (*has_events)(struct kvm_vcpu *vcpu);
 	void (*triple_fault)(struct kvm_vcpu *vcpu);
 	int (*get_state)(struct kvm_vcpu *vcpu,
 			 struct kvm_nested_state __user *user_kvm_nested_state,
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 856c87563883..2744b905865c 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -3857,6 +3857,10 @@ static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu)
 	       to_vmx(vcpu)->nested.preemption_timer_expired;
 }
 
+static bool nested_vmx_has_events(struct kvm_vcpu *vcpu)
+{
+	return nested_vmx_preemption_timer_pending(vcpu) || to_vmx(vcpu)->nested.mtf_pending;
+}
 static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -6809,7 +6813,7 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
 struct kvm_x86_nested_ops vmx_nested_ops = {
 	.leave_nested = vmx_leave_nested,
 	.check_events = vmx_check_nested_events,
-	.hv_timer_pending = nested_vmx_preemption_timer_pending,
+	.has_events = nested_vmx_has_events,
 	.triple_fault = nested_vmx_triple_fault,
 	.get_state = vmx_get_nested_state,
 	.set_state = vmx_set_nested_state,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7f21d9fe816f..231c55c4b33d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9471,8 +9471,8 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit)
 	}
 
 	if (is_guest_mode(vcpu) &&
-	    kvm_x86_ops.nested_ops->hv_timer_pending &&
-	    kvm_x86_ops.nested_ops->hv_timer_pending(vcpu))
+	    kvm_x86_ops.nested_ops->has_events &&
+	    kvm_x86_ops.nested_ops->has_events(vcpu))
 		*req_immediate_exit = true;
 
 	WARN_ON(vcpu->arch.exception.pending);
@@ -12185,8 +12185,8 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
 		return true;
 
 	if (is_guest_mode(vcpu) &&
-	    kvm_x86_ops.nested_ops->hv_timer_pending &&
-	    kvm_x86_ops.nested_ops->hv_timer_pending(vcpu))
+	    kvm_x86_ops.nested_ops->has_events &&
+	    kvm_x86_ops.nested_ops->has_events(vcpu))
 		return true;
 
 	return false;
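
For context, not part of the patch: the halt path in the trace below reaches
the renamed callback through kvm_arch_vcpu_runnable, which is roughly (again
from memory)

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm_vcpu_has_events() is where ->has_events is consulted. */
	return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
}

Note that the NULL check on the callback stays, because svm_nested_ops does
not implement it: the preemption timer and MTF are VMX-only.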
> [ 1355.809396] kvm_arch_vcpu_runnable+0x4e/0x190 [kvm]
> [ 1355.809892] kvm_vcpu_check_block+0x4f/0x100 [kvm]
> [ 1355.810349] ? kvm_vcpu_check_block+0x5/0x100 [kvm]
> [ 1355.810806] ? kvm_vcpu_block+0x54/0xa0 [kvm]
> [ 1355.811259] kvm_vcpu_block+0x6b/0xa0 [kvm]
> [ 1355.811666] kvm_vcpu_halt+0x3f/0x490 [kvm]
> [ 1355.812049] kvm_arch_vcpu_ioctl_run+0xb0b/0x1d00 [kvm]
> [ 1355.812539] ? rcu_read_lock_sched_held+0x16/0x80
> [ 1355.813013] ? lock_release+0x1c4/0x270
> [ 1355.813365] ? __wake_up_common+0x8d/0x180
> [ 1355.813743] ? _raw_spin_unlock_irq+0x28/0x40