Re: [PATCH 2/2] KVM: x86: Fix INIT signal handling in various CPU states

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 




> On 11 Sep 2019, at 19:23, Paolo Bonzini <pbonzini@xxxxxxxxxx> wrote:
> 
> On 26/08/19 12:24, Liran Alon wrote:
>> 	/*
>> -	 * INITs are latched while in SMM.  Because an SMM CPU cannot
>> -	 * be in KVM_MP_STATE_INIT_RECEIVED state, just eat SIPIs
>> -	 * and delay processing of INIT until the next RSM.
>> +	 * INITs are latched while CPU is in specific states.
>> +	 * Because a CPU cannot be in these states immediately
>> +	 * after it have processed an INIT signal (and thus in
>> +	 * KVM_MP_STATE_INIT_RECEIVED state), just eat SIPIs
>> +	 * and delay processing of INIT until CPU leaves
>> +	 * the state which latch INIT signal.
>> 	 */
>> -	if (is_smm(vcpu)) {
>> +	if (kvm_x86_ops->apic_init_signal_blocked(vcpu)) {
> 
> I'd prefer keeping is_smm(vcpu) here, since that is not vendor-specific.
> 
> Together with some edits to the comments, this is what I got.
> Let me know if you prefer to have the latched_init changes
> on top, or you'd like to post v2 with everything.
> 
> Thanks,
> 
> Paolo

The code change below seems good to me.
(The only thing you changed is moving is_smm(vcpu) to the non-vendor-specific part and rephrasing the comments, right?)

I plan to submit a patch for the latched_init changes soon. (And respective kvm-unit-test which I already have ready and working).
We also have another small patch on top to add support for the wait-for-SIPI activity-state, which I will submit soon as well.
So I’m fine with either option: you first apply this change and I then submit another patch on top,
or I just submit a new v2 patch series with a new version of this patch plus the rest of the changes.
I don’t have strong preference. Whatever you prefer is fine by me. :)

-Liran

> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index c4f271a9b306..b523949a8df8 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1209,6 +1209,8 @@ struct kvm_x86_ops {
> 	uint16_t (*nested_get_evmcs_version)(struct kvm_vcpu *vcpu);
> 
> 	bool (*need_emulation_on_page_fault)(struct kvm_vcpu *vcpu);
> +
> +	bool (*apic_init_signal_blocked)(struct kvm_vcpu *vcpu);
> };
> 
> struct kvm_arch_async_pf {
> diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
> index 559e1c4c0832..dbbe4781fbb2 100644
> --- a/arch/x86/kvm/lapic.c
> +++ b/arch/x86/kvm/lapic.c
> @@ -2706,11 +2706,14 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
> 		return;
> 
> 	/*
> -	 * INITs are latched while in SMM.  Because an SMM CPU cannot
> -	 * be in KVM_MP_STATE_INIT_RECEIVED state, just eat SIPIs
> -	 * and delay processing of INIT until the next RSM.
> +	 * INITs are latched while CPU is in specific states
> +	 * (SMM, VMX non-root mode, SVM with GIF=0).
> +	 * Because a CPU cannot be in these states immediately
> +	 * after it has processed an INIT signal (and thus in
> +	 * KVM_MP_STATE_INIT_RECEIVED state), just eat SIPIs
> +	 * and leave the INIT pending.
> 	 */
> -	if (is_smm(vcpu)) {
> +	if (is_smm(vcpu) || kvm_x86_ops->apic_init_signal_blocked(vcpu)) {
> 		WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
> 		if (test_bit(KVM_APIC_SIPI, &apic->pending_events))
> 			clear_bit(KVM_APIC_SIPI, &apic->pending_events);
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index 2854aafc489e..d24050b647c7 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -7170,6 +7170,21 @@ static bool svm_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
> 	return false;
> }
> 
> +static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
> +{
> +	struct vcpu_svm *svm = to_svm(vcpu);
> +
> +	/*
> +	 * TODO: Last condition latch INIT signals on vCPU when
> +	 * vCPU is in guest-mode and vmcb12 defines intercept on INIT.
> +	 * To properly emulate the INIT intercept, SVM should implement
> +	 * kvm_x86_ops->check_nested_events() and call nested_svm_vmexit()
> +	 * there if an INIT signal is pending.
> +	 */
> +	return !gif_set(svm) ||
> +		   (svm->vmcb->control.intercept & (1ULL << INTERCEPT_INIT));
> +}
> +
> static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
> 	.cpu_has_kvm_support = has_svm,
> 	.disabled_by_bios = is_disabled,
> @@ -7306,6 +7321,8 @@ static bool svm_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
> 	.nested_get_evmcs_version = nested_get_evmcs_version,
> 
> 	.need_emulation_on_page_fault = svm_need_emulation_on_page_fault,
> +
> +	.apic_init_signal_blocked = svm_apic_init_signal_blocked,
> };
> 
> static int __init svm_init(void)
> diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> index ad2453317c4b..6ce83c602e7f 100644
> --- a/arch/x86/kvm/vmx/nested.c
> +++ b/arch/x86/kvm/vmx/nested.c
> @@ -3409,6 +3409,15 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
> 	unsigned long exit_qual;
> 	bool block_nested_events =
> 	    vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
> +	struct kvm_lapic *apic = vcpu->arch.apic;
> +
> +	if (lapic_in_kernel(vcpu) &&
> +		test_bit(KVM_APIC_INIT, &apic->pending_events)) {
> +		if (block_nested_events)
> +			return -EBUSY;
> +		nested_vmx_vmexit(vcpu, EXIT_REASON_INIT_SIGNAL, 0, 0);
> +		return 0;
> +	}
> 
> 	if (vcpu->arch.exception.pending &&
> 		nested_vmx_check_exception(vcpu, &exit_qual)) {
> @@ -4470,7 +4479,12 @@ static int handle_vmoff(struct kvm_vcpu *vcpu)
> {
> 	if (!nested_vmx_check_permission(vcpu))
> 		return 1;
> +
> 	free_nested(vcpu);
> +
> +	/* Process a latched INIT during time CPU was in VMX operation */
> +	kvm_make_request(KVM_REQ_EVENT, vcpu);
> +
> 	return nested_vmx_succeed(vcpu);
> }
> 
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 99f52f8c969a..73bf9a2e6fb6 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -7470,6 +7470,11 @@ static bool vmx_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
> 	return false;
> }
> 
> +static bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
> +{
> +	return to_vmx(vcpu)->nested.vmxon;
> +}
> +
> static __init int hardware_setup(void)
> {
> 	unsigned long host_bndcfgs;
> @@ -7794,6 +7799,7 @@ static __exit void hardware_unsetup(void)
> 	.get_vmcs12_pages = NULL,
> 	.nested_enable_evmcs = NULL,
> 	.need_emulation_on_page_fault = vmx_need_emulation_on_page_fault,
> +	.apic_init_signal_blocked = vmx_apic_init_signal_blocked,
> };
> 
> static void vmx_cleanup_l1d_flush(void)





[Index of Archives]     [KVM ARM]     [KVM ia64]     [KVM ppc]     [Virtualization Tools]     [Spice Development]     [Libvirt]     [Libvirt Users]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite Questions]     [Linux Kernel]     [Linux SCSI]     [XFree86]

  Powered by Linux