From: David Woodhouse <dwmw@xxxxxxxxxxxx>

Windows uses a per-vCPU vector, and it's delivered via the local APIC
basically like an MSI (with associated EOI) unlike the traditional
guest-wide vector which is just magically asserted by Xen (and in the
KVM case by kvm_xen_has_interrupt() / kvm_cpu_get_extint()).

Now that the kernel is able to raise event channel events for itself,
being able to do so for Windows guests is also going to be useful.

Signed-off-by: David Woodhouse <dwmw@xxxxxxxxxxxx>
---
 arch/x86/include/asm/kvm_host.h |  1 +
 arch/x86/kvm/xen.c              | 40 +++++++++++++++++++++++++++++++++
 include/uapi/linux/kvm.h        |  2 ++
 3 files changed, 43 insertions(+)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 74b10c5859aa..30f6fc549235 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -602,6 +602,7 @@ struct kvm_vcpu_hv {
 struct kvm_vcpu_xen {
 	u64 hypercall_rip;
 	u32 current_runstate;
+	u8 upcall_vector;
 	struct gfn_to_pfn_cache vcpu_info_cache;
 	struct gfn_to_pfn_cache vcpu_time_info_cache;
 	struct gfn_to_pfn_cache runstate_cache;
diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index 747a11ee0950..fe8661545db3 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -332,6 +332,22 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
 	mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
 }
 
+static void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v)
+{
+	struct kvm_lapic_irq irq = { };
+	int r;
+
+	irq.dest_id = v->vcpu_id;
+	irq.vector = v->arch.xen.upcall_vector;
+	irq.dest_mode = APIC_DEST_PHYSICAL;
+	irq.shorthand = APIC_DEST_NOSHORT;
+	irq.delivery_mode = APIC_DM_FIXED;
+	irq.level = 1;
+
+	/* The fast version will always work for physical unicast */
+	WARN_ON_ONCE(!kvm_irq_delivery_to_apic_fast(v->kvm, NULL, &irq, &r, NULL));
+}
+
 /*
  * On event channel delivery, the vcpu_info may not have been accessible.
  * In that case, there are bits in vcpu->arch.xen.evtchn_pending_sel which
@@ -392,6 +408,10 @@ void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
 	}
 	read_unlock_irqrestore(&gpc->lock, flags);
 
+	/* For the per-vCPU lapic vector, deliver it as MSI. */
+	if (v->arch.xen.upcall_vector)
+		kvm_xen_inject_vcpu_vector(v);
+
 	mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
 }
 
@@ -726,6 +746,15 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
 		r = 0;
 		break;
 
+	case KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR:
+		if (data->u.vector && data->u.vector < 0x10)
+			r = -EINVAL;
+		else {
+			vcpu->arch.xen.upcall_vector = data->u.vector;
+			r = 0;
+		}
+		break;
+
 	default:
 		break;
 	}
@@ -813,6 +842,11 @@ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
 		r = 0;
 		break;
 
+	case KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR:
+		data->u.vector = vcpu->arch.xen.upcall_vector;
+		r = 0;
+		break;
+
 	default:
 		break;
 	}
@@ -1246,6 +1280,12 @@ int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
 				kick_vcpu = true;
 			}
 		}
+
+		/* For the per-vCPU lapic vector, deliver it as MSI. */
+		if (kick_vcpu && vcpu->arch.xen.upcall_vector) {
+			kvm_xen_inject_vcpu_vector(vcpu);
+			kick_vcpu = false;
+		}
 	}
 
  out_rcu:
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index bffaae565550..a3cf8a4b8a50 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1753,6 +1753,7 @@ struct kvm_xen_vcpu_attr {
 			__u32 priority;
 			__u64 expires_ns;
 		} timer;
+		__u8 vector;
 	} u;
 };
 
@@ -1766,6 +1767,7 @@ struct kvm_xen_vcpu_attr {
 /* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
 #define KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID		0x6
 #define KVM_XEN_VCPU_ATTR_TYPE_TIMER		0x7
+#define KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR	0x8
 
 /* Secure Encrypted Virtualization command */
 enum sev_cmd_id {
-- 
2.33.1
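
For reference, here is a minimal userspace sketch (not part of the patch) of
how a VMM might program the new attribute on a vCPU. It assumes a vCPU fd
obtained from KVM_CREATE_VCPU and a kernel carrying this patch; the helper
name is invented for illustration:

/*
 * Illustrative only: set (or clear, with vector == 0) the per-vCPU Xen
 * upcall vector. Vectors 1-15 are rejected with -EINVAL by the patch's
 * "data->u.vector < 0x10" check in kvm_xen_vcpu_set_attr().
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_xen_upcall_vector(int vcpu_fd, __u8 vector)
{
	struct kvm_xen_vcpu_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR;
	attr.u.vector = vector;

	return ioctl(vcpu_fd, KVM_XEN_VCPU_SET_ATTR, &attr);
}

Per the patch, a vector of 0 is accepted and disables per-vCPU delivery,
so event channel events fall back to the traditional guest-wide upcall
path.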