From: Lai Jiangshan <jiangshan.ljs@xxxxxxxxxxxx>

For PVM, APIC virtualization for the guest is supported by reusing APIC
emulation.

Signed-off-by: Lai Jiangshan <jiangshan.ljs@xxxxxxxxxxxx>
Signed-off-by: Hou Wenlong <houwenlong.hwl@xxxxxxxxxxxx>
---
 arch/x86/kvm/pvm/pvm.c | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/arch/x86/kvm/pvm/pvm.c b/arch/x86/kvm/pvm/pvm.c
index 3735baee1d5f..ce047d211657 100644
--- a/arch/x86/kvm/pvm/pvm.c
+++ b/arch/x86/kvm/pvm/pvm.c
@@ -566,6 +566,25 @@ static int pvm_get_cpl(struct kvm_vcpu *vcpu)
 	return 3;
 }
 
+static void pvm_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
+				  int trig_mode, int vector)
+{
+	struct kvm_vcpu *vcpu = apic->vcpu;
+
+	kvm_lapic_set_irr(vector, apic);
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
+	kvm_vcpu_kick(vcpu);
+}
+
+static void pvm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
+{
+}
+
+static bool pvm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+
 static void pvm_setup_mce(struct kvm_vcpu *vcpu)
 {
 }
@@ -1083,19 +1102,25 @@ static struct kvm_x86_ops pvm_x86_ops __initdata = {
 	.vcpu_pre_run = pvm_vcpu_pre_run,
 	.vcpu_run = pvm_vcpu_run,
 	.handle_exit = pvm_handle_exit,
+	.refresh_apicv_exec_ctrl = pvm_refresh_apicv_exec_ctrl,
+	.deliver_interrupt = pvm_deliver_interrupt,
 
 	.vcpu_after_set_cpuid = pvm_vcpu_after_set_cpuid,
 
 	.handle_exit_irqoff = pvm_handle_exit_irqoff,
 
+	.request_immediate_exit = __kvm_request_immediate_exit,
 
+	.sched_in = pvm_sched_in,
 
 	.nested_ops = &pvm_nested_ops,
 
 	.setup_mce = pvm_setup_mce,
+	.apic_init_signal_blocked = pvm_apic_init_signal_blocked,
 
 	.msr_filter_changed = pvm_msr_filter_changed,
 	.complete_emulated_msr = kvm_complete_insn_gp,
+	.vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
 };
 
 static struct kvm_x86_init_ops pvm_init_ops __initdata = {
-- 
2.19.1.6.gb485710b