The vcpu_info structure supersedes the per-vCPU area of the shared_info
page; once a guest registers it, its vCPUs use it instead.

Signed-off-by: Joao Martins <joao.m.martins@xxxxxxxxxx>
Signed-off-by: Ankur Arora <ankur.a.arora@xxxxxxxxxx>
---
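Note for reviewers (not part of the commit message): below is a minimal
userspace sketch of how a VMM could register a vcpu_info area with the
new attribute. It assumes the KVM_XEN_HVM_SET_ATTR ioctl plumbing
introduced earlier in this series; the helper name is illustrative and
the attribute layout follows the uapi change in this patch.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Point vcpu_id's vcpu_info at guest-physical address gpa.
 * Returns 0 on success, -1 with errno set on failure.
 */
static int xen_set_vcpu_info(int vm_fd, uint32_t vcpu_id, uint64_t gpa)
{
	struct kvm_xen_hvm_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = KVM_XEN_ATTR_TYPE_VCPU_INFO;
	attr.u.vcpu_attr.vcpu = vcpu_id;
	attr.u.vcpu_attr.gpa = gpa;

	return ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &attr);
}

On success, KVM pins the backing page, caches the host mapping in
kvm_vcpu_xen and redirects pvclock updates there; the page reference is
dropped in kvm_xen_vcpu_uninit() at vCPU teardown. KVM_XEN_HVM_GET_ATTR
with the same type reads the registered gpa back.
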
 arch/x86/include/asm/kvm_host.h |  2 +
 arch/x86/kvm/x86.c              |  1 +
 arch/x86/kvm/xen.c              | 93 ++++++++++++++++++++++++++++++++++++++---
 arch/x86/kvm/xen.h              | 14 +++++++
 include/uapi/linux/kvm.h        |  5 +++
 5 files changed, 110 insertions(+), 5 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index befc0e37f162..96f65ba4b3c0 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -537,6 +537,8 @@ struct kvm_vcpu_hv {
 /* Xen per vcpu emulation context */
 struct kvm_vcpu_xen {
 	struct kvm_xen_exit exit;
+	gpa_t vcpu_info_addr;
+	struct vcpu_info *vcpu_info;
 };
 
 struct kvm_vcpu_arch {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 31a102b22042..3ce97860e6ee 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9124,6 +9124,7 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 	int idx;
 
 	kvm_hv_vcpu_uninit(vcpu);
+	kvm_xen_vcpu_uninit(vcpu);
 	kvm_pmu_destroy(vcpu);
 	kfree(vcpu->arch.mce_banks);
 	kvm_free_lapic(vcpu);
diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index 879bcfdd7b1d..36d6dd0ea4b8 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -15,6 +15,33 @@
 
 #include "trace.h"
 
+static void set_vcpu_attr(struct kvm_vcpu *v, u16 type, gpa_t gpa, void *addr)
+{
+	struct kvm_vcpu_xen *vcpu_xen = vcpu_to_xen_vcpu(v);
+
+	switch (type) {
+	case KVM_XEN_ATTR_TYPE_VCPU_INFO:
+		vcpu_xen->vcpu_info_addr = gpa;
+		vcpu_xen->vcpu_info = addr;
+		kvm_xen_setup_pvclock_page(v);
+		break;
+	default:
+		break;
+	}
+}
+
+static gpa_t get_vcpu_attr(struct kvm_vcpu *v, u16 type)
+{
+	struct kvm_vcpu_xen *vcpu_xen = vcpu_to_xen_vcpu(v);
+
+	switch (type) {
+	case KVM_XEN_ATTR_TYPE_VCPU_INFO:
+		return vcpu_xen->vcpu_info_addr;
+	default:
+		return 0;
+	}
+}
+
 static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
 {
 	struct shared_info *shared_info;
@@ -37,26 +64,44 @@ static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
 	return 0;
 }
 
+static void *xen_vcpu_info(struct kvm_vcpu *v)
+{
+	struct kvm_vcpu_xen *vcpu_xen = vcpu_to_xen_vcpu(v);
+	struct kvm_xen *kvm = &v->kvm->arch.xen;
+	unsigned int offset = 0;
+	void *hva = NULL;
+
+	if (vcpu_xen->vcpu_info_addr)
+		return vcpu_xen->vcpu_info;
+
+	if (kvm->shinfo_addr && v->vcpu_id < MAX_VIRT_CPUS) {
+		hva = kvm->shinfo;
+		offset += offsetof(struct shared_info, vcpu_info);
+		offset += v->vcpu_id * sizeof(struct vcpu_info);
+	}
+
+	return hva + offset;
+}
+
 void kvm_xen_setup_pvclock_page(struct kvm_vcpu *v)
 {
 	struct kvm_vcpu_arch *vcpu = &v->arch;
 	struct pvclock_vcpu_time_info *guest_hv_clock;
+	void *hva = xen_vcpu_info(v);
 	unsigned int offset;
 
-	if (v->vcpu_id >= MAX_VIRT_CPUS)
+	if (!hva)
 		return;
 
 	offset = offsetof(struct vcpu_info, time);
-	offset += offsetof(struct shared_info, vcpu_info);
-	offset += v->vcpu_id * sizeof(struct vcpu_info);
 
 	guest_hv_clock = (struct pvclock_vcpu_time_info *)
-		(((void *)v->kvm->arch.xen.shinfo) + offset);
+		(hva + offset);
 
 	BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
 
 	if (guest_hv_clock->version & 1)
-		++guest_hv_clock->version;  /* first time write, random junk */
+		++guest_hv_clock->version;
 
 	vcpu->hv_clock.version = guest_hv_clock->version + 1;
 	guest_hv_clock->version = vcpu->hv_clock.version;
@@ -93,6 +138,25 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
 		r = kvm_xen_shared_info_init(kvm, gfn);
 		break;
 	}
+	case KVM_XEN_ATTR_TYPE_VCPU_INFO: {
+		gpa_t gpa = data->u.vcpu_attr.gpa;
+		struct kvm_vcpu *v;
+		struct page *page;
+		void *addr;
+
+		v = kvm_get_vcpu(kvm, data->u.vcpu_attr.vcpu);
+		if (!v)
+			return -EINVAL;
+
+		page = gfn_to_page(v->kvm, gpa_to_gfn(gpa));
+		if (is_error_page(page))
+			return -EFAULT;
+
+		addr = page_to_virt(page) + offset_in_page(gpa);
+		set_vcpu_attr(v, data->type, gpa, addr);
+		r = 0;
+		break;
+	}
 	default:
 		break;
 	}
@@ -109,6 +173,17 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
 		data->u.shared_info.gfn = kvm->arch.xen.shinfo_addr;
 		break;
 	}
+	case KVM_XEN_ATTR_TYPE_VCPU_INFO: {
+		struct kvm_vcpu *v;
+
+		v = kvm_get_vcpu(kvm, data->u.vcpu_attr.vcpu);
+		if (!v)
+			return -EINVAL;
+
+		data->u.vcpu_attr.gpa = get_vcpu_attr(v, data->type);
+		r = 0;
+		break;
+	}
 	default:
 		break;
 	}
@@ -180,6 +255,14 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+void kvm_xen_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_xen *vcpu_xen = vcpu_to_xen_vcpu(vcpu);
+
+	if (vcpu_xen->vcpu_info)
+		put_page(virt_to_page(vcpu_xen->vcpu_info));
+}
+
 void kvm_xen_destroy_vm(struct kvm *kvm)
 {
 	struct kvm_xen *xen = &kvm->arch.xen;
diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h
index 827c9390da34..10ebd0b7a25e 100644
--- a/arch/x86/kvm/xen.h
+++ b/arch/x86/kvm/xen.h
@@ -3,6 +3,19 @@
 #ifndef __ARCH_X86_KVM_XEN_H__
 #define __ARCH_X86_KVM_XEN_H__
 
+static inline struct kvm_vcpu_xen *vcpu_to_xen_vcpu(struct kvm_vcpu *vcpu)
+{
+	return &vcpu->arch.xen;
+}
+
+static inline struct kvm_vcpu *xen_vcpu_to_vcpu(struct kvm_vcpu_xen *xen_vcpu)
+{
+	struct kvm_vcpu_arch *arch;
+
+	arch = container_of(xen_vcpu, struct kvm_vcpu_arch, xen);
+	return container_of(arch, struct kvm_vcpu, arch);
+}
+
 void kvm_xen_setup_pvclock_page(struct kvm_vcpu *vcpu);
 int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
 int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
@@ -11,5 +24,6 @@ bool kvm_xen_hypercall_set(struct kvm *kvm);
 int kvm_xen_hypercall(struct kvm_vcpu *vcpu);
 void kvm_xen_destroy_vm(struct kvm *kvm);
 
+void kvm_xen_vcpu_uninit(struct kvm_vcpu *vcpu);
 
 #endif
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index de2168d235af..782f497a0fdd 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1465,10 +1465,15 @@ struct kvm_xen_hvm_attr {
 		struct {
 			__u64 gfn;
 		} shared_info;
+		struct {
+			__u32 vcpu;
+			__u64 gpa;
+		} vcpu_attr;
 	} u;
 };
 
 #define KVM_XEN_ATTR_TYPE_SHARED_INFO	0x0
+#define KVM_XEN_ATTR_TYPE_VCPU_INFO	0x1
 
 /* Secure Encrypted Virtualization command */
 enum sev_cmd_id {
-- 
2.11.0