From: Paul Durrant <pdurrant@xxxxxxxxxx>

The shared_info cache should only need to be set up once by the VMM,
but the content of the shared_info page may need to be changed if the
mode of the guest changes from 32-bit to 64-bit or vice versa. This
re-initialization of the content will be handled in a subsequent patch.

Signed-off-by: Paul Durrant <pdurrant@xxxxxxxxxx>
---
 arch/x86/kvm/xen.c | 70 ++++++++++++++++++++++++++--------------------
 1 file changed, 40 insertions(+), 30 deletions(-)

diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index cfd5051e0800..7bead3f65e55 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -34,7 +34,7 @@ static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r);
 
 DEFINE_STATIC_KEY_DEFERRED_FALSE(kvm_xen_enabled, HZ);
 
-static int kvm_xen_shared_info_init(struct kvm *kvm, u64 addr, bool addr_is_gfn)
+static int kvm_xen_shared_info_init(struct kvm *kvm)
 {
 	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
 	struct pvclock_wall_clock *wc;
@@ -44,34 +44,22 @@ static int kvm_xen_shared_info_init(struct kvm *kvm, u64 addr, bool addr_is_gfn)
 	int ret = 0;
 	int idx = srcu_read_lock(&kvm->srcu);
 
-	if ((addr_is_gfn && addr == KVM_XEN_INVALID_GFN) ||
-	    (!addr_is_gfn && addr == 0)) {
-		kvm_gpc_deactivate(gpc);
-		goto out;
-	}
+	read_lock_irq(&gpc->lock);
+	while (!kvm_gpc_check(gpc, PAGE_SIZE)) {
+		read_unlock_irq(&gpc->lock);
 
-	do {
-		if (addr_is_gfn)
-			ret = kvm_gpc_activate(gpc, gfn_to_gpa(addr), PAGE_SIZE);
-		else
-			ret = kvm_gpc_activate_hva(gpc, addr, PAGE_SIZE);
+		ret = kvm_gpc_refresh(gpc, PAGE_SIZE);
 		if (ret)
 			goto out;
 
-		/*
-		 * This code mirrors kvm_write_wall_clock() except that it writes
-		 * directly through the pfn cache and doesn't mark the page dirty.
-		 */
-		wall_nsec = kvm_get_wall_clock_epoch(kvm);
-
-		/* It could be invalid again already, so we need to check */
 		read_lock_irq(&gpc->lock);
+	}
 
-		if (gpc->valid)
-			break;
-
-		read_unlock_irq(&gpc->lock);
-	} while (1);
+	/*
+	 * This code mirrors kvm_write_wall_clock() except that it writes
+	 * directly through the pfn cache and doesn't mark the page dirty.
+	 */
+	wall_nsec = kvm_get_wall_clock_epoch(kvm);
 
 	/* Paranoia checks on the 32-bit struct layout */
 	BUILD_BUG_ON(offsetof(struct compat_shared_info, wc) != 0x900);
@@ -642,17 +630,39 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
 		break;
 
 	case KVM_XEN_ATTR_TYPE_SHARED_INFO:
-		mutex_lock(&kvm->arch.xen.xen_lock);
-		r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn, true);
-		mutex_unlock(&kvm->arch.xen.xen_lock);
-		break;
+	case KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA: {
+		int idx;
 
-	case KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA:
 		mutex_lock(&kvm->arch.xen.xen_lock);
-		r = kvm_xen_shared_info_init(kvm, data->u.shared_info.hva, false);
+
+		idx = srcu_read_lock(&kvm->srcu);
+		if (data->type == KVM_XEN_ATTR_TYPE_SHARED_INFO) {
+			if (data->u.shared_info.gfn == KVM_XEN_INVALID_GFN) {
+				kvm_gpc_deactivate(&kvm->arch.xen.shinfo_cache);
+				r = 0;
+			} else {
+				r = kvm_gpc_activate(&kvm->arch.xen.shinfo_cache,
+						     gfn_to_gpa(data->u.shared_info.gfn),
+						     PAGE_SIZE);
+			}
+		} else {
+			if (data->u.shared_info.hva == 0) {
+				kvm_gpc_deactivate(&kvm->arch.xen.shinfo_cache);
+				r = 0;
+			} else {
+				r = kvm_gpc_activate_hva(&kvm->arch.xen.shinfo_cache,
+							 data->u.shared_info.hva,
+							 PAGE_SIZE);
+			}
+		}
+		srcu_read_unlock(&kvm->srcu, idx);
+
+		if (!r && kvm->arch.xen.shinfo_cache.active)
+			r = kvm_xen_shared_info_init(kvm);
+
 		mutex_unlock(&kvm->arch.xen.xen_lock);
 		break;
-
+	}
 	case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
 		if (data->u.vector && data->u.vector < 0x10)
 			r = -EINVAL;
-- 
2.39.2
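
P.S. For reviewers wanting to see the userspace side: below is a
minimal sketch (not part of this patch) of how a VMM might drive the
attribute handled above, using only the existing KVM_XEN_HVM_SET_ATTR
uAPI. The helper name set_shinfo_gfn() is made up for illustration,
and a VM fd with Xen support already enabled via KVM_XEN_HVM_CONFIG
is assumed.

  #include <string.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /*
   * Point the shared_info pfn cache at a guest frame. Passing
   * KVM_XEN_INVALID_GFN instead deactivates the cache. With this
   * patch applied, kvm_xen_shared_info_init() re-initializes the
   * wallclock content each time the attribute is set.
   */
  static int set_shinfo_gfn(int vm_fd, __u64 gfn)
  {
          struct kvm_xen_hvm_attr attr;

          memset(&attr, 0, sizeof(attr));
          attr.type = KVM_XEN_ATTR_TYPE_SHARED_INFO;
          attr.u.shared_info.gfn = gfn;

          return ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &attr);
  }

The HVA-based variant is the same except that it sets attr.type to
KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA and fills in attr.u.shared_info.hva
with a userspace address (0 to deactivate) instead.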