On Mon, 2021-11-15 at 16:50 +0000, David Woodhouse wrote:
> +	/*
> +	 * Now we have a valid (protected by srcu) userspace HVA in
> +	 * ghc->hva which points to the struct vcpu_info. If there
> +	 * are any bits in the in-kernel evtchn_pending_sel then
> +	 * we need to write those to the guest vcpu_info and set
> +	 * its evtchn_upcall_pending flag. If there aren't any bits
> +	 * to add, we only want to *check* evtchn_upcall_pending.
> +	 */
> +	if (evtchn_pending_sel) {
> +		bool long_mode = v->kvm->arch.xen.long_mode;
> +
> +		if (!user_access_begin((void *)ghc->hva, sizeof(struct vcpu_info)))
> +			return 0;
> +
> +		if (IS_ENABLED(CONFIG_64BIT) && long_mode) {
> +			struct vcpu_info __user *vi = (void *)ghc->hva;
> +
> +			/* Attempt to set the evtchn_pending_sel bits in the
> +			 * guest, and if that succeeds then clear the same
> +			 * bits in the in-kernel version. */
> +			asm volatile("1:\t" LOCK_PREFIX "orq %0, %1\n"
> +				     "\tnotq %0\n"
> +				     "\t" LOCK_PREFIX "andq %0, %2\n"
> +				     "2:\n"
> +				     "\t.section .fixup,\"ax\"\n"
> +				     "3:\tjmp\t2b\n"
> +				     "\t.previous\n"
> +				     _ASM_EXTABLE_UA(1b, 3b)
> +				     : "=r" (evtchn_pending_sel)
> +				     : "m" (vi->evtchn_pending_sel),
> +				       "m" (v->arch.xen.evtchn_pending_sel),
> +				       "0" (evtchn_pending_sel));
> +		} else {
> +			struct compat_vcpu_info __user *vi = (void *)ghc->hva;
> +			u32 evtchn_pending_sel32 = evtchn_pending_sel;
> +
> +			/* Attempt to set the evtchn_pending_sel bits in the
> +			 * guest, and if that succeeds then clear the same
> +			 * bits in the in-kernel version. */
> +			asm volatile("1:\t" LOCK_PREFIX "orl %0, %1\n"
> +				     "\tnotl %0\n"
> +				     "\t" LOCK_PREFIX "andl %0, %2\n"
> +				     "2:\n"
> +				     "\t.section .fixup,\"ax\"\n"
> +				     "3:\tjmp\t2b\n"
> +				     "\t.previous\n"
> +				     _ASM_EXTABLE_UA(1b, 3b)
> +				     : "=r" (evtchn_pending_sel32)
> +				     : "m" (vi->evtchn_pending_sel),
> +				       "m" (v->arch.xen.evtchn_pending_sel),
> +				       "0" (evtchn_pending_sel32));
> +		}
> +		rc = 1;
> +		unsafe_put_user(rc, (u8 __user *)ghc->hva + offset, err);
> +
> +	err:
> +		user_access_end();
> +
> +		mark_page_dirty_in_slot(v->kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);
> +	} else {
> +		__get_user(rc, (u8 __user *)ghc->hva + offset);
> +	}

I will concede that my assertion that "it already has a perfectly
serviceable userspace HVA and it's just a matter of writing a trivial
bit of inline asm" was probably stretching the definition of the word
"trivial" a little bit.

I can convert this bit to use a gfn_to_gpa_cache for the vcpu_info
too, once the dust settles on the implementation of that.
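In the meantime, for anyone squinting at the asm: here's a standalone
userspace sketch of what the LOCK ORQ / NOTQ / LOCK ANDQ sequence is
doing, with C11 atomics standing in for the asm. The variable and
function names below are invented purely for illustration. The one
thing plain C can't express is the .fixup/_ASM_EXTABLE_UA part: if the
OR to the guest's word faults, the asm jumps straight to label 2 and
never executes the AND, so the in-kernel bits survive to be retried
later. That's why the real thing has to be inline asm against the
userspace HVA.

/*
 * Illustrative only -- not the patch. C11-atomics rendering of the
 * bit transfer performed by the asm above. All names are invented
 * for the sketch.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

static _Atomic uint64_t guest_pending_sel;  /* plays vi->evtchn_pending_sel */
static _Atomic uint64_t shadow_pending_sel; /* plays v->arch.xen.evtchn_pending_sel */

static void transfer_pending_bits(uint64_t sel)
{
	/* LOCK ORQ: publish the pending bits into the guest-visible word */
	atomic_fetch_or(&guest_pending_sel, sel);

	/*
	 * NOTQ + LOCK ANDQ: clear exactly those bits from the kernel
	 * copy. In the kernel this step is only reached if the OR above
	 * didn't fault, which the extable fixup guarantees.
	 */
	atomic_fetch_and(&shadow_pending_sel, ~sel);
}

int main(void)
{
	atomic_store(&shadow_pending_sel, 0x9);	/* two event words pending */
	transfer_pending_bits(atomic_load(&shadow_pending_sel));
	printf("guest=%#" PRIx64 " shadow=%#" PRIx64 "\n",
	       atomic_load(&guest_pending_sel),
	       atomic_load(&shadow_pending_sel));
	return 0;
}

(The rc = 1 store through unsafe_put_user afterwards is the separate
step of raising evtchn_upcall_pending once the selector bits have been
published.)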