On 11/2/21 18:36, David Woodhouse wrote:
+ asm volatile("1:\t" LOCK_PREFIX "xchgb %0, %2\n"
+ "\txor %1, %1\n"
+ "2:\n"
+ "\t.section .fixup,\"ax\"\n"
+ "3:\tmovl %3, %1\n"
+ "\tjmp\t2b\n"
+ "\t.previous\n"
+ _ASM_EXTABLE_UA(1b, 3b)
+ : "=r" (st_preempted),
+ "=r" (err)
+ : "m" (st->preempted),
+ "i" (-EFAULT),
+ "0" (st_preempted));
Since Peter is removing custom fixup sections, I'm going for code that is
slightly suboptimal (by just one extra instruction) but doesn't
conflict with his series.
Also, xchg doesn't need a lock prefix: when one of its operands is a
memory location, the processor's locking protocol is asserted implicitly.
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3301,21 +3301,15 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
*/
if (guest_pv_has(vcpu, KVM_FEATURE_PV_TLB_FLUSH)) {
u8 st_preempted = 0;
- int err;
+ int err = -EFAULT;
- asm volatile("1:\t" LOCK_PREFIX "xchgb %0, %2\n"
- "\txor %1, %1\n"
+ asm volatile("1: xchgb %0, %2\n"
+ "xor %1, %1\n"
"2:\n"
- "\t.section .fixup,\"ax\"\n"
- "3:\tmovl %3, %1\n"
- "\tjmp\t2b\n"
- "\t.previous\n"
- _ASM_EXTABLE_UA(1b, 3b)
- : "=r" (st_preempted),
- "=r" (err)
- : "m" (st->preempted),
- "i" (-EFAULT),
- "0" (st_preempted));
+ _ASM_EXTABLE_UA(1b, 2b)
+ : "+r" (st_preempted),
+ "+&r" (err)
+ : "m" (st->preempted));
if (err)
goto out;
Queued with these changes.
Paolo