[PATCH 35/35] exec: push BQL down to cpu->cpu_exec_interrupt

From: Paolo Bonzini <pbonzini@xxxxxxxxxx>

Most interrupt requests do not need to take the BQL, and in fact
most targets do not need it at all. Push the BQL acquisition down
from the generic cpu_handle_interrupt loop into the target-specific
cpu_exec_interrupt implementations.
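
As a sketch of the resulting pattern (a hypothetical target "xyz", not
part of this series), a cpu_exec_interrupt hook that has to touch the
interrupt controller now takes the BQL only around that access, after
including "qemu/main-loop.h" for qemu_mutex_lock_iothread():

    /* Hypothetical hook illustrating the new locking convention: the
     * generic loop no longer holds the BQL, so the hook acquires it
     * only for the part that touches device/interrupt-controller state.
     */
    bool xyz_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
    {
        CPUClass *cc = CPU_GET_CLASS(cs);

        if (interrupt_request & CPU_INTERRUPT_HARD) {
            qemu_mutex_lock_iothread();
            cs->exception_index = EXCP_INTERRUPT;   /* placeholder vector */
            cc->do_interrupt(cs);
            cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
            qemu_mutex_unlock_iothread();
            return true;
        }
        return false;
    }

Targets whose interrupt delivery touches no device state can skip the
lock entirely, which is the point of pushing it down.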

Cc: Aleksandar Markovic <amarkovic@xxxxxxxxxxxx>
Cc: Alexander Graf <agraf@xxxxxxx>
Cc: Anthony Green <green@xxxxxxxxxxxxxx>
Cc: Artyom Tarasenko <atar4qemu@xxxxxxxxx>
Cc: Aurelien Jarno <aurelien@xxxxxxxxxxx>
Cc: Christian Borntraeger <borntraeger@xxxxxxxxxx>
Cc: Chris Wulff <crwulff@xxxxxxxxx>
Cc: Cornelia Huck <cohuck@xxxxxxxxxx>
Cc: David Gibson <david@xxxxxxxxxxxxxxxxxxxxx>
Cc: David Hildenbrand <david@xxxxxxxxxx>
Cc: "Edgar E. Iglesias" <edgar.iglesias@xxxxxxxxx>
Cc: Eduardo Habkost <ehabkost@xxxxxxxxxx>
Cc: Guan Xuetao <gxt@xxxxxxxxxxxxxxx>
Cc: James Hogan <jhogan@xxxxxxxxxx>
Cc: kvm@xxxxxxxxxxxxxxx
Cc: Laurent Vivier <laurent@xxxxxxxxx>
Cc: Marcelo Tosatti <mtosatti@xxxxxxxxxx>
Cc: Marek Vasut <marex@xxxxxxx>
Cc: Mark Cave-Ayland <mark.cave-ayland@xxxxxxxxxxxx>
Cc: Michael Walle <michael@xxxxxxxx>
Cc: Peter Crosthwaite <crosthwaite.peter@xxxxxxxxx>
Cc: Peter Maydell <peter.maydell@xxxxxxxxxx>
Cc: qemu-arm@xxxxxxxxxx
Cc: qemu-ppc@xxxxxxxxxx
Cc: qemu-s390x@xxxxxxxxxx
Cc: Richard Henderson <rth@xxxxxxxxxxx>
Cc: Stafford Horne <shorne@xxxxxxxxx>
Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
Signed-off-by: Emilio G. Cota <cota@xxxxxxxxx>
---
 docs/devel/multi-thread-tcg.txt |  7 +++++--
 accel/tcg/cpu-exec.c            |  9 +--------
 target/arm/cpu.c                | 15 ++++++++++++++-
 target/i386/seg_helper.c        |  3 +++
 target/ppc/excp_helper.c        |  2 ++
 target/s390x/excp_helper.c      |  3 +++
 6 files changed, 28 insertions(+), 11 deletions(-)

diff --git a/docs/devel/multi-thread-tcg.txt b/docs/devel/multi-thread-tcg.txt
index 782bebc28b..422de4736b 100644
--- a/docs/devel/multi-thread-tcg.txt
+++ b/docs/devel/multi-thread-tcg.txt
@@ -231,8 +231,11 @@ BQL. Currently ARM targets serialise all ARM_CP_IO register accesses
 and also defer the reset/startup of vCPUs to the vCPU context by way
 of async_run_on_cpu().
 
-Updates to interrupt state are also protected by the BQL as they can
-often be cross vCPU.
+The CPUClass callbacks cpu_exec_interrupt and do_interrupt are invoked
+without BQL protection.  Accesses to the interrupt controller from
+the vCPU thread, for example while processing CPU_INTERRUPT_HARD, must
+either call qemu_mutex_lock_iothread/qemu_mutex_unlock_iothread or use
+a separate mutex.
 
 Memory Consistency
 ==================
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index b649e3d772..f5e08e79d1 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -524,7 +524,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
 
     if (unlikely(atomic_read(&cpu->interrupt_request))) {
         int interrupt_request;
-        qemu_mutex_lock_iothread();
+
         interrupt_request = atomic_read(&cpu->interrupt_request);
         if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
             /* Mask out external interrupts for this step. */
@@ -533,7 +533,6 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
         if (interrupt_request & CPU_INTERRUPT_DEBUG) {
             cpu_reset_interrupt(cpu, CPU_INTERRUPT_DEBUG);
             cpu->exception_index = EXCP_DEBUG;
-            qemu_mutex_unlock_iothread();
             return true;
         }
         if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
@@ -543,7 +542,6 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
             cpu_reset_interrupt(cpu, CPU_INTERRUPT_HALT);
             cpu->halted = 1;
             cpu->exception_index = EXCP_HLT;
-            qemu_mutex_unlock_iothread();
             return true;
         }
 #if defined(TARGET_I386)
@@ -554,14 +552,12 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
             cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
             do_cpu_init(x86_cpu);
             cpu->exception_index = EXCP_HALTED;
-            qemu_mutex_unlock_iothread();
             return true;
         }
 #else
         else if (interrupt_request & CPU_INTERRUPT_RESET) {
             replay_interrupt();
             cpu_reset(cpu);
-            qemu_mutex_unlock_iothread();
             return true;
         }
 #endif
@@ -585,9 +581,6 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
                the program flow was changed */
             *last_tb = NULL;
         }
-
-        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
-        qemu_mutex_unlock_iothread();
     }
 
     /* Finally, check if we need to exit to the main loop.  */
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index e2c492efdf..246ea13d8f 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -347,7 +347,8 @@ static void arm_cpu_reset(CPUState *s)
     hw_watchpoint_update_all(cpu);
 }
 
-bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+/* call with the BQL held */
+static bool arm_cpu_exec_interrupt_locked(CPUState *cs, int interrupt_request)
 {
     CPUClass *cc = CPU_GET_CLASS(cs);
     CPUARMState *env = cs->env_ptr;
@@ -401,6 +402,16 @@ bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
     return ret;
 }
 
+bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+    bool ret;
+
+    qemu_mutex_lock_iothread();
+    ret = arm_cpu_exec_interrupt_locked(cs, interrupt_request);
+    qemu_mutex_unlock_iothread();
+    return ret;
+}
+
 #if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
 static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
 {
@@ -409,6 +420,7 @@ static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
     CPUARMState *env = &cpu->env;
     bool ret = false;
 
+    qemu_mutex_lock_iothread();
     /* ARMv7-M interrupt masking works differently than -A or -R.
      * There is no FIQ/IRQ distinction. Instead of I and F bits
      * masking FIQ and IRQ interrupts, an exception is taken only
@@ -422,6 +434,7 @@ static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
         cc->do_interrupt(cs);
         ret = true;
     }
+    qemu_mutex_unlock_iothread();
     return ret;
 }
 #endif
diff --git a/target/i386/seg_helper.c b/target/i386/seg_helper.c
index 0dd85329db..2fdfbd3c37 100644
--- a/target/i386/seg_helper.c
+++ b/target/i386/seg_helper.c
@@ -19,6 +19,7 @@
  */
 
 #include "qemu/osdep.h"
+#include "qemu/main-loop.h"
 #include "cpu.h"
 #include "qemu/log.h"
 #include "exec/helper-proto.h"
@@ -1324,7 +1325,9 @@ bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
 #if !defined(CONFIG_USER_ONLY)
     if (interrupt_request & CPU_INTERRUPT_POLL) {
         cpu_reset_interrupt(cs, CPU_INTERRUPT_POLL);
+        qemu_mutex_lock_iothread();
         apic_poll_irq(cpu->apic_state);
+        qemu_mutex_unlock_iothread();
         /* Don't process multiple interrupt requests in a single call.
            This is required to make icount-driven execution deterministic. */
         return true;
diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c
index 8b2cc48cad..57acba2a80 100644
--- a/target/ppc/excp_helper.c
+++ b/target/ppc/excp_helper.c
@@ -885,10 +885,12 @@ bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
     CPUPPCState *env = &cpu->env;
 
     if (interrupt_request & CPU_INTERRUPT_HARD) {
+        qemu_mutex_lock_iothread();
         ppc_hw_interrupt(env);
         if (env->pending_interrupts == 0) {
             cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
         }
+        qemu_mutex_unlock_iothread();
         return true;
     }
     return false;
diff --git a/target/s390x/excp_helper.c b/target/s390x/excp_helper.c
index 931c0103c8..f2a93abf01 100644
--- a/target/s390x/excp_helper.c
+++ b/target/s390x/excp_helper.c
@@ -480,10 +480,13 @@ bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
                the parent EXECUTE insn.  */
             return false;
         }
+        qemu_mutex_lock_iothread();
         if (s390_cpu_has_int(cpu)) {
             s390_cpu_do_interrupt(cs);
+            qemu_mutex_unlock_iothread();
             return true;
         }
+        qemu_mutex_unlock_iothread();
         if (env->psw.mask & PSW_MASK_WAIT) {
             /* Woken up because of a floating interrupt but it has already
              * been delivered. Go back to sleep. */
-- 
2.17.1
