On Thu, Mar 10, 2011 at 3:13 PM, Corentin Chary <corentin.chary@xxxxxxxxx> wrote:
> The threaded VNC servers messed up with QEMU fd handlers without
> any kind of locking, and that can cause some nasty race conditions.
>
> Using qemu_mutex_lock_iothread() won't work because vnc_dpy_cpy(),
> which will wait for the current job queue to finish, can be called with
> the iothread lock held.
>
> Instead, we now store the data in a temporary buffer, and use a bottom
> half to notify the main thread that new data is available.
>
> vnc_[un]lock_output() is still needed to access VncState members like
> abort, csock or jobs_buffer.
>
> Signed-off-by: Corentin Chary <corentin.chary@xxxxxxxxx>
> ---
>  ui/vnc-jobs-async.c |   48 +++++++++++++++++++++++++++++-------------------
>  ui/vnc-jobs.h       |    1 +
>  ui/vnc.c            |   12 ++++++++++++
>  ui/vnc.h            |    2 ++
>  4 files changed, 44 insertions(+), 19 deletions(-)
>
> diff --git a/ui/vnc-jobs-async.c b/ui/vnc-jobs-async.c
> index f596247..4438776 100644
> --- a/ui/vnc-jobs-async.c
> +++ b/ui/vnc-jobs-async.c
> @@ -28,6 +28,7 @@
>
>  #include "vnc.h"
>  #include "vnc-jobs.h"
> +#include "qemu_socket.h"
>
>  /*
>   * Locking:
> @@ -155,6 +156,24 @@ void vnc_jobs_join(VncState *vs)
>          qemu_cond_wait(&queue->cond, &queue->mutex);
>      }
>      vnc_unlock_queue(queue);
> +    vnc_jobs_consume_buffer(vs);
> +}
> +
> +void vnc_jobs_consume_buffer(VncState *vs)
> +{
> +    bool flush;
> +
> +    vnc_lock_output(vs);
> +    if (vs->jobs_buffer.offset) {
> +        vnc_write(vs, vs->jobs_buffer.buffer, vs->jobs_buffer.offset);
> +        buffer_reset(&vs->jobs_buffer);
> +    }
> +    flush = vs->csock != -1 && vs->abort != true;
> +    vnc_unlock_output(vs);
> +
> +    if (flush) {
> +        vnc_flush(vs);
> +    }
>  }
>
>  /*
> @@ -197,7 +216,6 @@ static int vnc_worker_thread_loop(VncJobQueue *queue)
>      VncState vs;
>      int n_rectangles;
>      int saved_offset;
> -    bool flush;
>
>      vnc_lock_queue(queue);
>      while (QTAILQ_EMPTY(&queue->jobs) && 
!queue->exit) {
> @@ -213,6 +231,7 @@ static int vnc_worker_thread_loop(VncJobQueue *queue)
>
>      vnc_lock_output(job->vs);
>      if (job->vs->csock == -1 || job->vs->abort == true) {
> +        vnc_unlock_output(job->vs);
>          goto disconnected;
>      }
>      vnc_unlock_output(job->vs);
> @@ -233,10 +252,6 @@ static int vnc_worker_thread_loop(VncJobQueue *queue)
>
>          if (job->vs->csock == -1) {
>              vnc_unlock_display(job->vs->vd);
> -            /* output mutex must be locked before going to
> -             * disconnected:
> -             */
> -            vnc_lock_output(job->vs);
>              goto disconnected;
>          }
>
> @@ -254,24 +269,19 @@ static int vnc_worker_thread_loop(VncJobQueue *queue)
>      vs.output.buffer[saved_offset] = (n_rectangles >> 8) & 0xFF;
>      vs.output.buffer[saved_offset + 1] = n_rectangles & 0xFF;
>
> -    /* Switch back buffers */
>      vnc_lock_output(job->vs);
> -    if (job->vs->csock == -1) {
> -        goto disconnected;
> +    if (job->vs->csock != -1) {
> +        buffer_reserve(&job->vs->jobs_buffer, vs.output.offset);
> +        buffer_append(&job->vs->jobs_buffer, vs.output.buffer,
> +                      vs.output.offset);
> +        /* Copy persistent encoding data */
> +        vnc_async_encoding_end(job->vs, &vs);
> +
> +        qemu_bh_schedule(job->vs->bh);
>      }
> -
> -    vnc_write(job->vs, vs.output.buffer, vs.output.offset);
> -
> -disconnected:
> -    /* Copy persistent encoding data */
> -    vnc_async_encoding_end(job->vs, &vs);
> -    flush = (job->vs->csock != -1 && job->vs->abort != true);
>      vnc_unlock_output(job->vs);
>
> -    if (flush) {
> -        vnc_flush(job->vs);
> -    }
> -
> +disconnected:
>      vnc_lock_queue(queue);
>      QTAILQ_REMOVE(&queue->jobs, job, next);
>      vnc_unlock_queue(queue);
> diff --git a/ui/vnc-jobs.h b/ui/vnc-jobs.h
> index b8dab81..4c661f9 100644
> --- a/ui/vnc-jobs.h
> +++ b/ui/vnc-jobs.h
> @@ -40,6 +40,7 @@ void vnc_jobs_join(VncState *vs);
>
>  #ifdef CONFIG_VNC_THREAD
>
> +void vnc_jobs_consume_buffer(VncState *vs);
>  void 
vnc_start_worker_thread(void);
>  bool vnc_worker_thread_running(void);
>  void vnc_stop_worker_thread(void);
> diff --git a/ui/vnc.c b/ui/vnc.c
> index 610f884..f28873b 100644
> --- a/ui/vnc.c
> +++ b/ui/vnc.c
> @@ -1011,7 +1011,10 @@ static void vnc_disconnect_finish(VncState *vs)
>
>  #ifdef CONFIG_VNC_THREAD
>      qemu_mutex_destroy(&vs->output_mutex);
> +    qemu_bh_delete(vs->bh);
> +    buffer_free(&vs->jobs_buffer);
>  #endif
> +
>      for (i = 0; i < VNC_STAT_ROWS; ++i) {
>          qemu_free(vs->lossy_rect[i]);
>      }
> @@ -1226,6 +1229,14 @@ static long vnc_client_read_plain(VncState *vs)
>      return ret;
>  }
>
> +#ifdef CONFIG_VNC_THREAD
> +static void vnc_jobs_bh(void *opaque)
> +{
> +    VncState *vs = opaque;
> +
> +    vnc_jobs_consume_buffer(vs);
> +}
> +#endif
>
>  /*
>   * First function called whenever there is more data to be read from
> @@ -2525,6 +2536,7 @@ static void vnc_connect(VncDisplay *vd, int csock)
>
>  #ifdef CONFIG_VNC_THREAD
>      qemu_mutex_init(&vs->output_mutex);
> +    vs->bh = qemu_bh_new(vnc_jobs_bh, vs);
>  #endif
>
>      QTAILQ_INSERT_HEAD(&vd->clients, vs, next);
> diff --git a/ui/vnc.h b/ui/vnc.h
> index 8a1e7b9..bca5f87 100644
> --- a/ui/vnc.h
> +++ b/ui/vnc.h
> @@ -283,6 +283,8 @@ struct VncState
>      VncJob job;
>  #else
>      QemuMutex output_mutex;
> +    QEMUBH *bh;
> +    Buffer jobs_buffer;
>  #endif
>
>      /* Encoding specific, if you add something here, don't forget to
> --
> 1.7.3.4
>

Paolo, Anthony, Jan, are you ok with that one?

Peter Lieven, could you test that patch?

Thanks

-- 
Corentin Chary
http://xf.iksaif.net
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html