Re: [PATCH v5] vnc: don't mess up with iohandlers in the vnc thread

On Thu, Mar 10, 2011 at 3:13 PM, Corentin Chary
<corentin.chary@xxxxxxxxx> wrote:
> The threaded VNC server messed with QEMU's fd handlers without
> any kind of locking, which can cause some nasty race conditions.
>
> Using qemu_mutex_lock_iothread() won't work because vnc_dpy_cpy(),
> which will wait for the current job queue to finish, can be called with
> the iothread lock held.
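
To spell the deadlock out (a hypothetical trace, not code from the patch):

/*
 *   main thread                         worker thread
 *   -----------                         -------------
 *   qemu_mutex_lock_iothread()
 *   vnc_dpy_cpy()
 *     -> vnc_jobs_join()                qemu_mutex_lock_iothread()
 *        ... waits for the worker       ... waits for the main thread
 *        to drain the job queue         to release the iothread lock
 *
 * Neither thread can make progress, so the worker must never take
 * the iothread lock.
 */
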
>
> Instead, we now store the data in a temporary buffer, and use a bottom
> half to notify the main thread that new data is available.
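
For reference, the handoff looks roughly like this. A minimal sketch only:
worker_emit() is an illustrative name (the real producer is
vnc_worker_thread_loop()), and the declarations come from the headers this
patch touches:

#include "vnc.h"
#include "vnc-jobs.h"

static void vnc_jobs_bh(void *opaque)        /* runs in the main thread */
{
    VncState *vs = opaque;

    /* Main-loop context: safe to write to the socket from here. */
    vnc_jobs_consume_buffer(vs);
}

static void worker_emit(VncState *vs, const uint8_t *data, size_t len)
{
    /* Worker-thread context: only stage the data under the output
     * lock, never touch the socket or the fd handlers directly. */
    vnc_lock_output(vs);
    buffer_reserve(&vs->jobs_buffer, len);
    buffer_append(&vs->jobs_buffer, data, len);
    vnc_unlock_output(vs);

    /* Kick the main loop; vnc_jobs_bh() will run there. */
    qemu_bh_schedule(vs->bh);
}

vs->bh is created once per client in vnc_connect() with
qemu_bh_new(vnc_jobs_bh, vs) and deleted in vnc_disconnect_finish(), as in
the diff below.
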
>
> vnc_[un]lock_output() is still needed to access VncState members like
> abort, csock or jobs_buffer.
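
That is, the rule for any thread touching those fields (sketch):

vnc_lock_output(vs);
/* vs->abort, vs->csock and vs->jobs_buffer may only be read or
 * written while the output lock is held. */
if (vs->csock != -1 && !vs->abort) {
    /* ... stage data in vs->jobs_buffer ... */
}
vnc_unlock_output(vs);
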
>
> Signed-off-by: Corentin Chary <corentin.chary@xxxxxxxxx>
> ---
>  ui/vnc-jobs-async.c |   48 +++++++++++++++++++++++++++++-------------------
>  ui/vnc-jobs.h       |    1 +
>  ui/vnc.c            |   12 ++++++++++++
>  ui/vnc.h            |    2 ++
>  4 files changed, 44 insertions(+), 19 deletions(-)
>
> diff --git a/ui/vnc-jobs-async.c b/ui/vnc-jobs-async.c
> index f596247..4438776 100644
> --- a/ui/vnc-jobs-async.c
> +++ b/ui/vnc-jobs-async.c
> @@ -28,6 +28,7 @@
>
>  #include "vnc.h"
>  #include "vnc-jobs.h"
> +#include "qemu_socket.h"
>
>  /*
>   * Locking:
> @@ -155,6 +156,24 @@ void vnc_jobs_join(VncState *vs)
>          qemu_cond_wait(&queue->cond, &queue->mutex);
>      }
>      vnc_unlock_queue(queue);
> +    vnc_jobs_consume_buffer(vs);
> +}
> +
> +void vnc_jobs_consume_buffer(VncState *vs)
> +{
> +    bool flush;
> +
> +    vnc_lock_output(vs);
> +    if (vs->jobs_buffer.offset) {
> +        vnc_write(vs, vs->jobs_buffer.buffer, vs->jobs_buffer.offset);
> +        buffer_reset(&vs->jobs_buffer);
> +    }
> +    flush = vs->csock != -1 && vs->abort != true;
> +    vnc_unlock_output(vs);
> +
> +    if (flush) {
> +        vnc_flush(vs);
> +    }
>  }
>
> Â/*
> @@ -197,7 +216,6 @@ static int vnc_worker_thread_loop(VncJobQueue *queue)
>      VncState vs;
>      int n_rectangles;
>      int saved_offset;
> -    bool flush;
>
>      vnc_lock_queue(queue);
>      while (QTAILQ_EMPTY(&queue->jobs) && !queue->exit) {
> @@ -213,6 +231,7 @@ static int vnc_worker_thread_loop(VncJobQueue *queue)
>
>      vnc_lock_output(job->vs);
>      if (job->vs->csock == -1 || job->vs->abort == true) {
> +        vnc_unlock_output(job->vs);
>          goto disconnected;
>      }
>      vnc_unlock_output(job->vs);
> @@ -233,10 +252,6 @@ static int vnc_worker_thread_loop(VncJobQueue *queue)
>
>          if (job->vs->csock == -1) {
>              vnc_unlock_display(job->vs->vd);
> -            /* output mutex must be locked before going to
> -             * disconnected:
> -             */
> -            vnc_lock_output(job->vs);
>              goto disconnected;
>          }
>
> @@ -254,24 +269,19 @@ static int vnc_worker_thread_loop(VncJobQueue *queue)
>      vs.output.buffer[saved_offset] = (n_rectangles >> 8) & 0xFF;
>      vs.output.buffer[saved_offset + 1] = n_rectangles & 0xFF;
>
> -    /* Switch back buffers */
>      vnc_lock_output(job->vs);
> -    if (job->vs->csock == -1) {
> -        goto disconnected;
> +    if (job->vs->csock != -1) {
> +        buffer_reserve(&job->vs->jobs_buffer, vs.output.offset);
> +        buffer_append(&job->vs->jobs_buffer, vs.output.buffer,
> +                      vs.output.offset);
> +        /* Copy persistent encoding data */
> +        vnc_async_encoding_end(job->vs, &vs);
> +
> +        qemu_bh_schedule(job->vs->bh);
>      }
> -
> -    vnc_write(job->vs, vs.output.buffer, vs.output.offset);
> -
> -disconnected:
> -    /* Copy persistent encoding data */
> -    vnc_async_encoding_end(job->vs, &vs);
> -    flush = (job->vs->csock != -1 && job->vs->abort != true);
>      vnc_unlock_output(job->vs);
>
> -    if (flush) {
> -        vnc_flush(job->vs);
> -    }
> -
> +disconnected:
>      vnc_lock_queue(queue);
>      QTAILQ_REMOVE(&queue->jobs, job, next);
>      vnc_unlock_queue(queue);
> diff --git a/ui/vnc-jobs.h b/ui/vnc-jobs.h
> index b8dab81..4c661f9 100644
> --- a/ui/vnc-jobs.h
> +++ b/ui/vnc-jobs.h
> @@ -40,6 +40,7 @@ void vnc_jobs_join(VncState *vs);
>
>  #ifdef CONFIG_VNC_THREAD
>
> +void vnc_jobs_consume_buffer(VncState *vs);
>  void vnc_start_worker_thread(void);
>  bool vnc_worker_thread_running(void);
>  void vnc_stop_worker_thread(void);
> diff --git a/ui/vnc.c b/ui/vnc.c
> index 610f884..f28873b 100644
> --- a/ui/vnc.c
> +++ b/ui/vnc.c
> @@ -1011,7 +1011,10 @@ static void vnc_disconnect_finish(VncState *vs)
>
>  #ifdef CONFIG_VNC_THREAD
>      qemu_mutex_destroy(&vs->output_mutex);
> +    qemu_bh_delete(vs->bh);
> +    buffer_free(&vs->jobs_buffer);
>  #endif
> +
>      for (i = 0; i < VNC_STAT_ROWS; ++i) {
>          qemu_free(vs->lossy_rect[i]);
>      }
> @@ -1226,6 +1229,14 @@ static long vnc_client_read_plain(VncState *vs)
>      return ret;
>  }
>
> +#ifdef CONFIG_VNC_THREAD
> +static void vnc_jobs_bh(void *opaque)
> +{
> +    VncState *vs = opaque;
> +
> +    vnc_jobs_consume_buffer(vs);
> +}
> +#endif
>
>  /*
>   * First function called whenever there is more data to be read from
> @@ -2525,6 +2536,7 @@ static void vnc_connect(VncDisplay *vd, int csock)
>
>  #ifdef CONFIG_VNC_THREAD
>      qemu_mutex_init(&vs->output_mutex);
> +    vs->bh = qemu_bh_new(vnc_jobs_bh, vs);
>  #endif
>
>      QTAILQ_INSERT_HEAD(&vd->clients, vs, next);
> diff --git a/ui/vnc.h b/ui/vnc.h
> index 8a1e7b9..bca5f87 100644
> --- a/ui/vnc.h
> +++ b/ui/vnc.h
> @@ -283,6 +283,8 @@ struct VncState
>      VncJob job;
>  #else
>      QemuMutex output_mutex;
> +    QEMUBH *bh;
> +    Buffer jobs_buffer;
>  #endif
>
>      /* Encoding specific, if you add something here, don't forget to
> --
> 1.7.3.4
>
>

Paolo, Anthony, Jan, are you OK with this one?
Peter Lieven, could you test this patch?
Thanks

-- 
Corentin Chary
http://xf.iksaif.net

