Re: [PATCH 10/15] worker: use glib main loop

On Thu, 2015-12-03 at 16:27 +0000, Frediano Ziglio wrote:
> From: Marc-André Lureau <marcandre.lureau@xxxxxxxxx>
> 
> Clean up, more extensible.

I think this should be a bit less terse... perhaps:
Use the glib main loop instead of writing our own. The glib loop is both
cleaner to use and more extensible. It is also very mature and reduces the
maintenance burden on the spice server.

> 
> Avoid server hanging when no client are connected.


This sounds like something that might belong in a separate patch?


> ---
>  server/Makefile.am         |   2 -
>  server/red-worker.c        | 395 ++++++++++++++++++++++++++++----------------
> -
>  server/red-worker.h        |   1 +
>  server/spice_timer_queue.c | 273 -------------------------------
>  server/spice_timer_queue.h |  43 -----
>  5 files changed, 246 insertions(+), 468 deletions(-)
>  delete mode 100644 server/spice_timer_queue.c
>  delete mode 100644 server/spice_timer_queue.h
> 
> diff --git a/server/Makefile.am b/server/Makefile.am
> index d4fc972..88825d8 100644
> --- a/server/Makefile.am
> +++ b/server/Makefile.am
> @@ -121,8 +121,6 @@ libspice_server_la_SOURCES =			\
>  	spice.h					\
>  	stat.h					\
>  	spicevmc.c				\
> -	spice_timer_queue.c			\
> -	spice_timer_queue.h			\
>  	zlib-encoder.c				\
>  	zlib-encoder.h				\
>  	image-cache.h			\
> diff --git a/server/red-worker.c b/server/red-worker.c
> index aecfcf9..9e8fcbb 100644
> --- a/server/red-worker.c
> +++ b/server/red-worker.c
> @@ -49,31 +49,21 @@
>  
>  #include "spice.h"
>  #include "red-worker.h"
> -#include "spice_timer_queue.h"
>  #include "cursor-channel.h"
>  #include "tree.h"
>  
>  #define CMD_RING_POLL_TIMEOUT 10 //milli
>  #define CMD_RING_POLL_RETRIES 200
>  
> -#define MAX_EVENT_SOURCES 20
> -#define INF_EVENT_WAIT ~0
> -
> -struct SpiceWatch {
> -    struct RedWorker *worker;
> -    SpiceWatchFunc watch_func;
> -    void *watch_func_opaque;
> -};
> -
>  struct RedWorker {
>      pthread_t thread;
>      clockid_t clockid;
> +    GMainContext *main_context;
>      QXLInstance *qxl;
>      RedDispatcher *red_dispatcher;
>      int running;
> -    struct pollfd poll_fds[MAX_EVENT_SOURCES];
> -    struct SpiceWatch watches[MAX_EVENT_SOURCES];
> -    unsigned int event_timeout;
> +
> +    gint timeout;
>  
>      DisplayChannel *display_channel;
>      uint32_t display_poll_tries;
> @@ -99,6 +89,13 @@ struct RedWorker {
>      FILE *record_fd;
>  };
>  
> +GMainContext* red_worker_get_context(RedWorker *worker)


I'd prefer _get_main_context()


> +{
> +    spice_return_val_if_fail(worker, NULL);
> +
> +    return worker->main_context;
> +}
> +
>  QXLInstance* red_worker_get_qxl(RedWorker *worker)
>  {
>      spice_return_val_if_fail(worker != NULL, NULL);
> @@ -182,7 +179,9 @@ static int red_process_cursor(RedWorker *worker, uint32_t
> max_pipe_size, int *ri
>              *ring_is_empty = TRUE;
>              if (worker->cursor_poll_tries < CMD_RING_POLL_RETRIES) {
>                  worker->cursor_poll_tries++;
> -                worker->event_timeout = MIN(worker->event_timeout,
> CMD_RING_POLL_TIMEOUT);
> +                worker->timeout = worker->timeout == -1 ?
> +                    CMD_RING_POLL_TIMEOUT :
> +                    MIN(worker->timeout, CMD_RING_POLL_TIMEOUT);
>                  return n;
>              }
>              if (worker->cursor_poll_tries > CMD_RING_POLL_RETRIES ||
> @@ -228,7 +227,6 @@ static int red_process_display(RedWorker *worker, uint32_t
> max_pipe_size, int *r
>  {
>      QXLCommandExt ext_cmd;
>      int n = 0;
> -    uint64_t start = red_get_monotonic_time();
>  
>      if (!worker->running) {
>          *ring_is_empty = TRUE;
> @@ -237,14 +235,30 @@ static int red_process_display(RedWorker *worker,
> uint32_t max_pipe_size, int *r
>  
>      worker->process_display_generation++;
>      *ring_is_empty = FALSE;
> -    while (!display_is_connected(worker) ||
> -           // TODO: change to average pipe size?
> -           red_channel_min_pipe_size(RED_CHANNEL(worker->display_channel)) <=
> max_pipe_size) {
> +    for (;;) {
> +
> +        if (display_is_connected(worker)) {
> +
> +            if (red_channel_all_blocked(RED_CHANNEL(worker
> ->display_channel))) {
> +                spice_info("all display clients are blocking");
> +                return n;
> +            }

This is a change of behavior. The previous code checked for all_blocked() after
calling qif->get_command() and incrementing n. (Could this be related to the
"avoid server hanging" comment from the commit log?)


> +
> +
> +            // TODO: change to average pipe size?
> +            if (red_channel_min_pipe_size(RED_CHANNEL(worker
> ->display_channel)) > max_pipe_size) {
> +                spice_info("too much item in the display clients pipe
> already");

"too much item" is not proper. Change to "Too many items". Also, "clients" ->
"client's"


> +                return n;
> +            }
> +        }
> +
>          if (!worker->qxl->st->qif->get_command(worker->qxl, &ext_cmd)) {
>              *ring_is_empty = TRUE;;
>              if (worker->display_poll_tries < CMD_RING_POLL_RETRIES) {
>                  worker->display_poll_tries++;
> -                worker->event_timeout = MIN(worker->event_timeout,
> CMD_RING_POLL_TIMEOUT);
> +                worker->timeout = worker->timeout == -1 ?
> +                    CMD_RING_POLL_TIMEOUT :
> +                    MIN(worker->timeout, CMD_RING_POLL_TIMEOUT);


I find this a bit awkward. I'd almost consider reverting to the previous
approach (where the timeout was an unsigned int initially set to ~0) or adding a
simple inline function such as red_worker_update_timeout() to hide this logic
(rough sketch below).
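
Something along these lines (untested sketch):

    static inline void red_worker_update_timeout(RedWorker *worker, gint timeout)
    {
        /* -1 means "no timeout set yet" */
        worker->timeout = (worker->timeout == -1) ?
            timeout : MIN(worker->timeout, timeout);
    }

so both call sites become a single red_worker_update_timeout(worker, CMD_RING_POLL_TIMEOUT);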


>                  return n;
>              }
>              if (worker->display_poll_tries > CMD_RING_POLL_RETRIES ||
> @@ -329,13 +343,8 @@ static int red_process_display(RedWorker *worker,
> uint32_t max_pipe_size, int *r
>              spice_error("bad command type");
>          }
>          n++;
> -        if ((worker->display_channel &&
> -             red_channel_all_blocked(&worker->display_channel->common.base))
> -            || red_get_monotonic_time() - start > 10 * 1000 * 1000) {

This all_blocked() check was moved up (as mentioned above), but the elapsed-time
check was simply dropped with no explanation...
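
If the elapsed-time limit is still wanted, it could be kept at the end of the
loop body, roughly as before:

    uint64_t start = red_get_monotonic_time();
    ...
    if (red_get_monotonic_time() - start > 10 * 1000 * 1000) {
        worker->timeout = 0;
        return n;
    }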


> -            worker->event_timeout = 0;
> -            return n;
> -        }
>      }
> +
>      return n;
>  }
>  
> @@ -511,81 +520,159 @@ static int
> common_channel_config_socket(RedChannelClient *rcc)
>      return TRUE;
>  }
>  
> -static void worker_watch_update_mask(SpiceWatch *watch, int event_mask)
> +typedef struct SpiceTimer {
> +    SpiceTimerFunc func;
> +    void *opaque;
> +    guint source_id;
> +} SpiceTimer;
> +
> +static SpiceTimer* worker_timer_add(SpiceTimerFunc func, void *opaque)
>  {
> -    struct RedWorker *worker;
> -    int i;
> +    SpiceTimer *timer = g_new0(SpiceTimer, 1);
> +
> +    timer->func = func;
> +    timer->opaque = opaque;
> +
> +    return timer;
> +}
> +
> +static gboolean worker_timer_func(gpointer user_data)
> +{
> +    SpiceTimer *timer = user_data;
> +
> +    timer->source_id = 0;
> +    timer->func(timer->opaque);
> +    /* timer might be free after func(), don't touch */
> +
> +    return FALSE;
> +}
>  
> -    if (!watch) {
> +static void worker_timer_cancel(SpiceTimer *timer)
> +{
> +    if (timer->source_id == 0)
>          return;

Use g_return_if_fail() here? It seems to me that canceling a timer without a
source_id may indicate a programmer error...
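
i.e. something like:

    static void worker_timer_cancel(SpiceTimer *timer)
    {
        g_return_if_fail(timer->source_id != 0);

        g_source_remove(timer->source_id);
        timer->source_id = 0;
    }

(though note that worker_timer_start() calls worker_timer_cancel()
unconditionally, so that caller would then have to check source_id itself)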


> -    }
>  
> -    worker = watch->worker;
> -    i = watch - worker->watches;
> +    g_source_remove(timer->source_id);
> +    timer->source_id = 0;
> +}
>  
> -    worker->poll_fds[i].events = 0;
> -    if (event_mask & SPICE_WATCH_EVENT_READ) {
> -        worker->poll_fds[i].events |= POLLIN;
> -    }
> -    if (event_mask & SPICE_WATCH_EVENT_WRITE) {
> -        worker->poll_fds[i].events |= POLLOUT;
> +static void worker_timer_start(SpiceTimer *timer, uint32_t ms)
> +{
> +    worker_timer_cancel(timer);
> +
> +    timer->source_id = g_timeout_add(ms, worker_timer_func, timer);
> +}
> +
> +static void worker_timer_remove(SpiceTimer *timer)
> +{
> +    worker_timer_cancel(timer);
> +    g_free(timer);
> +}
> +
> +static GIOCondition spice_event_to_giocondition(int event_mask)
> +{
> +    GIOCondition condition = 0;
> +
> +    if (event_mask & SPICE_WATCH_EVENT_READ)
> +        condition |= G_IO_IN;
> +    if (event_mask & SPICE_WATCH_EVENT_WRITE)
> +        condition |= G_IO_OUT;
> +
> +    return condition;
> +}
> +
> +static int giocondition_to_spice_event(GIOCondition condition)
> +{
> +    int event = 0;
> +
> +    if (condition & G_IO_IN)
> +        event |= SPICE_WATCH_EVENT_READ;
> +    if (condition & G_IO_OUT)
> +        event |= SPICE_WATCH_EVENT_WRITE;
> +
> +    return event;
> +}
> +
> +struct SpiceWatch {
> +    GIOChannel *channel;
> +    GSource *source;
> +    RedChannelClient *rcc;
> +    SpiceWatchFunc func;
> +};
> +
> +static gboolean watch_func(GIOChannel *source, GIOCondition condition,
> +                           gpointer data)
> +{
> +    SpiceWatch *watch = data;
> +    int fd = g_io_channel_unix_get_fd(source);
> +
> +    watch->func(fd, giocondition_to_spice_event(condition), watch->rcc);
> +
> +    return TRUE;
> +}
> +
> +static void worker_watch_update_mask(SpiceWatch *watch, int events)
> +{
> +    RedWorker *worker;
> +
> +    spice_return_if_fail(watch != NULL);
> +    worker = SPICE_CONTAINEROF(watch->rcc->channel, CommonChannel, base)
> ->worker;
> +
> +    if (watch->source) {
> +        g_source_destroy(watch->source);
> +        watch->source = NULL;
>      }
> +
> +    if (!events)
> +        return;
> +
> +    watch->source = g_io_create_watch(watch->channel,
> spice_event_to_giocondition(events));
> +    g_source_set_callback(watch->source, (GSourceFunc)watch_func, watch,
> NULL);
> +    g_source_attach(watch->source, worker->main_context);
>  }
>  
> -static SpiceWatch *worker_watch_add(int fd, int event_mask, SpiceWatchFunc
> func, void *opaque)
> +static SpiceWatch* worker_watch_add(int fd, int events, SpiceWatchFunc func,
> void *opaque)
>  {
> -    /* Since we are a channel core implementation, we always get called from
> -       red_channel_client_create(), so opaque always is our rcc */

I think I would leave this comment in. It feels a bit fragile to make this
assumption in the first place, so I'd at least like a comment explaining why we
can assume this.


>      RedChannelClient *rcc = opaque;
> -    struct RedWorker *worker;
> -    int i;
> +    RedWorker *worker;
> +    SpiceWatch *watch;
> +
> +    spice_return_val_if_fail(rcc != NULL, NULL);
> +    spice_return_val_if_fail(fd != -1, NULL);
> +    spice_return_val_if_fail(func != NULL, NULL);
>  
>      /* Since we are called from red_channel_client_create()
>         CommonChannelClient->worker has not been set yet! */
>      worker = SPICE_CONTAINEROF(rcc->channel, CommonChannel, base)->worker;
> +    spice_return_val_if_fail(worker != NULL, NULL);
> +    spice_return_val_if_fail(worker->main_context != NULL, NULL);
>  
> -    /* Search for a free slot in our poll_fds & watches arrays */
> -    for (i = 0; i < MAX_EVENT_SOURCES; i++) {
> -        if (worker->poll_fds[i].fd == -1) {
> -            break;
> -        }
> -    }
> -    if (i == MAX_EVENT_SOURCES) {
> -        spice_warning("could not add a watch for channel type %u id %u",
> -                      rcc->channel->type, rcc->channel->id);
> -        return NULL;
> -    }
> +    watch = g_new0(SpiceWatch, 1);
> +    watch->channel = g_io_channel_unix_new(fd);
> +    watch->rcc = rcc;
> +    watch->func = func;
>  
> -    worker->poll_fds[i].fd = fd;
> -    worker->watches[i].worker = worker;
> -    worker->watches[i].watch_func = func;
> -    worker->watches[i].watch_func_opaque = opaque;
> -    worker_watch_update_mask(&worker->watches[i], event_mask);
> +    worker_watch_update_mask(watch, events);
>  
> -    return &worker->watches[i];
> +    return watch;
>  }
>  
>  static void worker_watch_remove(SpiceWatch *watch)
>  {
> -    if (!watch) {
> -        return;
> -    }
> +    spice_return_if_fail(watch != NULL);
>  
> -    /* Note we don't touch the poll_fd here, to avoid the
> -       poll_fds/watches table entry getting re-used in the same
> -       red_worker_main loop over the fds as it is removed.
> +    if (watch->source)
> +        g_source_destroy(watch->source);
>  
> -       This is done because re-using it while events were pending on
> -       the fd previously occupying the slot would lead to incorrectly
> -       calling the watch_func for the new fd. */
> -    memset(watch, 0, sizeof(SpiceWatch));
> +    g_io_channel_unref(watch->channel);
> +    g_free(watch);
>  }
>  
> -SpiceCoreInterface worker_core = {
> -    .timer_add = spice_timer_queue_add,
> -    .timer_start = spice_timer_set,
> -    .timer_cancel = spice_timer_cancel,
> -    .timer_remove = spice_timer_remove,
> +static const SpiceCoreInterface worker_core = {
> +    .timer_add = worker_timer_add,
> +    .timer_start = worker_timer_start,
> +    .timer_cancel = worker_timer_cancel,
> +    .timer_remove = worker_timer_remove,
>  
>      .watch_update_mask = worker_watch_update_mask,
>      .watch_add = worker_watch_add,
> @@ -1528,24 +1615,87 @@ static void register_callbacks(Dispatcher *dispatcher)
>  
>  
>  
> -static void handle_dev_input(int fd, int event, void *opaque)
> +static gboolean worker_dispatcher_cb(GIOChannel *source, GIOCondition
> condition,
> +                                     gpointer data)
>  {
> -    RedWorker *worker = opaque;
> +    RedWorker *worker = data;
>  
> +    spice_debug(NULL);

I don't think this is necessary.

>      dispatcher_handle_recv_read(red_dispatcher_get_dispatcher(worker
> ->red_dispatcher));
> +
> +    return TRUE;
> +}
> +
> +typedef struct RedWorkerSource {
> +    GSource source;
> +    RedWorker *worker;
> +} RedWorkerSource;
> +
> +static gboolean worker_source_prepare(GSource *source, gint *timeout)
> +{
> +    RedWorkerSource *wsource = (RedWorkerSource *)source;
> +    RedWorker *worker = wsource->worker;
> +
> +    *timeout = worker->timeout;
> +    *timeout = MIN(worker->timeout,
> +                   display_channel_get_streams_timeout(worker
> ->display_channel));
> +
> +    return FALSE; /* do no timeout poll */

This comment is confusing. It implies that returning FALSE means that we don't
poll? In fact, returning TRUE from the prepare function indicates that the
source is already ready and we don't need to poll...
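
Maybe something like:

    return FALSE; /* not ready yet; go poll with the timeout set above */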

> +}
> +
> +static gboolean worker_source_check(GSource *source)
> +{
> +    RedWorkerSource *wsource = (RedWorkerSource *)source;
> +    RedWorker *worker = wsource->worker;
> +
> +    return worker->running /* TODO && worker->pending_process */;
>  }
>  
> +static gboolean worker_source_dispatch(GSource *source, GSourceFunc callback,
> +                                       gpointer user_data)
> +{
> +    RedWorkerSource *wsource = (RedWorkerSource *)source;
> +    RedWorker *worker = wsource->worker;
> +    DisplayChannel *display = worker->display_channel;
> +    int ring_is_empty;
> +
> +    /* during migration, in the dest, the display channel can be initialized
> +       while the global lz data not since migrate data msg hasn't been
> +       received yet */
> +    /* FIXME: why is this here, and not in display_channel_create */
> +    display_channel_free_glz_drawables_to_free(display);

In the previous version, this call was inside an 'if (worker->display_channel)'
branch. Here that check has been dropped.
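
If the display channel can still be uninitialized at this point, that guard
probably needs to stay, e.g.:

    if (display)
        display_channel_free_glz_drawables_to_free(display);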

> +
> +    /* FIXME: could use its own source */
> +    stream_timeout(display);
> +
> +    worker->timeout = -1;
> +    red_process_cursor(worker, MAX_PIPE_SIZE, &ring_is_empty);
> +    red_process_display(worker, MAX_PIPE_SIZE, &ring_is_empty);
> +
> +    /* FIXME: remove me? that should be handled by watch out condition */
> +    red_push(worker);
> +
> +    return TRUE;
> +}
> +
> +/* cannot be const */
> +static GSourceFuncs worker_source_funcs = {
> +    .prepare = worker_source_prepare,
> +    .check = worker_source_check,
> +    .dispatch = worker_source_dispatch,
> +};
> +
>  RedWorker* red_worker_new(QXLInstance *qxl, RedDispatcher *red_dispatcher)
>  {
>      QXLDevInitInfo init_info;
>      RedWorker *worker;
>      Dispatcher *dispatcher;
> -    int i;
>      const char *record_filename;
>  
>      qxl->st->qif->get_init_info(qxl, &init_info);
>  
>      worker = spice_new0(RedWorker, 1);
> +    worker->main_context = g_main_context_new();
>  
>      record_filename = getenv("SPICE_WORKER_RECORD_FILENAME");
>      if (record_filename) {
> @@ -1579,15 +1729,18 @@ RedWorker* red_worker_new(QXLInstance *qxl,
> RedDispatcher *red_dispatcher)
>      worker->wakeup_counter = stat_add_counter(worker->stat, "wakeups", TRUE);
>      worker->command_counter = stat_add_counter(worker->stat, "commands",
> TRUE);
>  #endif
> -    for (i = 0; i < MAX_EVENT_SOURCES; i++) {
> -        worker->poll_fds[i].fd = -1;
> -    }
>  
> -    worker->poll_fds[0].fd = dispatcher_get_recv_fd(dispatcher);
> -    worker->poll_fds[0].events = POLLIN;
> -    worker->watches[0].worker = worker;
> -    worker->watches[0].watch_func = handle_dev_input;
> -    worker->watches[0].watch_func_opaque = worker;
> +    GIOChannel *channel =
> g_io_channel_unix_new(dispatcher_get_recv_fd(dispatcher));
> +    GSource *source = g_io_create_watch(channel, G_IO_IN);
> +    g_source_set_callback(source, (GSourceFunc)worker_dispatcher_cb, worker,
> NULL);
> +    g_source_attach(source, worker->main_context);
> +    g_source_unref(source);
> +
> +    source = g_source_new(&worker_source_funcs, sizeof(RedWorkerSource));
> +    RedWorkerSource *wsource = (RedWorkerSource *)source;
> +    wsource->worker = worker;
> +    g_source_attach(source, worker->main_context);
> +    g_source_unref(source);
>  
>      memslot_info_init(&worker->mem_slots,
>                        init_info.num_memslots_groups,
> @@ -1598,7 +1751,7 @@ RedWorker* red_worker_new(QXLInstance *qxl,
> RedDispatcher *red_dispatcher)
>  
>      spice_warn_if(init_info.n_surfaces > NUM_SURFACES);
>  
> -    worker->event_timeout = INF_EVENT_WAIT;
> +    worker->timeout = -1;
>  
>      worker->cursor_channel = cursor_channel_new(worker);
>      // TODO: handle seemless migration. Temp, setting migrate to FALSE
> @@ -1616,10 +1769,6 @@ SPICE_GNUC_NORETURN static void *red_worker_main(void
> *arg)
>      spice_assert(MAX_PIPE_SIZE > WIDE_CLIENT_ACK_WINDOW &&
>             MAX_PIPE_SIZE > NARROW_CLIENT_ACK_WINDOW); //ensure wakeup by ack
> message
>  
> -    if (!spice_timer_queue_create()) {
> -        spice_error("failed to create timer queue");
> -    }
> -
>      if (pthread_getcpuclockid(pthread_self(), &worker->clockid)) {
>          spice_warning("getcpuclockid failed");
>      }
> @@ -1627,66 +1776,12 @@ SPICE_GNUC_NORETURN static void *red_worker_main(void
> *arg)
>      RED_CHANNEL(worker->cursor_channel)->thread_id = pthread_self();
>      RED_CHANNEL(worker->display_channel)->thread_id = pthread_self();
>  
> -    for (;;) {
> -        int i, num_events;
> -        unsigned int timeout;
> -
> -        timeout = spice_timer_queue_get_timeout_ms();
> -        worker->event_timeout = MIN(timeout, worker->event_timeout);
> -        timeout = display_channel_get_streams_timeout(worker
> ->display_channel);
> -        worker->event_timeout = MIN(timeout, worker->event_timeout);
> -        num_events = poll(worker->poll_fds, MAX_EVENT_SOURCES, worker
> ->event_timeout);
> -        stream_timeout(worker->display_channel);
> -        spice_timer_queue_cb();
> -
> -        if (worker->display_channel) {
> -            /* during migration, in the dest, the display channel can be
> initialized
> -               while the global lz data not since migrate data msg hasn't
> been
> -               received yet */
> -            display_channel_free_glz_drawables_to_free(worker
> ->display_channel);
> -        }
> -
> -        worker->event_timeout = INF_EVENT_WAIT;
> -        if (num_events == -1) {
> -            if (errno != EINTR) {
> -                spice_error("poll failed, %s", strerror(errno));
> -            }
> -        }
> -
> -        for (i = 0; i < MAX_EVENT_SOURCES; i++) {
> -            /* The watch may have been removed by the watch-func from
> -               another fd (ie a disconnect through the dispatcher),
> -               in this case watch_func is NULL. */
> -            if (worker->poll_fds[i].revents && worker->watches[i].watch_func)
> {
> -                int events = 0;
> -                if (worker->poll_fds[i].revents & POLLIN) {
> -                    events |= SPICE_WATCH_EVENT_READ;
> -                }
> -                if (worker->poll_fds[i].revents & POLLOUT) {
> -                    events |= SPICE_WATCH_EVENT_WRITE;
> -                }
> -                worker->watches[i].watch_func(worker->poll_fds[i].fd, events,
> -                                        worker
> ->watches[i].watch_func_opaque);
> -            }
> -        }
> -
> -        /* Clear the poll_fd for any removed watches, see the comment in
> -           watch_remove for why we don't do this there. */
> -        for (i = 0; i < MAX_EVENT_SOURCES; i++) {
> -            if (!worker->watches[i].watch_func) {
> -                worker->poll_fds[i].fd = -1;
> -            }
> -        }
> -
> -        if (worker->running) {
> -            int ring_is_empty;
> -            red_process_cursor(worker, MAX_PIPE_SIZE, &ring_is_empty);
> -            red_process_display(worker, MAX_PIPE_SIZE, &ring_is_empty);
> -        }
> -        red_push(worker);
> -    }
> +    GMainLoop *loop = g_main_loop_new(worker->main_context, FALSE);
> +    g_main_loop_run(loop);
> +    g_main_loop_unref(loop);
>  
> -    spice_warn_if_reached();
> +    /* FIXME: free worker, and join threads */
> +    abort();

why abort?

>  }
>  
>  bool red_worker_run(RedWorker *worker)
> diff --git a/server/red-worker.h b/server/red-worker.h
> index 44f35f7..b55a45c 100644
> --- a/server/red-worker.h
> +++ b/server/red-worker.h
> @@ -97,6 +97,7 @@ bool       red_worker_run(RedWorker *worker);
>  QXLInstance* red_worker_get_qxl(RedWorker *worker);
>  RedChannel* red_worker_get_cursor_channel(RedWorker *worker);
>  RedChannel* red_worker_get_display_channel(RedWorker *worker);
> +GMainContext* red_worker_get_context(RedWorker *worker);
>  clockid_t red_worker_get_clockid(RedWorker *worker);
>  RedMemSlotInfo* red_worker_get_memslot(RedWorker *worker);
>  
> diff --git a/server/spice_timer_queue.c b/server/spice_timer_queue.c
> deleted file mode 100644
> index 60017cc..0000000
> --- a/server/spice_timer_queue.c
> +++ /dev/null
> @@ -1,273 +0,0 @@
> -/* -*- Mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
> -/*
> -   Copyright (C) 2013 Red Hat, Inc.
> -
> -   This library is free software; you can redistribute it and/or
> -   modify it under the terms of the GNU Lesser General Public
> -   License as published by the Free Software Foundation; either
> -   version 2.1 of the License, or (at your option) any later version.
> -
> -   This library is distributed in the hope that it will be useful,
> -   but WITHOUT ANY WARRANTY; without even the implied warranty of
> -   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> -   Lesser General Public License for more details.
> -
> -   You should have received a copy of the GNU Lesser General Public
> -   License along with this library; if not, see <http://www.gnu.org/licenses/
> >.
> -*/
> -#include <config.h>
> -#include <pthread.h>
> -#include "red-common.h"
> -#include "spice_timer_queue.h"
> -#include "common/ring.h"
> -
> -static Ring timer_queue_list;
> -static int queue_count = 0;
> -static pthread_mutex_t queue_list_lock = PTHREAD_MUTEX_INITIALIZER;
> -
> -static void spice_timer_queue_init(void)
> -{
> -    ring_init(&timer_queue_list);
> -}
> -
> -struct SpiceTimer {
> -    RingItem link;
> -    RingItem active_link;
> -
> -    SpiceTimerFunc func;
> -    void *opaque;
> -
> -    SpiceTimerQueue *queue;
> -
> -    int is_active;
> -    uint32_t ms;
> -    uint64_t expiry_time;
> -};
> -
> -struct SpiceTimerQueue {
> -    RingItem link;
> -    pthread_t thread;
> -    Ring timers;
> -    Ring active_timers;
> -};
> -
> -static SpiceTimerQueue *spice_timer_queue_find(void)
> -{
> -    pthread_t self = pthread_self();
> -    RingItem *queue_item;
> -
> -    RING_FOREACH(queue_item, &timer_queue_list) {
> -         SpiceTimerQueue *queue = SPICE_CONTAINEROF(queue_item,
> SpiceTimerQueue, link);
> -
> -         if (pthread_equal(self, queue->thread) != 0) {
> -            return queue;
> -         }
> -    }
> -
> -    return NULL;
> -}
> -
> -static SpiceTimerQueue *spice_timer_queue_find_with_lock(void)
> -{
> -    SpiceTimerQueue *queue;
> -
> -    pthread_mutex_lock(&queue_list_lock);
> -    queue = spice_timer_queue_find();
> -    pthread_mutex_unlock(&queue_list_lock);
> -    return queue;
> -}
> -
> -int spice_timer_queue_create(void)
> -{
> -    SpiceTimerQueue *queue;
> -
> -    pthread_mutex_lock(&queue_list_lock);
> -    if (queue_count == 0) {
> -        spice_timer_queue_init();
> -    }
> -
> -    if (spice_timer_queue_find() != NULL) {
> -        spice_printerr("timer queue was already created for the thread");
> -        return FALSE;
> -    }
> -
> -    queue = spice_new0(SpiceTimerQueue, 1);
> -    queue->thread = pthread_self();
> -    ring_init(&queue->timers);
> -    ring_init(&queue->active_timers);
> -
> -    ring_add(&timer_queue_list, &queue->link);
> -    queue_count++;
> -
> -    pthread_mutex_unlock(&queue_list_lock);
> -
> -    return TRUE;
> -}
> -
> -void spice_timer_queue_destroy(void)
> -{
> -    RingItem *item;
> -    SpiceTimerQueue *queue;
> -
> -    pthread_mutex_lock(&queue_list_lock);
> -    queue = spice_timer_queue_find();
> -
> -    spice_assert(queue != NULL);
> -
> -    while ((item = ring_get_head(&queue->timers))) {
> -        SpiceTimer *timer;
> -
> -        timer = SPICE_CONTAINEROF(item, SpiceTimer, link);
> -        spice_timer_remove(timer);
> -    }
> -
> -    ring_remove(&queue->link);
> -    free(queue);
> -    queue_count--;
> -
> -    pthread_mutex_unlock(&queue_list_lock);
> -}
> -
> -SpiceTimer *spice_timer_queue_add(SpiceTimerFunc func, void *opaque)
> -{
> -    SpiceTimer *timer = spice_new0(SpiceTimer, 1);
> -    SpiceTimerQueue *queue = spice_timer_queue_find_with_lock();
> -
> -    spice_assert(queue != NULL);
> -
> -    ring_item_init(&timer->link);
> -    ring_item_init(&timer->active_link);
> -
> -    timer->opaque = opaque;
> -    timer->func = func;
> -    timer->queue = queue;
> -
> -    ring_add(&queue->timers, &timer->link);
> -
> -    return timer;
> -}
> -
> -static void _spice_timer_set(SpiceTimer *timer, uint32_t ms, uint64_t now)
> -{
> -    RingItem *next_item;
> -    SpiceTimerQueue *queue;
> -
> -    if (timer->is_active) {
> -        spice_timer_cancel(timer);
> -    }
> -
> -    queue = timer->queue;
> -    timer->expiry_time = now + ms;
> -    timer->ms = ms;
> -
> -    RING_FOREACH(next_item, &queue->active_timers) {
> -        SpiceTimer *next_timer = SPICE_CONTAINEROF(next_item, SpiceTimer,
> active_link);
> -
> -        if (timer->expiry_time <= next_timer->expiry_time) {
> -            break;
> -        }
> -    }
> -
> -    if (next_item) {
> -        ring_add_before(&timer->active_link, next_item);
> -    } else {
> -        ring_add_before(&timer->active_link, &queue->active_timers);
> -    }
> -    timer->is_active = TRUE;
> -}
> -
> -void spice_timer_set(SpiceTimer *timer, uint32_t ms)
> -{
> -    struct timespec now;
> -
> -    spice_assert(pthread_equal(timer->queue->thread, pthread_self()) != 0);
> -
> -    clock_gettime(CLOCK_MONOTONIC, &now);
> -    _spice_timer_set(timer, ms,
> -                     (uint64_t)now.tv_sec * 1000 + (now.tv_nsec / 1000 /
> 1000));
> -}
> -
> -void spice_timer_cancel(SpiceTimer *timer)
> -{
> -    spice_assert(pthread_equal(timer->queue->thread, pthread_self()) != 0);
> -
> -    if (!ring_item_is_linked(&timer->active_link)) {
> -        spice_assert(!timer->is_active);
> -        return;
> -    }
> -
> -    spice_assert(timer->is_active);
> -    ring_remove(&timer->active_link);
> -    timer->is_active = FALSE;
> -}
> -
> -void spice_timer_remove(SpiceTimer *timer)
> -{
> -    spice_assert(timer->queue);
> -    spice_assert(ring_item_is_linked(&timer->link));
> -    spice_assert(pthread_equal(timer->queue->thread, pthread_self()) != 0);
> -
> -    if (timer->is_active) {
> -        spice_assert(ring_item_is_linked(&timer->active_link));
> -        ring_remove(&timer->active_link);
> -    }
> -    ring_remove(&timer->link);
> -    free(timer);
> -}
> -
> -unsigned int spice_timer_queue_get_timeout_ms(void)
> -{
> -    struct timespec now;
> -    int64_t now_ms;
> -    RingItem *head;
> -    SpiceTimer *head_timer;
> -    SpiceTimerQueue *queue = spice_timer_queue_find_with_lock();
> -
> -    spice_assert(queue != NULL);
> -
> -    if (ring_is_empty(&queue->active_timers)) {
> -        return -1;
> -    }
> -
> -    head = ring_get_head(&queue->active_timers);
> -    head_timer = SPICE_CONTAINEROF(head, SpiceTimer, active_link);
> -
> -    clock_gettime(CLOCK_MONOTONIC, &now);
> -    now_ms = ((int64_t)now.tv_sec * 1000) + (now.tv_nsec / 1000 / 1000);
> -
> -    return MAX(0, ((int64_t)head_timer->expiry_time - now_ms));
> -}
> -
> -
> -void spice_timer_queue_cb(void)
> -{
> -    struct timespec now;
> -    uint64_t now_ms;
> -    RingItem *head;
> -    SpiceTimerQueue *queue = spice_timer_queue_find_with_lock();
> -
> -    spice_assert(queue != NULL);
> -
> -    if (ring_is_empty(&queue->active_timers)) {
> -        return;
> -    }
> -
> -    clock_gettime(CLOCK_MONOTONIC, &now);
> -    now_ms = ((uint64_t)now.tv_sec * 1000) + (now.tv_nsec / 1000 / 1000);
> -
> -    while ((head = ring_get_head(&queue->active_timers))) {
> -        SpiceTimer *timer = SPICE_CONTAINEROF(head, SpiceTimer, active_link);
> -
> -        if (timer->expiry_time > now_ms) {
> -            break;
> -        } else {
> -            /* Remove active timer before calling the timer function.
> -             * Timer function could delete the timer making the timer
> -             * pointer point to freed data.
> -             */
> -            spice_timer_cancel(timer);
> -            timer->func(timer->opaque);
> -            /* timer could now be invalid ! */
> -        }
> -    }
> -}
> diff --git a/server/spice_timer_queue.h b/server/spice_timer_queue.h
> deleted file mode 100644
> index a84f6cd..0000000
> --- a/server/spice_timer_queue.h
> +++ /dev/null
> @@ -1,43 +0,0 @@
> -/*
> -   Copyright (C) 2013 Red Hat, Inc.
> -
> -   This library is free software; you can redistribute it and/or
> -   modify it under the terms of the GNU Lesser General Public
> -   License as published by the Free Software Foundation; either
> -   version 2.1 of the License, or (at your option) any later version.
> -
> -   This library is distributed in the hope that it will be useful,
> -   but WITHOUT ANY WARRANTY; without even the implied warranty of
> -   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> -   Lesser General Public License for more details.
> -
> -   You should have received a copy of the GNU Lesser General Public
> -   License along with this library; if not, see <http://www.gnu.org/licenses/
> >.
> -*/
> -
> -#ifndef _H_SPICE_TIMER_QUEUE
> -#define _H_SPICE_TIMER_QUEUE
> -
> -#include  <stdint.h>
> -#include "spice.h"
> -
> -typedef struct SpiceTimerQueue SpiceTimerQueue;
> -
> -/* create/destroy a timer queue for the current thread.
> - * In order to execute the timers functions, spice_timer_queue_cb should be
> called
> - * periodically, according to spice_timer_queue_get_timeout_ms */
> -int spice_timer_queue_create(void);
> -void spice_timer_queue_destroy(void);
> -
> -SpiceTimer *spice_timer_queue_add(SpiceTimerFunc func, void *opaque);
> -void spice_timer_set(SpiceTimer *timer, uint32_t ms);
> -void spice_timer_cancel(SpiceTimer *timer);
> -void spice_timer_remove(SpiceTimer *timer);
> -
> -/* returns the time left till the earliest timer in the queue expires.
> - * returns (unsigned)-1 if there are no active timers */
> -unsigned int spice_timer_queue_get_timeout_ms(void);
> -/* call the timeout callbacks of all the expired timers */
> -void spice_timer_queue_cb(void);
> -
> -#endif


Reviewed-by: Jonathon Jongsma <jjongsma@xxxxxxxxxx>



