[RFC^2 PATCH] implement no pushing on RedWorker

This patch was inspired by a comment made after the GLib loop patch.

The basic idea: why call red_channel_push explicitly instead of
letting the poll loop detect when the sockets are ready to accept data?
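For reference, here is a minimal sketch of the idea (simplified from the
hunks below; queue_item and drain_pipe are made-up names for illustration,
the real changes live in prepare_pipe_add and red_channel_client_push):

/* Sketch only: arm the watch when the pipe goes non-empty. */
static void queue_item(RedChannelClient *rcc, PipeItem *item)
{
    /* First item landing on an empty pipe: ask the event loop to also
     * report writability on this client's socket. */
    if (ring_is_empty(&rcc->pipe) && rcc->stream->watch) {
        rcc->channel->core->watch_update_mask(rcc->stream->watch,
                                              SPICE_WATCH_EVENT_READ |
                                              SPICE_WATCH_EVENT_WRITE);
    }
    rcc->pipe_size++;
    ring_add(&rcc->pipe, &item->link);
}

/* Sketch only: the drain side, as in red_channel_client_push() below.
 * Once nothing is left to send, stop watching for WRITE so the poll
 * loop does not spin on an always-writable socket. */
static void drain_pipe(RedChannelClient *rcc)
{
    PipeItem *pipe_item;

    while ((pipe_item = red_channel_client_pipe_item_get(rcc))) {
        red_channel_client_send_item(rcc, pipe_item);
    }
    if (red_channel_client_no_item_being_sent(rcc)) {
        rcc->channel->core->watch_update_mask(rcc->stream->watch,
                                              SPICE_WATCH_EVENT_READ);
    }
}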

After half a day (literally!) of testing, I honestly have no idea
whether this patch is an improvement or not. The results look
more or less the same.

Slightly worse in that it executes a few more loop iterations
(something the GLib patch seems to do anyway).

There is roughly a 1% improvement in the number of items sent
(meaning fewer are discarded).

Latency looks a bit worse (around 1-2%), but we are really
talking about nothing.

What I tried:
- different latencies and bandwidths;
- putting delays into the replay (the -s parameter of the
  spice-server-replay utility);
- increasing the polling delay in red-worker.c (CMD_RING_POLL_TIMEOUT);
- reducing wakeups in the replay utility (this, IMHO, should be fixed).

One good thing about this patch is that it lets you remove the explicit
calls to red_channel_client_push and red_channel_push.
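As a hypothetical illustration (not part of the diff below), a caller
that today does

    red_channel_client_pipe_add(rcc, item);
    red_channel_client_push(rcc);

would become just

    red_channel_client_pipe_add(rcc, item);

since queueing the item arms the WRITE watch and the event loop takes
care of the actual send.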

I should probably test this with the GLib loop too.

Comments and opinions are welcome!

---
 server/red-channel.c | 24 +++++++++++++++---------
 server/red-worker.c  | 11 -----------
 2 files changed, 15 insertions(+), 20 deletions(-)

diff --git a/server/red-channel.c b/server/red-channel.c
index 2cf190c..43ec10b 100644
--- a/server/red-channel.c
+++ b/server/red-channel.c
@@ -1345,6 +1345,10 @@ void red_channel_client_push(RedChannelClient *rcc)
     while ((pipe_item = red_channel_client_pipe_item_get(rcc))) {
         red_channel_client_send_item(rcc, pipe_item);
     }
+    if (red_channel_client_no_item_being_sent(rcc)) {
+        rcc->channel->core->watch_update_mask(rcc->stream->watch,
+                                              SPICE_WATCH_EVENT_READ);
+    }
     rcc->during_send = FALSE;
     red_channel_client_unref(rcc);
 }
@@ -1645,7 +1649,7 @@ void red_channel_pipe_item_init(RedChannel *channel, PipeItem *item, int type)
     item->type = type;
 }
 
-static inline int validate_pipe_add(RedChannelClient *rcc, PipeItem *item)
+static inline int prepare_pipe_add(RedChannelClient *rcc, PipeItem *item)
 {
     spice_assert(rcc && item);
     if (SPICE_UNLIKELY(!red_channel_client_is_connected(rcc))) {
@@ -1653,16 +1657,21 @@ static inline int validate_pipe_add(RedChannelClient *rcc, PipeItem *item)
         red_channel_client_release_item(rcc, item, FALSE);
         return FALSE;
     }
+    if (ring_is_empty(&rcc->pipe) && rcc->stream->watch) {
+        rcc->channel->core->watch_update_mask(rcc->stream->watch,
+                                         SPICE_WATCH_EVENT_READ |
+                                         SPICE_WATCH_EVENT_WRITE);
+    }
+    rcc->pipe_size++;
     return TRUE;
 }
 
 void red_channel_client_pipe_add(RedChannelClient *rcc, PipeItem *item)
 {
 
-    if (!validate_pipe_add(rcc, item)) {
+    if (!prepare_pipe_add(rcc, item)) {
         return;
     }
-    rcc->pipe_size++;
     ring_add(&rcc->pipe, &item->link);
 }
 
@@ -1676,10 +1685,9 @@ void red_channel_client_pipe_add_after(RedChannelClient *rcc,
                                        PipeItem *item, PipeItem *pos)
 {
     spice_assert(pos);
-    if (!validate_pipe_add(rcc, item)) {
+    if (!prepare_pipe_add(rcc, item)) {
         return;
     }
-    rcc->pipe_size++;
     ring_add_after(&item->link, &pos->link);
 }
 
@@ -1692,19 +1700,17 @@ int red_channel_client_pipe_item_is_linked(RedChannelClient *rcc,
 void red_channel_client_pipe_add_tail_no_push(RedChannelClient *rcc,
                                               PipeItem *item)
 {
-    if (!validate_pipe_add(rcc, item)) {
+    if (!prepare_pipe_add(rcc, item)) {
         return;
     }
-    rcc->pipe_size++;
     ring_add_before(&item->link, &rcc->pipe);
 }
 
 void red_channel_client_pipe_add_tail(RedChannelClient *rcc, PipeItem *item)
 {
-    if (!validate_pipe_add(rcc, item)) {
+    if (!prepare_pipe_add(rcc, item)) {
         return;
     }
-    rcc->pipe_size++;
     ring_add_before(&item->link, &rcc->pipe);
     red_channel_client_push(rcc);
 }
diff --git a/server/red-worker.c b/server/red-worker.c
index ad8ba1a..e5ec37c 100644
--- a/server/red-worker.c
+++ b/server/red-worker.c
@@ -338,16 +338,6 @@ static int red_process_display(RedWorker *worker, uint32_t max_pipe_size, int *r
     return n;
 }
 
-static void red_push(RedWorker *worker)
-{
-    if (worker->cursor_channel) {
-        red_channel_push(RED_CHANNEL(worker->cursor_channel));
-    }
-    if (worker->display_channel) {
-        red_channel_push(RED_CHANNEL(worker->display_channel));
-    }
-}
-
 void red_disconnect_all_display_TODO_remove_me(RedChannel *channel)
 {
     // TODO: we need to record the client that actually causes the timeout. So
@@ -1676,7 +1666,6 @@ SPICE_GNUC_NORETURN static void *red_worker_main(void *arg)
             red_process_cursor(worker, MAX_PIPE_SIZE, &ring_is_empty);
             red_process_display(worker, MAX_PIPE_SIZE, &ring_is_empty);
         }
-        red_push(worker);
     }
 
     spice_warn_if_reached();
-- 
2.4.3

_______________________________________________
Spice-devel mailing list
Spice-devel@xxxxxxxxxxxxxxxxxxxxx
http://lists.freedesktop.org/mailman/listinfo/spice-devel



