Re: [PATCH v5 1/3] contrib: add ivshmem client and server

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On Thu, Sep 04, 2014 at 02:50:59PM +0200, David Marchand wrote:
> When using ivshmem devices, notifications between guests can be sent as
> interrupts using a ivshmem-server (typical use described in documentation).
> The client is provided as a debug tool.
> 
> Signed-off-by: Olivier Matz <olivier.matz@xxxxxxxxx>
> Signed-off-by: David Marchand <david.marchand@xxxxxxxxx>
> ---
>  Makefile                                |    8 +
>  configure                               |    3 +
>  contrib/ivshmem-client/ivshmem-client.c |  405 +++++++++++++++++++++++++++++++
>  contrib/ivshmem-client/ivshmem-client.h |  239 ++++++++++++++++++
>  contrib/ivshmem-client/main.c           |  237 ++++++++++++++++++
>  contrib/ivshmem-server/ivshmem-server.c |  395 ++++++++++++++++++++++++++++++
>  contrib/ivshmem-server/ivshmem-server.h |  186 ++++++++++++++
>  contrib/ivshmem-server/main.c           |  244 +++++++++++++++++++
>  qemu-doc.texi                           |   10 +-
>  9 files changed, 1724 insertions(+), 3 deletions(-)
>  create mode 100644 contrib/ivshmem-client/ivshmem-client.c
>  create mode 100644 contrib/ivshmem-client/ivshmem-client.h
>  create mode 100644 contrib/ivshmem-client/main.c
>  create mode 100644 contrib/ivshmem-server/ivshmem-server.c
>  create mode 100644 contrib/ivshmem-server/ivshmem-server.h
>  create mode 100644 contrib/ivshmem-server/main.c
> 
> diff --git a/Makefile b/Makefile
> index b33aaac..0575898 100644
> --- a/Makefile
> +++ b/Makefile
> @@ -283,6 +283,14 @@ $(qga-obj-y) qemu-ga.o: $(QGALIB_GEN)
>  qemu-ga$(EXESUF): $(qga-obj-y) libqemuutil.a libqemustub.a
>  	$(call LINK, $^)
>  
> +IVSHMEM_CLIENT_OBJS=$(addprefix $(SRC_PATH)/contrib/ivshmem-client/, ivshmem-client.o main.o)
> +ivshmem-client$(EXESUF): $(IVSHMEM_CLIENT_OBJS)
> +	$(call LINK, $^)
> +
> +IVSHMEM_SERVER_OBJS=$(addprefix $(SRC_PATH)/contrib/ivshmem-server/, ivshmem-server.o main.o)
> +ivshmem-server$(EXESUF): $(IVSHMEM_SERVER_OBJS) libqemuutil.a libqemustub.a
> +	$(call LINK, $^)
> +
>  clean:
>  # avoid old build problems by removing potentially incorrect old files
>  	rm -f config.mak op-i386.h opc-i386.h gen-op-i386.h op-arm.h opc-arm.h gen-op-arm.h
> diff --git a/configure b/configure
> index 961bf6f..a41a16c 100755
> --- a/configure
> +++ b/configure
> @@ -4125,6 +4125,9 @@ if test "$want_tools" = "yes" ; then
>    if [ "$linux" = "yes" -o "$bsd" = "yes" -o "$solaris" = "yes" ] ; then
>      tools="qemu-nbd\$(EXESUF) $tools"
>    fi
> +  if [ "$kvm" = "yes" ] ; then
> +    tools="ivshmem-client\$(EXESUF) ivshmem-server\$(EXESUF) $tools"
> +  fi
>  fi
>  if test "$softmmu" = yes ; then
>    if test "$virtfs" != no ; then
> diff --git a/contrib/ivshmem-client/ivshmem-client.c b/contrib/ivshmem-client/ivshmem-client.c
> new file mode 100644
> index 0000000..ad210c8
> --- /dev/null
> +++ b/contrib/ivshmem-client/ivshmem-client.c
> @@ -0,0 +1,405 @@
> +/*
> + * Copyright 6WIND S.A., 2014
> + *
> + * This work is licensed under the terms of the GNU GPL, version 2 or
> + * (at your option) any later version.  See the COPYING file in the
> + * top-level directory.
> + */
> +
> +#include <sys/types.h>
> +#include <sys/socket.h>
> +#include <sys/un.h>
> +
> +#include "qemu-common.h"
> +#include "qemu/queue.h"
> +
> +#include "ivshmem-client.h"
> +
> +/* log a message on stdout if verbose=1 */
> +#define debug_log(client, fmt, ...) do { \
> +        if ((client)->verbose) {         \
> +            printf(fmt, ## __VA_ARGS__); \
> +        }                                \
> +    } while (0)
> +
> +/* read message from the unix socket */
> +static int
> +read_one_msg(IvshmemClient *client, long *index, int *fd)
> +{
> +    int ret;
> +    struct msghdr msg;
> +    struct iovec iov[1];
> +    union {
> +        struct cmsghdr cmsg;
> +        char control[CMSG_SPACE(sizeof(int))];
> +    } msg_control;
> +    struct cmsghdr *cmsg;
> +
> +    iov[0].iov_base = index;
> +    iov[0].iov_len = sizeof(*index);
> +
> +    memset(&msg, 0, sizeof(msg));
> +    msg.msg_iov = iov;
> +    msg.msg_iovlen = 1;
> +    msg.msg_control = &msg_control;
> +    msg.msg_controllen = sizeof(msg_control);
> +
> +    ret = recvmsg(client->sock_fd, &msg, 0);
> +    if (ret < 0) {
> +        debug_log(client, "cannot read message: %s\n", strerror(errno));
> +        return -1;
> +    }
> +    if (ret == 0) {
> +        debug_log(client, "lost connection to server\n");
> +        return -1;
> +    }
> +
> +    *fd = -1;
> +
> +    for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
> +
> +        if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)) ||
> +            cmsg->cmsg_level != SOL_SOCKET ||
> +            cmsg->cmsg_type != SCM_RIGHTS) {
> +            continue;
> +        }
> +
> +        memcpy(fd, CMSG_DATA(cmsg), sizeof(*fd));
> +    }
> +
> +    return 0;
> +}
> +
> +/* free a peer when the server advertise a disconnection or when the
> + * client is freed */
> +static void
> +free_peer(IvshmemClient *client, IvshmemClientPeer *peer)
> +{
> +    unsigned vector;
> +
> +    QTAILQ_REMOVE(&client->peer_list, peer, next);
> +    for (vector = 0; vector < peer->vectors_count; vector++) {
> +        close(peer->vectors[vector]);
> +    }
> +
> +    g_free(peer);
> +}
> +
> +/* handle message coming from server (new peer, new vectors) */
> +static int
> +handle_server_msg(IvshmemClient *client)
> +{
> +    IvshmemClientPeer *peer;
> +    long peer_id;
> +    int ret, fd;
> +
> +    ret = read_one_msg(client, &peer_id, &fd);
> +    if (ret < 0) {
> +        return -1;
> +    }
> +
> +    /* can return a peer or the local client */
> +    peer = ivshmem_client_search_peer(client, peer_id);
> +
> +    /* delete peer */
> +    if (fd == -1) {
> +
> +        if (peer == NULL || peer == &client->local) {
> +            debug_log(client, "receive delete for invalid peer %ld\n", peer_id);
> +            return -1;
> +        }
> +
> +        debug_log(client, "delete peer id = %ld\n", peer_id);
> +        free_peer(client, peer);
> +        return 0;
> +    }
> +
> +    /* new peer */
> +    if (peer == NULL) {
> +        peer = g_malloc0(sizeof(*peer));
> +        peer->id = peer_id;
> +        peer->vectors_count = 0;
> +        QTAILQ_INSERT_TAIL(&client->peer_list, peer, next);
> +        debug_log(client, "new peer id = %ld\n", peer_id);
> +    }
> +
> +    /* new vector */
> +    debug_log(client, "  new vector %d (fd=%d) for peer id %ld\n",
> +              peer->vectors_count, fd, peer->id);
> +    peer->vectors[peer->vectors_count] = fd;
> +    peer->vectors_count++;
> +
> +    return 0;
> +}
> +
> +/* init a new ivshmem client */
> +int
> +ivshmem_client_init(IvshmemClient *client, const char *unix_sock_path,
> +                    ivshmem_client_notif_cb_t notif_cb, void *notif_arg,
> +                    bool verbose)
> +{
> +    unsigned i;
> +
> +    memset(client, 0, sizeof(*client));
> +
> +    snprintf(client->unix_sock_path, sizeof(client->unix_sock_path),
> +             "%s", unix_sock_path);

You don't check the return code, so the path can be silently
truncated without notice. Same for all snprintf calls.

> +
> +    for (i = 0; i < IVSHMEM_CLIENT_MAX_VECTORS; i++) {
> +        client->local.vectors[i] = -1;
> +    }
> +
> +    QTAILQ_INIT(&client->peer_list);
> +    client->local.id = -1;
> +
> +    client->notif_cb = notif_cb;
> +    client->notif_arg = notif_arg;
> +    client->verbose = verbose;
> +    client->sock_fd = -1;
> +
> +    return 0;
> +}
> +
> +/* create and connect to the unix socket */
> +int
> +ivshmem_client_connect(IvshmemClient *client)
> +{
> +    struct sockaddr_un sun;
> +    int fd;
> +    long tmp;
> +
> +    debug_log(client, "connect to client %s\n", client->unix_sock_path);
> +
> +    client->sock_fd = socket(AF_UNIX, SOCK_STREAM, 0);
> +    if (client->sock_fd < 0) {
> +        debug_log(client, "cannot create socket: %s\n", strerror(errno));
> +        return -1;
> +    }
> +
> +    sun.sun_family = AF_UNIX;
> +    snprintf(sun.sun_path, sizeof(sun.sun_path), "%s", client->unix_sock_path);
> +    if (connect(client->sock_fd, (struct sockaddr *)&sun, sizeof(sun)) < 0) {
> +        debug_log(client, "cannot connect to %s: %s\n", sun.sun_path,
> +                  strerror(errno));
> +        goto err_close;
> +    }
> +
> +    /* first, we expect our index + a fd == -1 */
> +    if (read_one_msg(client, &client->local.id, &fd) < 0 ||
> +        client->local.id < 0 || fd != -1) {
> +        debug_log(client, "cannot read from server\n");
> +        goto err_close;
> +    }
> +    debug_log(client, "our_id=%ld\n", client->local.id);
> +
> +    /* now, we expect shared mem fd + a -1 index, note that shm fd
> +     * is not used */
> +    if (read_one_msg(client, &tmp, &fd) < 0 ||
> +        tmp != -1 || fd < 0) {
> +        debug_log(client, "cannot read from server (2)\n");
> +        goto err_close;
> +    }
> +    debug_log(client, "shm_fd=%d\n", fd);
> +
> +    return 0;
> +
> +err_close:
> +    close(client->sock_fd);
> +    client->sock_fd = -1;
> +    return -1;
> +}
> +
> +/* close connection to the server, and free all peer structures */
> +void
> +ivshmem_client_close(IvshmemClient *client)
> +{
> +    IvshmemClientPeer *peer;
> +    unsigned i;
> +
> +    debug_log(client, "close client\n");
> +
> +    while ((peer = QTAILQ_FIRST(&client->peer_list)) != NULL) {
> +        free_peer(client, peer);
> +    }
> +
> +    close(client->sock_fd);
> +    client->sock_fd = -1;
> +    client->local.id = -1;
> +    for (i = 0; i < IVSHMEM_CLIENT_MAX_VECTORS; i++) {
> +        client->local.vectors[i] = -1;
> +    }
> +}
> +
> +/* get the fd_set according to the unix socket and peer list */
> +void
> +ivshmem_client_get_fds(const IvshmemClient *client, fd_set *fds, int *maxfd)
> +{
> +    int fd;
> +    unsigned vector;
> +
> +    FD_SET(client->sock_fd, fds);
> +    if (client->sock_fd >= *maxfd) {
> +        *maxfd = client->sock_fd + 1;
> +    }
> +
> +    for (vector = 0; vector < client->local.vectors_count; vector++) {
> +        fd = client->local.vectors[vector];
> +        FD_SET(fd, fds);
> +        if (fd >= *maxfd) {
> +            *maxfd = fd + 1;
> +        }
> +    }
> +}
> +
> +/* handle events from eventfd: just print a message on notification */
> +static int
> +handle_event(IvshmemClient *client, const fd_set *cur, int maxfd)
> +{
> +    IvshmemClientPeer *peer;
> +    uint64_t kick;
> +    unsigned i;
> +    int ret;
> +
> +    peer = &client->local;
> +
> +    for (i = 0; i < peer->vectors_count; i++) {
> +        if (peer->vectors[i] >= maxfd || !FD_ISSET(peer->vectors[i], cur)) {
> +            continue;
> +        }
> +
> +        ret = read(peer->vectors[i], &kick, sizeof(kick));
> +        if (ret < 0) {
> +            return ret;
> +        }
> +        if (ret != sizeof(kick)) {
> +            debug_log(client, "invalid read size = %d\n", ret);
> +            errno = EINVAL;
> +            return -1;
> +        }
> +        debug_log(client, "received event on fd %d vector %d: %ld\n",
> +                  peer->vectors[i], i, kick);
> +        if (client->notif_cb != NULL) {
> +            client->notif_cb(client, peer, i, client->notif_arg);
> +        }
> +    }
> +
> +    return 0;
> +}
> +
> +/* read and handle new messages on the given fd_set */
> +int
> +ivshmem_client_handle_fds(IvshmemClient *client, fd_set *fds, int maxfd)
> +{
> +    if (client->sock_fd < maxfd && FD_ISSET(client->sock_fd, fds) &&
> +        handle_server_msg(client) < 0 && errno != EINTR) {
> +        debug_log(client, "handle_server_msg() failed\n");
> +        return -1;
> +    } else if (handle_event(client, fds, maxfd) < 0 && errno != EINTR) {
> +        debug_log(client, "handle_event() failed\n");
> +        return -1;
> +    }
> +
> +    return 0;
> +}
> +
> +/* send a notification on a vector of a peer */
> +int
> +ivshmem_client_notify(const IvshmemClient *client,
> +                      const IvshmemClientPeer *peer, unsigned vector)
> +{
> +    uint64_t kick;
> +    int fd;
> +
> +    if (vector >= peer->vectors_count) {
> +        debug_log(client, "invalid vector %u on peer %ld\n", vector, peer->id);
> +        return -1;
> +    }
> +    fd = peer->vectors[vector];
> +    debug_log(client, "notify peer %ld on vector %d, fd %d\n", peer->id, vector,
> +              fd);
> +
> +    kick = 1;
> +    if (write(fd, &kick, sizeof(kick)) != sizeof(kick)) {
> +        fprintf(stderr, "could not write to %d: %s\n", peer->vectors[vector],
> +                strerror(errno));
> +        return -1;
> +    }
> +    return 0;
> +}
> +
> +/* send a notification to all vectors of a peer */
> +int
> +ivshmem_client_notify_all_vects(const IvshmemClient *client,
> +                                const IvshmemClientPeer *peer)
> +{
> +    unsigned vector;
> +    int ret = 0;
> +
> +    for (vector = 0; vector < peer->vectors_count; vector++) {
> +        if (ivshmem_client_notify(client, peer, vector) < 0) {
> +            ret = -1;
> +        }
> +    }
> +
> +    return ret;
> +}
> +
> +/* send a notification to all peers */
> +int
> +ivshmem_client_notify_broadcast(const IvshmemClient *client)
> +{
> +    IvshmemClientPeer *peer;
> +    int ret = 0;
> +
> +    QTAILQ_FOREACH(peer, &client->peer_list, next) {
> +        if (ivshmem_client_notify_all_vects(client, peer) < 0) {
> +            ret = -1;
> +        }
> +    }
> +
> +    return ret;
> +}
> +
> +/* lookup peer from its id */
> +IvshmemClientPeer *
> +ivshmem_client_search_peer(IvshmemClient *client, long peer_id)
> +{
> +    IvshmemClientPeer *peer;
> +
> +    if (peer_id == client->local.id) {
> +        return &client->local;
> +    }
> +
> +    QTAILQ_FOREACH(peer, &client->peer_list, next) {
> +        if (peer->id == peer_id) {
> +            return peer;
> +        }
> +    }
> +    return NULL;
> +}
> +
> +/* dump our info, the list of peers their vectors on stdout */
> +void
> +ivshmem_client_dump(const IvshmemClient *client)
> +{
> +    const IvshmemClientPeer *peer;
> +    unsigned vector;
> +
> +    /* dump local infos */
> +    peer = &client->local;
> +    printf("our_id = %ld\n", peer->id);
> +    for (vector = 0; vector < peer->vectors_count; vector++) {
> +        printf("  vector %d is enabled (fd=%d)\n", vector,
> +               peer->vectors[vector]);
> +    }
> +
> +    /* dump peers */
> +    QTAILQ_FOREACH(peer, &client->peer_list, next) {
> +        printf("peer_id = %ld\n", peer->id);
> +
> +        for (vector = 0; vector < peer->vectors_count; vector++) {
> +            printf("  vector %d is enabled (fd=%d)\n", vector,
> +                   peer->vectors[vector]);
> +        }
> +    }
> +}
> diff --git a/contrib/ivshmem-client/ivshmem-client.h b/contrib/ivshmem-client/ivshmem-client.h
> new file mode 100644
> index 0000000..45f2b64
> --- /dev/null
> +++ b/contrib/ivshmem-client/ivshmem-client.h
> @@ -0,0 +1,239 @@
> +/*
> + * Copyright 6WIND S.A., 2014
> + *
> + * This work is licensed under the terms of the GNU GPL, version 2 or
> + * (at your option) any later version.  See the COPYING file in the
> + * top-level directory.
> + */
> +
> +#ifndef _IVSHMEM_CLIENT_
> +#define _IVSHMEM_CLIENT_
> +
> +/**
> + * This file provides helper to implement an ivshmem client. It is used
> + * on the host to ask QEMU to send an interrupt to an ivshmem PCI device in a
> + * guest. QEMU also implements an ivshmem client similar to this one, they both
> + * connect to an ivshmem server.
> + *
> + * A standalone ivshmem client based on this file is provided for debug/test
> + * purposes.
> + */
> +
> +#include <limits.h>
> +#include <sys/select.h>
> +
> +#include "qemu/queue.h"
> +
> +/**
> + * Maximum number of notification vectors supported by the client
> + */
> +#define IVSHMEM_CLIENT_MAX_VECTORS 64
> +
> +/**
> + * Structure storing a peer
> + *
> + * Each time a client connects to an ivshmem server, it is advertised to
> + * all connected clients through the unix socket. When our ivshmem
> + * client receives a notification, it creates a IvshmemClientPeer
> + * structure to store the infos of this peer.
> + *
> + * This structure is also used to store the information of our own
> + * client in (IvshmemClient)->local.
> + */
> +typedef struct IvshmemClientPeer {
> +    QTAILQ_ENTRY(IvshmemClientPeer) next;    /**< next in list*/
> +    long id;                                 /**< the id of the peer */
> +    int vectors[IVSHMEM_CLIENT_MAX_VECTORS]; /**< one fd per vector */
> +    unsigned vectors_count;                  /**< number of vectors */
> +} IvshmemClientPeer;
> +QTAILQ_HEAD(IvshmemClientPeerList, IvshmemClientPeer);
> +
> +typedef struct IvshmemClientPeerList IvshmemClientPeerList;
> +typedef struct IvshmemClient IvshmemClient;
> +
> +/**
> + * Typedef of callback function used when our IvshmemClient receives a
> + * notification from a peer.
> + */
> +typedef void (*ivshmem_client_notif_cb_t)(
> +    const IvshmemClient *client,
> +    const IvshmemClientPeer *peer,
> +    unsigned vect, void *arg);

Pls fix type name to use CamelCase.

> +
> +/**
> + * Structure describing an ivshmem client
> + *
> + * This structure stores all information related to our client: the name
> + * of the server unix socket, the list of peers advertised by the
> + * server, our own client information, and a pointer the notification
> + * callback function used when we receive a notification from a peer.
> + */
> +struct IvshmemClient {
> +    char unix_sock_path[PATH_MAX];      /**< path to unix sock */
> +    int sock_fd;                        /**< unix sock filedesc */
> +
> +    IvshmemClientPeerList peer_list;    /**< list of peers */
> +    IvshmemClientPeer local;            /**< our own infos */
> +
> +    ivshmem_client_notif_cb_t notif_cb; /**< notification callback */
> +    void *notif_arg;                    /**< notification argument */
> +
> +    bool verbose;                       /**< true to enable debug */
> +};
> +
> +/**
> + * Initialize an ivshmem client
> + *
> + * @param client
> + *   A pointer to an uninitialized IvshmemClient structure
> + * @param unix_sock_path
> + *   The pointer to the unix socket file name
> + * @param notif_cb
> + *   If not NULL, the pointer to the function to be called when our
> + *   IvshmemClient receives a notification from a peer
> + * @param notif_arg
> + *   Opaque pointer given as-is to the notification callback function
> + * @param verbose
> + *   True to enable debug
> + *
> + * @return
> + *   0 on success, or a negative value on error

That's not how we format this.
Pls do
* @notif_arg:  Opaque pointer given as-is to the notification callback function
..
* Returns  0 on success, or a negative value on error


> + */
> +int ivshmem_client_init(IvshmemClient *client, const char *unix_sock_path,
> +                        ivshmem_client_notif_cb_t notif_cb, void *notif_arg,
> +                        bool verbose);
> +
> +/**
> + * Connect to the server
> + *
> + * Connect to the server unix socket, and read the first initial
> + * messages sent by the server, giving the ID of the client and the file
> + * descriptor of the shared memory.
> + *
> + * @param client
> + *   The ivshmem client
> + *
> + * @return
> + *   0 on success, or a negative value on error
> + */
> +int ivshmem_client_connect(IvshmemClient *client);
> +
> +/**
> + * Close connection to the server and free all peer structures
> + *
> + * @param client
> + *   The ivshmem client
> + */
> +void ivshmem_client_close(IvshmemClient *client);
> +
> +/**
> + * Fill a fd_set with file descriptors to be monitored
> + *
> + * This function will fill a fd_set with all file descriptors
> + * that must be polled (unix server socket and peers eventfd). The
> + * function will not initialize the fd_set, it is up to the caller
> + * to do this.
> + *
> + * @param client
> + *   The ivshmem client
> + * @param fds
> + *   The fd_set to be updated
> + * @param maxfd
> + *   Must be set to the max file descriptor + 1 in fd_set. This value is
> + *   updated if this function adds a greater fd in fd_set.
> + */
> +void ivshmem_client_get_fds(const IvshmemClient *client, fd_set *fds,
> +                            int *maxfd);
> +
> +/**
> + * Read and handle new messages
> + *
> + * Given a fd_set filled by select(), handle incoming messages from
> + * server or peers.
> + *
> + * @param client
> + *   The ivshmem client
> + * @param fds
> + *   The fd_set containing the file descriptors to be checked. Note
> + *   that file descriptors that are not related to our client are
> + *   ignored.
> + * @param maxfd
> + *   The maximum fd in fd_set, plus one.
> +  *
> + * @return
> + *   0 on success, negative value on failure.
> + */
> +int ivshmem_client_handle_fds(IvshmemClient *client, fd_set *fds, int maxfd);
> +
> +/**
> + * Send a notification to a vector of a peer
> + *
> + * @param client
> + *   The ivshmem client
> + * @param peer
> + *   The peer to be notified
> + * @param vector
> + *   The number of the vector
> + *
> + * @return
> + *   0 on success, and a negative error on failure.
> + */
> +int ivshmem_client_notify(const IvshmemClient *client,
> +                          const IvshmemClientPeer *peer, unsigned vector);
> +
> +/**
> + * Send a notification to all vectors of a peer
> + *
> + * @param client
> + *   The ivshmem client
> + * @param peer
> + *   The peer to be notified
> + *
> + * @return
> + *   0 on success, and a negative error on failure (at least one
> + *   notification failed).
> + */
> +int ivshmem_client_notify_all_vects(const IvshmemClient *client,
> +                                    const IvshmemClientPeer *peer);
> +
> +/**
> + * Broadcast a notification to all vectors of all peers
> + *
> + * @param client
> + *   The ivshmem client
> + *
> + * @return
> + *   0 on success, and a negative error on failure (at least one
> + *   notification failed).
> + */
> +int ivshmem_client_notify_broadcast(const IvshmemClient *client);
> +
> +/**
> + * Search a peer from its identifier
> + *
> + * Return the peer structure from its peer_id. If the given peer_id is
> + * the local id, the function returns the local peer structure.
> + *
> + * @param client
> + *   The ivshmem client
> + * @param peer_id
> + *   The identifier of the peer structure
> + *
> + * @return
> + *   The peer structure, or NULL if not found
> + */
> +IvshmemClientPeer *
> +ivshmem_client_search_peer(IvshmemClient *client, long peer_id);
> +
> +/**
> + * Dump information of this ivshmem client on stdout
> + *
> + * Dump the id and the vectors of the given ivshmem client and the list
> + * of its peers and their vectors on stdout.
> + *
> + * @param client
> + *   The ivshmem client
> + */
> +void ivshmem_client_dump(const IvshmemClient *client);
> +
> +#endif /* _IVSHMEM_CLIENT_ */
> diff --git a/contrib/ivshmem-client/main.c b/contrib/ivshmem-client/main.c
> new file mode 100644
> index 0000000..a8e1586
> --- /dev/null
> +++ b/contrib/ivshmem-client/main.c
> @@ -0,0 +1,237 @@
> +/*
> + * Copyright 6WIND S.A., 2014
> + *
> + * This work is licensed under the terms of the GNU GPL, version 2 or
> + * (at your option) any later version.  See the COPYING file in the
> + * top-level directory.
> + */
> +
> +#include "qemu-common.h"
> +
> +#include "ivshmem-client.h"
> +
> +#define DEFAULT_VERBOSE        0
> +#define DEFAULT_UNIX_SOCK_PATH "/tmp/ivshmem_socket"
> +
> +typedef struct IvshmemClientArgs {
> +    bool verbose;
> +    const char *unix_sock_path;
> +} IvshmemClientArgs;
> +
> +/* show usage and exit with given error code */
> +static void
> +usage(const char *name, int code)
> +{
> +    fprintf(stderr, "%s [opts]\n", name);
> +    fprintf(stderr, "  -h: show this help\n");
> +    fprintf(stderr, "  -v: verbose mode\n");
> +    fprintf(stderr, "  -S <unix_sock_path>: path to the unix socket\n"
> +                    "     to listen to.\n"
> +                    "     default=%s\n", DEFAULT_UNIX_SOCK_PATH);
> +    exit(code);
> +}
> +
> +/* parse the program arguments, exit on error */
> +static void
> +parse_args(IvshmemClientArgs *args, int argc, char *argv[])
> +{
> +    char c;
> +
> +    while ((c = getopt(argc, argv,
> +                       "h"  /* help */
> +                       "v"  /* verbose */
> +                       "S:" /* unix_sock_path */
> +                      )) != -1) {
> +
> +        switch (c) {
> +        case 'h': /* help */
> +            usage(argv[0], 0);
> +            break;
> +
> +        case 'v': /* verbose */
> +            args->verbose = 1;
> +            break;
> +
> +        case 'S': /* unix_sock_path */
> +            args->unix_sock_path = strdup(optarg);
> +            break;
> +
> +        default:
> +            usage(argv[0], 1);
> +            break;
> +        }
> +    }
> +}
> +
> +/* show command line help */
> +static void
> +cmdline_help(void)
> +{
> +    printf("dump: dump peers (including us)\n"
> +           "int <peer> <vector>: notify one vector on a peer\n"
> +           "int <peer> all: notify all vectors of a peer\n"
> +           "int all: notify all vectors of all peers (excepting us)\n");
> +}
> +
> +/* read stdin and handle commands */
> +static int
> +handle_stdin_command(IvshmemClient *client)
> +{
> +    IvshmemClientPeer *peer;
> +    char buf[128];
> +    char *s, *token;
> +    int ret;
> +    int peer_id, vector;
> +
> +    memset(buf, 0, sizeof(buf));
> +    ret = read(0, buf, sizeof(buf) - 1);
> +    if (ret < 0) {
> +        return -1;
> +    }
> +
> +    s = buf;
> +    while ((token = strsep(&s, "\n\r;")) != NULL) {
> +        if (!strcmp(token, "")) {
> +            continue;
> +        }
> +        if (!strcmp(token, "?")) {
> +            cmdline_help();
> +        }
> +        if (!strcmp(token, "help")) {
> +            cmdline_help();
> +        } else if (!strcmp(token, "dump")) {
> +            ivshmem_client_dump(client);
> +        } else if (!strcmp(token, "int all")) {
> +            ivshmem_client_notify_broadcast(client);
> +        } else if (sscanf(token, "int %d %d", &peer_id, &vector) == 2) {
> +            peer = ivshmem_client_search_peer(client, peer_id);
> +            if (peer == NULL) {
> +                printf("cannot find peer_id = %d\n", peer_id);
> +                continue;
> +            }
> +            ivshmem_client_notify(client, peer, vector);
> +        } else if (sscanf(token, "int %d all", &peer_id) == 1) {
> +            peer = ivshmem_client_search_peer(client, peer_id);
> +            if (peer == NULL) {
> +                printf("cannot find peer_id = %d\n", peer_id);
> +                continue;
> +            }
> +            ivshmem_client_notify_all_vects(client, peer);
> +        } else {
> +            printf("invalid command, type help\n");
> +        }
> +    }
> +
> +    printf("cmd> ");
> +    fflush(stdout);
> +    return 0;
> +}
> +
> +/* listen on stdin (command line), on unix socket (notifications of new
> + * and dead peers), and on eventfd (IRQ request) */
> +static int
> +poll_events(IvshmemClient *client)
> +{
> +    fd_set fds;
> +    int ret, maxfd;
> +
> +    while (1) {
> +
> +        FD_ZERO(&fds);
> +        FD_SET(0, &fds); /* add stdin in fd_set */
> +        maxfd = 1;
> +
> +        ivshmem_client_get_fds(client, &fds, &maxfd);
> +
> +        ret = select(maxfd, &fds, NULL, NULL, NULL);
> +        if (ret < 0) {
> +            if (errno == EINTR) {
> +                continue;
> +            }
> +
> +            fprintf(stderr, "select error: %s\n", strerror(errno));
> +            break;
> +        }
> +        if (ret == 0) {
> +            continue;
> +        }
> +
> +        if (FD_ISSET(0, &fds) &&
> +            handle_stdin_command(client) < 0 && errno != EINTR) {
> +            fprintf(stderr, "handle_stdin_command() failed\n");
> +            break;
> +        }
> +
> +        if (ivshmem_client_handle_fds(client, &fds, maxfd) < 0) {
> +            fprintf(stderr, "ivshmem_client_handle_fds() failed\n");
> +            break;
> +        }
> +    }
> +
> +    return ret;
> +}
> +
> +/* callback when we receive a notification (just display it) */
> +static void
> +notification_cb(const IvshmemClient *client, const IvshmemClientPeer *peer,
> +                unsigned vect, void *arg)
> +{
> +    (void)client;
> +    (void)arg;
> +    printf("receive notification from peer_id=%ld vector=%d\n", peer->id, vect);
> +}
> +
> +int
> +main(int argc, char *argv[])
> +{
> +    struct sigaction sa;
> +    IvshmemClient client;
> +    IvshmemClientArgs args = {
> +        .verbose = DEFAULT_VERBOSE,
> +        .unix_sock_path = DEFAULT_UNIX_SOCK_PATH,
> +    };
> +
> +    /* parse arguments, will exit on error */
> +    parse_args(&args, argc, argv);
> +
> +    /* Ignore SIGPIPE, see this link for more info:
> +     * http://www.mail-archive.com/libevent-users@xxxxxxxxxx/msg01606.html */
> +    sa.sa_handler = SIG_IGN;
> +    sa.sa_flags = 0;
> +    if (sigemptyset(&sa.sa_mask) == -1 ||
> +        sigaction(SIGPIPE, &sa, 0) == -1) {
> +        perror("failed to ignore SIGPIPE; sigaction");
> +        return 1;
> +    }
> +
> +    cmdline_help();
> +    printf("cmd> ");
> +    fflush(stdout);
> +
> +    if (ivshmem_client_init(&client, args.unix_sock_path, notification_cb,
> +                            NULL, args.verbose) < 0) {
> +        fprintf(stderr, "cannot init client\n");
> +        return 1;
> +    }
> +
> +    while (1) {
> +        if (ivshmem_client_connect(&client) < 0) {
> +            fprintf(stderr, "cannot connect to server, retry in 1 second\n");
> +            sleep(1);
> +            continue;
> +        }
> +
> +        fprintf(stdout, "listen on server socket %d\n", client.sock_fd);
> +
> +        if (poll_events(&client) == 0) {
> +            continue;
> +        }
> +
> +        /* disconnected from server, reset all peers */
> +        fprintf(stdout, "disconnected from server\n");
> +
> +        ivshmem_client_close(&client);
> +    }
> +
> +    return 0;
> +}
> diff --git a/contrib/ivshmem-server/ivshmem-server.c b/contrib/ivshmem-server/ivshmem-server.c
> new file mode 100644
> index 0000000..f441da7
> --- /dev/null
> +++ b/contrib/ivshmem-server/ivshmem-server.c
> @@ -0,0 +1,395 @@
> +/*
> + * Copyright 6WIND S.A., 2014
> + *
> + * This work is licensed under the terms of the GNU GPL, version 2 or
> + * (at your option) any later version.  See the COPYING file in the
> + * top-level directory.
> + */
> +
> +#include <sys/mman.h>
> +#include <sys/types.h>
> +#include <sys/socket.h>
> +#include <sys/un.h>
> +#include <sys/eventfd.h>
> +
> +#include "qemu-common.h"
> +#include "qemu/queue.h"
> +
> +#include "ivshmem-server.h"
> +
> +/* log a message on stdout if verbose=1 */
> +#define debug_log(server, fmt, ...) do { \
> +        if ((server)->verbose) {         \
> +            printf(fmt, ## __VA_ARGS__); \
> +        }                                \
> +    } while (0)

macros must be UPPERCASE

> +
> +/** maximum size of a huge page, used by ivshmem_ftruncate() */
> +#define MAX_HUGEPAGE_SIZE (1024 * 1024 * 1024)

Pls prefix all macros with IVSHMEM_SERVER, don't pollute global
namespace.

> +
> +/** default listen backlog (number of sockets not accepted) */
> +#define IVSHMEM_SERVER_LISTEN_BACKLOG 10
> +
> +/* send message to a client unix socket */
> +static int
> +send_one_msg(int sock_fd, long peer_id, int fd)
> +{
> +    int ret;
> +    struct msghdr msg;
> +    struct iovec iov[1];
> +    union {
> +        struct cmsghdr cmsg;
> +        char control[CMSG_SPACE(sizeof(int))];
> +    } msg_control;
> +    struct cmsghdr *cmsg;
> +
> +    iov[0].iov_base = &peer_id;
> +    iov[0].iov_len = sizeof(peer_id);
> +
> +    memset(&msg, 0, sizeof(msg));
> +    msg.msg_iov = iov;
> +    msg.msg_iovlen = 1;
> +
> +    /* if fd is specified, add it in a cmsg */
> +    if (fd >= 0) {
> +        msg.msg_control = &msg_control;
> +        msg.msg_controllen = sizeof(msg_control);
> +        cmsg = CMSG_FIRSTHDR(&msg);
> +        cmsg->cmsg_level = SOL_SOCKET;
> +        cmsg->cmsg_type = SCM_RIGHTS;
> +        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
> +        memcpy(CMSG_DATA(cmsg), &fd, sizeof(fd));
> +    }
> +
> +    ret = sendmsg(sock_fd, &msg, 0);
> +    if (ret <= 0) {
> +        return -1;
> +    }
> +
> +    return 0;
> +}
> +
> +/* free a peer when the server advertise a disconnection or when the

s/advertise/advertises/

> + * server is freed */
> +static void
> +free_peer(IvshmemServer *server, IvshmemServerPeer *peer)
> +{
> +    unsigned vector;
> +    IvshmemServerPeer *other_peer;
> +
> +    debug_log(server, "free peer %ld\n", peer->id);
> +    close(peer->sock_fd);
> +    QTAILQ_REMOVE(&server->peer_list, peer, next);
> +
> +    /* advertise the deletion to other peers */
> +    QTAILQ_FOREACH(other_peer, &server->peer_list, next) {
> +        send_one_msg(other_peer->sock_fd, peer->id, -1);
> +    }
> +
> +    for (vector = 0; vector < peer->vectors_count; vector++) {
> +        close(peer->vectors[vector]);
> +    }
> +
> +    g_free(peer);
> +}
> +
> +/* send the peer id and the shm_fd just after a new client connection */
> +static int
> +send_initial_info(IvshmemServer *server, IvshmemServerPeer *peer)
> +{
> +    int ret;
> +
> +    /* send the peer id to the client */
> +    ret = send_one_msg(peer->sock_fd, peer->id, -1);
> +    if (ret < 0) {
> +        debug_log(server, "cannot send peer id: %s\n", strerror(errno));
> +        return -1;
> +    }
> +
> +    /* send the shm_fd */
> +    ret = send_one_msg(peer->sock_fd, -1, server->shm_fd);
> +    if (ret < 0) {
> +        debug_log(server, "cannot send shm fd: %s\n", strerror(errno));
> +        return -1;
> +    }
> +
> +    return 0;
> +}
> +
> +/* handle message on listening unix socket (new client connection) */
> +static int
> +handle_new_conn(IvshmemServer *server)
> +{
> +    IvshmemServerPeer *peer, *other_peer;
> +    struct sockaddr_un unaddr;
> +    socklen_t unaddr_len;
> +    int newfd;
> +    unsigned i;
> +
> +    /* accept the incoming connection */
> +    unaddr_len = sizeof(unaddr);
> +    newfd = accept4(server->sock_fd, (struct sockaddr *)&unaddr, &unaddr_len,
> +                    SOCK_NONBLOCK);
> +    if (newfd < 0) {
> +        debug_log(server, "cannot accept() %s\n", strerror(errno));
> +        return -1;
> +    }
> +
> +    debug_log(server, "accept()=%d\n", newfd);
> +
> +    /* allocate new structure for this peer */
> +    peer = g_malloc0(sizeof(*peer));
> +    peer->sock_fd = newfd;
> +
> +    /* get an unused peer id */
> +    while (ivshmem_server_search_peer(server, server->cur_id) != NULL) {
> +        server->cur_id++;
> +    }
> +    peer->id = server->cur_id++;
> +
> +    /* create eventfd, one per vector */
> +    peer->vectors_count = server->n_vectors;
> +    for (i = 0; i < peer->vectors_count; i++) {
> +        peer->vectors[i] = eventfd(0, 0);
> +        if (peer->vectors[i] < 0) {
> +            debug_log(server, "cannot create eventfd\n");
> +            goto fail;
> +        }
> +    }
> +
> +    /* send peer id and shm fd */
> +    if (send_initial_info(server, peer) < 0) {
> +        debug_log(server, "cannot send initial info\n");
> +        goto fail;
> +    }
> +
> +    /* advertise the new peer to others */
> +    QTAILQ_FOREACH(other_peer, &server->peer_list, next) {
> +        for (i = 0; i < peer->vectors_count; i++) {
> +            send_one_msg(other_peer->sock_fd, peer->id, peer->vectors[i]);
> +        }
> +    }
> +
> +    /* advertise the other peers to the new one */
> +    QTAILQ_FOREACH(other_peer, &server->peer_list, next) {
> +        for (i = 0; i < peer->vectors_count; i++) {
> +            send_one_msg(peer->sock_fd, other_peer->id, other_peer->vectors[i]);
> +        }
> +    }
> +
> +    /* advertise the new peer to itself */
> +    for (i = 0; i < peer->vectors_count; i++) {
> +        send_one_msg(peer->sock_fd, peer->id, peer->vectors[i]);
> +    }
> +
> +    QTAILQ_INSERT_TAIL(&server->peer_list, peer, next);
> +    debug_log(server, "new peer id = %ld\n", peer->id);
> +    return 0;
> +
> +fail:
> +    while (i--) {
> +        close(peer->vectors[i]);
> +    }
> +    close(newfd);
> +    g_free(peer);
> +    return -1;
> +}
> +
> +/* Try to ftruncate a file to next power of 2 of shmsize.
> + * If it fails; all power of 2 above shmsize are tested until
> + * we reach the maximum huge page size. This is useful
> + * if the shm file is in a hugetlbfs that cannot be truncated to the
> + * shm_size value. */
> +static int
> +ivshmem_ftruncate(int fd, unsigned shmsize)
> +{
> +    int ret;
> +
> +    /* align shmsize to next power of 2 */
> +    shmsize--;
> +    shmsize |= shmsize >> 1;
> +    shmsize |= shmsize >> 2;
> +    shmsize |= shmsize >> 4;
> +    shmsize |= shmsize >> 8;
> +    shmsize |= shmsize >> 16;
> +    shmsize++;
> +
> +    while (shmsize <= MAX_HUGEPAGE_SIZE) {
> +        ret = ftruncate(fd, shmsize);
> +        if (ret == 0) {
> +            return ret;
> +        }
> +        shmsize *= 2;
> +    }
> +
> +    return -1;
> +}
> +
> +/* Init a new ivshmem server */
> +int
> +ivshmem_server_init(IvshmemServer *server, const char *unix_sock_path,
> +                    const char *shm_path, size_t shm_size, unsigned n_vectors,
> +                    bool verbose)
> +{
> +    memset(server, 0, sizeof(*server));
> +
> +    snprintf(server->unix_sock_path, sizeof(server->unix_sock_path),
> +             "%s", unix_sock_path);
> +    snprintf(server->shm_path, sizeof(server->shm_path),
> +             "%s", shm_path);
> +
> +    server->shm_size = shm_size;
> +    server->n_vectors = n_vectors;
> +    server->verbose = verbose;
> +
> +    QTAILQ_INIT(&server->peer_list);
> +
> +    return 0;
> +}
> +
> +/* open shm, create and bind to the unix socket */
> +int
> +ivshmem_server_start(IvshmemServer *server)
> +{
> +    struct sockaddr_un sun;
> +    int shm_fd, sock_fd;
> +
> +    /* open shm file */
> +    shm_fd = shm_open(server->shm_path, O_CREAT|O_RDWR, S_IRWXU);
> +    if (shm_fd < 0) {
> +        fprintf(stderr, "cannot open shm file %s: %s\n", server->shm_path,
> +                strerror(errno));
> +        return -1;
> +    }
> +    if (ivshmem_ftruncate(shm_fd, server->shm_size) < 0) {
> +        fprintf(stderr, "ftruncate(%s) failed: %s\n", server->shm_path,
> +                strerror(errno));
> +        goto err_close_shm;
> +    }
> +
> +    debug_log(server, "create & bind socket %s\n", server->unix_sock_path);
> +
> +    /* create the unix listening socket */
> +    sock_fd = socket(AF_UNIX, SOCK_STREAM, 0);
> +    if (sock_fd < 0) {
> +        debug_log(server, "cannot create socket: %s\n", strerror(errno));
> +        goto err_close_shm;
> +    }
> +
> +    sun.sun_family = AF_UNIX;
> +    snprintf(sun.sun_path, sizeof(sun.sun_path), "%s", server->unix_sock_path);
> +    unlink(sun.sun_path);

why unlink it?

> +    if (bind(sock_fd, (struct sockaddr *)&sun, sizeof(sun)) < 0) {
> +        debug_log(server, "cannot connect to %s: %s\n", sun.sun_path,
> +                  strerror(errno));
> +        goto err_close_sock;
> +    }
> +
> +    if (listen(sock_fd, IVSHMEM_SERVER_LISTEN_BACKLOG) < 0) {
> +        debug_log(server, "listen() failed: %s\n", strerror(errno));
> +        goto err_close_sock;
> +    }
> +
> +    server->sock_fd = sock_fd;
> +    server->shm_fd = shm_fd;
> +
> +    return 0;
> +
> +err_close_sock:
> +    close(sock_fd);
> +err_close_shm:
> +    close(shm_fd);
> +    return -1;
> +}
> +
> +/* close connections to clients, the unix socket and the shm fd */
> +void
> +ivshmem_server_close(IvshmemServer *server)
> +{
> +    IvshmemServerPeer *peer;
> +
> +    debug_log(server, "close server\n");
> +
> +    QTAILQ_FOREACH(peer, &server->peer_list, next) {
> +        free_peer(server, peer);
> +    }
> +
> +    close(server->sock_fd);
> +    close(server->shm_fd);
> +    server->sock_fd = -1;
> +    server->shm_fd = -1;
> +}
> +
> +/* get the fd_set according to the unix socket and the peer list */
> +void
> +ivshmem_server_get_fds(const IvshmemServer *server, fd_set *fds, int *maxfd)
> +{
> +    IvshmemServerPeer *peer;
> +
> +    FD_SET(server->sock_fd, fds);
> +    if (server->sock_fd >= *maxfd) {
> +        *maxfd = server->sock_fd + 1;
> +    }
> +
> +    QTAILQ_FOREACH(peer, &server->peer_list, next) {
> +        FD_SET(peer->sock_fd, fds);
> +        if (peer->sock_fd >= *maxfd) {
> +            *maxfd = peer->sock_fd + 1;
> +        }
> +    }
> +}
> +
> +/* process incoming messages on the sockets in fd_set */
> +int
> +ivshmem_server_handle_fds(IvshmemServer *server, fd_set *fds, int maxfd)
> +{
> +    IvshmemServerPeer *peer, *peer_next;
> +
> +    if (server->sock_fd < maxfd && FD_ISSET(server->sock_fd, fds) &&
> +        handle_new_conn(server) < 0 && errno != EINTR) {
> +        debug_log(server, "handle_new_conn() failed\n");
> +        return -1;
> +    }
> +
> +    QTAILQ_FOREACH_SAFE(peer, &server->peer_list, next, peer_next) {
> +        /* any message from a peer socket result in a close() */
> +        debug_log(server, "peer->sock_fd=%d\n", peer->sock_fd);
> +        if (peer->sock_fd < maxfd && FD_ISSET(peer->sock_fd, fds)) {
> +            free_peer(server, peer);
> +        }
> +    }
> +
> +    return 0;
> +}
> +
> +/* lookup peer from its id */
> +IvshmemServerPeer *
> +ivshmem_server_search_peer(IvshmemServer *server, long peer_id)
> +{
> +    IvshmemServerPeer *peer;
> +
> +    QTAILQ_FOREACH(peer, &server->peer_list, next) {
> +        if (peer->id == peer_id) {
> +            return peer;
> +        }
> +    }
> +    return NULL;
> +}
> +
> +/* dump our info, the list of peers their vectors on stdout */
> +void
> +ivshmem_server_dump(const IvshmemServer *server)
> +{
> +    const IvshmemServerPeer *peer;
> +    unsigned vector;
> +
> +    /* dump peers */
> +    QTAILQ_FOREACH(peer, &server->peer_list, next) {
> +        printf("peer_id = %ld\n", peer->id);
> +
> +        for (vector = 0; vector < peer->vectors_count; vector++) {
> +            printf("  vector %d is enabled (fd=%d)\n", vector,
> +                   peer->vectors[vector]);
> +        }
> +    }
> +}
> diff --git a/contrib/ivshmem-server/ivshmem-server.h b/contrib/ivshmem-server/ivshmem-server.h
> new file mode 100644
> index 0000000..5ccc7af
> --- /dev/null
> +++ b/contrib/ivshmem-server/ivshmem-server.h
> @@ -0,0 +1,186 @@
> +/*
> + * Copyright 6WIND S.A., 2014
> + *
> + * This work is licensed under the terms of the GNU GPL, version 2 or
> + * (at your option) any later version.  See the COPYING file in the
> + * top-level directory.
> + */
> +
> +#ifndef _IVSHMEM_SERVER_
> +#define _IVSHMEM_SERVER_
> +
> +/**
> + * The ivshmem server is a daemon that creates a unix socket in listen
> + * mode. The ivshmem clients (qemu or ivshmem-client) connect to this
> + * unix socket. For each client, the server will create some eventfd
> + * (see EVENTFD(2)), one per vector. These fd are transmitted to all
> + * clients using the SCM_RIGHTS cmsg message. Therefore, each client is
> + * able to send a notification to another client without being
> + * "proxied" by the server.
> + *
> + * We use this mechanism to send interrupts between guests.
> + * qemu is able to transform an event on an eventfd into a PCI MSI-x
> + * interrupt in the guest.
> + *
> + * The ivshmem server is also able to share the file descriptor
> + * associated to the ivshmem shared memory.
> + */
> +
> +#include <limits.h>
> +#include <sys/select.h>
> +
> +#include "qemu/queue.h"
> +
> +/**
> + * Maximum number of notification vectors supported by the server
> + */
> +#define IVSHMEM_SERVER_MAX_VECTORS 64
> +
> +/**
> + * Structure storing a peer
> + *
> + * Each time a client connects to an ivshmem server, a new
> + * IvshmemServerPeer structure is created. This peer and all its
> + * vectors are advertised to all connected clients through the connected
> + * unix sockets.
> + */
> +typedef struct IvshmemServerPeer {
> +    QTAILQ_ENTRY(IvshmemServerPeer) next;    /**< next in list*/
> +    int sock_fd;                             /**< connected unix sock */
> +    long id;                                 /**< the id of the peer */
> +    int vectors[IVSHMEM_SERVER_MAX_VECTORS]; /**< one fd per vector */
> +    unsigned vectors_count;                  /**< number of vectors */
> +} IvshmemServerPeer;
> +QTAILQ_HEAD(IvshmemServerPeerList, IvshmemServerPeer);
> +
> +typedef struct IvshmemServerPeerList IvshmemServerPeerList;
> +
> +/**
> + * Structure describing an ivshmem server
> + *
> + * This structure stores all information related to our server: the name
> + * of the server unix socket and the list of connected peers.
> + */
> +typedef struct IvshmemServer {
> +    char unix_sock_path[PATH_MAX];   /**< path to unix socket */
> +    int sock_fd;                     /**< unix sock file descriptor */
> +    char shm_path[PATH_MAX];         /**< path to shm */
> +    size_t shm_size;                 /**< size of shm */
> +    int shm_fd;                      /**< shm file descriptor */
> +    unsigned n_vectors;              /**< number of vectors */
> +    long cur_id;                     /**< id to be given to next client */
> +    bool verbose;                    /**< true in verbose mode */
> +    IvshmemServerPeerList peer_list; /**< list of peers */
> +} IvshmemServer;
> +
> +/**
> + * Initialize an ivshmem server
> + *
> + * @param server
> + *   A pointer to an uninitialized IvshmemServer structure
> + * @param unix_sock_path
> + *   The pointer to the unix socket file name
> + * @param shm_path
> + *   Path to the shared memory. The path corresponds to a POSIX shm name.
> + *   To use a real file, for instance in a hugetlbfs, it is possible to
> + *   use /../../abspath/to/file.
> + * @param shm_size
> + *   Size of shared memory
> + * @param n_vectors
> + *   Number of interrupt vectors per client
> + * @param verbose
> + *   True to enable verbose mode
> + *
> + * @return
> + *   0 on success, negative value on error
> + */
> +int
> +ivshmem_server_init(IvshmemServer *server, const char *unix_sock_path,
> +                    const char *shm_path, size_t shm_size, unsigned n_vectors,
> +                    bool verbose);
> +
> +/**
> + * Open the shm, then create and bind to the unix socket
> + *
> + * @param server
> + *   The pointer to the initialized IvshmemServer structure
> + *
> + * @return
> + *   0 on success, or a negative value on error
> + */
> +int ivshmem_server_start(IvshmemServer *server);
> +
> +/**
> + * Close the server
> + *
> + * Close connections to all clients, close the unix socket and the
> + * shared memory file descriptor. The structure remains initialized, so
> + * it is possible to call ivshmem_server_start() again after a call to
> + * ivshmem_server_close().
> + *
> + * @param server
> + *   The ivshmem server
> + */
> +void ivshmem_server_close(IvshmemServer *server);
> +
> +/**
> + * Fill a fd_set with file descriptors to be monitored
> + *
> + * This function will fill a fd_set with all file descriptors that must
> + * be polled (unix server socket and peers unix socket). The function
> + * will not initialize the fd_set, it is up to the caller to do it.
> + *
> + * @param server
> + *   The ivshmem server
> + * @param fds
> + *   The fd_set to be updated
> + * @param maxfd
> + *   Must be set to the max file descriptor + 1 in fd_set. This value is
> + *   updated if this function adds a greater fd in fd_set.
> + */
> +void
> +ivshmem_server_get_fds(const IvshmemServer *server, fd_set *fds, int *maxfd);
> +
> +/**
> + * Read and handle new messages
> + *
> + * Given a fd_set (for instance filled by a call to select()), handle
> + * incoming messages from peers.
> + *
> + * @param server
> + *   The ivshmem server
> + * @param fds
> + *   The fd_set containing the file descriptors to be checked. Note
> + *   that file descriptors that are not related to our server are
> + *   ignored.
> + * @param maxfd
> + *   The maximum fd in fd_set, plus one.
> + *
> + * @return
> + *   0 on success, negative value on failure.
> + */
> +int ivshmem_server_handle_fds(IvshmemServer *server, fd_set *fds, int maxfd);
> +
> +/**
> + * Search a peer from its identifier
> + *
> + * @param server
> + *   The ivshmem server
> + * @param peer_id
> + *   The identifier of the peer structure
> + *
> + * @return
> + *   The peer structure, or NULL if not found
> + */
> +IvshmemServerPeer *
> +ivshmem_server_search_peer(IvshmemServer *server, long peer_id);
> +
> +/**
> + * Dump information of this ivshmem server and its peers on stdout
> + *
> + * @param server
> + *   The ivshmem server
> + */
> +void ivshmem_server_dump(const IvshmemServer *server);
> +
> +#endif /* _IVSHMEM_SERVER_ */
> diff --git a/contrib/ivshmem-server/main.c b/contrib/ivshmem-server/main.c
> new file mode 100644
> index 0000000..31a4f98
> --- /dev/null
> +++ b/contrib/ivshmem-server/main.c
> @@ -0,0 +1,244 @@
> +/*
> + * Copyright 6WIND S.A., 2014
> + *
> + * This work is licensed under the terms of the GNU GPL, version 2 or
> + * (at your option) any later version.  See the COPYING file in the
> + * top-level directory.
> + */
> +
> +#include "qemu-common.h"
> +
> +#include "ivshmem-server.h"
> +
> +#define DEFAULT_VERBOSE        0
> +#define DEFAULT_FOREGROUND     0
> +#define DEFAULT_PID_FILE       "/var/run/ivshmem-server.pid"
> +#define DEFAULT_UNIX_SOCK_PATH "/tmp/ivshmem_socket"
> +#define DEFAULT_SHM_PATH       "ivshmem"
> +#define DEFAULT_SHM_SIZE       (4*1024*1024)
> +#define DEFAULT_N_VECTORS      1
> +
> +/* arguments given by the user */
> +typedef struct IvshmemServerArgs {
> +    bool verbose;
> +    bool foreground;
> +    const char *pid_file;
> +    const char *unix_socket_path;
> +    const char *shm_path;
> +    size_t shm_size;
> +    unsigned n_vectors;
> +} IvshmemServerArgs;
> +
> +/* show usage and exit with given error code */
> +static void
> +usage(const char *name, int code)
> +{
> +    fprintf(stderr, "%s [opts]\n", name);
> +    fprintf(stderr, "  -h: show this help\n");
> +    fprintf(stderr, "  -v: verbose mode\n");
> +    fprintf(stderr, "  -F: foreground mode (default is to daemonize)\n");
> +    fprintf(stderr, "  -p <pid_file>: path to the PID file (used in daemon\n"
> +                    "     mode only).\n"
> +                    "     Default=%s\n", DEFAULT_SHM_PATH);
> +    fprintf(stderr, "  -S <unix_socket_path>: path to the unix socket\n"
> +                    "     to listen to.\n"
> +                    "     Default=%s\n", DEFAULT_UNIX_SOCK_PATH);
> +    fprintf(stderr, "  -m <shm_path>: path to the shared memory.\n"
> +                    "     The path corresponds to a POSIX shm name. To use a\n"
> +                    "     real file, for instance in a hugetlbfs, use\n"
> +                    "     /../../abspath/to/file.\n"
> +                    "     default=%s\n", DEFAULT_SHM_PATH);
> +    fprintf(stderr, "  -l <size>: size of shared memory in bytes. The suffix\n"
> +                    "     K, M and G can be used (ex: 1K means 1024).\n"
> +                    "     default=%u\n", DEFAULT_SHM_SIZE);
> +    fprintf(stderr, "  -n <n_vects>: number of vectors.\n"
> +                    "     default=%u\n", DEFAULT_N_VECTORS);
> +
> +    exit(code);
> +}
> +
> +/* parse the program arguments, exit on error */
> +static void
> +parse_args(IvshmemServerArgs *args, int argc, char *argv[])
> +{
> +    char c;
> +    unsigned long long v;
> +    Error *errp;
> +
> +    while ((c = getopt(argc, argv,
> +                       "h"  /* help */
> +                       "v"  /* verbose */
> +                       "F"  /* foreground */
> +                       "p:" /* pid_file */
> +                       "S:" /* unix_socket_path */
> +                       "m:" /* shm_path */
> +                       "l:" /* shm_size */
> +                       "n:" /* n_vectors */
> +                      )) != -1) {
> +
> +        switch (c) {
> +        case 'h': /* help */
> +            usage(argv[0], 0);
> +            break;
> +
> +        case 'v': /* verbose */
> +            args->verbose = 1;
> +            break;
> +
> +        case 'F': /* foreground */
> +            args->foreground = 1;
> +            break;
> +
> +        case 'p': /* pid_file */
> +            args->pid_file = strdup(optarg);
> +            break;
> +
> +        case 'S': /* unix_socket_path */
> +            args->unix_socket_path = strdup(optarg);
> +            break;
> +
> +        case 'm': /* shm_path */
> +            args->shm_path = strdup(optarg);
> +            break;
> +
> +        case 'l': /* shm_size */
> +            parse_option_size("shm_size", optarg, &args->shm_size, &errp);
> +            if (errp) {
> +                fprintf(stderr, "cannot parse shm size: %s\n",
> +                        error_get_pretty(errp));
> +                error_free(errp);
> +                usage(argv[0], 1);
> +            }
> +            break;
> +
> +        case 'n': /* n_vectors */
> +            if (parse_uint_full(optarg, &v, 0) < 0) {
> +                fprintf(stderr, "cannot parse n_vectors\n");
> +                usage(argv[0], 1);
> +            }
> +            args->n_vectors = v;
> +            break;
> +
> +        default:
> +            usage(argv[0], 1);
> +            break;
> +        }
> +    }
> +
> +    if (args->n_vectors > IVSHMEM_SERVER_MAX_VECTORS) {
> +        fprintf(stderr, "too many requested vectors (max is %d)\n",
> +                IVSHMEM_SERVER_MAX_VECTORS);
> +        usage(argv[0], 1);
> +    }
> +
> +    if (args->verbose == 1 && args->foreground == 0) {
> +        fprintf(stderr, "cannot use verbose in daemon mode\n");
> +        usage(argv[0], 1);
> +    }
> +}
> +
> +/* wait for events on listening server unix socket and connected client
> + * sockets */
> +static int
> +poll_events(IvshmemServer *server)
> +{
> +    fd_set fds;
> +    int ret, maxfd;
> +
> +    while (1) {
> +
> +        FD_ZERO(&fds);
> +        maxfd = 0;
> +        ivshmem_server_get_fds(server, &fds, &maxfd);
> +
> +        ret = select(maxfd, &fds, NULL, NULL, NULL);
> +
> +        if (ret < 0) {
> +            if (errno == EINTR) {
> +                continue;
> +            }
> +
> +            fprintf(stderr, "select error: %s\n", strerror(errno));
> +            break;
> +        }
> +        if (ret == 0) {
> +            continue;
> +        }
> +
> +        if (ivshmem_server_handle_fds(server, &fds, maxfd) < 0) {
> +            fprintf(stderr, "ivshmem_server_handle_fds() failed\n");
> +            break;
> +        }
> +    }
> +
> +    return ret;
> +}
> +
> +int
> +main(int argc, char *argv[])
> +{
> +    IvshmemServer server;
> +    struct sigaction sa;
> +    IvshmemServerArgs args = {
> +        .verbose = DEFAULT_VERBOSE,
> +        .foreground = DEFAULT_FOREGROUND,
> +        .pid_file = DEFAULT_PID_FILE,
> +        .unix_socket_path = DEFAULT_UNIX_SOCK_PATH,
> +        .shm_path = DEFAULT_SHM_PATH,
> +        .shm_size = DEFAULT_SHM_SIZE,
> +        .n_vectors = DEFAULT_N_VECTORS,
> +    };
> +
> +    /* parse arguments, will exit on error */
> +    parse_args(&args, argc, argv);
> +
> +    /* Ignore SIGPIPE, see this link for more info:
> +     * http://www.mail-archive.com/libevent-users@xxxxxxxxxx/msg01606.html */
> +    sa.sa_handler = SIG_IGN;
> +    sa.sa_flags = 0;
> +    if (sigemptyset(&sa.sa_mask) == -1 ||
> +        sigaction(SIGPIPE, &sa, 0) == -1) {
> +        perror("failed to ignore SIGPIPE; sigaction");
> +        return 1;
> +    }
> +
> +    /* init the ivshmem structure */
> +    if (ivshmem_server_init(&server, args.unix_socket_path, args.shm_path,
> +                            args.shm_size, args.n_vectors, args.verbose) < 0) {
> +        fprintf(stderr, "cannot init server\n");
> +        return 1;
> +    }
> +
> +    /* start the ivshmem server (open shm & unix socket) */
> +    if (ivshmem_server_start(&server) < 0) {
> +        fprintf(stderr, "cannot bind\n");
> +        return 1;
> +    }
> +
> +    /* daemonize if asked to */
> +    if (!args.foreground) {
> +        FILE *fp;
> +
> +        if (daemon(1, 1) < 0) {
> +            fprintf(stderr, "cannot daemonize: %s\n", strerror(errno));
> +            return 1;
> +        }
> +
> +        /* write pid file */
> +        fp = fopen(args.pid_file, "w");
> +        if (fp == NULL) {
> +            fprintf(stderr, "cannot write pid file: %s\n", strerror(errno));
> +            return 1;
> +        }
> +
> +        fprintf(fp, "%d\n", (int) getpid());
> +        fclose(fp);
> +    }
> +
> +    poll_events(&server);
> +
> +    fprintf(stdout, "server disconnected\n");
> +    ivshmem_server_close(&server);
> +
> +    return 0;
> +}
> diff --git a/qemu-doc.texi b/qemu-doc.texi
> index 2b232ae..380d573 100644
> --- a/qemu-doc.texi
> +++ b/qemu-doc.texi
> @@ -1250,9 +1250,13 @@ is qemu.git/contrib/ivshmem-server.  An example syntax when using the shared
>  memory server is:
>  
>  @example
> -qemu-system-i386 -device ivshmem,size=<size in format accepted by -m>[,chardev=<id>]
> -                 [,msi=on][,ioeventfd=on][,vectors=n][,role=peer|master]
> -qemu-system-i386 -chardev socket,path=<path>,id=<id>
> +# First start the ivshmem server once and for all
> +ivshmem-server -p <pidfile> -S <path> -m <shm name> -l <shm size> -n <vectors n>
> +
> +# Then start your qemu instances with matching arguments
> +qemu-system-i386 -device ivshmem,size=<shm size>,vectors=<vectors n>,chardev=<id>
> +                 [,msi=on][,ioeventfd=on][,role=peer|master]
> +                 -chardev socket,path=<path>,id=<id>
>  @end example
>  
>  When using the server, the guest will be assigned a VM ID (>=0) that allows guests
> -- 
> 1.7.10.4
> 
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html




[Index of Archives]     [KVM ARM]     [KVM ia64]     [KVM ppc]     [Virtualization Tools]     [Spice Development]     [Libvirt]     [Libvirt Users]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite Questions]     [Linux Kernel]     [Linux SCSI]     [XFree86]
  Powered by Linux